diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index ce47fa36103f45..6a9bbd67997311 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -52,6 +52,8 @@
 /test/test262/data
 /test/test262/harness
 /test/wasm-js/data
+/test/wasm-js/tests
+/test/wasm-js/tests.tar.gz
 /test/wasm-spec-tests/tests
 /test/wasm-spec-tests/tests.tar.gz
 /third_party/*
diff --git a/deps/v8/.gn b/deps/v8/.gn
index 328778fb46bcdf..d4ad959954845f 100644
--- a/deps/v8/.gn
+++ b/deps/v8/.gn
@@ -16,4 +16,5 @@ check_targets = []
 # These are the list of GN files that run exec_script. This whitelist exists
 # to force additional review for new uses of exec_script, which is strongly
 # discouraged except for gypi_to_gn calls.
-exec_script_whitelist = build_dotfile_settings.exec_script_whitelist + []
+exec_script_whitelist = build_dotfile_settings.exec_script_whitelist +
+                        [ "//build_overrides/build.gni" ]
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 827d124b0dcf0d..1198de8f358fbc 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -39,6 +39,7 @@ Vewd Software AS <*@vewd.com>
 Groupon <*@groupon.com>
 Meteor Development Group <*@meteor.com>
 Cloudflare, Inc. <*@cloudflare.com>
+Julia Computing, Inc. <*@juliacomputing.com>
 
 Aaron Bieber <deftly@gmail.com>
 Abdulla Kamar <abdulla.kamar@gmail.com>
@@ -74,6 +75,7 @@ Colin Ihrig <cjihrig@gmail.com>
 Daniel Andersson <kodandersson@gmail.com>
 Daniel Bevenius <daniel.bevenius@gmail.com>
 Daniel James <dnljms@gmail.com>
+David Carlier <devnexen@gmail.com>
 Deepak Mohan <hop2deep@gmail.com>
 Deon Dior <diaoyuanjie@gmail.com>
 Dominic Farolini <domfarolino@gmail.com>
@@ -163,6 +165,7 @@ Rob Wu <rob@robwu.nl>
 Robert Meijer <robert.s.meijer@gmail.com>
 Robert Mustacchi <rm@fingolfin.org>
 Robert Nagy <robert.nagy@gmail.com>
+Rong Wang <wangrong089@gmail.com>
 Ross Kirsling <rkirsling@gmail.com>
 Ruben Bridgewater <ruben@bridgewater.de>
 Ryan Dahl <ry@tinyclouds.org>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index efca4a626f1633..c486f1a1c86abc 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -91,7 +91,7 @@ declare_args() {
   # Enable embedded builtins.
   v8_enable_embedded_builtins = true
 
-  # Enable the registration of unwinding info for Windows/x64.
+  # Enable the registration of unwinding info for Windows x64 and ARM64.
   v8_win64_unwinding_info = true
 
   # Enable code comments for builtins in the snapshot (impacts performance).
@@ -187,15 +187,21 @@ declare_args() {
   # Enable sharing read-only space across isolates.
   # Sets -DV8_SHARED_RO_HEAP.
   v8_enable_shared_ro_heap = ""
-}
 
-# We reuse the snapshot toolchain for building torque and other generators to
-# avoid building v8_libbase on the host more than once. On mips with big endian,
-# the snapshot toolchain is the target toolchain and, hence, can't be used.
-v8_generator_toolchain = v8_snapshot_toolchain
-if (host_cpu == "x64" &&
-    (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
-  v8_generator_toolchain = "//build/toolchain/linux:clang_x64"
+  # Enable lazy source positions by default.
+  v8_enable_lazy_source_positions = true
+
+  # Disable write barriers when GCs are non-incremental and the
+  # heap has a single generation.
+  v8_disable_write_barriers = false
+
+  # Redirect allocation in the young generation so that there is
+  # only a single generation.
+  v8_enable_single_generation = ""
+
+  # Use token-threaded dispatch for the regular expression interpreter;
+  # use switch-based dispatch if this is false.
+  v8_enable_regexp_interpreter_threaded_dispatch = true
 }
 
 # Derived defaults.
@@ -231,6 +237,13 @@ if (v8_enable_fast_torque == "") {
   v8_enable_fast_torque = v8_enable_fast_mksnapshot
 }
 
+if (v8_enable_single_generation == "") {
+  v8_enable_single_generation = v8_disable_write_barriers
+}
+
+assert(!v8_disable_write_barriers || v8_enable_single_generation,
+       "Disabling write barriers works only with single generation")
+
 assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
        "Untrusted code mitigations are unsupported on ia32")
 
@@ -424,12 +437,21 @@ config("features") {
       defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ]
     }
   }
+  if (v8_enable_single_generation) {
+    defines += [ "V8_ENABLE_SINGLE_GENERATION" ]
+  }
+  if (v8_disable_write_barriers) {
+    defines += [ "V8_DISABLE_WRITE_BARRIERS" ]
+  }
   if (v8_use_external_startup_data) {
     defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
   }
   if (v8_enable_concurrent_marking) {
     defines += [ "V8_CONCURRENT_MARKING" ]
   }
+  if (v8_enable_lazy_source_positions) {
+    defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ]
+  }
   if (v8_check_microtasks_scopes_consistency) {
     defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
   }
@@ -451,6 +473,9 @@ config("features") {
   if (v8_win64_unwinding_info) {
     defines += [ "V8_WIN64_UNWINDING_INFO" ]
   }
+  if (v8_enable_regexp_interpreter_threaded_dispatch) {
+    defines += [ "V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH" ]
+  }
 }
 
 config("toolchain") {
@@ -968,16 +993,21 @@ torque_files = [
   "src/builtins/proxy-set-prototype-of.tq",
   "src/builtins/proxy.tq",
   "src/builtins/reflect.tq",
+  "src/builtins/regexp-match.tq",
   "src/builtins/regexp-replace.tq",
+  "src/builtins/regexp-source.tq",
+  "src/builtins/regexp-test.tq",
   "src/builtins/regexp.tq",
   "src/builtins/string.tq",
   "src/builtins/string-endswith.tq",
   "src/builtins/string-html.tq",
   "src/builtins/string-iterator.tq",
+  "src/builtins/string-pad.tq",
   "src/builtins/string-repeat.tq",
   "src/builtins/string-slice.tq",
   "src/builtins/string-startswith.tq",
   "src/builtins/string-substring.tq",
+  "src/builtins/torque-internal.tq",
   "src/builtins/typed-array-createtypedarray.tq",
   "src/builtins/typed-array-every.tq",
   "src/builtins/typed-array-filter.tq",
@@ -1002,6 +1032,7 @@ if (!v8_enable_i18n_support) {
 action("run_torque") {
   visibility = [
     ":*",
+    "tools/debug_helper/:*",
     "tools/gcmole/:*",
     "test/cctest/:*",
   ]
@@ -1023,6 +1054,8 @@ action("run_torque") {
     "$target_gen_dir/torque-generated/class-definitions-tq.cc",
     "$target_gen_dir/torque-generated/class-definitions-tq-inl.h",
     "$target_gen_dir/torque-generated/class-definitions-tq.h",
+    "$target_gen_dir/torque-generated/class-debug-readers-tq.cc",
+    "$target_gen_dir/torque-generated/class-debug-readers-tq.h",
     "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc",
     "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h",
     "$target_gen_dir/torque-generated/csa-types-tq.h",
@@ -1752,6 +1785,8 @@ v8_compiler_sources = [
   "src/compiler/escape-analysis-reducer.h",
   "src/compiler/escape-analysis.cc",
   "src/compiler/escape-analysis.h",
+  "src/compiler/feedback-source.cc",
+  "src/compiler/feedback-source.h",
   "src/compiler/frame-states.cc",
   "src/compiler/frame-states.h",
   "src/compiler/frame.cc",
@@ -1892,8 +1927,6 @@ v8_compiler_sources = [
   "src/compiler/types.h",
   "src/compiler/value-numbering-reducer.cc",
   "src/compiler/value-numbering-reducer.h",
-  "src/compiler/vector-slot-pair.cc",
-  "src/compiler/vector-slot-pair.h",
   "src/compiler/verifier.cc",
   "src/compiler/verifier.h",
   "src/compiler/wasm-compiler.cc",
@@ -2031,7 +2064,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/builtins/builtins-internal.cc",
     "src/builtins/builtins-intl.cc",
     "src/builtins/builtins-json.cc",
-    "src/builtins/builtins-math.cc",
     "src/builtins/builtins-number.cc",
     "src/builtins/builtins-object.cc",
     "src/builtins/builtins-promise.cc",
@@ -2095,6 +2127,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/codegen/register-arch.h",
     "src/codegen/register-configuration.cc",
     "src/codegen/register-configuration.h",
+    "src/codegen/register.cc",
     "src/codegen/register.h",
     "src/codegen/reglist.h",
     "src/codegen/reloc-info.cc",
@@ -2194,6 +2227,9 @@ v8_source_set("v8_base_without_compiler") {
     "src/execution/messages.h",
     "src/execution/microtask-queue.cc",
     "src/execution/microtask-queue.h",
+    "src/execution/protectors-inl.h",
+    "src/execution/protectors.cc",
+    "src/execution/protectors.h",
     "src/execution/runtime-profiler.cc",
     "src/execution/runtime-profiler.h",
     "src/execution/simulator-base.cc",
@@ -2758,7 +2794,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/runtime/runtime-typedarray.cc",
     "src/runtime/runtime-utils.h",
     "src/runtime/runtime-wasm.cc",
-    "src/runtime/runtime-weak-refs.cc",
     "src/runtime/runtime.cc",
     "src/runtime/runtime.h",
     "src/sanitizer/asan.h",
@@ -2922,6 +2957,8 @@ v8_source_set("v8_base_without_compiler") {
     "src/wasm/wasm-memory.h",
     "src/wasm/wasm-module-builder.cc",
     "src/wasm/wasm-module-builder.h",
+    "src/wasm/wasm-module-sourcemap.cc",
+    "src/wasm/wasm-module-sourcemap.h",
     "src/wasm/wasm-module.cc",
     "src/wasm/wasm-module.h",
     "src/wasm/wasm-objects-inl.h",
@@ -3109,6 +3146,7 @@ v8_source_set("v8_base_without_compiler") {
       "src/diagnostics/arm64/eh-frame-arm64.cc",
       "src/execution/arm64/frame-constants-arm64.cc",
       "src/execution/arm64/frame-constants-arm64.h",
+      "src/execution/arm64/pointer-auth-arm64.cc",
       "src/execution/arm64/simulator-arm64.cc",
       "src/execution/arm64/simulator-arm64.h",
       "src/execution/arm64/simulator-logic-arm64.cc",
@@ -3116,6 +3154,12 @@ v8_source_set("v8_base_without_compiler") {
       "src/regexp/arm64/regexp-macro-assembler-arm64.h",
       "src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
     ]
+    if (is_win) {
+      sources += [
+        "src/diagnostics/unwinding-info-win64.cc",
+        "src/diagnostics/unwinding-info-win64.h",
+      ]
+    }
     jumbo_excluded_sources += [
       # TODO(mostynb@vewd.com): fix this code so it doesn't need
       # to be excluded, see the comments inside.
@@ -3325,6 +3369,7 @@ v8_source_set("torque_base") {
     "src/torque/ast.h",
     "src/torque/cfg.cc",
     "src/torque/cfg.h",
+    "src/torque/class-debug-reader-generator.cc",
     "src/torque/constants.h",
     "src/torque/contextual.h",
     "src/torque/csa-generator.cc",
@@ -3351,6 +3396,8 @@ v8_source_set("torque_base") {
     "src/torque/torque-compiler.h",
     "src/torque/torque-parser.cc",
     "src/torque/torque-parser.h",
+    "src/torque/type-inference.cc",
+    "src/torque/type-inference.h",
     "src/torque/type-oracle.cc",
     "src/torque/type-oracle.h",
     "src/torque/type-visitor.cc",
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 27afc18a5117cd..be6a58859c5394 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1453 @@
+2019-09-04: Version 7.8.279
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.278
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.277
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.276
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.275
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.274
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.273
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.272
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.271
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-04: Version 7.8.270
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-03: Version 7.8.269
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-03: Version 7.8.268
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-03: Version 7.8.267
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-03: Version 7.8.266
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-03: Version 7.8.265
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-03: Version 7.8.264
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-03: Version 7.8.263
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-03: Version 7.8.262
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-03: Version 7.8.261
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.260
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.259
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.258
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.257
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.256
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.255
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.254
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.253
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.252
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.251
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.250
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.249
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.248
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.247
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.246
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-02: Version 7.8.245
+
+        Performance and stability improvements on all platforms.
+
+
+2019-09-01: Version 7.8.244
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-31: Version 7.8.243
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-31: Version 7.8.242
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.241
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.240
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.239
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.238
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.237
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.236
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.235
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.234
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.233
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-30: Version 7.8.232
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.231
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.230
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.229
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.228
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.227
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.226
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.225
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.224
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.223
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.222
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-29: Version 7.8.221
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.220
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.219
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.218
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.217
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.216
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.215
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.214
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.213
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.212
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.211
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.210
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.209
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-28: Version 7.8.208
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-27: Version 7.8.207
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-27: Version 7.8.206
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-27: Version 7.8.205
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-27: Version 7.8.204
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-27: Version 7.8.203
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-27: Version 7.8.202
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-27: Version 7.8.201
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.200
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.199
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.198
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.197
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.196
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.195
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.194
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.193
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.192
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.191
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-26: Version 7.8.190
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-25: Version 7.8.189
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.188
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.187
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.186
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.185
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.184
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.183
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.182
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.181
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.180
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.179
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-23: Version 7.8.178
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.177
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.176
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.175
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.174
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.173
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.172
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.171
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.170
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.169
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.168
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.167
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.166
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.165
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.164
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.163
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.162
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.161
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.160
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.159
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.158
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-22: Version 7.8.157
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.156
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.155
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.154
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.153
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.152
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.151
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.150
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.149
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.148
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.147
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.146
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.145
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.144
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.143
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.142
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.141
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.140
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-21: Version 7.8.139
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.138
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.137
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.136
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.135
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.134
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.133
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.132
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.131
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.130
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.129
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.128
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.127
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.126
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.125
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.124
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.123
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.122
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.121
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-20: Version 7.8.120
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-19: Version 7.8.119
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-19: Version 7.8.118
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-19: Version 7.8.117
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-19: Version 7.8.116
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-19: Version 7.8.115
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-19: Version 7.8.114
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-16: Version 7.8.113
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-15: Version 7.8.112
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-14: Version 7.8.111
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-14: Version 7.8.110
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-14: Version 7.8.109
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-14: Version 7.8.108
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-14: Version 7.8.107
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-14: Version 7.8.106
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.105
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.104
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.103
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.102
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.101
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.100
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.99
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.98
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.97
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.96
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.95
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.94
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-13: Version 7.8.93
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.92
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.91
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.90
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.89
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.88
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.87
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.86
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.85
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.84
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.83
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.82
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.81
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.80
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.79
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-12: Version 7.8.78
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-09: Version 7.8.77
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-09: Version 7.8.76
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-09: Version 7.8.75
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-09: Version 7.8.74
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-09: Version 7.8.73
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-09: Version 7.8.72
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-08: Version 7.8.71
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-08: Version 7.8.70
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-08: Version 7.8.69
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-08: Version 7.8.68
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-07: Version 7.8.67
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-07: Version 7.8.66
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-07: Version 7.8.65
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-06: Version 7.8.64
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-06: Version 7.8.63
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-06: Version 7.8.62
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-05: Version 7.8.61
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-05: Version 7.8.60
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-04: Version 7.8.59
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-02: Version 7.8.58
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-02: Version 7.8.57
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-02: Version 7.8.56
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-02: Version 7.8.55
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-02: Version 7.8.54
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-02: Version 7.8.53
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-01: Version 7.8.52
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-01: Version 7.8.51
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-01: Version 7.8.50
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-01: Version 7.8.49
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-01: Version 7.8.48
+
+        Performance and stability improvements on all platforms.
+
+
+2019-08-01: Version 7.8.47
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.46
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.45
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.44
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.43
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.42
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.41
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.40
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.39
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.38
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-31: Version 7.8.37
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.36
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.35
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.34
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.33
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.32
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.31
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.30
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.29
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.28
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.27
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.26
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.25
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.24
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.23
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.22
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.21
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.20
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.19
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.18
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.17
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-30: Version 7.8.16
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.15
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.14
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.13
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.12
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.11
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.10
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.9
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.8
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.7
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.6
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.5
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.4
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.3
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-29: Version 7.8.2
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-28: Version 7.8.1
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.310
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.309
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.308
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.307
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.306
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.305
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.304
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.303
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.302
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.301
+
+        Performance and stability improvements on all platforms.
+
+
+2019-07-24: Version 7.7.300
+
+        Performance and stability improvements on all platforms.
+
+
 2019-07-23: Version 7.7.299
 
         Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 986264356f99cc..a7d4081edb856c 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -3,6 +3,21 @@
 # all paths in here must match this assumption.
 
 vars = {
+  # Fetches only the SDK boot images that match at least one entry in a
+  # comma-separated whitelist.
+  #
+  # Only the X64 and ARM64 QEMU images are downloaded by default. Developers
+  # who need to boot on other target architectures or devices can opt to
+  # download more boot images. Examples of images include:
+  #
+  # Emulation:
+  #   qemu.x64, qemu.arm64
+  # Hardware:
+  #   generic.x64, generic.arm64
+  #
+  # Wildcards are supported (e.g. "qemu.*").
+  'checkout_fuchsia_boot_images': 'qemu.x64,qemu.arm64',
+
   'checkout_instrumented_libraries': False,
   'chromium_url': 'https://chromium.googlesource.com',
   'android_url': 'https://android.googlesource.com',
@@ -12,7 +27,7 @@ vars = {
   'check_v8_header_includes': False,
 
   # GN CIPD package version.
-  'gn_version': 'git_revision:972ed755f8e6d31cae9ba15fcd08136ae1a7886f',
+  'gn_version': 'git_revision:152c5144ceed9592c20f0c8fd55769646077569b',
 
   # luci-go CIPD package version.
   'luci_go': 'git_revision:7d11fd9e66407c49cb6c8546a2ae45ea993a240c',
@@ -20,7 +35,7 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_build-tools_version
   # and whatever else without interference from each other.
-  'android_sdk_build-tools_version': 'DLK621q5_Bga5EsOr7cp6bHWWxFKx6UHLu_Ix_m3AckC',
+  'android_sdk_build-tools_version': '5DL7LQQjVMLClXLzLgmGysccPGsGcjJdvH9z5-uetiIC',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_emulator_version
   # and whatever else without interference from each other.
@@ -57,15 +72,15 @@ vars = {
 
 deps = {
   'v8/build':
-    Var('chromium_url') + '/chromium/src/build.git' + '@' + '1e5d7d692f816af8136c738b79fe9e8dde8057f6',
+    Var('chromium_url') + '/chromium/src/build.git' + '@' + '693faeda4ee025796c7e473d953a5a7b6ad64c93',
   'v8/third_party/depot_tools':
-    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ee7b9dda90e409fb92031d511151debe5db7db9f',
+    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'f38bc1796282c61087dcf15abc61b8fd18a68402',
   'v8/third_party/icu':
-    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'fd97d4326fac6da84452b2d5fe75ff0949368dab',
+    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '53f6b233a41ec982d8445996247093f7aaf41639',
   'v8/third_party/instrumented_libraries':
     Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b1c3ca20848c117eb935b02c25d441f03e6fbc5e',
   'v8/buildtools':
-    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '67b293ca1316d06f7f00160ce35c92b8849a9dc9',
+    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '74cfb57006f83cfe050817526db359d5c8a11628',
   'v8/buildtools/clang_format/script':
     Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917',
   'v8/buildtools/linux64': {
@@ -105,9 +120,9 @@ deps = {
     'condition': 'host_os == "win"',
   },
   'v8/base/trace_event/common':
-    Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'cfe8887fa6ac3170e23a68949930e28d4705a16f',
+    Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '5e4fce17a9d2439c44a7b57ceecef6df9287ec2f',
   'v8/third_party/android_ndk': {
-    'url': Var('chromium_url') + '/android_ndk.git' + '@' + '4e2cea441bfd43f0863d14f57b1e1844260b9884',
+    'url': Var('chromium_url') + '/android_ndk.git' + '@' + '62582753e869484bf0cc7f7e8d184ce0077033c2',
     'condition': 'checkout_android',
   },
   'v8/third_party/android_sdk/public': {
@@ -153,7 +168,7 @@ deps = {
       'dep_type': 'cipd',
   },
   'v8/third_party/catapult': {
-    'url': Var('chromium_url') + '/catapult.git' + '@' + '53913cecb11a3ef993f6496b9110964e2e2aeec3',
+    'url': Var('chromium_url') + '/catapult.git' + '@' + 'e7c719c3e85f76938bf4fef0ba37c27f89246f71',
     'condition': 'checkout_android',
   },
   'v8/third_party/colorama/src': {
@@ -161,11 +176,11 @@ deps = {
     'condition': 'checkout_android',
   },
   'v8/third_party/fuchsia-sdk': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5fd29151cf35c0813c33cc368a7c78389e3f5caa',
+    'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '1785f0ac8e1fe81cb25e260acbe7de8f62fa3e44',
     'condition': 'checkout_fuchsia',
   },
   'v8/third_party/googletest/src':
-    Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6077f444da944d96d311d358d761164261f1cdd0',
+    Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '565f1b848215b77c3732bca345fe76a0431d8b34',
   'v8/third_party/jinja2':
     Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
   'v8/third_party/markupsafe':
@@ -177,7 +192,7 @@ deps = {
   'v8/test/mozilla/data':
     Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
   'v8/test/test262/data':
-    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '26a2268436f28f64c4539d9aab9ebd0f0b7c99c5',
+    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '59a1a016b7cf5cf43f66b274c7d1db4ec6066935',
   'v8/test/test262/harness':
     Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b',
   'v8/third_party/qemu-linux-x64': {
@@ -201,7 +216,7 @@ deps = {
       'dep_type': 'cipd',
   },
   'v8/tools/clang':
-    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'f485a21a9cb05494161d97d545c3b29447610ffb',
+    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '2fef805e5b05b26a8c87c47865590b5f43218611',
   'v8/tools/luci-go': {
       'packages': [
         {
@@ -230,10 +245,8 @@ deps = {
     'condition': 'checkout_mac',
     'dep_type': 'cipd',
   },
-  'v8/test/wasm-js/data':
-    Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '1a411f713d9850ce7da24719aba5bb80c535f562',
   'v8/third_party/perfetto':
-    Var('android_url') + '/platform/external/perfetto.git' + '@' + '0e8281399fd854de13461f2c1c9f2fb0b8e9c3ae',
+    Var('android_url') + '/platform/external/perfetto.git' + '@' + '01615892494a9a8dc84414962d0a817bf97de2c2',
   'v8/third_party/protobuf':
     Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91',
 }
@@ -346,6 +359,17 @@ hooks = [
                 '-s', 'v8/test/wasm-spec-tests/tests.tar.gz.sha1',
     ],
   },
+  {
+    'name': 'wasm_js',
+    'pattern': '.',
+    'action': [ 'download_from_google_storage',
+                '--no_resume',
+                '--no_auth',
+                '-u',
+                '--bucket', 'v8-wasm-spec-tests',
+                '-s', 'v8/test/wasm-js/tests.tar.gz.sha1',
+    ],
+  },
   {
     'name': 'sysroot_arm',
     'pattern': '.',
@@ -410,6 +434,13 @@ hooks = [
     'condition': 'checkout_win',
     'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
   },
+  {
+    # Update the Mac toolchain if necessary.
+    'name': 'mac_toolchain',
+    'pattern': '.',
+    'condition': 'checkout_mac',
+    'action': ['python', 'v8/build/mac_toolchain.py'],
+  },
   # Pull binutils for linux, enabled debug fission for faster linking /
   # debugging when used with clang on Ubuntu Precise.
   # https://code.google.com/p/chromium/issues/detail?id=352046
@@ -444,6 +475,7 @@ hooks = [
     'action': [
       'python',
       'v8/build/fuchsia/update_sdk.py',
+      '--boot-images={checkout_fuchsia_boot_images}',
     ],
   },
   {
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index be360966665b38..9ab84b1e2759de 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -1,31 +1,31 @@
 # Eng reviewer. Please reach out before adding new top-level directories.
 # Disagreement among owners should be escalated to eng reviewers.
-file://ENG_REVIEW_OWNERS
+file:ENG_REVIEW_OWNERS
 
-per-file .clang-format=file://INFRA_OWNERS
-per-file .clang-tidy=file://INFRA_OWNERS
-per-file .editorconfig=file://INFRA_OWNERS
-per-file .flake8=file://INFRA_OWNERS
-per-file .git-blame-ignore-revs=file://INFRA_OWNERS
-per-file .gitattributes=file://INFRA_OWNERS
-per-file .gitignore=file://INFRA_OWNERS
-per-file .gn=file://INFRA_OWNERS
-per-file .vpython=file://INFRA_OWNERS
-per-file .ycm_extra_conf.py=file://INFRA_OWNERS
-per-file BUILD.gn=file://COMMON_OWNERS
-per-file DEPS=file://INFRA_OWNERS
+per-file .clang-format=file:INFRA_OWNERS
+per-file .clang-tidy=file:INFRA_OWNERS
+per-file .editorconfig=file:INFRA_OWNERS
+per-file .flake8=file:INFRA_OWNERS
+per-file .git-blame-ignore-revs=file:INFRA_OWNERS
+per-file .gitattributes=file:INFRA_OWNERS
+per-file .gitignore=file:INFRA_OWNERS
+per-file .gn=file:INFRA_OWNERS
+per-file .vpython=file:INFRA_OWNERS
+per-file .ycm_extra_conf.py=file:INFRA_OWNERS
+per-file BUILD.gn=file:COMMON_OWNERS
+per-file DEPS=file:INFRA_OWNERS
 # For Test262 rolls.
 per-file DEPS=mathias@chromium.org
-per-file PRESUBMIT=file://INFRA_OWNERS
-per-file codereview.settings=file://INFRA_OWNERS
+per-file PRESUBMIT=file:INFRA_OWNERS
+per-file codereview.settings=file:INFRA_OWNERS
 
-per-file AUTHORS=file://COMMON_OWNERS
-per-file WATCHLIST=file://COMMON_OWNERS
+per-file AUTHORS=file:COMMON_OWNERS
+per-file WATCHLIST=file:COMMON_OWNERS
 
-per-file *-mips*=file://MIPS_OWNERS
-per-file *-mips64*=file://MIPS_OWNERS
-per-file *-ppc*=file://PPC_OWNERS
-per-file *-s390*=file://S390_OWNERS
+per-file *-mips*=file:MIPS_OWNERS
+per-file *-mips64*=file:MIPS_OWNERS
+per-file *-ppc*=file:PPC_OWNERS
+per-file *-s390*=file:S390_OWNERS
 
 # TEAM: v8-dev@googlegroups.com
 # COMPONENT: Blink>JavaScript
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index f1878a18da91c6..57ac0254d96376 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -256,6 +256,13 @@
   INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
                            TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
                            arg2_name, arg2_val)
+#define TRACE_EVENT_INSTANT_WITH_FLAGS0(category_group, name, scope_and_flags) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           scope_and_flags)
+#define TRACE_EVENT_INSTANT_WITH_FLAGS1(category_group, name, scope_and_flags, \
+                                        arg1_name, arg1_val)                   \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           scope_and_flags, arg1_name, arg1_val)
 
 #define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
                                             timestamp)                   \
@@ -285,12 +292,12 @@
   INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
                            TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val,    \
                            arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_BEGIN0(category_group, name)                     \
-  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
-                           TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
-  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name,  \
-                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN_WITH_FLAGS0(category_group, name, flags) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, flags)
+#define TRACE_EVENT_BEGIN_WITH_FLAGS1(category_group, name, flags, arg1_name, \
+                                      arg1_val)                               \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name,     \
+                           flags, arg1_name, arg1_val)
 #define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
                                 arg2_name, arg2_val)                       \
   INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name,  \
@@ -341,12 +348,12 @@
   INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,        \
                            TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val,         \
                            arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_END0(category_group, name)                     \
-  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
-                           TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
-  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,  \
-                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_END_WITH_FLAGS0(category_group, name, flags) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags)
+#define TRACE_EVENT_END_WITH_FLAGS1(category_group, name, flags, arg1_name,    \
+                                    arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags, \
+                           arg1_name, arg1_val)
 #define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
                               arg2_name, arg2_val)                       \
   INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,  \
@@ -580,6 +587,9 @@
   INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                        \
       TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,             \
       TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_FLAGS0(category_group, name, id, flags) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN,            \
+                                   category_group, name, id, flags)
 
 // Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp
 // provided.
@@ -606,6 +616,11 @@
   INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
       TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,                 \
       TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0(     \
+    category_group, name, id, timestamp, flags)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
 
 // Records a single ASYNC_STEP_INTO event for |step| immediately. If the
 // category is not enabled, then this does nothing. The |name| and |id| must
@@ -677,6 +692,9 @@
   INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                      \
       TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,             \
       TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_ASYNC_END_WITH_FLAGS0(category_group, name, id, flags) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END,            \
+                                   category_group, name, id, flags)
 
 // Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided.
 #define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
@@ -702,6 +720,11 @@
   INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
       TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                 \
       TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0(category_group, name, \
+                                                        id, timestamp, flags) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
 
 // NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
 // be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
@@ -935,6 +958,9 @@
 #define TRACE_TASK_EXECUTION(run_function, task) \
   INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
 
+#define TRACE_LOG_MESSAGE(file, message, line) \
+  INTERNAL_TRACE_LOG_MESSAGE(file, message, line)
+
 // TRACE_EVENT_METADATA* events are information related to other
 // injected events, not events in their own right.
 #define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
@@ -1075,6 +1101,8 @@
 // TODO(eseckler): Remove once we have native support for typed proto events in
 // TRACE_EVENT macros.
 #define TRACE_EVENT_FLAG_TYPED_PROTO_ARGS (static_cast<unsigned int>(1 << 15))
+#define TRACE_EVENT_FLAG_JAVA_STRING_LITERALS \
+  (static_cast<unsigned int>(1 << 16))
 
 #define TRACE_EVENT_FLAG_SCOPE_MASK                          \
   (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
diff --git a/deps/v8/benchmarks/OWNERS b/deps/v8/benchmarks/OWNERS
index 852d438bb0a884..3c70cea2fd5e6a 100644
--- a/deps/v8/benchmarks/OWNERS
+++ b/deps/v8/benchmarks/OWNERS
@@ -1 +1 @@
-file://COMMON_OWNERS
+file:../COMMON_OWNERS
diff --git a/deps/v8/build_overrides/OWNERS b/deps/v8/build_overrides/OWNERS
index bdb1d555a4fb98..cb04fa0838fbb5 100644
--- a/deps/v8/build_overrides/OWNERS
+++ b/deps/v8/build_overrides/OWNERS
@@ -1 +1 @@
-file://INFRA_OWNERS
+file:../INFRA_OWNERS
diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni
index 12ef8b28d6b612..5b99eb94022596 100644
--- a/deps/v8/build_overrides/build.gni
+++ b/deps/v8/build_overrides/build.gni
@@ -35,5 +35,16 @@ tsan_suppressions_file = "//build/sanitizers/tsan_suppressions.cc"
 # Skip assertions about 4GiB file size limit.
 ignore_elf32_limitations = true
 
-# Use the system install of Xcode for tools like ibtool, libtool, etc.
-use_system_xcode = true
+if (host_os == "mac") {
+  _result = exec_script("//build/mac/should_use_hermetic_xcode.py",
+                        [ target_os ],
+                        "value")
+  assert(_result != 2,
+         "Do not allow building targets with the default" +
+             "hermetic toolchain if the minimum OS version is not met.")
+  assert(_result != 3,
+         "iOS does not support building with a hermetic toolchain. " +
+             "Please install Xcode.")
+
+  use_system_xcode = _result == 0
+}
diff --git a/deps/v8/custom_deps/OWNERS b/deps/v8/custom_deps/OWNERS
index bdb1d555a4fb98..cb04fa0838fbb5 100644
--- a/deps/v8/custom_deps/OWNERS
+++ b/deps/v8/custom_deps/OWNERS
@@ -1 +1 @@
-file://INFRA_OWNERS
+file:../INFRA_OWNERS
diff --git a/deps/v8/gni/OWNERS b/deps/v8/gni/OWNERS
index bdb1d555a4fb98..cb04fa0838fbb5 100644
--- a/deps/v8/gni/OWNERS
+++ b/deps/v8/gni/OWNERS
@@ -1 +1 @@
-file://INFRA_OWNERS
+file:../INFRA_OWNERS
diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni
index f4f1f1d88e258d..b5fb1823b382e0 100644
--- a/deps/v8/gni/snapshot_toolchain.gni
+++ b/deps/v8/gni/snapshot_toolchain.gni
@@ -107,3 +107,12 @@ if (v8_snapshot_toolchain == "") {
 assert(v8_snapshot_toolchain != "",
        "Do not know how to build a snapshot for $current_toolchain " +
            "on $host_os $host_cpu")
+
+# We reuse the snapshot toolchain for building torque and other generators to
+# avoid building v8_libbase on the host more than once. On mips with big endian,
+# the snapshot toolchain is the target toolchain and, hence, can't be used.
+v8_generator_toolchain = v8_snapshot_toolchain
+if (host_cpu == "x64" &&
+    (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
+  v8_generator_toolchain = "//build/toolchain/linux:clang_x64"
+}
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index 7ffbf74ce94d90..b64069847bc1cc 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -3,8 +3,8 @@ danno@chromium.org
 ulan@chromium.org
 yangguo@chromium.org
 
-per-file *DEPS=file://COMMON_OWNERS
-per-file v8-internal.h=file://COMMON_OWNERS
+per-file *DEPS=file:../COMMON_OWNERS
+per-file v8-internal.h=file:../COMMON_OWNERS
 per-file v8-inspector.h=dgozman@chromium.org
 per-file v8-inspector.h=pfeldman@chromium.org
 per-file v8-inspector.h=kozyatinskiy@chromium.org
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index e7cd8bfcdb66d0..df145e95bf723b 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -244,6 +244,8 @@ class V8_PLATFORM_EXPORT TracingController
 
   TracingController();
   ~TracingController() override;
+
+  // Takes ownership of |trace_buffer|.
   void Initialize(TraceBuffer* trace_buffer);
 #ifdef V8_USE_PERFETTO
   // Must be called before StartTracing() if V8_USE_PERFETTO is true. Provides
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index ef13006d137929..6ecddf45d6ae92 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -63,8 +63,8 @@ struct SmiTagging<4> {
 
   V8_INLINE static int SmiToInt(const internal::Address value) {
     int shift_bits = kSmiTagSize + kSmiShiftSize;
-    // Shift down (requires >> to be sign extending).
-    return static_cast<int>(static_cast<intptr_t>(value)) >> shift_bits;
+    // Truncate and shift down (requires >> to be sign extending).
+    return static_cast<int32_t>(static_cast<uint32_t>(value)) >> shift_bits;
   }
   V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
     // Is value in range [kSmiMinValue, kSmiMaxValue].
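The Smi decode above now truncates to the low 32 bits before the sign-extending shift, so on 64-bit builds the upper half of the word cannot leak into the result. A minimal standalone sketch of the pattern, with an assumed shift of kSmiTagSize + kSmiShiftSize = 1 (not V8's actual configuration machinery):

    #include <cassert>
    #include <cstdint>

    // Sketch of SmiTagging<4>::SmiToInt: truncate to 32 bits, then rely on a
    // sign-extending right shift to recover the signed payload.
    int SmiToInt(uintptr_t value) {
      const int kShiftBits = 1;  // assumed kSmiTagSize + kSmiShiftSize
      return static_cast<int32_t>(static_cast<uint32_t>(value)) >> kShiftBits;
    }

    int main() {
      // -5 encoded as a Smi: payload shifted left one bit, tag bit zero.
      uintptr_t encoded = static_cast<uint32_t>(-5) << 1;
      assert(SmiToInt(encoded) == -5);
    }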
@@ -152,6 +152,7 @@ class Internals {
 
   static const uint32_t kNumIsolateDataSlots = 4;
 
+  // IsolateData layout guarantees.
   static const int kIsolateEmbedderDataOffset = 0;
   static const int kExternalMemoryOffset =
       kNumIsolateDataSlots * kApiSystemPointerSize;
@@ -159,8 +160,14 @@ class Internals {
       kExternalMemoryOffset + kApiInt64Size;
   static const int kExternalMemoryAtLastMarkCompactOffset =
       kExternalMemoryLimitOffset + kApiInt64Size;
-  static const int kIsolateRootsOffset =
+  static const int kIsolateFastCCallCallerFpOffset =
       kExternalMemoryAtLastMarkCompactOffset + kApiInt64Size;
+  static const int kIsolateFastCCallCallerPcOffset =
+      kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
+  static const int kIsolateStackGuardOffset =
+      kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
+  static const int kIsolateRootsOffset =
+      kIsolateStackGuardOffset + 7 * kApiSystemPointerSize;
 
   static const int kUndefinedValueRootIndex = 4;
   static const int kTheHoleValueRootIndex = 5;
@@ -177,7 +184,7 @@ class Internals {
 
   static const int kFirstNonstringType = 0x40;
   static const int kOddballType = 0x43;
-  static const int kForeignType = 0x47;
+  static const int kForeignType = 0x46;
   static const int kJSSpecialApiObjectType = 0x410;
   static const int kJSApiObjectType = 0x420;
   static const int kJSObjectType = 0x421;
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 7e43b0d9db4a9d..b707fafc49229a 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -439,14 +439,6 @@ class Platform {
    */
   virtual void DumpWithoutCrashing() {}
 
-  /**
-   * Lets the embedder to add crash keys.
-   */
-  virtual void AddCrashKey(int id, const char* name, uintptr_t value) {
-    // "noop" is a valid implementation if the embedder doesn't care to log
-    // additional data for crashes.
-  }
-
  protected:
   /**
    * Default implementation of current wall-clock time in milliseconds
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 46d3eb8aa4a24e..360850b631c7f9 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -18,14 +18,18 @@ namespace v8 {
 class HeapGraphNode;
 struct HeapStatsUpdate;
 
-typedef uint32_t SnapshotObjectId;
-
+using NativeObject = void*;
+using SnapshotObjectId = uint32_t;
 
 struct CpuProfileDeoptFrame {
   int script_id;
   size_t position;
 };
 
+namespace internal {
+class CpuProfile;
+}  // namespace internal
+
 }  // namespace v8
 
 #ifdef V8_OS_WIN
@@ -48,75 +52,6 @@ template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
 
 namespace v8 {
 
-// TickSample captures the information collected for each sample.
-struct V8_EXPORT TickSample {
-  // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
-  // include the runtime function we're calling. Externally exposed tick
-  // samples don't care.
-  enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
-
-  TickSample()
-      : state(OTHER),
-        pc(nullptr),
-        external_callback_entry(nullptr),
-        frames_count(0),
-        has_external_callback(false),
-        update_stats(true) {}
-
-  /**
-   * Initialize a tick sample from the isolate.
-   * \param isolate The isolate.
-   * \param state Execution state.
-   * \param record_c_entry_frame Include or skip the runtime function.
-   * \param update_stats Whether update the sample to the aggregated stats.
-   * \param use_simulator_reg_state When set to true and V8 is running under a
-   *                                simulator, the method will use the simulator
-   *                                register state rather than the one provided
-   *                                with |state| argument. Otherwise the method
-   *                                will use provided register |state| as is.
-   */
-  void Init(Isolate* isolate, const v8::RegisterState& state,
-            RecordCEntryFrame record_c_entry_frame, bool update_stats,
-            bool use_simulator_reg_state = true);
-  /**
-   * Get a call stack sample from the isolate.
-   * \param isolate The isolate.
-   * \param state Register state.
-   * \param record_c_entry_frame Include or skip the runtime function.
-   * \param frames Caller allocated buffer to store stack frames.
-   * \param frames_limit Maximum number of frames to capture. The buffer must
-   *                     be large enough to hold the number of frames.
-   * \param sample_info The sample info is filled up by the function
-   *                    provides number of actual captured stack frames and
-   *                    the current VM state.
-   * \param use_simulator_reg_state When set to true and V8 is running under a
-   *                                simulator, the method will use the simulator
-   *                                register state rather than the one provided
-   *                                with |state| argument. Otherwise the method
-   *                                will use provided register |state| as is.
-   * \note GetStackSample is thread and signal safe and should only be called
-   *                      when the JS thread is paused or interrupted.
-   *                      Otherwise the behavior is undefined.
-   */
-  static bool GetStackSample(Isolate* isolate, v8::RegisterState* state,
-                             RecordCEntryFrame record_c_entry_frame,
-                             void** frames, size_t frames_limit,
-                             v8::SampleInfo* sample_info,
-                             bool use_simulator_reg_state = true);
-  StateTag state;  // The state of the VM.
-  void* pc;        // Instruction pointer.
-  union {
-    void* tos;  // Top stack value (*sp).
-    void* external_callback_entry;
-  };
-  static const unsigned kMaxFramesCountLog2 = 8;
-  static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
-  void* stack[kMaxFramesCount];                 // Call stack.
-  unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
-  bool has_external_callback : 1;
-  bool update_stats : 1;  // Whether the sample should update aggregated stats.
-};
-
 /**
  * CpuProfileNode represents a node in a call graph.
  */
@@ -307,6 +242,15 @@ enum CpuProfilingNamingMode {
   kDebugNaming,
 };
 
+enum CpuProfilingLoggingMode {
+  // Enables logging when a profile is active, and disables logging when all
+  // profiles are detached.
+  kLazyLogging,
+  // Enables logging for the lifetime of the CpuProfiler. Calls to
+  // StartRecording are faster, at the expense of runtime overhead.
+  kEagerLogging,
+};
+
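The trade-off: kLazyLogging defers logger setup until a profile is started, while kEagerLogging pays a steady runtime cost to keep profile starts cheap. A usage sketch for an embedder that toggles profiles frequently, assuming an isolate in scope; it uses the CpuProfiler::New overload extended later in this patch:

    // Eager logging keeps profile start/stop cheap at the cost of constant
    // logging overhead; suitable when profiles are started frequently.
    v8::CpuProfiler* profiler =
        v8::CpuProfiler::New(isolate, v8::kDebugNaming, v8::kEagerLogging);
    // ... StartProfiling()/StopProfiling() repeatedly ...
    profiler->Dispose();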
 /**
  * Optional profiling attributes.
  */
@@ -328,21 +272,25 @@ class V8_EXPORT CpuProfilingOptions {
    *                             zero, the sampling interval will be equal to
    *                             the profiler's sampling interval.
    */
-  CpuProfilingOptions(CpuProfilingMode mode = kLeafNodeLineNumbers,
-                      unsigned max_samples = kNoSampleLimit,
-                      int sampling_interval_us = 0)
-      : mode_(mode),
-        max_samples_(max_samples),
-        sampling_interval_us_(sampling_interval_us) {}
+  CpuProfilingOptions(
+      CpuProfilingMode mode = kLeafNodeLineNumbers,
+      unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
+      MaybeLocal<Context> filter_context = MaybeLocal<Context>());
 
   CpuProfilingMode mode() const { return mode_; }
   unsigned max_samples() const { return max_samples_; }
   int sampling_interval_us() const { return sampling_interval_us_; }
 
  private:
+  friend class internal::CpuProfile;
+
+  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
+  void* raw_filter_context() const;
+
   CpuProfilingMode mode_;
   unsigned max_samples_;
   int sampling_interval_us_;
+  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
 };
 
 /**
@@ -357,7 +305,8 @@ class V8_EXPORT CpuProfiler {
    * |Dispose| method.
    */
   static CpuProfiler* New(Isolate* isolate,
-                          CpuProfilingNamingMode = kDebugNaming);
+                          CpuProfilingNamingMode = kDebugNaming,
+                          CpuProfilingLoggingMode = kLazyLogging);
 
   /**
    * Synchronously collect current stack sample in all profilers attached to
@@ -798,6 +747,12 @@ class V8_EXPORT EmbedderGraph {
      */
     virtual const char* NamePrefix() { return nullptr; }
 
+    /**
+     * Returns the NativeObject that can be used for querying the
+     * |HeapSnapshot|.
+     */
+    virtual NativeObject GetNativeObject() { return nullptr; }
+
     Node(const Node&) = delete;
     Node& operator=(const Node&) = delete;
   };
@@ -860,6 +815,12 @@ class V8_EXPORT HeapProfiler {
    */
   SnapshotObjectId GetObjectId(Local<Value> value);
 
+  /**
+   * Returns SnapshotObjectId for a native object referenced by |value| if it
+   * has been seen by the heap profiler, kUnknownObjectId otherwise.
+   */
+  SnapshotObjectId GetObjectId(NativeObject value);
+
   /**
    * Returns heap object with given SnapshotObjectId if the object is alive,
    * otherwise empty handle is returned.
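The new |GetObjectId(NativeObject)| pairs with |EmbedderGraph::Node::GetNativeObject()| above: an embedder can correlate its own C++ objects with snapshot entries. A sketch; the node class and the backing pointer are illustrative:

    // Hypothetical embedder graph node exposing its backing C++ object.
    class MyNode final : public v8::EmbedderGraph::Node {
     public:
      explicit MyNode(void* backing) : backing_(backing) {}
      const char* Name() override { return "MyNode"; }
      size_t SizeInBytes() override { return 64; }
      v8::NativeObject GetNativeObject() override { return backing_; }

     private:
      void* backing_;
    };

    // After a snapshot is taken, the same pointer resolves to an id:
    // v8::SnapshotObjectId id = heap_profiler->GetObjectId(backing);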
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 0406f65b08e684..6e61e2af999906 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define V8_MAJOR_VERSION 7
-#define V8_MINOR_VERSION 7
-#define V8_BUILD_NUMBER 299
-#define V8_PATCH_LEVEL 11
+#define V8_MINOR_VERSION 8
+#define V8_BUILD_NUMBER 279
+#define V8_PATCH_LEVEL 9
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index e1a467ddeebe64..d66f360c990d3e 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -19,6 +19,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <memory>
+#include <type_traits>
 #include <utility>
 #include <vector>
 
@@ -128,6 +129,7 @@ class PropertyCallbackArguments;
 class FunctionCallbackArguments;
 class GlobalHandles;
 class ScopedExternalStringLock;
+class ThreadLocalTop;
 
 namespace wasm {
 class NativeModule;
@@ -790,24 +792,43 @@ template <class T>
 using UniquePersistent = Global<T>;
 
 /**
- * A traced handle with move semantics, similar to std::unique_ptr. The handle
- * is to be used together with |v8::EmbedderHeapTracer| and specifies edges from
- * the embedder into V8's heap.
+ * Trait specifying behavior of |TracedGlobal<T>|.
+ */
+template <typename T>
+struct TracedGlobalTrait {
+  /**
+   * Specifies whether |TracedGlobal<T>| should clear its handle on destruction.
+   *
+   * V8 will *not* clear the embedder-side memory of the handle. The embedder is
+   * expected to report all |TracedGlobal<T>| handles through
+   * |EmbedderHeapTracer| upon garabge collection.
+   *
+   * See |EmbedderHeapTracer::IsRootForNonTracingGC| for handling with
+   * non-tracing GCs in V8.
+   */
+  static constexpr bool kRequiresExplicitDestruction = true;
+};
+
+/**
+ * A traced handle with copy and move semantics. The handle is to be used
+ * together with |v8::EmbedderHeapTracer| and specifies edges from the embedder
+ * into V8's heap.
  *
  * The exact semantics are:
  * - Tracing garbage collections use |v8::EmbedderHeapTracer|.
  * - Non-tracing garbage collections refer to
  *   |v8::EmbedderHeapTracer::IsRootForNonTracingGC()| whether the handle should
  *   be treated as root or not.
+ *
+ * For destruction semantics see |TracedGlobalTrait<T>|.
  */
 template <typename T>
-class V8_EXPORT TracedGlobal {
+class TracedGlobal {
  public:
   /**
    * An empty TracedGlobal without storage cell.
    */
   TracedGlobal() = default;
-  ~TracedGlobal() { Reset(); }
 
   /**
    * Construct a TracedGlobal from a Local.
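An embedder can opt a handle type out of destructor-based clearing by specializing the trait; it then becomes responsible for reporting those handles through the tracer and resetting them when notified. A sketch, specializing for v8::Object purely as an illustration:

    namespace v8 {
    // With this specialization, ~TracedGlobal<Object> no longer clears the
    // handle; the embedder must report it via EmbedderHeapTracer and reset it
    // itself (see ResetHandleInNonTracingGC later in this patch).
    template <>
    struct TracedGlobalTrait<TracedGlobal<Object>> {
      static constexpr bool kRequiresExplicitDestruction = false;
    };
    }  // namespace v8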
@@ -824,7 +845,41 @@ class V8_EXPORT TracedGlobal {
   /**
    * Move constructor initializing TracedGlobal from an existing one.
    */
-  V8_INLINE TracedGlobal(TracedGlobal&& other);
+  V8_INLINE TracedGlobal(TracedGlobal&& other) {
+    // Forward to operator=.
+    *this = std::move(other);
+  }
+
+  /**
+   * Move constructor initializing TracedGlobal from an existing one.
+   */
+  template <typename S>
+  V8_INLINE TracedGlobal(TracedGlobal<S>&& other) {
+    // Forward to operator=.
+    *this = std::move(other);
+  }
+
+  /**
+   * Copy constructor initializing TracedGlobal from an existing one.
+   */
+  V8_INLINE TracedGlobal(const TracedGlobal& other) {
+    // Forward to operator=.
+    *this = other;
+  }
+
+  /**
+   * Copy constructor initializing TracedGlobal from an existing one.
+   */
+  template <typename S>
+  V8_INLINE TracedGlobal(const TracedGlobal<S>& other) {
+    // Forward to operator=.
+    *this = other;
+  }
+
+  /**
+   * Move assignment operator initializing TracedGlobal from an existing one.
+   */
+  V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs);
 
   /**
    * Move assignment operator initializing TracedGlobal from an existing one.
@@ -833,10 +888,21 @@ class V8_EXPORT TracedGlobal {
   V8_INLINE TracedGlobal& operator=(TracedGlobal<S>&& rhs);
 
   /**
-   * TracedGlobal only supports move semantics and forbids copying.
+   * Copy assignment operator initializing TracedGlobal from an existing one.
+   *
+   * Note: Prohibited when |other| has a finalization callback set through
+   * |SetFinalizationCallback|.
+   */
+  V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs);
+
+  /**
+   * Copy assignment operator initializing TracedGlobal from an existing one.
+   *
+   * Note: Prohibited when |other| has a finalization callback set through
+   * |SetFinalizationCallback|.
    */
-  TracedGlobal(const TracedGlobal&) = delete;
-  void operator=(const TracedGlobal&) = delete;
+  template <class S>
+  V8_INLINE TracedGlobal& operator=(const TracedGlobal<S>& rhs);
 
   /**
    * Returns true if this TracedGlobal is empty, i.e., has not been assigned an
@@ -870,8 +936,8 @@ class V8_EXPORT TracedGlobal {
 
   template <class S>
   V8_INLINE bool operator==(const TracedGlobal<S>& that) const {
-    internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
-    internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+    internal::Address* a = reinterpret_cast<internal::Address*>(**this);
+    internal::Address* b = reinterpret_cast<internal::Address*>(*that);
     if (a == nullptr) return b == nullptr;
     if (b == nullptr) return false;
     return *a == *b;
@@ -879,8 +945,8 @@ class V8_EXPORT TracedGlobal {
 
   template <class S>
   V8_INLINE bool operator==(const Local<S>& that) const {
-    internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
-    internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+    internal::Address* a = reinterpret_cast<internal::Address*>(**this);
+    internal::Address* b = reinterpret_cast<internal::Address*>(*that);
     if (a == nullptr) return b == nullptr;
     if (b == nullptr) return false;
     return *a == *b;
@@ -921,11 +987,32 @@ class V8_EXPORT TracedGlobal {
       void* parameter, WeakCallbackInfo<void>::Callback callback);
 
  private:
-  V8_INLINE static T* New(Isolate* isolate, T* that, T** slot);
+  // Wrapping type used when clearing on destruction is required.
+  struct WrappedForDestruction {
+    T* value;
+
+    explicit WrappedForDestruction(T* val) : value(val) {}
+    ~WrappedForDestruction();
+    operator T*() const { return value; }
+    T* operator*() const { return value; }
+    T* operator->() const { return value; }
+    WrappedForDestruction& operator=(const WrappedForDestruction& other) {
+      value = other.value;
+      return *this;
+    }
+    WrappedForDestruction& operator=(T* val) {
+      value = val;
+      return *this;
+    }
+  };
+
+  V8_INLINE static T* New(Isolate* isolate, T* that, void* slot);
 
   T* operator*() const { return this->val_; }
 
-  T* val_ = nullptr;
+  typename std::conditional<
+      TracedGlobalTrait<TracedGlobal<T>>::kRequiresExplicitDestruction,
+      WrappedForDestruction, T*>::type val_{nullptr};
 
   friend class EmbedderHeapTracer;
   template <typename F>
@@ -1968,6 +2055,7 @@ struct SampleInfo {
   StateTag vm_state;              // Current VM state.
   void* external_callback_entry;  // External callback address if VM is
                                   // executing an external callback.
+  void* top_context;              // Incumbent native context address.
 };
 
 struct MemoryRange {
@@ -3697,6 +3785,15 @@ class V8_EXPORT Object : public Value {
    */
   bool IsConstructor();
 
+  /**
+   * True if this object can carry information relevant to the embedder in its
+   * embedder fields, false otherwise. This is generally true for objects
+   * constructed through function templates but also holds for other types where
+   * V8 automatically adds internal fields at compile time, such as
+   * v8::ArrayBuffer.
+   */
+  bool IsApiWrapper();
+
   /**
    * Call an Object as a function if a callback is set by the
    * ObjectTemplate::SetCallAsFunctionHandler method.
@@ -4762,8 +4859,8 @@ class V8_EXPORT ArrayBuffer : public Object {
   bool IsDetachable() const;
 
   // TODO(913887): fix the use of 'neuter' in the API.
-  V8_DEPRECATE_SOON("Use IsDetachable() instead.",
-                    inline bool IsNeuterable() const) {
+  V8_DEPRECATED("Use IsDetachable() instead.",
+                inline bool IsNeuterable() const) {
     return IsDetachable();
   }
 
@@ -4776,7 +4873,7 @@ class V8_EXPORT ArrayBuffer : public Object {
   void Detach();
 
   // TODO(913887): fix the use of 'neuter' in the API.
-  V8_DEPRECATE_SOON("Use Detach() instead.", inline void Neuter()) { Detach(); }
+  V8_DEPRECATED("Use Detach() instead.", inline void Neuter()) { Detach(); }
 
   /**
    * Make this ArrayBuffer external. The pointer to underlying memory block
@@ -5379,6 +5476,32 @@ class V8_EXPORT RegExp : public Object {
   static void CheckCast(Value* obj);
 };
 
+/**
+ * An instance of the built-in FinalizationGroup constructor.
+ *
+ * This API is experimental and may change significantly.
+ */
+class V8_EXPORT FinalizationGroup : public Object {
+ public:
+  /**
+   * Runs the cleanup callback of the given FinalizationGroup.
+   *
+   * V8 will inform the embedder that there are finalizer callbacks to be
+   * called through HostCleanupFinalizationGroupCallback.
+   *
+   * HostCleanupFinalizationGroupCallback should schedule a task to
+   * call FinalizationGroup::Cleanup() at some point in the
+   * future. It's the embedder's responsibility to make this call at a
+   * time which does not interrupt synchronous ECMAScript code
+   * execution.
+   *
+   * If the result is Nothing<bool> then an exception has
+   * occurred. Otherwise the result is |true| if the cleanup callback
+   * was called successfully. The result is never |false|.
+   */
+  static V8_WARN_UNUSED_RESULT Maybe<bool> Cleanup(
+      Local<FinalizationGroup> finalization_group);
+};
 
 /**
  * A JavaScript value that wraps a C++ void*. This type of value is mainly used
@@ -6439,11 +6562,18 @@ class V8_EXPORT ResourceConstraints {
    * provided heap size limit. The heap size includes both the young and
    * the old generation.
    *
+   * \param initial_heap_size_in_bytes The initial heap size or zero.
+   *    By default V8 starts with a small heap and dynamically grows it to
+   *    match the set of live objects. This may lead to ineffective
+   *    garbage collections at startup if the live set is large.
+   *    Setting the initial heap size avoids such garbage collections.
+   *    Note that this does not affect young generation garbage collections.
+   *
    * \param maximum_heap_size_in_bytes The hard limit for the heap size.
    *    When the heap size approaches this limit, V8 will perform series of
-   *    garbage collections and invoke the NearHeapLimitCallback.
-   *    If the garbage collections do not help and the callback does not
-   * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory.
+   *    garbage collections and invoke the NearHeapLimitCallback. If the garbage
+   *    collections do not help and the callback does not increase the limit,
+   *    then V8 will crash with V8::FatalProcessOutOfMemory.
    */
   void ConfigureDefaultsFromHeapSize(size_t initial_heap_size_in_bytes,
                                      size_t maximum_heap_size_in_bytes);
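A sketch of how an embedder that expects a large startup live set might use the new parameter; the sizes are illustrative, not recommendations:

    // Pre-size the heap for an expected ~256 MiB live set; cap it at 1 GiB.
    v8::ResourceConstraints constraints;
    constraints.ConfigureDefaultsFromHeapSize(256u * 1024 * 1024,
                                              1024u * 1024 * 1024);
    v8::Isolate::CreateParams params;
    params.constraints = constraints;
    // params.array_buffer_allocator and friends omitted for brevity.
    v8::Isolate* isolate = v8::Isolate::New(params);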
@@ -6611,10 +6741,34 @@ typedef void* (*CreateHistogramCallback)(const char* name,
 
 typedef void (*AddHistogramSampleCallback)(void* histogram, int sample);
 
+// --- Crashkeys Callback ---
+enum class CrashKeyId {
+  kIsolateAddress,
+  kReadonlySpaceFirstPageAddress,
+  kMapSpaceFirstPageAddress,
+  kCodeSpaceFirstPageAddress,
+};
+
+typedef void (*AddCrashKeyCallback)(CrashKeyId id, const std::string& value);
+
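A sketch of the embedder side; ReportCrashKey stands in for whatever crash-reporting API the embedder uses, and registration happens through the Isolate::SetAddCrashKeyCallback declared later in this patch:

    // Forward V8's postmortem-debugging keys into the embedder's reporter.
    void AddV8CrashKey(v8::CrashKeyId id, const std::string& value) {
      switch (id) {
        case v8::CrashKeyId::kIsolateAddress:
          ReportCrashKey("v8_isolate_address", value);  // hypothetical API
          break;
        default:
          ReportCrashKey("v8_heap_page_address", value);
          break;
      }
    }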
 // --- Enter/Leave Script Callback ---
 typedef void (*BeforeCallEnteredCallback)(Isolate*);
 typedef void (*CallCompletedCallback)(Isolate*);
 
+/**
+ * HostCleanupFinalizationGroupCallback is called when we require the
+ * embedder to enqueue a task that would call
+ * FinalizationGroup::Cleanup().
+ *
+ * The FinalizationGroup is the one on which the embedder needs to
+ * call FinalizationGroup::Cleanup().
+ *
+ * The context provided is the one in which the FinalizationGroup was
+ * created.
+ */
+typedef void (*HostCleanupFinalizationGroupCallback)(
+    Local<Context> context, Local<FinalizationGroup> fg);
+
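A sketch of an embedder callback that defers the cleanup to a task, as the contract above requires; PostTask is a stand-in for the embedder's scheduler:

    #include <memory>

    void HostCleanupFinalizationGroup(v8::Local<v8::Context> context,
                                      v8::Local<v8::FinalizationGroup> fg) {
      v8::Isolate* isolate = context->GetIsolate();
      auto holder = std::make_shared<v8::Global<v8::FinalizationGroup>>(
          isolate, fg);
      PostTask([isolate, holder] {  // hypothetical task scheduler
        v8::HandleScope scope(isolate);
        // Nothing<bool> would signal an exception thrown by the cleanup
        // callback; Check() asserts success for brevity.
        v8::FinalizationGroup::Cleanup(holder->Get(isolate)).Check();
      });
    }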
 /**
  * HostImportModuleDynamicallyCallback is called when we require the
  * embedder to load a module. This is used as part of the dynamic
@@ -6712,7 +6866,8 @@ class PromiseRejectMessage {
 typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
 
 // --- Microtasks Callbacks ---
-typedef void (*MicrotasksCompletedCallback)(Isolate*);
+V8_DEPRECATE_SOON("Use *WithData version.",
+                  typedef void (*MicrotasksCompletedCallback)(Isolate*));
 typedef void (*MicrotasksCompletedCallbackWithData)(Isolate*, void*);
 typedef void (*MicrotaskCallback)(void* data);
 
@@ -6884,6 +7039,10 @@ typedef void (*WasmStreamingCallback)(const FunctionCallbackInfo<Value>&);
 // --- Callback for checking if WebAssembly threads are enabled ---
 typedef bool (*WasmThreadsEnabledCallback)(Local<Context> context);
 
+// --- Callback for loading source map file for WASM profiling support
+typedef Local<String> (*WasmLoadSourceMapCallback)(Isolate* isolate,
+                                                   const char* name);
+
 // --- Garbage Collection Callbacks ---
 
 /**
@@ -7260,7 +7419,7 @@ class V8_EXPORT EmbedderHeapTracer {
   /**
    * Called at the beginning of a GC cycle.
    */
-  V8_DEPRECATE_SOON("Use version with flags.", virtual void TracePrologue()) {}
+  V8_DEPRECATED("Use version with flags.", virtual void TracePrologue()) {}
   virtual void TracePrologue(TraceFlags flags);
 
   /**
@@ -7288,8 +7447,9 @@ class V8_EXPORT EmbedderHeapTracer {
   * overridden to fill a |TraceSummary| that is used by V8 to schedule future
    * garbage collections.
    */
-  virtual void TraceEpilogue() {}
-  virtual void TraceEpilogue(TraceSummary* trace_summary) { TraceEpilogue(); }
+  V8_DEPRECATE_SOON("Use version with parameter.",
+                    virtual void TraceEpilogue()) {}
+  virtual void TraceEpilogue(TraceSummary* trace_summary);
 
   /**
    * Called upon entering the final marking pause. No more incremental marking
@@ -7311,14 +7471,37 @@ class V8_EXPORT EmbedderHeapTracer {
   /**
    * Returns true if the TracedGlobal handle should be considered as root for
    * the currently running non-tracing garbage collection and false otherwise.
+   * The default implementation will keep all TracedGlobal references as roots.
+   *
+   * If this returns false, then V8 may reclaim the object referred to by
+   * such a handle. In that case:
+   * - No action is required if handles are used with destructors.
+   * - When run without destructors (by specializing
+   * |TracedGlobalTrait::kRequiresExplicitDestruction|) V8 calls
+   * |ResetHandleInNonTracingGC|.
    *
-   * Default implementation will keep all TracedGlobal references as roots.
+   * Note that the |handle| is different from the |TracedGlobal<T>| handle that
+   * the embedder holds for retaining the object. The embedder may use
+   * |TracedGlobal<T>::WrapperClassId()| to distinguish handles that should
+   * be treated as roots from those that should not.
    */
   virtual bool IsRootForNonTracingGC(
       const v8::TracedGlobal<v8::Value>& handle) {
     return true;
   }
 
+  /**
+   * Used in combination with |IsRootForNonTracingGC|. Called by V8 when an
+   * object that is backed by a handle is reclaimed by a non-tracing garbage
+   * collection. It is up to the embedder to reset the original handle.
+   *
+   * Note that the |handle| is different from the |TracedGlobal<T>| handle that
+   * the embedder holds for retaining the object. It is up to the embedder to
+   * find the original |TracedGlobal<T>| handle via the object or class id.
+   */
+  virtual void ResetHandleInNonTracingGC(
+      const v8::TracedGlobal<v8::Value>& handle) {}
+
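A sketch pairing the two hooks: the tracer lets scavenges drop handles tagged with a particular wrapper class id, and clears the embedder's own copy when told; the class id and lookup helper are hypothetical:

    class MyTracer : public v8::EmbedderHeapTracer {
     public:
      bool IsRootForNonTracingGC(
          const v8::TracedGlobal<v8::Value>& handle) override {
        // Only handles tagged as droppable may be reclaimed by scavenges.
        return handle.WrapperClassId() != kDroppableClassId;
      }
      void ResetHandleInNonTracingGC(
          const v8::TracedGlobal<v8::Value>& handle) override {
        // V8 reclaimed the object; clear the embedder-side handle.
        LookupEmbedderHandle(handle)->Reset();  // hypothetical lookup
      }
    };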
   /*
    * Called by the embedder to immediately perform a full garbage collection.
    *
@@ -7550,6 +7733,9 @@ class V8_EXPORT Isolate {
    private:
     internal::Isolate* const isolate_;
     internal::MicrotaskQueue* const microtask_queue_;
+    internal::Address previous_stack_height_;
+
+    friend class internal::ThreadLocalTop;
   };
 
   /**
@@ -7663,9 +7849,10 @@ class V8_EXPORT Isolate {
     kStringNormalize = 75,
     kCallSiteAPIGetFunctionSloppyCall = 76,
     kCallSiteAPIGetThisSloppyCall = 77,
+    kRegExpMatchAllWithNonGlobalRegExp = 78,
 
     // If you add new values here, you'll also need to update Chromium's:
-    // web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
+    // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
     // this list need to be landed first, then changes on the Chromium side.
     kUseCounterFeatureCount  // This enum value must be last.
   };
@@ -7723,6 +7910,18 @@ class V8_EXPORT Isolate {
    */
   static Isolate* GetCurrent();
 
+  /**
+   * Clears the set of objects held strongly by the heap. This set of
+   * objects are originally built when a WeakRef is created or
+   * successfully dereferenced.
+   *
+   * The embedder is expected to call this when a synchronous sequence
+   * of ECMAScript execution completes. It's the embedder's
+   * responsibility to make this call at a time which does not
+   * interrupt synchronous ECMAScript code execution.
+   */
+  void ClearKeptObjects();
+
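The expected call site is the end of a synchronous run-to-completion, for example right after draining microtasks. A sketch, assuming an isolate in scope:

    {
      v8::HandleScope scope(isolate);
      // ... run one synchronous turn of script execution ...
    }
    isolate->RunMicrotasks();     // drain the microtask queue first
    isolate->ClearKeptObjects();  // then drop strongly-held WeakRef targets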
   /**
    * Custom callback used by embedders to help V8 determine if it should abort
    * when it throws and no internal handler is predicted to catch the
@@ -7736,6 +7935,14 @@ class V8_EXPORT Isolate {
   void SetAbortOnUncaughtExceptionCallback(
       AbortOnUncaughtExceptionCallback callback);
 
+  /**
+   * This specifies the callback to be called when finalization groups
+   * are ready to be cleaned up and require FinalizationGroup::Cleanup()
+   * to be called in a future task.
+   */
+  void SetHostCleanupFinalizationGroupCallback(
+      HostCleanupFinalizationGroupCallback callback);
+
   /**
    * This specifies the callback called by the upcoming dynamic
    * import() language feature to load modules.
@@ -8289,6 +8496,13 @@ class V8_EXPORT Isolate {
   void SetCreateHistogramFunction(CreateHistogramCallback);
   void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
 
+  /**
+   * Enables the host application to provide a mechanism for recording a
+   * predefined set of data as crash keys to be used in postmortem debugging in
+   * case of a crash.
+   */
+  void SetAddCrashKeyCallback(AddCrashKeyCallback);
+
   /**
    * Optional notification that the embedder is idle.
    * V8 uses the notification to perform garbage collection.
@@ -8488,6 +8702,8 @@ class V8_EXPORT Isolate {
 
   void SetWasmThreadsEnabledCallback(WasmThreadsEnabledCallback callback);
 
+  void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback);
+
   /**
   * Check if V8 is dead and therefore unusable.  This is the case after
   * fatal errors such as out-of-memory situations.
@@ -8850,11 +9066,14 @@ class V8_EXPORT V8 {
                                                internal::Address* handle);
   static internal::Address* GlobalizeTracedReference(internal::Isolate* isolate,
                                                      internal::Address* handle,
-                                                     internal::Address* slot);
+                                                     internal::Address* slot,
+                                                     bool has_destructor);
   static void MoveGlobalReference(internal::Address** from,
                                   internal::Address** to);
   static void MoveTracedGlobalReference(internal::Address** from,
                                         internal::Address** to);
+  static void CopyTracedGlobalReference(const internal::Address* const* from,
+                                        internal::Address** to);
   static internal::Address* CopyGlobalReference(internal::Address* from);
   static void DisposeGlobal(internal::Address* global_handle);
   static void DisposeTracedGlobal(internal::Address* global_handle);
@@ -9937,18 +10156,26 @@ Global<T>& Global<T>::operator=(Global<S>&& rhs) {
 }
 
 template <class T>
-T* TracedGlobal<T>::New(Isolate* isolate, T* that, T** slot) {
+TracedGlobal<T>::WrappedForDestruction::~WrappedForDestruction() {
+  if (value == nullptr) return;
+  V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(value));
+  value = nullptr;
+}
+
+template <class T>
+T* TracedGlobal<T>::New(Isolate* isolate, T* that, void* slot) {
   if (that == nullptr) return nullptr;
   internal::Address* p = reinterpret_cast<internal::Address*>(that);
   return reinterpret_cast<T*>(V8::GlobalizeTracedReference(
       reinterpret_cast<internal::Isolate*>(isolate), p,
-      reinterpret_cast<internal::Address*>(slot)));
+      reinterpret_cast<internal::Address*>(slot),
+      TracedGlobalTrait<TracedGlobal<T>>::kRequiresExplicitDestruction));
 }
 
 template <class T>
 void TracedGlobal<T>::Reset() {
   if (IsEmpty()) return;
-  V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(val_));
+  V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(**this));
   val_ = nullptr;
 }
 
@@ -9962,19 +10189,23 @@ void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
 }
 
 template <class T>
-TracedGlobal<T>::TracedGlobal(TracedGlobal&& other) : val_(other.val_) {
-  if (other.val_ != nullptr) {
-    V8::MoveTracedGlobalReference(
-        reinterpret_cast<internal::Address**>(&other.val_),
-        reinterpret_cast<internal::Address**>(&this->val_));
-    other.val_ = nullptr;
-  }
+template <class S>
+TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
+  TYPE_CHECK(T, S);
+  *this = std::move(rhs.template As<T>());
+  return *this;
 }
 
 template <class T>
 template <class S>
-TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
+TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal<S>& rhs) {
   TYPE_CHECK(T, S);
+  *this = rhs.template As<T>();
+  return *this;
+}
+
+template <class T>
+TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal&& rhs) {
   if (this != &rhs) {
     this->Reset();
     if (rhs.val_ != nullptr) {
@@ -9988,11 +10219,24 @@ TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
   return *this;
 }
 
+template <class T>
+TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
+  if (this != &rhs) {
+    this->Reset();
+    if (rhs.val_ != nullptr) {
+      V8::CopyTracedGlobalReference(
+          reinterpret_cast<const internal::Address* const*>(&rhs.val_),
+          reinterpret_cast<internal::Address**>(&this->val_));
+    }
+  }
+  return *this;
+}
+
 template <class T>
 void TracedGlobal<T>::SetWrapperClassId(uint16_t class_id) {
   typedef internal::Internals I;
   if (IsEmpty()) return;
-  internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
+  internal::Address* obj = reinterpret_cast<internal::Address*>(**this);
   uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
   *reinterpret_cast<uint16_t*>(addr) = class_id;
 }
@@ -10001,7 +10245,7 @@ template <class T>
 uint16_t TracedGlobal<T>::WrapperClassId() const {
   typedef internal::Internals I;
   if (IsEmpty()) return 0;
-  internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
+  internal::Address* obj = reinterpret_cast<internal::Address*>(**this);
   uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
   return *reinterpret_cast<uint16_t*>(addr);
 }
@@ -10010,7 +10254,7 @@ template <class T>
 void TracedGlobal<T>::SetFinalizationCallback(
     void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
   V8::SetFinalizationCallbackTraced(
-      reinterpret_cast<internal::Address*>(this->val_), parameter, callback);
+      reinterpret_cast<internal::Address*>(**this), parameter, callback);
 }
 
 template <typename T>
@@ -10944,9 +11188,12 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
   }
 
   if (change_in_bytes < 0) {
-    const int64_t lower_limit = *external_memory_limit + change_in_bytes;
-    if (lower_limit > I::kExternalAllocationSoftLimit)
+    const int64_t lower_limit =
+        static_cast<int64_t>(static_cast<uint64_t>(*external_memory_limit) +
+                             static_cast<uint64_t>(change_in_bytes));
+    if (lower_limit > I::kExternalAllocationSoftLimit) {
       *external_memory_limit = lower_limit;
+    }
   } else if (change_in_bytes > 0 && amount > *external_memory_limit) {
     ReportExternalAllocationLimitReached();
   }
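The rewritten limit computation sidesteps signed-overflow undefined behavior by performing the addition in the unsigned domain and converting back. The pattern in isolation:

    #include <cstdint>

    // Signed addition overflows into UB; unsigned addition wraps, and the
    // conversion back is two's complement on all targets V8 supports.
    int64_t OverflowSafeAdd(int64_t a, int64_t b) {
      return static_cast<int64_t>(static_cast<uint64_t>(a) +
                                  static_cast<uint64_t>(b));
    }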
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 7bd2938225bc74..7670c0e449c7fd 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -186,6 +186,8 @@
 //  V8_HAS_BUILTIN_SADD_OVERFLOW        - __builtin_sadd_overflow() supported
 //  V8_HAS_BUILTIN_SSUB_OVERFLOW        - __builtin_ssub_overflow() supported
 //  V8_HAS_BUILTIN_UADD_OVERFLOW        - __builtin_uadd_overflow() supported
+//  V8_HAS_COMPUTED_GOTO                - computed goto/labels as values
+//                                        supported
 //  V8_HAS_DECLSPEC_DEPRECATED          - __declspec(deprecated) supported
 //  V8_HAS_DECLSPEC_NOINLINE            - __declspec(noinline) supported
 //  V8_HAS_DECLSPEC_SELECTANY           - __declspec(selectany) supported
@@ -214,6 +216,7 @@
 # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
     (__has_attribute(warn_unused_result))
 
+# define V8_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned))
 # define V8_HAS_BUILTIN_BSWAP16 (__has_builtin(__builtin_bswap16))
 # define V8_HAS_BUILTIN_BSWAP32 (__has_builtin(__builtin_bswap32))
 # define V8_HAS_BUILTIN_BSWAP64 (__has_builtin(__builtin_bswap64))
@@ -226,6 +229,10 @@
 # define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
 # define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
 
+// Clang has no __has_feature for computed gotos.
+// GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html
+# define V8_HAS_COMPUTED_GOTO 1
+
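Computed goto is the basis of the token-threaded regexp dispatch toggled in BUILD.gn: each handler jumps straight to the next opcode's label instead of looping back through a switch. A minimal sketch guarded by the new macro; the bytecode set is illustrative:

    #include <cstdint>

    int Run(const uint8_t* pc) {
    #if V8_HAS_COMPUTED_GOTO
      // Labels-as-values: a dispatch table of handler addresses.
      static const void* kDispatch[] = {&&op_load, &&op_add, &&op_halt};
      int acc = 0;
      goto* kDispatch[*pc];
    op_load:
      acc = *++pc;
      goto* kDispatch[*++pc];
    op_add:
      acc += *++pc;
      goto* kDispatch[*++pc];
    op_halt:
      return acc;
    #else
      return -1;  // switch-based fallback elided
    #endif
    }

    // Bytecode {0, 5, 1, 3, 2} yields 8: load 5, add 3, halt.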
 # if __cplusplus >= 201402L
 #  define V8_CAN_HAVE_DCHECK_IN_CONSTEXPR 1
 # endif
@@ -256,12 +263,16 @@
 # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
     (!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
 
+# define V8_HAS_BUILTIN_ASSUME_ALIGNED (V8_GNUC_PREREQ(4, 7, 0))
 # define V8_HAS_BUILTIN_CLZ (V8_GNUC_PREREQ(3, 4, 0))
 # define V8_HAS_BUILTIN_CTZ (V8_GNUC_PREREQ(3, 4, 0))
 # define V8_HAS_BUILTIN_EXPECT (V8_GNUC_PREREQ(2, 96, 0))
 # define V8_HAS_BUILTIN_FRAME_ADDRESS (V8_GNUC_PREREQ(2, 96, 0))
 # define V8_HAS_BUILTIN_POPCOUNT (V8_GNUC_PREREQ(3, 4, 0))
 
+// GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html
+#define V8_HAS_COMPUTED_GOTO (V8_GNUC_PREREQ(2, 0, 0))
+
 #endif
 
 #if defined(_MSC_VER)
@@ -291,6 +302,12 @@
 # define V8_INLINE inline
 #endif
 
+#if V8_HAS_BUILTIN_ASSUME_ALIGNED
+# define V8_ASSUME_ALIGNED(ptr, alignment) \
+  __builtin_assume_aligned((ptr), (alignment))
+#else
+# define V8_ASSUME_ALIGNED(ptr, alignment) (ptr)
+#endif
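The fallback form must accept the same two arguments as the builtin form so call sites compile unconditionally (hence the signature above). A usage sketch; raw_buffer is an assumed void* with the promised alignment:

    // Promise the compiler 16-byte alignment so it can vectorize loads.
    // Passing a pointer that is not actually aligned would be UB.
    float* data = static_cast<float*>(V8_ASSUME_ALIGNED(raw_buffer, 16));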
 
 // A macro used to tell the compiler to never inline a particular function.
 // Don't bother for debug builds.
diff --git a/deps/v8/infra/OWNERS b/deps/v8/infra/OWNERS
index a75a43666efa57..a33a8ba8ed96fa 100644
--- a/deps/v8/infra/OWNERS
+++ b/deps/v8/infra/OWNERS
@@ -1,3 +1,3 @@
-file://INFRA_OWNERS
+file:../INFRA_OWNERS
 
 tandrii@chromium.org
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index 13a73f3e94cd90..759d920b7b6a5c 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -483,13 +483,8 @@
     'swarming_dimensions' : {
       'os': 'Ubuntu-16.04',
     },
-    'swarming_task_attrs': {
-      'expiration': 14400,
-      'hard_timeout': 3600,
-      'priority': 35,
-    },
     'tests': [
-      {'name': 'v8testing', 'shards': 7},
+      {'name': 'v8testing', 'shards': 12},
     ],
   },
   ##############################################################################
@@ -546,7 +541,7 @@
   # Win64
   'v8_win64_asan_rel_ng_triggered': {
     'swarming_dimensions' : {
-      'os': 'Windows-10',
+      'os': 'Windows-10-15063',
     },
     'tests': [
       {'name': 'v8testing', 'shards': 5},
@@ -830,7 +825,7 @@
     },
     'tests': [
       {'name': 'mozilla', 'variant': 'default'},
-      {'name': 'test262', 'variant': 'default', 'shards': 4},
+      {'name': 'test262', 'variant': 'default', 'shards': 6},
       {'name': 'v8testing', 'variant': 'default', 'shards': 3},
     ],
   },
@@ -1253,7 +1248,7 @@
   },
   'V8 Win64 ASAN': {
     'swarming_dimensions': {
-      'os': 'Windows-10',
+      'os': 'Windows-10-15063',
     },
     'tests': [
       {'name': 'v8testing', 'shards': 5},
@@ -1395,8 +1390,8 @@
       'os': 'Ubuntu-16.04',
     },
     'tests': [
-      {'name': 'mjsunit_sp_frame_access'},
-      {'name': 'mozilla'},
+      {'name': 'mjsunit_sp_frame_access', 'shards': 4},
+      {'name': 'mozilla', 'shards': 4},
       {'name': 'test262'},
       {'name': 'v8testing', 'shards': 8},
       {'name': 'v8testing', 'variant': 'extra', 'shards': 4},
@@ -1406,13 +1401,13 @@
         'name': 'mozilla',
         'suffix': 'armv8-a',
         'test_args': ['--extra-flags', '--enable-armv8'],
-        'shards': 2,
+        'shards': 3,
       },
       {
         'name': 'test262',
         'suffix': 'armv8-a',
         'test_args': ['--extra-flags', '--enable-armv8'],
-        'shards': 2,
+        'shards': 3,
       },
       {
         'name': 'v8testing',
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index c6881f232117b2..3e21b6ea369970 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -1,5 +1,5 @@
-per-file *DEPS=file://COMMON_OWNERS
-per-file intl-*=file://INTL_OWNERS
-per-file *-intl*=file://INTL_OWNERS
+per-file *DEPS=file:../COMMON_OWNERS
+per-file intl-*=file:../INTL_OWNERS
+per-file *-intl*=file:../INTL_OWNERS
 
 # COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS
index ce6fb20af84d38..ef5a56dbfcecf3 100644
--- a/deps/v8/src/api/OWNERS
+++ b/deps/v8/src/api/OWNERS
@@ -1,4 +1,4 @@
-file://include/OWNERS
+file:../../include/OWNERS
 clemensh@chromium.org
 ishell@chromium.org
 jkummerow@chromium.org
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index d152412b474f95..1a6b512e83a651 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -8,6 +8,7 @@
 #include "src/api/api.h"
 #include "src/handles/handles-inl.h"
 #include "src/objects/foreign-inl.h"
+#include "src/objects/js-weak-refs.h"
 #include "src/objects/objects-inl.h"
 #include "src/objects/stack-frame-info.h"
 
@@ -84,6 +85,7 @@ MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
 MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
 MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
 MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
+MAKE_TO_LOCAL(ToLocal, JSFinalizationGroup, FinalizationGroup)
 
 TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
 
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index cd380d3cda1aa2..b96b6fc4f62b2e 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -42,9 +42,17 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
                                         bool is_prototype);
 
 MaybeHandle<JSFunction> InstantiateFunction(
-    Isolate* isolate, Handle<FunctionTemplateInfo> data,
+    Isolate* isolate, Handle<NativeContext> native_context,
+    Handle<FunctionTemplateInfo> data,
     MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
 
+MaybeHandle<JSFunction> InstantiateFunction(
+    Isolate* isolate, Handle<FunctionTemplateInfo> data,
+    MaybeHandle<Name> maybe_name = MaybeHandle<Name>()) {
+  return InstantiateFunction(isolate, isolate->native_context(), data,
+                             maybe_name);
+}
+
 MaybeHandle<Object> Instantiate(
     Isolate* isolate, Handle<Object> data,
     MaybeHandle<Name> maybe_name = MaybeHandle<Name>()) {
@@ -277,73 +285,73 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
 // the cache for those cases.
 enum class CachingMode { kLimited, kUnlimited };
 
-MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
-                                               int serial_number,
-                                               CachingMode caching_mode) {
+MaybeHandle<JSObject> ProbeInstantiationsCache(
+    Isolate* isolate, Handle<NativeContext> native_context, int serial_number,
+    CachingMode caching_mode) {
   DCHECK_LE(1, serial_number);
   if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
-    Handle<FixedArray> fast_cache =
-        isolate->fast_template_instantiations_cache();
-    Handle<Object> object{fast_cache->get(serial_number - 1), isolate};
+    FixedArray fast_cache =
+        native_context->fast_template_instantiations_cache();
+    Handle<Object> object{fast_cache.get(serial_number - 1), isolate};
     if (object->IsUndefined(isolate)) return {};
     return Handle<JSObject>::cast(object);
   }
   if (caching_mode == CachingMode::kUnlimited ||
       (serial_number <= TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
-    Handle<SimpleNumberDictionary> slow_cache =
-        isolate->slow_template_instantiations_cache();
-    int entry = slow_cache->FindEntry(isolate, serial_number);
+    SimpleNumberDictionary slow_cache =
+        native_context->slow_template_instantiations_cache();
+    int entry = slow_cache.FindEntry(isolate, serial_number);
     if (entry != SimpleNumberDictionary::kNotFound) {
-      return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
+      return handle(JSObject::cast(slow_cache.ValueAt(entry)), isolate);
     }
   }
   return {};
 }
 
-void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
-                                CachingMode caching_mode,
+void CacheTemplateInstantiation(Isolate* isolate,
+                                Handle<NativeContext> native_context,
+                                int serial_number, CachingMode caching_mode,
                                 Handle<JSObject> object) {
   DCHECK_LE(1, serial_number);
   if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
     Handle<FixedArray> fast_cache =
-        isolate->fast_template_instantiations_cache();
+        handle(native_context->fast_template_instantiations_cache(), isolate);
     Handle<FixedArray> new_cache =
         FixedArray::SetAndGrow(isolate, fast_cache, serial_number - 1, object);
     if (*new_cache != *fast_cache) {
-      isolate->native_context()->set_fast_template_instantiations_cache(
-          *new_cache);
+      native_context->set_fast_template_instantiations_cache(*new_cache);
     }
   } else if (caching_mode == CachingMode::kUnlimited ||
              (serial_number <=
               TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
     Handle<SimpleNumberDictionary> cache =
-        isolate->slow_template_instantiations_cache();
+        handle(native_context->slow_template_instantiations_cache(), isolate);
     auto new_cache =
         SimpleNumberDictionary::Set(isolate, cache, serial_number, object);
     if (*new_cache != *cache) {
-      isolate->native_context()->set_slow_template_instantiations_cache(
-          *new_cache);
+      native_context->set_slow_template_instantiations_cache(*new_cache);
     }
   }
 }
 
-void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
-                                  CachingMode caching_mode) {
+void UncacheTemplateInstantiation(Isolate* isolate,
+                                  Handle<NativeContext> native_context,
+                                  int serial_number, CachingMode caching_mode) {
   DCHECK_LE(1, serial_number);
   if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
-    Handle<FixedArray> fast_cache =
-        isolate->fast_template_instantiations_cache();
-    DCHECK(!fast_cache->get(serial_number - 1).IsUndefined(isolate));
-    fast_cache->set_undefined(serial_number - 1);
+    FixedArray fast_cache =
+        native_context->fast_template_instantiations_cache();
+    DCHECK(!fast_cache.get(serial_number - 1).IsUndefined(isolate));
+    fast_cache.set_undefined(serial_number - 1);
   } else if (caching_mode == CachingMode::kUnlimited ||
              (serial_number <=
               TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
     Handle<SimpleNumberDictionary> cache =
-        isolate->slow_template_instantiations_cache();
+        handle(native_context->slow_template_instantiations_cache(), isolate);
     int entry = cache->FindEntry(isolate, serial_number);
     DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
     cache = SimpleNumberDictionary::DeleteEntry(isolate, cache, entry);
-    isolate->native_context()->set_slow_template_instantiations_cache(*cache);
+    native_context->set_slow_template_instantiations_cache(*cache);
   }
 }
 
@@ -375,7 +383,8 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
   // Fast path.
   Handle<JSObject> result;
   if (serial_number) {
-    if (ProbeInstantiationsCache(isolate, serial_number, CachingMode::kLimited)
+    if (ProbeInstantiationsCache(isolate, isolate->native_context(),
+                                 serial_number, CachingMode::kLimited)
             .ToHandle(&result)) {
       return isolate->factory()->CopyJSObject(result);
     }
@@ -419,8 +428,8 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
     JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
     // Don't cache prototypes.
     if (serial_number) {
-      CacheTemplateInstantiation(isolate, serial_number, CachingMode::kLimited,
-                                 result);
+      CacheTemplateInstantiation(isolate, isolate->native_context(),
+                                 serial_number, CachingMode::kLimited, result);
       result = isolate->factory()->CopyJSObject(result);
     }
   }
@@ -451,13 +460,13 @@ MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
 }
 }  // namespace
 
-MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
-                                            Handle<FunctionTemplateInfo> data,
-                                            MaybeHandle<Name> maybe_name) {
+MaybeHandle<JSFunction> InstantiateFunction(
+    Isolate* isolate, Handle<NativeContext> native_context,
+    Handle<FunctionTemplateInfo> data, MaybeHandle<Name> maybe_name) {
   int serial_number = Smi::ToInt(data->serial_number());
   if (serial_number) {
     Handle<JSObject> result;
-    if (ProbeInstantiationsCache(isolate, serial_number,
+    if (ProbeInstantiationsCache(isolate, native_context, serial_number,
                                  CachingMode::kUnlimited)
             .ToHandle(&result)) {
       return Handle<JSFunction>::cast(result);
@@ -503,17 +512,17 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
           : JS_SPECIAL_API_OBJECT_TYPE;
 
   Handle<JSFunction> function = ApiNatives::CreateApiFunction(
-      isolate, data, prototype, function_type, maybe_name);
+      isolate, native_context, data, prototype, function_type, maybe_name);
   if (serial_number) {
     // Cache the function.
-    CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited,
-                               function);
+    CacheTemplateInstantiation(isolate, native_context, serial_number,
+                               CachingMode::kUnlimited, function);
   }
   MaybeHandle<JSObject> result = ConfigureInstance(isolate, function, data);
   if (result.is_null()) {
     // Uncache on error.
     if (serial_number) {
-      UncacheTemplateInstantiation(isolate, serial_number,
+      UncacheTemplateInstantiation(isolate, native_context, serial_number,
                                    CachingMode::kUnlimited);
     }
     return MaybeHandle<JSFunction>();
@@ -543,6 +552,14 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
 
 }  // namespace
 
+MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
+    Isolate* isolate, Handle<NativeContext> native_context,
+    Handle<FunctionTemplateInfo> data, MaybeHandle<Name> maybe_name) {
+  InvokeScope invoke_scope(isolate);
+  return ::v8::internal::InstantiateFunction(isolate, native_context, data,
+                                             maybe_name);
+}
+
 MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
     Handle<FunctionTemplateInfo> data, MaybeHandle<Name> maybe_name) {
   Isolate* isolate = data->GetIsolate();
@@ -626,8 +643,9 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate,
 }
 
 Handle<JSFunction> ApiNatives::CreateApiFunction(
-    Isolate* isolate, Handle<FunctionTemplateInfo> obj,
-    Handle<Object> prototype, InstanceType type, MaybeHandle<Name> maybe_name) {
+    Isolate* isolate, Handle<NativeContext> native_context,
+    Handle<FunctionTemplateInfo> obj, Handle<Object> prototype,
+    InstanceType type, MaybeHandle<Name> maybe_name) {
   Handle<SharedFunctionInfo> shared =
       FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj,
                                                           maybe_name);
@@ -635,8 +653,8 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
   DCHECK(shared->HasSharedName());
 
   Handle<JSFunction> result =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(
-          shared, isolate->native_context());
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+                                                            native_context);
 
   if (obj->remove_prototype()) {
     DCHECK(prototype.is_null());
diff --git a/deps/v8/src/api/api-natives.h b/deps/v8/src/api/api-natives.h
index 153212cc6c43d7..fb59eb6cfcdb3f 100644
--- a/deps/v8/src/api/api-natives.h
+++ b/deps/v8/src/api/api-natives.h
@@ -24,6 +24,11 @@ class ApiNatives {
  public:
   static const int kInitialFunctionCacheSize = 256;
 
+  V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> InstantiateFunction(
+      Isolate* isolate, Handle<NativeContext> native_context,
+      Handle<FunctionTemplateInfo> data,
+      MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
+
   V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> InstantiateFunction(
       Handle<FunctionTemplateInfo> data,
       MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
@@ -36,9 +41,9 @@ class ApiNatives {
       Handle<ObjectTemplateInfo> data);
 
   static Handle<JSFunction> CreateApiFunction(
-      Isolate* isolate, Handle<FunctionTemplateInfo> obj,
-      Handle<Object> prototype, InstanceType type,
-      MaybeHandle<Name> name = MaybeHandle<Name>());
+      Isolate* isolate, Handle<NativeContext> native_context,
+      Handle<FunctionTemplateInfo> obj, Handle<Object> prototype,
+      InstanceType type, MaybeHandle<Name> name = MaybeHandle<Name>());
 
   static void AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
                               Handle<Name> name, Handle<Object> value,
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index e02c74416b81c6..30eceb6223377f 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -68,6 +68,7 @@
 #include "src/objects/js-generator-inl.h"
 #include "src/objects/js-promise-inl.h"
 #include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
 #include "src/objects/module-inl.h"
 #include "src/objects/objects-inl.h"
 #include "src/objects/oddball.h"
@@ -121,9 +122,9 @@
 #include <windows.h>
 #include "include/v8-wasm-trap-handler-win.h"
 #include "src/trap-handler/handler-inside-win.h"
-#if V8_TARGET_ARCH_X64
+#if defined(V8_OS_WIN64)
 #include "src/diagnostics/unwinding-info-win64.h"
-#endif  // V8_TARGET_ARCH_X64
+#endif  // V8_OS_WIN64
 #endif  // V8_OS_WIN
 
 namespace v8 {
@@ -261,7 +262,7 @@ void CheckMicrotasksScopesConsistency(i::MicrotaskQueue* microtask_queue) {
 template <bool do_callback>
 class CallDepthScope {
  public:
-  explicit CallDepthScope(i::Isolate* isolate, Local<Context> context)
+  CallDepthScope(i::Isolate* isolate, Local<Context> context)
       : isolate_(isolate),
         context_(context),
         escaped_(false),
@@ -272,7 +273,7 @@ class CallDepthScope {
                                      ? i::InterruptsScope::kRunInterrupts
                                      : i::InterruptsScope::kPostponeInterrupts)
                               : i::InterruptsScope::kNoop) {
-    isolate_->handle_scope_implementer()->IncrementCallDepth();
+    isolate_->thread_local_top()->IncrementCallDepth(this);
     isolate_->set_next_v8_call_is_safe_for_termination(false);
     if (!context.IsEmpty()) {
       i::Handle<i::Context> env = Utils::OpenHandle(*context);
@@ -296,7 +297,7 @@ class CallDepthScope {
       i::Handle<i::Context> env = Utils::OpenHandle(*context_);
       microtask_queue = env->native_context().microtask_queue();
     }
-    if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
+    if (!escaped_) isolate_->thread_local_top()->DecrementCallDepth(this);
     if (do_callback) isolate_->FireCallCompletedCallback(microtask_queue);
 // TODO(jochen): This should be #ifdef DEBUG
 #ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
@@ -308,11 +309,10 @@ class CallDepthScope {
   void Escape() {
     DCHECK(!escaped_);
     escaped_ = true;
-    auto handle_scope_implementer = isolate_->handle_scope_implementer();
-    handle_scope_implementer->DecrementCallDepth();
-    bool clear_exception =
-        handle_scope_implementer->CallDepthIsZero() &&
-        isolate_->thread_local_top()->try_catch_handler_ == nullptr;
+    auto thread_local_top = isolate_->thread_local_top();
+    thread_local_top->DecrementCallDepth(this);
+    bool clear_exception = thread_local_top->CallDepthIsZero() &&
+                           thread_local_top->try_catch_handler_ == nullptr;
     isolate_->OptionalRescheduleException(clear_exception);
   }
 
@@ -323,6 +323,12 @@ class CallDepthScope {
   bool do_callback_;
   bool safe_for_termination_;
   i::InterruptsScope interrupts_scope_;
+  i::Address previous_stack_height_;
+
+  friend class i::ThreadLocalTop;
+
+  DISALLOW_NEW_AND_DELETE()
+  DISALLOW_COPY_AND_ASSIGN(CallDepthScope);
 };
 
 }  // namespace
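
The CallDepthScope hunks move call-depth bookkeeping from HandleScopeImplementer onto ThreadLocalTop and pass `this` into the increment/decrement calls so nesting can be checked per scope. A minimal standalone sketch of the RAII pattern, with a simplified stand-in for i::ThreadLocalTop (not V8's actual types):

    #include <cassert>

    // Stand-in for i::ThreadLocalTop: owns the per-thread depth counter.
    struct ThreadLocalTopSketch {
      int call_depth = 0;
      void IncrementCallDepth(void* /*scope*/) { ++call_depth; }
      void DecrementCallDepth(void* /*scope*/) {
        assert(call_depth > 0);
        --call_depth;
      }
      bool CallDepthIsZero() const { return call_depth == 0; }
    };

    // RAII guard mirroring CallDepthScope: depth is bumped on entry and,
    // unless the scope "escapes" early, dropped again on exit.
    class CallDepthGuard {
     public:
      explicit CallDepthGuard(ThreadLocalTopSketch* top) : top_(top) {
        top_->IncrementCallDepth(this);
      }
      ~CallDepthGuard() {
        if (!escaped_) top_->DecrementCallDepth(this);
      }
      void Escape() {
        escaped_ = true;
        top_->DecrementCallDepth(this);
      }

     private:
      ThreadLocalTopSketch* top_;
      bool escaped_ = false;
    };
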
@@ -811,10 +817,15 @@ StartupData SnapshotCreator::CreateBlob(
       // Complete in-object slack tracking for all functions.
       fun.CompleteInobjectSlackTrackingIfActive();
 
-      fun.ResetIfBytecodeFlushed();
-
       // Also, clear out feedback vectors, or any optimized code.
-      if (fun.IsOptimized() || fun.IsInterpreted()) {
+      // Note that checking for fun.IsOptimized() || fun.IsInterpreted() is not
+      // sufficient because the function can have a feedback vector even if it
+      // is not compiled (e.g. when the bytecode was flushed). On the other
+      // hand, only checking for the feedback vector is not sufficient because
+      // there can be multiple functions sharing the same feedback vector. So we
+      // need all these checks.
+      if (fun.IsOptimized() || fun.IsInterpreted() ||
+          !fun.raw_feedback_cell().value().IsUndefined()) {
         fun.raw_feedback_cell().set_value(
             i::ReadOnlyRoots(isolate).undefined_value());
         fun.set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy));
@@ -1022,10 +1033,11 @@ i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
 }
 
 i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
-                                         internal::Address* slot) {
+                                         internal::Address* slot,
+                                         bool has_destructor) {
   LOG_API(isolate, TracedGlobal, New);
   i::Handle<i::Object> result =
-      isolate->global_handles()->CreateTraced(*obj, slot);
+      isolate->global_handles()->CreateTraced(*obj, slot, has_destructor);
 #ifdef VERIFY_HEAP
   if (i::FLAG_verify_heap) {
     i::Object(*obj).ObjectVerify(isolate);
@@ -1048,6 +1060,11 @@ void V8::MoveTracedGlobalReference(internal::Address** from,
   i::GlobalHandles::MoveTracedGlobal(from, to);
 }
 
+void V8::CopyTracedGlobalReference(const internal::Address* const* from,
+                                   internal::Address** to) {
+  i::GlobalHandles::CopyTracedGlobal(from, to);
+}
+
 void V8::MakeWeak(i::Address* location, void* parameter,
                   WeakCallbackInfo<void>::Callback weak_callback,
                   WeakCallbackType type) {
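
Alongside the existing move entry point, a copy entry point for traced references is added, which presumably backs copy support on v8::TracedGlobal in the public header. A hedged usage sketch, assuming that matching copy constructor exists in this version's v8.h; `some_object` is a stand-in v8::Local<v8::Object>:

    v8::TracedGlobal<v8::Object> a(isolate, some_object);
    v8::TracedGlobal<v8::Object> b = a;             // CopyTracedGlobalReference
    v8::TracedGlobal<v8::Object> c = std::move(a);  // MoveTracedGlobalReference
    // After the move, a is empty; b and c refer to the same heap object and
    // are kept alive via the EmbedderHeapTracer rather than weak callbacks.
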
@@ -2336,7 +2353,8 @@ Local<Module> Module::CreateSyntheticModule(
   i::Handle<i::FixedArray> i_export_names = i_isolate->factory()->NewFixedArray(
       static_cast<int>(export_names.size()));
   for (int i = 0; i < i_export_names->length(); ++i) {
-    i::Handle<i::String> str = Utils::OpenHandle(*export_names[i]);
+    i::Handle<i::String> str = i_isolate->factory()->InternalizeString(
+        Utils::OpenHandle(*export_names[i]));
     i_export_names->set(i, *str);
   }
   return v8::Utils::ToLocal(
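
Synthetic-module export names are now internalized before being stored, presumably so later export lookups can rely on canonical, identity-comparable strings. A hedged illustration of what Factory::InternalizeString guarantees (V8-internal style, names are stand-ins):

    i::Handle<i::String> a =
        isolate->factory()->NewStringFromAsciiChecked("default");
    i::Handle<i::String> b =
        isolate->factory()->NewStringFromAsciiChecked("default");
    i::Handle<i::String> ia = isolate->factory()->InternalizeString(a);
    i::Handle<i::String> ib = isolate->factory()->InternalizeString(b);
    // Equal contents internalize to one canonical object, so internalized
    // names can be compared by address.
    DCHECK(ia.is_identical_to(ib));
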
@@ -4690,6 +4708,11 @@ bool v8::Object::IsConstructor() {
   return self->IsConstructor();
 }
 
+bool v8::Object::IsApiWrapper() {
+  auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+  return self->IsApiWrapper();
+}
+
 MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
                                          Local<Value> recv, int argc,
                                          Local<Value> argv[]) {
@@ -4870,7 +4893,7 @@ Local<Value> Function::GetDisplayName() const {
   }
   auto func = i::Handle<i::JSFunction>::cast(self);
   i::Handle<i::String> property_name =
-      isolate->factory()->NewStringFromStaticChars("displayName");
+      isolate->factory()->display_name_string();
   i::Handle<i::Object> value =
       i::JSReceiver::GetDataProperty(func, property_name);
   if (value->IsString()) {
@@ -5582,14 +5605,14 @@ bool V8::EnableWebAssemblyTrapHandler(bool use_v8_signal_handler) {
 #if defined(V8_OS_WIN)
 void V8::SetUnhandledExceptionCallback(
     UnhandledExceptionCallback unhandled_exception_callback) {
-#if defined(V8_TARGET_ARCH_X64)
+#if defined(V8_OS_WIN64)
   v8::internal::win64_unwindinfo::SetUnhandledExceptionCallback(
       unhandled_exception_callback);
 #else
-  // Not implemented on ARM64.
-#endif
+  // Not implemented, port needed.
+#endif  // V8_OS_WIN64
 }
-#endif
+#endif  // V8_OS_WIN
 
 void v8::V8::SetEntropySource(EntropySource entropy_source) {
   base::RandomNumberGenerator::SetEntropySource(entropy_source);
@@ -6228,7 +6251,7 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
 
 MaybeLocal<String> v8::String::NewExternalOneByte(
     Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
-  CHECK(resource && resource->data());
+  CHECK_NOT_NULL(resource);
   // TODO(dcarney): throw a context free exception.
   if (resource->length() > static_cast<size_t>(i::String::kMaxLength)) {
     return MaybeLocal<String>();
@@ -6236,16 +6259,16 @@ MaybeLocal<String> v8::String::NewExternalOneByte(
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   LOG_API(i_isolate, String, NewExternalOneByte);
-  if (resource->length() > 0) {
-    i::Handle<i::String> string = i_isolate->factory()
-                                      ->NewExternalStringFromOneByte(resource)
-                                      .ToHandleChecked();
-    return Utils::ToLocal(string);
-  } else {
+  if (resource->length() == 0) {
     // The resource isn't going to be used, free it immediately.
     resource->Dispose();
     return Utils::ToLocal(i_isolate->factory()->empty_string());
   }
+  CHECK_NOT_NULL(resource->data());
+  i::Handle<i::String> string = i_isolate->factory()
+                                    ->NewExternalStringFromOneByte(resource)
+                                    .ToHandleChecked();
+  return Utils::ToLocal(string);
 }
 
 bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
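
With the NewExternalOneByte hunk above, a zero-length resource is disposed immediately and the canonical empty string is returned, so data() only needs to be non-null when length() > 0. A hedged sketch of a minimal resource; the subclass name is an illustration, not part of the patch:

    class StaticOneByteResource
        : public v8::String::ExternalOneByteStringResource {
     public:
      StaticOneByteResource(const char* data, size_t length)
          : data_(data), length_(length) {}
      const char* data() const override { return data_; }
      size_t length() const override { return length_; }

     private:
      const char* data_;
      size_t length_;
    };

    // v8::MaybeLocal<v8::String> s = v8::String::NewExternalOneByte(
    //     isolate, new StaticOneByteResource("hello", 5));
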
@@ -7693,6 +7716,11 @@ bool Isolate::InContext() {
   return !isolate->context().is_null();
 }
 
+void Isolate::ClearKeptObjects() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->ClearKeptObjects();
+}
+
 v8::Local<v8::Context> Isolate::GetCurrentContext() {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   i::Context context = isolate->context();
@@ -7956,6 +7984,28 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
   isolate->SetAbortOnUncaughtExceptionCallback(callback);
 }
 
+void Isolate::SetHostCleanupFinalizationGroupCallback(
+    HostCleanupFinalizationGroupCallback callback) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->SetHostCleanupFinalizationGroupCallback(callback);
+}
+
+Maybe<bool> FinalizationGroup::Cleanup(
+    Local<FinalizationGroup> finalization_group) {
+  i::Handle<i::JSFinalizationGroup> fg = Utils::OpenHandle(*finalization_group);
+  i::Isolate* isolate = fg->native_context().GetIsolate();
+  i::Handle<i::Context> i_context(fg->native_context(), isolate);
+  Local<Context> context = Utils::ToLocal(i_context);
+  ENTER_V8(isolate, context, FinalizationGroup, Cleanup, Nothing<bool>(),
+           i::HandleScope);
+  i::Handle<i::Object> callback(fg->cleanup(), isolate);
+  fg->set_scheduled_for_cleanup(false);
+  has_pending_exception =
+      i::JSFinalizationGroup::Cleanup(isolate, fg, callback).IsNothing();
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return Just(true);
+}
+
 void Isolate::SetHostImportModuleDynamicallyCallback(
     HostImportModuleDynamicallyCallback callback) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
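
SetHostCleanupFinalizationGroupCallback and FinalizationGroup::Cleanup together let the embedder decide when FinalizationRegistry cleanup callbacks run instead of V8 running them at arbitrary points. A hedged embedder sketch, assuming the callback takes the context and the group as declared in this version's v8.h; a real embedder would post the cleanup to its event loop rather than run it inline:

    void OnCleanupScheduled(v8::Local<v8::Context> context,
                            v8::Local<v8::FinalizationGroup> fg) {
      v8::Context::Scope scope(context);
      if (v8::FinalizationGroup::Cleanup(fg).IsNothing()) {
        // A cleanup callback threw; the exception is pending on the isolate.
      }
    }

    // During isolate setup:
    //   isolate->SetHostCleanupFinalizationGroupCallback(OnCleanupScheduled);
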
@@ -8034,13 +8084,13 @@ Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope(
     Isolate* isolate)
     : isolate_(reinterpret_cast<i::Isolate*>(isolate)),
       microtask_queue_(isolate_->default_microtask_queue()) {
-  isolate_->handle_scope_implementer()->IncrementCallDepth();
+  isolate_->thread_local_top()->IncrementCallDepth(this);
   microtask_queue_->IncrementMicrotasksSuppressions();
 }
 
 Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
   microtask_queue_->DecrementMicrotasksSuppressions();
-  isolate_->handle_scope_implementer()->DecrementCallDepth();
+  isolate_->thread_local_top()->DecrementCallDepth(this);
 }
 
 Isolate::SafeForTerminationScope::SafeForTerminationScope(v8::Isolate* isolate)
@@ -8165,8 +8215,10 @@ bool Isolate::GetHeapCodeAndMetadataStatistics(
 void Isolate::GetStackSample(const RegisterState& state, void** frames,
                              size_t frames_limit, SampleInfo* sample_info) {
   RegisterState regs = state;
-  if (TickSample::GetStackSample(this, &regs, TickSample::kSkipCEntryFrame,
-                                 frames, frames_limit, sample_info)) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  if (i::TickSample::GetStackSample(isolate, &regs,
+                                    i::TickSample::kSkipCEntryFrame, frames,
+                                    frames_limit, sample_info)) {
     return;
   }
   sample_info->frames_count = 0;
@@ -8326,6 +8378,11 @@ void Isolate::SetAddHistogramSampleFunction(
       ->SetAddHistogramSampleFunction(callback);
 }
 
+void Isolate::SetAddCrashKeyCallback(AddCrashKeyCallback callback) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->SetAddCrashKeyCallback(callback);
+}
+
 bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) {
   // Returning true tells the caller that it need not
   // continue to call IdleNotification.
@@ -8481,6 +8538,9 @@ CALLBACK_SETTER(WasmStreamingCallback, WasmStreamingCallback,
 CALLBACK_SETTER(WasmThreadsEnabledCallback, WasmThreadsEnabledCallback,
                 wasm_threads_enabled_callback)
 
+CALLBACK_SETTER(WasmLoadSourceMapCallback, WasmLoadSourceMapCallback,
+                wasm_load_source_map_callback)
+
 void Isolate::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
                                        void* data) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8641,7 +8701,11 @@ MicrotasksScope::MicrotasksScope(Isolate* isolate,
 MicrotasksScope::~MicrotasksScope() {
   if (run_) {
     microtask_queue_->DecrementMicrotasksScopeDepth();
-    if (MicrotasksPolicy::kScoped == microtask_queue_->microtasks_policy()) {
+    if (MicrotasksPolicy::kScoped == microtask_queue_->microtasks_policy() &&
+        !isolate_->has_scheduled_exception()) {
+      DCHECK_IMPLIES(isolate_->has_scheduled_exception(),
+                     isolate_->scheduled_exception() ==
+                         i::ReadOnlyRoots(isolate_).termination_exception());
       microtask_queue_->PerformCheckpoint(reinterpret_cast<Isolate*>(isolate_));
     }
   }
@@ -9800,9 +9864,32 @@ int CpuProfile::GetSamplesCount() const {
   return reinterpret_cast<const i::CpuProfile*>(this)->samples_count();
 }
 
-CpuProfiler* CpuProfiler::New(Isolate* isolate, CpuProfilingNamingMode mode) {
-  return reinterpret_cast<CpuProfiler*>(
-      new i::CpuProfiler(reinterpret_cast<i::Isolate*>(isolate), mode));
+CpuProfiler* CpuProfiler::New(Isolate* isolate,
+                              CpuProfilingNamingMode naming_mode,
+                              CpuProfilingLoggingMode logging_mode) {
+  return reinterpret_cast<CpuProfiler*>(new i::CpuProfiler(
+      reinterpret_cast<i::Isolate*>(isolate), naming_mode, logging_mode));
+}
+
+CpuProfilingOptions::CpuProfilingOptions(CpuProfilingMode mode,
+                                         unsigned max_samples,
+                                         int sampling_interval_us,
+                                         MaybeLocal<Context> filter_context)
+    : mode_(mode),
+      max_samples_(max_samples),
+      sampling_interval_us_(sampling_interval_us) {
+  if (!filter_context.IsEmpty()) {
+    Local<Context> local_filter_context = filter_context.ToLocalChecked();
+    filter_context_.Reset(local_filter_context->GetIsolate(),
+                          local_filter_context);
+  }
+}
+
+void* CpuProfilingOptions::raw_filter_context() const {
+  return reinterpret_cast<void*>(
+      i::Context::cast(*Utils::OpenPersistent(filter_context_))
+          .native_context()
+          .address());
 }
 
 void CpuProfiler::Dispose() { delete reinterpret_cast<i::CpuProfiler*>(this); }
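
CpuProfiler::New now takes an explicit logging mode, and CpuProfilingOptions gains a filter context so sampling can be restricted to one context (resolved to its native context address in raw_filter_context() above). A hedged usage sketch; the enum and constant names are the ones this version's v8-profiler.h is expected to declare, and `context`/`title` are stand-ins:

    v8::CpuProfiler* profiler =
        v8::CpuProfiler::New(isolate, v8::kDebugNaming, v8::kLazyLogging);
    v8::CpuProfilingOptions options(
        v8::kLeafNodeLineNumbers,
        /*max_samples=*/v8::CpuProfilingOptions::kNoSampleLimit,
        /*sampling_interval_us=*/1000,
        /*filter_context=*/context);  // only sample inside this context
    profiler->StartProfiling(title, options);
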
@@ -10068,6 +10155,10 @@ SnapshotObjectId HeapProfiler::GetObjectId(Local<Value> value) {
   return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(obj);
 }
 
+SnapshotObjectId HeapProfiler::GetObjectId(NativeObject value) {
+  return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(value);
+}
+
 Local<Value> HeapProfiler::FindObjectById(SnapshotObjectId id) {
   i::Handle<i::Object> obj =
       reinterpret_cast<i::HeapProfiler*>(this)->FindHeapObjectById(id);
@@ -10200,6 +10291,17 @@ void EmbedderHeapTracer::TracePrologue(TraceFlags flags) {
 #endif
 }
 
+void EmbedderHeapTracer::TraceEpilogue(TraceSummary* trace_summary) {
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+  TraceEpilogue();
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+}
+
 void EmbedderHeapTracer::FinalizeTracing() {
   if (isolate_) {
     i::Isolate* isolate = reinterpret_cast<i::Isolate*>(isolate_);
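
The default TraceEpilogue(TraceSummary*) body forwards to the older no-argument virtual so embedders that still override the legacy hook keep working; the pragmas suppress the warning for that one intentional call. A generic sketch of the shim pattern (not V8 code):

    struct TracerBase {
      virtual ~TracerBase() = default;
      // V8_DEPRECATED in the real header:
      virtual void Finish() {}
      virtual void Finish(int /*summary*/) {
    #if defined(__clang__)
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wdeprecated"
    #endif
        Finish();  // intentional call into the legacy hook
    #if defined(__clang__)
    #pragma clang diagnostic pop
    #endif
      }
    };
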
@@ -10248,8 +10350,7 @@ void EmbedderHeapTracer::RegisterEmbedderReference(
   if (ref.IsEmpty()) return;
 
   i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
-  heap->RegisterExternallyReferencedObject(
-      reinterpret_cast<i::Address*>(ref.val_));
+  heap->RegisterExternallyReferencedObject(reinterpret_cast<i::Address*>(*ref));
 }
 
 void EmbedderHeapTracer::IterateTracedGlobalHandles(
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index 6135a7dfc62024..21bbb3a101549d 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -90,6 +90,7 @@ class RegisteredExtension {
   V(Data, Object)                              \
   V(RegExp, JSRegExp)                          \
   V(Object, JSReceiver)                        \
+  V(FinalizationGroup, JSFinalizationGroup)    \
   V(Array, JSArray)                            \
   V(Map, JSMap)                                \
   V(Set, JSSet)                                \
@@ -198,6 +199,8 @@ class Utils {
       v8::internal::Handle<v8::internal::JSTypedArray> obj);
   static inline Local<BigUint64Array> ToLocalBigUint64Array(
       v8::internal::Handle<v8::internal::JSTypedArray> obj);
+  static inline Local<FinalizationGroup> ToLocal(
+      v8::internal::Handle<v8::internal::JSFinalizationGroup> obj);
 
   static inline Local<SharedArrayBuffer> ToLocalShared(
       v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
@@ -248,9 +251,9 @@ class Utils {
   template <class From, class To>
   static inline Local<To> Convert(v8::internal::Handle<From> obj);
 
-  template <class T>
+  template <class T, class M>
   static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
-      const v8::Persistent<T>& persistent) {
+      const v8::Persistent<T, M>& persistent) {
     return v8::internal::Handle<v8::internal::Object>(
         reinterpret_cast<v8::internal::Address*>(persistent.val_));
   }
@@ -354,7 +357,6 @@ class HandleScopeImplementer {
   explicit HandleScopeImplementer(Isolate* isolate)
       : isolate_(isolate),
         spare_(nullptr),
-        call_depth_(0),
         last_handle_before_deferred_block_(nullptr) {}
 
   ~HandleScopeImplementer() { DeleteArray(spare_); }
@@ -373,11 +375,6 @@ class HandleScopeImplementer {
   inline internal::Address* GetSpareOrNewBlock();
   inline void DeleteExtensions(internal::Address* prev_limit);
 
-  // Call depth represents nested v8 api calls.
-  inline void IncrementCallDepth() { call_depth_++; }
-  inline void DecrementCallDepth() { call_depth_--; }
-  inline bool CallDepthIsZero() { return call_depth_ == 0; }
-
   inline void EnterContext(Context context);
   inline void LeaveContext();
   inline bool LastEnteredContextWas(Context context);
@@ -414,7 +411,6 @@ class HandleScopeImplementer {
     saved_contexts_.detach();
     spare_ = nullptr;
     last_handle_before_deferred_block_ = nullptr;
-    call_depth_ = 0;
   }
 
   void Free() {
@@ -431,7 +427,7 @@ class HandleScopeImplementer {
       DeleteArray(spare_);
       spare_ = nullptr;
     }
-    DCHECK_EQ(call_depth_, 0);
+    DCHECK(isolate_->thread_local_top()->CallDepthIsZero());
   }
 
   void BeginDeferredScope();
@@ -451,8 +447,6 @@ class HandleScopeImplementer {
   // Used as a stack to keep track of saved contexts.
   DetachableVector<Context> saved_contexts_;
   Address* spare_;
-  int call_depth_;
-
   Address* last_handle_before_deferred_block_;
   // This is only used for threading support.
   HandleScopeData handle_scope_data_;
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index b4836ff7847488..2796e59a8dbb90 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -382,6 +382,12 @@ void AstTraversalVisitor<Subclass>::VisitThrow(Throw* expr) {
   RECURSE_EXPRESSION(Visit(expr->exception()));
 }
 
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitOptionalChain(OptionalChain* expr) {
+  PROCESS_EXPRESSION(expr);
+  RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
 template <class Subclass>
 void AstTraversalVisitor<Subclass>::VisitProperty(Property* expr) {
   PROCESS_EXPRESSION(expr);
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 9987eb28449a21..4b6c4805dedc16 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -122,6 +122,10 @@ bool Expression::IsUndefinedLiteral() const {
          var_proxy->raw_name()->IsOneByteEqualTo("undefined");
 }
 
+bool Expression::IsLiteralButNotNullOrUndefined() const {
+  return IsLiteral() && !IsNullOrUndefinedLiteral();
+}
+
 bool Expression::ToBooleanIsTrue() const {
   return IsLiteral() && AsLiteral()->ToBooleanIsTrue();
 }
@@ -217,13 +221,7 @@ bool FunctionLiteral::AllowsLazyCompilation() {
 }
 
 bool FunctionLiteral::SafeToSkipArgumentsAdaptor() const {
-  // TODO(bmeurer,verwaest): The --fast_calls_with_arguments_mismatches
-  // is mostly here for checking the real-world impact of the calling
-  // convention. There's not really a point in turning off this flag
-  // otherwise, so we should remove it at some point, when we're done
-  // with the experiments (https://crbug.com/v8/8895).
-  return FLAG_fast_calls_with_arguments_mismatches &&
-         language_mode() == LanguageMode::kStrict &&
+  return language_mode() == LanguageMode::kStrict &&
          scope()->arguments() == nullptr &&
          scope()->rest_parameter() == nullptr;
 }
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index bd52d1b2c04065..ced9f775dd57bc 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -16,6 +16,7 @@
 #include "src/common/globals.h"
 #include "src/execution/isolate.h"
 #include "src/heap/factory.h"
+#include "src/objects/function-syntax-kind.h"
 #include "src/objects/literal-objects.h"
 #include "src/objects/smi.h"
 #include "src/parsing/token.h"
@@ -94,6 +95,7 @@ namespace internal {
   V(ImportCallExpression)       \
   V(Literal)                    \
   V(NativeFunctionLiteral)      \
+  V(OptionalChain)              \
   V(Property)                   \
   V(ResolvedProperty)           \
   V(Spread)                     \
@@ -168,11 +170,13 @@ class AstNode: public ZoneObject {
   void* operator new(size_t size);
 
   int position_;
-  class NodeTypeField : public BitField<NodeType, 0, 6> {};
+  using NodeTypeField = BitField<NodeType, 0, 6>;
 
  protected:
   uint32_t bit_field_;
-  static const uint8_t kNextBitFieldIndex = NodeTypeField::kNext;
+
+  template <class T, int size>
+  using NextBitField = NodeTypeField::Next<T, size>;
 
   AstNode(int position, NodeType type)
       : position_(position), bit_field_(NodeTypeField::encode(type)) {}
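
The ast.h hunks below all apply one mechanical refactor: bit fields chained via `class X : public BitField<T, Prev::kNext, n>` become `using X = Prev::Next<T, n>`. A minimal sketch of the BitField shape this relies on, mirroring v8::base::BitField: Next<> derives the following field's shift from the current field's shift plus size:

    #include <cstdint>

    template <class T, int shift, int size>
    class BitFieldSketch {
     public:
      static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;

      template <class T2, int size2>
      using Next = BitFieldSketch<T2, shift + size, size2>;

      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static constexpr uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
      static constexpr T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    // Usage mirroring the chains below:
    //   using NodeType   = BitFieldSketch<int, 0, 6>;
    //   using IsParen    = NodeType::Next<bool, 1>;  // occupies bit 6
    //   using IsAssigned = IsParen::Next<bool, 1>;   // occupies bit 7
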
@@ -182,8 +186,6 @@ class AstNode: public ZoneObject {
 class Statement : public AstNode {
  protected:
   Statement(int position, NodeType type) : AstNode(position, type) {}
-
-  static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
 };
 
 
@@ -245,6 +247,14 @@ class Expression : public AstNode {
   // that this also checks for loads of the global "undefined" variable.
   bool IsUndefinedLiteral() const;
 
+  // True if either null literal or undefined literal.
+  inline bool IsNullOrUndefinedLiteral() const {
+    return IsNullLiteral() || IsUndefinedLiteral();
+  }
+
+  // True if a literal and not null or undefined.
+  bool IsLiteralButNotNullOrUndefined() const;
+
   bool IsCompileTimeValue();
 
   bool IsPattern() {
@@ -265,15 +275,15 @@ class Expression : public AstNode {
   }
 
  private:
-  class IsParenthesizedField
-      : public BitField<bool, AstNode::kNextBitFieldIndex, 1> {};
+  using IsParenthesizedField = AstNode::NextBitField<bool, 1>;
 
  protected:
   Expression(int pos, NodeType type) : AstNode(pos, type) {
     DCHECK(!is_parenthesized());
   }
 
-  static const uint8_t kNextBitFieldIndex = IsParenthesizedField::kNext;
+  template <class T, int size>
+  using NextBitField = IsParenthesizedField::Next<T, size>;
 };
 
 class FailureExpression : public Expression {
@@ -321,8 +331,7 @@ class BreakableStatement : public Statement {
   }
 
  private:
-  class BreakableTypeField
-      : public BitField<BreakableType, Statement::kNextBitFieldIndex, 1> {};
+  using BreakableTypeField = Statement::NextBitField<BreakableType, 1>;
 
  protected:
   BreakableStatement(BreakableType breakable_type, int position, NodeType type)
@@ -330,7 +339,8 @@ class BreakableStatement : public Statement {
     bit_field_ |= BreakableTypeField::encode(breakable_type);
   }
 
-  static const uint8_t kNextBitFieldIndex = BreakableTypeField::kNext;
+  template <class T, int size>
+  using NextBitField = BreakableTypeField::Next<T, size>;
 };
 
 class Block : public BreakableStatement {
@@ -357,10 +367,8 @@ class Block : public BreakableStatement {
   ZonePtrList<Statement> statements_;
   Scope* scope_;
 
-  class IgnoreCompletionField
-      : public BitField<bool, BreakableStatement::kNextBitFieldIndex, 1> {};
-  class IsLabeledField
-      : public BitField<bool, IgnoreCompletionField::kNext, 1> {};
+  using IgnoreCompletionField = BreakableStatement::NextBitField<bool, 1>;
+  using IsLabeledField = IgnoreCompletionField::Next<bool, 1>;
 
  protected:
   Block(Zone* zone, ZonePtrList<const AstRawString>* labels, int capacity,
@@ -448,8 +456,7 @@ class VariableDeclaration : public Declaration {
  private:
   friend class AstNodeFactory;
 
-  class IsNestedField
-      : public BitField<bool, Declaration::kNextBitFieldIndex, 1> {};
+  using IsNestedField = Declaration::NextBitField<bool, 1>;
 
  protected:
   explicit VariableDeclaration(int pos, bool is_nested = false)
@@ -457,7 +464,8 @@ class VariableDeclaration : public Declaration {
     bit_field_ = IsNestedField::update(bit_field_, is_nested);
   }
 
-  static const uint8_t kNextBitFieldIndex = IsNestedField::kNext;
+  template <class T, int size>
+  using NextBitField = IsNestedField::Next<T, size>;
 };
 
 // For var declarations that appear in a block scope.
@@ -524,9 +532,6 @@ class IterationStatement : public BreakableStatement {
         body_(nullptr) {}
   void Initialize(Statement* body) { body_ = body; }
 
-  static const uint8_t kNextBitFieldIndex =
-      BreakableStatement::kNextBitFieldIndex;
-
  private:
   ZonePtrList<const AstRawString>* labels_;
   ZonePtrList<const AstRawString>* own_labels_;
@@ -740,8 +745,7 @@ class ReturnStatement final : public JumpStatement {
   Expression* expression_;
   int end_position_;
 
-  class TypeField
-      : public BitField<Type, JumpStatement::kNextBitFieldIndex, 1> {};
+  using TypeField = JumpStatement::NextBitField<Type, 1>;
 };
 
 
@@ -977,8 +981,7 @@ class SloppyBlockFunctionStatement final : public Statement {
  private:
   friend class AstNodeFactory;
 
-  class TokenField
-      : public BitField<Token::Value, Statement::kNextBitFieldIndex, 8> {};
+  using TokenField = Statement::NextBitField<Token::Value, 8>;
 
   SloppyBlockFunctionStatement(int pos, Variable* var, Token::Value init,
                                Statement* statement)
@@ -1079,7 +1082,7 @@ class Literal final : public Expression {
  private:
   friend class AstNodeFactory;
 
-  class TypeField : public BitField<Type, Expression::kNextBitFieldIndex, 4> {};
+  using TypeField = Expression::NextBitField<Type, 4>;
 
   Literal(int smi, int position) : Expression(position, kLiteral), smi_(smi) {
     bit_field_ = TypeField::update(bit_field_, kSmi);
@@ -1210,10 +1213,9 @@ class AggregateLiteral : public MaterializedLiteral {
 
  private:
   int depth_ : 31;
-  class NeedsInitialAllocationSiteField
-      : public BitField<bool, MaterializedLiteral::kNextBitFieldIndex, 1> {};
-  class IsSimpleField
-      : public BitField<bool, NeedsInitialAllocationSiteField::kNext, 1> {};
+  using NeedsInitialAllocationSiteField =
+      MaterializedLiteral::NextBitField<bool, 1>;
+  using IsSimpleField = NeedsInitialAllocationSiteField::Next<bool, 1>;
 
  protected:
   friend class AstNodeFactory;
@@ -1236,7 +1238,8 @@ class AggregateLiteral : public MaterializedLiteral {
     bit_field_ = NeedsInitialAllocationSiteField::update(bit_field_, required);
   }
 
-  static const uint8_t kNextBitFieldIndex = IsSimpleField::kNext;
+  template <class T, int size>
+  using NextBitField = IsSimpleField::Next<T, size>;
 };
 
 // Common supertype for ObjectLiteralProperty and ClassLiteralProperty
@@ -1375,12 +1378,6 @@ class ObjectLiteral final : public AggregateLiteral {
       static_cast<int>(AggregateLiteral::kNeedsInitialAllocationSite) <
       static_cast<int>(kFastElements));
 
-  struct Accessors: public ZoneObject {
-    Accessors() : getter(nullptr), setter(nullptr) {}
-    ObjectLiteralProperty* getter;
-    ObjectLiteralProperty* setter;
-  };
-
  private:
   friend class AstNodeFactory;
 
@@ -1408,19 +1405,14 @@ class ObjectLiteral final : public AggregateLiteral {
   void set_has_null_protoype(bool has_null_prototype) {
     bit_field_ = HasNullPrototypeField::update(bit_field_, has_null_prototype);
   }
-
   uint32_t boilerplate_properties_;
   Handle<ObjectBoilerplateDescription> boilerplate_description_;
   ZoneList<Property*> properties_;
 
-  class HasElementsField
-      : public BitField<bool, AggregateLiteral::kNextBitFieldIndex, 1> {};
-  class HasRestPropertyField
-      : public BitField<bool, HasElementsField::kNext, 1> {};
-  class FastElementsField
-      : public BitField<bool, HasRestPropertyField::kNext, 1> {};
-  class HasNullPrototypeField
-      : public BitField<bool, FastElementsField::kNext, 1> {};
+  using HasElementsField = AggregateLiteral::NextBitField<bool, 1>;
+  using HasRestPropertyField = HasElementsField::Next<bool, 1>;
+  using FastElementsField = HasRestPropertyField::Next<bool, 1>;
+  using HasNullPrototypeField = FastElementsField::Next<bool, 1>;
 };
 
 // An array literal has a literals object that is used
@@ -1512,6 +1504,9 @@ class VariableProxy final : public Expression {
       var()->SetMaybeAssigned();
     }
   }
+  void clear_is_assigned() {
+    bit_field_ = IsAssignedField::update(bit_field_, false);
+  }
 
   bool is_resolved() const { return IsResolvedField::decode(bit_field_); }
   void set_is_resolved() {
@@ -1586,15 +1581,11 @@ class VariableProxy final : public Expression {
 
   explicit VariableProxy(const VariableProxy* copy_from);
 
-  class IsAssignedField
-      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
-  class IsResolvedField : public BitField<bool, IsAssignedField::kNext, 1> {};
-  class IsRemovedFromUnresolvedField
-      : public BitField<bool, IsResolvedField::kNext, 1> {};
-  class IsNewTargetField
-      : public BitField<bool, IsRemovedFromUnresolvedField::kNext, 1> {};
-  class HoleCheckModeField
-      : public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
+  using IsAssignedField = Expression::NextBitField<bool, 1>;
+  using IsResolvedField = IsAssignedField::Next<bool, 1>;
+  using IsRemovedFromUnresolvedField = IsResolvedField::Next<bool, 1>;
+  using IsNewTargetField = IsRemovedFromUnresolvedField::Next<bool, 1>;
+  using HoleCheckModeField = IsNewTargetField::Next<HoleCheckMode, 1>;
 
   union {
     const AstRawString* raw_name_;  // if !is_resolved_
@@ -1607,20 +1598,41 @@ class VariableProxy final : public Expression {
   friend base::ThreadedListTraits<VariableProxy>;
 };
 
+// Wraps an optional chain so its jump labels have a single attachment node.
+class OptionalChain final : public Expression {
+ public:
+  Expression* expression() const { return expression_; }
+
+ private:
+  friend class AstNodeFactory;
+
+  explicit OptionalChain(Expression* expression)
+      : Expression(0, kOptionalChain), expression_(expression) {}
+
+  Expression* expression_;
+};
+
 // Assignments to a property will use one of several types of property access.
 // Otherwise, the assignment is to a non-property (a global, a local slot, a
 // parameter slot, or a destructuring pattern).
 enum AssignType {
-  NON_PROPERTY,          // destructuring
-  NAMED_PROPERTY,        // obj.key
-  KEYED_PROPERTY,        // obj[key]
-  NAMED_SUPER_PROPERTY,  // super.key
-  KEYED_SUPER_PROPERTY,  // super[key]
-  PRIVATE_METHOD         // obj.#key: #key is a private method
+  NON_PROPERTY,              // destructuring
+  NAMED_PROPERTY,            // obj.key
+  KEYED_PROPERTY,            // obj[key]
+  NAMED_SUPER_PROPERTY,      // super.key
+  KEYED_SUPER_PROPERTY,      // super[key]
+  PRIVATE_METHOD,            // obj.#key: #key is a private method
+  PRIVATE_GETTER_ONLY,       // obj.#key: #key only has a getter defined
+  PRIVATE_SETTER_ONLY,       // obj.#key: #key only has a setter defined
+  PRIVATE_GETTER_AND_SETTER  // obj.#key: #key has both accessors defined
 };
 
 class Property final : public Expression {
  public:
+  bool is_optional_chain_link() const {
+    return IsOptionalChainLinkField::decode(bit_field_);
+  }
+
   bool IsValidReferenceExpression() const { return true; }
 
   Expression* obj() const { return obj_; }
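
A hedged sketch of how the new factory pieces compose for a source expression like `a?.b(x)`: each property access and call inside the chain carries the optional_chain flag, and the completed chain is wrapped once in an OptionalChain node so short-circuiting on null/undefined has a single jump target. `factory`, `a_proxy`, `b_key`, `args`, and `pos` are stand-ins, not names from this patch:

    Expression* member = factory->NewProperty(a_proxy, b_key, pos,
                                              /*optional_chain=*/true);
    Call* call = factory->NewCall(member, args, pos, Call::NOT_EVAL,
                                  /*optional_chain=*/true);
    OptionalChain* chain = factory->NewOptionalChain(call);
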
@@ -1637,8 +1649,21 @@ class Property final : public Expression {
       VariableProxy* proxy = property->key()->AsVariableProxy();
       DCHECK_NOT_NULL(proxy);
       Variable* var = proxy->var();
-      // Use KEYED_PROPERTY for private fields.
-      return var->requires_brand_check() ? PRIVATE_METHOD : KEYED_PROPERTY;
+
+      switch (var->mode()) {
+        case VariableMode::kPrivateMethod:
+          return PRIVATE_METHOD;
+        case VariableMode::kConst:
+          return KEYED_PROPERTY;  // Use KEYED_PROPERTY for private fields.
+        case VariableMode::kPrivateGetterOnly:
+          return PRIVATE_GETTER_ONLY;
+        case VariableMode::kPrivateSetterOnly:
+          return PRIVATE_SETTER_ONLY;
+        case VariableMode::kPrivateGetterAndSetter:
+          return PRIVATE_GETTER_AND_SETTER;
+        default:
+          UNREACHABLE();
+      }
     }
     bool super_access = property->IsSuperAccess();
     return (property->key()->IsPropertyName())
@@ -1649,10 +1674,13 @@ class Property final : public Expression {
  private:
   friend class AstNodeFactory;
 
-  Property(Expression* obj, Expression* key, int pos)
+  Property(Expression* obj, Expression* key, int pos, bool optional_chain)
       : Expression(pos, kProperty), obj_(obj), key_(key) {
+    bit_field_ |= IsOptionalChainLinkField::encode(optional_chain);
   }
 
+  using IsOptionalChainLinkField = Expression::NextBitField<bool, 1>;
+
   Expression* obj_;
   Expression* key_;
 };
@@ -1690,6 +1718,10 @@ class Call final : public Expression {
     return IsTaggedTemplateField::decode(bit_field_);
   }
 
+  bool is_optional_chain_link() const {
+    return IsOptionalChainLinkField::decode(bit_field_);
+  }
+
   bool only_last_arg_is_spread() {
     return !arguments_.is_empty() && arguments_.last()->IsSpread();
   }
@@ -1722,13 +1754,14 @@ class Call final : public Expression {
 
   Call(Zone* zone, Expression* expression,
        const ScopedPtrList<Expression>& arguments, int pos,
-       PossiblyEval possibly_eval)
+       PossiblyEval possibly_eval, bool optional_chain)
       : Expression(pos, kCall),
         expression_(expression),
         arguments_(0, nullptr) {
     bit_field_ |=
         IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL) |
-        IsTaggedTemplateField::encode(false);
+        IsTaggedTemplateField::encode(false) |
+        IsOptionalChainLinkField::encode(optional_chain);
     arguments.CopyTo(&arguments_, zone);
   }
 
@@ -1739,14 +1772,14 @@ class Call final : public Expression {
         expression_(expression),
         arguments_(0, nullptr) {
     bit_field_ |= IsPossiblyEvalField::encode(false) |
-                  IsTaggedTemplateField::encode(true);
+                  IsTaggedTemplateField::encode(true) |
+                  IsOptionalChainLinkField::encode(false);
     arguments.CopyTo(&arguments_, zone);
   }
 
-  class IsPossiblyEvalField
-      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
-  class IsTaggedTemplateField
-      : public BitField<bool, IsPossiblyEvalField::kNext, 1> {};
+  using IsPossiblyEvalField = Expression::NextBitField<bool, 1>;
+  using IsTaggedTemplateField = IsPossiblyEvalField::Next<bool, 1>;
+  using IsOptionalChainLinkField = IsTaggedTemplateField::Next<bool, 1>;
 
   Expression* expression_;
   ZonePtrList<Expression> arguments_;
@@ -1838,8 +1871,7 @@ class UnaryOperation final : public Expression {
 
   Expression* expression_;
 
-  class OperatorField
-      : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
+  using OperatorField = Expression::NextBitField<Token::Value, 7>;
 };
 
 
@@ -1865,8 +1897,7 @@ class BinaryOperation final : public Expression {
   Expression* left_;
   Expression* right_;
 
-  class OperatorField
-      : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
+  using OperatorField = Expression::NextBitField<Token::Value, 7>;
 };
 
 class NaryOperation final : public Expression {
@@ -1925,8 +1956,7 @@ class NaryOperation final : public Expression {
   };
   ZoneVector<NaryOperationEntry> subsequent_;
 
-  class OperatorField
-      : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
+  using OperatorField = Expression::NextBitField<Token::Value, 7>;
 };
 
 class CountOperation final : public Expression {
@@ -1946,9 +1976,8 @@ class CountOperation final : public Expression {
     bit_field_ |= IsPrefixField::encode(is_prefix) | TokenField::encode(op);
   }
 
-  class IsPrefixField
-      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
-  class TokenField : public BitField<Token::Value, IsPrefixField::kNext, 7> {};
+  using IsPrefixField = Expression::NextBitField<bool, 1>;
+  using TokenField = IsPrefixField::Next<Token::Value, 7>;
 
   Expression* expression_;
 };
@@ -1978,8 +2007,7 @@ class CompareOperation final : public Expression {
   Expression* left_;
   Expression* right_;
 
-  class OperatorField
-      : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
+  using OperatorField = Expression::NextBitField<Token::Value, 7>;
 };
 
 
@@ -2071,10 +2099,8 @@ class Assignment : public Expression {
  private:
   friend class AstNodeFactory;
 
-  class TokenField
-      : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
-  class LookupHoistingModeField : public BitField<bool, TokenField::kNext, 1> {
-  };
+  using TokenField = Expression::NextBitField<Token::Value, 7>;
+  using LookupHoistingModeField = TokenField::Next<bool, 1>;
 
   Expression* target_;
   Expression* value_;
@@ -2132,8 +2158,7 @@ class Suspend : public Expression {
 
   Expression* expression_;
 
-  class OnAbruptResumeField
-      : public BitField<OnAbruptResume, Expression::kNextBitFieldIndex, 1> {};
+  using OnAbruptResumeField = Expression::NextBitField<OnAbruptResume, 1>;
 };
 
 class Yield final : public Suspend {
@@ -2175,14 +2200,6 @@ class Throw final : public Expression {
 
 class FunctionLiteral final : public Expression {
  public:
-  enum FunctionType {
-    kAnonymousExpression,
-    kNamedExpression,
-    kDeclaration,
-    kAccessorOrMethod,
-    kWrapped,
-  };
-
   enum ParameterFlag : uint8_t {
     kNoDuplicateParameters,
     kHasDuplicateParameters
@@ -2204,12 +2221,8 @@ class FunctionLiteral final : public Expression {
   int function_token_position() const { return function_token_position_; }
   int start_position() const;
   int end_position() const;
-  bool is_declaration() const { return function_type() == kDeclaration; }
-  bool is_named_expression() const {
-    return function_type() == kNamedExpression;
-  }
   bool is_anonymous_expression() const {
-    return function_type() == kAnonymousExpression;
+    return syntax_kind() == FunctionSyntaxKind::kAnonymousExpression;
   }
 
   void mark_as_oneshot_iife() {
@@ -2219,7 +2232,6 @@ class FunctionLiteral final : public Expression {
   bool is_toplevel() const {
     return function_literal_id() == kFunctionLiteralIdTopLevel;
   }
-  bool is_wrapped() const { return function_type() == kWrapped; }
   V8_EXPORT_PRIVATE LanguageMode language_mode() const;
 
   static bool NeedsHomeObject(Expression* expr);
@@ -2289,8 +2301,8 @@ class FunctionLiteral final : public Expression {
   V8_EXPORT_PRIVATE bool ShouldEagerCompile() const;
   V8_EXPORT_PRIVATE void SetShouldEagerCompile();
 
-  FunctionType function_type() const {
-    return FunctionTypeBits::decode(bit_field_);
+  FunctionSyntaxKind syntax_kind() const {
+    return FunctionSyntaxKindBits::decode(bit_field_);
   }
   FunctionKind kind() const;
 
@@ -2342,7 +2354,7 @@ class FunctionLiteral final : public Expression {
                   AstValueFactory* ast_value_factory, DeclarationScope* scope,
                   const ScopedPtrList<Statement>& body,
                   int expected_property_count, int parameter_count,
-                  int function_length, FunctionType function_type,
+                  int function_length, FunctionSyntaxKind function_syntax_kind,
                   ParameterFlag has_duplicate_parameters,
                   EagerCompileHint eager_compile_hint, int position,
                   bool has_braces, int function_literal_id,
@@ -2359,28 +2371,28 @@ class FunctionLiteral final : public Expression {
         body_(0, nullptr),
         raw_inferred_name_(ast_value_factory->empty_cons_string()),
         produced_preparse_data_(produced_preparse_data) {
-    bit_field_ |=
-        FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
-        HasDuplicateParameters::encode(has_duplicate_parameters ==
-                                       kHasDuplicateParameters) |
-        DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
-        RequiresInstanceMembersInitializer::encode(false) |
-        HasBracesField::encode(has_braces) | OneshotIIFEBit::encode(false);
+    bit_field_ |= FunctionSyntaxKindBits::encode(function_syntax_kind) |
+                  Pretenure::encode(false) |
+                  HasDuplicateParameters::encode(has_duplicate_parameters ==
+                                                 kHasDuplicateParameters) |
+                  DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
+                  RequiresInstanceMembersInitializer::encode(false) |
+                  HasBracesField::encode(has_braces) |
+                  OneshotIIFEBit::encode(false);
     if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
     body.CopyTo(&body_, zone);
   }
 
-  class FunctionTypeBits
-      : public BitField<FunctionType, Expression::kNextBitFieldIndex, 3> {};
-  class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
-  class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
-  class DontOptimizeReasonField
-      : public BitField<BailoutReason, HasDuplicateParameters::kNext, 8> {};
-  class RequiresInstanceMembersInitializer
-      : public BitField<bool, DontOptimizeReasonField::kNext, 1> {};
-  class HasBracesField
-      : public BitField<bool, RequiresInstanceMembersInitializer::kNext, 1> {};
-  class OneshotIIFEBit : public BitField<bool, HasBracesField::kNext, 1> {};
+  using FunctionSyntaxKindBits =
+      Expression::NextBitField<FunctionSyntaxKind, 3>;
+  using Pretenure = FunctionSyntaxKindBits::Next<bool, 1>;
+  using HasDuplicateParameters = Pretenure::Next<bool, 1>;
+  using DontOptimizeReasonField =
+      HasDuplicateParameters::Next<BailoutReason, 8>;
+  using RequiresInstanceMembersInitializer =
+      DontOptimizeReasonField::Next<bool, 1>;
+  using HasBracesField = RequiresInstanceMembersInitializer::Next<bool, 1>;
+  using OneshotIIFEBit = HasBracesField::Next<bool, 1>;
 
   // expected_property_count_ is the sum of instance fields and properties.
   // It can vary depending on whether a function is lazily or eagerly parsed.
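
FunctionLiteral::FunctionType is replaced by a standalone FunctionSyntaxKind enum (hence the new src/objects/function-syntax-kind.h include at the top of this file). A hedged reconstruction of that enum, mirroring the members deleted above; the exact header may carry extra bookkeeping constants:

    enum class FunctionSyntaxKind : uint8_t {
      kAnonymousExpression,
      kNamedExpression,
      kDeclaration,
      kAccessorOrMethod,
      kWrapped,
    };
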
@@ -2432,6 +2444,11 @@ class ClassLiteralProperty final : public LiteralProperty {
     return private_or_computed_name_var_;
   }
 
+  bool NeedsHomeObjectOnClassPrototype() const {
+    return is_private() && kind_ == METHOD &&
+           FunctionLiteral::NeedsHomeObject(value_);
+  }
+
  private:
   friend class AstNodeFactory;
 
@@ -2525,12 +2542,9 @@ class ClassLiteral final : public Expression {
   ZonePtrList<Property>* properties_;
   FunctionLiteral* static_fields_initializer_;
   FunctionLiteral* instance_members_initializer_function_;
-  class HasNameStaticProperty
-      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
-  class HasStaticComputedNames
-      : public BitField<bool, HasNameStaticProperty::kNext, 1> {};
-  class IsAnonymousExpression
-      : public BitField<bool, HasStaticComputedNames::kNext, 1> {};
+  using HasNameStaticProperty = Expression::NextBitField<bool, 1>;
+  using HasStaticComputedNames = HasNameStaticProperty::Next<bool, 1>;
+  using IsAnonymousExpression = HasStaticComputedNames::Next<bool, 1>;
 };
 
 
@@ -3046,8 +3060,13 @@ class AstNodeFactory final {
     return new (zone_) Variable(variable);
   }
 
-  Property* NewProperty(Expression* obj, Expression* key, int pos) {
-    return new (zone_) Property(obj, key, pos);
+  OptionalChain* NewOptionalChain(Expression* expression) {
+    return new (zone_) OptionalChain(expression);
+  }
+
+  Property* NewProperty(Expression* obj, Expression* key, int pos,
+                        bool optional_chain = false) {
+    return new (zone_) Property(obj, key, pos, optional_chain);
   }
 
   ResolvedProperty* NewResolvedProperty(VariableProxy* obj,
@@ -3058,8 +3077,10 @@ class AstNodeFactory final {
 
   Call* NewCall(Expression* expression,
                 const ScopedPtrList<Expression>& arguments, int pos,
-                Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
-    return new (zone_) Call(zone_, expression, arguments, pos, possibly_eval);
+                Call::PossiblyEval possibly_eval = Call::NOT_EVAL,
+                bool optional_chain = false) {
+    return new (zone_)
+        Call(zone_, expression, arguments, pos, possibly_eval, optional_chain);
   }
 
   Call* NewTaggedTemplate(Expression* expression,
@@ -3189,13 +3210,13 @@ class AstNodeFactory final {
       const ScopedPtrList<Statement>& body, int expected_property_count,
       int parameter_count, int function_length,
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
-      FunctionLiteral::FunctionType function_type,
+      FunctionSyntaxKind function_syntax_kind,
       FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
       bool has_braces, int function_literal_id,
       ProducedPreparseData* produced_preparse_data = nullptr) {
     return new (zone_) FunctionLiteral(
         zone_, name, ast_value_factory_, scope, body, expected_property_count,
-        parameter_count, function_length, function_type,
+        parameter_count, function_length, function_syntax_kind,
         has_duplicate_parameters, eager_compile_hint, position, has_braces,
         function_literal_id, produced_preparse_data);
   }
@@ -3209,7 +3230,7 @@ class AstNodeFactory final {
     return new (zone_) FunctionLiteral(
         zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
         body, expected_property_count, parameter_count, parameter_count,
-        FunctionLiteral::kAnonymousExpression,
+        FunctionSyntaxKind::kAnonymousExpression,
         FunctionLiteral::kNoDuplicateParameters,
         FunctionLiteral::kShouldLazyCompile, 0, /* has_braces */ false,
         kFunctionLiteralIdTopLevel);
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 261b72c352a55d..dbd20f50a80869 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -84,10 +84,11 @@ void SourceTextModuleDescriptor::AddStarExport(
 }
 
 namespace {
-Handle<Object> ToStringOrUndefined(Isolate* isolate, const AstRawString* s) {
+Handle<HeapObject> ToStringOrUndefined(Isolate* isolate,
+                                       const AstRawString* s) {
   return (s == nullptr)
-             ? Handle<Object>::cast(isolate->factory()->undefined_value())
-             : Handle<Object>::cast(s->string());
+             ? Handle<HeapObject>::cast(isolate->factory()->undefined_value())
+             : Handle<HeapObject>::cast(s->string());
 }
 }  // namespace
 
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index c0fe3baff398bc..581517ee4ec34a 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -27,6 +27,8 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
   is_call_error_ = false;
   is_iterator_error_ = false;
   is_async_iterator_error_ = false;
+  destructuring_prop_ = nullptr;
+  destructuring_assignment_ = nullptr;
   is_user_js_ = is_user_js;
   function_kind_ = kNormalFunction;
   InitializeAstVisitor(isolate);
@@ -299,24 +301,50 @@ void CallPrinter::VisitVariableProxy(VariableProxy* node) {
 
 
 void CallPrinter::VisitAssignment(Assignment* node) {
-  Find(node->target());
-  if (node->target()->IsArrayLiteral()) {
-    // Special case the visit for destructuring array assignment.
-    bool was_found = false;
-    if (node->value()->position() == position_) {
-      is_iterator_error_ = true;
+  bool was_found = false;
+  if (node->target()->IsObjectLiteral()) {
+    ObjectLiteral* target = node->target()->AsObjectLiteral();
+    if (target->position() == position_) {
       was_found = !found_;
-      if (was_found) {
-        found_ = true;
+      found_ = true;
+      destructuring_assignment_ = node;
+    } else {
+      for (ObjectLiteralProperty* prop : *target->properties()) {
+        if (prop->value()->position() == position_) {
+          was_found = !found_;
+          found_ = true;
+          destructuring_prop_ = prop;
+          destructuring_assignment_ = node;
+          break;
+        }
       }
     }
-    Find(node->value(), true);
-    if (was_found) {
-      done_ = true;
-      found_ = false;
+  }
+  if (!was_found) {
+    Find(node->target());
+    if (node->target()->IsArrayLiteral()) {
+      // Special case the visit for destructuring array assignment.
+      bool was_found = false;
+      if (node->value()->position() == position_) {
+        is_iterator_error_ = true;
+        was_found = !found_;
+        found_ = true;
+      }
+      Find(node->value(), true);
+      if (was_found) {
+        done_ = true;
+        found_ = false;
+      }
+    } else {
+      Find(node->value());
     }
   } else {
-    Find(node->value());
+    Find(node->value(), true);
+  }
+
+  if (was_found) {
+    done_ = true;
+    found_ = false;
   }
 }
 
@@ -342,6 +370,9 @@ void CallPrinter::VisitAwait(Await* node) { Find(node->expression()); }
 
 void CallPrinter::VisitThrow(Throw* node) { Find(node->exception()); }
 
+void CallPrinter::VisitOptionalChain(OptionalChain* node) {
+  Find(node->expression());
+}
 
 void CallPrinter::VisitProperty(Property* node) {
   Expression* key = node->key();
@@ -349,12 +380,18 @@ void CallPrinter::VisitProperty(Property* node) {
   if (literal != nullptr &&
       literal->BuildValue(isolate_)->IsInternalizedString()) {
     Find(node->obj(), true);
+    if (node->is_optional_chain_link()) {
+      Print("?");
+    }
     Print(".");
     // TODO(adamk): Teach Literal how to print its values without
     // allocating on the heap.
     PrintLiteral(literal->BuildValue(isolate_), false);
   } else {
     Find(node->obj(), true);
+    if (node->is_optional_chain_link()) {
+      Print("?.");
+    }
     Print("[");
     Find(key, true);
     Print("]");
@@ -1272,6 +1309,11 @@ void AstPrinter::VisitThrow(Throw* node) {
   Visit(node->exception());
 }
 
+void AstPrinter::VisitOptionalChain(OptionalChain* node) {
+  IndentedScope indent(this, "OPTIONAL_CHAIN", node->position());
+  Visit(node->expression());
+}
+
 void AstPrinter::VisitProperty(Property* node) {
   EmbeddedVector<char, 128> buf;
   SNPrintF(buf, "PROPERTY");
@@ -1289,6 +1331,18 @@ void AstPrinter::VisitProperty(Property* node) {
       PrintIndentedVisit("PRIVATE_METHOD", node->key());
       break;
     }
+    case PRIVATE_GETTER_ONLY: {
+      PrintIndentedVisit("PRIVATE_GETTER_ONLY", node->key());
+      break;
+    }
+    case PRIVATE_SETTER_ONLY: {
+      PrintIndentedVisit("PRIVATE_SETTER_ONLY", node->key());
+      break;
+    }
+    case PRIVATE_GETTER_AND_SETTER: {
+      PrintIndentedVisit("PRIVATE_GETTER_AND_SETTER", node->key());
+      break;
+    }
     case KEYED_PROPERTY:
     case KEYED_SUPER_PROPERTY: {
       PrintIndentedVisit("KEY", node->key());
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index cceb5fc269b5ef..322fd9fb1437bf 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -31,6 +31,12 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
     kCallAndAsyncIterator
   };
   ErrorHint GetErrorHint() const;
+  ObjectLiteralProperty* destructuring_prop() const {
+    return destructuring_prop_;
+  }
+  Assignment* destructuring_assignment() const {
+    return destructuring_assignment_;
+  }
 
 // Individual nodes
 #define DECLARE_VISIT(type) void Visit##type(type* node);
@@ -54,6 +60,8 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
   bool is_iterator_error_;
   bool is_async_iterator_error_;
   bool is_call_error_;
+  ObjectLiteralProperty* destructuring_prop_;
+  Assignment* destructuring_assignment_;
   FunctionKind function_kind_;
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
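
The new destructuring_prop_/destructuring_assignment_ members let CallPrinter report not just where an error occurred but that it occurred inside an object-destructuring assignment, and on which property. A hedged sketch of how V8's error-message builders would consume this; `program` and `error_position` are stand-ins:

    CallPrinter printer(isolate, /*is_user_js=*/true);
    i::Handle<i::String> text = printer.Print(program, error_position);
    if (printer.destructuring_assignment() != nullptr) {
      ObjectLiteralProperty* prop = printer.destructuring_prop();  // may be null
      // Enables messages like
      // "Cannot destructure property 'x' of 'obj' as it is undefined."
    }
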
 
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 237d98ec6047f2..c4d09999785ba7 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -40,7 +40,6 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
                                VariableKind kind,
                                InitializationFlag initialization_flag,
                                MaybeAssignedFlag maybe_assigned_flag,
-                               RequiresBrandCheckFlag requires_brand_check,
                                bool* was_added) {
   // AstRawStrings are unambiguous, i.e., the same string is always represented
   // by the same AstRawString*.
@@ -52,9 +51,8 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
   if (*was_added) {
     // The variable has not been declared yet -> insert it.
     DCHECK_EQ(name, p->key);
-    Variable* variable =
-        new (zone) Variable(scope, name, mode, kind, initialization_flag,
-                            maybe_assigned_flag, requires_brand_check);
+    Variable* variable = new (zone) Variable(
+        scope, name, mode, kind, initialization_flag, maybe_assigned_flag);
     p->value = variable;
   }
   return reinterpret_cast<Variable*>(p->value);
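
The RequiresBrandCheckFlag parameter can be dropped because, with the new private VariableMode values (the same ones Property::GetAssignType switches over in ast.h above), whether a name requires a brand check is derivable from the mode itself. A hedged sketch of that derivation, not code from the patch:

    inline bool RequiresBrandCheck(VariableMode mode) {
      switch (mode) {
        case VariableMode::kPrivateMethod:
        case VariableMode::kPrivateGetterOnly:
        case VariableMode::kPrivateSetterOnly:
        case VariableMode::kPrivateGetterAndSetter:
          return true;
        default:
          return false;
      }
    }
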
@@ -170,7 +168,6 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
 #ifdef DEBUG
   already_resolved_ = true;
 #endif
-  if (scope_info->CallsSloppyEval()) scope_calls_eval_ = true;
   set_language_mode(scope_info->language_mode());
   num_heap_slots_ = scope_info->ContextLength();
   DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
@@ -186,6 +183,10 @@ DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type,
       params_(0, zone) {
   DCHECK_NE(scope_type, SCRIPT_SCOPE);
   SetDefaults();
+  if (scope_info->SloppyEvalCanExtendVars()) {
+    DCHECK(!is_eval_scope());
+    sloppy_eval_can_extend_vars_ = true;
+  }
 }
 
 Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
@@ -258,7 +259,8 @@ void Scope::SetDefaults() {
 
   set_language_mode(LanguageMode::kSloppy);
 
-  scope_calls_eval_ = false;
+  calls_eval_ = false;
+  sloppy_eval_can_extend_vars_ = false;
   scope_nonlinear_ = false;
   is_hidden_ = false;
   is_debug_evaluate_scope_ = false;
@@ -380,11 +382,8 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
 
   if (deserialization_mode == DeserializationMode::kIncludingVariables &&
       script_scope->scope_info_.is_null()) {
-    Handle<ScriptContextTable> table(
-        isolate->native_context()->script_context_table(), isolate);
-    Handle<Context> first = ScriptContextTable::GetContext(isolate, table, 0);
-    Handle<ScopeInfo> scope_info(first->scope_info(), isolate);
-    script_scope->SetScriptScopeInfo(scope_info);
+    script_scope->SetScriptScopeInfo(
+        ReadOnlyRoots(isolate).global_this_binding_scope_info_handle());
   }
 
   if (innermost_scope == nullptr) return script_scope;
@@ -626,7 +625,7 @@ Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name,
                                                  : NORMAL_VARIABLE;
   function_ = new (zone())
       Variable(this, name, VariableMode::kConst, kind, kCreatedInitialized);
-  if (calls_sloppy_eval()) {
+  if (sloppy_eval_can_extend_vars()) {
     cache->NonLocal(name, VariableMode::kDynamic);
   } else {
     cache->variables_.Add(zone(), function_);
@@ -652,7 +651,8 @@ Scope* Scope::FinalizeBlockScope() {
 #endif
 
   if (variables_.occupancy() > 0 ||
-      (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval())) {
+      (is_declaration_scope() &&
+       AsDeclarationScope()->sloppy_eval_can_extend_vars())) {
     return this;
   }
 
@@ -682,10 +682,10 @@ Scope* Scope::FinalizeBlockScope() {
 
   if (inner_scope_calls_eval_) outer_scope()->inner_scope_calls_eval_ = true;
 
-  // No need to propagate scope_calls_eval_, since if it was relevant to
-  // this scope we would have had to bail out at the top.
-  DCHECK(!scope_calls_eval_ || !is_declaration_scope() ||
-         !is_sloppy(language_mode()));
+  // No need to propagate sloppy_eval_can_extend_vars_, since if it was relevant
+  // to this scope we would have had to bail out at the top.
+  DCHECK(!is_declaration_scope() ||
+         !AsDeclarationScope()->sloppy_eval_can_extend_vars());
 
   // This block does not need a context.
   num_heap_slots_ = 0;
@@ -750,8 +750,8 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) {
   outer_closure->locals_.Rewind(top_local_);
 
   // Move eval calls since Snapshot's creation into new_parent.
-  if (outer_scope_and_calls_eval_->scope_calls_eval_) {
-    new_parent->scope_calls_eval_ = true;
+  if (outer_scope_and_calls_eval_->calls_eval_) {
+    new_parent->RecordDeclarationScopeEvalCall();
     new_parent->inner_scope_calls_eval_ = true;
   }
 
@@ -787,13 +787,11 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
   VariableMode mode;
   InitializationFlag init_flag;
   MaybeAssignedFlag maybe_assigned_flag;
-  RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck;
 
   {
     location = VariableLocation::CONTEXT;
     index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
-                                        &init_flag, &maybe_assigned_flag,
-                                        &requires_brand_check);
+                                        &init_flag, &maybe_assigned_flag);
     found = index >= 0;
   }
 
@@ -818,9 +816,9 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
   }
 
   bool was_added;
-  Variable* var = cache->variables_.Declare(
-      zone(), this, name, mode, NORMAL_VARIABLE, init_flag, maybe_assigned_flag,
-      requires_brand_check, &was_added);
+  Variable* var =
+      cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
+                                init_flag, maybe_assigned_flag, &was_added);
   DCHECK(was_added);
   var->AllocateTo(location, index);
   return var;
@@ -873,6 +871,8 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
                               VariableKind kind, bool* was_added,
                               InitializationFlag init_flag) {
   DCHECK(!already_resolved_);
+  // Private methods should be declared with ClassScope::DeclarePrivateName()
+  DCHECK(!IsPrivateMethodOrAccessorVariableMode(mode));
   // This function handles VariableMode::kVar, VariableMode::kLet, and
   // VariableMode::kConst modes.  VariableMode::kDynamic variables are
   // introduced during variable allocation, and VariableMode::kTemporary
@@ -905,6 +905,8 @@ Variable* Scope::DeclareVariable(
     VariableMode mode, VariableKind kind, InitializationFlag init,
     bool* was_added, bool* sloppy_mode_block_scope_function_redefinition,
     bool* ok) {
+  // Private methods should be declared with ClassScope::DeclarePrivateName()
+  DCHECK(!IsPrivateMethodOrAccessorVariableMode(mode));
   DCHECK(IsDeclaredVariableMode(mode));
   DCHECK(!already_resolved_);
   DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
@@ -990,7 +992,8 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
   DCHECK(IsDeclaredVariableMode(mode));
   DCHECK(!already_resolved_);
   DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
-
+  // Private methods should be declared with ClassScope::DeclarePrivateName()
+  DCHECK(!IsPrivateMethodOrAccessorVariableMode(mode));
   if (mode == VariableMode::kVar && !is_declaration_scope()) {
     return GetDeclarationScope()->DeclareVariableName(name, mode, was_added,
                                                       kind);
@@ -1044,7 +1047,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
   bool was_added;
   return cache->variables_.Declare(
       zone(), this, name, VariableMode::kDynamicGlobal, kind,
-      kCreatedInitialized, kNotAssigned, kNoBrandCheck, &was_added);
+      kCreatedInitialized, kNotAssigned, &was_added);
   // TODO(neis): Mark variable as maybe-assigned?
 }
 
@@ -1243,7 +1246,7 @@ int Scope::ContextChainLengthUntilOutermostSloppyEval() const {
     if (!s->NeedsContext()) continue;
     length++;
     if (s->is_declaration_scope() &&
-        s->AsDeclarationScope()->calls_sloppy_eval()) {
+        s->AsDeclarationScope()->sloppy_eval_can_extend_vars()) {
       result = length;
     }
   }
@@ -1384,9 +1387,10 @@ void Scope::CollectNonLocals(DeclarationScope* max_outer_scope,
 
 void Scope::AnalyzePartially(DeclarationScope* max_outer_scope,
                              AstNodeFactory* ast_node_factory,
-                             UnresolvedList* new_unresolved_list) {
-  this->ForEach([max_outer_scope, ast_node_factory,
-                 new_unresolved_list](Scope* scope) {
+                             UnresolvedList* new_unresolved_list,
+                             bool maybe_in_arrowhead) {
+  this->ForEach([max_outer_scope, ast_node_factory, new_unresolved_list,
+                 maybe_in_arrowhead](Scope* scope) {
     DCHECK_IMPLIES(scope->is_declaration_scope(),
                    !scope->AsDeclarationScope()->was_lazily_parsed());
 
@@ -1399,7 +1403,8 @@ void Scope::AnalyzePartially(DeclarationScope* max_outer_scope,
         // Don't copy unresolved references to the script scope, unless it's a
         // reference to a private name or method. In that case keep it so we
         // can fail later.
-        if (!max_outer_scope->outer_scope()->is_script_scope()) {
+        if (!max_outer_scope->outer_scope()->is_script_scope() ||
+            maybe_in_arrowhead) {
           VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
           new_unresolved_list->Add(copy);
         }
@@ -1434,6 +1439,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
   sloppy_block_functions_.Clear();
   rare_data_ = nullptr;
   has_rest_ = false;
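+  // function_ was allocated in the Zone that is released below, so it must
+  // be cleared as well.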
+  function_ = nullptr;
 
   DCHECK_NE(zone_, ast_value_factory->zone());
   zone_->ReleaseMemory();
@@ -1487,17 +1493,19 @@ void DeclarationScope::SavePreparseDataForDeclarationScope(Parser* parser) {
 }
 
 void DeclarationScope::AnalyzePartially(Parser* parser,
-                                        AstNodeFactory* ast_node_factory) {
+                                        AstNodeFactory* ast_node_factory,
+                                        bool maybe_in_arrowhead) {
   DCHECK(!force_eager_compilation_);
   UnresolvedList new_unresolved_list;
   if (!IsArrowFunction(function_kind_) &&
-      (!outer_scope_->is_script_scope() ||
+      (!outer_scope_->is_script_scope() || maybe_in_arrowhead ||
        (preparse_data_builder_ != nullptr &&
         preparse_data_builder_->HasInnerFunctions()))) {
     // Try to resolve unresolved variables for this Scope and migrate those
     // which cannot be resolved inside. It doesn't make sense to try to resolve
     // them in the outer Scopes here, because they are incomplete.
-    Scope::AnalyzePartially(this, ast_node_factory, &new_unresolved_list);
+    Scope::AnalyzePartially(this, ast_node_factory, &new_unresolved_list,
+                            maybe_in_arrowhead);
 
     // Migrate function_ to the right Zone.
     if (function_ != nullptr) {
@@ -1596,10 +1604,6 @@ void PrintVar(int indent, Variable* var) {
     if (comma) PrintF(", ");
     PrintF("hole initialization elided");
   }
-  if (var->requires_brand_check()) {
-    if (comma) PrintF(", ");
-    PrintF("requires brand check");
-  }
   PrintF("\n");
 }
 
@@ -1676,7 +1680,8 @@ void Scope::Print(int n) {
     Indent(n1, "// strict mode scope\n");
   }
   if (IsAsmModule()) Indent(n1, "// scope is an asm module\n");
-  if (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval()) {
+  if (is_declaration_scope() &&
+      AsDeclarationScope()->sloppy_eval_can_extend_vars()) {
     Indent(n1, "// scope calls sloppy 'eval'\n");
   }
   if (is_declaration_scope() && AsDeclarationScope()->NeedsHomeObject()) {
@@ -1774,9 +1779,9 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
   // Declare a new non-local.
   DCHECK(IsDynamicVariableMode(mode));
   bool was_added;
-  Variable* var = variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
-                                     kCreatedInitialized, kNotAssigned,
-                                     kNoBrandCheck, &was_added);
+  Variable* var =
+      variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
+                         kCreatedInitialized, kNotAssigned, &was_added);
   // Allocate it by giving it a dynamic lookup.
   var->AllocateTo(VariableLocation::LOOKUP, -1);
   return var;
@@ -1814,7 +1819,18 @@ Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
     // We found a variable and we are done. (Even if there is an 'eval' in this
     // scope which introduces the same variable again, the resulting variable
     // remains the same.)
-    if (var != nullptr) {
+    //
+    // For sloppy eval, though, we skip dynamic variables to avoid resolving
+    // to a variable when the variable and proxy are in the same eval
+    // execution. The variable is not available on subsequent lazy executions
+    // of functions in the eval, so this prevents inner functions from
+    // looking up different variables during eager and lazy compilation.
+    //
+    // TODO(leszeks): Maybe we want to restrict this to e.g. lookups of a proxy
+    // living in a different scope from the current one, or some other
+    // optimisation.
+    if (var != nullptr &&
+        !(scope->is_eval_scope() && var->mode() == VariableMode::kDynamic)) {
       if (mode == kParsedScope && force_context_allocation &&
           !var->is_dynamic()) {
         var->ForceContextAllocation();
@@ -1829,8 +1845,9 @@ Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
       return LookupWith(proxy, scope, outer_scope_end, entry_point,
                         force_context_allocation);
     }
-    if (V8_UNLIKELY(scope->is_declaration_scope() &&
-                    scope->AsDeclarationScope()->calls_sloppy_eval())) {
+    if (V8_UNLIKELY(
+            scope->is_declaration_scope() &&
+            scope->AsDeclarationScope()->sloppy_eval_can_extend_vars())) {
       return LookupSloppyEval(proxy, scope, outer_scope_end, entry_point,
                               force_context_allocation);
     }
@@ -1901,7 +1918,7 @@ Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope,
                                   Scope* outer_scope_end, Scope* entry_point,
                                   bool force_context_allocation) {
   DCHECK(scope->is_declaration_scope() &&
-         scope->AsDeclarationScope()->calls_sloppy_eval());
+         scope->AsDeclarationScope()->sloppy_eval_can_extend_vars());
 
   // If we're compiling eval, it's possible that the outer scope is the first
   // ScopeInfo-backed scope.
@@ -2065,7 +2082,7 @@ bool Scope::MustAllocate(Variable* var) {
   if (!var->raw_name()->IsEmpty() &&
       (inner_scope_calls_eval_ || is_catch_scope() || is_script_scope())) {
     var->set_is_used();
-    if (inner_scope_calls_eval_) var->SetMaybeAssigned();
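+    // Even an eval cannot assign to 'this', so exempt the receiver from the
+    // maybe-assigned marking.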
+    if (inner_scope_calls_eval_ && !var->is_this()) var->SetMaybeAssigned();
   }
   DCHECK(!var->has_forced_context_allocation() || var->is_used());
   // Global variables do not need to be allocated.
@@ -2081,11 +2098,14 @@ bool Scope::MustAllocateInContext(Variable* var) {
   //
   // Temporary variables are always stack-allocated.  Catch-bound variables are
   // always context-allocated.
-  if (var->mode() == VariableMode::kTemporary) return false;
+  VariableMode mode = var->mode();
+  if (mode == VariableMode::kTemporary) return false;
   if (is_catch_scope()) return true;
-  if ((is_script_scope() || is_eval_scope()) &&
-      IsLexicalVariableMode(var->mode())) {
-    return true;
+  if (is_script_scope() || is_eval_scope()) {
+    if (IsLexicalVariableMode(mode) ||
+        IsPrivateMethodOrAccessorVariableMode(mode)) {
+      return true;
+    }
   }
   return var->has_forced_context_allocation() || inner_scope_calls_eval_;
 }
@@ -2248,9 +2268,9 @@ void Scope::AllocateVariablesRecursively() {
         scope->is_with_scope() || scope->is_module_scope() ||
         scope->IsAsmModule() || scope->ForceContextForLanguageMode() ||
         (scope->is_function_scope() &&
-         scope->AsDeclarationScope()->calls_sloppy_eval()) ||
+         scope->AsDeclarationScope()->sloppy_eval_can_extend_vars()) ||
         (scope->is_block_scope() && scope->is_declaration_scope() &&
-         scope->AsDeclarationScope()->calls_sloppy_eval());
+         scope->AsDeclarationScope()->sloppy_eval_can_extend_vars());
 
     // If we didn't allocate any locals in the local context, then we only
     // need the minimal number of slots if we must have a context.
@@ -2326,15 +2346,28 @@ int Scope::ContextLocalCount() const {
          (is_function_var_in_context ? 1 : 0);
 }
 
-Variable* ClassScope::DeclarePrivateName(
-    const AstRawString* name, RequiresBrandCheckFlag requires_brand_check,
-    bool* was_added) {
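+// A private getter-only and a private setter-only declaration of the same
+// name complement each other and can be merged into a single variable with
+// mode kPrivateGetterAndSetter.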
+bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) {
+  switch (a) {
+    case VariableMode::kPrivateGetterOnly:
+      return b == VariableMode::kPrivateSetterOnly;
+    case VariableMode::kPrivateSetterOnly:
+      return b == VariableMode::kPrivateGetterOnly;
+    default:
+      return false;
+  }
+}
+
+Variable* ClassScope::DeclarePrivateName(const AstRawString* name,
+                                         VariableMode mode, bool* was_added) {
   Variable* result = EnsureRareData()->private_name_map.Declare(
-      zone(), this, name, VariableMode::kConst, NORMAL_VARIABLE,
+      zone(), this, name, mode, NORMAL_VARIABLE,
       InitializationFlag::kNeedsInitialization,
-      MaybeAssignedFlag::kMaybeAssigned, requires_brand_check, was_added);
+      MaybeAssignedFlag::kMaybeAssigned, was_added);
   if (*was_added) {
     locals_.Add(result);
+  } else if (IsComplementaryAccessorPair(result->mode(), mode)) {
+    *was_added = true;
+    result->set_mode(VariableMode::kPrivateGetterAndSetter);
   }
   result->ForceContextAllocation();
   return result;
@@ -2416,22 +2449,20 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
   VariableMode mode;
   InitializationFlag init_flag;
   MaybeAssignedFlag maybe_assigned_flag;
-  RequiresBrandCheckFlag requires_brand_check;
-  int index =
-      ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, &init_flag,
-                                  &maybe_assigned_flag, &requires_brand_check);
+  int index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
+                                          &init_flag, &maybe_assigned_flag);
   if (index < 0) {
     return nullptr;
   }
 
-  DCHECK_EQ(mode, VariableMode::kConst);
+  DCHECK(IsConstVariableMode(mode));
   DCHECK_EQ(init_flag, InitializationFlag::kNeedsInitialization);
   DCHECK_EQ(maybe_assigned_flag, MaybeAssignedFlag::kMaybeAssigned);
 
   // Add the found private name to the map to speed up subsequent
   // lookups for the same name.
   bool was_added;
-  Variable* var = DeclarePrivateName(name, requires_brand_check, &was_added);
+  Variable* var = DeclarePrivateName(name, mode, &was_added);
   DCHECK(was_added);
   var->AllocateTo(VariableLocation::CONTEXT, index);
   return var;
@@ -2450,7 +2481,9 @@ Variable* ClassScope::LookupPrivateName(VariableProxy* proxy) {
     if (var == nullptr && !class_scope->scope_info_.is_null()) {
       var = class_scope->LookupPrivateNameInScopeInfo(proxy->raw_name());
     }
-    return var;
+    if (var != nullptr) {
+      return var;
+    }
   }
   return nullptr;
 }
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 932d5c70b937b8..73e6e8fd89755f 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -5,6 +5,7 @@
 #ifndef V8_AST_SCOPES_H_
 #define V8_AST_SCOPES_H_
 
+#include <numeric>
 #include "src/ast/ast.h"
 #include "src/base/compiler-specific.h"
 #include "src/base/hashmap.h"
@@ -13,6 +14,7 @@
 #include "src/objects/function-kind.h"
 #include "src/objects/objects.h"
 #include "src/utils/pointer-with-payload.h"
+#include "src/utils/utils.h"
 #include "src/zone/zone.h"
 
 namespace v8 {
@@ -42,7 +44,6 @@ class VariableMap : public ZoneHashMap {
                     VariableMode mode, VariableKind kind,
                     InitializationFlag initialization_flag,
                     MaybeAssignedFlag maybe_assigned_flag,
-                    RequiresBrandCheckFlag requires_brand_check,
                     bool* was_added);
 
   V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name);
@@ -111,8 +112,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
     }
 
     void RestoreEvalFlag() {
-      outer_scope_and_calls_eval_->scope_calls_eval_ =
-          outer_scope_and_calls_eval_.GetPayload();
+      if (outer_scope_and_calls_eval_.GetPayload()) {
+        // This recreates both calls_eval and sloppy_eval_can_extend_vars.
+        outer_scope_and_calls_eval_.GetPointer()->RecordEvalCall();
+      }
     }
 
     void Reparent(DeclarationScope* new_parent);
@@ -265,9 +268,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
 
   // Inform the scope and outer scopes that the corresponding code contains an
   // eval call.
-  void RecordEvalCall() {
-    scope_calls_eval_ = true;
-  }
+  inline void RecordEvalCall();
 
   void RecordInnerScopeEvalCall() {
     inner_scope_calls_eval_ = true;
@@ -460,7 +461,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   int ContextChainLength(Scope* scope) const;
 
   // The number of contexts between this and the outermost context that has a
-  // sloppy eval call. One if this->calls_sloppy_eval().
+  // sloppy eval call. One if this->sloppy_eval_can_extend_vars().
   int ContextChainLengthUntilOutermostSloppyEval() const;
 
   // Find the closest class scope in the current scope and outer scopes. If no
@@ -558,7 +559,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
                     MaybeAssignedFlag maybe_assigned_flag, bool* was_added) {
     Variable* result =
         variables_.Declare(zone, this, name, mode, kind, initialization_flag,
-                           maybe_assigned_flag, kNoBrandCheck, was_added);
+                           maybe_assigned_flag, was_added);
     if (*was_added) locals_.Add(result);
     return result;
   }
@@ -610,7 +611,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   // list along the way, so full resolution cannot be done afterwards.
   void AnalyzePartially(DeclarationScope* max_outer_scope,
                         AstNodeFactory* ast_node_factory,
-                        UnresolvedList* new_unresolved_list);
+                        UnresolvedList* new_unresolved_list,
+                        bool maybe_in_arrowhead);
   void CollectNonLocals(DeclarationScope* max_outer_scope, Isolate* isolate,
                         ParseInfo* info, Handle<StringSet>* non_locals);
 
@@ -703,9 +705,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   // The language mode of this scope.
   STATIC_ASSERT(LanguageModeSize == 2);
   bool is_strict_ : 1;
-  // This scope or a nested catch scope or with scope contain an 'eval' call. At
-  // the 'eval' call site this scope is the declaration scope.
-  bool scope_calls_eval_ : 1;
+  // This scope contains an 'eval' call.
+  bool calls_eval_ : 1;
+  // The context associated with this scope can be extended by a sloppy eval
+  // called inside of it.
+  bool sloppy_eval_can_extend_vars_ : 1;
   // This scope's declarations might not be executed in order (e.g., switch).
   bool scope_nonlinear_ : 1;
   bool is_hidden_ : 1;
@@ -753,11 +757,50 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
                                         IsClassConstructor(function_kind())));
   }
 
-  bool calls_sloppy_eval() const {
-    // TODO(delphick): Calculate this when setting and change the name of
-    // scope_calls_eval_.
-    return !is_script_scope() && scope_calls_eval_ &&
-           is_sloppy(language_mode());
+  // Inform the scope and outer scopes that the corresponding code contains an
+  // eval call.
+  void RecordDeclarationScopeEvalCall() {
+    calls_eval_ = true;
+
+    // If this isn't a sloppy eval, we don't care about it.
+    if (language_mode() != LanguageMode::kSloppy) return;
+
+    // Sloppy eval in script scopes can only introduce global variables anyway,
+    // so we don't care that it calls sloppy eval.
+    if (is_script_scope()) return;
+
+    // Sloppy eval in an eval scope can only introduce variables into the outer
+    // (non-eval) declaration scope, not into this eval scope.
+    if (is_eval_scope()) {
+#ifdef DEBUG
+      // One of three things must be true:
+      //   1. The outer non-eval declaration scope should already be marked as
+      //      being extendable by sloppy eval, by the current sloppy eval rather
+      //      than the inner one,
+      //   2. The outer non-eval declaration scope is a script scope and thus
+      //      isn't extendable anyway, or
+      //   3. This is a debug evaluate and all bets are off.
+      DeclarationScope* outer_decl_scope = outer_scope()->GetDeclarationScope();
+      while (outer_decl_scope->is_eval_scope()) {
+        outer_decl_scope = outer_decl_scope->GetDeclarationScope();
+      }
+      if (outer_decl_scope->is_debug_evaluate_scope()) {
+        // Don't check anything.
+        // TODO(9662): Figure out where variables declared by an eval inside a
+        // debug-evaluate actually go.
+      } else if (!outer_decl_scope->is_script_scope()) {
+        DCHECK(outer_decl_scope->sloppy_eval_can_extend_vars_);
+      }
+#endif
+
+      return;
+    }
+
+    sloppy_eval_can_extend_vars_ = true;
+  }
+
+  bool sloppy_eval_can_extend_vars() const {
+    return sloppy_eval_can_extend_vars_;
   }
 
   bool was_lazily_parsed() const { return was_lazily_parsed_; }
@@ -972,7 +1015,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
   // this records variables which cannot be resolved inside the Scope (we don't
   // yet know what they will resolve to since the outer Scopes are incomplete)
   // and recreates them with the correct Zone with ast_node_factory.
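+  // When maybe_in_arrowhead is true, the expression being parsed may still
+  // turn out to be the parameter list of an arrow function, so unresolved
+  // references are retained even when the outer scope is the script scope.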
-  void AnalyzePartially(Parser* parser, AstNodeFactory* ast_node_factory);
+  void AnalyzePartially(Parser* parser, AstNodeFactory* ast_node_factory,
+                        bool maybe_in_arrowhead);
 
   // Allocate ScopeInfos for top scope and any inner scopes that need them.
   // Does nothing if ScopeInfo is already allocated.
@@ -1138,13 +1182,21 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
   RareData* rare_data_ = nullptr;
 };
 
+void Scope::RecordEvalCall() {
+  calls_eval_ = true;
+  GetDeclarationScope()->RecordDeclarationScopeEvalCall();
+  RecordInnerScopeEvalCall();
+}
+
 Scope::Snapshot::Snapshot(Scope* scope)
-    : outer_scope_and_calls_eval_(scope, scope->scope_calls_eval_),
+    : outer_scope_and_calls_eval_(scope, scope->calls_eval_),
       top_inner_scope_(scope->inner_scope_),
       top_unresolved_(scope->unresolved_list_.end()),
       top_local_(scope->GetClosureScope()->locals_.end()) {
   // Reset in order to record eval calls during this Snapshot's lifetime.
-  outer_scope_and_calls_eval_.GetPointer()->scope_calls_eval_ = false;
+  outer_scope_and_calls_eval_.GetPointer()->calls_eval_ = false;
+  outer_scope_and_calls_eval_.GetPointer()->sloppy_eval_can_extend_vars_ =
+      false;
 }
 
 class ModuleScope final : public DeclarationScope {
@@ -1175,8 +1227,7 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
 
   // Declare a private name in the private name map and add it to the
   // local variables of this scope.
-  Variable* DeclarePrivateName(const AstRawString* name,
-                               RequiresBrandCheckFlag requires_brand_check,
+  Variable* DeclarePrivateName(const AstRawString* name, VariableMode mode,
                                bool* was_added);
 
   void AddUnresolvedPrivateName(VariableProxy* proxy);
diff --git a/deps/v8/src/ast/source-range-ast-visitor.cc b/deps/v8/src/ast/source-range-ast-visitor.cc
index 2fcf151999ace0..d171e30587584f 100644
--- a/deps/v8/src/ast/source-range-ast-visitor.cc
+++ b/deps/v8/src/ast/source-range-ast-visitor.cc
@@ -25,14 +25,6 @@ void SourceRangeAstVisitor::VisitBlock(Block* stmt) {
   }
 }
 
-void SourceRangeAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
-  AstTraversalVisitor::VisitSwitchStatement(stmt);
-  ZonePtrList<CaseClause>* clauses = stmt->cases();
-  for (CaseClause* clause : *clauses) {
-    MaybeRemoveLastContinuationRange(clause->statements());
-  }
-}
-
 void SourceRangeAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
   AstTraversalVisitor::VisitFunctionLiteral(expr);
   ZonePtrList<Statement>* stmts = expr->body();
diff --git a/deps/v8/src/ast/source-range-ast-visitor.h b/deps/v8/src/ast/source-range-ast-visitor.h
index 4ba5feb2d299f9..4ea36a947f58e6 100644
--- a/deps/v8/src/ast/source-range-ast-visitor.h
+++ b/deps/v8/src/ast/source-range-ast-visitor.h
@@ -34,7 +34,6 @@ class SourceRangeAstVisitor final
   friend class AstTraversalVisitor<SourceRangeAstVisitor>;
 
   void VisitBlock(Block* stmt);
-  void VisitSwitchStatement(SwitchStatement* stmt);
   void VisitFunctionLiteral(FunctionLiteral* expr);
   bool VisitNode(AstNode* node);
 
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 7805fa20c8c8f6..1ff6f9f4228375 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -21,8 +21,7 @@ class Variable final : public ZoneObject {
  public:
   Variable(Scope* scope, const AstRawString* name, VariableMode mode,
            VariableKind kind, InitializationFlag initialization_flag,
-           MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
-           RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck)
+           MaybeAssignedFlag maybe_assigned_flag = kNotAssigned)
       : scope_(scope),
         name_(name),
         local_if_not_shadowed_(nullptr),
@@ -32,7 +31,6 @@ class Variable final : public ZoneObject {
         bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) |
                    InitializationFlagField::encode(initialization_flag) |
                    VariableModeField::encode(mode) |
-                   RequiresBrandCheckField::encode(requires_brand_check) |
                    IsUsedField::encode(false) |
                    ForceContextAllocationField::encode(false) |
                    ForceHoleInitializationField::encode(false) |
@@ -58,6 +56,9 @@ class Variable final : public ZoneObject {
   Handle<String> name() const { return name_->string(); }
   const AstRawString* raw_name() const { return name_; }
   VariableMode mode() const { return VariableModeField::decode(bit_field_); }
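+  // Rewrites the variable mode; used e.g. to upgrade a private getter-only
+  // or setter-only variable to kPrivateGetterAndSetter once its counterpart
+  // accessor is declared.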
+  void set_mode(VariableMode mode) {
+    bit_field_ = VariableModeField::update(bit_field_, mode);
+  }
   bool has_forced_context_allocation() const {
     return ForceContextAllocationField::decode(bit_field_);
   }
@@ -72,6 +73,8 @@ class Variable final : public ZoneObject {
     return MaybeAssignedFlagField::decode(bit_field_);
   }
   void SetMaybeAssigned() {
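+    // Const variables are assigned exactly once at initialization; later
+    // assignments throw, so a const variable never becomes maybe-assigned.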
+    if (mode() == VariableMode::kConst) return;
+
     // If this variable is dynamically shadowing another variable, then that
     // variable could also be assigned (in the non-shadowing case).
     if (has_local_if_not_shadowed()) {
@@ -80,22 +83,14 @@ class Variable final : public ZoneObject {
       if (!maybe_assigned()) {
         local_if_not_shadowed()->SetMaybeAssigned();
       }
-      DCHECK(local_if_not_shadowed()->maybe_assigned());
+      DCHECK_IMPLIES(local_if_not_shadowed()->mode() != VariableMode::kConst,
+                     local_if_not_shadowed()->maybe_assigned());
     }
     set_maybe_assigned();
   }
 
-  RequiresBrandCheckFlag get_requires_brand_check_flag() const {
-    return RequiresBrandCheckField::decode(bit_field_);
-  }
-
   bool requires_brand_check() const {
-    return get_requires_brand_check_flag() == kRequiresBrandCheck;
-  }
-
-  void set_requires_brand_check() {
-    bit_field_ =
-        RequiresBrandCheckField::update(bit_field_, kRequiresBrandCheck);
+    return IsPrivateMethodOrAccessorVariableMode(mode());
   }
 
   int initializer_position() { return initializer_position_; }
@@ -125,7 +120,8 @@ class Variable final : public ZoneObject {
   // declaration time. Only returns valid results after scope analysis.
   bool binding_needs_init() const {
     DCHECK_IMPLIES(initialization_flag() == kNeedsInitialization,
-                   IsLexicalVariableMode(mode()));
+                   IsLexicalVariableMode(mode()) ||
+                       IsPrivateMethodOrAccessorVariableMode(mode()));
     DCHECK_IMPLIES(ForceHoleInitializationField::decode(bit_field_),
                    initialization_flag() == kNeedsInitialization);
 
@@ -149,7 +145,8 @@ class Variable final : public ZoneObject {
   // be required at runtime.
   void ForceHoleInitialization() {
     DCHECK_EQ(kNeedsInitialization, initialization_flag());
-    DCHECK(IsLexicalVariableMode(mode()));
+    DCHECK(IsLexicalVariableMode(mode()) ||
+           IsPrivateMethodOrAccessorVariableMode(mode()));
     bit_field_ = ForceHoleInitializationField::update(bit_field_, true);
   }
 
@@ -243,25 +240,16 @@ class Variable final : public ZoneObject {
     bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned);
   }
 
-  class VariableModeField : public BitField16<VariableMode, 0, 3> {};
-  class VariableKindField
-      : public BitField16<VariableKind, VariableModeField::kNext, 3> {};
-  class LocationField
-      : public BitField16<VariableLocation, VariableKindField::kNext, 3> {};
-  class ForceContextAllocationField
-      : public BitField16<bool, LocationField::kNext, 1> {};
-  class IsUsedField
-      : public BitField16<bool, ForceContextAllocationField::kNext, 1> {};
-  class InitializationFlagField
-      : public BitField16<InitializationFlag, IsUsedField::kNext, 1> {};
-  class ForceHoleInitializationField
-      : public BitField16<bool, InitializationFlagField::kNext, 1> {};
-  class MaybeAssignedFlagField
-      : public BitField16<MaybeAssignedFlag,
-                          ForceHoleInitializationField::kNext, 1> {};
-  class RequiresBrandCheckField
-      : public BitField16<RequiresBrandCheckFlag, MaybeAssignedFlagField::kNext,
-                          1> {};
+  using VariableModeField = BitField16<VariableMode, 0, 4>;
+  using VariableKindField = VariableModeField::Next<VariableKind, 3>;
+  using LocationField = VariableKindField::Next<VariableLocation, 3>;
+  using ForceContextAllocationField = LocationField::Next<bool, 1>;
+  using IsUsedField = ForceContextAllocationField::Next<bool, 1>;
+  using InitializationFlagField = IsUsedField::Next<InitializationFlag, 1>;
+  using ForceHoleInitializationField = InitializationFlagField::Next<bool, 1>;
+  using MaybeAssignedFlagField =
+      ForceHoleInitializationField::Next<MaybeAssignedFlag, 1>;
+
   Variable** next() { return &next_; }
   friend List;
   friend base::ThreadedListTraits<Variable>;
diff --git a/deps/v8/src/base/address-region.h b/deps/v8/src/base/address-region.h
index 1fdc479f6f3e93..0f4809f9e81574 100644
--- a/deps/v8/src/base/address-region.h
+++ b/deps/v8/src/base/address-region.h
@@ -45,6 +45,13 @@ class AddressRegion {
     return contains(region.address_, region.size_);
   }
 
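+  // Returns the intersection of this region and {region}; if the two
+  // regions do not overlap, the result has size 0.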
+  base::AddressRegion GetOverlap(AddressRegion region) const {
+    Address overlap_start = std::max(begin(), region.begin());
+    Address overlap_end =
+        std::max(overlap_start, std::min(end(), region.end()));
+    return {overlap_start, overlap_end - overlap_start};
+  }
+
   bool operator==(AddressRegion other) const {
     return address_ == other.address_ && size_ == other.size_;
   }
diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h
index 055f0ff498484f..c2b7952260a5b5 100644
--- a/deps/v8/src/base/flags.h
+++ b/deps/v8/src/base/flags.h
@@ -53,13 +53,13 @@ class Flags final {
   }
 
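+  // Operate on mask_ directly; the compound-assignment operators are not
+  // constexpr and thus cannot be used from these constexpr operators.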
   constexpr Flags operator&(const Flags& flags) const {
-    return Flags(*this) &= flags;
+    return Flags(mask_ & flags.mask_);
   }
   constexpr Flags operator|(const Flags& flags) const {
-    return Flags(*this) |= flags;
+    return Flags(mask_ | flags.mask_);
   }
   constexpr Flags operator^(const Flags& flags) const {
-    return Flags(*this) ^= flags;
+    return Flags(mask_ ^ flags.mask_);
   }
 
   Flags& operator&=(flag_type flag) { return operator&=(Flags(flag)); }
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index b339f528d2b9d3..76a0aff39953a4 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -36,9 +36,9 @@ void* PageAllocator::GetRandomMmapAddr() {
   return base::OS::GetRandomMmapAddr();
 }
 
-void* PageAllocator::AllocatePages(void* address, size_t size, size_t alignment,
+void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment,
                                    PageAllocator::Permission access) {
-  return base::OS::Allocate(address, size, alignment,
+  return base::OS::Allocate(hint, size, alignment,
                             static_cast<base::OS::MemoryPermission>(access));
 }
 
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
index ced1156ccaee38..2b8ee1a5e5e740 100644
--- a/deps/v8/src/base/page-allocator.h
+++ b/deps/v8/src/base/page-allocator.h
@@ -26,7 +26,7 @@ class V8_BASE_EXPORT PageAllocator
 
   void* GetRandomMmapAddr() override;
 
-  void* AllocatePages(void* address, size_t size, size_t alignment,
+  void* AllocatePages(void* hint, size_t size, size_t alignment,
                       PageAllocator::Permission access) override;
 
   bool FreePages(void* address, size_t size) override;
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 2b8b55eeb5f247..c48cf8d3393c12 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -67,6 +67,8 @@ class V8_BASE_EXPORT Mutex final {
     return native_handle_;
   }
 
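+  // DCHECKs that the mutex is currently locked. Only effective in debug
+  // builds, where the lock level is tracked.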
+  V8_INLINE void AssertHeld() { DCHECK_EQ(1, level_); }
+
  private:
   NativeHandle native_handle_;
 #ifdef DEBUG
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index 17f9aa3f17e5cb..92a5fbe490f4c3 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -95,13 +95,13 @@ double LocalTimeOffset(double time_ms, bool is_utc) {
 }
 
 // static
-void* OS::Allocate(void* address, size_t size, size_t alignment,
+void* OS::Allocate(void* hint, size_t size, size_t alignment,
                    MemoryPermission access) {
   size_t page_size = AllocatePageSize();
   DCHECK_EQ(0, size % page_size);
   DCHECK_EQ(0, alignment % page_size);
   DCHECK_LE(page_size, alignment);
-  address = AlignedAddress(address, alignment);
+  hint = AlignedAddress(hint, alignment);
 
   DWORD flags = (access == OS::MemoryPermission::kNoAccess)
                     ? MEM_RESERVE
@@ -109,7 +109,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   DWORD protect = GetProtectionFromMemoryPermission(access);
 
   // First, try an exact size aligned allocation.
-  uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+  uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint);
   if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
 
   // If address is suitably aligned, we're done.
@@ -120,7 +120,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   CHECK(Free(base, size));
 
   // Clear the hint. It's unlikely we can allocate at this address.
-  address = nullptr;
+  hint = nullptr;
 
   // Add the maximum misalignment so we are guaranteed an aligned base address
   // in the allocated region.
@@ -128,7 +128,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   const int kMaxAttempts = 3;
   aligned_base = nullptr;
   for (int i = 0; i < kMaxAttempts; ++i) {
-    base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
+    base = RandomizedVirtualAlloc(padded_size, flags, protect, hint);
     if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
 
     // Try to trim the allocation by freeing the padded allocation and then
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 6da83d7e0208a3..c50cdd7a98eefd 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -137,10 +137,10 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
   return flags;
 }
 
-void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
+void* Allocate(void* hint, size_t size, OS::MemoryPermission access) {
   int prot = GetProtectionFromMemoryPermission(access);
   int flags = GetFlagsForMemoryPermission(access);
-  void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset);
+  void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset);
   if (result == MAP_FAILED) return nullptr;
   return result;
 }
@@ -278,16 +278,16 @@ void* OS::GetRandomMmapAddr() {
 // TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
 #if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
 // static
-void* OS::Allocate(void* address, size_t size, size_t alignment,
+void* OS::Allocate(void* hint, size_t size, size_t alignment,
                    MemoryPermission access) {
   size_t page_size = AllocatePageSize();
   DCHECK_EQ(0, size % page_size);
   DCHECK_EQ(0, alignment % page_size);
-  address = AlignedAddress(address, alignment);
+  hint = AlignedAddress(hint, alignment);
   // Add the maximum misalignment so we are guaranteed an aligned base address.
   size_t request_size = size + (alignment - page_size);
   request_size = RoundUp(request_size, OS::AllocatePageSize());
-  void* result = base::Allocate(address, request_size, access);
+  void* result = base::Allocate(hint, request_size, access);
   if (result == nullptr) return nullptr;
 
   // Unmap memory allocated before the aligned base address.
@@ -761,13 +761,12 @@ void Thread::set_name(const char* name) {
   name_[sizeof(name_) - 1] = '\0';
 }
 
-
-void Thread::Start() {
+bool Thread::Start() {
   int result;
   pthread_attr_t attr;
   memset(&attr, 0, sizeof(attr));
   result = pthread_attr_init(&attr);
-  DCHECK_EQ(0, result);
+  if (result != 0) return false;
   size_t stack_size = stack_size_;
   if (stack_size == 0) {
 #if V8_OS_MACOSX
@@ -780,17 +779,17 @@ void Thread::Start() {
   }
   if (stack_size > 0) {
     result = pthread_attr_setstacksize(&attr, stack_size);
-    DCHECK_EQ(0, result);
+    if (result != 0) return pthread_attr_destroy(&attr), false;
   }
   {
     MutexGuard lock_guard(&data_->thread_creation_mutex_);
     result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+    if (result != 0 || data_->thread_ == kNoThread) {
+      return pthread_attr_destroy(&attr), false;
+    }
   }
-  DCHECK_EQ(0, result);
   result = pthread_attr_destroy(&attr);
-  DCHECK_EQ(0, result);
-  DCHECK_NE(data_->thread_, kNoThread);
-  USE(result);
+  return result == 0;
 }
 
 void Thread::Join() { pthread_join(data_->thread_, nullptr); }
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index d01b1c07fe1926..04ef8a30f229bd 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -798,13 +798,13 @@ uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
 }  // namespace
 
 // static
-void* OS::Allocate(void* address, size_t size, size_t alignment,
+void* OS::Allocate(void* hint, size_t size, size_t alignment,
                    MemoryPermission access) {
   size_t page_size = AllocatePageSize();
   DCHECK_EQ(0, size % page_size);
   DCHECK_EQ(0, alignment % page_size);
   DCHECK_LE(page_size, alignment);
-  address = AlignedAddress(address, alignment);
+  hint = AlignedAddress(hint, alignment);
 
   DWORD flags = (access == OS::MemoryPermission::kNoAccess)
                     ? MEM_RESERVE
@@ -812,7 +812,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   DWORD protect = GetProtectionFromMemoryPermission(access);
 
   // First, try an exact size aligned allocation.
-  uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+  uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint);
   if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
 
   // If address is suitably aligned, we're done.
@@ -824,7 +824,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   CHECK(Free(base, size));
 
   // Clear the hint. It's unlikely we can allocate at this address.
-  address = nullptr;
+  hint = nullptr;
 
   // Add the maximum misalignment so we are guaranteed an aligned base address
   // in the allocated region.
@@ -832,7 +832,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   const int kMaxAttempts = 3;
   aligned_base = nullptr;
   for (int i = 0; i < kMaxAttempts; ++i) {
-    base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
+    base = RandomizedVirtualAlloc(padded_size, flags, protect, hint);
     if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
 
     // Try to trim the allocation by freeing the padded allocation and then
@@ -1352,13 +1352,13 @@ Thread::~Thread() {
 // Create a new thread. It is important to use _beginthreadex() instead of
 // the Win32 function CreateThread(), because the CreateThread() does not
 // initialize thread specific structures in the C runtime library.
-void Thread::Start() {
-  data_->thread_ = reinterpret_cast<HANDLE>(
-      _beginthreadex(nullptr, static_cast<unsigned>(stack_size_), ThreadEntry,
-                     this, 0, &data_->thread_id_));
+bool Thread::Start() {
+  uintptr_t result = _beginthreadex(nullptr, static_cast<unsigned>(stack_size_),
+                                    ThreadEntry, this, 0, &data_->thread_id_);
+  data_->thread_ = reinterpret_cast<HANDLE>(result);
+  return result != 0;
 }
 
-
 // Wait for thread to terminate.
 void Thread::Join() {
   if (data_->thread_id_ != GetCurrentThreadId()) {
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index e073704b2c1ace..e1f84043eb8a73 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -333,15 +333,16 @@ class V8_BASE_EXPORT Thread {
   virtual ~Thread();
 
   // Start new thread by calling the Run() method on the new thread.
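+  // Returns false if the underlying OS thread could not be created.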
-  void Start();
+  V8_WARN_UNUSED_RESULT bool Start();
 
   // Start new thread and wait until Run() method is called on the new thread.
-  void StartSynchronously() {
+  bool StartSynchronously() {
     start_semaphore_ = new Semaphore(0);
-    Start();
+    if (!Start()) return false;
     start_semaphore_->Wait();
     delete start_semaphore_;
     start_semaphore_ = nullptr;
+    return true;
   }
 
   // Wait until thread terminates.
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index 3b38858192970e..17c2cced8a452b 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -51,6 +51,13 @@ RandomNumberGenerator::RandomNumberGenerator() {
   result = rand_s(&second_half);
   DCHECK_EQ(0, result);
   SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
+#elif V8_OS_MACOSX
+  // Despite what its prefix suggests, arc4random no longer uses the RC4
+  // algorithm. It always succeeds, has decent performance, and involves no
+  // file descriptor.
+  int64_t seed;
+  arc4random_buf(&seed, sizeof(seed));
+  SetSeed(seed);
 #else
   // Gather entropy from /dev/urandom if available.
   FILE* fp = fopen("/dev/urandom", "rb");
diff --git a/deps/v8/src/builtins/OWNERS b/deps/v8/src/builtins/OWNERS
index 450423f87850ba..f52e1c9ca8effc 100644
--- a/deps/v8/src/builtins/OWNERS
+++ b/deps/v8/src/builtins/OWNERS
@@ -1,3 +1,3 @@
-file://COMMON_OWNERS
+file:../../COMMON_OWNERS
 
 # COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/builtins/arguments.tq b/deps/v8/src/builtins/arguments.tq
index 6df5f801a3945a..18d6c23b3d9a38 100644
--- a/deps/v8/src/builtins/arguments.tq
+++ b/deps/v8/src/builtins/arguments.tq
@@ -8,7 +8,7 @@ struct Arguments {
   const length: intptr;
 }
 
-extern operator '[]' macro GetArgumentValue(Arguments, intptr): Object;
+extern operator '[]' macro GetArgumentValue(Arguments, intptr): JSAny;
 
 extern macro GetFrameArguments(FrameWithArguments, intptr): Arguments;
 
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 9b9956b0fbba0a..e9b562620fcee5 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -90,12 +90,24 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
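+// Loads the real stack limit from the isolate into {destination}. The limit
+// is no longer a root, so it is read via a root-register-relative offset.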
+void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  CHECK(is_int32(offset));
+  __ ldr(destination, MemOperand(kRootRegister, offset));
+}
+
 void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                  Register scratch, Label* stack_overflow) {
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch);
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
   __ sub(scratch, sp, scratch);
@@ -428,7 +440,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ CompareRoot(sp, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch);
+  __ cmp(sp, scratch);
   __ b(lo, &stack_overflow);
 
   // Push receiver.
@@ -1116,7 +1129,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     // Do a stack check to ensure we don't go over the limit.
     Label ok;
     __ sub(r9, sp, Operand(r4));
-    __ LoadRoot(r2, RootIndex::kRealStackLimit);
+    LoadRealStackLimit(masm, r2);
     __ cmp(r9, Operand(r2));
     __ b(hs, &ok);
     __ CallRuntime(Runtime::kThrowStackOverflow);
@@ -2089,7 +2102,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
 
         // Compute the space we have left. The stack might already be overflowed
         // here which will cause remaining_stack_size to become negative.
-        __ LoadRoot(remaining_stack_size, RootIndex::kRealStackLimit);
+        LoadRealStackLimit(masm, remaining_stack_size);
         __ sub(remaining_stack_size, sp, remaining_stack_size);
 
         // Check if the arguments will overflow the stack.
@@ -2517,7 +2530,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ push(kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
-                               WasmInstanceObject::kCEntryStubOffset));
+                               WasmInstanceObject::kIsolateRootOffset));
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+    __ ldr(r2, MemOperand(r2, IsolateData::builtin_slot_offset(centry_id)));
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(cp, Smi::zero());
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index bcee8f0b5dcbbe..4e159a69b7ede8 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -24,6 +24,10 @@
 #include "src/runtime/runtime.h"
 #include "src/wasm/wasm-objects.h"
 
+#if defined(V8_OS_WIN)
+#include "src/diagnostics/unwinding-info-win64.h"
+#endif  // V8_OS_WIN
+
 namespace v8 {
 namespace internal {
 
@@ -85,6 +89,17 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
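+// Loads the real stack limit from the isolate into {destination}. The limit
+// is no longer a root, so it is read via a root-register-relative offset.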
+void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  __ Ldr(destination, MemOperand(kRootRegister, offset));
+}
+
 void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                  Label* stack_overflow) {
   UseScratchRegisterScope temps(masm);
@@ -94,7 +109,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
   // We are not trying to catch interruptions (e.g. debug break and
   // preemption) here, so the "real stack limit" is checked.
 
-  __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch);
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
   __ Sub(scratch, sp, scratch);
@@ -476,7 +491,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ CompareRoot(sp, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, x10);
+  __ Cmp(sp, x10);
   __ B(lo, &stack_overflow);
 
   // Get number of arguments for generator function.
@@ -623,6 +639,23 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
     // will have no effect on the model or real hardware.
     __ EnableInstrumentation();
 
+#if defined(V8_OS_WIN)
+    // Windows ARM64 relies on a chain of frame pointers (fp/x29, which alias
+    // each other) to do stack unwinding, but JSEntry breaks that chain by
+    // setting fp to point to bad_frame_pointer below. To fix the unwind
+    // information for this case, JSEntry registers the offset (from the
+    // current fp to the caller's fp, saved on the stack by
+    // PushCalleeSavedRegisters) with xdata_encoder, which then emits that
+    // offset as part of the resulting unwind data. The current offset is
+    // kFramePointerOffset, which consists of bad_frame_pointer saved below
+    // plus kFramePointerOffsetInPushCalleeSavedRegisters.
+    const int kFramePointerOffset =
+        kFramePointerOffsetInPushCalleeSavedRegisters + kSystemPointerSize;
+    win64_unwindinfo::XdataEncoder* xdata_encoder = masm->GetXdataEncoder();
+    if (xdata_encoder) {
+      xdata_encoder->onFramePointerAdjustment(kFramePointerOffset);
+    }
+#endif
+
     __ PushCalleeSavedRegisters();
 
     // Set up the reserved register for 0.0.
@@ -1223,7 +1256,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     // Do a stack check to ensure we don't go over the limit.
     Label ok;
     __ Sub(x10, sp, Operand(x11));
-    __ CompareRoot(x10, RootIndex::kRealStackLimit);
+    {
+      UseScratchRegisterScope temps(masm);
+      Register scratch = temps.AcquireX();
+      LoadRealStackLimit(masm, scratch);
+      __ Cmp(x10, scratch);
+    }
     __ B(hs, &ok);
     __ CallRuntime(Runtime::kThrowStackOverflow);
     __ Bind(&ok);
@@ -2469,7 +2507,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       // (i.e. debug break and preemption) here, so check the "real stack
       // limit".
       Label done;
-      __ LoadRoot(x10, RootIndex::kRealStackLimit);
+      LoadRealStackLimit(masm, x10);
       // Make x10 the space we have left. The stack might already be overflowed
       // here which will cause x10 to become negative.
       __ Sub(x10, sp, x10);
@@ -3031,9 +3069,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // function.
     __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
+    __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
+                               WasmInstanceObject::kIsolateRootOffset));
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
     __ LoadTaggedPointerField(
-        x2, FieldMemOperand(kWasmInstanceRegister,
-                            WasmInstanceObject::kCEntryStubOffset));
+        x2, MemOperand(x2, IsolateData::builtin_slot_offset(centry_id)));
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Mov(cp, Smi::zero());
diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq
index 94d871e8f74c13..574eaf9b9de4cc 100644
--- a/deps/v8/src/builtins/array-copywithin.tq
+++ b/deps/v8/src/builtins/array-copywithin.tq
@@ -9,7 +9,7 @@ namespace array_copywithin {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin
   transitioning javascript builtin ArrayPrototypeCopyWithin(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     // 1. Let O be ? ToObject(this value).
     const object: JSReceiver = ToObject_Inline(context, receiver);
 
@@ -68,7 +68,7 @@ namespace array_copywithin {
       // d. If fromPresent is true, then.
       if (fromPresent == True) {
         // i. Let fromVal be ? Get(O, fromKey).
-        const fromVal: Object = GetProperty(object, from);
+        const fromVal: JSAny = GetProperty(object, from);
 
         // ii. Perform ? Set(O, toKey, fromVal, true).
         SetProperty(object, to, fromVal);
diff --git a/deps/v8/src/builtins/array-every.tq b/deps/v8/src/builtins/array-every.tq
index 3451cd769b92e7..8f4c0e1f231dca 100644
--- a/deps/v8/src/builtins/array-every.tq
+++ b/deps/v8/src/builtins/array-every.tq
@@ -5,15 +5,14 @@
 namespace array {
   transitioning javascript builtin
   ArrayEveryLoopEagerDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object,
-      length: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
     // All continuation points in the optimized every implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -27,9 +26,9 @@ namespace array {
 
   transitioning javascript builtin
   ArrayEveryLoopLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object, length: Object,
-      result: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+      result: JSAny): JSAny {
     // All continuation points in the optimized every implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -53,9 +52,9 @@ namespace array {
   }
 
   transitioning builtin ArrayEveryLoopContinuation(implicit context: Context)(
-      _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
-      _array: Object, o: JSReceiver, initialK: Number, length: Number,
-      _initialTo: Object): Object {
+      _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
+      _array: JSAny, o: JSReceiver, initialK: Number, length: Number,
+      _initialTo: JSAny): JSAny {
     // 5. Let k be 0.
     // 6. Repeat, while k < len
     for (let k: Number = initialK; k < length; k++) {
@@ -69,10 +68,10 @@ namespace array {
       // 6c. If kPresent is true, then
       if (kPresent == True) {
         // 6c. i. Let kValue be ? Get(O, Pk).
-        const kValue: Object = GetProperty(o, k);
+        const kValue: JSAny = GetProperty(o, k);
 
         // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
-        const result: Object = Call(context, callbackfn, thisArg, kValue, k, o);
+        const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o);
 
         // 6c. iii. If testResult is false, return false.
         if (!ToBoolean(result)) {
@@ -86,7 +85,7 @@ namespace array {
   }
 
   transitioning macro FastArrayEvery(implicit context: Context)(
-      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
+      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
       labels Bailout(Smi) {
     let k: Smi = 0;
     const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
@@ -99,8 +98,8 @@ namespace array {
 
       // Ensure that we haven't walked beyond a possibly updated length.
       if (k >= fastOW.Get().length) goto Bailout(k);
-      const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
-      const result: Object =
+      const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
+      const result: JSAny =
           Call(context, callbackfn, thisArg, value, k, fastOW.Get());
       if (!ToBoolean(result)) {
         return False;
@@ -111,8 +110,8 @@ namespace array {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.every
   transitioning javascript builtin
-  ArrayEvery(js-implicit context: Context, receiver: Object)(...arguments):
-      Object {
+  ArrayEvery(js-implicit context: Context, receiver: JSAny)(...arguments):
+      JSAny {
     try {
       RequireObjectCoercible(receiver, 'Array.prototype.every');
 
@@ -129,7 +128,7 @@ namespace array {
       const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
 
       // 4. If thisArg is present, let T be thisArg; else let T be undefined.
-      const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+      const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
 
       // Special cases.
       try {
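
The Object -> JSAny migration running through these Torque files narrows the type of values flowing in and out of JavaScript-visible builtins from "any heap value" to "any JavaScript-exposable value", which lets the typechecker reject internal sentinels such as TheHole. A loose TypeScript rendering (illustrative, not V8's code) of the spec loop that ArrayEveryLoopContinuation implements:

// Array.prototype.every per the spec steps quoted in the comments above.
function arrayEvery(o: Record<number, unknown> & { length: number },
                    callbackfn: (v: unknown, k: number, o: object) => unknown,
                    thisArg?: unknown): boolean {
  for (let k = 0; k < o.length; k++) {
    if (k in o) {                                   // 6b. HasProperty(O, Pk)
      const kValue = o[k];                          // 6c.i. Get(O, Pk)
      const result = callbackfn.call(thisArg, kValue, k, o);  // 6c.ii. Call
      if (!result) return false;                    // 6c.iii. testResult false
    }
  }
  return true;                                      // 7. Return true
}
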
diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq
index 9acd0d04ee3cd7..4d23144329ab47 100644
--- a/deps/v8/src/builtins/array-filter.tq
+++ b/deps/v8/src/builtins/array-filter.tq
@@ -5,15 +5,15 @@
 namespace array_filter {
   transitioning javascript builtin
   ArrayFilterLoopEagerDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, array: Object, initialK: Object,
-      length: Object, initialTo: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
+      length: JSAny, initialTo: JSAny): JSAny {
     // All continuation points in the optimized filter implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -29,10 +29,9 @@ namespace array_filter {
 
   transitioning javascript builtin
   ArrayFilterLoopLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, array: Object, initialK: Object,
-      length: Object, valueK: Object, initialTo: Object,
-      result: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
+      length: JSAny, valueK: JSAny, initialTo: JSAny, result: JSAny): JSAny {
     // All continuation points in the optimized filter implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -60,9 +59,9 @@ namespace array_filter {
   }
 
   transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)(
-      _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+      _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
       array: JSReceiver, o: JSReceiver, initialK: Number, length: Number,
-      initialTo: Number): Object {
+      initialTo: Number): JSAny {
     let to: Number = initialTo;
     // 5. Let k be 0.
     // 6. Repeat, while k < len
@@ -77,10 +76,10 @@ namespace array_filter {
       // 6c. If kPresent is true, then
       if (kPresent == True) {
         // 6c. i. Let kValue be ? Get(O, Pk).
-        const kValue: Object = GetProperty(o, k);
+        const kValue: JSAny = GetProperty(o, k);
 
         // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
-        const result: Object = Call(context, callbackfn, thisArg, kValue, k, o);
+        const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o);
 
         // iii. If selected is true, then...
         if (ToBoolean(result)) {
@@ -97,7 +96,7 @@ namespace array_filter {
   }
 
   transitioning macro FastArrayFilter(implicit context: Context)(
-      fastO: FastJSArray, len: Smi, callbackfn: Callable, thisArg: Object,
+      fastO: FastJSArray, len: Smi, callbackfn: Callable, thisArg: JSAny,
       output: FastJSArray) labels Bailout(Number, Number) {
     let k: Smi = 0;
     let to: Smi = 0;
@@ -112,8 +111,8 @@ namespace array_filter {
 
       // Ensure that we haven't walked beyond a possibly updated length.
       if (k >= fastOW.Get().length) goto Bailout(k, to);
-      const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
-      const result: Object =
+      const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
+      const result: JSAny =
           Call(context, callbackfn, thisArg, value, k, fastOW.Get());
       if (ToBoolean(result)) {
         try {
@@ -147,8 +146,8 @@ namespace array_filter {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.filter
   transitioning javascript builtin
-  ArrayFilter(js-implicit context: Context, receiver: Object)(...arguments):
-      Object {
+  ArrayFilter(js-implicit context: Context, receiver: JSAny)(...arguments):
+      JSAny {
     try {
       RequireObjectCoercible(receiver, 'Array.prototype.filter');
 
@@ -165,7 +164,7 @@ namespace array_filter {
       const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
 
       // 4. If thisArg is present, let T be thisArg; else let T be undefined.
-      const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+      const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
       let output: JSReceiver;
 
       // Special cases.
diff --git a/deps/v8/src/builtins/array-find.tq b/deps/v8/src/builtins/array-find.tq
index ef54dd4666ef72..ec840a4c98b09b 100644
--- a/deps/v8/src/builtins/array-find.tq
+++ b/deps/v8/src/builtins/array-find.tq
@@ -5,15 +5,14 @@
 namespace array_find {
   transitioning javascript builtin
   ArrayFindLoopEagerDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object,
-      length: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
     // All continuation points in the optimized find implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -26,9 +25,9 @@ namespace array_find {
 
   transitioning javascript builtin
   ArrayFindLoopLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      _callback: Object, _thisArg: Object, _initialK: Object, _length: Object,
-      _result: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      _callback: JSAny, _thisArg: JSAny, _initialK: JSAny, _length: JSAny,
+      _result: JSAny): JSAny {
     // This deopt continuation point is never actually called, it just
     // exists to make stack traces correct from a ThrowTypeError if the
     // callback was found to be non-callable.
@@ -40,9 +39,9 @@ namespace array_find {
   // before iteration continues.
   transitioning javascript builtin
   ArrayFindLoopAfterCallbackLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object, length: Object,
-      foundValue: Object, isFound: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+      foundValue: JSAny, isFound: JSAny): JSAny {
     // All continuation points in the optimized find implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -65,8 +64,8 @@ namespace array_find {
   }
 
   transitioning builtin ArrayFindLoopContinuation(implicit context: Context)(
-      _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
-      o: JSReceiver, initialK: Number, length: Number): Object {
+      _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
+      o: JSReceiver, initialK: Number, length: Number): JSAny {
     // 5. Let k be 0.
     // 6. Repeat, while k < len
     for (let k: Number = initialK; k < length; k++) {
@@ -75,12 +74,11 @@ namespace array_find {
       // side-effect free and HasProperty/GetProperty do the conversion inline.
 
       // 6b. i. Let kValue be ? Get(O, Pk).
-      const value: Object = GetProperty(o, k);
+      const value: JSAny = GetProperty(o, k);
 
       // 6c. Let testResult be ToBoolean(? Call(predicate, T, <<kValue, k,
       // O>>)).
-      const testResult: Object =
-          Call(context, callbackfn, thisArg, value, k, o);
+      const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, o);
 
       // 6d. If testResult is true, return kValue.
       if (ToBoolean(testResult)) {
@@ -93,7 +91,7 @@ namespace array_find {
   }
 
   transitioning macro FastArrayFind(implicit context: Context)(
-      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
+      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
       labels Bailout(Smi) {
     let k: Smi = 0;
     const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
@@ -107,8 +105,8 @@ namespace array_find {
       // Ensure that we haven't walked beyond a possibly updated length.
       if (k >= fastOW.Get().length) goto Bailout(k);
 
-      const value: Object = fastOW.LoadElementOrUndefined(k);
-      const testResult: Object =
+      const value: JSAny = fastOW.LoadElementOrUndefined(k);
+      const testResult: JSAny =
           Call(context, callbackfn, thisArg, value, k, fastOW.Get());
       if (ToBoolean(testResult)) {
         return value;
@@ -119,8 +117,8 @@ namespace array_find {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.find
   transitioning javascript builtin
-  ArrayPrototypeFind(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  ArrayPrototypeFind(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     try {
       RequireObjectCoercible(receiver, 'Array.prototype.find');
 
@@ -138,7 +136,7 @@ namespace array_find {
           Cast<Callable>(arguments[0]) otherwise NotCallableError;
 
       // 4. If thisArg is present, let T be thisArg; else let T be undefined.
-      const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+      const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
 
       // Special cases.
       try {
diff --git a/deps/v8/src/builtins/array-findindex.tq b/deps/v8/src/builtins/array-findindex.tq
index 5a8bb85fbadd4c..6145c8146455b5 100644
--- a/deps/v8/src/builtins/array-findindex.tq
+++ b/deps/v8/src/builtins/array-findindex.tq
@@ -5,15 +5,14 @@
 namespace array_findindex {
   transitioning javascript builtin
   ArrayFindIndexLoopEagerDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object,
-      length: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
     // All continuation points in the optimized findIndex implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -26,9 +25,9 @@ namespace array_findindex {
 
   transitioning javascript builtin
   ArrayFindIndexLoopLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      _callback: Object, _thisArg: Object, _initialK: Object, _length: Object,
-      _result: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      _callback: JSAny, _thisArg: JSAny, _initialK: JSAny, _length: JSAny,
+      _result: JSAny): JSAny {
     // This deopt continuation point is never actually called, it just
     // exists to make stack traces correct from a ThrowTypeError if the
     // callback was found to be non-callable.
@@ -40,9 +39,9 @@ namespace array_findindex {
   // before iteration continues.
   transitioning javascript builtin
   ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object, length: Object,
-      foundValue: Object, isFound: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+      foundValue: JSAny, isFound: JSAny): JSAny {
     // All continuation points in the optimized findIndex implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -66,7 +65,7 @@ namespace array_findindex {
 
   transitioning builtin ArrayFindIndexLoopContinuation(implicit context:
                                                            Context)(
-      _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+      _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
       o: JSReceiver, initialK: Number, length: Number): Number {
     // 5. Let k be 0.
     // 6. Repeat, while k < len
@@ -76,12 +75,11 @@ namespace array_findindex {
       // side-effect free and HasProperty/GetProperty do the conversion inline.
 
       // 6b. i. Let kValue be ? Get(O, Pk).
-      const value: Object = GetProperty(o, k);
+      const value: JSAny = GetProperty(o, k);
 
       // 6c. Let testResult be ToBoolean(? Call(predicate, T, <<kValue, k,
       // O>>)).
-      const testResult: Object =
-          Call(context, callbackfn, thisArg, value, k, o);
+      const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, o);
 
       // 6d. If testResult is true, return k.
       if (ToBoolean(testResult)) {
@@ -94,7 +92,7 @@ namespace array_findindex {
   }
 
   transitioning macro FastArrayFindIndex(implicit context: Context)(
-      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Number
+      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): Number
       labels Bailout(Smi) {
     let k: Smi = 0;
     const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
@@ -108,8 +106,8 @@ namespace array_findindex {
       // Ensure that we haven't walked beyond a possibly updated length.
       if (k >= fastOW.Get().length) goto Bailout(k);
 
-      const value: Object = fastOW.LoadElementOrUndefined(k);
-      const testResult: Object =
+      const value: JSAny = fastOW.LoadElementOrUndefined(k);
+      const testResult: JSAny =
           Call(context, callbackfn, thisArg, value, k, fastOW.Get());
       if (ToBoolean(testResult)) {
         return k;
@@ -120,8 +118,8 @@ namespace array_findindex {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.findIndex
   transitioning javascript builtin
-  ArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  ArrayPrototypeFindIndex(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     try {
       RequireObjectCoercible(receiver, 'Array.prototype.findIndex');
 
@@ -139,7 +137,7 @@ namespace array_findindex {
           Cast<Callable>(arguments[0]) otherwise NotCallableError;
 
       // 4. If thisArg is present, let T be thisArg; else let T be undefined.
-      const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+      const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
 
       // Special cases.
       try {
diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq
index f52d944291ec7f..5b6e3926016c2c 100644
--- a/deps/v8/src/builtins/array-foreach.tq
+++ b/deps/v8/src/builtins/array-foreach.tq
@@ -5,9 +5,8 @@
 namespace array_foreach {
   transitioning javascript builtin
   ArrayForEachLoopEagerDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object,
-      length: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
     // All continuation points in the optimized forEach implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -23,9 +22,9 @@ namespace array_foreach {
 
   transitioning javascript builtin
   ArrayForEachLoopLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object, length: Object,
-      _result: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+      _result: JSAny): JSAny {
     // All continuation points in the optimized forEach implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -40,9 +39,9 @@ namespace array_foreach {
   }
 
   transitioning builtin ArrayForEachLoopContinuation(implicit context: Context)(
-      _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
-      _array: Object, o: JSReceiver, initialK: Number, len: Number,
-      _to: Object): Object {
+      _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
+      _array: JSAny, o: JSReceiver, initialK: Number, len: Number,
+      _to: JSAny): JSAny {
     // variables {array} and {to} are ignored.
 
     // 5. Let k be 0.
@@ -58,7 +57,7 @@ namespace array_foreach {
       // 6c. If kPresent is true, then
       if (kPresent == True) {
         // 6c. i. Let kValue be ? Get(O, Pk).
-        const kValue: Object = GetProperty(o, k);
+        const kValue: JSAny = GetProperty(o, k);
 
         // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
         Call(context, callbackfn, thisArg, kValue, k, o);
@@ -70,7 +69,7 @@ namespace array_foreach {
   }
 
   transitioning macro FastArrayForEach(implicit context: Context)(
-      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
+      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
       labels Bailout(Smi) {
     let k: Smi = 0;
     const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
@@ -83,7 +82,7 @@ namespace array_foreach {
 
       // Ensure that we haven't walked beyond a possibly updated length.
       if (k >= fastOW.Get().length) goto Bailout(k);
-      const value: Object = fastOW.LoadElementNoHole(k)
+      const value: JSAny = fastOW.LoadElementNoHole(k)
           otherwise continue;
       Call(context, callbackfn, thisArg, value, k, fastOW.Get());
     }
@@ -92,8 +91,8 @@ namespace array_foreach {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.foreach
   transitioning javascript builtin
-  ArrayForEach(js-implicit context: Context, receiver: Object)(...arguments):
-      Object {
+  ArrayForEach(js-implicit context: Context, receiver: JSAny)(...arguments):
+      JSAny {
     try {
       RequireObjectCoercible(receiver, 'Array.prototype.forEach');
 
@@ -110,7 +109,7 @@ namespace array_foreach {
       const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
 
       // 4. If thisArg is present, let T be thisArg; else let T be undefined.
-      const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+      const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
 
       // Special cases.
       let k: Number = 0;
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index c04233b22244ec..08b5221e099c6b 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 namespace array_join {
-  type LoadJoinElementFn = builtin(Context, JSReceiver, Number) => Object;
+  type LoadJoinElementFn = builtin(Context, JSReceiver, Number) => JSAny;
 
   // Fast C call to write a fixed array (see Buffer.fixedArray) to a single
   // string.
@@ -12,12 +12,12 @@ namespace array_join {
       FixedArray, intptr, String, String): String;
 
   transitioning builtin LoadJoinElement<T: type>(
-      context: Context, receiver: JSReceiver, k: Number): Object {
+      context: Context, receiver: JSReceiver, k: Number): JSAny {
     return GetProperty(receiver, k);
   }
 
-  LoadJoinElement<array::DictionaryElements>(
-      context: Context, receiver: JSReceiver, k: Number): Object {
+  transitioning LoadJoinElement<array::DictionaryElements>(
+      context: Context, receiver: JSReceiver, k: Number): JSAny {
     const array: JSArray = UnsafeCast<JSArray>(receiver);
     const dict: NumberDictionary = UnsafeCast<NumberDictionary>(array.elements);
     try {
@@ -33,15 +33,15 @@ namespace array_join {
   }
 
   LoadJoinElement<array::FastSmiOrObjectElements>(
-      context: Context, receiver: JSReceiver, k: Number): Object {
+      context: Context, receiver: JSReceiver, k: Number): JSAny {
     const array: JSArray = UnsafeCast<JSArray>(receiver);
     const fixedArray: FixedArray = UnsafeCast<FixedArray>(array.elements);
     const element: Object = fixedArray.objects[UnsafeCast<Smi>(k)];
-    return element == TheHole ? kEmptyString : element;
+    return element == TheHole ? kEmptyString : UnsafeCast<JSAny>(element);
   }
 
   LoadJoinElement<array::FastDoubleElements>(
-      context: Context, receiver: JSReceiver, k: Number): Object {
+      context: Context, receiver: JSReceiver, k: Number): JSAny {
     const array: JSArray = UnsafeCast<JSArray>(receiver);
     const fixedDoubleArray: FixedDoubleArray =
         UnsafeCast<FixedDoubleArray>(array.elements);
@@ -51,7 +51,7 @@ namespace array_join {
   }
 
   builtin LoadJoinTypedElement<T: type>(
-      context: Context, receiver: JSReceiver, k: Number): Object {
+      context: Context, receiver: JSReceiver, k: Number): JSAny {
     const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
     assert(!IsDetachedBuffer(typedArray.buffer));
     return typed_array::LoadFixedTypedArrayElementAsTagged(
@@ -60,14 +60,14 @@ namespace array_join {
   }
 
   transitioning builtin ConvertToLocaleString(
-      context: Context, element: Object, locales: Object,
-      options: Object): String {
+      context: Context, element: JSAny, locales: JSAny,
+      options: JSAny): String {
     if (IsNullOrUndefined(element)) return kEmptyString;
 
-    const prop: Object = GetProperty(element, 'toLocaleString');
+    const prop: JSAny = GetProperty(element, 'toLocaleString');
     try {
       const callable: Callable = Cast<Callable>(prop) otherwise TypeError;
-      let result: Object;
+      let result: JSAny;
       if (IsNullOrUndefined(locales)) {
         result = Call(context, callable, element);
       } else if (IsNullOrUndefined(options)) {
@@ -86,29 +86,25 @@ namespace array_join {
   // (see LoadJoinElement<ElementsAccessor>).
   macro CannotUseSameArrayAccessor<T: type>(implicit context: Context)(
       loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map,
-      originalLen: Number): never
-      labels Cannot, Can;
+      originalLen: Number): bool;
 
   CannotUseSameArrayAccessor<JSArray>(implicit context: Context)(
       loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map,
-      originalLen: Number): never
-      labels Cannot, Can {
-    if (loadFn == LoadJoinElement<array::GenericElementsAccessor>) goto Can;
+      originalLen: Number): bool {
+    if (loadFn == LoadJoinElement<array::GenericElementsAccessor>) return false;
 
     const array: JSArray = UnsafeCast<JSArray>(receiver);
-    if (originalMap != array.map) goto Cannot;
-    if (originalLen != array.length) goto Cannot;
-    if (IsNoElementsProtectorCellInvalid()) goto Cannot;
-    goto Can;
+    if (originalMap != array.map) return true;
+    if (originalLen != array.length) return true;
+    if (IsNoElementsProtectorCellInvalid()) return true;
+    return false;
   }
 
   CannotUseSameArrayAccessor<JSTypedArray>(implicit context: Context)(
       _loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map,
-      _initialLen: Number): never
-      labels Cannot, Can {
+      _initialLen: Number): bool {
     const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
-    if (IsDetachedBuffer(typedArray.buffer)) goto Cannot;
-    goto Can;
+    return IsDetachedBuffer(typedArray.buffer);
   }
 
   // Calculates the running total length of the resulting string.  If the
@@ -261,7 +257,7 @@ namespace array_join {
 
   transitioning macro ArrayJoinImpl<T: type>(implicit context: Context)(
       receiver: JSReceiver, sep: String, lengthNumber: Number,
-      useToLocaleString: constexpr bool, locales: Object, options: Object,
+      useToLocaleString: constexpr bool, locales: JSAny, options: JSAny,
       initialLoadFn: LoadJoinElementFn): String {
     const initialMap: Map = receiver.map;
     const len: uintptr = Convert<uintptr>(lengthNumber);
@@ -287,7 +283,7 @@ namespace array_join {
       }
 
       // b. Let element be ? Get(O, ! ToString(k)).
-      const element: Object = loadFn(context, receiver, Convert<Number>(k++));
+      const element: JSAny = loadFn(context, receiver, Convert<Number>(k++));
 
       // c. If element is undefined or null, let next be the empty String;
       //    otherwise, let next be ? ToString(element).
@@ -304,7 +300,7 @@ namespace array_join {
           case (num: Number): {
             next = NumberToString(num);
           }
-          case (obj: HeapObject): {
+          case (obj: JSAny): {
             if (IsNullOrUndefined(obj)) continue;
             next = ToString(context, obj);
           }
@@ -325,11 +321,11 @@ namespace array_join {
 
   transitioning macro ArrayJoin<T: type>(implicit context: Context)(
       useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
-      lenNumber: Number, locales: Object, options: Object): Object;
+      lenNumber: Number, locales: JSAny, options: JSAny): JSAny;
 
-  ArrayJoin<JSArray>(implicit context: Context)(
+  transitioning ArrayJoin<JSArray>(implicit context: Context)(
       useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
-      lenNumber: Number, locales: Object, options: Object): Object {
+      lenNumber: Number, locales: JSAny, options: JSAny): JSAny {
     const map: Map = receiver.map;
     const kind: ElementsKind = map.elements_kind;
     let loadFn: LoadJoinElementFn;
@@ -374,9 +370,9 @@ namespace array_join {
         receiver, sep, lenNumber, useToLocaleString, locales, options, loadFn);
   }
 
-  ArrayJoin<JSTypedArray>(implicit context: Context)(
+  transitioning ArrayJoin<JSTypedArray>(implicit context: Context)(
       useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
-      lenNumber: Number, locales: Object, options: Object): Object {
+      lenNumber: Number, locales: JSAny, options: JSAny): JSAny {
     const map: Map = receiver.map;
     const kind: ElementsKind = map.elements_kind;
     let loadFn: LoadJoinElementFn;
@@ -465,11 +461,9 @@ namespace array_join {
   }
 
   // Fast path the common non-nested calls. If the receiver is not already on
-  // the stack, add it to the stack and go to ReceiverAdded. Otherwise go to
-  // ReceiverNotAdded.
+  // the stack, add it to the stack and return true. Otherwise return false.
   macro JoinStackPushInline(implicit context: Context)(receiver: JSReceiver):
-      never
-      labels ReceiverAdded, ReceiverNotAdded {
+      bool {
     try {
       const stack: FixedArray = LoadJoinStack()
           otherwise IfUninitialized;
@@ -477,7 +471,7 @@ namespace array_join {
         stack.objects[0] = receiver;
       } else if (JoinStackPush(stack, receiver) == False)
         deferred {
-          goto ReceiverNotAdded;
+          return false;
         }
     }
     label IfUninitialized {
@@ -486,13 +480,13 @@ namespace array_join {
       stack.objects[0] = receiver;
       SetJoinStack(stack);
     }
-    goto ReceiverAdded;
+    return true;
   }
 
   // Removes a receiver from the stack. The FixedArray will automatically shrink
   // to Heap::kMinJoinStackSize once the stack becomes empty.
   builtin JoinStackPop(implicit context: Context)(
-      stack: FixedArray, receiver: JSReceiver): Object {
+      stack: FixedArray, receiver: JSReceiver): JSAny {
     const len: intptr = stack.length_intptr;
     for (let i: intptr = 0; i < len; i++) {
       if (stack.objects[i] == receiver) {
@@ -532,7 +526,7 @@ namespace array_join {
   transitioning macro CycleProtectedArrayJoin<T: type>(implicit context:
                                                             Context)(
       useToLocaleString: constexpr bool, o: JSReceiver, len: Number,
-      sepObj: Object, locales: Object, options: Object): Object {
+      sepObj: JSAny, locales: JSAny, options: JSAny): JSAny {
     // 3. If separator is undefined, let sep be the single-element String ",".
     // 4. Else, let sep be ? ToString(separator).
     const sep: String =
@@ -542,7 +536,7 @@ namespace array_join {
     // the normal join algorithm.
     if (len > 0 && JoinStackPushInline(o)) {
       try {
-        const result: Object =
+        const result: JSAny =
             ArrayJoin<T>(useToLocaleString, o, sep, len, locales, options);
         JoinStackPopInline(o);
         return result;
@@ -557,9 +551,9 @@ namespace array_join {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.join
   transitioning javascript builtin
-  ArrayPrototypeJoin(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
-    const separator: Object = arguments[0];
+  ArrayPrototypeJoin(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
+    const separator: JSAny = arguments[0];
 
     // 1. Let O be ? ToObject(this value).
     const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -577,9 +571,9 @@ namespace array_join {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.tolocalestring
   transitioning javascript builtin ArrayPrototypeToLocaleString(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const locales: Object = arguments[0];
-    const options: Object = arguments[1];
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const locales: JSAny = arguments[0];
+    const options: JSAny = arguments[1];
 
     // 1. Let O be ? ToObject(this value).
     const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -597,12 +591,12 @@ namespace array_join {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.tostring
   transitioning javascript builtin ArrayPrototypeToString(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     // 1. Let array be ? ToObject(this value).
     const array: JSReceiver = ToObject_Inline(context, receiver);
 
     // 2. Let func be ? Get(array, "join").
-    const prop: Object = GetProperty(array, 'join');
+    const prop: JSAny = GetProperty(array, 'join');
     try {
       // 3. If IsCallable(func) is false, let func be the intrinsic function
       //    %ObjProto_toString%.
@@ -618,8 +612,8 @@ namespace array_join {
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.join
   transitioning javascript builtin TypedArrayPrototypeJoin(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const separator: Object = arguments[0];
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const separator: JSAny = arguments[0];
 
     // Spec: ValidateTypedArray is applied to the this value prior to evaluating
     // the algorithm.
@@ -633,9 +627,9 @@ namespace array_join {
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring
   transitioning javascript builtin TypedArrayPrototypeToLocaleString(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const locales: Object = arguments[0];
-    const options: Object = arguments[1];
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const locales: JSAny = arguments[0];
+    const options: JSAny = arguments[1];
 
     // Spec: ValidateTypedArray is applied to the this value prior to evaluating
     // the algorithm.
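
Besides the type migration, array-join.tq converts its label-based macros (labels Cannot/Can and ReceiverAdded/ReceiverNotAdded) into plain bool-returning macros, which is what makes `if (len > 0 && JoinStackPushInline(o))` legal. The join stack those macros maintain is cycle protection: a receiver that is already being joined contributes the empty string. A small TypeScript sketch of that behavior (names invented):

// Cyclic arrays terminate because a receiver already on the join stack
// short-circuits to "".
const joinStack = new Set<object>();

function cycleProtectedJoin(arr: unknown[], sep: string): string {
  if (joinStack.has(arr)) return "";        // JoinStackPushInline -> false
  joinStack.add(arr);                       // JoinStackPushInline -> true
  try {
    return arr
        .map(e => {
          if (e === null || e === undefined) return "";
          return Array.isArray(e) ? cycleProtectedJoin(e, sep) : String(e);
        })
        .join(sep);
  } finally {
    joinStack.delete(arr);                  // JoinStackPopInline
  }
}

// const a: unknown[] = [1, 2]; a.push(a);
// cycleProtectedJoin(a, ",") === "1,2,"    // the nested `a` becomes ""
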
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
index 5ebc451e435117..7765eff6822f34 100644
--- a/deps/v8/src/builtins/array-lastindexof.tq
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -4,20 +4,20 @@
 
 namespace array_lastindexof {
   macro LoadWithHoleCheck<Elements: type>(
-      elements: FixedArrayBase, index: Smi): Object
+      elements: FixedArrayBase, index: Smi): JSAny
       labels IfHole;
 
   LoadWithHoleCheck<FixedArray>(implicit context: Context)(
-      elements: FixedArrayBase, index: Smi): Object
+      elements: FixedArrayBase, index: Smi): JSAny
       labels IfHole {
     const elements: FixedArray = UnsafeCast<FixedArray>(elements);
     const element: Object = elements.objects[index];
     if (element == TheHole) goto IfHole;
-    return element;
+    return UnsafeCast<JSAny>(element);
   }
 
   LoadWithHoleCheck<FixedDoubleArray>(implicit context: Context)(
-      elements: FixedArrayBase, index: Smi): Object
+      elements: FixedArrayBase, index: Smi): JSAny
       labels IfHole {
     const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
     const element: float64 = LoadDoubleWithHoleCheck(elements, index)
@@ -26,7 +26,7 @@ namespace array_lastindexof {
   }
 
   macro FastArrayLastIndexOf<Elements: type>(
-      context: Context, array: JSArray, from: Smi, searchElement: Object): Smi {
+      context: Context, array: JSArray, from: Smi, searchElement: JSAny): Smi {
     const elements: FixedArrayBase = array.elements;
     let k: Smi = from;
 
@@ -40,7 +40,7 @@ namespace array_lastindexof {
 
     while (k >= 0) {
       try {
-        const element: Object = LoadWithHoleCheck<Elements>(elements, k)
+        const element: JSAny = LoadWithHoleCheck<Elements>(elements, k)
             otherwise Hole;
 
         const same: Boolean = StrictEqual(searchElement, element);
@@ -80,8 +80,8 @@ namespace array_lastindexof {
   }
 
   macro TryFastArrayLastIndexOf(
-      context: Context, receiver: JSReceiver, searchElement: Object,
-      from: Number): Object
+      context: Context, receiver: JSReceiver, searchElement: JSAny,
+      from: Number): JSAny
       labels Slow {
     const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
     const length: Smi = array.length;
@@ -99,8 +99,8 @@ namespace array_lastindexof {
   }
 
   transitioning macro GenericArrayLastIndexOf(
-      context: Context, object: JSReceiver, searchElement: Object,
-      from: Number): Object {
+      context: Context, object: JSReceiver, searchElement: JSAny,
+      from: Number): JSAny {
     let k: Number = from;
 
     // 7. Repeat, while k >= 0.
@@ -111,7 +111,7 @@ namespace array_lastindexof {
       // b. If kPresent is true, then.
       if (kPresent == True) {
         // i. Let elementK be ? Get(O, ! ToString(k)).
-        const element: Object = GetProperty(object, k);
+        const element: JSAny = GetProperty(object, k);
 
         // ii. Let same be the result of performing Strict Equality Comparison
         //     searchElement === elementK.
@@ -131,7 +131,7 @@ namespace array_lastindexof {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf
   transitioning javascript builtin ArrayPrototypeLastIndexOf(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     // 1. Let O be ? ToObject(this value).
     const object: JSReceiver = ToObject_Inline(context, receiver);
 
@@ -144,7 +144,7 @@ namespace array_lastindexof {
     // Step 4 - 6.
     const from: Number = GetFromIndex(context, length, arguments);
 
-    const searchElement: Object = arguments[0];
+    const searchElement: JSAny = arguments[0];
 
     try {
       return TryFastArrayLastIndexOf(context, object, searchElement, from)
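
In array-lastindexof.tq, LoadWithHoleCheck now returns JSAny and reports holes through the IfHole label, so the hole sentinel never escapes as a value. Observably, lastIndexOf only compares elements that are actually present; a TypeScript sketch (illustrative only):

// Holes are skipped via the HasProperty check, so even a search for
// `undefined` cannot match a hole.
function lastIndexOfSkippingHoles(arr: unknown[], searchElement: unknown,
                                  from: number): number {
  for (let k = from; k >= 0; k--) {
    if (k in arr) {                            // hole check (HasProperty)
      if (arr[k] === searchElement) return k;  // Strict Equality Comparison
    }
  }
  return -1;
}

// lastIndexOfSkippingHoles([, undefined], undefined, 1) === 1
// lastIndexOfSkippingHoles([, undefined], undefined, 0) === -1  (hole at 0)
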
diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq
index dda569c68236b3..c4b0e8a358a20d 100644
--- a/deps/v8/src/builtins/array-map.tq
+++ b/deps/v8/src/builtins/array-map.tq
@@ -5,15 +5,15 @@
 namespace array_map {
   transitioning javascript builtin
   ArrayMapLoopEagerDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, array: Object, initialK: Object,
-      length: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
+      length: JSAny): JSAny {
     // All continuation points in the optimized map implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -28,9 +28,9 @@ namespace array_map {
 
   transitioning javascript builtin
   ArrayMapLoopLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, array: Object, initialK: Object,
-      length: Object, result: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
+      length: JSAny, result: JSAny): JSAny {
     // All continuation points in the optimized map implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -57,9 +57,9 @@ namespace array_map {
   }
 
   transitioning builtin ArrayMapLoopContinuation(implicit context: Context)(
-      _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+      _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
       array: JSReceiver, o: JSReceiver, initialK: Number,
-      length: Number): Object {
+      length: Number): JSAny {
     // 6. Let k be 0.
     // 7. Repeat, while k < len
     for (let k: Number = initialK; k < length; k++) {
@@ -73,10 +73,10 @@ namespace array_map {
       // 7c. If kPresent is true, then:
       if (kPresent == True) {
         //  i. Let kValue be ? Get(O, Pk).
-        const kValue: Object = GetProperty(o, k);
+        const kValue: JSAny = GetProperty(o, k);
 
         // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
-        const mappedValue: Object =
+        const mappedValue: JSAny =
             Call(context, callbackfn, thisArg, kValue, k, o);
 
         // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
@@ -127,12 +127,12 @@ namespace array_map {
             SmiUntag(length), kAllowLargeObjectAllocation);
         a = NewJSArray(map, this.fixedArray);
         for (let i: Smi = 0; i < validLength; i++) {
-          typeswitch (this.fixedArray.objects[i]) {
+          typeswitch (
+              UnsafeCast<(Number | TheHole)>(this.fixedArray.objects[i])) {
             case (n: Number): {
               elements.floats[i] = Convert<float64>(n);
             }
-            case (h: HeapObject): {
-              assert(h == TheHole);
+            case (TheHole): {
             }
           }
         }
@@ -147,7 +147,7 @@ namespace array_map {
       return a;
     }
 
-    StoreResult(implicit context: Context)(index: Smi, result: Object) {
+    StoreResult(implicit context: Context)(index: Smi, result: JSAny) {
       typeswitch (result) {
         case (s: Smi): {
           this.fixedArray.objects[index] = s;
@@ -156,7 +156,7 @@ namespace array_map {
           this.onlySmis = false;
           this.fixedArray.objects[index] = s;
         }
-        case (s: HeapObject): {
+        case (s: JSAnyNotNumber): {
           this.onlySmis = false;
           this.onlyNumbers = false;
           this.fixedArray.objects[index] = s;
@@ -185,7 +185,7 @@ namespace array_map {
 
   transitioning macro FastArrayMap(implicit context: Context)(
       fastO: FastJSArrayForRead, len: Smi, callbackfn: Callable,
-      thisArg: Object): JSArray
+      thisArg: JSAny): JSArray
       labels Bailout(JSArray, Smi) {
     let k: Smi = 0;
     let fastOW = NewFastJSArrayForReadWitness(fastO);
@@ -201,9 +201,9 @@ namespace array_map {
         if (k >= fastOW.Get().length) goto PrepareBailout(k);
 
         try {
-          const value: Object = fastOW.LoadElementNoHole(k)
+          const value: JSAny = fastOW.LoadElementNoHole(k)
               otherwise FoundHole;
-          const result: Object =
+          const result: JSAny =
               Call(context, callbackfn, thisArg, value, k, fastOW.Get());
           vector.StoreResult(k, result);
         }
@@ -224,8 +224,7 @@ namespace array_map {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.map
   transitioning javascript builtin
-  ArrayMap(js-implicit context: Context, receiver: Object)(...arguments):
-      Object {
+  ArrayMap(js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     try {
       RequireObjectCoercible(receiver, 'Array.prototype.map');
 
@@ -241,7 +240,7 @@ namespace array_map {
       const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
 
       // 4. If thisArg is present, let T be thisArg; else let T be undefined.
-      const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+      const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
 
       let array: JSReceiver;
       let k: Number = 0;
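
The Vector changes in array-map.tq replace the HeapObject catch-all in StoreResult with JSAnyNotNumber and make the hole handling explicit with an (Number | TheHole) cast. The onlySmis/onlyNumbers flags exist so the result array can be allocated with the narrowest elements kind that fits every callback result. A TypeScript sketch of that tracking (kind names and the Smi test here are simplifications):

// Track the narrowest category covering all results, mirroring the
// typeswitch in StoreResult: Smi, (heap) Number, or anything else.
type ElementsKind = "PACKED_SMI" | "PACKED_DOUBLE" | "PACKED";

function pickElementsKind(results: unknown[]): ElementsKind {
  let onlySmis = true;
  let onlyNumbers = true;
  for (const r of results) {
    const isSmi = typeof r === "number" && Number.isInteger(r) &&
        Math.abs(r) < 2 ** 30;               // rough stand-in for a Smi check
    if (!isSmi) onlySmis = false;
    if (typeof r !== "number") onlyNumbers = false;  // JSAnyNotNumber case
  }
  return onlySmis ? "PACKED_SMI" : onlyNumbers ? "PACKED_DOUBLE" : "PACKED";
}
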
diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq
index 72933186257231..ceb9edff63c5b8 100644
--- a/deps/v8/src/builtins/array-of.tq
+++ b/deps/v8/src/builtins/array-of.tq
@@ -5,8 +5,7 @@
 namespace array_of {
   // https://tc39.github.io/ecma262/#sec-array.of
   transitioning javascript builtin
-  ArrayOf(js-implicit context: Context, receiver: Object)(...arguments):
-      Object {
+  ArrayOf(js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     // 1. Let len be the actual number of arguments passed to this function.
     const len: Smi = Convert<Smi>(arguments.length);
 
@@ -14,7 +13,7 @@ namespace array_of {
     const items: Arguments = arguments;
 
     // 3. Let C be the this value.
-    const c: Object = receiver;
+    const c: JSAny = receiver;
 
     let a: JSReceiver;
 
@@ -24,7 +23,7 @@ namespace array_of {
         // a. Let A be ? Construct(C, « len »).
         a = Construct(c, len);
       }
-      case (Object): {
+      case (JSAny): {
         // a. Let A be ? ArrayCreate(len).
         a = ArrayCreate(len);
       }
@@ -36,7 +35,7 @@ namespace array_of {
     // 7. Repeat, while k < len
     while (k < len) {
       // a. Let kValue be items[k].
-      const kValue: Object = items[Convert<intptr>(k)];
+      const kValue: JSAny = items[Convert<intptr>(k)];
 
       // b. Let Pk be ! ToString(k).
       // c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue).
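
The typeswitch in ArrayOf above is the spec's IsConstructor(C) split: a constructor receiver is constructed with « len », anything else falls back to ArrayCreate. A TypeScript approximation (illustrative; the real dispatch is on Torque's Constructor type, not typeof):

// Array.of respects a constructable `this`, e.g. a subclass of Array.
function arrayOf(this: unknown, ...items: unknown[]): unknown[] {
  const len = items.length;
  const a: any = typeof this === "function"
      ? new (this as new (n: number) => any)(len)  // a. Construct(C, « len »)
      : new Array(len);                            // a. ArrayCreate(len)
  for (let k = 0; k < len; k++) a[k] = items[k];   // c. CreateDataPropertyOrThrow
  a.length = len;                                  // 8. Set(A, "length", len)
  return a;
}

// class MyArray extends Array {}
// arrayOf.call(MyArray, 1, 2) instanceof MyArray === true
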
diff --git a/deps/v8/src/builtins/array-reduce-right.tq b/deps/v8/src/builtins/array-reduce-right.tq
index b1aa71b85b4623..ae5ca99d3d5e14 100644
--- a/deps/v8/src/builtins/array-reduce-right.tq
+++ b/deps/v8/src/builtins/array-reduce-right.tq
@@ -6,13 +6,13 @@ namespace array {
   transitioning javascript builtin
   ArrayReduceRightPreLoopEagerDeoptContinuation(
       js-implicit context: Context,
-      receiver: Object)(callback: Object, length: Object): Object {
+      receiver: JSAny)(callback: JSAny, length: JSAny): JSAny {
     // All continuation points in the optimized reduceRight implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -27,15 +27,15 @@ namespace array {
 
   transitioning javascript builtin
   ArrayReduceRightLoopEagerDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, initialK: Object, length: Object,
-      accumulator: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, initialK: JSAny, length: JSAny,
+      accumulator: JSAny): JSAny {
     // All continuation points in the optimized reduceRight implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -48,9 +48,8 @@ namespace array {
 
   transitioning javascript builtin
   ArrayReduceRightLoopLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, initialK: Object, length: Object,
-      result: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, initialK: JSAny, length: JSAny, result: JSAny): JSAny {
     // All continuation points in the optimized reduceRight implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -67,8 +66,9 @@ namespace array {
 
   transitioning builtin ArrayReduceRightLoopContinuation(implicit context:
                                                              Context)(
-      _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
-      o: JSReceiver, initialK: Number, _length: Number): Object {
+      _receiver: JSReceiver, callbackfn: Callable,
+      initialAccumulator: JSAny | TheHole, o: JSReceiver, initialK: Number,
+      _length: Number): JSAny {
     let accumulator = initialAccumulator;
 
     // 8b and 9. Repeat, while k >= 0
@@ -83,16 +83,20 @@ namespace array {
       // 8b iii and 9c. If kPresent is true, then
       if (present == True) {
         // 8b iii and 9c i. Let kValue be ? Get(O, Pk).
-        const value: Object = GetProperty(o, k);
-
-        if (accumulator == TheHole) {
-          // 8b iii 1.
-          accumulator = value;
-        } else {
-          // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
-          //         <accumulator, kValue, k, O>).
-          accumulator =
-              Call(context, callbackfn, Undefined, accumulator, value, k, o);
+        const value: JSAny = GetProperty(o, k);
+
+        typeswitch (accumulator) {
+          case (TheHole): {
+            // 8b iii 1.
+            accumulator = value;
+          }
+          case (accumulatorNotHole: JSAny): {
+            // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
+            //         <accumulator, kValue, k, O>).
+            accumulator = Call(
+                context, callbackfn, Undefined, accumulatorNotHole, value, k,
+                o);
+          }
         }
       }
 
@@ -102,16 +106,20 @@ namespace array {
     // 8c. if kPresent is false, throw a TypeError exception.
     // If the accumulator is discovered with the sentinel hole value,
     // this means kPresent is false.
-    if (accumulator == TheHole) {
-      ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
+    typeswitch (accumulator) {
+      case (TheHole): {
+        ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
+      }
+      case (accumulator: JSAny): {
+        return accumulator;
+      }
     }
-    return accumulator;
   }
 
   transitioning macro FastArrayReduceRight(implicit context: Context)(
       o: JSReceiver, len: Number, callbackfn: Callable,
-      initialAccumulator: Object): Object
-      labels Bailout(Number, Object) {
+      initialAccumulator: JSAny | TheHole): JSAny
+      labels Bailout(Number, JSAny | TheHole) {
     let accumulator = initialAccumulator;
     const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1, accumulator);
     const fastO = Cast<FastJSArrayForRead>(o)
@@ -125,25 +133,32 @@ namespace array {
       // Ensure that we haven't walked beyond a possibly updated length.
       if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
 
-      const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
-      if (accumulator == TheHole) {
-        accumulator = value;
-      } else {
-        accumulator = Call(
-            context, callbackfn, Undefined, accumulator, value, k,
-            fastOW.Get());
+      const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
+      typeswitch (accumulator) {
+        case (TheHole): {
+          accumulator = value;
+        }
+        case (accumulatorNotHole: JSAny): {
+          accumulator = Call(
+              context, callbackfn, Undefined, accumulatorNotHole, value, k,
+              fastOW.Get());
+        }
       }
     }
-    if (accumulator == TheHole) {
-      ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
+    typeswitch (accumulator) {
+      case (TheHole): {
+        ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
+      }
+      case (accumulator: JSAny): {
+        return accumulator;
+      }
     }
-    return accumulator;
   }
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.reduceRight
   transitioning javascript builtin
-  ArrayReduceRight(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  ArrayReduceRight(js-implicit context: Context, receiver: JSAny)(...arguments):
+      JSAny {
     try {
       RequireObjectCoercible(receiver, 'Array.prototype.reduceRight');
 
@@ -163,14 +178,14 @@ namespace array {
       // exception. (This case is handled at the end of
       // ArrayReduceRightLoopContinuation).
 
-      const initialValue: Object =
+      const initialValue: JSAny | TheHole =
           arguments.length > 1 ? arguments[1] : TheHole;
 
       try {
         return FastArrayReduceRight(o, len, callbackfn, initialValue)
             otherwise Bailout;
       }
-      label Bailout(value: Number, accumulator: Object) {
+      label Bailout(value: Number, accumulator: JSAny | TheHole) {
         return ArrayReduceRightLoopContinuation(
             o, callbackfn, accumulator, o, value, len);
       }
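
The reduceRight rewrite above (and the identical one in array-reduce.tq below) moves the "no initial accumulator" sentinel into the type system: the accumulator is typed JSAny | TheHole, and every use is discharged with a typeswitch instead of an equality test, so TheHole provably never leaks into a Call. A TypeScript sketch of the sentinel logic (the symbol is a stand-in for Torque's TheHole):

const TheHole = Symbol("TheHole");  // sentinel meaning "no accumulator yet"

function reduceRightSketch<T>(arr: T[],
                              cb: (acc: T, v: T, k: number, o: T[]) => T,
                              ...initial: [T] | []): T {
  let acc: T | typeof TheHole = initial.length === 1 ? initial[0] : TheHole;
  for (let k = arr.length - 1; k >= 0; k--) {
    if (!(k in arr)) continue;              // holes never touch the accumulator
    const value = arr[k];
    acc = acc === TheHole
        ? value                             // 8b.iii.1: adopt first element
        : cb(acc, value, k, arr);           // 9c.ii: Call(callbackfn, ...)
  }
  if (acc === TheHole) {                    // kReduceNoInitial
    throw new TypeError("Reduce of empty array with no initial value");
  }
  return acc;
}
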
diff --git a/deps/v8/src/builtins/array-reduce.tq b/deps/v8/src/builtins/array-reduce.tq
index a5f6feb9ccedf3..1021c4864281fe 100644
--- a/deps/v8/src/builtins/array-reduce.tq
+++ b/deps/v8/src/builtins/array-reduce.tq
@@ -6,13 +6,13 @@ namespace array {
   transitioning javascript builtin
   ArrayReducePreLoopEagerDeoptContinuation(
       js-implicit context: Context,
-      receiver: Object)(callback: Object, length: Object): Object {
+      receiver: JSAny)(callback: JSAny, length: JSAny): JSAny {
     // All continuation points in the optimized reduce implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -27,15 +27,15 @@ namespace array {
 
   transitioning javascript builtin
   ArrayReduceLoopEagerDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, initialK: Object, length: Object,
-      accumulator: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, initialK: JSAny, length: JSAny,
+      accumulator: JSAny): JSAny {
     // All continuation points in the optimized reduce implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -48,9 +48,8 @@ namespace array {
 
   transitioning javascript builtin
   ArrayReduceLoopLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, initialK: Object, length: Object,
-      result: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, initialK: JSAny, length: JSAny, result: JSAny): JSAny {
     // All continuation points in the optimized reduce implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -66,8 +65,9 @@ namespace array {
   }
 
   transitioning builtin ArrayReduceLoopContinuation(implicit context: Context)(
-      _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
-      o: JSReceiver, initialK: Number, length: Number): Object {
+      _receiver: JSReceiver, callbackfn: Callable,
+      initialAccumulator: JSAny | TheHole, o: JSReceiver, initialK: Number,
+      length: Number): JSAny {
     let accumulator = initialAccumulator;
 
     // 8b and 9. Repeat, while k < len
@@ -82,16 +82,20 @@ namespace array {
       // 6c. If kPresent is true, then
       if (present == True) {
         // 6c. i. Let kValue be ? Get(O, Pk).
-        const value: Object = GetProperty(o, k);
-
-        if (accumulator == TheHole) {
-          // 8b.
-          accumulator = value;
-        } else {
-          // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
-          //         <accumulator, kValue, k, O>).
-          accumulator =
-              Call(context, callbackfn, Undefined, accumulator, value, k, o);
+        const value: JSAny = GetProperty(o, k);
+
+        typeswitch (accumulator) {
+          case (TheHole): {
+            // 8b.
+            accumulator = value;
+          }
+          case (accumulatorNotHole: JSAny): {
+            // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
+            //         <accumulator, kValue, k, O>).
+            accumulator = Call(
+                context, callbackfn, Undefined, accumulatorNotHole, value, k,
+                o);
+          }
         }
       }
 
@@ -101,16 +105,20 @@ namespace array {
     // 8c. if kPresent is false, throw a TypeError exception.
     // If the accumulator is discovered with the sentinel hole value,
     // this means kPresent is false.
-    if (accumulator == TheHole) {
-      ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
+    typeswitch (accumulator) {
+      case (TheHole): {
+        ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
+      }
+      case (accumulator: JSAny): {
+        return accumulator;
+      }
     }
-    return accumulator;
   }
 
   transitioning macro FastArrayReduce(implicit context: Context)(
       o: JSReceiver, len: Number, callbackfn: Callable,
-      initialAccumulator: Object): Object
-      labels Bailout(Number, Object) {
+      initialAccumulator: JSAny | TheHole): JSAny
+      labels Bailout(Number, JSAny | TheHole) {
     const k = 0;
     let accumulator = initialAccumulator;
     Cast<Smi>(len) otherwise goto Bailout(k, accumulator);
@@ -125,25 +133,32 @@ namespace array {
       // Ensure that we haven't walked beyond a possibly updated length.
       if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
 
-      const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
-      if (accumulator == TheHole) {
-        accumulator = value;
-      } else {
-        accumulator = Call(
-            context, callbackfn, Undefined, accumulator, value, k,
-            fastOW.Get());
+      const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
+      typeswitch (accumulator) {
+        case (TheHole): {
+          accumulator = value;
+        }
+        case (accumulatorNotHole: JSAny): {
+          accumulator = Call(
+              context, callbackfn, Undefined, accumulatorNotHole, value, k,
+              fastOW.Get());
+        }
       }
     }
-    if (accumulator == TheHole) {
-      ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
+    typeswitch (accumulator) {
+      case (TheHole): {
+        ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
+      }
+      case (accumulator: JSAny): {
+        return accumulator;
+      }
     }
-    return accumulator;
   }
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.reduce
   transitioning javascript builtin
-  ArrayReduce(js-implicit context: Context, receiver: Object)(...arguments):
-      Object {
+  ArrayReduce(js-implicit context: Context, receiver: JSAny)(...arguments):
+      JSAny {
     try {
       RequireObjectCoercible(receiver, 'Array.prototype.reduce');
 
@@ -163,14 +178,14 @@ namespace array {
       // exception. (This case is handled at the end of
       // ArrayReduceLoopContinuation).
 
-      const initialValue: Object =
+      const initialValue: JSAny | TheHole =
           arguments.length > 1 ? arguments[1] : TheHole;
 
       try {
         return FastArrayReduce(o, len, callbackfn, initialValue)
             otherwise Bailout;
       }
-      label Bailout(value: Number, accumulator: Object) {
+      label Bailout(value: Number, accumulator: JSAny | TheHole) {
         return ArrayReduceLoopContinuation(
             o, callbackfn, accumulator, o, value, len);
       }
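
The continuations above share the narrowing preamble that the comment calls a
"great mass of casts": every JS-visible parameter arrives as JSAny and is
re-narrowed with casts that cannot fail once ToObject(O) has run. A minimal
sketch of that idiom; NarrowContinuationArgs is a hypothetical name:

macro NarrowContinuationArgs(implicit context: Context)(
    receiver: JSAny, callback: JSAny, initialK: JSAny): JSReceiver {
  const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
  const _callbackfn = Cast<Callable>(callback) otherwise unreachable;
  const _k = Cast<Number>(initialK) otherwise unreachable;
  return jsreceiver;
}
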
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index 82d2e6b6058661..5e9d3a00f39d95 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -12,10 +12,10 @@ namespace array_reverse {
     return UnsafeCast<Smi>(elements.objects[index]);
   }
 
-  LoadElement<array::FastPackedObjectElements, Object>(
-      implicit context: Context)(elements: FixedArrayBase, index: Smi): Object {
+  LoadElement<array::FastPackedObjectElements, JSAny>(
+      implicit context: Context)(elements: FixedArrayBase, index: Smi): JSAny {
     const elements: FixedArray = UnsafeCast<FixedArray>(elements);
-    return elements.objects[index];
+    return UnsafeCast<JSAny>(elements.objects[index]);
   }
 
   LoadElement<array::FastPackedDoubleElements, float64>(
@@ -38,9 +38,9 @@ namespace array_reverse {
     StoreFixedArrayElement(elems, index, value, SKIP_WRITE_BARRIER);
   }
 
-  StoreElement<array::FastPackedObjectElements, Object>(
+  StoreElement<array::FastPackedObjectElements, JSAny>(
       implicit context:
-          Context)(elements: FixedArrayBase, index: Smi, value: Object) {
+          Context)(elements: FixedArrayBase, index: Smi, value: JSAny) {
     const elements: FixedArray = UnsafeCast<FixedArray>(elements);
     elements.objects[index] = value;
   }
@@ -70,8 +70,8 @@ namespace array_reverse {
     }
   }
 
-  transitioning macro GenericArrayReverse(context: Context, receiver: Object):
-      Object {
+  transitioning macro GenericArrayReverse(context: Context, receiver: JSAny):
+      JSAny {
     // 1. Let O be ? ToObject(this value).
     const object: JSReceiver = ToObject_Inline(context, receiver);
 
@@ -89,8 +89,8 @@ namespace array_reverse {
     let upper: Number = length - 1;
 
     while (lower < upper) {
-      let lowerValue: Object = Undefined;
-      let upperValue: Object = Undefined;
+      let lowerValue: JSAny = Undefined;
+      let upperValue: JSAny = Undefined;
 
       // b. Let upperP be ! ToString(upper).
       // c. Let lowerP be ! ToString(lower).
@@ -142,7 +142,7 @@ namespace array_reverse {
     return object;
   }
 
-  macro TryFastPackedArrayReverse(implicit context: Context)(receiver: Object)
+  macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny)
       labels Slow {
     const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
 
@@ -153,7 +153,7 @@ namespace array_reverse {
           array.elements, array.length);
     } else if (kind == PACKED_ELEMENTS) {
       array::EnsureWriteableFastElements(array);
-      FastPackedArrayReverse<array::FastPackedObjectElements, Object>(
+      FastPackedArrayReverse<array::FastPackedObjectElements, JSAny>(
           array.elements, array.length);
     } else if (kind == PACKED_DOUBLE_ELEMENTS) {
       FastPackedArrayReverse<array::FastPackedDoubleElements, float64>(
@@ -165,7 +165,7 @@ namespace array_reverse {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.reverse
   transitioning javascript builtin ArrayPrototypeReverse(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     try {
       TryFastPackedArrayReverse(receiver) otherwise Baseline;
       return receiver;
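
LoadElement and StoreElement above are specializations of generic macros keyed
on an elements layout and an element type, which FastPackedArrayReverse
instantiates once per packed kind. A sketch of how such a pair composes,
assuming it lives in the same namespace as the specializations; SwapElements is
a hypothetical name:

macro SwapElements<Layout: type, T: type>(implicit context: Context)(
    elements: FixedArrayBase, lower: Smi, upper: Smi) {
  const lowerValue: T = LoadElement<Layout, T>(elements, lower);
  const upperValue: T = LoadElement<Layout, T>(elements, upper);
  StoreElement<Layout, T>(elements, lower, upperValue);
  StoreElement<Layout, T>(elements, upper, lowerValue);
}
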
diff --git a/deps/v8/src/builtins/array-shift.tq b/deps/v8/src/builtins/array-shift.tq
index 4dd82d7b886d0a..48ffe3b4875805 100644
--- a/deps/v8/src/builtins/array-shift.tq
+++ b/deps/v8/src/builtins/array-shift.tq
@@ -3,11 +3,10 @@
 // found in the LICENSE file.
 
 namespace array_shift {
-  extern builtin ArrayShift(Context, JSFunction, Object, int32);
+  extern builtin ArrayShift(Context, JSFunction, JSAny, int32): JSAny;
 
-  macro TryFastArrayShift(implicit context: Context)(
-      receiver: Object, arguments: Arguments): Object
-      labels Slow {
+  macro TryFastArrayShift(implicit context: Context)(receiver: JSAny): JSAny
+      labels Slow, Runtime {
     const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
     let witness = NewFastJSArrayWitness(array);
 
@@ -17,35 +16,28 @@ namespace array_shift {
       return Undefined;
     }
 
-    try {
-      const newLength = array.length - 1;
+    const newLength = array.length - 1;
 
-      // Check that we're not supposed to right-trim the backing store, as
-      // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
-      if ((newLength + newLength + kMinAddedElementsCapacity) <
-          array.elements.length) {
-        goto Runtime;
-      }
+    // Check that we're not supposed to right-trim the backing store, as
+    // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+    if ((newLength + newLength + kMinAddedElementsCapacity) <
+        array.elements.length) {
+      goto Runtime;
+    }
 
-      // Check that we're not supposed to left-trim the backing store, as
-      // implemented in elements.cc:FastElementsAccessor::MoveElements.
-      if (newLength > kMaxCopyElements) goto Runtime;
+    // Check that we're not supposed to left-trim the backing store, as
+    // implemented in elements.cc:FastElementsAccessor::MoveElements.
+    if (newLength > kMaxCopyElements) goto Runtime;
 
-      const result = witness.LoadElementOrUndefined(0);
-      witness.ChangeLength(newLength);
-      witness.MoveElements(0, 1, Convert<intptr>(newLength));
-      witness.StoreHole(newLength);
-      return result;
-    }
-    label Runtime {
-      tail ArrayShift(
-          context, LoadTargetFromFrame(), Undefined,
-          Convert<int32>(arguments.length));
-    }
+    const result = witness.LoadElementOrUndefined(0);
+    witness.ChangeLength(newLength);
+    witness.MoveElements(0, 1, Convert<intptr>(newLength));
+    witness.StoreHole(newLength);
+    return result;
   }
 
   transitioning macro GenericArrayShift(implicit context:
-                                            Context)(receiver: Object): Object {
+                                            Context)(receiver: JSAny): JSAny {
     // 1. Let O be ? ToObject(this value).
     const object: JSReceiver = ToObject_Inline(context, receiver);
 
@@ -78,7 +70,7 @@ namespace array_shift {
       // d. If fromPresent is true, then
       if (fromPresent == True) {
         // i. Let fromVal be ? Get(O, from).
-        const fromValue: Object = GetProperty(object, from);
+        const fromValue: JSAny = GetProperty(object, from);
 
         // ii. Perform ? Set(O, to, fromValue, true).
         SetProperty(object, to, fromValue);
@@ -103,12 +95,17 @@ namespace array_shift {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.shift
   transitioning javascript builtin ArrayPrototypeShift(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     try {
-      return TryFastArrayShift(receiver, arguments) otherwise Slow;
+      return TryFastArrayShift(receiver) otherwise Slow, Runtime;
     }
     label Slow {
       return GenericArrayShift(receiver);
     }
+    label Runtime {
+      tail ArrayShift(
+          context, LoadTargetFromFrame(), Undefined,
+          Convert<int32>(arguments.length));
+    }
   }
 }
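
The restructuring above trades the macro-internal try/label for a second
outgoing label: TryFastArrayShift now reports why it bailed out, and
ArrayPrototypeShift picks the recovery (generic path vs. tail call into the
runtime-backed builtin). A minimal sketch of the two-label shape; TryFastPath
is a hypothetical name, kMaxCopyElements the constant used above:

macro TryFastPath(implicit context: Context)(receiver: JSAny): JSAny
    labels Slow, Runtime {
  // Slow: the receiver is not a fast array at all.
  const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
  // Runtime: it is a fast array, but this case is better left to the runtime.
  if (array.length > kMaxCopyElements) goto Runtime;
  return Undefined;
}
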
diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq
index c3a6ac75cb0ec7..57ddc8dea1c852 100644
--- a/deps/v8/src/builtins/array-slice.tq
+++ b/deps/v8/src/builtins/array-slice.tq
@@ -4,7 +4,7 @@
 
 namespace array_slice {
   macro HandleSimpleArgumentsSlice(
-      context: Context, args: JSArgumentsObjectWithLength, start: Smi,
+      context: NativeContext, args: JSArgumentsObjectWithLength, start: Smi,
       count: Smi): JSArray
       labels Bailout {
     // If the resulting array doesn't fit in new space, use the slow path.
@@ -27,7 +27,7 @@ namespace array_slice {
   }
 
   macro HandleFastAliasedSloppyArgumentsSlice(
-      context: Context, args: JSArgumentsObjectWithLength, start: Smi,
+      context: NativeContext, args: JSArgumentsObjectWithLength, start: Smi,
       count: Smi): JSArray
       labels Bailout {
     // If the resulting array doesn't fit in new space, use the slow path.
@@ -63,9 +63,9 @@ namespace array_slice {
     for (let current: Smi = start; current < to; ++current) {
       const e: Object =
           sloppyElements.objects[current + kSloppyArgumentsParameterMapStart];
-      const newElement: Object = e != TheHole ?
-          argumentsContext[UnsafeCast<Smi>(e)] :
-          unmappedElements.objects[current];
+      const newElement: JSAny = UnsafeCast<JSAny>(
+          e != TheHole ? argumentsContext[UnsafeCast<Smi>(e)] :
+                         unmappedElements.objects[current]);
       // It is safe to skip the write barrier here because resultElements was
       // allocated together with result in a folded allocation.
       // TODO(tebbi): The verification of this fails at the moment due to
@@ -86,7 +86,7 @@ namespace array_slice {
   }
 
   macro HandleFastSlice(
-      context: Context, o: Object, startNumber: Number,
+      context: NativeContext, o: JSAny, startNumber: Number,
       countNumber: Number): JSArray
       labels Bailout {
     const start: Smi = Cast<Smi>(startNumber) otherwise Bailout;
@@ -114,7 +114,7 @@ namespace array_slice {
               otherwise Bailout;
         }
       }
-      case (Object): {
+      case (JSAny): {
       }
     }
     goto Bailout;
@@ -122,15 +122,15 @@ namespace array_slice {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.slice
   transitioning javascript builtin
-  ArrayPrototypeSlice(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  ArrayPrototypeSlice(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     // Handle array cloning case if the receiver is a fast array.
     if (arguments.length == 0) {
       typeswitch (receiver) {
         case (a: FastJSArrayForCopy): {
           return CloneFastJSArray(context, a);
         }
-        case (Object): {
+        case (JSAny): {
         }
       }
     }
@@ -142,7 +142,7 @@ namespace array_slice {
     const len: Number = GetLengthProperty(o);
 
     // 3. Let relativeStart be ? ToInteger(start).
-    const start: Object = arguments[0];
+    const start: JSAny = arguments[0];
     const relativeStart: Number = ToInteger_Inline(context, start);
 
     // 4. If relativeStart < 0, let k be max((len + relativeStart), 0);
@@ -152,7 +152,7 @@ namespace array_slice {
 
     // 5. If end is undefined, let relativeEnd be len;
     //    else let relativeEnd be ? ToInteger(end).
-    const end: Object = arguments[1];
+    const end: JSAny = arguments[1];
     const relativeEnd: Number =
         end == Undefined ? len : ToInteger_Inline(context, end);
 
@@ -172,7 +172,8 @@ namespace array_slice {
     assert(count <= len);
 
     try {
-      return HandleFastSlice(context, o, k, count) otherwise Slow;
+      return HandleFastSlice(UnsafeCast<NativeContext>(context), o, k, count)
+          otherwise Slow;
     }
     label Slow {}
 
@@ -193,7 +194,7 @@ namespace array_slice {
       // c. If kPresent is true, then
       if (fromPresent == True) {
         // i. Let kValue be ? Get(O, Pk).
-        const kValue: Object = GetProperty(o, pK);
+        const kValue: JSAny = GetProperty(o, pK);
 
         // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(n), kValue).
         FastCreateDataProperty(a, n, kValue);
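
Both HandleFastSlice and ArrayPrototypeSlice above use a typeswitch whose final
case (JSAny) arm is an intentional no-op: any receiver that matches none of the
fast shapes falls through to the slow path. A hedged sketch of that shape;
CloneIfFastArray is a hypothetical name:

macro CloneIfFastArray(implicit context: Context)(receiver: JSAny): JSAny
    labels Slow {
  typeswitch (receiver) {
    case (a: FastJSArrayForCopy): {
      return CloneFastJSArray(context, a);
    }
    case (JSAny): {
      // Not fast-cloneable; fall through to the slow path below.
    }
  }
  goto Slow;
}
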
diff --git a/deps/v8/src/builtins/array-some.tq b/deps/v8/src/builtins/array-some.tq
index a30af4e47a42c4..5d93dd0b726017 100644
--- a/deps/v8/src/builtins/array-some.tq
+++ b/deps/v8/src/builtins/array-some.tq
@@ -5,15 +5,14 @@
 namespace array {
   transitioning javascript builtin
   ArraySomeLoopEagerDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object,
-      length: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
     // All continuation points in the optimized some implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
     //
     // Also, this great mass of casts is necessary because the signature
-    // of Torque javascript builtins requires Object type for all parameters
+    // of Torque javascript builtins requires JSAny type for all parameters
     // other than {context}.
     const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
     const callbackfn = Cast<Callable>(callback) otherwise unreachable;
@@ -27,9 +26,9 @@ namespace array {
 
   transitioning javascript builtin
   ArraySomeLoopLazyDeoptContinuation(
-      js-implicit context: Context, receiver: Object)(
-      callback: Object, thisArg: Object, initialK: Object, length: Object,
-      result: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(
+      callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+      result: JSAny): JSAny {
     // All continuation points in the optimized some implementation are
     // after the ToObject(O) call that ensures we are dealing with a
     // JSReceiver.
@@ -53,9 +52,9 @@ namespace array {
   }
 
   transitioning builtin ArraySomeLoopContinuation(implicit context: Context)(
-      _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
-      _array: Object, o: JSReceiver, initialK: Number, length: Number,
-      _initialTo: Object): Object {
+      _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
+      _array: JSAny, o: JSReceiver, initialK: Number, length: Number,
+      _initialTo: JSAny): JSAny {
     // 5. Let k be 0.
     // 6. Repeat, while k < len
     for (let k: Number = initialK; k < length; k++) {
@@ -69,10 +68,10 @@ namespace array {
       // 6c. If kPresent is true, then
       if (kPresent == True) {
         // 6c. i. Let kValue be ? Get(O, Pk).
-        const kValue: Object = GetProperty(o, k);
+        const kValue: JSAny = GetProperty(o, k);
 
         // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
-        const result: Object = Call(context, callbackfn, thisArg, kValue, k, o);
+        const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o);
 
         // iii. If selected is true, then...
         if (ToBoolean(result)) {
@@ -86,7 +85,7 @@ namespace array {
   }
 
   transitioning macro FastArraySome(implicit context: Context)(
-      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
+      o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
       labels Bailout(Smi) {
     let k: Smi = 0;
     const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
@@ -99,8 +98,8 @@ namespace array {
 
       // Ensure that we haven't walked beyond a possibly updated length.
       if (k >= fastOW.Get().length) goto Bailout(k);
-      const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
-      const result: Object =
+      const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
+      const result: JSAny =
           Call(context, callbackfn, thisArg, value, k, fastOW.Get());
       if (ToBoolean(result)) {
         return True;
@@ -111,8 +110,8 @@ namespace array {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.some
   transitioning javascript builtin
-  ArraySome(js-implicit context: Context, receiver: Object)(...arguments):
-      Object {
+  ArraySome(js-implicit context: Context, receiver: JSAny)(...arguments):
+      JSAny {
     try {
       RequireObjectCoercible(receiver, 'Array.prototype.some');
 
@@ -129,7 +128,7 @@ namespace array {
       const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
 
       // 4. If thisArg is present, let T be thisArg; else let T be undefined.
-      const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+      const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
 
       // Special cases.
       try {
diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq
index 3b65bb03d48bac..04885dbb9c481a 100644
--- a/deps/v8/src/builtins/array-splice.tq
+++ b/deps/v8/src/builtins/array-splice.tq
@@ -95,7 +95,7 @@ namespace array_splice {
       const typedNewElements: FixedArrayType =
           UnsafeCast<FixedArrayType>(a.elements);
       for (let i: intptr = 2; i < args.length; ++i) {
-        const e: Object = args[i];
+        const e: JSAny = args[i];
         // The argument elements were already validated to be an appropriate
         // {ElementType} to store in {FixedArrayType}.
         typedNewElements[k++] = UnsafeCast<ElementType>(e);
@@ -109,7 +109,7 @@ namespace array_splice {
   transitioning macro FastArraySplice(
       context: Context, args: Arguments, o: JSReceiver,
       originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi,
-      actualDeleteCountNumber: Number): Object
+      actualDeleteCountNumber: Number): JSAny
       labels Bailout {
     const originalLength: Smi =
         Cast<Smi>(originalLengthNumber) otherwise Bailout;
@@ -132,7 +132,7 @@ namespace array_splice {
 
     const oldElementsKind: ElementsKind = elementsKind;
     for (let i: intptr = 2; i < args.length; ++i) {
-      const e: Object = args[i];
+      const e: JSAny = args[i];
       if (IsFastSmiElementsKind(elementsKind)) {
         if (TaggedIsNotSmi(e)) {
           const heapObject: HeapObject = UnsafeCast<HeapObject>(e);
@@ -166,7 +166,7 @@ namespace array_splice {
     }
 
     if (IsFastSmiOrTaggedElementsKind(elementsKind)) {
-      FastSplice<FixedArray, Object>(
+      FastSplice<FixedArray, JSAny>(
           args, a, length, newLength, actualStart, insertCount,
           actualDeleteCount);
     } else {
@@ -180,7 +180,7 @@ namespace array_splice {
 
   transitioning macro FillDeletedElementsArray(
       context: Context, o: JSReceiver, actualStart: Number,
-      actualDeleteCount: Number, a: JSReceiver): Object {
+      actualDeleteCount: Number, a: JSReceiver): JSAny {
     // 10. Let k be 0.
     let k: Number = 0;
 
@@ -195,7 +195,7 @@ namespace array_splice {
       // c. If fromPresent is true, then
       if (fromPresent == True) {
         // i. Let fromValue be ? Get(O, from).
-        const fromValue: Object = GetProperty(o, from);
+        const fromValue: JSAny = GetProperty(o, from);
 
         // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue).
         FastCreateDataProperty(a, k, fromValue);
@@ -231,7 +231,7 @@ namespace array_splice {
       // iv. If fromPresent is true, then
       if (fromPresent == True) {
         // 1. Let fromValue be ? Get(O, from).
-        const fromValue: Object = GetProperty(o, from);
+        const fromValue: JSAny = GetProperty(o, from);
 
         // 2. Perform ? Set(O, to, fromValue, true).
         SetProperty(o, to, fromValue);
@@ -280,7 +280,7 @@ namespace array_splice {
       // iv. If fromPresent is true, then
       if (fromPresent == True) {
         // 1. Let fromValue be ? Get(O, from).
-        const fromValue: Object = GetProperty(o, from);
+        const fromValue: JSAny = GetProperty(o, from);
 
         // 2. Perform ? Set(O, to, fromValue, true).
         SetProperty(o, to, fromValue);
@@ -298,8 +298,7 @@ namespace array_splice {
 
   transitioning macro SlowSplice(
       context: Context, arguments: Arguments, o: JSReceiver, len: Number,
-      actualStart: Number, insertCount: Smi,
-      actualDeleteCount: Number): Object {
+      actualStart: Number, insertCount: Smi, actualDeleteCount: Number): JSAny {
     // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount).
     const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount);
     const itemCount: Number = insertCount;
@@ -332,7 +331,7 @@ namespace array_splice {
     //   element.
     if (arguments.length > 2) {
       for (let i: intptr = 2; i < arguments.length; ++i) {
-        const e: Object = arguments[i];
+        const e: JSAny = arguments[i];
         // b. Perform ? Set(O, ! ToString(k), E, true).
         SetProperty(o, k, e);
 
@@ -350,8 +349,8 @@ namespace array_splice {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.splice
   transitioning javascript builtin
-  ArrayPrototypeSplice(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  ArrayPrototypeSplice(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     // 1. Let O be ? ToObject(this value).
     const o: JSReceiver = ToObject(context, receiver);
 
@@ -359,7 +358,7 @@ namespace array_splice {
     const len: Number = GetLengthProperty(o);
 
     // 3. Let relativeStart be ? ToInteger(start).
-    const start: Object = arguments[0];
+    const start: JSAny = arguments[0];
     const relativeStart: Number = ToInteger_Inline(context, start);
 
     // 4. If relativeStart < 0, let actualStart be max((len + relativeStart),
@@ -388,7 +387,7 @@ namespace array_splice {
       // a. Let insertCount be the Number of actual arguments minus 2.
       insertCount = Convert<Smi>(arguments.length) - 2;
       // b. Let dc be ? ToInteger(deleteCount).
-      const deleteCount: Object = arguments[1];
+      const deleteCount: JSAny = arguments[1];
       const dc: Number = ToInteger_Inline(context, deleteCount);
       // c. Let actualDeleteCount be min(max(dc, 0), len - actualStart).
       actualDeleteCount = Min(Max(dc, 0), len - actualStart);
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
index e685d520cd963a..422eee158de535 100644
--- a/deps/v8/src/builtins/array-unshift.tq
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -3,25 +3,10 @@
 // found in the LICENSE file.
 
 namespace array_unshift {
-  extern builtin ArrayUnshift(Context, JSFunction, Object, int32);
-
-  macro TryFastArrayUnshift(
-      context: Context, receiver: Object, arguments: Arguments): never
-      labels Slow {
-    const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
-    array::EnsureWriteableFastElements(array);
-
-    const map: Map = array.map;
-    if (!IsExtensibleMap(map)) goto Slow;
-    EnsureArrayLengthWritable(map) otherwise Slow;
-
-    tail ArrayUnshift(
-        context, LoadTargetFromFrame(), Undefined,
-        Convert<int32>(arguments.length));
-  }
+  extern builtin ArrayUnshift(Context, JSFunction, JSAny, int32): JSAny;
 
   transitioning macro GenericArrayUnshift(
-      context: Context, receiver: Object, arguments: Arguments): Number {
+      context: Context, receiver: JSAny, arguments: Arguments): Number {
     // 1. Let O be ? ToObject(this value).
     const object: JSReceiver = ToObject_Inline(context, receiver);
 
@@ -55,7 +40,7 @@ namespace array_unshift {
         // iv. If fromPresent is true, then
         if (fromPresent == True) {
           // 1. Let fromValue be ? Get(O, from).
-          const fromValue: Object = GetProperty(object, from);
+          const fromValue: JSAny = GetProperty(object, from);
 
           // 2. Perform ? Set(O, to, fromValue, true).
           SetProperty(object, to, fromValue);
@@ -93,11 +78,20 @@ namespace array_unshift {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.unshift
   transitioning javascript builtin ArrayPrototypeUnshift(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     try {
-      TryFastArrayUnshift(context, receiver, arguments) otherwise Baseline;
+      const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+      array::EnsureWriteableFastElements(array);
+
+      const map: Map = array.map;
+      if (!IsExtensibleMap(map)) goto Slow;
+      EnsureArrayLengthWritable(map) otherwise Slow;
+
+      tail ArrayUnshift(
+          context, LoadTargetFromFrame(), Undefined,
+          Convert<int32>(arguments.length));
     }
-    label Baseline {
+    label Slow {
       return GenericArrayUnshift(context, receiver, arguments);
     }
   }
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index 7e044e086b89b3..b9ae314c08a297 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -32,30 +32,15 @@ namespace array {
     assert(array.elements.map != kCOWMap);
   }
 
-  macro IsJSArray(implicit context: Context)(o: Object): bool {
-    typeswitch (o) {
-      case (JSArray): {
-        return true;
-      }
-      case (Object): {
-        return false;
-      }
-    }
-  }
-
-  macro LoadElementOrUndefined(a: FixedArray, i: Smi): Object {
-    const e: Object = a.objects[i];
-    return e == TheHole ? Undefined : e;
+  macro LoadElementOrUndefined(implicit context:
+                                   Context)(a: FixedArray, i: Smi): JSAny {
+    const e = UnsafeCast<(JSAny | TheHole)>(a.objects[i]);
+    return ReplaceTheHoleWithUndefined(e);
   }
 
   macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined {
-    try {
-      const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise IfHole;
-      return AllocateHeapNumberWithValue(f);
-    }
-    label IfHole {
-      return Undefined;
-    }
+    const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise return Undefined;
+    return AllocateHeapNumberWithValue(f);
   }
 
   macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void {
@@ -66,5 +51,5 @@ namespace array {
     elements.objects[k] = TheHole;
   }
 
-  extern macro SetPropertyLength(implicit context: Context)(Object, Number);
+  extern macro SetPropertyLength(implicit context: Context)(JSAny, Number);
 }
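
LoadElementOrUndefined above combines two idioms: ReplaceTheHoleWithUndefined
converts a JSAny | TheHole value into a plain JSAny, and "otherwise return ..."
attaches a single-statement handler to a label without a try block. A sketch of
the latter; LoadDoubleOrNull is a hypothetical variant that maps holes to Null
instead of Undefined:

macro LoadDoubleOrNull(implicit context: Context)(
    a: FixedDoubleArray, i: Smi): HeapNumber | Null {
  const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise return Null;
  return AllocateHeapNumberWithValue(f);
}
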
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 07af1f441f8060..aa5d4cc50a731c 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -43,6 +43,29 @@ extern class HeapObject extends Tagged {
 
 type Object = Smi | HeapObject;
 
+// Defined to coincide with https://tc39.es/ecma262/#sec-ispropertykey
+// Doesn't include PrivateSymbol.
+type PropertyKey = String | PublicSymbol;
+
+// TODO(tebbi): PrivateSymbol is only exposed to JavaScript through the debugger
+// API. We should reconsider this and try not to expose it at all. Then JSAny
+// would not need to contain it.
+
+// A JavaScript primitive value as defined in
+// https://tc39.es/ecma262/#sec-primitive-value.
+type JSPrimitive = Numeric | String | Symbol | Boolean |
+    Null | Undefined;
+
+// A user-exposed JavaScript value, as opposed to V8-internal values like
+// TheHole or FixedArray.
+type JSAny = JSReceiver | JSPrimitive;
+
+type JSAnyNotNumber = BigInt | String | Symbol | Boolean |
+    Null | Undefined | JSReceiver;
+
+// This is the intersection of JSAny and HeapObject.
+type JSAnyNotSmi = JSAnyNotNumber | HeapNumber;
+
 type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
 type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
 type int31 extends int32
@@ -56,6 +79,8 @@ type uint16 extends uint31
 type int8 extends int16 generates 'TNode<Int8T>' constexpr 'int8_t';
 type uint8 extends uint16
     generates 'TNode<Uint8T>' constexpr 'uint8_t';
+type char8 extends int8 constexpr 'char';
+type char16 extends uint16 constexpr 'char16_t';
 type int64 generates 'TNode<Int64T>' constexpr 'int64_t';
 type intptr generates 'TNode<IntPtrT>' constexpr 'intptr_t';
 type uintptr generates 'TNode<UintPtrT>' constexpr 'uintptr_t';
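
Because JSAny is defined as exactly JSReceiver | JSPrimitive, a two-arm
typeswitch over it is exhaustive and needs no catch-all, which is what makes it
workable as the boundary type for javascript builtins. A minimal sketch;
IsReceiver is a hypothetical name:

macro IsReceiver(implicit context: Context)(v: JSAny): bool {
  typeswitch (v) {
    case (JSReceiver): {
      return true;
    }
    case (JSPrimitive): {
      return false;
    }
  }
}
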
@@ -77,7 +102,7 @@ extern class Context extends HeapObject {
   extension: Object;
   native_context: Object;
 }
-type NativeContext extends Context;
+type NativeContext extends Context generates 'TNode<NativeContext>';
 
 @generateCppClass
 extern class Oddball extends HeapObject {
@@ -97,6 +122,9 @@ type Numeric = Number | BigInt;
 extern class Name extends HeapObject {
   hash_field: uint32;
 }
+// This is the same as Name, but with the information that there are no other
+// kinds of names.
+type AnyName = PrivateSymbol | PublicSymbol | String;
 
 @generateCppClass
 extern class Symbol extends Name {
@@ -104,6 +132,9 @@ extern class Symbol extends Name {
   name: Object;  // The print name of a symbol, or undefined if none.
 }
 
+type PublicSymbol extends Symbol;
+type PrivateSymbol extends Symbol;
+
 @abstract
 @generateCppClass
 extern class String extends Name {
@@ -136,9 +167,11 @@ extern class SeqString extends String {
 }
 @generateCppClass
 extern class SeqOneByteString extends SeqString {
+  chars[length]: char8;
 }
 @generateCppClass
 extern class SeqTwoByteString extends SeqString {
+  chars[length]: char16;
 }
 
 @generateCppClass
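
The new chars[length] declarations use Torque's indexed-field syntax: the field
is an inline array whose extent is given by another Smi-typed field of the
object (here, String's length). A hedged sketch of the same shape on a
hypothetical class:

extern class ExampleByteList extends HeapObject {
  count: Smi;
  bytes[count]: uint8;
}
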
@@ -185,7 +218,6 @@ type DirectString extends String;
 type RootIndex generates 'TNode<Int32T>' constexpr 'RootIndex';
 
 @abstract
-@generateCppClass
 extern class FixedArrayBase extends HeapObject {
   length: Smi;
 }
@@ -205,7 +237,7 @@ type LayoutDescriptor extends ByteArray
 type TransitionArray extends WeakFixedArray
     generates 'TNode<TransitionArray>';
 
-type InstanceType extends uint16 constexpr 'InstanceType';
+type InstanceType extends uint16 constexpr 'v8::internal::InstanceType';
 
 extern class Map extends HeapObject {
   instance_size_in_words: uint8;
@@ -388,8 +420,8 @@ extern class JSProxy extends JSReceiver {
 // Just a starting shape for JSObject; properties can move after initialization.
 @noVerifier
 extern class JSProxyRevocableResult extends JSObject {
-  proxy: Object;
-  revoke: Object;
+  proxy: JSAny;
+  revoke: JSAny;
 }
 
 macro NewJSProxyRevocableResult(implicit context: Context)(
@@ -412,22 +444,24 @@ extern class JSGlobalProxy extends JSObject {
 
 @generateCppClass
 extern class JSPrimitiveWrapper extends JSObject {
-  value: Object;
+  value: JSAny;
 }
 
-extern class JSArgumentsObject extends JSObject {}
+@generateCppClass
+extern class JSArgumentsObject extends JSObject {
+}
 
 // Just a starting shape for JSObject; properties can move after initialization.
 @noVerifier
 @hasSameInstanceTypeAsParent
 extern class JSArgumentsObjectWithLength extends JSArgumentsObject {
-  length: Object;
+  length: JSAny;
 }
 
 // Just a starting shape for JSObject; properties can move after initialization.
 @hasSameInstanceTypeAsParent
 extern class JSSloppyArgumentsObject extends JSArgumentsObjectWithLength {
-  callee: Object;
+  callee: JSAny;
 }
 
 // Just a starting shape for JSObject; properties can move after initialization.
@@ -492,8 +526,8 @@ type NoSharedNameSentinel extends Smi;
 
 @generateCppClass
 extern class CallHandlerInfo extends Struct {
-  callback: Foreign | Undefined;
-  js_callback: Foreign | Undefined;
+  callback: NonNullForeign | Undefined | Zero;
+  js_callback: NonNullForeign | Undefined | Zero;
   data: Object;
 }
 
@@ -510,18 +544,37 @@ extern class Module extends HeapObject {
 
 type SourceTextModuleInfo extends FixedArray;
 
+@generateCppClass
 extern class SourceTextModule extends Module {
+  // The code representing this module, or an abstraction thereof.
   code: SharedFunctionInfo | JSFunction |
       JSGeneratorObject | SourceTextModuleInfo;
+
+  // Arrays of cells corresponding to regular exports and regular imports.
+  // A cell's position in the array is determined by the cell index of the
+  // associated module entry (which coincides with the variable index of the
+  // associated variable).
   regular_exports: FixedArray;
   regular_imports: FixedArray;
+
+  // Modules imported or re-exported by this module.
+  // Corresponds 1-to-1 to the module specifier strings in
+  // SourceTextModuleInfo::module_requests.
   requested_modules: FixedArray;
+
+  // Script from which the module originates.
   script: Script;
+
+  // The value of import.meta inside of this module.
+  // Lazily initialized on first access. It's the hole before first access and
+  // a JSObject afterwards.
   import_meta: TheHole | JSObject;
+
   dfs_index: Smi;
   dfs_ancestor_index: Smi;
 }
 
+@generateCppClass
 extern class SyntheticModule extends Module {
   name: String;
   export_names: FixedArray;
@@ -529,6 +582,7 @@ extern class SyntheticModule extends Module {
 }
 
 @abstract
+@generateCppClass
 extern class JSModuleNamespace extends JSObject {
   module: Module;
 }
@@ -539,14 +593,23 @@ extern class TemplateList extends FixedArray {
 }
 
 @abstract
+@generateCppClass
 extern class JSWeakCollection extends JSObject {
+  // The backing hash table mapping keys to values.
   table: Object;
 }
-extern class JSWeakSet extends JSWeakCollection {}
-extern class JSWeakMap extends JSWeakCollection {}
+@generateCppClass
+extern class JSWeakSet extends JSWeakCollection {
+}
+@generateCppClass
+extern class JSWeakMap extends JSWeakCollection {
+}
 
+@generateCppClass
 extern class JSCollectionIterator extends JSObject {
+  // The backing hash table mapping keys to values.
   table: Object;
+  // The index into the data table.
   index: Object;
 }
 
@@ -601,7 +664,10 @@ extern class Script extends Struct {
   host_defined_options: Object;
 }
 
-extern class EmbedderDataArray extends HeapObject { length: Smi; }
+@generateCppClass
+extern class EmbedderDataArray extends HeapObject {
+  length: Smi;
+}
 
 type ScopeInfo extends HeapObject generates 'TNode<ScopeInfo>';
 
@@ -631,9 +697,15 @@ extern class SharedFunctionInfo extends HeapObject {
   @if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
 }
 
+@generateCppClass
 extern class JSBoundFunction extends JSObject {
+  // The wrapped function object.
   bound_target_function: Callable;
-  bound_this: Object;
+  // The value that is always passed as the this value when calling the wrapped
+  // function.
+  bound_this: JSAny;
+  // A list of values whose elements are used as the first arguments to any call
+  // to the wrapped function.
   bound_arguments: FixedArray;
 }
 
@@ -644,7 +716,7 @@ extern class JSBoundFunction extends JSObject {
 type NonNullForeign extends Foreign;
 
 // A function built with InstantiateFunction for the public API.
-type CallableApiObject extends HeapObject;
+type CallableApiObject extends JSObject;
 
 // A JSProxy with the callable bit set.
 type CallableJSProxy extends JSProxy;
@@ -729,14 +801,26 @@ extern class JSTypedArray extends JSArrayBufferView {
 }
 
 @abstract
+@generateCppClass
 extern class JSCollection extends JSObject {
+  // The backing hash table.
   table: Object;
 }
-extern class JSSet extends JSCollection {}
-extern class JSMap extends JSCollection {}
+@generateCppClass
+extern class JSSet extends JSCollection {
+}
+@generateCppClass
+extern class JSMap extends JSCollection {
+}
 
+@generateCppClass
 extern class JSDate extends JSObject {
+  // If one component is NaN, all of them are, indicating a NaN time value.
+
+  // The time value.
   value: NumberOrUndefined;
+
+  // Cached values:
   year: Undefined | Smi | NaN;
   month: Undefined | Smi | NaN;
   day: Undefined | Smi | NaN;
@@ -744,6 +828,9 @@ extern class JSDate extends JSObject {
   hour: Undefined | Smi | NaN;
   min: Undefined | Smi | NaN;
   sec: Undefined | Smi | NaN;
+
+  // Sample of the date cache stamp at the moment when cached fields were
+  // cached.
   cache_stamp: Undefined | Smi | NaN;
 }
 
@@ -752,8 +839,11 @@ extern class JSGlobalObject extends JSObject {
   global_proxy: JSGlobalProxy;
 }
 
+@generateCppClass
 extern class JSAsyncFromSyncIterator extends JSObject {
   sync_iterator: JSReceiver;
+  // The "next" method is loaded during GetIterator, and is not reloaded for
+  // subsequent "next" invocations.
   next: Object;
 }
 
@@ -763,6 +853,7 @@ extern class JSStringIterator extends JSObject {
 }
 
 @abstract
+@generateCppClass
 extern class TemplateInfo extends Struct {
   tag: Object;
   serial_number: Object;
@@ -772,12 +863,15 @@ extern class TemplateInfo extends Struct {
 }
 
 @generatePrint
+@generateCppClass
 extern class TemplateObjectDescription extends Struct {
   raw_strings: FixedArray;
   cooked_strings: FixedArray;
 }
 
+@generateCppClass
 extern class FunctionTemplateRareData extends Struct {
+  // See DECL_RARE_ACCESSORS in FunctionTemplateInfo.
   prototype_template: Object;
   prototype_provider_template: Object;
   parent_template: Object;
@@ -788,17 +882,31 @@ extern class FunctionTemplateRareData extends Struct {
   access_check_info: Object;
 }
 
+@generateCppClass
 extern class FunctionTemplateInfo extends TemplateInfo {
+  // Handler invoked when calling an instance of this FunctionTemplateInfo.
+  // Either CallHandlerInfo or Undefined.
   call_code: Object;
   class_name: Object;
+  // If the signature is a FunctionTemplateInfo it is used to check whether the
+  // receiver calling the associated JSFunction is a compatible receiver, i.e.
+  // it is an instance of the signature FunctionTemplateInfo or any of the
+  // receiver's prototypes are.
   signature: Object;
-  function_template_rare_data: Object;
+  // If any of the setters declared by DECL_RARE_ACCESSORS are used then a
+  // FunctionTemplateRareData will be stored here. Until then this contains
+  // undefined.
+  rare_data: HeapObject;
   shared_function_info: Object;
   flag: Smi;
   length: Smi;
+  // Either the_hole or a private symbol. Used to cache the result on
+  // the receiver under the cached_property_name when this
+  // FunctionTemplateInfo is used as a getter.
   cached_property_name: Object;
 }
 
+@generateCppClass
 extern class ObjectTemplateInfo extends TemplateInfo {
   constructor: Object;
   data: Object;
@@ -809,7 +917,7 @@ extern class PropertyArray extends HeapObject { length_and_hash: Smi; }
 type DependentCode extends WeakFixedArray;
 
 extern class PropertyCell extends HeapObject {
-  name: Name;
+  name: AnyName;
   property_details_raw: Smi;
   value: Object;
   dependent_code: DependentCode;
@@ -840,6 +948,7 @@ const UTF32:
 
 extern class Foreign extends HeapObject { foreign_address: RawPtr; }
 
+@generateCppClass
 extern class InterceptorInfo extends Struct {
   getter: NonNullForeign | Zero | Undefined;
   setter: NonNullForeign | Zero | Undefined;
@@ -852,6 +961,7 @@ extern class InterceptorInfo extends Struct {
   flags: Smi;
 }
 
+@generateCppClass
 extern class AccessCheckInfo extends Struct {
   callback: Foreign | Zero | Undefined;
   named_interceptor: InterceptorInfo | Zero | Undefined;
@@ -859,14 +969,21 @@ extern class AccessCheckInfo extends Struct {
   data: Object;
 }
 
+@generateCppClass
 extern class ArrayBoilerplateDescription extends Struct {
   flags: Smi;
   constant_elements: FixedArrayBase;
 }
 
-extern class AliasedArgumentsEntry extends Struct { aliased_context_slot: Smi; }
+@generateCppClass
+extern class AliasedArgumentsEntry extends Struct {
+  aliased_context_slot: Smi;
+}
 
-extern class Cell extends HeapObject { value: Object; }
+@generateCppClass
+extern class Cell extends HeapObject {
+  value: Object;
+}
 
 extern class DataHandler extends Struct {
   smi_handler: Smi | Code;
@@ -881,39 +998,58 @@ extern class DataHandler extends Struct {
 
 @abstract
 @dirtyInstantiatedAbstractClass
+@generateCppClass
 extern class JSGeneratorObject extends JSObject {
   function: JSFunction;
   context: Context;
-  receiver: Object;
+  receiver: JSAny;
+
+  // For executing generators: the most recent input value.
+  // For suspended generators: debug information (bytecode offset).
+  // There is currently no need to remember the most recent input value for a
+  // suspended generator.
   input_or_debug_pos: Object;
+
   resume_mode: Smi;
   continuation: Smi;
+
+  // Saved interpreter register file.
   parameters_and_registers: FixedArray;
 }
 
+@generateCppClass
 extern class JSAsyncFunctionObject extends JSGeneratorObject {
   promise: JSPromise;
 }
 
+@generateCppClass
 extern class JSAsyncGeneratorObject extends JSGeneratorObject {
+  // Pointer to the head of a singly linked list of AsyncGeneratorRequest, or
+  // undefined.
   queue: HeapObject;
   is_awaiting: Smi;
 }
 
+@generateCppClass
 extern class JSPromise extends JSObject {
+  // A Smi-0-terminated list of PromiseReaction objects while the JSPromise is
+  // not yet settled; otherwise the settled result.
   reactions_or_result: Object;
   flags: Smi;
 }
 
 @abstract
+@generateCppClass
 extern class Microtask extends Struct {
 }
 
+@generateCppClass
 extern class CallbackTask extends Microtask {
   callback: Foreign;
   data: Foreign;
 }
 
+@generateCppClass
 extern class CallableTask extends Microtask {
   callable: JSReceiver;
   context: Context;
@@ -931,11 +1067,13 @@ extern class StackFrameInfo extends Struct {
   type_name: String | Null | Undefined;
   eval_origin: String | Null | Undefined;
   wasm_module_name: String | Null | Undefined;
+  wasm_instance: WasmInstanceObject | Null | Undefined;
   flag: Smi;
 }
 
 type FrameArray extends FixedArray;
 
+@generateCppClass
 extern class StackTraceFrame extends Struct {
   frame_array: FrameArray | Undefined;
   frame_index: Smi;
@@ -943,6 +1081,7 @@ extern class StackTraceFrame extends Struct {
   id: Smi;
 }
 
+@generateCppClass
 extern class ClassPositions extends Struct {
   start: Smi;
   end: Smi;
@@ -958,7 +1097,7 @@ extern class WasmExportedFunctionData extends Struct {
   // The remaining fields are for fast calling from C++. The contract is
   // that they are lazily populated, and either all will be present or none.
   c_wrapper_code: Object;
-  wasm_call_target: Smi;  // Pseudo-smi: one-bit shift on all platforms.
+  wasm_call_target: Smi | Foreign;
   packed_args_size: Smi;
 }
 
@@ -972,7 +1111,7 @@ extern class WasmJSFunctionData extends Struct {
 
 extern class WasmCapiFunctionData extends Struct {
   call_target: RawPtr;
-  embedder_data: RawPtr;
+  embedder_data: Foreign;  // Managed<wasm::FuncData>
   wrapper_code: Code;
   serialized_signature: ByteArray;  // PodArray<wasm::ValueType>
 }
@@ -995,7 +1134,13 @@ extern class WasmDebugInfo extends Struct {
   c_wasm_entry_map: Foreign | Undefined;  // Managed<wasm::SignatureMap>
 }
 
-extern class WasmExceptionTag extends Struct { index: Smi; }
+@generateCppClass
+extern class WasmExceptionTag extends Struct {
+  index: Smi;
+}
+
+const kTaggedSize: constexpr int31 generates 'kTaggedSize';
+const kDoubleSize: constexpr int31 generates 'kDoubleSize';
 
 const kSmiTagSize: constexpr int31 generates 'kSmiTagSize';
 const V8_INFINITY: constexpr float64 generates 'V8_INFINITY';
@@ -1013,8 +1158,8 @@ const PACKED_DOUBLE_ELEMENTS:
     constexpr ElementsKind generates 'PACKED_DOUBLE_ELEMENTS';
 const HOLEY_DOUBLE_ELEMENTS:
     constexpr ElementsKind generates 'HOLEY_DOUBLE_ELEMENTS';
-const LAST_FROZEN_ELEMENTS_KIND:
-    constexpr ElementsKind generates 'LAST_FROZEN_ELEMENTS_KIND';
+const LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND: constexpr ElementsKind
+    generates 'LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND';
 const DICTIONARY_ELEMENTS:
     constexpr ElementsKind generates 'DICTIONARY_ELEMENTS';
 
@@ -1186,6 +1331,7 @@ extern macro Int32FalseConstant(): bool;
 extern macro EmptyStringConstant(): EmptyString;
 extern macro LengthStringConstant(): String;
 extern macro NanConstant(): NaN;
+extern macro IteratorSymbolConstant(): Symbol;
 
 const TheHole: TheHole = TheHoleConstant();
 const Null: Null = NullConstant();
@@ -1207,6 +1353,7 @@ const SKIP_WRITE_BARRIER:
 const UNSAFE_SKIP_WRITE_BARRIER:
     constexpr WriteBarrierMode generates 'UNSAFE_SKIP_WRITE_BARRIER';
 
+@generateCppClass
 extern class AsyncGeneratorRequest extends Struct {
   next: AsyncGeneratorRequest | Undefined;
   resume_mode: Smi;
@@ -1214,6 +1361,7 @@ extern class AsyncGeneratorRequest extends Struct {
   promise: JSPromise;
 }
 
+@generateCppClass
 extern class SourceTextModuleInfoEntry extends Struct {
   export_name: String | Undefined;
   local_name: String | Undefined;
@@ -1224,31 +1372,43 @@ extern class SourceTextModuleInfoEntry extends Struct {
   end_pos: Smi;
 }
 
+@generateCppClass
 extern class PromiseCapability extends Struct {
   promise: JSReceiver | Undefined;
   resolve: Object;
   reject: Object;
 }
 
+@generateCppClass
 extern class PromiseReaction extends Struct {
   next: PromiseReaction | Zero;
   reject_handler: Callable | Undefined;
   fulfill_handler: Callable | Undefined;
+  // Either a JSPromise (in case of native promises), a PromiseCapability
+  // (general case), or undefined (in case of await).
   promise_or_capability: JSPromise | PromiseCapability | Undefined;
 }
 
 @abstract
+@generateCppClass
 extern class PromiseReactionJobTask extends Microtask {
   argument: Object;
   context: Context;
   handler: Callable | Undefined;
+  // Either a JSPromise (in case of native promises), a PromiseCapability
+  // (general case), or undefined (in case of await).
   promise_or_capability: JSPromise | PromiseCapability | Undefined;
 }
 
-extern class PromiseFulfillReactionJobTask extends PromiseReactionJobTask {}
+@generateCppClass
+extern class PromiseFulfillReactionJobTask extends PromiseReactionJobTask {
+}
 
-extern class PromiseRejectReactionJobTask extends PromiseReactionJobTask {}
+@generateCppClass
+extern class PromiseRejectReactionJobTask extends PromiseReactionJobTask {
+}
 
+@generateCppClass
 extern class PromiseResolveThenableJobTask extends Microtask {
   context: Context;
   promise_to_resolve: JSPromise;
@@ -1256,6 +1416,7 @@ extern class PromiseResolveThenableJobTask extends Microtask {
   thenable: JSReceiver;
 }
 
+@generateCppClass
 extern class JSRegExp extends JSObject {
   data: FixedArray | Undefined;
   source: String | Undefined;
@@ -1263,7 +1424,7 @@ extern class JSRegExp extends JSObject {
 }
 
 extern transitioning macro AllocateJSIteratorResult(implicit context: Context)(
-    Object, Boolean): JSObject;
+    JSAny, Boolean): JSObject;
 
 // Note: Although a condition for a FastJSRegExp is having a positive smi
 // lastIndex (see RegExpBuiltinsAssembler::BranchIfFastRegExp), it is possible
@@ -1282,13 +1443,16 @@ RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void;
 
 @hasSameInstanceTypeAsParent
 extern class JSRegExpResult extends JSArray {
-  index: Object;
-  input: Object;
-  groups: Object;
+  index: JSAny;
+  input: JSAny;
+  groups: JSAny;
 }
 
+@generateCppClass
 extern class JSRegExpStringIterator extends JSObject {
-  iterating_reg_exp: Object;
+  // The [[IteratingRegExp]] internal property.
+  iterating_reg_exp: JSAny;
+  // The [[IteratedString]] internal property.
   iterated_string: String;
   flags: Smi;
 }
@@ -1466,32 +1630,33 @@ extern macro Comment(constexpr string);
 extern macro StaticAssert(bool);
 extern macro Print(Object);
 extern macro DebugBreak();
-extern transitioning macro ToInteger_Inline(Context, Object): Number;
+extern transitioning macro ToInteger_Inline(Context, JSAny): Number;
 extern transitioning macro ToInteger_Inline(
-    Context, Object, constexpr ToIntegerTruncationMode): Number;
-extern transitioning macro ToLength_Inline(Context, Object): Number;
-extern transitioning macro ToNumber_Inline(Context, Object): Number;
-extern transitioning macro ToSmiIndex(implicit context: Context)(Object):
+    Context, JSAny, constexpr ToIntegerTruncationMode): Number;
+extern transitioning macro ToLength_Inline(Context, JSAny): Number;
+extern transitioning macro ToNumber_Inline(Context, JSAny): Number;
+extern transitioning macro ToSmiIndex(implicit context: Context)(JSAny):
     PositiveSmi labels IfRangeError;
-extern transitioning macro ToSmiLength(implicit context: Context)(Object):
+extern transitioning macro ToSmiLength(implicit context: Context)(JSAny):
     PositiveSmi labels IfRangeError;
-extern transitioning macro ToString_Inline(Context, Object): String;
+extern transitioning macro ToString_Inline(Context, JSAny): String;
 extern transitioning macro ToThisString(implicit context: Context)(
-    Object, String): String;
+    JSAny, String): String;
 extern transitioning macro ToThisValue(implicit context: Context)(
-    Object, constexpr PrimitiveType, constexpr string): Object;
+    JSAny, constexpr PrimitiveType, constexpr string): JSAny;
 extern transitioning macro GetProperty(implicit context: Context)(
-    Object, Object): Object;
+    JSAny, JSAny): JSAny;
 extern transitioning builtin SetProperty(implicit context: Context)(
-    Object, Object, Object);
+    JSAny, JSAny, JSAny);
 extern transitioning builtin SetPropertyInLiteral(implicit context: Context)(
-    Object, Object, Object);
+    JSAny, JSAny, JSAny);
 extern transitioning builtin DeleteProperty(implicit context: Context)(
-    Object, Object, LanguageMode): Object;
+    JSAny, JSAny | PrivateSymbol, LanguageMode): Boolean;
 extern transitioning builtin HasProperty(implicit context: Context)(
-    Object, Object): Boolean;
+    JSAny, JSAny): Boolean;
 extern transitioning macro HasProperty_Inline(implicit context: Context)(
-    JSReceiver, Object): Boolean;
+    JSReceiver, JSAny): Boolean;
+extern builtin LoadIC(Context, JSAny, JSAny, Smi, FeedbackVector): JSAny;
 
 extern macro ThrowRangeError(implicit context: Context)(
     constexpr MessageTemplate): never;
@@ -1510,43 +1675,60 @@ extern macro ThrowTypeError(implicit context: Context)(
 extern transitioning runtime ThrowTypeErrorIfStrict(implicit context: Context)(
     Smi, Object, Object): void;
 
-extern macro ArraySpeciesCreate(Context, Object, Number): JSReceiver;
+extern transitioning macro ThrowIfNotJSReceiver(implicit context: Context)(
+    JSAny, constexpr MessageTemplate, constexpr string): void;
+
+extern macro ArraySpeciesCreate(Context, JSAny, Number): JSReceiver;
 extern macro ArrayCreate(implicit context: Context)(Number): JSArray;
 extern macro BuildAppendJSArray(
-    constexpr ElementsKind, FastJSArray, Object): void labels Bailout;
+    constexpr ElementsKind, FastJSArray, JSAny): void labels Bailout;
 
 extern macro EnsureArrayPushable(Map): ElementsKind
     labels Bailout;
 extern macro EnsureArrayLengthWritable(Map) labels Bailout;
 // TODO: Reduce duplication once varargs are supported in macros.
 extern macro Construct(implicit context: Context)(
-    Constructor, Object): JSReceiver;
+    Constructor, JSAny): JSReceiver;
 extern macro Construct(implicit context: Context)(
-    Constructor, Object, Object): JSReceiver;
+    Constructor, JSAny, JSAny): JSReceiver;
 extern macro Construct(implicit context: Context)(
-    Constructor, Object, Object, Object): JSReceiver;
+    Constructor, JSAny, JSAny, JSAny): JSReceiver;
 extern macro ConstructWithTarget(implicit context: Context)(
     Constructor, JSReceiver): JSReceiver;
 extern macro ConstructWithTarget(implicit context: Context)(
-    Constructor, JSReceiver, Object): JSReceiver;
+    Constructor, JSReceiver, JSAny): JSReceiver;
 extern macro SpeciesConstructor(implicit context: Context)(
-    Object, JSReceiver): JSReceiver;
+    JSAny, JSReceiver): JSReceiver;
 
 extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool;
 extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32):
     NameDictionary;
 
-extern builtin ToObject(Context, Object): JSReceiver;
-extern macro ToObject_Inline(Context, Object): JSReceiver;
+extern builtin ToObject(Context, JSAny): JSReceiver;
+extern macro ToObject_Inline(Context, JSAny): JSReceiver;
 extern macro IsNullOrUndefined(Object): bool;
 extern macro IsTheHole(Object): bool;
 extern macro IsString(HeapObject): bool;
-transitioning builtin ToString(context: Context, o: Object): String {
+transitioning builtin ToString(context: Context, o: JSAny): String {
   return ToStringImpl(context, o);
 }
-extern transitioning runtime ToStringRT(Context, Object): String;
+extern transitioning runtime ToStringRT(Context, JSAny): String;
 extern transitioning builtin NonPrimitiveToPrimitive_String(
-    Context, Object): Object;
+    Context, JSAny): JSPrimitive;
+extern transitioning builtin NonPrimitiveToPrimitive_Default(
+    Context, JSAny): JSPrimitive;
+
+transitioning macro ToPrimitiveDefault(implicit context: Context)(v: JSAny):
+    JSPrimitive {
+  typeswitch (v) {
+    case (v: JSReceiver): {
+      return NonPrimitiveToPrimitive_Default(context, v);
+    }
+    case (v: JSPrimitive): {
+      return v;
+    }
+  }
+}
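
ToPrimitiveDefault routes receivers through the default NonPrimitiveToPrimitive path and lets primitives pass through untouched. A minimal TypeScript model of that dispatch, assuming the spec's OrdinaryToPrimitive method order for the default hint and omitting the Symbol.toPrimitive lookup for brevity (this is a sketch, not V8 code):

```ts
type JSPrimitive =
    undefined | null | boolean | number | bigint | string | symbol;

const isPrimitive = (x: unknown): x is JSPrimitive =>
    x === null || (typeof x !== "object" && typeof x !== "function");

function toPrimitiveDefault(v: unknown): JSPrimitive {
  if (isPrimitive(v)) return v;  // case (v: JSPrimitive): return v;
  // case (v: JSReceiver): OrdinaryToPrimitive, valueOf before toString.
  for (const name of ["valueOf", "toString"] as const) {
    const method = (v as any)[name];
    if (typeof method === "function") {
      const result = method.call(v);
      if (isPrimitive(result)) return result;
    }
  }
  throw new TypeError("Cannot convert object to primitive value");
}
```
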
 
 extern transitioning runtime NormalizeElements(Context, JSObject);
 extern transitioning runtime TransitionElementsKindWithKind(
@@ -1556,18 +1738,15 @@ extern macro LoadBufferObject(RawPtr, constexpr int32): Object;
 extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr;
 extern macro LoadBufferSmi(RawPtr, constexpr int32): Smi;
 
-extern macro LoadRoot(constexpr RootIndex): Object;
-extern macro StoreRoot(constexpr RootIndex, Object): Object;
-
 extern runtime StringEqual(Context, String, String): Oddball;
 extern builtin StringLessThan(Context, String, String): Boolean;
 extern macro StringCharCodeAt(String, intptr): int32;
 extern runtime StringCompareSequence(Context, String, String, Number): Boolean;
 extern macro StringFromSingleCharCode(int32): String;
 
-extern macro StrictEqual(Object, Object): Boolean;
+extern macro StrictEqual(JSAny, JSAny): Boolean;
 extern macro SmiLexicographicCompare(Smi, Smi): Smi;
-extern runtime ReThrow(Context, Object): never;
+extern runtime ReThrow(Context, JSAny): never;
 extern runtime ThrowInvalidStringLength(Context): never;
 
 extern operator '==' macro WordEqual(RawPtr, RawPtr): bool;
@@ -1638,38 +1817,51 @@ extern operator '<' macro Float64LessThan(float64, float64): bool;
 extern macro BranchIfNumberEqual(Number, Number): never
     labels Taken, NotTaken;
 operator '==' macro IsNumberEqual(a: Number, b: Number): bool {
-  return (BranchIfNumberEqual(a, b)) ? true : false;
+  BranchIfNumberEqual(a, b) otherwise return true, return false;
 }
 operator '!=' macro IsNumberNotEqual(a: Number, b: Number): bool {
-  return (BranchIfNumberEqual(a, b)) ? false : true;
+  return !(a == b);
 }
-extern operator '<' macro BranchIfNumberLessThan(Number, Number): never
+extern macro BranchIfNumberLessThan(Number, Number): never
     labels Taken, NotTaken;
-extern operator '<=' macro BranchIfNumberLessThanOrEqual(Number, Number): never
+operator '<' macro NumberIsLessThan(a: Number, b: Number): bool {
+  BranchIfNumberLessThan(a, b) otherwise return true, return false;
+}
+extern macro BranchIfNumberLessThanOrEqual(Number, Number): never
     labels Taken, NotTaken;
+operator '<=' macro NumberIsLessThanOrEqual(a: Number, b: Number): bool {
+  BranchIfNumberLessThanOrEqual(a, b) otherwise return true, return false;
+}
 
-extern operator '>' macro BranchIfNumberGreaterThan(Number, Number): never
-    labels Taken, NotTaken;
-extern operator '>=' macro BranchIfNumberGreaterThanOrEqual(
-    Number, Number): never
+operator '>' macro NumberIsGreaterThan(a: Number, b: Number): bool {
+  return b < a;
+}
+operator '>=' macro NumberIsGreaterThanOrEqual(a: Number, b: Number): bool {
+  return b <= a;
+}
+
+extern macro BranchIfFloat64IsNaN(float64): never
     labels Taken, NotTaken;
+macro Float64IsNaN(n: float64): bool {
+  BranchIfFloat64IsNaN(n) otherwise return true, return false;
+}
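
The extern Branch* macros expose two-way control flow only; the operator macros above adapt them to bool with the `otherwise return true, return false` idiom. For the NaN case specifically, the underlying float64 test reduces to IEEE-754 self-inequality, as this runnable TypeScript check shows:

```ts
// NaN is the only IEEE-754 value that compares unequal to itself, so a
// self-comparison is a complete NaN test (this is also how Number.isNaN
// can be polyfilled).
const float64IsNaN = (n: number): boolean => n !== n;

console.log(float64IsNaN(NaN));       // true
console.log(float64IsNaN(0 / 0));     // true
console.log(float64IsNaN(Infinity));  // false
```
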
 
-// The type of all tagged values that can safely be compared with WordEqual.
+// The type of all tagged values that can safely be compared with TaggedEqual.
 type TaggedWithIdentity =
     JSReceiver | FixedArrayBase | Oddball | Map | EmptyString;
 
-extern operator '==' macro WordEqual(TaggedWithIdentity, Object): bool;
-extern operator '==' macro WordEqual(Object, TaggedWithIdentity): bool;
-extern operator '==' macro WordEqual(
+extern operator '==' macro TaggedEqual(TaggedWithIdentity, Object): bool;
+extern operator '==' macro TaggedEqual(Object, TaggedWithIdentity): bool;
+extern operator '==' macro TaggedEqual(
     TaggedWithIdentity, TaggedWithIdentity): bool;
-extern operator '!=' macro WordNotEqual(TaggedWithIdentity, Object): bool;
-extern operator '!=' macro WordNotEqual(Object, TaggedWithIdentity): bool;
-extern operator '!=' macro WordNotEqual(
+extern operator '!=' macro TaggedNotEqual(TaggedWithIdentity, Object): bool;
+extern operator '!=' macro TaggedNotEqual(Object, TaggedWithIdentity): bool;
+extern operator '!=' macro TaggedNotEqual(
     TaggedWithIdentity, TaggedWithIdentity): bool;
 // Do not overload == and != if it is unclear if object identity is the right
 // equality.
-extern macro WordEqual(Object, Object): bool;
-extern macro WordNotEqual(Object, Object): bool;
+extern macro TaggedEqual(Object, Object): bool;
+extern macro TaggedNotEqual(Object, Object): bool;
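
TaggedEqual is raw pointer comparison, so the `==`/`!=` overloads are restricted to types whose values have a single canonical heap representation; strings and heap numbers do not qualify, since equal values can live at different addresses. The JavaScript analogue of the pitfall, runnable as TypeScript:

```ts
// Reference equality (===, the analogue of TaggedEqual on pointers) is not
// value equality once a value can have several heap representations.
const a = new Number(1.5);
const b = new Number(1.5);
console.log(a === b);                      // false: distinct heap objects
console.log(a.valueOf() === b.valueOf());  // true: same numeric value
```
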
 
 extern operator '+' macro SmiAdd(Smi, Smi): Smi;
 extern operator '-' macro SmiSub(Smi, Smi): Smi;
@@ -1707,11 +1899,14 @@ extern operator '*' macro ConstexprInt31Mul(
     constexpr int31, constexpr int31): constexpr int31;
 extern operator '-' macro Int32Sub(int32, int32): int32;
 extern operator '*' macro Int32Mul(int32, int32): int32;
+extern operator '/' macro Int32Div(int32, int32): int32;
 extern operator '%' macro Int32Mod(int32, int32): int32;
 extern operator '&' macro Word32And(int32, int32): int32;
 extern operator '&' macro Word32And(uint32, uint32): uint32;
 extern operator '==' macro
 ConstexprInt31Equal(constexpr int31, constexpr int31): constexpr bool;
+extern operator '!=' macro
+ConstexprInt31NotEqual(constexpr int31, constexpr int31): constexpr bool;
 extern operator '>=' macro
 ConstexprInt31GreaterThanEqual(
     constexpr int31, constexpr int31): constexpr bool;
@@ -1732,6 +1927,8 @@ extern operator '!=' macro Word32NotEqual(bool, bool): bool;
 
 extern operator '+' macro Float64Add(float64, float64): float64;
 extern operator '-' macro Float64Sub(float64, float64): float64;
+extern operator '*' macro Float64Mul(float64, float64): float64;
+extern operator '/' macro Float64Div(float64, float64): float64;
 
 extern operator '+' macro NumberAdd(Number, Number): Number;
 extern operator '-' macro NumberSub(Number, Number): Number;
@@ -1845,6 +2042,146 @@ Cast<Number>(o: Object): Number
   return TaggedToNumber(o) otherwise CastError;
 }
 
+Cast<Undefined>(o: Object): Undefined
+    labels CastError {
+  if (o != Undefined) goto CastError;
+  return %RawDownCast<Undefined>(o);
+}
+
+Cast<Numeric>(o: Object): Numeric labels CastError {
+  typeswitch (o) {
+    case (o: Number): {
+      return o;
+    }
+    case (o: BigInt): {
+      return o;
+    }
+    case (HeapObject): {
+      goto CastError;
+    }
+  }
+}
+
+Cast<TheHole>(o: Object): TheHole labels CastError {
+  if (o == TheHole) return %RawDownCast<TheHole>(o);
+  goto CastError;
+}
+
+Cast<TheHole>(o: HeapObject): TheHole labels CastError {
+  const o: Object = o;
+  return Cast<TheHole>(o) otherwise CastError;
+}
+
+Cast<True>(o: Object): True labels CastError {
+  if (o == True) return %RawDownCast<True>(o);
+  goto CastError;
+}
+
+Cast<True>(o: HeapObject): True labels CastError {
+  const o: Object = o;
+  return Cast<True>(o) otherwise CastError;
+}
+
+Cast<False>(o: Object): False labels CastError {
+  if (o == False) return %RawDownCast<False>(o);
+  goto CastError;
+}
+
+Cast<False>(o: HeapObject): False labels CastError {
+  const o: Object = o;
+  return Cast<False>(o) otherwise CastError;
+}
+
+Cast<Boolean>(o: Object): Boolean labels CastError {
+  typeswitch (o) {
+    case (o: True): {
+      return o;
+    }
+    case (o: False): {
+      return o;
+    }
+    case (Object): {
+      goto CastError;
+    }
+  }
+}
+
+Cast<Boolean>(o: HeapObject): Boolean labels CastError {
+  const o: Object = o;
+  return Cast<Boolean>(o) otherwise CastError;
+}
+
+// TODO(tebbi): These trivial casts for union types should be generated
+// automatically.
+
+Cast<JSPrimitive>(o: Object): JSPrimitive labels CastError {
+  typeswitch (o) {
+    case (o: Numeric): {
+      return o;
+    }
+    case (o: String): {
+      return o;
+    }
+    case (o: Symbol): {
+      return o;
+    }
+    case (o: Boolean): {
+      return o;
+    }
+    case (o: Undefined): {
+      return o;
+    }
+    case (o: Null): {
+      return o;
+    }
+    case (Object): {
+      goto CastError;
+    }
+  }
+}
+
+Cast<JSAny>(o: Object): JSAny labels CastError {
+  typeswitch (o) {
+    case (o: JSPrimitive): {
+      return o;
+    }
+    case (o: JSReceiver): {
+      return o;
+    }
+    case (Object): {
+      goto CastError;
+    }
+  }
+}
+
+Cast<JSAny | TheHole>(o: Object): JSAny | TheHole labels CastError {
+  typeswitch (o) {
+    case (o: JSAny): {
+      return o;
+    }
+    case (o: TheHole): {
+      return o;
+    }
+    case (Object): {
+      goto CastError;
+    }
+  }
+}
+
+Cast<Number | TheHole>(o: Object): Number | TheHole labels CastError {
+  typeswitch (o) {
+    case (o: Number): {
+      return o;
+    }
+    case (o: TheHole): {
+      return o;
+    }
+    case (Object): {
+      goto CastError;
+    }
+  }
+}
+
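As the TODO notes, these union casts are mechanical: typeswitch over the arms, return on a match, fall through to CastError. A TypeScript sketch of the same shape, with an exception standing in for the Torque label and an illustrative hole sentinel:

```ts
class CastError extends Error {}
const TheHole = Symbol("the_hole");
type Hole = typeof TheHole;

// Model of Cast<Number | TheHole>: accept either arm, reject everything else.
function castNumberOrTheHole(o: unknown): number | Hole {
  if (typeof o === "number") return o;  // case (o: Number)
  if (o === TheHole) return o;          // case (o: TheHole)
  throw new CastError("cast failed");   // case (Object): goto CastError
}
```
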
 macro Cast<A: type>(o: HeapObject): A
     labels CastError;
 
@@ -1859,6 +2196,12 @@ Cast<Null>(o: HeapObject): Null
   return %RawDownCast<Null>(o);
 }
 
+Cast<Undefined>(o: HeapObject): Undefined
+    labels CastError {
+  const o: Object = o;
+  return Cast<Undefined>(o) otherwise CastError;
+}
+
 Cast<FixedArray>(o: HeapObject): FixedArray
     labels CastError {
   return HeapObjectToFixedArray(o) otherwise CastError;
@@ -1928,6 +2271,12 @@ Cast<Context>(o: HeapObject): Context
   goto CastError;
 }
 
+Cast<NativeContext>(o: HeapObject): NativeContext
+    labels CastError {
+  if (IsNativeContext(o)) return %RawDownCast<NativeContext>(o);
+  goto CastError;
+}
+
 Cast<JSObject>(o: HeapObject): JSObject
     labels CastError {
   if (IsJSObject(o)) return %RawDownCast<JSObject>(o);
@@ -1957,6 +2306,27 @@ Cast<Symbol>(o: HeapObject): Symbol
   goto CastError;
 }
 
+macro Cast<T: type>(o: Symbol): T labels CastError;
+Cast<PublicSymbol>(o: Symbol): PublicSymbol labels CastError {
+  if (IsPrivateSymbol(o)) goto CastError;
+  return %RawDownCast<PublicSymbol>(o);
+}
+Cast<PrivateSymbol>(o: Symbol): PrivateSymbol labels CastError {
+  if (IsPrivateSymbol(o)) {
+    return %RawDownCast<PrivateSymbol>(o);
+  }
+  goto CastError;
+}
+
+Cast<PublicSymbol>(o: HeapObject): PublicSymbol labels CastError {
+  const o = Cast<Symbol>(o) otherwise CastError;
+  return Cast<PublicSymbol>(o) otherwise CastError;
+}
+Cast<PrivateSymbol>(o: HeapObject): PrivateSymbol labels CastError {
+  const o = Cast<Symbol>(o) otherwise CastError;
+  return Cast<PrivateSymbol>(o) otherwise CastError;
+}
+
 Cast<DirectString>(o: HeapObject): DirectString
     labels CastError {
   return TaggedToDirectString(o) otherwise CastError;
@@ -2014,7 +2384,13 @@ Cast<JSArgumentsObjectWithLength>(implicit context: Context)(o: HeapObject):
 
 Cast<FastJSRegExp>(implicit context: Context)(o: HeapObject): FastJSRegExp
     labels CastError {
-  if (regexp::BranchIfFastRegExp(o)) return %RawDownCast<FastJSRegExp>(o);
+  // TODO(jgruber): Remove or redesign this. There is no single 'fast' regexp;
+  // the conditions that make a regexp object fast differ based on the callsite.

+  // For now, run the strict variant since replace (the only current callsite)
+  // accesses flag getters.
+  if (regexp::IsFastRegExpStrict(o)) {
+    return %RawDownCast<FastJSRegExp>(o);
+  }
   goto CastError;
 }
 
@@ -2042,7 +2418,8 @@ Cast<FastJSArrayForRead>(implicit context: Context)(o: HeapObject):
 
   // Bailout if receiver has slow elements.
   const elementsKind: ElementsKind = LoadMapElementsKind(map);
-  if (!IsElementsKindLessThanOrEqual(elementsKind, LAST_FROZEN_ELEMENTS_KIND))
+  if (!IsElementsKindLessThanOrEqual(
+          elementsKind, LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND))
     goto CastError;
 
   // Verify that our prototype is the initial array prototype.
@@ -2076,7 +2453,7 @@ Cast<FastJSArrayForReadWithNoCustomIteration>(implicit context: Context)(
   return %RawDownCast<FastJSArrayForReadWithNoCustomIteration>(a);
 }
 
-Cast<JSReceiver>(implicit context: Context)(o: HeapObject): JSReceiver
+Cast<JSReceiver>(o: HeapObject): JSReceiver
     labels CastError {
   if (IsJSReceiver(o)) return %RawDownCast<JSReceiver>(o);
   goto CastError;
@@ -2103,6 +2480,21 @@ Cast<CoverageInfo>(implicit context: Context)(o: HeapObject): CoverageInfo
   goto CastError;
 }
 
+Cast<JSReceiver | Null>(o: HeapObject): JSReceiver | Null
+    labels CastError {
+  typeswitch (o) {
+    case (o: Null): {
+      return o;
+    }
+    case (o: JSReceiver): {
+      return o;
+    }
+    case (HeapObject): {
+      goto CastError;
+    }
+  }
+}
+
 extern macro AllocateHeapNumberWithValue(float64): HeapNumber;
 extern macro ChangeInt32ToTagged(int32): Number;
 extern macro ChangeUint32ToTagged(uint32): Number;
@@ -2132,8 +2524,8 @@ extern macro ChangeUint32ToWord(uint32): uintptr;  // Doesn't sign-extend.
 extern macro LoadNativeContext(Context): NativeContext;
 extern macro TruncateFloat64ToFloat32(float64): float32;
 extern macro TruncateHeapNumberValueToWord32(HeapNumber): int32;
-extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map;
-extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map;
+extern macro LoadJSArrayElementsMap(constexpr ElementsKind, NativeContext): Map;
+extern macro LoadJSArrayElementsMap(ElementsKind, NativeContext): Map;
 extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr;
 extern macro TryNumberToUintPtr(Number): uintptr labels IfNegative;
 extern macro NumberConstant(constexpr float64): Number;
@@ -2157,6 +2549,7 @@ extern macro IntPtrConstant(constexpr ContextSlot): ContextSlot;
 extern macro IntPtrConstant(constexpr intptr): intptr;
 extern macro PointerConstant(constexpr RawPtr): RawPtr;
 extern macro SingleCharacterStringConstant(constexpr string): String;
+extern macro Float64SilenceNaN(float64): float64;
 
 extern macro BitcastWordToTaggedSigned(intptr): Smi;
 extern macro BitcastWordToTaggedSigned(uintptr): Smi;
@@ -2241,6 +2634,9 @@ FromConstexpr<ElementsKind, constexpr ElementsKind>(e: constexpr ElementsKind):
 FromConstexpr<Object, constexpr string>(s: constexpr string): Object {
   return StringConstant(s);
 }
+FromConstexpr<JSAny, constexpr string>(s: constexpr string): JSAny {
+  return StringConstant(s);
+}
 FromConstexpr<NativeContextSlot, constexpr NativeContextSlot>(
     c: constexpr NativeContextSlot): NativeContextSlot {
   return IntPtrConstant(c);
@@ -2384,20 +2780,9 @@ Convert<bint, Smi>(v: Smi): bint {
   return SmiToBInt(v);
 }
 
-macro BranchIf<A: type, B: type>(implicit context: Context)(o: B): never
-    labels True, False {
-  Cast<A>(o) otherwise False;
-  goto True;
-}
-
-macro BranchIfNot<A: type, B: type>(implicit context: Context)(o: B): never
-    labels True, False {
-  Cast<A>(o) otherwise True;
-  goto False;
-}
-
 macro Is<A: type, B: type>(implicit context: Context)(o: B): bool {
-  return (BranchIf<A, B>(o)) ? true : false;
+  Cast<A>(o) otherwise return false;
+  return true;
 }
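
The new Is<A, B> body is the general recipe for turning a labeled cast into a predicate: attempt the cast and convert the failure label into `return false`. A small TypeScript analogue, with a throwing function standing in for Cast<A> (names here are illustrative):

```ts
// Adapt a throwing cast (the analogue of `Cast<A>(o) otherwise CastError`)
// into a predicate, mirroring `Cast<A>(o) otherwise return false`.
function is<A>(o: unknown, cast: (o: unknown) => A): boolean {
  try {
    cast(o);
    return true;
  } catch {
    return false;
  }
}

// Example: a throwing cast for numbers.
const castNumber = (o: unknown): number => {
  if (typeof o === "number") return o;
  throw new Error("CastError");
};
console.log(is(42, castNumber));   // true
console.log(is("x", castNumber));  // false
```
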
 
 macro UnsafeCast<A: type>(implicit context: Context)(o: Object): A {
@@ -2405,17 +2790,15 @@ macro UnsafeCast<A: type>(implicit context: Context)(o: Object): A {
   return %RawDownCast<A>(o);
 }
 
-UnsafeCast<Object>(o: Object): Object {
-  return o;
-}
+extern macro FixedArrayMapConstant(): Map;
+extern macro FixedCOWArrayMapConstant(): Map;
+extern macro EmptyByteArrayConstant(): ByteArray;
+extern macro EmptyFixedArrayConstant(): FixedArray;
 
-const kFixedArrayMap: Map =
-    %RawDownCast<Map>(LoadRoot(kFixedArrayMapRootIndex));
-const kCOWMap: Map = %RawDownCast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
-const kEmptyByteArray: ByteArray =
-    %RawDownCast<ByteArray>(LoadRoot(kEmptyByteArrayRootIndex));
-const kEmptyFixedArray: FixedArray =
-    %RawDownCast<FixedArray>(LoadRoot(kEmptyFixedArrayRootIndex));
+const kFixedArrayMap: Map = FixedArrayMapConstant();
+const kCOWMap: Map = FixedCOWArrayMapConstant();
+const kEmptyByteArray: ByteArray = EmptyByteArrayConstant();
+const kEmptyFixedArray: FixedArray = EmptyFixedArrayConstant();
 
 extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map):
     bool;
@@ -2478,6 +2861,8 @@ extern operator '.floats[]=' macro StoreFixedDoubleArrayElement(
     FixedDoubleArray, intptr, float64): void;
 extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
     FixedDoubleArray, Smi, float64): void;
+extern operator '.floats[]' macro LoadFixedDoubleArrayElement(
+    FixedDoubleArray, intptr): float64;
 operator '[]=' macro StoreFixedDoubleArrayDirect(
     a: FixedDoubleArray, i: Smi, v: Number) {
   a.floats[i] = Convert<float64>(v);
@@ -2487,14 +2872,14 @@ operator '[]=' macro StoreFixedArrayDirect(a: FixedArray, i: Smi, v: Object) {
 }
 
 extern macro GetNumberDictionaryNumberOfElements(NumberDictionary): Smi;
-extern macro GetIteratorMethod(implicit context: Context)(HeapObject): Object
+extern macro GetIteratorMethod(implicit context: Context)(HeapObject): JSAny
     labels IfIteratorUndefined;
 
 extern macro LoadConstructorOrBackPointer(Map): Object;
 
-extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): Object
+extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): JSAny
     labels NotData, IfHole;
-extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, Object)
+extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, JSAny)
     labels NotData, IfHole, ReadOnly;
 
 extern macro IsFastElementsKind(ElementsKind): bool;
@@ -2607,16 +2992,15 @@ macro GetRegExpLastMatchInfo(implicit context: Context)(): RegExpMatchInfo {
       LoadNativeContext(context)[REGEXP_LAST_MATCH_INFO_INDEX]);
 }
 
-extern transitioning macro Call(Context, Callable, Object): Object;
-extern transitioning macro Call(Context, Callable, Object, Object): Object;
-extern transitioning macro Call(
-    Context, Callable, Object, Object, Object): Object;
+extern transitioning macro Call(Context, Callable, JSAny): JSAny;
+extern transitioning macro Call(Context, Callable, JSAny, JSAny): JSAny;
+extern transitioning macro Call(Context, Callable, JSAny, JSAny, JSAny): JSAny;
 extern transitioning macro Call(
-    Context, Callable, Object, Object, Object, Object): Object;
+    Context, Callable, JSAny, JSAny, JSAny, JSAny): JSAny;
 extern transitioning macro Call(
-    Context, Callable, Object, Object, Object, Object, Object): Object;
+    Context, Callable, JSAny, JSAny, JSAny, JSAny, JSAny): JSAny;
 extern transitioning macro Call(
-    Context, Callable, Object, Object, Object, Object, Object, Object): Object;
+    Context, Callable, JSAny, JSAny, JSAny, JSAny, JSAny, JSAny): JSAny;
 
 extern builtin CloneFastJSArray(Context, FastJSArrayForCopy): JSArray;
 extern macro ExtractFixedArray(FixedArrayBase, Smi, Smi, Smi): FixedArrayBase;
@@ -2665,20 +3049,24 @@ macro TorqueCopyElements(
       count);
 }
 
-macro LoadElementNoHole<T: type>(a: JSArray, index: Smi): Object
+macro LoadElementNoHole<T: type>(a: JSArray, index: Smi): JSAny
     labels IfHole;
 
 LoadElementNoHole<FixedArray>(implicit context: Context)(
-    a: JSArray, index: Smi): Object
+    a: JSArray, index: Smi): JSAny
     labels IfHole {
   try {
     const elements: FixedArray =
         Cast<FixedArray>(a.elements) otherwise Unexpected;
-    const e: Object = elements.objects[index];
-    if (e == TheHole) {
-      goto IfHole;
+    const e = UnsafeCast<(JSAny | TheHole)>(elements.objects[index]);
+    typeswitch (e) {
+      case (TheHole): {
+        goto IfHole;
+      }
+      case (e: JSAny): {
+        return e;
+      }
     }
-    return e;
   }
   label Unexpected {
     unreachable;
@@ -2686,7 +3074,7 @@ LoadElementNoHole<FixedArray>(implicit context: Context)(
 }
 
 LoadElementNoHole<FixedDoubleArray>(implicit context: Context)(
-    a: JSArray, index: Smi): Object
+    a: JSArray, index: Smi): JSAny
     labels IfHole {
   try {
     const elements: FixedDoubleArray =
@@ -2717,7 +3105,7 @@ struct FastJSArrayWitness {
     this.unstable = %RawDownCast<FastJSArray>(this.stable);
   }
 
-  LoadElementNoHole(implicit context: Context)(k: Smi): Object
+  LoadElementNoHole(implicit context: Context)(k: Smi): JSAny
       labels FoundHole {
     if (this.hasDoubles) {
       return LoadElementNoHole<FixedDoubleArray>(this.unstable, k)
@@ -2740,7 +3128,7 @@ struct FastJSArrayWitness {
     }
   }
 
-  LoadElementOrUndefined(implicit context: Context)(k: Smi): Object {
+  LoadElementOrUndefined(implicit context: Context)(k: Smi): JSAny {
     try {
       return this.LoadElementNoHole(k) otherwise FoundHole;
     }
@@ -2760,7 +3148,7 @@ struct FastJSArrayWitness {
     this.unstable.length = newLength;
   }
 
-  Push(value: Object) labels Failed {
+  Push(value: JSAny) labels Failed {
     assert(this.arrayIsPushable);
     if (this.hasDoubles) {
       BuildAppendJSArray(HOLEY_DOUBLE_ELEMENTS, this.unstable, value)
@@ -2832,7 +3220,7 @@ struct FastJSArrayForReadWitness {
     this.unstable = %RawDownCast<FastJSArrayForRead>(this.stable);
   }
 
-  LoadElementNoHole(implicit context: Context)(k: Smi): Object
+  LoadElementNoHole(implicit context: Context)(k: Smi): JSAny
       labels FoundHole {
     if (this.hasDoubles) {
       return LoadElementNoHole<FixedDoubleArray>(this.unstable, k)
@@ -2876,6 +3264,7 @@ extern macro IsJSObject(HeapObject): bool;
 extern macro IsJSTypedArray(HeapObject): bool;
 extern macro IsNumberDictionary(HeapObject): bool;
 extern macro IsContext(HeapObject): bool;
+extern macro IsNativeContext(HeapObject): bool;
 extern macro IsJSReceiver(HeapObject): bool;
 extern macro TaggedIsCallable(Object): bool;
 extern macro IsDetachedBuffer(JSArrayBuffer): bool;
@@ -2892,7 +3281,7 @@ extern macro IsJSArrayMap(Map): bool;
 extern macro IsExtensibleMap(Map): bool;
 extern macro IsJSPrimitiveWrapper(HeapObject): bool;
 extern macro IsCustomElementsReceiverInstanceType(int32): bool;
-extern macro Typeof(Object): Object;
+extern macro Typeof(JSAny): String;
 
 // Return true iff number is NaN.
 macro NumberIsNaN(number: Number): bool {
@@ -2908,31 +3297,35 @@ macro NumberIsNaN(number: Number): bool {
 }
 
 extern macro GotoIfForceSlowPath() labels Taken;
-extern macro BranchIfToBooleanIsTrue(Object): never
+macro IsForceSlowPath(): bool {
+  GotoIfForceSlowPath() otherwise return true;
+  return false;
+}
+
+extern macro BranchIfToBooleanIsTrue(JSAny): never
     labels Taken, NotTaken;
-extern macro BranchIfToBooleanIsFalse(Object): never
+extern macro BranchIfToBooleanIsFalse(JSAny): never
     labels Taken, NotTaken;
 
-macro ToBoolean(obj: Object): bool {
-  if (BranchIfToBooleanIsTrue(obj)) {
-    return true;
-  } else {
-    return false;
-  }
+macro ToBoolean(obj: JSAny): bool {
+  BranchIfToBooleanIsTrue(obj) otherwise return true, return false;
 }
 
 @export
 macro RequireObjectCoercible(implicit context: Context)(
-    value: Object, name: constexpr string): Object {
+    value: JSAny, name: constexpr string): JSAny {
   if (IsNullOrUndefined(value)) {
     ThrowTypeError(kCalledOnNullOrUndefined, name);
   }
   return value;
 }
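
RequireObjectCoercible rejects exactly null and undefined and passes every other value through, which is the spec guard most prototype methods start with. A runnable TypeScript equivalent:

```ts
// Spec RequireObjectCoercible: throw only for null/undefined, pass
// everything else through unchanged (even NaN, 0, and "").
function requireObjectCoercible<T>(value: T, name: string): NonNullable<T> {
  if (value === null || value === undefined) {
    throw new TypeError(`${name} called on null or undefined`);
  }
  return value as NonNullable<T>;
}
```
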
 
-extern macro BranchIfSameValue(Object, Object): never labels Taken, NotTaken;
+extern macro BranchIfSameValue(JSAny, JSAny): never labels Taken, NotTaken;
+macro SameValue(a: JSAny, b: JSAny): bool {
+  BranchIfSameValue(a, b) otherwise return true, return false;
+}
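
SameValue is not interchangeable with strict equality: it differs in exactly two corners of the Number type. Since SameValue is exposed to JavaScript as Object.is, the difference is easy to demonstrate:

```ts
console.log(Object.is(NaN, NaN));  // true,  while NaN === NaN is false
console.log(Object.is(0, -0));     // false, while 0 === -0 is true
console.log(Object.is("a", "a"));  // true; identical to === everywhere else
```
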
 
-transitioning macro ToIndex(input: Object, context: Context): Number
+transitioning macro ToIndex(input: JSAny, context: Context): Number
     labels RangeError {
   if (input == Undefined) {
     return 0;
@@ -2946,7 +3339,7 @@ transitioning macro ToIndex(input: Object, context: Context): Number
   return value;
 }
 
-transitioning macro GetLengthProperty(implicit context: Context)(o: Object):
+transitioning macro GetLengthProperty(implicit context: Context)(o: JSAny):
     Number {
   try {
     typeswitch (o) {
@@ -2956,18 +3349,18 @@ transitioning macro GetLengthProperty(implicit context: Context)(o: Object):
       case (a: JSArgumentsObjectWithLength): {
         goto ToLength(a.length);
       }
-      case (Object): deferred {
+      case (JSAny): deferred {
         goto ToLength(GetProperty(o, kLengthString));
       }
     }
   }
-  label ToLength(length: Object) deferred {
+  label ToLength(length: JSAny) deferred {
     return ToLength_Inline(context, length);
   }
 }
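
GetLengthProperty pairs fast paths for arrays and arguments objects with the generic Get + ToLength fallback; ToLength clamps to integers in [0, 2^53 - 1]. A TypeScript sketch under the assumption that plain property access stands in for GetProperty:

```ts
const MAX_LENGTH = 2 ** 53 - 1;

// Spec ToLength: NaN and negative inputs clamp to 0, oversized inputs to
// 2^53 - 1.
function toLength(x: unknown): number {
  const n = Math.trunc(Number(x));
  return Number.isNaN(n) ? 0 : Math.min(Math.max(n, 0), MAX_LENGTH);
}

// Fast path for real arrays; generic Get + ToLength otherwise.
function getLengthProperty(o: { length?: unknown }): number {
  if (Array.isArray(o)) return o.length;
  return toLength(o.length);
}
```
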
 
 transitioning macro GetMethod(implicit context: Context)(
-    o: Object, name: constexpr string): Callable labels IfNullOrUndefined {
+    o: JSAny, name: constexpr string): Callable labels IfNullOrUndefined {
   const value = GetProperty(o, name);
   if (value == Undefined || value == Null) goto IfNullOrUndefined;
   return Cast<Callable>(value)
@@ -2976,44 +3369,37 @@ transitioning macro GetMethod(implicit context: Context)(
 
 extern macro NumberToString(Number): String;
 extern macro IsOneByteStringInstanceType(InstanceType): bool;
-extern macro AllocateSeqOneByteString(implicit context: Context)(uint32):
-    String;
-extern macro AllocateSeqTwoByteString(implicit context: Context)(uint32):
-    String;
+extern macro AllocateSeqOneByteString(uint32): String;
+extern macro AllocateSeqTwoByteString(uint32): String;
 extern macro ConvertToRelativeIndex(implicit context: Context)(
-    Object, intptr): intptr;
+    JSAny, intptr): intptr;
 
-extern builtin ObjectToString(Context, Object): Object;
+extern builtin ObjectToString(Context, JSAny): JSAny;
 extern builtin StringRepeat(Context, String, Number): String;
 
 struct KeyValuePair {
-  key: Object;
-  value: Object;
+  key: JSAny;
+  value: JSAny;
 }
 
 // Macro definitions for compatibility that expose functionality to the CSA
 // using "legacy" APIs. In Torque code, these should not be used.
 @export
 macro IsFastJSArray(o: Object, context: Context): bool {
-  try {
-    // Long-term, it's likely not a good idea to have this slow-path test here,
-    // since it fundamentally breaks the type system.
-    GotoIfForceSlowPath() otherwise ForceSlow;
-  }
-  label ForceSlow {
-    return false;
-  }
-
+  // Long-term, it's likely not a good idea to have this slow-path test here,
+  // since it fundamentally breaks the type system.
+  if (IsForceSlowPath()) return false;
   return Is<FastJSArray>(o);
 }
 
 @export
 macro BranchIfFastJSArray(o: Object, context: Context): never labels True,
     False {
-  // Long-term, it's likely not a good idea to have this slow-path test here,
-  // since it fundamentally breaks the type system.
-  GotoIfForceSlowPath() otherwise False;
-  BranchIf<FastJSArray>(o) otherwise True, False;
+  if (IsFastJSArray(o, context)) {
+    goto True;
+  } else {
+    goto False;
+  }
 }
 
 @export
@@ -3021,8 +3407,12 @@ macro BranchIfFastJSArrayForRead(o: Object, context: Context):
     never labels True, False {
   // Long-term, it's likely not a good idea to have this slow-path test here,
   // since it fundamentally breaks the type system.
-  GotoIfForceSlowPath() otherwise False;
-  BranchIf<FastJSArrayForRead>(o) otherwise True, False;
+  if (IsForceSlowPath()) goto False;
+  if (Is<FastJSArrayForRead>(o)) {
+    goto True;
+  } else {
+    goto False;
+  }
 }
 
 @export
@@ -3037,7 +3427,7 @@ macro IsFastJSArrayForReadWithNoCustomIteration(context: Context, o: Object):
 }
 
 extern transitioning runtime
-CreateDataProperty(implicit context: Context)(JSReceiver, Object, Object);
+CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, JSAny);
 
 namespace runtime {
   extern runtime
@@ -3045,7 +3435,7 @@ namespace runtime {
 }
 
 transitioning builtin FastCreateDataProperty(implicit context: Context)(
-    receiver: JSReceiver, key: Object, value: Object): Object {
+    receiver: JSReceiver, key: JSAny, value: JSAny): Object {
   try {
     const array = Cast<FastJSArray>(receiver) otherwise Slow;
     const index: Smi = Cast<Smi>(key) otherwise goto Slow;
@@ -3090,8 +3480,8 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)(
 }
 
 @export
-transitioning macro ToStringImpl(context: Context, o: Object): String {
-  let result: Object = o;
+transitioning macro ToStringImpl(context: Context, o: JSAny): String {
+  let result: JSAny = o;
   while (true) {
     typeswitch (result) {
       case (num: Number): {
@@ -3110,7 +3500,7 @@ transitioning macro ToStringImpl(context: Context, o: Object): String {
       case (Symbol): {
         ThrowTypeError(kSymbolToString);
       }
-      case (Object): {
+      case (JSAny): {
         return ToStringRT(context, o);
       }
     }
@@ -3160,3 +3550,14 @@ builtin CheckNumberInRange(implicit context: Context)(
     unreachable;
   }
 }
+
+macro ReplaceTheHoleWithUndefined(o: JSAny | TheHole): JSAny {
+  typeswitch (o) {
+    case (TheHole): {
+      return Undefined;
+    }
+    case (a: JSAny): {
+      return a;
+    }
+  }
+}
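
ReplaceTheHoleWithUndefined is the boundary where hole-tolerant loads such as LoadElementOrUndefined stop leaking engine internals: a hole never reaches JavaScript; it surfaces as undefined. Modeled with a sentinel symbol:

```ts
const TheHole = Symbol("the_hole");

// Holes are an engine-internal sentinel; JavaScript code only ever
// observes undefined.
function replaceTheHoleWithUndefined<T>(o: T | typeof TheHole): T | undefined {
  return o === TheHole ? undefined : o;
}
```
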
diff --git a/deps/v8/src/builtins/boolean.tq b/deps/v8/src/builtins/boolean.tq
index 25f9ebd3961add..e8feaf1cf1f762 100644
--- a/deps/v8/src/builtins/boolean.tq
+++ b/deps/v8/src/builtins/boolean.tq
@@ -5,8 +5,8 @@
 namespace boolean {
   javascript builtin
   BooleanConstructor(
-      js-implicit context: Context, receiver: Object, newTarget: Object,
-      target: JSFunction)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny, newTarget: JSAny,
+      target: JSFunction)(...arguments): JSAny {
     const value = SelectBooleanConstant(ToBoolean(arguments[0]));
 
     if (newTarget == Undefined) {
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index d65d57cc79b079..c4399175e9846d 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -40,20 +40,20 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
       empty ? IntPtrConstant(base_size)
             : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, mode,
                                      base_size + FixedArray::kHeaderSize);
-  TNode<Object> result = Allocate(size);
+  TNode<HeapObject> result = Allocate(size);
   Comment("Initialize arguments object");
   StoreMapNoWriteBarrier(result, map);
-  Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray);
+  TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
   StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array);
-  Node* smi_arguments_count = ParameterToTagged(arguments_count, mode);
+  TNode<Smi> smi_arguments_count = ParameterToTagged(arguments_count, mode);
   StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
                                  smi_arguments_count);
   Node* arguments = nullptr;
   if (!empty) {
-    arguments = InnerAllocate(CAST(result), elements_offset);
+    arguments = InnerAllocate(result, elements_offset);
     StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset,
                                    smi_arguments_count);
-    Node* fixed_array_map = LoadRoot(RootIndex::kFixedArrayMap);
+    TNode<Map> fixed_array_map = FixedArrayMapConstant();
     StoreMapNoWriteBarrier(arguments, fixed_array_map);
   }
   Node* parameter_map = nullptr;
@@ -63,8 +63,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
     parameter_map = InnerAllocate(CAST(arguments), parameter_map_offset);
     StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
                                    parameter_map);
-    Node* sloppy_elements_map =
-        LoadRoot(RootIndex::kSloppyArgumentsElementsMap);
+    TNode<Map> sloppy_elements_map = SloppyArgumentsElementsMapConstant();
     StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map);
     parameter_map_count = ParameterToTagged(parameter_map_count, mode);
     StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset,
@@ -97,13 +96,14 @@ Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
   VARIABLE(offset, MachineType::PointerRepresentation());
   offset.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
   VariableList list({&offset}, zone());
-  arguments.ForEach(list,
-                    [this, elements, &offset](Node* arg) {
-                      StoreNoWriteBarrier(MachineRepresentation::kTagged,
-                                          elements, offset.value(), arg);
-                      Increment(&offset, kTaggedSize);
-                    },
-                    first_arg, nullptr, param_mode);
+  arguments.ForEach(
+      list,
+      [this, elements, &offset](Node* arg) {
+        StoreNoWriteBarrier(MachineRepresentation::kTagged, elements,
+                            offset.value(), arg);
+        Increment(&offset, kTaggedSize);
+      },
+      first_arg, nullptr, param_mode);
   return result;
 }
 
@@ -121,8 +121,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
 
   Node* rest_count =
       IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count, mode);
-  Node* const native_context = LoadNativeContext(context);
-  Node* const array_map =
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Map> const array_map =
       LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
   GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode),
          &no_rest_parameters);
@@ -164,7 +164,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
   Label done(this, &result), empty(this), runtime(this, Label::kDeferred);
 
   ParameterMode mode = OptimalParameterMode();
-  Node* zero = IntPtrOrSmiConstant(0, mode);
+  TNode<BInt> zero = BIntConstant(0);
 
   TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(
       CAST(context), UncheckedCast<JSFunction>(function));
@@ -173,10 +173,10 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
       info.argument_count, &runtime,
       JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
 
-  Node* const native_context = LoadNativeContext(context);
-  Node* const map =
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Object> const map =
       LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
-  GotoIf(WordEqual(info.argument_count, zero), &empty);
+  GotoIf(BIntEqual(info.argument_count, zero), &empty);
 
   result.Bind(ConstructParametersObjectFromArgs(
       map, info.frame, info.argument_count, zero, info.argument_count, mode,
@@ -209,7 +209,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
   VARIABLE(result, MachineRepresentation::kTagged);
 
   ParameterMode mode = OptimalParameterMode();
-  Node* zero = IntPtrOrSmiConstant(0, mode);
+  TNode<BInt> zero = BIntConstant(0);
 
   Label done(this, &result), empty(this), no_parameters(this),
       runtime(this, Label::kDeferred);
@@ -217,9 +217,9 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
   TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(
       CAST(context), UncheckedCast<JSFunction>(function));
 
-  GotoIf(WordEqual(info.argument_count, zero), &empty);
+  GotoIf(BIntEqual(info.argument_count, zero), &empty);
 
-  GotoIf(WordEqual(info.formal_parameter_count, zero), &no_parameters);
+  GotoIf(BIntEqual(info.formal_parameter_count, zero), &no_parameters);
 
   {
     Comment("Mapped parameter JSSloppyArgumentsObject");
@@ -237,8 +237,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
         elements_allocated, &runtime,
         JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode);
 
-    Node* const native_context = LoadNativeContext(context);
-    Node* const map = LoadContextElement(
+    TNode<NativeContext> const native_context = LoadNativeContext(context);
+    TNode<Object> const map = LoadContextElement(
         native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
     Node* argument_object;
     Node* elements;
@@ -252,26 +252,26 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
     StoreFixedArrayElement(CAST(map_array), 1, elements, SKIP_WRITE_BARRIER);
 
     Comment("Fill in non-mapped parameters");
-    Node* argument_offset =
+    TNode<IntPtrT> argument_offset =
         ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS, mode,
                                FixedArray::kHeaderSize - kHeapObjectTag);
-    Node* mapped_offset =
+    TNode<IntPtrT> mapped_offset =
         ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, mode,
                                FixedArray::kHeaderSize - kHeapObjectTag);
     CodeStubArguments arguments(this, info.argument_count, info.frame, mode);
     VARIABLE(current_argument, MachineType::PointerRepresentation());
     current_argument.Bind(arguments.AtIndexPtr(info.argument_count, mode));
     VariableList var_list1({&current_argument}, zone());
-    mapped_offset = BuildFastLoop(
+    mapped_offset = UncheckedCast<IntPtrT>(BuildFastLoop(
         var_list1, argument_offset, mapped_offset,
         [this, elements, &current_argument](Node* offset) {
           Increment(&current_argument, kSystemPointerSize);
-          Node* arg = LoadBufferObject(
+          TNode<Object> arg = LoadBufferObject(
               UncheckedCast<RawPtrT>(current_argument.value()), 0);
           StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
                               arg);
         },
-        -kTaggedSize, INTPTR_PARAMETERS);
+        -kTaggedSize, INTPTR_PARAMETERS));
 
     // Copy the parameter slots and the holes in the arguments.
     // We need to fill in mapped_count slots. They index the context,
@@ -287,13 +287,13 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
         IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode),
                        info.formal_parameter_count, mode),
         mapped_count, mode));
-    Node* the_hole = TheHoleConstant();
+    TNode<Oddball> the_hole = TheHoleConstant();
     VariableList var_list2({&context_index}, zone());
     const int kParameterMapHeaderSize = FixedArray::OffsetOfElementAt(2);
-    Node* adjusted_map_array = IntPtrAdd(
+    TNode<IntPtrT> adjusted_map_array = IntPtrAdd(
         BitcastTaggedToWord(map_array),
         IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-    Node* zero_offset = ElementOffsetFromIndex(
+    TNode<IntPtrT> zero_offset = ElementOffsetFromIndex(
         zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
     BuildFastLoop(
         var_list2, mapped_offset, zero_offset,
@@ -317,8 +317,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
     GotoIfFixedArraySizeDoesntFitInNewSpace(
         info.argument_count, &runtime,
         JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
-    Node* const native_context = LoadNativeContext(context);
-    Node* const map =
+    TNode<NativeContext> const native_context = LoadNativeContext(context);
+    TNode<Object> const map =
         LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
     result.Bind(ConstructParametersObjectFromArgs(
         map, info.frame, info.argument_count, zero, info.argument_count, mode,
@@ -331,8 +331,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
   BIND(&empty);
   {
     Comment("Empty JSSloppyArgumentsObject");
-    Node* const native_context = LoadNativeContext(context);
-    Node* const map =
+    TNode<NativeContext> const native_context = LoadNativeContext(context);
+    TNode<Object> const map =
         LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
     Node* arguments;
     Node* elements;
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 07f74cb4298db9..c7d8eb009125da 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -30,272 +30,267 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
       to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
       fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
 
-  void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
-    // 6. Let A be ? TypedArraySpeciesCreate(O, len).
-    TNode<JSTypedArray> original_array = CAST(o());
-    TNode<Smi> length = CAST(len_);
-    const char* method_name = "%TypedArray%.prototype.map";
-
-    TNode<JSTypedArray> a = TypedArraySpeciesCreateByLength(
-        context(), method_name, original_array, length);
-    // In the Spec and our current implementation, the length check is already
-    // performed in TypedArraySpeciesCreate.
-    CSA_ASSERT(this, UintPtrLessThanOrEqual(SmiUntag(CAST(len_)),
-                                            LoadJSTypedArrayLength(a)));
-    fast_typed_array_target_ =
-        Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a));
-    a_.Bind(a);
-  }
-
-  // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
-  Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
-    // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
-    Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
-                                callbackfn(), this_arg(), k_value, k, o());
-    Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
-
-    // 8. d. Perform ? Set(A, Pk, mapped_value, true).
-    // Since we know that A is a TypedArray, this always ends up in
-    // #sec-integer-indexed-exotic-objects-set-p-v-receiver and then
-    // tc39.github.io/ecma262/#sec-integerindexedelementset .
-    Branch(fast_typed_array_target_, &fast, &slow);
-
-    BIND(&fast);
-    // #sec-integerindexedelementset
-    // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
-    // numValue be ? ToBigInt(v).
-    // 6. Otherwise, let numValue be ? ToNumber(value).
-    Node* num_value;
-    if (source_elements_kind_ == BIGINT64_ELEMENTS ||
-        source_elements_kind_ == BIGUINT64_ELEMENTS) {
-      num_value = ToBigInt(context(), mapped_value);
-    } else {
-      num_value = ToNumber_Inline(context(), mapped_value);
-    }
-    // The only way how this can bailout is because of a detached buffer.
-    EmitElementStore(a(), k, num_value, source_elements_kind_,
-                     KeyedAccessStoreMode::STANDARD_STORE, &detached,
-                     context());
-    Goto(&done);
+void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
+  // 6. Let A be ? TypedArraySpeciesCreate(O, len).
+  TNode<JSTypedArray> original_array = CAST(o());
+  TNode<Smi> length = CAST(len_);
+  const char* method_name = "%TypedArray%.prototype.map";
+
+  TNode<JSTypedArray> a = TypedArraySpeciesCreateByLength(
+      context(), method_name, original_array, length);
+  // In the Spec and our current implementation, the length check is already
+  // performed in TypedArraySpeciesCreate.
+  CSA_ASSERT(this, UintPtrLessThanOrEqual(SmiUntag(CAST(len_)),
+                                          LoadJSTypedArrayLength(a)));
+  fast_typed_array_target_ =
+      Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a));
+  a_.Bind(a);
+}
 
-    BIND(&slow);
-    SetPropertyStrict(context(), CAST(a()), CAST(k), CAST(mapped_value));
-    Goto(&done);
+// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
+Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
+  // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
+  Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
+                              callbackfn(), this_arg(), k_value, k, o());
+  Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
 
-    BIND(&detached);
-    // tc39.github.io/ecma262/#sec-integerindexedelementset
-    // 8. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
-    ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
+  // 8. d. Perform ? Set(A, Pk, mapped_value, true).
+  // Since we know that A is a TypedArray, this always ends up in
+  // #sec-integer-indexed-exotic-objects-set-p-v-receiver and then
+  // tc39.github.io/ecma262/#sec-integerindexedelementset .
+  Branch(fast_typed_array_target_, &fast, &slow);
 
-    BIND(&done);
-    return a();
+  BIND(&fast);
+  // #sec-integerindexedelementset
+  // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
+  // numValue be ? ToBigInt(v).
+  // 6. Otherwise, let numValue be ? ToNumber(value).
+  Node* num_value;
+  if (source_elements_kind_ == BIGINT64_ELEMENTS ||
+      source_elements_kind_ == BIGUINT64_ELEMENTS) {
+    num_value = ToBigInt(context(), mapped_value);
+  } else {
+    num_value = ToNumber_Inline(context(), mapped_value);
   }
+  // The only way this can bail out is through a detached buffer.
+  EmitElementStore(a(), k, num_value, source_elements_kind_,
+                   KeyedAccessStoreMode::STANDARD_STORE, &detached, context());
+  Goto(&done);
 
-  void ArrayBuiltinsAssembler::NullPostLoopAction() {}
-
-  void ArrayBuiltinsAssembler::FillFixedArrayWithSmiZero(
-      TNode<FixedArray> array, TNode<Smi> smi_length) {
-    CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArray(array)));
+  BIND(&slow);
+  SetPropertyStrict(context(), CAST(a()), CAST(k), CAST(mapped_value));
+  Goto(&done);
 
-    TNode<IntPtrT> length = SmiToIntPtr(smi_length);
-    TNode<WordT> byte_length = TimesTaggedSize(length);
-    CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
+  BIND(&detached);
+  // tc39.github.io/ecma262/#sec-integerindexedelementset
+  // 8. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+  ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
 
-    static const int32_t fa_base_data_offset =
-        FixedArray::kHeaderSize - kHeapObjectTag;
-    TNode<IntPtrT> backing_store = IntPtrAdd(
-        BitcastTaggedToWord(array), IntPtrConstant(fa_base_data_offset));
+  BIND(&done);
+  return a();
+}
 
-    // Call out to memset to perform initialization.
-    TNode<ExternalReference> memset =
-        ExternalConstant(ExternalReference::libc_memset_function());
-    STATIC_ASSERT(kSizetSize == kIntptrSize);
-    CallCFunction(memset, MachineType::Pointer(),
-                  std::make_pair(MachineType::Pointer(), backing_store),
-                  std::make_pair(MachineType::IntPtr(), IntPtrConstant(0)),
-                  std::make_pair(MachineType::UintPtr(), byte_length));
-  }
+void ArrayBuiltinsAssembler::NullPostLoopAction() {}
+
+void ArrayBuiltinsAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
+                                                       TNode<Smi> smi_length) {
+  CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArray(array)));
+
+  TNode<IntPtrT> length = SmiToIntPtr(smi_length);
+  TNode<IntPtrT> byte_length = TimesTaggedSize(length);
+  CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
+
+  static const int32_t fa_base_data_offset =
+      FixedArray::kHeaderSize - kHeapObjectTag;
+  TNode<IntPtrT> backing_store = IntPtrAdd(BitcastTaggedToWord(array),
+                                           IntPtrConstant(fa_base_data_offset));
+
+  // Call out to memset to perform initialization.
+  TNode<ExternalReference> memset =
+      ExternalConstant(ExternalReference::libc_memset_function());
+  STATIC_ASSERT(kSizetSize == kIntptrSize);
+  CallCFunction(memset, MachineType::Pointer(),
+                std::make_pair(MachineType::Pointer(), backing_store),
+                std::make_pair(MachineType::IntPtr(), IntPtrConstant(0)),
+                std::make_pair(MachineType::UintPtr(), byte_length));
+}
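
The memset call is only correct because a Smi-encoded zero is the all-zero bit pattern, so zero-filling bytes and storing Smi zeros coincide. A byte-level TypeScript model of that idea (the tagged-slot size is an assumption of the sketch):

```ts
// Model a FixedArray of Smis as raw backing storage. Smi 0 tags to the
// all-zero word, so a byte-wise zero fill (the memset analogue) yields a
// valid array of Smi zeros. TAGGED_SIZE is illustrative.
const TAGGED_SIZE = 8;

function fillFixedArrayWithSmiZero(length: number): DataView {
  const backing = new ArrayBuffer(length * TAGGED_SIZE);
  new Uint8Array(backing).fill(0);  // memset(backing_store, 0, byte_length)
  return new DataView(backing);
}
```
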
 
-  void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) {
-    if (argc_ == nullptr) {
-      Return(value);
-    } else {
-      // argc_ doesn't include the receiver, so it has to be added back in
-      // manually.
-      PopAndReturn(IntPtrAdd(argc_, IntPtrConstant(1)), value);
-    }
+void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) {
+  if (argc_ == nullptr) {
+    Return(value);
+  } else {
+    // argc_ doesn't include the receiver, so it has to be added back in
+    // manually.
+    PopAndReturn(IntPtrAdd(argc_, IntPtrConstant(1)), value);
   }
+}
 
-  void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody(
-      TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
-      Node* this_arg, TNode<IntPtrT> argc) {
-    context_ = context;
-    receiver_ = receiver;
-    callbackfn_ = callbackfn;
-    this_arg_ = this_arg;
-    argc_ = argc;
-  }
+void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody(
+    TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+    Node* this_arg, TNode<IntPtrT> argc) {
+  context_ = context;
+  receiver_ = receiver;
+  callbackfn_ = callbackfn;
+  this_arg_ = this_arg;
+  argc_ = argc;
+}
 
-  void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
-      const char* name, const BuiltinResultGenerator& generator,
-      const CallResultProcessor& processor, const PostLoopAction& action,
-      ForEachDirection direction) {
-    name_ = name;
+void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
+    const char* name, const BuiltinResultGenerator& generator,
+    const CallResultProcessor& processor, const PostLoopAction& action,
+    ForEachDirection direction) {
+  name_ = name;
 
-    // ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
+  // ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
 
-    Label throw_not_typed_array(this, Label::kDeferred);
+  Label throw_not_typed_array(this, Label::kDeferred);
 
-    GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array);
-    TNode<Map> typed_array_map = LoadMap(CAST(receiver_));
-    GotoIfNot(IsJSTypedArrayMap(typed_array_map), &throw_not_typed_array);
+  GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array);
+  TNode<Map> typed_array_map = LoadMap(CAST(receiver_));
+  GotoIfNot(IsJSTypedArrayMap(typed_array_map), &throw_not_typed_array);
 
-    TNode<JSTypedArray> typed_array = CAST(receiver_);
-    o_ = typed_array;
+  TNode<JSTypedArray> typed_array = CAST(receiver_);
+  o_ = typed_array;
 
-    TNode<JSArrayBuffer> array_buffer =
-        LoadJSArrayBufferViewBuffer(typed_array);
-    ThrowIfArrayBufferIsDetached(context_, array_buffer, name_);
+  TNode<JSArrayBuffer> array_buffer = LoadJSArrayBufferViewBuffer(typed_array);
+  ThrowIfArrayBufferIsDetached(context_, array_buffer, name_);
 
-    len_ = ChangeUintPtrToTagged(LoadJSTypedArrayLength(typed_array));
+  len_ = ChangeUintPtrToTagged(LoadJSTypedArrayLength(typed_array));
 
-    Label throw_not_callable(this, Label::kDeferred);
-    Label distinguish_types(this);
-    GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable);
-    Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types,
-           &throw_not_callable);
+  Label throw_not_callable(this, Label::kDeferred);
+  Label distinguish_types(this);
+  GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable);
+  Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types,
+         &throw_not_callable);
 
-    BIND(&throw_not_typed_array);
-    ThrowTypeError(context_, MessageTemplate::kNotTypedArray);
+  BIND(&throw_not_typed_array);
+  ThrowTypeError(context_, MessageTemplate::kNotTypedArray);
 
-    BIND(&throw_not_callable);
-    ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_);
+  BIND(&throw_not_callable);
+  ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_);
 
-    Label unexpected_instance_type(this);
-    BIND(&unexpected_instance_type);
-    Unreachable();
+  Label unexpected_instance_type(this);
+  BIND(&unexpected_instance_type);
+  Unreachable();
 
-    std::vector<int32_t> elements_kinds = {
+  std::vector<int32_t> elements_kinds = {
 #define ELEMENTS_KIND(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
-        TYPED_ARRAYS(ELEMENTS_KIND)
+      TYPED_ARRAYS(ELEMENTS_KIND)
 #undef ELEMENTS_KIND
-    };
-    std::list<Label> labels;
-    for (size_t i = 0; i < elements_kinds.size(); ++i) {
-      labels.emplace_back(this);
-    }
-    std::vector<Label*> label_ptrs;
-    for (Label& label : labels) {
-      label_ptrs.push_back(&label);
-    }
+  };
+  std::list<Label> labels;
+  for (size_t i = 0; i < elements_kinds.size(); ++i) {
+    labels.emplace_back(this);
+  }
+  std::vector<Label*> label_ptrs;
+  for (Label& label : labels) {
+    label_ptrs.push_back(&label);
+  }
 
-    BIND(&distinguish_types);
+  BIND(&distinguish_types);
 
-    generator(this);
+  generator(this);
 
-    if (direction == ForEachDirection::kForward) {
-      k_.Bind(SmiConstant(0));
-    } else {
-      k_.Bind(NumberDec(len()));
-    }
-    CSA_ASSERT(this, IsSafeInteger(k()));
-    TNode<Int32T> elements_kind = LoadMapElementsKind(typed_array_map);
-    Switch(elements_kind, &unexpected_instance_type, elements_kinds.data(),
-           label_ptrs.data(), labels.size());
-
-    size_t i = 0;
-    for (auto it = labels.begin(); it != labels.end(); ++i, ++it) {
-      BIND(&*it);
-      Label done(this);
-      source_elements_kind_ = static_cast<ElementsKind>(elements_kinds[i]);
-      // TODO(tebbi): Silently cancelling the loop on buffer detachment is a
-      // spec violation. Should go to &throw_detached and throw a TypeError
-      // instead.
-      VisitAllTypedArrayElements(array_buffer, processor, &done, direction,
-                                 typed_array);
-      Goto(&done);
-      // No exception, return success
-      BIND(&done);
-      action(this);
-      ReturnFromBuiltin(a_.value());
-    }
+  if (direction == ForEachDirection::kForward) {
+    k_.Bind(SmiConstant(0));
+  } else {
+    k_.Bind(NumberDec(len()));
+  }
+  CSA_ASSERT(this, IsSafeInteger(k()));
+  TNode<Int32T> elements_kind = LoadMapElementsKind(typed_array_map);
+  Switch(elements_kind, &unexpected_instance_type, elements_kinds.data(),
+         label_ptrs.data(), labels.size());
+
+  size_t i = 0;
+  for (auto it = labels.begin(); it != labels.end(); ++i, ++it) {
+    BIND(&*it);
+    Label done(this);
+    source_elements_kind_ = static_cast<ElementsKind>(elements_kinds[i]);
+    // TODO(tebbi): Silently cancelling the loop on buffer detachment is a
+    // spec violation. Should go to &throw_detached and throw a TypeError
+    // instead.
+    VisitAllTypedArrayElements(array_buffer, processor, &done, direction,
+                               typed_array);
+    Goto(&done);
+    // No exception; return success.
+    BIND(&done);
+    action(this);
+    ReturnFromBuiltin(a_.value());
   }
+}
 
-  void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
-      Node* array_buffer, const CallResultProcessor& processor, Label* detached,
-      ForEachDirection direction, TNode<JSTypedArray> typed_array) {
-    VariableList list({&a_, &k_, &to_}, zone());
-
-    FastLoopBody body = [&](Node* index) {
-      GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
-      TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
-      Node* value = LoadFixedTypedArrayElementAsTagged(
-          data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
-      k_.Bind(index);
-      a_.Bind(processor(this, value, index));
-    };
-    Node* start = SmiConstant(0);
-    Node* end = len_;
-    IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost;
-    int incr = 1;
-    if (direction == ForEachDirection::kReverse) {
-      std::swap(start, end);
-      advance_mode = IndexAdvanceMode::kPre;
-      incr = -1;
-    }
-    BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS,
-                  advance_mode);
-  }
-
-  // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
-  void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode<Number> len) {
-    Label runtime(this, Label::kDeferred), done(this);
-
-    Node* const original_map = LoadMap(o());
-    GotoIfNot(
-        InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE),
-        &runtime);
-
-    GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
-              &runtime);
-
-    Node* species_protector = ArraySpeciesProtectorConstant();
-    Node* value =
-        LoadObjectField(species_protector, PropertyCell::kValueOffset);
-    Node* const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
-    GotoIf(WordEqual(value, protector_invalid), &runtime);
-
-    GotoIfNot(TaggedIsPositiveSmi(len), &runtime);
-    GotoIfNot(
-        IsValidFastJSArrayCapacity(len, CodeStubAssembler::SMI_PARAMETERS),
-        &runtime);
-
-    // We need to be conservative and start with holey because the builtins
-    // that create output arrays aren't guaranteed to be called for every
-    // element in the input array (maybe the callback deletes an element).
-    const ElementsKind elements_kind =
-        GetHoleyElementsKind(GetInitialFastElementsKind());
-    TNode<Context> native_context = LoadNativeContext(context());
-    TNode<Map> array_map =
-        LoadJSArrayElementsMap(elements_kind, native_context);
-    a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, CAST(len),
-                            nullptr, CodeStubAssembler::SMI_PARAMETERS,
-                            kAllowLargeObjectAllocation));
+void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
+    Node* array_buffer, const CallResultProcessor& processor, Label* detached,
+    ForEachDirection direction, TNode<JSTypedArray> typed_array) {
+  VariableList list({&a_, &k_, &to_}, zone());
+
+  FastLoopBody body = [&](Node* index) {
+    GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
+    TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
+    auto value = LoadFixedTypedArrayElementAsTagged(
+        data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
+    k_.Bind(index);
+    a_.Bind(processor(this, value, index));
+  };
+  Node* start = SmiConstant(0);
+  Node* end = len_;
+  IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost;
+  int incr = 1;
+  if (direction == ForEachDirection::kReverse) {
+    std::swap(start, end);
+    advance_mode = IndexAdvanceMode::kPre;
+    incr = -1;
+  }
+  BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS,
+                advance_mode);
+}
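
VisitAllTypedArrayElements drives BuildFastLoop in either direction by swapping the bounds and flipping the advance mode: forward iteration post-increments from 0 up to the length, reverse iteration pre-decrements from the length down to 0. A minimal plain-C++ sketch of the same bookkeeping, assuming an ordinary vector in place of a JSTypedArray:

    #include <cstddef>
    #include <functional>
    #include <utility>
    #include <vector>

    enum class Direction { kForward, kReverse };

    // Forward walks [start, end) incrementing after the body (kPost);
    // reverse swaps the bounds and decrements before the body (kPre), so
    // the visited indices run end-1 .. start.
    void VisitAll(const std::vector<double>& elems, Direction dir,
                  const std::function<void(std::size_t, double)>& body) {
      std::ptrdiff_t start = 0, end = static_cast<std::ptrdiff_t>(elems.size());
      std::ptrdiff_t incr = 1;
      if (dir == Direction::kReverse) {
        std::swap(start, end);
        incr = -1;
      }
      for (std::ptrdiff_t i = start; i != end;) {
        if (dir == Direction::kReverse) i += incr;  // pre-advance (kPre)
        body(static_cast<std::size_t>(i), elems[static_cast<std::size_t>(i)]);
        if (dir == Direction::kForward) i += incr;  // post-advance (kPost)
      }
    }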
 
-    Goto(&done);
+// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
+void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode<Number> len) {
+  Label runtime(this, Label::kDeferred), done(this);
 
-    BIND(&runtime);
-    {
-      // 5. Let A be ? ArraySpeciesCreate(O, len).
-      TNode<JSReceiver> constructor =
-          CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context(), o()));
-      a_.Bind(Construct(context(), constructor, len));
-      Goto(&fully_spec_compliant_);
-    }
+  TNode<Map> const original_map = LoadMap(o());
+  GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE),
+            &runtime);
 
-    BIND(&done);
+  GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
+            &runtime);
+
+  TNode<PropertyCell> species_protector = ArraySpeciesProtectorConstant();
+  TNode<Object> value =
+      LoadObjectField(species_protector, PropertyCell::kValueOffset);
+  TNode<Smi> const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
+  GotoIf(TaggedEqual(value, protector_invalid), &runtime);
+
+  GotoIfNot(TaggedIsPositiveSmi(len), &runtime);
+  GotoIfNot(IsValidFastJSArrayCapacity(len, CodeStubAssembler::SMI_PARAMETERS),
+            &runtime);
+
+  // We need to be conservative and start with holey because the builtins
+  // that create output arrays aren't guaranteed to be called for every
+  // element in the input array (maybe the callback deletes an element).
+  const ElementsKind elements_kind =
+      GetHoleyElementsKind(GetInitialFastElementsKind());
+  TNode<NativeContext> native_context = LoadNativeContext(context());
+  TNode<Map> array_map = LoadJSArrayElementsMap(elements_kind, native_context);
+  a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, CAST(len),
+                          nullptr, CodeStubAssembler::SMI_PARAMETERS,
+                          kAllowLargeObjectAllocation));
+
+  Goto(&done);
+
+  BIND(&runtime);
+  {
+    // 5. Let A be ? ArraySpeciesCreate(O, len).
+    TNode<JSReceiver> constructor =
+        CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context(), o()));
+    a_.Bind(Construct(context(), constructor, len));
+    Goto(&fully_spec_compliant_);
   }
 
+  BIND(&done);
+}
+
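GenerateArraySpeciesCreate, as restructured above, only takes the inline allocation path when every guard holds: the receiver is a plain JSArray whose prototype is the unmodified initial Array.prototype, the @@species protector cell is still valid, and the length is a positive Smi within fast-array capacity; anything else falls back to Runtime::kArraySpeciesConstructor. A hedged sketch of that guard chain (field and constant names invented for illustration; the capacity bound below is not V8's real limit):

    #include <cstdint>

    // Guard bits mirroring the GotoIfNot chain above (names invented for
    // illustration; V8 derives these from map loads and the protector cell).
    struct SpeciesGuards {
      bool receiver_is_js_array;
      bool prototype_is_initial_array_prototype;
      bool species_protector_intact;  // no one redefined @@species
    };

    // Illustrative capacity bound only; not V8's actual fast-array limit.
    constexpr int64_t kMaxFastArrayCapacity = int64_t{1} << 26;

    // Every guard must hold before the builtin may allocate the result array
    // inline instead of calling ArraySpeciesConstructor through the runtime.
    bool CanUseFastSpeciesCreate(const SpeciesGuards& g, int64_t len) {
      return g.receiver_is_js_array &&
             g.prototype_is_initial_array_prototype &&
             g.species_protector_intact && len >= 0 &&
             len <= kMaxFastArrayCapacity;
    }
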
 TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
   TNode<Int32T> argc =
       UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
@@ -331,7 +326,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
 
     // 3) Check that the elements backing store isn't copy-on-write.
     TNode<FixedArrayBase> elements = LoadElements(array_receiver);
-    GotoIf(WordEqual(LoadMap(elements), LoadRoot(RootIndex::kFixedCOWArrayMap)),
+    GotoIf(TaggedEqual(LoadMap(elements), FixedCOWArrayMapConstant()),
            &runtime);
 
     TNode<IntPtrT> new_length = IntPtrSub(length, IntPtrConstant(1));
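
A change repeated throughout this patch is replacing WordEqual with TaggedEqual when comparing tagged heap values, as in the copy-on-write map check above. The motivation, sketched under the assumption of pointer compression: tagged values may be stored as 32-bit compressed offsets, so object identity must be compared at tagged width, while WordEqual compares full machine words. The typedefs below are simplified stand-ins, not V8's actual definitions:

    #include <cstdint>

    using Tagged_t = uint32_t;  // compressed tagged value (assumed 32-bit)
    using Word_t = uintptr_t;   // full machine word

    // Object identity on tagged values compares at tagged width ...
    inline bool TaggedEqual(Tagged_t a, Tagged_t b) { return a == b; }
    // ... whereas WordEqual compares whole machine words, appropriate only
    // for word-sized untagged values (e.g. IntPtrT).
    inline bool WordEqual(Word_t a, Word_t b) { return a == b; }
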
@@ -353,17 +348,24 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
                                 Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
            &fast_elements);
 
-    Node* value = LoadFixedDoubleArrayElement(CAST(elements), new_length,
-                                              &return_undefined);
+    {
+      TNode<FixedDoubleArray> elements_known_double_array =
+          ReinterpretCast<FixedDoubleArray>(elements);
+      TNode<Float64T> value = LoadFixedDoubleArrayElement(
+          elements_known_double_array, new_length, &return_undefined);
 
-    StoreFixedDoubleArrayHole(CAST(elements), new_length);
-    args.PopAndReturn(AllocateHeapNumberWithValue(value));
+      StoreFixedDoubleArrayHole(elements_known_double_array, new_length);
+      args.PopAndReturn(AllocateHeapNumberWithValue(value));
+    }
 
     BIND(&fast_elements);
     {
-      Node* value = LoadFixedArrayElement(CAST(elements), new_length);
-      StoreFixedArrayElement(CAST(elements), new_length, TheHoleConstant());
-      GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
+      TNode<FixedArray> elements_known_fixed_array = CAST(elements);
+      TNode<Object> value =
+          LoadFixedArrayElement(elements_known_fixed_array, new_length);
+      StoreFixedArrayElement(elements_known_fixed_array, new_length,
+                             TheHoleConstant());
+      GotoIf(TaggedEqual(value, TheHoleConstant()), &return_undefined);
       args.PopAndReturn(value);
     }
 
@@ -415,8 +417,9 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
     GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
            &object_push_pre);
 
-    Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, array_receiver,
-                                          &args, &arg_index, &smi_transition);
+    TNode<Smi> new_length =
+        BuildAppendJSArray(PACKED_SMI_ELEMENTS, array_receiver, &args,
+                           &arg_index, &smi_transition);
     args.PopAndReturn(new_length);
   }
 
@@ -426,16 +429,16 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
   // the most generic implementation for the rest of the array.
   BIND(&smi_transition);
   {
-    Node* arg = args.AtIndex(arg_index.value());
+    TNode<Object> arg = args.AtIndex(arg_index.value());
     GotoIf(TaggedIsSmi(arg), &default_label);
-    Node* length = LoadJSArrayLength(array_receiver);
+    TNode<Number> length = LoadJSArrayLength(array_receiver);
     // TODO(danno): Use the KeyedStoreGeneric stub here when possible;
     // calling into the runtime to do the elements transition is overkill.
-    SetPropertyStrict(context, array_receiver, CAST(length), CAST(arg));
+    SetPropertyStrict(context, array_receiver, length, arg);
     Increment(&arg_index);
     // The runtime SetProperty call could have converted the array to dictionary
     // mode, which must be detected to abort the fast-path.
-    Node* kind = LoadElementsKind(array_receiver);
+    TNode<Int32T> kind = LoadElementsKind(array_receiver);
     GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
            &default_label);
 
@@ -451,14 +454,14 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
 
   BIND(&object_push);
   {
-    Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, array_receiver,
-                                          &args, &arg_index, &default_label);
+    TNode<Smi> new_length = BuildAppendJSArray(
+        PACKED_ELEMENTS, array_receiver, &args, &arg_index, &default_label);
     args.PopAndReturn(new_length);
   }
 
   BIND(&double_push);
   {
-    Node* new_length =
+    TNode<Smi> new_length =
         BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, array_receiver, &args,
                            &arg_index, &double_transition);
     args.PopAndReturn(new_length);
@@ -470,16 +473,16 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
   // on the most generic implementation for the rest of the array.
   BIND(&double_transition);
   {
-    Node* arg = args.AtIndex(arg_index.value());
+    TNode<Object> arg = args.AtIndex(arg_index.value());
     GotoIfNumber(arg, &default_label);
-    Node* length = LoadJSArrayLength(array_receiver);
+    TNode<Number> length = LoadJSArrayLength(array_receiver);
     // TODO(danno): Use the KeyedStoreGeneric stub here when possible;
     // calling into the runtime to do the elements transition is overkill.
-    SetPropertyStrict(context, array_receiver, CAST(length), CAST(arg));
+    SetPropertyStrict(context, array_receiver, length, arg);
     Increment(&arg_index);
     // The runtime SetProperty call could have converted the array to dictionary
     // mode, which must be detected to abort the fast-path.
-    Node* kind = LoadElementsKind(array_receiver);
+    TNode<Int32T> kind = LoadElementsKind(array_receiver);
     GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
            &default_label);
     Goto(&object_push);
@@ -491,8 +494,8 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
   {
     args.ForEach(
         [this, array_receiver, context](Node* arg) {
-          Node* length = LoadJSArrayLength(array_receiver);
-          SetPropertyStrict(context, array_receiver, CAST(length), CAST(arg));
+          TNode<Number> length = LoadJSArrayLength(array_receiver);
+          SetPropertyStrict(context, array_receiver, length, CAST(arg));
         },
         arg_index.value());
     args.PopAndReturn(LoadJSArrayLength(array_receiver));
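
Once the receiver leaves the fast elements kinds, the default_label path above degenerates to one generic strict property store per remaining argument at the current length. A minimal sketch of that fallback shape, with a callback standing in for SetPropertyStrict and doubles standing in for arbitrary JS values:

    #include <cstdint>
    #include <functional>
    #include <vector>

    using SetPropertyStrictFn = std::function<void(uint32_t, double)>;

    // Each argument is stored through the generic property-set machinery at
    // index == length; the store may re-enter JS via setters or proxies.
    uint32_t GenericPushFallback(uint32_t length,
                                 const std::vector<double>& args,
                                 const SetPropertyStrictFn& set_property) {
      for (double arg : args) {
        set_property(length, arg);
        ++length;
      }
      return length;  // push returns the new length
    }
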
@@ -635,7 +638,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
   GotoIfNot(Word32Equal(argc, Int32Constant(1)), &normal_iterate);
   TNode<Object> array_function = LoadContextElement(
       LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
-  Branch(WordEqual(array_function, receiver), &fast_iterate, &normal_iterate);
+  Branch(TaggedEqual(array_function, receiver), &fast_iterate, &normal_iterate);
 
   BIND(&fast_iterate);
   {
@@ -674,7 +677,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
 
   // Determine whether items[Symbol.iterator] is defined:
   IteratorBuiltinsAssembler iterator_assembler(state());
-  Node* iterator_method =
+  TNode<Object> iterator_method =
       iterator_assembler.GetIteratorMethod(context, array_like);
   Branch(IsNullOrUndefined(iterator_method), &not_iterable, &iterable);
 
@@ -708,7 +711,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
     IteratorRecord iterator_record =
         iterator_assembler.GetIterator(context, items, iterator_method);
 
-    TNode<Context> native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
     TNode<Map> fast_iterator_result_map = CAST(
         LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
 
@@ -741,7 +744,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
 
       // Store the result in the output object (catching any exceptions so the
       // iterator can be closed).
-      Node* define_status =
+      TNode<Object> define_status =
           CallRuntime(Runtime::kCreateDataProperty, context, array.value(),
                       index.value(), value.value());
       GotoIfException(define_status, &on_exception, &var_exception);
@@ -789,9 +792,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
 
     TVARIABLE(Number, index, SmiConstant(0));
 
-    // TODO(ishell): remove <Object, Object>
-    GotoIf(WordEqual<Object, Object>(length.value(), SmiConstant(0)),
-           &finished);
+    GotoIf(TaggedEqual(length.value(), SmiConstant(0)), &finished);
 
     // Loop from 0 to length-1.
     {
@@ -837,8 +838,8 @@ TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
   CodeStubArguments args(this, argc);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   TNode<Object> receiver = args.GetReceiver();
-  Node* callbackfn = args.GetOptionalArgumentValue(0);
-  Node* this_arg = args.GetOptionalArgumentValue(1);
+  TNode<Object> callbackfn = args.GetOptionalArgumentValue(0);
+  TNode<Object> this_arg = args.GetOptionalArgumentValue(1);
 
   InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);
 
@@ -856,7 +857,7 @@ TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
   Label call_runtime(this), return_true(this), return_false(this);
 
   GotoIf(TaggedIsSmi(object), &return_false);
-  TNode<Int32T> instance_type = LoadInstanceType(CAST(object));
+  TNode<Uint16T> instance_type = LoadInstanceType(CAST(object));
 
   GotoIf(InstanceTypeEqual(instance_type, JS_ARRAY_TYPE), &return_true);
 
@@ -884,7 +885,7 @@ class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
   void Generate(SearchVariant variant, TNode<IntPtrT> argc,
                 TNode<Context> context);
   void GenerateSmiOrObject(SearchVariant variant, Node* context, Node* elements,
-                           Node* search_element, Node* array_length,
+                           TNode<Object> search_element, Node* array_length,
                            Node* from_index);
   void GeneratePackedDoubles(SearchVariant variant, Node* elements,
                              Node* search_element, Node* array_length,
@@ -906,7 +907,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
   TNode<Object> search_element =
       args.GetOptionalArgumentValue(kSearchElementArg);
 
-  Node* intptr_zero = IntPtrConstant(0);
+  TNode<IntPtrT> intptr_zero = IntPtrConstant(0);
 
   Label init_index(this), return_not_found(this), call_runtime(this);
 
@@ -920,8 +921,8 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
 
   // JSArray length is always a positive Smi for fast arrays.
   CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
-  Node* array_length = LoadFastJSArrayLength(array);
-  Node* array_length_untagged = SmiUntag(array_length);
+  TNode<Smi> array_length = LoadFastJSArrayLength(array);
+  TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
 
   {
     // Initialize fromIndex.
@@ -930,7 +931,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
     // If no fromIndex was passed, default to 0.
     GotoIf(IntPtrLessThanOrEqual(argc, IntPtrConstant(kFromIndexArg)), &done);
 
-    Node* start_from = args.AtIndex(kFromIndexArg);
+    TNode<Object> start_from = args.AtIndex(kFromIndexArg);
     // Handle Smis and undefined here and everything else in runtime.
     // We must be very careful with side effects from the ToInteger conversion,
     // as the side effects might render previously checked assumptions about
@@ -944,7 +945,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
     }
     BIND(&is_smi);
     {
-      Node* intptr_start_from = SmiUntag(start_from);
+      TNode<IntPtrT> intptr_start_from = SmiUntag(CAST(start_from));
       index_var.Bind(intptr_start_from);
 
       GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
@@ -965,7 +966,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
   Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
 
   TNode<Int32T> elements_kind = LoadElementsKind(array);
-  Node* elements = LoadElements(array);
+  TNode<FixedArrayBase> elements = LoadElements(array);
   STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
   STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
   STATIC_ASSERT(PACKED_ELEMENTS == 2);
@@ -977,9 +978,9 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
       &if_packed_doubles);
   GotoIf(ElementsKindEqual(elements_kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
          &if_holey_doubles);
-  GotoIf(
-      IsElementsKindLessThanOrEqual(elements_kind, LAST_FROZEN_ELEMENTS_KIND),
-      &if_smiorobjects);
+  GotoIf(IsElementsKindLessThanOrEqual(elements_kind,
+                                       LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND),
+         &if_smiorobjects);
   Goto(&return_not_found);
 
   BIND(&if_smiorobjects);
@@ -990,8 +991,8 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
                                     Builtins::kArrayIncludesSmiOrObject)
             : Builtins::CallableFor(isolate(),
                                     Builtins::kArrayIndexOfSmiOrObject);
-    Node* result = CallStub(callable, context, elements, search_element,
-                            array_length, SmiTag(index_var.value()));
+    TNode<Object> result = CallStub(callable, context, elements, search_element,
+                                    array_length, SmiTag(index_var.value()));
     args.PopAndReturn(result);
   }
 
@@ -1003,8 +1004,8 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
                                     Builtins::kArrayIncludesPackedDoubles)
             : Builtins::CallableFor(isolate(),
                                     Builtins::kArrayIndexOfPackedDoubles);
-    Node* result = CallStub(callable, context, elements, search_element,
-                            array_length, SmiTag(index_var.value()));
+    TNode<Object> result = CallStub(callable, context, elements, search_element,
+                                    array_length, SmiTag(index_var.value()));
     args.PopAndReturn(result);
   }
 
@@ -1016,8 +1017,8 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
                                     Builtins::kArrayIncludesHoleyDoubles)
             : Builtins::CallableFor(isolate(),
                                     Builtins::kArrayIndexOfHoleyDoubles);
-    Node* result = CallStub(callable, context, elements, search_element,
-                            array_length, SmiTag(index_var.value()));
+    TNode<Object> result = CallStub(callable, context, elements, search_element,
+                                    array_length, SmiTag(index_var.value()));
     args.PopAndReturn(result);
   }
 
@@ -1030,7 +1031,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
 
   BIND(&call_runtime);
   {
-    Node* start_from =
+    TNode<Object> start_from =
         args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant());
     Runtime::FunctionId function = variant == kIncludes
                                        ? Runtime::kArrayIncludes_Slow
@@ -1041,12 +1042,11 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
 }
 
 void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
-    SearchVariant variant, Node* context, Node* elements, Node* search_element,
-    Node* array_length, Node* from_index) {
-  VARIABLE(index_var, MachineType::PointerRepresentation(),
-           SmiUntag(from_index));
-  VARIABLE(search_num, MachineRepresentation::kFloat64);
-  Node* array_length_untagged = SmiUntag(array_length);
+    SearchVariant variant, Node* context, Node* elements,
+    TNode<Object> search_element, Node* array_length, Node* from_index) {
+  TVARIABLE(IntPtrT, index_var, SmiUntag(from_index));
+  TVARIABLE(Float64T, search_num);
+  TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
 
   Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
       string_loop(this), bigint_loop(this, &index_var),
@@ -1054,20 +1054,20 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
       return_found(this), return_not_found(this);
 
   GotoIfNot(TaggedIsSmi(search_element), &not_smi);
-  search_num.Bind(SmiToFloat64(search_element));
+  search_num = SmiToFloat64(CAST(search_element));
   Goto(&heap_num_loop);
 
   BIND(&not_smi);
   if (variant == kIncludes) {
     GotoIf(IsUndefined(search_element), &undef_loop);
   }
-  Node* map = LoadMap(search_element);
+  TNode<Map> map = LoadMap(CAST(search_element));
   GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
-  search_num.Bind(LoadHeapNumberValue(search_element));
+  search_num = LoadHeapNumberValue(CAST(search_element));
   Goto(&heap_num_loop);
 
   BIND(&not_heap_num);
-  Node* search_type = LoadMapInstanceType(map);
+  TNode<Uint16T> search_type = LoadMapInstanceType(map);
   GotoIf(IsStringInstanceType(search_type), &string_loop);
   GotoIf(IsBigIntInstanceType(search_type), &bigint_loop);
   Goto(&ident_loop);
@@ -1076,9 +1076,9 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
   {
     GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
               &return_not_found);
-    Node* element_k =
+    TNode<Object> element_k =
         UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
-    GotoIf(WordEqual(element_k, search_element), &return_found);
+    GotoIf(TaggedEqual(element_k, search_element), &return_found);
 
     Increment(&index_var);
     Goto(&ident_loop);
@@ -1089,7 +1089,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
 
     GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
               &return_not_found);
-    Node* element_k =
+    TNode<Object> element_k =
         UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
     GotoIf(IsUndefined(element_k), &return_found);
     GotoIf(IsTheHole(element_k), &return_found);
@@ -1109,15 +1109,16 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
       Label continue_loop(this), not_smi(this);
       GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
                 &return_not_found);
-      Node* element_k =
+      TNode<Object> element_k =
           UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
       GotoIfNot(TaggedIsSmi(element_k), &not_smi);
-      Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
+      Branch(Float64Equal(search_num.value(), SmiToFloat64(CAST(element_k))),
              &return_found, &continue_loop);
 
       BIND(&not_smi);
-      GotoIfNot(IsHeapNumber(element_k), &continue_loop);
-      Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
+      GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
+      Branch(Float64Equal(search_num.value(),
+                          LoadHeapNumberValue(CAST(element_k))),
              &return_found, &continue_loop);
 
       BIND(&continue_loop);
@@ -1131,11 +1132,11 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
       Label continue_loop(this);
       GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
                 &return_not_found);
-      Node* element_k =
+      TNode<Object> element_k =
           UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
       GotoIf(TaggedIsSmi(element_k), &continue_loop);
       GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
-      BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
+      BranchIfFloat64IsNaN(LoadHeapNumberValue(CAST(element_k)), &return_found,
                            &continue_loop);
 
       BIND(&continue_loop);
@@ -1155,24 +1156,24 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
     BIND(&next_iteration);
     GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
               &return_not_found);
-    Node* element_k =
+    TNode<Object> element_k =
         UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
     GotoIf(TaggedIsSmi(element_k), &continue_loop);
-    GotoIf(WordEqual(search_element_string, element_k), &return_found);
-    Node* element_k_type = LoadInstanceType(element_k);
+    GotoIf(TaggedEqual(search_element_string, element_k), &return_found);
+    TNode<Uint16T> element_k_type = LoadInstanceType(CAST(element_k));
     GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
-    Branch(WordEqual(search_length, LoadStringLengthAsWord(element_k)),
+    Branch(IntPtrEqual(search_length, LoadStringLengthAsWord(CAST(element_k))),
            &slow_compare, &continue_loop);
 
     BIND(&slow_compare);
     StringBuiltinsAssembler string_asm(state());
-    string_asm.StringEqual_Core(context, search_element_string, search_type,
-                                element_k, element_k_type, search_length,
+    string_asm.StringEqual_Core(search_element_string, search_type,
+                                CAST(element_k), element_k_type, search_length,
                                 &return_found, &continue_loop, &runtime);
     BIND(&runtime);
     TNode<Object> result = CallRuntime(Runtime::kStringEqual, context,
                                        search_element_string, element_k);
-    Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
+    Branch(TaggedEqual(result, TrueConstant()), &return_found, &continue_loop);
 
     BIND(&continue_loop);
     Increment(&index_var);
@@ -1184,14 +1185,14 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
     GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
               &return_not_found);
 
-    Node* element_k =
+    TNode<Object> element_k =
         UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
     Label continue_loop(this);
     GotoIf(TaggedIsSmi(element_k), &continue_loop);
     GotoIfNot(IsBigInt(CAST(element_k)), &continue_loop);
     TNode<Object> result = CallRuntime(Runtime::kBigIntEqualToBigInt, context,
                                        search_element, element_k);
-    Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
+    Branch(TaggedEqual(result, TrueConstant()), &return_found, &continue_loop);
 
     BIND(&continue_loop);
     Increment(&index_var);
@@ -1217,24 +1218,23 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
                                                           Node* search_element,
                                                           Node* array_length,
                                                           Node* from_index) {
-  VARIABLE(index_var, MachineType::PointerRepresentation(),
-           SmiUntag(from_index));
-  Node* array_length_untagged = SmiUntag(array_length);
+  TVARIABLE(IntPtrT, index_var, SmiUntag(from_index));
+  TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
 
   Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
       hole_loop(this, &index_var), search_notnan(this), return_found(this),
       return_not_found(this);
-  VARIABLE(search_num, MachineRepresentation::kFloat64);
-  search_num.Bind(Float64Constant(0));
+  TVARIABLE(Float64T, search_num);
+  search_num = Float64Constant(0);
 
   GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
-  search_num.Bind(SmiToFloat64(search_element));
+  search_num = SmiToFloat64(search_element);
   Goto(&not_nan_loop);
 
   BIND(&search_notnan);
   GotoIfNot(IsHeapNumber(search_element), &return_not_found);
 
-  search_num.Bind(LoadHeapNumberValue(search_element));
+  search_num = LoadHeapNumberValue(search_element);
 
   Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
   BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
@@ -1244,8 +1244,8 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
     Label continue_loop(this);
     GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
               &return_not_found);
-    Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
-                                                  MachineType::Float64());
+    TNode<Float64T> element_k = LoadFixedDoubleArrayElement(
+        elements, index_var.value(), MachineType::Float64());
     Branch(Float64Equal(element_k, search_num.value()), &return_found,
            &continue_loop);
     BIND(&continue_loop);
@@ -1259,8 +1259,8 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
     Label continue_loop(this);
     GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
               &return_not_found);
-    Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
-                                                  MachineType::Float64());
+    TNode<Float64T> element_k = LoadFixedDoubleArrayElement(
+        elements, index_var.value(), MachineType::Float64());
     BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
     BIND(&continue_loop);
     Increment(&index_var);
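
Note the asymmetry above: a NaN search value reaches &nan_loop only for kIncludes, while kIndexOf routes it straight to &return_not_found. That mirrors the spec: Array.prototype.includes uses SameValueZero (NaN matches NaN), while indexOf uses strict equality (NaN matches nothing). A self-contained sketch of the semantic difference:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    enum SearchVariant { kIncludes, kIndexOf };

    // includes: NaN needle matches NaN elements; indexOf: never matches.
    std::ptrdiff_t Search(const std::vector<double>& elems, double needle,
                          SearchVariant variant) {
      const bool needle_is_nan = std::isnan(needle);
      if (needle_is_nan && variant == kIndexOf) return -1;  // not found
      for (std::size_t i = 0; i < elems.size(); ++i) {
        if (needle_is_nan ? std::isnan(elems[i]) : elems[i] == needle) {
          return static_cast<std::ptrdiff_t>(i);  // found
        }
      }
      return -1;
    }

Hence [NaN].includes(NaN) is true while [NaN].indexOf(NaN) is -1.
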
@@ -1287,18 +1287,17 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
                                                          Node* search_element,
                                                          Node* array_length,
                                                          Node* from_index) {
-  VARIABLE(index_var, MachineType::PointerRepresentation(),
-           SmiUntag(from_index));
-  Node* array_length_untagged = SmiUntag(array_length);
+  TVARIABLE(IntPtrT, index_var, SmiUntag(from_index));
+  TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
 
   Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
       hole_loop(this, &index_var), search_notnan(this), return_found(this),
       return_not_found(this);
-  VARIABLE(search_num, MachineRepresentation::kFloat64);
-  search_num.Bind(Float64Constant(0));
+  TVARIABLE(Float64T, search_num);
+  search_num = Float64Constant(0);
 
   GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
-  search_num.Bind(SmiToFloat64(search_element));
+  search_num = SmiToFloat64(search_element);
   Goto(&not_nan_loop);
 
   BIND(&search_notnan);
@@ -1307,7 +1306,7 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
   }
   GotoIfNot(IsHeapNumber(search_element), &return_not_found);
 
-  search_num.Bind(LoadHeapNumberValue(search_element));
+  search_num = LoadHeapNumberValue(search_element);
 
   Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
   BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
@@ -1320,8 +1319,8 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
 
     // No need for hole checking here; the following Float64Equal will
     // return 'not equal' for holes anyway.
-    Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
-                                                  MachineType::Float64());
+    TNode<Float64T> element_k = LoadFixedDoubleArrayElement(
+        elements, index_var.value(), MachineType::Float64());
 
     Branch(Float64Equal(element_k, search_num.value()), &return_found,
            &continue_loop);
@@ -1338,7 +1337,7 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
               &return_not_found);
 
     // Load double value or continue if it's the hole NaN.
-    Node* element_k = LoadFixedDoubleArrayElement(
+    TNode<Float64T> element_k = LoadFixedDoubleArrayElement(
         elements, index_var.value(), MachineType::Float64(), 0,
         INTPTR_PARAMETERS, &continue_loop);
 
@@ -1387,9 +1386,9 @@ TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
 }
 
 TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   Node* elements = Parameter(Descriptor::kElements);
-  Node* search_element = Parameter(Descriptor::kSearchElement);
+  TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
   Node* array_length = Parameter(Descriptor::kLength);
   Node* from_index = Parameter(Descriptor::kFromIndex);
 
@@ -1426,9 +1425,9 @@ TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
 }
 
 TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   Node* elements = Parameter(Descriptor::kElements);
-  Node* search_element = Parameter(Descriptor::kSearchElement);
+  TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
   Node* array_length = Parameter(Descriptor::kLength);
   Node* from_index = Parameter(Descriptor::kFromIndex);
 
@@ -1512,7 +1511,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
 
   // Dispatch based on the type of the {array}.
   TNode<Map> array_map = LoadMap(array);
-  TNode<Int32T> array_type = LoadMapInstanceType(array_map);
+  TNode<Uint16T> array_type = LoadMapInstanceType(array_map);
   GotoIf(InstanceTypeEqual(array_type, JS_ARRAY_TYPE), &if_array);
   Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_typedarray,
          &if_other);
@@ -1662,7 +1661,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
 
   BIND(&allocate_iterator_result);
   {
-    Node* result =
+    TNode<JSObject> result =
         AllocateJSIteratorResult(context, var_value.value(), var_done.value());
     Return(result);
   }
@@ -1705,7 +1704,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
       // b. Let exists be ? HasProperty(source, P).
       CSA_ASSERT(this,
                  SmiGreaterThanOrEqual(CAST(source_index), SmiConstant(0)));
-      Node* const exists =
+      TNode<Oddball> const exists =
           HasProperty(context, source, source_index, kHasProperty);
 
       // c. If exists is true, then
@@ -1713,7 +1712,8 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
       GotoIfNot(IsTrue(exists), &next);
       {
         // i. Let element be ? Get(source, P).
-        Node* element = GetProperty(context, source, source_index);
+        TNode<Object> element_maybe_smi =
+            GetProperty(context, source, source_index);
 
         // ii. If mapperFunction is present, then
         if (mapper_function != nullptr) {
@@ -1723,9 +1723,9 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
 
           // 1. Set element to ? Call(mapperFunction, thisArg, « element,
           //                          sourceIndex, source »).
-          element =
+          element_maybe_smi = CAST(
               CallJS(CodeFactory::Call(isolate()), context, mapper_function,
-                     this_arg, element, source_index, source);
+                     this_arg, element_maybe_smi, source_index, source));
         }
 
         // iii. Let shouldFlatten be false.
@@ -1734,7 +1734,8 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
         // iv. If depth > 0, then
         GotoIfNumberGreaterThanOrEqual(SmiConstant(0), depth, &if_noflatten);
         // 1. Set shouldFlatten to ? IsArray(element).
-        GotoIf(TaggedIsSmi(element), &if_noflatten);
+        GotoIf(TaggedIsSmi(element_maybe_smi), &if_noflatten);
+        TNode<HeapObject> element = CAST(element_maybe_smi);
         GotoIf(IsJSArray(element), &if_flatten_array);
         GotoIfNot(IsJSProxy(element), &if_noflatten);
         Branch(IsTrue(CallRuntime(Runtime::kArrayIsArray, context, element)),
@@ -1745,7 +1746,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
           CSA_ASSERT(this, IsJSArray(element));
 
           // 1. Let elementLen be ? ToLength(? Get(element, "length")).
-          Node* const element_length =
+          TNode<Object> const element_length =
               LoadObjectField(element, JSArray::kLengthOffset);
 
           // 2. Set targetIndex to ? FlattenIntoArray(target, element,
@@ -1762,7 +1763,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
           CSA_ASSERT(this, IsJSProxy(element));
 
           // 1. Let elementLen be ? ToLength(? Get(element, "length")).
-          Node* const element_length = ToLength_Inline(
+          TNode<Number> const element_length = ToLength_Inline(
               context, GetProperty(context, element, LengthStringConstant()));
 
           // 2. Set targetIndex to ? FlattenIntoArray(target, element,
@@ -1872,7 +1873,7 @@ TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
   // 5. Let A be ? ArraySpeciesCreate(O, 0).
   TNode<JSReceiver> const constructor =
       CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context, o));
-  Node* const a = Construct(context, constructor, SmiConstant(0));
+  TNode<JSReceiver> const a = Construct(context, constructor, SmiConstant(0));
 
   // 6. Perform ? FlattenIntoArray(A, O, sourceLen, 0, depthNum).
   CallBuiltin(Builtins::kFlattenIntoArray, context, a, o, source_length,
@@ -1937,7 +1938,7 @@ TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
       SelectConstant<Object>(IsUndefined(new_target), function, new_target);
 
   // Run the native code for the Array function called as a normal function.
-  TNode<Object> no_allocation_site = UndefinedConstant();
+  TNode<Oddball> no_allocation_site = UndefinedConstant();
   TailCallBuiltin(Builtins::kArrayConstructorImpl, context, function,
                   new_target, argc, no_allocation_site);
 }
@@ -2105,7 +2106,7 @@ TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) {
       CAST(LoadObjectField(target, JSFunction::kContextOffset));
 
   Label runtime(this, Label::kDeferred);
-  GotoIf(WordNotEqual(target, new_target), &runtime);
+  GotoIf(TaggedNotEqual(target, new_target), &runtime);
 
   Label no_info(this);
   // If the feedback vector is the undefined value call an array constructor
@@ -2143,7 +2144,8 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
     Branch(SmiEqual(CAST(array_size), SmiConstant(0)), &small_smi_size, &abort);
 
     BIND(&abort);
-    Node* reason = SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray);
+    TNode<Smi> reason =
+        SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray);
     TailCallRuntime(Runtime::kAbort, context, reason);
   } else {
     int element_size =
@@ -2175,8 +2177,8 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
 void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
     ElementsKind kind, AllocationSiteOverrideMode mode) {
   using Descriptor = ArrayNoArgumentConstructorDescriptor;
-  Node* native_context = LoadObjectField(Parameter(Descriptor::kFunction),
-                                         JSFunction::kContextOffset);
+  TNode<NativeContext> native_context = CAST(LoadObjectField(
+      Parameter(Descriptor::kFunction), JSFunction::kContextOffset));
   bool track_allocation_site =
       AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES;
   Node* allocation_site =
@@ -2191,10 +2193,11 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
 void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
     ElementsKind kind, AllocationSiteOverrideMode mode) {
   using Descriptor = ArraySingleArgumentConstructorDescriptor;
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   Node* function = Parameter(Descriptor::kFunction);
-  Node* native_context = LoadObjectField(function, JSFunction::kContextOffset);
-  Node* array_map = LoadJSArrayElementsMap(kind, native_context);
+  TNode<NativeContext> native_context =
+      CAST(LoadObjectField(function, JSFunction::kContextOffset));
+  TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
 
   AllocationSiteMode allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
   if (mode == DONT_OVERRIDE) {
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 96c10ed0fd545e..6c3e7246492157 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -7,6 +7,7 @@
 #include "src/codegen/code-factory.h"
 #include "src/debug/debug.h"
 #include "src/execution/isolate.h"
+#include "src/execution/protectors-inl.h"
 #include "src/handles/global-handles.h"
 #include "src/logging/counters.h"
 #include "src/objects/contexts.h"
@@ -782,10 +783,10 @@ class ArrayConcatVisitor {
     storage_ = isolate_->global_handles()->Create(storage);
   }
 
-  class FastElementsField : public BitField<bool, 0, 1> {};
-  class ExceedsLimitField : public BitField<bool, 1, 1> {};
-  class IsFixedArrayField : public BitField<bool, 2, 1> {};
-  class HasSimpleElementsField : public BitField<bool, 3, 1> {};
+  using FastElementsField = BitField<bool, 0, 1>;
+  using ExceedsLimitField = BitField<bool, 1, 1>;
+  using IsFixedArrayField = BitField<bool, 2, 1>;
+  using HasSimpleElementsField = BitField<bool, 3, 1>;
 
   bool fast_elements() const { return FastElementsField::decode(bit_field_); }
   void set_fast_elements(bool fast) {
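
The using-aliases introduced above pack four booleans into a single bit_field_ word via V8's BitField<T, shift, size> template. A minimal self-contained analogue for one-bit bool fields (simplified relative to V8's real BitField; encode/decode/update shown for the same four flags):

    #include <cstdint>

    // Minimal analogue of BitField<bool, kShift, 1>.
    template <int kShift>
    struct BoolBitField {
      static constexpr uint32_t kMask = 1u << kShift;
      static constexpr uint32_t encode(bool v) { return v ? kMask : 0u; }
      static constexpr bool decode(uint32_t field) {
        return (field & kMask) != 0;
      }
      static constexpr uint32_t update(uint32_t field, bool v) {
        return (field & ~kMask) | encode(v);
      }
    };

    // Same layout as the four flags above: one bit each, shifts 0..3.
    using FastElementsField = BoolBitField<0>;
    using ExceedsLimitField = BoolBitField<1>;
    using IsFixedArrayField = BoolBitField<2>;
    using HasSimpleElementsField = BoolBitField<3>;

    static_assert(HasSimpleElementsField::decode(
                      HasSimpleElementsField::update(0, true)),
                  "flag round-trips through bit 3");
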
@@ -819,8 +820,10 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
     case PACKED_ELEMENTS:
     case PACKED_FROZEN_ELEMENTS:
     case PACKED_SEALED_ELEMENTS:
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_FROZEN_ELEMENTS:
     case HOLEY_SEALED_ELEMENTS:
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_ELEMENTS: {
       // Fast elements can't have lengths that are not representable by
       // a 32-bit signed integer.
@@ -887,9 +890,11 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
     case PACKED_ELEMENTS:
     case PACKED_FROZEN_ELEMENTS:
     case PACKED_SEALED_ELEMENTS:
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_SMI_ELEMENTS:
     case HOLEY_FROZEN_ELEMENTS:
     case HOLEY_SEALED_ELEMENTS:
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_ELEMENTS: {
       DisallowHeapAllocation no_gc;
       FixedArray elements = FixedArray::cast(object->elements());
@@ -1063,9 +1068,11 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
     case PACKED_ELEMENTS:
     case PACKED_FROZEN_ELEMENTS:
     case PACKED_SEALED_ELEMENTS:
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_SMI_ELEMENTS:
     case HOLEY_FROZEN_ELEMENTS:
     case HOLEY_SEALED_ELEMENTS:
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_ELEMENTS: {
       // Run through the elements FixedArray and use HasElement and GetElement
       // to check the prototype for missing elements.
@@ -1219,7 +1226,7 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
       if (length_estimate != 0) {
         ElementsKind array_kind =
             GetPackedElementsKind(array->GetElementsKind());
-        if (IsFrozenOrSealedElementsKind(array_kind)) {
+        if (IsAnyNonextensibleElementsKind(array_kind)) {
           array_kind = PACKED_ELEMENTS;
         }
         kind = GetMoreGeneralElementsKind(kind, array_kind);
@@ -1315,9 +1322,11 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
             case HOLEY_ELEMENTS:
             case HOLEY_FROZEN_ELEMENTS:
             case HOLEY_SEALED_ELEMENTS:
+            case HOLEY_NONEXTENSIBLE_ELEMENTS:
             case PACKED_ELEMENTS:
             case PACKED_FROZEN_ELEMENTS:
             case PACKED_SEALED_ELEMENTS:
+            case PACKED_NONEXTENSIBLE_ELEMENTS:
             case DICTIONARY_ELEMENTS:
             case NO_ELEMENTS:
               DCHECK_EQ(0u, length);
@@ -1460,7 +1469,7 @@ BUILTIN(ArrayConcat) {
   // Avoid a real species read to avoid extra lookups to the array constructor
   if (V8_LIKELY(receiver->IsJSArray() &&
                 Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
-                isolate->IsArraySpeciesLookupChainIntact())) {
+                Protectors::IsArraySpeciesLookupChainIntact(isolate))) {
     if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
       return *result_array;
     }
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index a95365e4255c36..6ac37da3f6f6e9 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -109,7 +109,7 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
   TNode<HeapObject> base = AllocateInNewSpace(size);
 
   // Initialize the promise.
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<JSFunction> promise_function =
       CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
   TNode<Map> promise_map = LoadObjectField<Map>(
@@ -263,8 +263,8 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
   TNode<Object> value = CAST(Parameter(Descriptor::kValue));
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
-  Node* outer_promise = LoadObjectField(async_function_object,
-                                        JSAsyncFunctionObject::kPromiseOffset);
+  TNode<Object> outer_promise = LoadObjectField(
+      async_function_object, JSAsyncFunctionObject::kPromiseOffset);
 
   Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
   GotoIf(HasAsyncEventDelegate(), &call_debug_hook);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 6c04037a632cdd..70d4eac9c8be28 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -28,7 +28,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
                                        Node* on_resolve_context_index,
                                        Node* on_reject_context_index,
                                        Node* is_predicted_as_caught) {
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
 
   static const int kWrappedPromiseOffset =
       FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS);
@@ -46,7 +46,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
     StoreMapNoWriteBarrier(closure_context, RootIndex::kAwaitContextMap);
     StoreObjectFieldNoWriteBarrier(closure_context, Context::kLengthOffset,
                                    SmiConstant(Context::MIN_CONTEXT_SLOTS));
-    Node* const empty_scope_info =
+    TNode<Object> const empty_scope_info =
         LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
     StoreContextElementNoWriteBarrier(
         closure_context, Context::SCOPE_INFO_INDEX, empty_scope_info);
@@ -59,16 +59,17 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
   }
 
   // Let promiseCapability be ! NewPromiseCapability(%Promise%).
-  Node* const promise_fun =
-      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  TNode<JSFunction> const promise_fun =
+      CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
   CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
-  Node* const promise_map =
-      LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+  TNode<Map> const promise_map = CAST(
+      LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset));
   // Assert that the JSPromise map has an instance size of
   // JSPromise::kSizeWithEmbedderFields.
-  CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
-                             IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
-                                            kTaggedSize)));
+  CSA_ASSERT(this,
+             IntPtrEqual(LoadMapInstanceSizeInWords(promise_map),
+                         IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
+                                        kTaggedSize)));
   TNode<HeapObject> wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
   {
     // Initialize Promise
@@ -118,7 +119,7 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
                                              Node* on_resolve_context_index,
                                              Node* on_reject_context_index,
                                              Node* is_predicted_as_caught) {
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   CSA_ASSERT(this, IsJSPromise(promise));
 
   static const int kResolveClosureOffset =
@@ -139,7 +140,7 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
     StoreMapNoWriteBarrier(closure_context, RootIndex::kAwaitContextMap);
     StoreObjectFieldNoWriteBarrier(closure_context, Context::kLengthOffset,
                                    SmiConstant(Context::MIN_CONTEXT_SLOTS));
-    Node* const empty_scope_info =
+    TNode<Object> const empty_scope_info =
         LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
     StoreContextElementNoWriteBarrier(
         closure_context, Context::SCOPE_INFO_INDEX, empty_scope_info);
@@ -196,16 +197,16 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
   // to allocate the wrapper promise and can just use the `AwaitOptimized`
   // logic.
   GotoIf(TaggedIsSmi(value), &if_old);
-  Node* const value_map = LoadMap(value);
+  TNode<Map> const value_map = LoadMap(value);
   GotoIfNot(IsJSPromiseMap(value_map), &if_old);
   // We can skip the "constructor" lookup on {value} if it's [[Prototype]]
   // is the (initial) Promise.prototype and the @@species protector is
   // intact, as that guards the lookup path for "constructor" on
   // JSPromise instances which have the (initial) Promise.prototype.
-  Node* const native_context = LoadNativeContext(context);
-  Node* const promise_prototype =
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Object> const promise_prototype =
       LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
-  GotoIfNot(WordEqual(LoadMapPrototype(value_map), promise_prototype),
+  GotoIfNot(TaggedEqual(LoadMapPrototype(value_map), promise_prototype),
             &if_slow_constructor);
   Branch(IsPromiseSpeciesProtectorCellInvalid(), &if_slow_constructor, &if_new);
 
@@ -214,11 +215,11 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
   // have the %Promise% as its "constructor", so we need to check that as well.
   BIND(&if_slow_constructor);
   {
-    Node* const value_constructor =
+    TNode<Object> const value_constructor =
         GetProperty(context, value, isolate()->factory()->constructor_string());
-    Node* const promise_function =
+    TNode<Object> const promise_function =
         LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
-    Branch(WordEqual(value_constructor, promise_function), &if_new, &if_old);
+    Branch(TaggedEqual(value_constructor, promise_function), &if_new, &if_old);
   }
 
   BIND(&if_old);
@@ -245,9 +246,10 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
   // Ensure that we don't have to initialize prototype_or_initial_map field of
   // JSFunction.
-  CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map),
-                             IntPtrConstant(JSFunction::kSizeWithoutPrototype /
-                                            kTaggedSize)));
+  CSA_ASSERT(this,
+             IntPtrEqual(LoadMapInstanceSizeInWords(function_map),
+                         IntPtrConstant(JSFunction::kSizeWithoutPrototype /
+                                        kTaggedSize)));
   STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
   StoreMapNoWriteBarrier(function, function_map);
   StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
@@ -276,12 +278,10 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
 
 Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
                                                   Node* done) {
-  Node* const map = LoadContextElement(
+  TNode<Object> const map = LoadContextElement(
       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
-  Node* const on_fulfilled_shared = LoadContextElement(
-      native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN);
-  CSA_ASSERT(this,
-             HasInstanceType(on_fulfilled_shared, SHARED_FUNCTION_INFO_TYPE));
+  TNode<SharedFunctionInfo> const on_fulfilled_shared = CAST(LoadContextElement(
+      native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN));
   Node* const closure_context =
       AllocateAsyncIteratorValueUnwrapContext(native_context, done);
   return AllocateFunctionWithMapAndContext(map, on_fulfilled_shared,
@@ -304,10 +304,11 @@ TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
   Node* const value = Parameter(Descriptor::kValue);
   Node* const context = Parameter(Descriptor::kContext);
 
-  Node* const done = LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
-  CSA_ASSERT(this, IsBoolean(done));
+  TNode<Object> const done =
+      LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
+  CSA_ASSERT(this, IsBoolean(CAST(done)));
 
-  Node* const unwrapped_value =
+  TNode<Object> const unwrapped_value =
       CallBuiltin(Builtins::kCreateIterResultObject, context, value, done);
 
   Return(unwrapped_value);
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index d14e811db8d932..8053cf0dc8b268 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -25,12 +25,12 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
 
   inline Node* TaggedIsAsyncGenerator(Node* tagged_object) {
     TNode<BoolT> if_notsmi = TaggedIsNotSmi(tagged_object);
-    return Select<BoolT>(if_notsmi,
-                         [=] {
-                           return HasInstanceType(
-                               tagged_object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
-                         },
-                         [=] { return if_notsmi; });
+    return Select<BoolT>(
+        if_notsmi,
+        [=] {
+          return HasInstanceType(tagged_object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+        },
+        [=] { return if_notsmi; });
   }
   inline Node* LoadGeneratorState(Node* const generator) {
     return LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
@@ -68,7 +68,7 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
   inline TNode<BoolT> IsGeneratorAwaiting(Node* const generator) {
     TNode<Object> is_generator_awaiting =
         LoadObjectField(generator, JSAsyncGeneratorObject::kIsAwaitingOffset);
-    return WordEqual(is_generator_awaiting, SmiConstant(1));
+    return TaggedEqual(is_generator_awaiting, SmiConstant(1));
   }
 
   inline void SetGeneratorAwaiting(Node* const generator) {
@@ -93,8 +93,8 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
 
   inline Node* IsFastJSIterResult(Node* const value, Node* const context) {
     CSA_ASSERT(this, TaggedIsNotSmi(value));
-    Node* const native_context = LoadNativeContext(context);
-    return WordEqual(
+    TNode<NativeContext> const native_context = LoadNativeContext(context);
+    return TaggedEqual(
         LoadMap(value),
         LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
   }
@@ -200,7 +200,7 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
     JSAsyncGeneratorObject::ResumeMode resume_mode, Node* resume_value,
     Node* promise) {
   CSA_SLOW_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
-  Node* request = Allocate(AsyncGeneratorRequest::kSize);
+  TNode<HeapObject> request = Allocate(AsyncGeneratorRequest::kSize);
   StoreMapNoWriteBarrier(request, RootIndex::kAsyncGeneratorRequestMap);
   StoreObjectFieldNoWriteBarrier(request, AsyncGeneratorRequest::kNextOffset,
                                  UndefinedConstant());
@@ -219,7 +219,8 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
 void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
     Node* context, Node* value,
     JSAsyncGeneratorObject::ResumeMode resume_mode) {
-  Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
+  TNode<Object> const generator =
+      LoadContextElement(context, Context::EXTENSION_INDEX);
   CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
 
   SetGeneratorNotAwaiting(generator);
@@ -276,7 +277,8 @@ void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue(
   {
     Label loop_next(this), next_empty(this);
     Node* current = var_current.value();
-    Node* next = LoadObjectField(current, AsyncGeneratorRequest::kNextOffset);
+    TNode<Object> next =
+        LoadObjectField(current, AsyncGeneratorRequest::kNextOffset);
 
     Branch(IsUndefined(next), &next_empty, &loop_next);
     BIND(&next_empty);
@@ -299,11 +301,11 @@ Node* AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
   // Removes and returns the first AsyncGeneratorRequest from a
   // JSAsyncGeneratorObject's queue. Asserts that the queue is not empty.
   CSA_ASSERT(this, TaggedIsAsyncGenerator(generator));
-  Node* request =
-      LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset);
-  CSA_ASSERT(this, IsNotUndefined(request));
+  TNode<AsyncGeneratorRequest> request =
+      CAST(LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset));
 
-  Node* next = LoadObjectField(request, AsyncGeneratorRequest::kNextOffset);
+  TNode<Object> next =
+      LoadObjectField(request, AsyncGeneratorRequest::kNextOffset);
 
   StoreObjectField(generator, JSAsyncGeneratorObject::kQueueOffset, next);
   return request;
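`TakeFirstAsyncGeneratorRequestFromQueue` is a plain singly-linked-list pop: load the head request, load its `next` field, and store `next` back as the new queue head. The same operation in standalone C++ (stand-in types, not V8's object layout):

```cpp
#include <cassert>
#include <cstdio>

// Stand-in for AsyncGeneratorRequest: an intrusive singly linked node.
struct Request {
  int resume_mode;
  Request* next;
};

struct Generator {
  Request* queue;  // head of the request queue; nullptr plays "undefined"
};

// Mirrors TakeFirstAsyncGeneratorRequestFromQueue: asserts the queue is
// not empty, then unlinks and returns the head.
Request* TakeFirst(Generator* gen) {
  Request* request = gen->queue;
  assert(request != nullptr);
  gen->queue = request->next;  // the old head's `next` becomes the new head
  return request;
}

int main() {
  Request second{1, nullptr};
  Request first{0, &second};
  Generator gen{&first};
  std::printf("%d\n", TakeFirst(&gen)->resume_mode);  // 0
  std::printf("%d\n", TakeFirst(&gen)->resume_mode);  // 1
}
```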
@@ -315,12 +317,12 @@ Node* AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
 TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
   const int kValueArg = 0;
 
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
-  Node* generator = args.GetReceiver();
-  Node* value = args.GetOptionalArgumentValue(kValueArg);
+  TNode<Object> generator = args.GetReceiver();
+  TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
   Node* context = Parameter(Descriptor::kContext);
 
   AsyncGeneratorEnqueue(&args, context, generator, value,
@@ -333,12 +335,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
 TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
   const int kValueArg = 0;
 
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
-  Node* generator = args.GetReceiver();
-  Node* value = args.GetOptionalArgumentValue(kValueArg);
+  TNode<Object> generator = args.GetReceiver();
+  TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
   Node* context = Parameter(Descriptor::kContext);
 
   AsyncGeneratorEnqueue(&args, context, generator, value,
@@ -351,12 +353,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
 TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
   const int kValueArg = 0;
 
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
-  Node* generator = args.GetReceiver();
-  Node* value = args.GetOptionalArgumentValue(kValueArg);
+  TNode<Object> generator = args.GetReceiver();
+  TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
   Node* context = Parameter(Descriptor::kContext);
 
   AsyncGeneratorEnqueue(&args, context, generator, value,
@@ -446,8 +448,8 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
     // generator is not closed, resume the generator with a "throw" completion.
     // If the generator was closed, perform AsyncGeneratorReject(thrownValue).
     // In all cases, the last step is to call AsyncGeneratorResumeNext.
-    Node* is_caught = CallRuntime(Runtime::kAsyncGeneratorHasCatchHandlerForPC,
-                                  context, generator);
+    TNode<Object> is_caught = CallRuntime(
+        Runtime::kAsyncGeneratorHasCatchHandlerForPC, context, generator);
     TailCallBuiltin(Builtins::kAsyncGeneratorReturn, context, generator,
                     next_value, is_caught);
 
@@ -501,10 +503,10 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
   Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next);
 
   // Let iteratorResult be CreateIterResultObject(value, done).
-  Node* const iter_result = Allocate(JSIteratorResult::kSize);
+  TNode<HeapObject> const iter_result = Allocate(JSIteratorResult::kSize);
   {
-    Node* map = LoadContextElement(LoadNativeContext(context),
-                                   Context::ITERATOR_RESULT_MAP_INDEX);
+    TNode<Object> map = LoadContextElement(LoadNativeContext(context),
+                                           Context::ITERATOR_RESULT_MAP_INDEX);
     StoreMapNoWriteBarrier(iter_result, map);
     StoreObjectFieldRoot(iter_result, JSIteratorResult::kPropertiesOrHashOffset,
                          RootIndex::kEmptyFixedArray);
@@ -585,7 +587,8 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
 TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
   Node* const value = Parameter(Descriptor::kValue);
-  Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
+  TNode<Object> const generator =
+      LoadContextElement(context, Context::EXTENSION_INDEX);
 
   SetGeneratorNotAwaiting(generator);
 
@@ -665,7 +668,8 @@ TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
            AsyncGeneratorBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
   Node* const value = Parameter(Descriptor::kValue);
-  Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
+  TNode<Object> const generator =
+      LoadContextElement(context, Context::EXTENSION_INDEX);
 
   SetGeneratorNotAwaiting(generator);
 
@@ -682,7 +686,8 @@ TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
            AsyncGeneratorBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
   Node* const value = Parameter(Descriptor::kValue);
-  Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
+  TNode<Object> const generator =
+      LoadContextElement(context, Context::EXTENSION_INDEX);
 
   SetGeneratorNotAwaiting(generator);
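Another recurring change in this file is `WordEqual` to `TaggedEqual` for comparisons of tagged values. The distinction matters once tagged values are not full machine words, notably under pointer compression, where equality can be decided directly on the compressed representation. A hedged sketch of that idea (the layout below is an assumption for illustration; V8's real scheme differs in detail):

```cpp
#include <cstdint>
#include <iostream>

// Assume a compressed-pointer scheme where a tagged value is stored as a
// 32-bit offset from a per-heap "cage" base.
using Tagged_t = uint32_t;

const uint64_t kHeapBase = 0x100000000ull;  // hypothetical aligned base

Tagged_t Compress(uint64_t full_ptr) { return static_cast<Tagged_t>(full_ptr); }
uint64_t Decompress(Tagged_t compressed) { return kHeapBase + compressed; }

// TaggedEqual: two tagged values refer to the same object iff their
// compressed words match; no decompression to full words is needed.
bool TaggedEqual(Tagged_t a, Tagged_t b) { return a == b; }

int main() {
  uint64_t object_addr = kHeapBase + 0x4048;
  Tagged_t a = Compress(object_addr);
  Tagged_t b = Compress(object_addr);
  std::cout << std::boolalpha << TaggedEqual(a, b) << "\n";  // true
  std::cout << (Decompress(a) == object_addr) << "\n";       // round-trips
}
```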
 
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 215faa73b142eb..0b5c5ef8b962cd 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -98,7 +98,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
     const UndefinedMethodHandler& if_method_undefined,
     const char* operation_name, Label::Type reject_label_type,
     Node* const initial_exception_value) {
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   Node* const promise = AllocateAndInitJSPromise(context);
 
   VARIABLE(var_exception, MachineRepresentation::kTagged,
@@ -109,7 +109,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
   ThrowIfNotAsyncFromSyncIterator(context, iterator, &reject_promise,
                                   &var_exception, operation_name);
 
-  Node* const sync_iterator =
+  TNode<Object> const sync_iterator =
       LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
 
   Node* const method = get_method(sync_iterator);
@@ -132,13 +132,13 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
   std::tie(value, done) = LoadIteratorResult(
       context, native_context, iter_result, &reject_promise, &var_exception);
 
-  Node* const promise_fun =
-      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  TNode<JSFunction> const promise_fun =
+      CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
   CSA_ASSERT(this, IsConstructor(promise_fun));
 
   // Let valueWrapper be PromiseResolve(%Promise%, « value »).
-  Node* const value_wrapper = CallBuiltin(Builtins::kPromiseResolve,
-                                          native_context, promise_fun, value);
+  TNode<Object> const value_wrapper = CallBuiltin(
+      Builtins::kPromiseResolve, native_context, promise_fun, value);
   // IfAbruptRejectPromise(valueWrapper, promiseCapability).
   GotoIfException(value_wrapper, &reject_promise, &var_exception);
 
@@ -167,15 +167,15 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
       done(this), if_notanobject(this, Label::kDeferred);
   GotoIf(TaggedIsSmi(iter_result), &if_notanobject);
 
-  Node* const iter_result_map = LoadMap(iter_result);
+  TNode<Map> const iter_result_map = LoadMap(iter_result);
   GotoIfNot(IsJSReceiverMap(iter_result_map), &if_notanobject);
 
-  Node* const fast_iter_result_map =
+  TNode<Object> const fast_iter_result_map =
       LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
 
   VARIABLE(var_value, MachineRepresentation::kTagged);
   VARIABLE(var_done, MachineRepresentation::kTagged);
-  Branch(WordEqual(iter_result_map, fast_iter_result_map), &if_fastpath,
+  Branch(TaggedEqual(iter_result_map, fast_iter_result_map), &if_fastpath,
          &if_slowpath);
 
   BIND(&if_fastpath);
@@ -190,13 +190,13 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
   {
     // Let nextDone be IteratorComplete(nextResult).
     // IfAbruptRejectPromise(nextDone, promiseCapability).
-    Node* const done =
+    TNode<Object> const done =
         GetProperty(context, iter_result, factory()->done_string());
     GotoIfException(done, if_exception, var_exception);
 
     // Let nextValue be IteratorValue(nextResult).
     // IfAbruptRejectPromise(nextValue, promiseCapability).
-    Node* const value =
+    TNode<Object> const value =
         GetProperty(context, iter_result, factory()->value_string());
     GotoIfException(value, if_exception, var_exception);
 
@@ -222,7 +222,7 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
 
   BIND(&to_boolean);
   {
-    Node* const result =
+    TNode<Object> const result =
         CallBuiltin(Builtins::kToBoolean, context, var_done.value());
     var_done.Bind(result);
     Goto(&done);
@@ -261,8 +261,8 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
                                  Node* const promise, Label* if_exception) {
     // If return is undefined, then
     // Let iterResult be ! CreateIterResultObject(value, true)
-    Node* const iter_result = CallBuiltin(Builtins::kCreateIterResultObject,
-                                          context, value, TrueConstant());
+    TNode<Object> const iter_result = CallBuiltin(
+        Builtins::kCreateIterResultObject, context, value, TrueConstant());
 
     // Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
     // IfAbruptRejectPromise(nextDone, promiseCapability).
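Several sites above funnel into `Builtins::kCreateIterResultObject`, the spec operation that pairs a value with a `done` flag. Its effect, in a standalone C++ analogue (hypothetical result type):

```cpp
#include <iostream>
#include <string>

// Stand-in for a JS iterator result: CreateIterResultObject simply wraps
// a value together with a boolean `done`.
template <typename T>
struct IterResult {
  T value;
  bool done;
};

template <typename T>
IterResult<T> CreateIterResultObject(T value, bool done) {
  return IterResult<T>{value, done};
}

int main() {
  // AsyncFromSyncIteratorPrototypeReturn: CreateIterResultObject(value, true).
  auto result = CreateIterResultObject(std::string("finished"), true);
  std::cout << result.value << " done=" << result.done << "\n";
}
```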
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc
index d4818f0e010a9e..691ec7f8cea4d9 100644
--- a/deps/v8/src/builtins/builtins-bigint-gen.cc
+++ b/deps/v8/src/builtins/builtins-bigint-gen.cc
@@ -17,6 +17,24 @@ TF_BUILTIN(BigIntToI64, CodeStubAssembler) {
     return;
   }
 
+  TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+  TNode<BigInt> n = ToBigInt(context, value);
+
+  TVARIABLE(UintPtrT, var_low);
+  TVARIABLE(UintPtrT, var_high);
+
+  BigIntToRawBytes(n, &var_low, &var_high);
+  ReturnRaw(var_low.value());
+}
+
+// https://tc39.github.io/proposal-bigint/#sec-to-big-int64
+TF_BUILTIN(BigIntToI32Pair, CodeStubAssembler) {
+  if (!Is32()) {
+    Unreachable();
+    return;
+  }
+
   TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   TNode<BigInt> bigint = ToBigInt(context, value);
@@ -24,10 +42,9 @@ TF_BUILTIN(BigIntToI64, CodeStubAssembler) {
   TVARIABLE(UintPtrT, var_low);
   TVARIABLE(UintPtrT, var_high);
 
-  // 2. Let int64bit be n modulo 2^64.
-  // 3. If int64bit ≥ 2^63, return int64bit - 2^64;
   BigIntToRawBytes(bigint, &var_low, &var_high);
-  ReturnRaw(var_low.value());
+  Return(SloppyTNode<Object>(var_low.value()),
+         SloppyTNode<Object>(var_high.value()));
 }
 
 // https://tc39.github.io/proposal-bigint/#sec-bigint-constructor-number-value
@@ -43,5 +60,18 @@ TF_BUILTIN(I64ToBigInt, CodeStubAssembler) {
   Return(BigIntFromInt64(argument));
 }
 
+// https://tc39.github.io/proposal-bigint/#sec-bigint-constructor-number-value
+TF_BUILTIN(I32PairToBigInt, CodeStubAssembler) {
+  if (!Is32()) {
+    Unreachable();
+    return;
+  }
+
+  TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Parameter(Descriptor::kLow));
+  TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Parameter(Descriptor::kHigh));
+
+  Return(BigIntFromInt32Pair(low, high));
+}
+
 }  // namespace internal
 }  // namespace v8
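`BigIntToI32Pair` and `I32PairToBigInt` exist because a 32-bit target cannot carry a 64-bit value in one register: the raw BigInt bits travel as a (low, high) pair of 32-bit words instead, mirroring `BigIntToRawBytes` and `BigIntFromInt32Pair`. The underlying arithmetic is a plain split and recombine, sketched here for a single 64-bit digit (function names are illustrative):

```cpp
#include <cstdint>
#include <cstdio>

// Split a 64-bit value into the (low, high) 32-bit pair used on 32-bit
// targets, then recombine it.
void ToPair(uint64_t v, uint32_t* low, uint32_t* high) {
  *low = static_cast<uint32_t>(v);         // least-significant word
  *high = static_cast<uint32_t>(v >> 32);  // most-significant word
}

uint64_t FromPair(uint32_t low, uint32_t high) {
  return (static_cast<uint64_t>(high) << 32) | low;
}

int main() {
  uint64_t n = 0x0123456789ABCDEFull;
  uint32_t lo, hi;
  ToPair(n, &lo, &hi);
  std::printf("%08x %08x\n", hi, lo);                        // 01234567 89abcdef
  std::printf("roundtrip ok: %d\n", FromPair(lo, hi) == n);  // 1
}
```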
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 09d71a056275cb..1201ce97300ec0 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -91,7 +91,7 @@ MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
       isolate,
       NewTypeError(MessageTemplate::kNotGeneric,
                    isolate->factory()->NewStringFromAsciiChecked(caller),
-                   isolate->factory()->NewStringFromStaticChars("BigInt")),
+                   isolate->factory()->BigInt_string()),
       BigInt);
 }
 
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index deb91dee246811..91370b089679f6 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -118,15 +118,15 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
   GotoIf(TaggedIsSmi(arguments_list), &if_runtime);
 
   TNode<Map> arguments_list_map = LoadMap(CAST(arguments_list));
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
 
   // Check if {arguments_list} is an (unmodified) arguments object.
   TNode<Map> sloppy_arguments_map = CAST(
       LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
-  GotoIf(WordEqual(arguments_list_map, sloppy_arguments_map), &if_arguments);
+  GotoIf(TaggedEqual(arguments_list_map, sloppy_arguments_map), &if_arguments);
   TNode<Map> strict_arguments_map = CAST(
       LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX));
-  GotoIf(WordEqual(arguments_list_map, strict_arguments_map), &if_arguments);
+  GotoIf(TaggedEqual(arguments_list_map, strict_arguments_map), &if_arguments);
 
   // Check if {arguments_list} is a fast JSArray.
   Branch(IsJSArrayMap(arguments_list_map), &if_array, &if_runtime);
@@ -135,10 +135,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
   TVARIABLE(Int32T, var_length);
   BIND(&if_array);
   {
+    TNode<JSObject> js_object = CAST(arguments_list);
     // Try to extract the elements from a JSArray object.
-    var_elements = LoadElements(CAST(arguments_list));
+    var_elements = LoadElements(js_object);
     var_length =
-        LoadAndUntagToWord32ObjectField(arguments_list, JSArray::kLengthOffset);
+        LoadAndUntagToWord32ObjectField(js_object, JSArray::kLengthOffset);
 
     // Holey arrays and double backing stores need special treatment.
     STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
@@ -151,8 +152,9 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
 
     TNode<Int32T> kind = LoadMapElementsKind(arguments_list_map);
 
-    GotoIf(IsElementsKindGreaterThan(kind, LAST_FROZEN_ELEMENTS_KIND),
-           &if_runtime);
+    GotoIf(
+        IsElementsKindGreaterThan(kind, LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND),
+        &if_runtime);
     Branch(Word32And(kind, Int32Constant(1)), &if_holey_array, &if_done);
   }
 
@@ -173,7 +175,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
         js_arguments, JSArgumentsObjectWithLength::kLengthOffset);
     TNode<FixedArrayBase> elements = LoadElements(js_arguments);
     TNode<Smi> elements_length = LoadFixedArrayBaseLength(elements);
-    GotoIfNot(WordEqual(length, elements_length), &if_runtime);
+    GotoIfNot(TaggedEqual(length, elements_length), &if_runtime);
     var_elements = elements;
     var_length = SmiToInt32(CAST(length));
     Goto(&if_done);
@@ -292,11 +294,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
 
   // Check that the Array.prototype hasn't been modified in a way that would
   // affect iteration.
-  TNode<PropertyCell> protector_cell =
-      CAST(LoadRoot(RootIndex::kArrayIteratorProtector));
-  GotoIf(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
-                   SmiConstant(Isolate::kProtectorInvalid)),
-         &if_generic);
+  TNode<PropertyCell> protector_cell = ArrayIteratorProtectorConstant();
+  GotoIf(
+      TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+                  SmiConstant(Isolate::kProtectorInvalid)),
+      &if_generic);
   {
     // The fast-path accesses the {spread} elements directly.
     TNode<Int32T> spread_kind = LoadMapElementsKind(spread_map);
@@ -310,9 +312,9 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
            &if_smiorobject);
     GotoIf(IsElementsKindLessThanOrEqual(spread_kind, LAST_FAST_ELEMENTS_KIND),
            &if_double);
-    Branch(
-        IsElementsKindLessThanOrEqual(spread_kind, LAST_FROZEN_ELEMENTS_KIND),
-        &if_smiorobject, &if_generic);
+    Branch(IsElementsKindLessThanOrEqual(spread_kind,
+                                         LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND),
+           &if_smiorobject, &if_generic);
   }
 
   BIND(&if_generic);
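The spread fast path above leans on `ElementsKind` being an ordered enum: a single comparison against `LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND` (which this diff substitutes for `LAST_FROZEN_ELEMENTS_KIND`) accepts or rejects a whole range of kinds at once, and `kind & 1` detects the holey variant because packed and holey kinds alternate. A sketch with an illustrative enum (real V8 values differ):

```cpp
#include <cstdio>

// Illustrative subset of the ElementsKind ordering. Packed/holey kinds
// alternate, so (kind & 1) selects the holey variant of each pair.
enum ElementsKind {
  PACKED_SMI_ELEMENTS = 0,
  HOLEY_SMI_ELEMENTS = 1,
  PACKED_ELEMENTS = 2,
  HOLEY_ELEMENTS = 3,
  LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND = HOLEY_ELEMENTS,
  DICTIONARY_ELEMENTS = 4,  // past the fast range: generic/runtime path
};

bool TakesFastPath(ElementsKind kind) {
  // One ordered comparison covers the entire fast range.
  return kind <= LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND;
}

int main() {
  for (int k = PACKED_SMI_ELEMENTS; k <= DICTIONARY_ELEMENTS; k++) {
    std::printf("kind=%d fast=%d holey=%d\n", k,
                TakesFastPath(static_cast<ElementsKind>(k)), k & 1);
  }
}
```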
@@ -430,7 +432,7 @@ TNode<JSReceiver> CallOrConstructBuiltinsAssembler::GetCompatibleReceiver(
       //     will be ruled out there).
       //
       var_template = CAST(constructor);
-      TNode<Int32T> template_type = LoadInstanceType(var_template.value());
+      TNode<Uint16T> template_type = LoadInstanceType(var_template.value());
       GotoIf(InstanceTypeEqual(template_type, JS_FUNCTION_TYPE),
              &template_from_closure);
       Branch(InstanceTypeEqual(template_type, MAP_TYPE), &template_map_loop,
@@ -461,12 +463,12 @@ TNode<JSReceiver> CallOrConstructBuiltinsAssembler::GetCompatibleReceiver(
       // end, in which case we continue with the next holder (the
       // hidden prototype) if there's any.
       TNode<HeapObject> current = var_template.value();
-      GotoIf(WordEqual(current, signature), &holder_found);
+      GotoIf(TaggedEqual(current, signature), &holder_found);
 
       GotoIfNot(IsFunctionTemplateInfoMap(LoadMap(current)), &holder_next);
 
       TNode<HeapObject> current_rare = LoadObjectField<HeapObject>(
-          current, FunctionTemplateInfo::kFunctionTemplateRareDataOffset);
+          current, FunctionTemplateInfo::kRareDataOffset);
       GotoIf(IsUndefined(current_rare), &holder_next);
       var_template = LoadObjectField<HeapObject>(
           current_rare, FunctionTemplateRareData::kParentTemplateOffset);
@@ -514,7 +516,7 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
     GotoIfNot(
         IsSetWord32<Map::IsAccessCheckNeededBit>(LoadMapBitField(receiver_map)),
         &receiver_done);
-    TNode<WordT> function_template_info_flags = LoadAndUntagObjectField(
+    TNode<IntPtrT> function_template_info_flags = LoadAndUntagObjectField(
         function_template_info, FunctionTemplateInfo::kFlagOffset);
     Branch(IsSetWord(function_template_info_flags,
                      1 << FunctionTemplateInfo::kAcceptAnyReceiver),
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 613e5f10ff2f17..dec4142c65fc9e 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -108,8 +108,8 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
   // Checks whether {collection}'s initial add/set function has been modified
   // (depending on {variant}, loaded from {native_context}).
   void GotoIfInitialAddFunctionModified(Variant variant,
-                                        TNode<Context> native_context,
-                                        TNode<Object> collection,
+                                        TNode<NativeContext> native_context,
+                                        TNode<HeapObject> collection,
                                         Label* if_modified);
 
   // Gets root index for the name of the add/set function.
@@ -186,8 +186,8 @@ void BaseCollectionsAssembler::AddConstructorEntries(
     TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
     StoreObjectField(collection, GetTableOffset(variant), table);
     GotoIf(IsNullOrUndefined(initial_entries), &exit);
-    GotoIfInitialAddFunctionModified(variant, native_context, collection,
-                                     &slow_loop);
+    GotoIfInitialAddFunctionModified(variant, CAST(native_context),
+                                     CAST(collection), &slow_loop);
     Branch(use_fast_loop.value(), &fast_loop, &slow_loop);
   }
   BIND(&fast_loop);
@@ -212,15 +212,15 @@ void BaseCollectionsAssembler::AddConstructorEntries(
       {
         // Check that add/set function has not been modified.
         Label if_not_modified(this), if_modified(this);
-        GotoIfInitialAddFunctionModified(variant, native_context, collection,
-                                         &if_modified);
+        GotoIfInitialAddFunctionModified(variant, CAST(native_context),
+                                         CAST(collection), &if_modified);
         Goto(&if_not_modified);
         BIND(&if_modified);
         Unreachable();
         BIND(&if_not_modified);
       }
-      CSA_ASSERT(this, WordEqual(original_initial_entries_map,
-                                 LoadMap(initial_entries_jsarray)));
+      CSA_ASSERT(this, TaggedEqual(original_initial_entries_map,
+                                   LoadMap(initial_entries_jsarray)));
 #endif
       use_fast_loop = Int32FalseConstant();
       Goto(&allocate_table);
@@ -242,9 +242,9 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
   TNode<FixedArrayBase> elements = LoadElements(fast_jsarray);
   TNode<Int32T> elements_kind = LoadElementsKind(fast_jsarray);
   TNode<JSFunction> add_func = GetInitialAddFunction(variant, native_context);
-  CSA_ASSERT(
-      this,
-      WordEqual(GetAddFunction(variant, native_context, collection), add_func));
+  CSA_ASSERT(this,
+             TaggedEqual(GetAddFunction(variant, native_context, collection),
+                         add_func));
   CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(context, fast_jsarray));
   TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(fast_jsarray));
   CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
@@ -301,9 +301,9 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
   BIND(&exit);
 #if DEBUG
   CSA_ASSERT(this,
-             WordEqual(original_collection_map, LoadMap(CAST(collection))));
+             TaggedEqual(original_collection_map, LoadMap(CAST(collection))));
   CSA_ASSERT(this,
-             WordEqual(original_fast_js_array_map, LoadMap(fast_jsarray)));
+             TaggedEqual(original_fast_js_array_map, LoadMap(fast_jsarray)));
 #endif
 }
 
@@ -356,21 +356,44 @@ RootIndex BaseCollectionsAssembler::GetAddFunctionNameIndex(Variant variant) {
 }
 
 void BaseCollectionsAssembler::GotoIfInitialAddFunctionModified(
-    Variant variant, TNode<Context> native_context, TNode<Object> collection,
-    Label* if_modified) {
+    Variant variant, TNode<NativeContext> native_context,
+    TNode<HeapObject> collection, Label* if_modified) {
   STATIC_ASSERT(JSCollection::kAddFunctionDescriptorIndex ==
                 JSWeakCollection::kAddFunctionDescriptorIndex);
-  GotoIfInitialPrototypePropertyModified(
-      LoadMap(CAST(collection)),
-      GetInitialCollectionPrototype(variant, native_context),
+
+  // TODO(jgruber): Investigate if this should also fall back to full prototype
+  // verification.
+  static constexpr PrototypeCheckAssembler::Flags flags{
+      PrototypeCheckAssembler::kCheckPrototypePropertyConstness};
+
+  static constexpr int kNoContextIndex = -1;
+  STATIC_ASSERT(
+      (flags & PrototypeCheckAssembler::kCheckPrototypePropertyIdentity) == 0);
+
+  using DescriptorIndexNameValue =
+      PrototypeCheckAssembler::DescriptorIndexNameValue;
+
+  DescriptorIndexNameValue property_to_check{
       JSCollection::kAddFunctionDescriptorIndex,
-      GetAddFunctionNameIndex(variant), if_modified);
+      GetAddFunctionNameIndex(variant), kNoContextIndex};
+
+  PrototypeCheckAssembler prototype_check_assembler(
+      state(), flags, native_context,
+      GetInitialCollectionPrototype(variant, native_context),
+      Vector<DescriptorIndexNameValue>(&property_to_check, 1));
+
+  TNode<HeapObject> prototype = LoadMapPrototype(LoadMap(collection));
+  Label if_unmodified(this);
+  prototype_check_assembler.CheckAndBranch(prototype, &if_unmodified,
+                                           if_modified);
+
+  BIND(&if_unmodified);
 }
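The rewritten `GotoIfInitialAddFunctionModified` hands the check to `PrototypeCheckAssembler`, configured to verify the constness of the add/set property on the initial collection prototype rather than its identity (note the `STATIC_ASSERT` that the identity flag is clear). The guard pattern itself, approximated in standalone C++ with an identity check (hypothetical types; the real check inspects the property's descriptor):

```cpp
#include <cstdio>
#include <functional>

using AddFn = std::function<void(int)>;

// Hypothetical prototype object holding the add/set property.
struct Prototype {
  const AddFn* add;  // current value of the property
};

template <typename Fast, typename Slow>
void AddEntries(const Prototype& proto, const AddFn* initial_add,
                Fast fast, Slow slow) {
  // Fast path only while the property is provably unmodified; identity
  // here stands in for the constness check on the real property.
  if (proto.add == initial_add) fast(); else slow();
}

int main() {
  AddFn initial = [](int) {};
  Prototype proto{&initial};
  AddEntries(proto, &initial, [] { std::puts("fast loop"); },
             [] { std::puts("slow loop"); });

  AddFn patched = [](int) {};
  proto.add = &patched;  // user overwrote Map.prototype.set / Set.prototype.add
  AddEntries(proto, &initial, [] { std::puts("fast loop"); },
             [] { std::puts("slow loop"); });
}
```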
 
 TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollection(
     TNode<Context> context, TNode<JSFunction> constructor,
     TNode<JSReceiver> new_target) {
-  TNode<BoolT> is_target_unmodified = WordEqual(constructor, new_target);
+  TNode<BoolT> is_target_unmodified = TaggedEqual(constructor, new_target);
 
   return Select<JSObject>(
       is_target_unmodified,
@@ -406,8 +429,8 @@ void BaseCollectionsAssembler::GenerateConstructor(
   Label if_undefined(this, Label::kDeferred);
   GotoIf(IsUndefined(new_target), &if_undefined);
 
-  TNode<Context> native_context = LoadNativeContext(context);
-  TNode<Object> collection = AllocateJSCollection(
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<JSObject> collection = AllocateJSCollection(
       context, GetConstructor(variant, native_context), CAST(new_target));
 
   AddConstructorEntries(variant, context, native_context, collection, iterable);
@@ -531,8 +554,8 @@ TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
   TNode<Map> collection_proto_map =
       LoadMap(LoadMapPrototype(LoadMap(CAST(collection))));
 
-  return WordEqual(collection_proto_map,
-                   GetInitialCollectionPrototype(variant, native_context));
+  return TaggedEqual(collection_proto_map,
+                     GetInitialCollectionPrototype(variant, native_context));
 }
 
 TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
@@ -585,13 +608,13 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
 
  protected:
   template <typename IteratorType>
-  Node* AllocateJSCollectionIterator(Node* context, int map_index,
-                                     Node* collection);
+  Node* AllocateJSCollectionIterator(SloppyTNode<Context> context,
+                                     int map_index, Node* collection);
   TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
                               TNode<IntPtrT> at_least_space_for) override;
-  Node* GetHash(Node* const key);
-  Node* CallGetHashRaw(Node* const key);
-  Node* CallGetOrCreateHashRaw(Node* const key);
+  TNode<IntPtrT> GetHash(SloppyTNode<HeapObject> const key);
+  TNode<IntPtrT> CallGetHashRaw(SloppyTNode<HeapObject> const key);
+  TNode<Smi> CallGetOrCreateHashRaw(SloppyTNode<HeapObject> const key);
 
   // Transitions the iterator to the non-obsolete backing store.
   // This is a NOP if the [table] is not obsolete.
@@ -612,23 +635,25 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
   // The {result} variable will contain the entry index if the key was found,
   // or the hash code otherwise.
   template <typename CollectionType>
-  void FindOrderedHashTableEntryForSmiKey(Node* table, Node* key_tagged,
+  void FindOrderedHashTableEntryForSmiKey(Node* table,
+                                          SloppyTNode<Smi> key_tagged,
                                           Variable* result, Label* entry_found,
                                           Label* not_found);
-  void SameValueZeroSmi(Node* key_smi, Node* candidate_key, Label* if_same,
+  void SameValueZeroSmi(SloppyTNode<Smi> key_smi,
+                        SloppyTNode<Object> candidate_key, Label* if_same,
                         Label* if_not_same);
 
   // Specialization for heap numbers.
   // The {result} variable will contain the entry index if the key was found,
   // or the hash code otherwise.
-  void SameValueZeroHeapNumber(Node* key_string, Node* candidate_key,
+  void SameValueZeroHeapNumber(SloppyTNode<Float64T> key_float,
+                               SloppyTNode<Object> candidate_key,
                                Label* if_same, Label* if_not_same);
   template <typename CollectionType>
-  void FindOrderedHashTableEntryForHeapNumberKey(Node* context, Node* table,
-                                                 Node* key_heap_number,
-                                                 Variable* result,
-                                                 Label* entry_found,
-                                                 Label* not_found);
+  void FindOrderedHashTableEntryForHeapNumberKey(
+      SloppyTNode<Context> context, Node* table,
+      SloppyTNode<HeapNumber> key_heap_number, Variable* result,
+      Label* entry_found, Label* not_found);
 
   // Specialization for bigints.
   // The {result} variable will contain the entry index if the key was found,
@@ -636,8 +661,9 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
   void SameValueZeroBigInt(Node* key, Node* candidate_key, Label* if_same,
                            Label* if_not_same);
   template <typename CollectionType>
-  void FindOrderedHashTableEntryForBigIntKey(Node* context, Node* table,
-                                             Node* key, Variable* result,
+  void FindOrderedHashTableEntryForBigIntKey(SloppyTNode<Context> context,
+                                             Node* table, Node* key,
+                                             Variable* result,
                                              Label* entry_found,
                                              Label* not_found);
 
@@ -645,13 +671,15 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
   // The {result} variable will contain the entry index if the key was found,
   // or the hash code otherwise.
   template <typename CollectionType>
-  void FindOrderedHashTableEntryForStringKey(Node* context, Node* table,
-                                             Node* key_tagged, Variable* result,
-                                             Label* entry_found,
-                                             Label* not_found);
-  Node* ComputeStringHash(Node* context, Node* string_key);
-  void SameValueZeroString(Node* context, Node* key_string, Node* candidate_key,
-                           Label* if_same, Label* if_not_same);
+  void FindOrderedHashTableEntryForStringKey(
+      SloppyTNode<Context> context, Node* table, SloppyTNode<String> key_tagged,
+      Variable* result, Label* entry_found, Label* not_found);
+  TNode<IntPtrT> ComputeStringHash(TNode<Context> context,
+                                   TNode<String> string_key);
+  void SameValueZeroString(SloppyTNode<Context> context,
+                           SloppyTNode<String> key_string,
+                           SloppyTNode<Object> candidate_key, Label* if_same,
+                           Label* if_not_same);
 
   // Specialization for non-strings, non-numbers. For those we only need
   // reference equality to compare the keys.
@@ -659,10 +687,9 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
   // or the hash code otherwise. If the hash-code has not been computed, it
   // should be Smi -1.
   template <typename CollectionType>
-  void FindOrderedHashTableEntryForOtherKey(Node* context, Node* table,
-                                            Node* key, Variable* result,
-                                            Label* entry_found,
-                                            Label* not_found);
+  void FindOrderedHashTableEntryForOtherKey(
+      SloppyTNode<Context> context, Node* table, SloppyTNode<HeapObject> key,
+      Variable* result, Label* entry_found, Label* not_found);
 
   template <typename CollectionType>
   void TryLookupOrderedHashTableIndex(Node* const table, Node* const key,
@@ -704,11 +731,13 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
 
 template <typename IteratorType>
 Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
-    Node* context, int map_index, Node* collection) {
-  Node* const table = LoadObjectField(collection, JSCollection::kTableOffset);
-  Node* const native_context = LoadNativeContext(context);
-  Node* const iterator_map = LoadContextElement(native_context, map_index);
-  Node* const iterator = AllocateInNewSpace(IteratorType::kSize);
+    SloppyTNode<Context> context, int map_index, Node* collection) {
+  TNode<Object> const table =
+      LoadObjectField(collection, JSCollection::kTableOffset);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Object> const iterator_map =
+      LoadContextElement(native_context, map_index);
+  TNode<HeapObject> const iterator = AllocateInNewSpace(IteratorType::kSize);
   StoreMapNoWriteBarrier(iterator, iterator_map);
   StoreObjectFieldRoot(iterator, IteratorType::kPropertiesOrHashOffset,
                        RootIndex::kEmptyFixedArray);
@@ -748,10 +777,11 @@ TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
                       argc, context);
 }
 
-Node* CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(Node* const key) {
-  Node* const function_addr =
+TNode<Smi> CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(
+    SloppyTNode<HeapObject> const key) {
+  TNode<ExternalReference> const function_addr =
       ExternalConstant(ExternalReference::get_or_create_hash_raw());
-  Node* const isolate_ptr =
+  TNode<ExternalReference> const isolate_ptr =
       ExternalConstant(ExternalReference::isolate_address(isolate()));
 
   MachineType type_ptr = MachineType::Pointer();
@@ -761,13 +791,14 @@ Node* CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(Node* const key) {
                                      std::make_pair(type_ptr, isolate_ptr),
                                      std::make_pair(type_tagged, key));
 
-  return result;
+  return CAST(result);
 }
 
-Node* CollectionsBuiltinsAssembler::CallGetHashRaw(Node* const key) {
-  Node* const function_addr =
+TNode<IntPtrT> CollectionsBuiltinsAssembler::CallGetHashRaw(
+    SloppyTNode<HeapObject> const key) {
+  TNode<ExternalReference> const function_addr =
       ExternalConstant(ExternalReference::orderedhashmap_gethash_raw());
-  Node* const isolate_ptr =
+  TNode<ExternalReference> const isolate_ptr =
       ExternalConstant(ExternalReference::isolate_address(isolate()));
 
   MachineType type_ptr = MachineType::Pointer();
@@ -780,20 +811,21 @@ Node* CollectionsBuiltinsAssembler::CallGetHashRaw(Node* const key) {
   return SmiUntag(result);
 }
 
-Node* CollectionsBuiltinsAssembler::GetHash(Node* const key) {
-  VARIABLE(var_hash, MachineType::PointerRepresentation());
+TNode<IntPtrT> CollectionsBuiltinsAssembler::GetHash(
+    SloppyTNode<HeapObject> const key) {
+  TVARIABLE(IntPtrT, var_hash);
   Label if_receiver(this), if_other(this), done(this);
   Branch(IsJSReceiver(key), &if_receiver, &if_other);
 
   BIND(&if_receiver);
   {
-    var_hash.Bind(LoadJSReceiverIdentityHash(key));
+    var_hash = LoadJSReceiverIdentityHash(key);
     Goto(&done);
   }
 
   BIND(&if_other);
   {
-    var_hash.Bind(CallGetHashRaw(key));
+    var_hash = CallGetHashRaw(key);
     Goto(&done);
   }
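`GetHash` dispatches on the key: JS receivers carry an identity hash on the object itself, while other heap objects (strings, heap numbers, bigints) are hashed by value through the runtime, with `CallGetOrCreateHashRaw` as the variant that creates a missing hash. A standalone sketch of that split (hypothetical value representation):

```cpp
#include <cstdio>
#include <functional>
#include <string>
#include <unordered_map>
#include <variant>

// Hypothetical tagged value: an "object" (identified by address) or a
// string primitive (hashed by value).
struct JSObjectStub {};
using Value = std::variant<JSObjectStub*, std::string>;

// Lazily assigned identity hashes for receivers.
std::unordered_map<JSObjectStub*, size_t> identity_hashes;
size_t next_identity_hash = 1;

size_t GetHash(const Value& key) {
  if (auto* obj = std::get_if<JSObjectStub*>(&key)) {
    // Receivers: create-on-first-use identity hash.
    auto [it, inserted] = identity_hashes.try_emplace(*obj, next_identity_hash);
    if (inserted) ++next_identity_hash;
    return it->second;
  }
  // Other keys (here: strings) hash by their contents.
  return std::hash<std::string>{}(std::get<std::string>(key));
}

int main() {
  JSObjectStub a, b;
  std::printf("%zu %zu %zu\n", GetHash(Value{&a}), GetHash(Value{&a}),
              GetHash(Value{&b}));  // first two match, third differs
  std::printf("%d\n", static_cast<int>(GetHash(Value{std::string("x")}) ==
                                       GetHash(Value{std::string("x")})));
}
```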
 
@@ -801,12 +833,11 @@ Node* CollectionsBuiltinsAssembler::GetHash(Node* const key) {
   return var_hash.value();
 }
 
-void CollectionsBuiltinsAssembler::SameValueZeroSmi(Node* key_smi,
-                                                    Node* candidate_key,
-                                                    Label* if_same,
-                                                    Label* if_not_same) {
+void CollectionsBuiltinsAssembler::SameValueZeroSmi(
+    SloppyTNode<Smi> key_smi, SloppyTNode<Object> candidate_key, Label* if_same,
+    Label* if_not_same) {
   // If the key is the same, we are done.
-  GotoIf(WordEqual(candidate_key, key_smi), if_same);
+  GotoIf(TaggedEqual(candidate_key, key_smi), if_same);
 
   // If the candidate key is smi, then it must be different (because
   // we already checked for equality above).
@@ -814,10 +845,11 @@ void CollectionsBuiltinsAssembler::SameValueZeroSmi(Node* key_smi,
 
   // If the candidate key is not smi, we still have to check if it is a
   // heap number with the same value.
-  GotoIfNot(IsHeapNumber(candidate_key), if_not_same);
+  GotoIfNot(IsHeapNumber(CAST(candidate_key)), if_not_same);
 
-  Node* const candidate_key_number = LoadHeapNumberValue(candidate_key);
-  Node* const key_number = SmiToFloat64(key_smi);
+  TNode<Float64T> const candidate_key_number =
+      LoadHeapNumberValue(CAST(candidate_key));
+  TNode<Float64T> const key_number = SmiToFloat64(key_smi);
 
   GotoIf(Float64Equal(candidate_key_number, key_number), if_same);
 
@@ -826,11 +858,12 @@ void CollectionsBuiltinsAssembler::SameValueZeroSmi(Node* key_smi,
 
 void CollectionsBuiltinsAssembler::BranchIfMapIteratorProtectorValid(
     Label* if_true, Label* if_false) {
-  Node* protector_cell = LoadRoot(RootIndex::kMapIteratorProtector);
+  TNode<PropertyCell> protector_cell = MapIteratorProtectorConstant();
   DCHECK(isolate()->heap()->map_iterator_protector().IsPropertyCell());
-  Branch(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
-                   SmiConstant(Isolate::kProtectorValid)),
-         if_true, if_false);
+  Branch(
+      TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+                  SmiConstant(Isolate::kProtectorValid)),
+      if_true, if_false);
 }
 
 void CollectionsBuiltinsAssembler::
@@ -843,7 +876,7 @@ void CollectionsBuiltinsAssembler::
   // Check if iterator is a keys or values JSMapIterator.
   GotoIf(TaggedIsSmi(iterator), if_false);
   TNode<Map> iter_map = LoadMap(CAST(iterator));
-  Node* const instance_type = LoadMapInstanceType(iter_map);
+  TNode<Uint16T> const instance_type = LoadMapInstanceType(iter_map);
   GotoIf(InstanceTypeEqual(instance_type, JS_MAP_KEY_ITERATOR_TYPE),
          &if_key_or_value_iterator);
   Branch(InstanceTypeEqual(instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
@@ -851,25 +884,26 @@ void CollectionsBuiltinsAssembler::
 
   BIND(&if_key_or_value_iterator);
   // Check that the iterator is not partially consumed.
-  Node* const index =
+  TNode<Object> const index =
       LoadObjectField(CAST(iterator), JSMapIterator::kIndexOffset);
-  GotoIfNot(WordEqual(index, SmiConstant(0)), if_false);
+  GotoIfNot(TaggedEqual(index, SmiConstant(0)), if_false);
   BranchIfMapIteratorProtectorValid(&extra_checks, if_false);
 
   BIND(&extra_checks);
   // Check if the iterator object has the original %MapIteratorPrototype%.
-  Node* const native_context = LoadNativeContext(context);
-  Node* const initial_map_iter_proto = LoadContextElement(
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Object> const initial_map_iter_proto = LoadContextElement(
       native_context, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX);
-  Node* const map_iter_proto = LoadMapPrototype(iter_map);
-  GotoIfNot(WordEqual(map_iter_proto, initial_map_iter_proto), if_false);
+  TNode<HeapObject> const map_iter_proto = LoadMapPrototype(iter_map);
+  GotoIfNot(TaggedEqual(map_iter_proto, initial_map_iter_proto), if_false);
 
   // Check if the original MapIterator prototype has the original
   // %IteratorPrototype%.
-  Node* const initial_iter_proto = LoadContextElement(
+  TNode<Object> const initial_iter_proto = LoadContextElement(
       native_context, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX);
-  Node* const iter_proto = LoadMapPrototype(LoadMap(map_iter_proto));
-  Branch(WordEqual(iter_proto, initial_iter_proto), if_true, if_false);
+  TNode<HeapObject> const iter_proto =
+      LoadMapPrototype(LoadMap(map_iter_proto));
+  Branch(TaggedEqual(iter_proto, initial_iter_proto), if_true, if_false);
 }
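The map-iterator fast path above demands an untouched prototype chain, iterator to %MapIteratorPrototype% to %IteratorPrototype%, each hop compared by identity against the copies cached in the native context. The shape of that check, standalone (hypothetical objects):

```cpp
#include <cstdio>

// Hypothetical objects with a prototype link.
struct Obj { const Obj* proto; };

// The cached "initial" prototypes play the role of the native-context slots.
bool HasOriginalChain(const Obj& iter, const Obj* initial_map_iter_proto,
                      const Obj* initial_iter_proto) {
  if (iter.proto != initial_map_iter_proto) return false;  // first hop
  return iter.proto->proto == initial_iter_proto;          // second hop
}

int main() {
  Obj iterator_proto{nullptr};
  Obj map_iterator_proto{&iterator_proto};
  Obj iter{&map_iterator_proto};
  std::printf("%d\n", HasOriginalChain(iter, &map_iterator_proto,
                                       &iterator_proto));  // 1

  Obj patched{&iterator_proto};
  iter.proto = &patched;  // prototype swapped by user code
  std::printf("%d\n", HasOriginalChain(iter, &map_iterator_proto,
                                       &iterator_proto));  // 0
}
```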
 
 void BranchIfIterableWithOriginalKeyOrValueMapIterator(
@@ -883,11 +917,12 @@ void BranchIfIterableWithOriginalKeyOrValueMapIterator(
 
 void CollectionsBuiltinsAssembler::BranchIfSetIteratorProtectorValid(
     Label* if_true, Label* if_false) {
-  Node* const protector_cell = LoadRoot(RootIndex::kSetIteratorProtector);
+  TNode<PropertyCell> const protector_cell = SetIteratorProtectorConstant();
   DCHECK(isolate()->heap()->set_iterator_protector().IsPropertyCell());
-  Branch(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
-                   SmiConstant(Isolate::kProtectorValid)),
-         if_true, if_false);
+  Branch(
+      TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+                  SmiConstant(Isolate::kProtectorValid)),
+      if_true, if_false);
 }
 
 void CollectionsBuiltinsAssembler::BranchIfIterableWithOriginalValueSetIterator(
@@ -898,7 +933,7 @@ void CollectionsBuiltinsAssembler::BranchIfIterableWithOriginalValueSetIterator(
 
   GotoIf(TaggedIsSmi(iterable), if_false);
   TNode<Map> iterable_map = LoadMap(CAST(iterable));
-  Node* const instance_type = LoadMapInstanceType(iterable_map);
+  TNode<Uint16T> const instance_type = LoadMapInstanceType(iterable_map);
 
   GotoIf(InstanceTypeEqual(instance_type, JS_SET_TYPE), &if_set);
   Branch(InstanceTypeEqual(instance_type, JS_SET_VALUE_ITERATOR_TYPE),
@@ -906,31 +941,32 @@ void CollectionsBuiltinsAssembler::BranchIfIterableWithOriginalValueSetIterator(
 
   BIND(&if_set);
   // Check if the set object has the original Set prototype.
-  Node* const initial_set_proto = LoadContextElement(
+  TNode<Object> const initial_set_proto = LoadContextElement(
       LoadNativeContext(context), Context::INITIAL_SET_PROTOTYPE_INDEX);
-  Node* const set_proto = LoadMapPrototype(iterable_map);
-  GotoIfNot(WordEqual(set_proto, initial_set_proto), if_false);
+  TNode<HeapObject> const set_proto = LoadMapPrototype(iterable_map);
+  GotoIfNot(TaggedEqual(set_proto, initial_set_proto), if_false);
   Goto(&check_protector);
 
   BIND(&if_value_iterator);
   // Check that the iterator is not partially consumed.
-  Node* const index =
+  TNode<Object> const index =
       LoadObjectField(CAST(iterable), JSSetIterator::kIndexOffset);
-  GotoIfNot(WordEqual(index, SmiConstant(0)), if_false);
+  GotoIfNot(TaggedEqual(index, SmiConstant(0)), if_false);
 
   // Check if the iterator object has the original SetIterator prototype.
-  Node* const native_context = LoadNativeContext(context);
-  Node* const initial_set_iter_proto = LoadContextElement(
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Object> const initial_set_iter_proto = LoadContextElement(
       native_context, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX);
-  Node* const set_iter_proto = LoadMapPrototype(iterable_map);
-  GotoIfNot(WordEqual(set_iter_proto, initial_set_iter_proto), if_false);
+  TNode<HeapObject> const set_iter_proto = LoadMapPrototype(iterable_map);
+  GotoIfNot(TaggedEqual(set_iter_proto, initial_set_iter_proto), if_false);
 
   // Check if the original SetIterator prototype has the original
   // %IteratorPrototype%.
-  Node* const initial_iter_proto = LoadContextElement(
+  TNode<Object> const initial_iter_proto = LoadContextElement(
       native_context, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX);
-  Node* const iter_proto = LoadMapPrototype(LoadMap(set_iter_proto));
-  GotoIfNot(WordEqual(iter_proto, initial_iter_proto), if_false);
+  TNode<HeapObject> const iter_proto =
+      LoadMapPrototype(LoadMap(set_iter_proto));
+  GotoIfNot(TaggedEqual(iter_proto, initial_iter_proto), if_false);
   Goto(&check_protector);
 
   BIND(&check_protector);
@@ -1043,7 +1079,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
   TVARIABLE(OrderedHashSet, var_table);
   Label if_set(this), if_iterator(this), copy(this);
 
-  Node* const instance_type = LoadInstanceType(CAST(iterable));
+  TNode<Uint16T> const instance_type = LoadInstanceType(CAST(iterable));
   Branch(InstanceTypeEqual(instance_type, JS_SET_TYPE), &if_set, &if_iterator);
 
   BIND(&if_set);
@@ -1128,15 +1164,16 @@ TF_BUILTIN(SetOrSetIteratorToList, CollectionsBuiltinsAssembler) {
 
 template <typename CollectionType>
 void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey(
-    Node* table, Node* smi_key, Variable* result, Label* entry_found,
+    Node* table, SloppyTNode<Smi> smi_key, Variable* result, Label* entry_found,
     Label* not_found) {
-  Node* const key_untagged = SmiUntag(smi_key);
-  Node* const hash = ChangeInt32ToIntPtr(ComputeUnseededHash(key_untagged));
+  TNode<IntPtrT> const key_untagged = SmiUntag(smi_key);
+  TNode<IntPtrT> const hash =
+      ChangeInt32ToIntPtr(ComputeUnseededHash(key_untagged));
   CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
   result->Bind(hash);
   FindOrderedHashTableEntry<CollectionType>(
       table, hash,
-      [&](Node* other_key, Label* if_same, Label* if_not_same) {
+      [&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
         SameValueZeroSmi(smi_key, other_key, if_same, if_not_same);
       },
       result, entry_found, not_found);
@@ -1144,14 +1181,14 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey(
 
 template <typename CollectionType>
 void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey(
-    Node* context, Node* table, Node* key_tagged, Variable* result,
-    Label* entry_found, Label* not_found) {
-  Node* const hash = ComputeStringHash(context, key_tagged);
+    SloppyTNode<Context> context, Node* table, SloppyTNode<String> key_tagged,
+    Variable* result, Label* entry_found, Label* not_found) {
+  TNode<IntPtrT> const hash = ComputeStringHash(context, key_tagged);
   CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
   result->Bind(hash);
   FindOrderedHashTableEntry<CollectionType>(
       table, hash,
-      [&](Node* other_key, Label* if_same, Label* if_not_same) {
+      [&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
         SameValueZeroString(context, key_tagged, other_key, if_same,
                             if_not_same);
       },
@@ -1160,15 +1197,16 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey(
 
 template <typename CollectionType>
 void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey(
-    Node* context, Node* table, Node* key_heap_number, Variable* result,
+    SloppyTNode<Context> context, Node* table,
+    SloppyTNode<HeapNumber> key_heap_number, Variable* result,
     Label* entry_found, Label* not_found) {
-  Node* hash = CallGetHashRaw(key_heap_number);
+  TNode<IntPtrT> const hash = CallGetHashRaw(key_heap_number);
   CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
   result->Bind(hash);
-  Node* const key_float = LoadHeapNumberValue(key_heap_number);
+  TNode<Float64T> const key_float = LoadHeapNumberValue(key_heap_number);
   FindOrderedHashTableEntry<CollectionType>(
       table, hash,
-      [&](Node* other_key, Label* if_same, Label* if_not_same) {
+      [&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
         SameValueZeroHeapNumber(key_float, other_key, if_same, if_not_same);
       },
       result, entry_found, not_found);
@@ -1176,14 +1214,14 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey(
 
 template <typename CollectionType>
 void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForBigIntKey(
-    Node* context, Node* table, Node* key, Variable* result, Label* entry_found,
-    Label* not_found) {
-  Node* hash = CallGetHashRaw(key);
+    SloppyTNode<Context> context, Node* table, Node* key, Variable* result,
+    Label* entry_found, Label* not_found) {
+  TNode<IntPtrT> const hash = CallGetHashRaw(key);
   CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
   result->Bind(hash);
   FindOrderedHashTableEntry<CollectionType>(
       table, hash,
-      [&](Node* other_key, Label* if_same, Label* if_not_same) {
+      [&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
         SameValueZeroBigInt(key, other_key, if_same, if_not_same);
       },
       result, entry_found, not_found);
@@ -1191,49 +1229,47 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForBigIntKey(
 
 template <typename CollectionType>
 void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey(
-    Node* context, Node* table, Node* key, Variable* result, Label* entry_found,
-    Label* not_found) {
-  Node* hash = GetHash(key);
+    SloppyTNode<Context> context, Node* table, SloppyTNode<HeapObject> key,
+    Variable* result, Label* entry_found, Label* not_found) {
+  TNode<IntPtrT> const hash = GetHash(key);
   CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
   result->Bind(hash);
   FindOrderedHashTableEntry<CollectionType>(
       table, hash,
-      [&](Node* other_key, Label* if_same, Label* if_not_same) {
-        Branch(WordEqual(key, other_key), if_same, if_not_same);
+      [&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
+        Branch(TaggedEqual(key, other_key), if_same, if_not_same);
       },
       result, entry_found, not_found);
 }
 
-Node* CollectionsBuiltinsAssembler::ComputeStringHash(Node* context,
-                                                      Node* string_key) {
-  VARIABLE(var_result, MachineType::PointerRepresentation());
+TNode<IntPtrT> CollectionsBuiltinsAssembler::ComputeStringHash(
+    TNode<Context> context, TNode<String> string_key) {
+  TVARIABLE(IntPtrT, var_result);
 
   Label hash_not_computed(this), done(this, &var_result);
-  Node* hash =
+  TNode<IntPtrT> const hash =
       ChangeInt32ToIntPtr(LoadNameHash(string_key, &hash_not_computed));
-  var_result.Bind(hash);
+  var_result = hash;
   Goto(&done);
 
   BIND(&hash_not_computed);
-  var_result.Bind(CallGetHashRaw(string_key));
+  var_result = CallGetHashRaw(string_key);
   Goto(&done);
 
   BIND(&done);
   return var_result.value();
 }
 
-void CollectionsBuiltinsAssembler::SameValueZeroString(Node* context,
-                                                       Node* key_string,
-                                                       Node* candidate_key,
-                                                       Label* if_same,
-                                                       Label* if_not_same) {
+void CollectionsBuiltinsAssembler::SameValueZeroString(
+    SloppyTNode<Context> context, SloppyTNode<String> key_string,
+    SloppyTNode<Object> candidate_key, Label* if_same, Label* if_not_same) {
   // If the candidate is not a string, the keys are not equal.
   GotoIf(TaggedIsSmi(candidate_key), if_not_same);
-  GotoIfNot(IsString(candidate_key), if_not_same);
+  GotoIfNot(IsString(CAST(candidate_key)), if_not_same);
 
-  Branch(WordEqual(CallBuiltin(Builtins::kStringEqual, context, key_string,
-                               candidate_key),
-                   TrueConstant()),
+  Branch(TaggedEqual(CallBuiltin(Builtins::kStringEqual, context, key_string,
+                                 candidate_key),
+                     TrueConstant()),
          if_same, if_not_same);
 }
 
@@ -1245,24 +1281,24 @@ void CollectionsBuiltinsAssembler::SameValueZeroBigInt(Node* key,
   GotoIf(TaggedIsSmi(candidate_key), if_not_same);
   GotoIfNot(IsBigInt(candidate_key), if_not_same);
 
-  Branch(WordEqual(CallRuntime(Runtime::kBigIntEqualToBigInt,
-                               NoContextConstant(), key, candidate_key),
-                   TrueConstant()),
+  Branch(TaggedEqual(CallRuntime(Runtime::kBigIntEqualToBigInt,
+                                 NoContextConstant(), key, candidate_key),
+                     TrueConstant()),
          if_same, if_not_same);
 }
 
-void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber(Node* key_float,
-                                                           Node* candidate_key,
-                                                           Label* if_same,
-                                                           Label* if_not_same) {
+void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber(
+    SloppyTNode<Float64T> key_float, SloppyTNode<Object> candidate_key,
+    Label* if_same, Label* if_not_same) {
   Label if_smi(this), if_keyisnan(this);
 
   GotoIf(TaggedIsSmi(candidate_key), &if_smi);
-  GotoIfNot(IsHeapNumber(candidate_key), if_not_same);
+  GotoIfNot(IsHeapNumber(CAST(candidate_key)), if_not_same);
 
   {
     // {candidate_key} is a heap number.
-    Node* const candidate_float = LoadHeapNumberValue(candidate_key);
+    TNode<Float64T> const candidate_float =
+        LoadHeapNumberValue(CAST(candidate_key));
     GotoIf(Float64Equal(key_float, candidate_float), if_same);
 
     // SameValueZero needs to treat NaNs as equal. First check if {key_float}
@@ -1279,7 +1315,7 @@ void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber(Node* key_float,
 
   BIND(&if_smi);
   {
-    Node* const candidate_float = SmiToFloat64(candidate_key);
+    TNode<Float64T> const candidate_float = SmiToFloat64(CAST(candidate_key));
     Branch(Float64Equal(key_float, candidate_float), if_same, if_not_same);
   }
 }
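The SameValueZero* helpers implement the spec's SameValueZero comparison, which deviates from IEEE `==` in exactly one case: NaN is considered equal to NaN (+0 and -0 already compare equal under `==`, which the Float64Equal fast path exploits). In plain C++:

```cpp
#include <cmath>
#include <cstdio>

// SameValueZero on doubles: like ==, except NaN matches NaN.
// (+0 == -0 already holds for IEEE doubles, so no special case is needed.)
bool SameValueZero(double a, double b) {
  if (a == b) return true;                // also covers +0 vs -0
  return std::isnan(a) && std::isnan(b);  // NaNs are treated as equal
}

int main() {
  std::printf("%d\n", SameValueZero(NAN, NAN));   // 1, unlike NAN == NAN
  std::printf("%d\n", SameValueZero(0.0, -0.0));  // 1
  std::printf("%d\n", SameValueZero(1.0, 2.0));   // 0
}
```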
@@ -1295,12 +1331,12 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
   // Check if the {table} was cleared.
   STATIC_ASSERT(OrderedHashMap::NumberOfDeletedElementsOffset() ==
                 OrderedHashSet::NumberOfDeletedElementsOffset());
-  Node* number_of_deleted_elements = LoadAndUntagObjectField(
+  TNode<IntPtrT> number_of_deleted_elements = LoadAndUntagObjectField(
       table, OrderedHashMap::NumberOfDeletedElementsOffset());
   STATIC_ASSERT(OrderedHashMap::kClearedTableSentinel ==
                 OrderedHashSet::kClearedTableSentinel);
-  GotoIf(WordEqual(number_of_deleted_elements,
-                   IntPtrConstant(OrderedHashMap::kClearedTableSentinel)),
+  GotoIf(IntPtrEqual(number_of_deleted_elements,
+                     IntPtrConstant(OrderedHashMap::kClearedTableSentinel)),
          &return_zero);
 
   VARIABLE(var_i, MachineType::PointerRepresentation(), IntPtrConstant(0));
@@ -1430,7 +1466,7 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
 
   ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get");
 
-  Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+  TNode<Object> const table = LoadObjectField(receiver, JSMap::kTableOffset);
   TNode<Smi> index = CAST(
       CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key));
 
@@ -1455,7 +1491,7 @@ TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) {
 
   ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has");
 
-  Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+  TNode<Object> const table = LoadObjectField(receiver, JSMap::kTableOffset);
   TNode<Smi> index = CAST(
       CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key));
 
@@ -1476,7 +1512,7 @@ Node* CollectionsBuiltinsAssembler::NormalizeNumberKey(Node* const key) {
 
   GotoIf(TaggedIsSmi(key), &done);
   GotoIfNot(IsHeapNumber(key), &done);
-  Node* const number = LoadHeapNumberValue(key);
+  TNode<Float64T> const number = LoadHeapNumberValue(key);
   GotoIfNot(Float64Equal(number, Float64Constant(0.0)), &done);
   // We know the value is zero, so we take the key to be Smi 0.
   // Another option would be to normalize to Smi here.
@@ -1539,10 +1575,10 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
         table, OrderedHashMap::NumberOfBucketsIndex()))));
 
     STATIC_ASSERT(OrderedHashMap::kLoadFactor == 2);
-    Node* const capacity = WordShl(number_of_buckets.value(), 1);
-    Node* const number_of_elements = SmiUntag(
+    TNode<WordT> const capacity = WordShl(number_of_buckets.value(), 1);
+    TNode<IntPtrT> const number_of_elements = SmiUntag(
         CAST(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset())));
-    Node* const number_of_deleted = SmiUntag(CAST(LoadObjectField(
+    TNode<IntPtrT> const number_of_deleted = SmiUntag(CAST(LoadObjectField(
         table, OrderedHashMap::NumberOfDeletedElementsOffset())));
     occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted));
     GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry);
@@ -1553,9 +1589,9 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
     table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
     number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
         table_var.value(), OrderedHashMap::NumberOfBucketsIndex()))));
-    Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
+    TNode<IntPtrT> const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
         table_var.value(), OrderedHashMap::NumberOfElementsOffset())));
-    Node* const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
+    TNode<IntPtrT> const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
         table_var.value(), OrderedHashMap::NumberOfDeletedElementsOffset())));
     occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted));
     Goto(&store_new_entry);
@@ -1571,13 +1607,13 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
 void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
     TNode<OrderedHashMap> const table, Node* const key, Node* const value,
     Node* const hash, Node* const number_of_buckets, Node* const occupancy) {
-  Node* const bucket =
+  TNode<WordT> const bucket =
       WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
   TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement(
       table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize));
 
   // Store the entry elements.
-  Node* const entry_start = IntPtrAdd(
+  TNode<WordT> const entry_start = IntPtrAdd(
       IntPtrMul(occupancy, IntPtrConstant(OrderedHashMap::kEntrySize)),
       number_of_buckets);
   UnsafeStoreFixedArrayElement(
@@ -1713,10 +1749,10 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
         table, OrderedHashSet::NumberOfBucketsIndex()))));
 
     STATIC_ASSERT(OrderedHashSet::kLoadFactor == 2);
-    Node* const capacity = WordShl(number_of_buckets.value(), 1);
-    Node* const number_of_elements = SmiUntag(
+    TNode<WordT> const capacity = WordShl(number_of_buckets.value(), 1);
+    TNode<IntPtrT> const number_of_elements = SmiUntag(
         CAST(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset())));
-    Node* const number_of_deleted = SmiUntag(CAST(LoadObjectField(
+    TNode<IntPtrT> const number_of_deleted = SmiUntag(CAST(LoadObjectField(
         table, OrderedHashSet::NumberOfDeletedElementsOffset())));
     occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted));
     GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry);
@@ -1727,9 +1763,9 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
     table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
     number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
         table_var.value(), OrderedHashSet::NumberOfBucketsIndex()))));
-    Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
+    TNode<IntPtrT> const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
         table_var.value(), OrderedHashSet::NumberOfElementsOffset())));
-    Node* const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
+    TNode<IntPtrT> const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
         table_var.value(), OrderedHashSet::NumberOfDeletedElementsOffset())));
     occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted));
     Goto(&store_new_entry);
@@ -1745,13 +1781,13 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
 void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
     TNode<OrderedHashSet> const table, Node* const key, Node* const hash,
     Node* const number_of_buckets, Node* const occupancy) {
-  Node* const bucket =
+  TNode<WordT> const bucket =
       WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
   TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement(
       table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize));
 
   // Store the entry elements.
-  Node* const entry_start = IntPtrAdd(
+  TNode<WordT> const entry_start = IntPtrAdd(
       IntPtrMul(occupancy, IntPtrConstant(OrderedHashSet::kEntrySize)),
       number_of_buckets);
   UnsafeStoreFixedArrayElement(
@@ -1846,7 +1882,8 @@ TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
   ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
                          "get Map.prototype.size");
-  Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+  TNode<OrderedHashMap> const table =
+      CAST(LoadObjectField(receiver, JSMap::kTableOffset));
   Return(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset()));
 }
 
@@ -1855,20 +1892,20 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
   Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount);
   Node* const context = Parameter(Descriptor::kContext);
   CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
-  Node* const receiver = args.GetReceiver();
-  Node* const callback = args.GetOptionalArgumentValue(0);
-  Node* const this_arg = args.GetOptionalArgumentValue(1);
+  TNode<Object> const receiver = args.GetReceiver();
+  TNode<Object> const callback = args.GetOptionalArgumentValue(0);
+  TNode<Object> const this_arg = args.GetOptionalArgumentValue(1);
 
   ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, kMethodName);
 
   // Ensure that {callback} is actually callable.
   Label callback_not_callable(this, Label::kDeferred);
   GotoIf(TaggedIsSmi(callback), &callback_not_callable);
-  GotoIfNot(IsCallable(callback), &callback_not_callable);
+  GotoIfNot(IsCallable(CAST(callback)), &callback_not_callable);
 
   TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
   TVARIABLE(OrderedHashMap, var_table,
-            CAST(LoadObjectField(receiver, JSMap::kTableOffset)));
+            CAST(LoadObjectField(CAST(receiver), JSMap::kTableOffset)));
   Label loop(this, {&var_index, &var_table}), done_loop(this);
   Goto(&loop);
   BIND(&loop);
@@ -1887,7 +1924,7 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
         NextSkipHoles<OrderedHashMap>(table, index, &done_loop);
 
     // Load the entry value as well.
-    Node* entry_value = LoadFixedArrayElement(
+    TNode<Object> entry_value = LoadFixedArrayElement(
         table, entry_start_position,
         (OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) *
             kTaggedSize);
@@ -1938,7 +1975,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
   // Ensure that the {receiver} is actually a JSMapIterator.
   Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
   GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid);
-  Node* const receiver_instance_type = LoadInstanceType(receiver);
+  TNode<Uint16T> const receiver_instance_type = LoadInstanceType(receiver);
   GotoIf(
       InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_VALUE_ITERATOR_TYPE),
       &if_receiver_valid);
@@ -1992,7 +2029,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
 
   BIND(&return_value);
   {
-    Node* result =
+    TNode<JSObject> result =
         AllocateJSIteratorResult(context, var_value.value(), var_done.value());
     Return(result);
   }
@@ -2012,7 +2049,7 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
 
   ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has");
 
-  Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+  TNode<Object> const table = LoadObjectField(receiver, JSMap::kTableOffset);
 
   VARIABLE(entry_start_position, MachineType::PointerRepresentation(),
            IntPtrConstant(0));
@@ -2022,8 +2059,8 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
 
   GotoIf(TaggedIsSmi(key), &if_key_smi);
 
-  Node* key_map = LoadMap(key);
-  Node* key_instance_type = LoadMapInstanceType(key_map);
+  TNode<Map> key_map = LoadMap(key);
+  TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
 
   GotoIf(IsStringInstanceType(key_instance_type), &if_key_string);
   GotoIf(IsHeapNumberMap(key_map), &if_key_heap_number);
@@ -2077,7 +2114,8 @@ TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
   ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
                          "get Set.prototype.size");
-  Node* const table = LoadObjectField(receiver, JSSet::kTableOffset);
+  TNode<OrderedHashSet> const table =
+      CAST(LoadObjectField(receiver, JSSet::kTableOffset));
   Return(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset()));
 }
 
@@ -2086,20 +2124,20 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
   Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount);
   Node* const context = Parameter(Descriptor::kContext);
   CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
-  Node* const receiver = args.GetReceiver();
-  Node* const callback = args.GetOptionalArgumentValue(0);
-  Node* const this_arg = args.GetOptionalArgumentValue(1);
+  TNode<Object> const receiver = args.GetReceiver();
+  TNode<Object> const callback = args.GetOptionalArgumentValue(0);
+  TNode<Object> const this_arg = args.GetOptionalArgumentValue(1);
 
   ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, kMethodName);
 
   // Ensure that {callback} is actually callable.
   Label callback_not_callable(this, Label::kDeferred);
   GotoIf(TaggedIsSmi(callback), &callback_not_callable);
-  GotoIfNot(IsCallable(callback), &callback_not_callable);
+  GotoIfNot(IsCallable(CAST(callback)), &callback_not_callable);
 
   TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
   TVARIABLE(OrderedHashSet, var_table,
-            CAST(LoadObjectField(receiver, JSSet::kTableOffset)));
+            CAST(LoadObjectField(CAST(receiver), JSSet::kTableOffset)));
   Label loop(this, {&var_index, &var_table}), done_loop(this);
   Goto(&loop);
   BIND(&loop);
@@ -2154,7 +2192,7 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
   // Ensure that the {receiver} is actually a JSSetIterator.
   Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
   GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid);
-  Node* const receiver_instance_type = LoadInstanceType(receiver);
+  TNode<Uint16T> const receiver_instance_type = LoadInstanceType(receiver);
   GotoIf(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE),
          &if_receiver_valid);
   Branch(
@@ -2200,7 +2238,7 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
 
   BIND(&return_value);
   {
-    Node* result =
+    TNode<JSObject> result =
         AllocateJSIteratorResult(context, var_value.value(), var_done.value());
     Return(result);
   }
@@ -2222,8 +2260,8 @@ void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex(
 
   GotoIf(TaggedIsSmi(key), &if_key_smi);
 
-  Node* key_map = LoadMap(key);
-  Node* key_instance_type = LoadMapInstanceType(key_map);
+  TNode<Map> key_map = LoadMap(key);
+  TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
 
   GotoIf(IsStringInstanceType(key_instance_type), &if_key_string);
   GotoIf(IsHeapNumberMap(key_map), &if_key_heap_number);
@@ -2449,7 +2487,7 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndexForKey(
   auto match_key_or_exit_on_empty = [&](TNode<Object> entry_key,
                                         Label* if_same) {
     GotoIf(IsUndefined(entry_key), if_not_found);
-    GotoIf(WordEqual(entry_key, key), if_same);
+    GotoIf(TaggedEqual(entry_key, key), if_same);
   };
   return FindKeyIndex(table, hash, entry_mask, match_key_or_exit_on_empty);
 }
@@ -2606,7 +2644,7 @@ TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) {
   TNode<Smi> const index =
       CAST(CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key));
 
-  GotoIf(WordEqual(index, SmiConstant(-1)), &return_undefined);
+  GotoIf(TaggedEqual(index, SmiConstant(-1)), &return_undefined);
 
   Return(LoadFixedArrayElement(table, SmiUntag(index)));
 
@@ -2625,10 +2663,10 @@ TF_BUILTIN(WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) {
                          "WeakMap.prototype.has");
 
   TNode<EphemeronHashTable> const table = LoadTable(CAST(receiver));
-  Node* const index =
+  TNode<Object> const index =
       CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
 
-  GotoIf(WordEqual(index, SmiConstant(-1)), &return_false);
+  GotoIf(TaggedEqual(index, SmiConstant(-1)), &return_false);
 
   Return(TrueConstant());
 
@@ -2788,11 +2826,11 @@ TF_BUILTIN(WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) {
   ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
                          "WeakSet.prototype.has");
 
-  Node* const table = LoadTable(CAST(receiver));
-  Node* const index =
+  TNode<EphemeronHashTable> const table = LoadTable(CAST(receiver));
+  TNode<Object> const index =
       CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
 
-  GotoIf(WordEqual(index, SmiConstant(-1)), &return_false);
+  GotoIf(TaggedEqual(index, SmiConstant(-1)), &return_false);
 
   Return(TrueConstant());
 
diff --git a/deps/v8/src/builtins/builtins-console-gen.cc b/deps/v8/src/builtins/builtins-console-gen.cc
index 8dc7e5e8f6cee6..1d6a22f61118f6 100644
--- a/deps/v8/src/builtins/builtins-console-gen.cc
+++ b/deps/v8/src/builtins/builtins-console-gen.cc
@@ -17,7 +17,8 @@ TF_BUILTIN(FastConsoleAssert, CodeStubAssembler) {
 
   // TODO(ishell): use constants from Descriptor once the JSFunction linkage
   // arguments are reordered.
-  Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
+  TNode<Int32T> argc =
+      UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
   Node* context = Parameter(Descriptor::kContext);
   Node* new_target = Parameter(Descriptor::kJSNewTarget);
   GotoIf(Word32Equal(argc, Int32Constant(0)), &runtime);
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 767e626432e681..856718cedfbf0a 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -68,7 +68,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
 
   // Bump the closure counter encoded in the {feedback_cell}s map.
   {
-    Node* const feedback_cell_map = LoadMap(feedback_cell);
+    TNode<Map> const feedback_cell_map = LoadMap(feedback_cell);
     Label no_closures(this), one_closure(this), cell_done(this);
 
     GotoIf(IsNoClosuresCellMap(feedback_cell_map), &no_closures);
@@ -93,23 +93,23 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
   Node* const flags =
       LoadObjectField(shared_function_info, SharedFunctionInfo::kFlagsOffset,
                       MachineType::Uint32());
-  Node* const function_map_index = IntPtrAdd(
+  TNode<IntPtrT> const function_map_index = Signed(IntPtrAdd(
       DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(flags),
-      IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
+      IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX)));
   CSA_ASSERT(this, UintPtrLessThanOrEqual(
                        function_map_index,
                        IntPtrConstant(Context::LAST_FUNCTION_MAP_INDEX)));
 
   // Get the function map in the current native context and set that
   // as the map of the allocated object.
-  Node* const native_context = LoadNativeContext(context);
-  Node* const function_map =
-      LoadContextElement(native_context, function_map_index);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Map> const function_map =
+      CAST(LoadContextElement(native_context, function_map_index));
 
   // Create a new closure from the given function info in new space
   TNode<IntPtrT> instance_size_in_bytes =
       TimesTaggedSize(LoadMapInstanceSizeInWords(function_map));
-  TNode<Object> result = Allocate(instance_size_in_bytes);
+  TNode<HeapObject> result = Allocate(instance_size_in_bytes);
   StoreMapNoWriteBarrier(result, function_map);
   InitializeJSObjectBodyNoSlackTracking(result, function_map,
                                         instance_size_in_bytes,
@@ -141,7 +141,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
   StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
   Handle<Code> lazy_builtin_handle =
       isolate()->builtins()->builtin_handle(Builtins::kCompileLazy);
-  Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
+  TNode<Code> lazy_builtin = HeapConstant(lazy_builtin_handle);
   StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
   Return(result);
 }
@@ -189,16 +189,18 @@ compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
   BIND(&fast);
 
   // Load the initial map and verify that it's in fact a map.
-  Node* initial_map =
+  TNode<Object> initial_map_or_proto =
       LoadObjectField(new_target, JSFunction::kPrototypeOrInitialMapOffset);
-  GotoIf(TaggedIsSmi(initial_map), call_runtime);
-  GotoIf(DoesntHaveInstanceType(initial_map, MAP_TYPE), call_runtime);
+  GotoIf(TaggedIsSmi(initial_map_or_proto), call_runtime);
+  GotoIf(DoesntHaveInstanceType(CAST(initial_map_or_proto), MAP_TYPE),
+         call_runtime);
+  TNode<Map> initial_map = CAST(initial_map_or_proto);
 
   // Fall back to runtime if the target differs from the new target's
   // initial map constructor.
-  Node* new_target_constructor =
+  TNode<Object> new_target_constructor =
       LoadObjectField(initial_map, Map::kConstructorOrBackPointerOffset);
-  GotoIf(WordNotEqual(target, new_target_constructor), call_runtime);
+  GotoIf(TaggedNotEqual(target, new_target_constructor), call_runtime);
 
   VARIABLE(properties, MachineRepresentation::kTagged);
 
@@ -253,12 +255,12 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
                                  context);
   StoreObjectFieldNoWriteBarrier(function_context, Context::kExtensionOffset,
                                  TheHoleConstant());
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   StoreObjectFieldNoWriteBarrier(function_context,
                                  Context::kNativeContextOffset, native_context);
 
   // Initialize the rest of the slots to undefined.
-  TNode<HeapObject> undefined = UndefinedConstant();
+  TNode<Oddball> undefined = UndefinedConstant();
   TNode<IntPtrT> start_offset = IntPtrConstant(Context::kTodoHeaderSize);
   CodeStubAssembler::VariableList vars(0, zone());
   BuildFastLoop(
@@ -302,9 +304,9 @@ Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
     Node* boilerplate = literal_site;
     CSA_ASSERT(this, IsJSRegExp(boilerplate));
     int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize;
-    Node* copy = Allocate(size);
+    TNode<HeapObject> copy = Allocate(size);
     for (int offset = 0; offset < size; offset += kTaggedSize) {
-      Node* value = LoadObjectField(boilerplate, offset);
+      TNode<Object> value = LoadObjectField(boilerplate, offset);
       StoreObjectFieldNoWriteBarrier(copy, offset, value);
     }
     result.Bind(copy);
@@ -324,7 +326,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
 
 TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
   Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
-  Node* slot = SmiUntag(Parameter(Descriptor::kSlot));
+  TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot));
   Node* pattern = Parameter(Descriptor::kPattern);
   Node* flags = Parameter(Descriptor::kFlags);
   Node* context = Parameter(Descriptor::kContext);
@@ -357,7 +359,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
 
 TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
   Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
-  Node* slot = SmiUntag(Parameter(Descriptor::kSlot));
+  TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot));
   Node* constant_elements = Parameter(Descriptor::kConstantElements);
   Node* context = Parameter(Descriptor::kContext);
   Label call_runtime(this, Label::kDeferred);
@@ -400,7 +402,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
 
   BIND(&create_empty_array);
   TNode<Int32T> kind = LoadElementsKind(allocation_site.value());
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   Comment("LoadJSArrayElementsMap");
   TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
   TNode<Smi> zero = SmiConstant(0);
@@ -417,7 +419,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
 
 TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
   Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
-  Node* slot = SmiUntag(Parameter(Descriptor::kSlot));
+  TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot));
   Node* context = Parameter(Descriptor::kContext);
   Node* result = EmitCreateEmptyArrayLiteral(feedback_vector, slot, context);
   Return(result);
@@ -436,7 +438,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
 
   VARIABLE(var_properties, MachineRepresentation::kTagged);
   {
-    Node* bit_field_3 = LoadMapBitField3(boilerplate_map);
+    TNode<Uint32T> bit_field_3 = LoadMapBitField3(boilerplate_map);
     GotoIf(IsSetWord32<Map::IsDeprecatedBit>(bit_field_3), call_runtime);
     // Directly copy over the property store for dict-mode boilerplates.
     Label if_dictionary(this), if_fast(this), done(this);
@@ -453,7 +455,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
     BIND(&if_fast);
     {
       // TODO(cbruni): support copying out-of-object properties.
-      Node* boilerplate_properties = LoadFastProperties(boilerplate);
+      TNode<HeapObject> boilerplate_properties =
+          LoadFastProperties(boilerplate);
       GotoIfNot(IsEmptyFixedArray(boilerplate_properties), call_runtime);
       var_properties.Bind(EmptyFixedArrayConstant());
       Goto(&done);
@@ -465,7 +468,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
   {
     // Copy the elements backing store, assuming that it's flat.
     Label if_empty_fixed_array(this), if_copy_elements(this), done(this);
-    Node* boilerplate_elements = LoadElements(boilerplate);
+    TNode<FixedArrayBase> boilerplate_elements = LoadElements(boilerplate);
     Branch(IsEmptyFixedArray(boilerplate_elements), &if_empty_fixed_array,
            &if_copy_elements);
 
@@ -520,26 +523,28 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
     // Copy over in-object properties.
     Label continue_with_write_barrier(this), done_init(this);
     TVARIABLE(IntPtrT, offset, IntPtrConstant(JSObject::kHeaderSize));
-    // Mutable heap numbers only occur on 32-bit platforms.
+    // Heap numbers are only mutable on 32-bit platforms.
     bool may_use_mutable_heap_numbers = !FLAG_unbox_double_fields;
     {
       Comment("Copy in-object properties fast");
       Label continue_fast(this, &offset);
-      Branch(WordEqual(offset.value(), instance_size), &done_init,
+      Branch(IntPtrEqual(offset.value(), instance_size), &done_init,
              &continue_fast);
       BIND(&continue_fast);
       if (may_use_mutable_heap_numbers) {
         TNode<Object> field = LoadObjectField(boilerplate, offset.value());
         Label store_field(this);
         GotoIf(TaggedIsSmi(field), &store_field);
-        GotoIf(IsMutableHeapNumber(CAST(field)), &continue_with_write_barrier);
+        // TODO(leszeks): Read the field descriptor to decide if this heap
+        // number is mutable or not.
+        GotoIf(IsHeapNumber(CAST(field)), &continue_with_write_barrier);
         Goto(&store_field);
         BIND(&store_field);
         StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
       } else {
         // Copy fields as raw data.
-        TNode<IntPtrT> field =
-            LoadObjectField<IntPtrT>(boilerplate, offset.value());
+        TNode<TaggedT> field =
+            LoadObjectField<TaggedT>(boilerplate, offset.value());
         StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
       }
       offset = IntPtrAdd(offset.value(), IntPtrConstant(kTaggedSize));
@@ -562,7 +567,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
           offset.value(), instance_size,
           [=](Node* offset) {
             // TODO(ishell): value decompression is not necessary here.
-            Node* field = LoadObjectField(boilerplate, offset);
+            TNode<Object> field = LoadObjectField(boilerplate, offset);
             StoreObjectFieldNoWriteBarrier(copy, offset, field);
           },
           kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
@@ -570,19 +575,20 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
       BuildFastLoop(
           offset.value(), instance_size,
           [=](Node* offset) {
-            Node* field = LoadObjectField(copy, offset);
-            Label copy_mutable_heap_number(this, Label::kDeferred),
-                continue_loop(this);
+            TNode<Object> field = LoadObjectField(copy, offset);
+            Label copy_heap_number(this, Label::kDeferred), continue_loop(this);
             // We only have to clone complex field values.
             GotoIf(TaggedIsSmi(field), &continue_loop);
-            Branch(IsMutableHeapNumber(field), &copy_mutable_heap_number,
+            // TODO(leszeks): Read the field descriptor to decide if this heap
+            // number is mutable or not.
+            Branch(IsHeapNumber(CAST(field)), &copy_heap_number,
                    &continue_loop);
-            BIND(&copy_mutable_heap_number);
+            BIND(&copy_heap_number);
             {
-              Node* double_value = LoadHeapNumberValue(field);
-              Node* mutable_heap_number =
-                  AllocateMutableHeapNumberWithValue(double_value);
-              StoreObjectField(copy, offset, mutable_heap_number);
+              TNode<Float64T> double_value = LoadHeapNumberValue(CAST(field));
+              TNode<HeapNumber> heap_number =
+                  AllocateHeapNumberWithValue(double_value);
+              StoreObjectField(copy, offset, heap_number);
               Goto(&continue_loop);
             }
             BIND(&continue_loop);
@@ -598,7 +604,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
 TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
   Label call_runtime(this);
   Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
-  Node* slot = SmiUntag(Parameter(Descriptor::kSlot));
+  TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot));
   Node* copy =
       EmitCreateShallowObjectLiteral(feedback_vector, slot, &call_runtime);
   Return(copy);
@@ -615,18 +621,17 @@ TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
 // Used by the CreateEmptyObjectLiteral bytecode and the Object constructor.
 Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
     Node* context) {
-  Node* native_context = LoadNativeContext(context);
-  Node* object_function =
-      LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX);
-  Node* map = LoadObjectField(object_function,
-                              JSFunction::kPrototypeOrInitialMapOffset);
-  CSA_ASSERT(this, IsMap(map));
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<JSFunction> object_function =
+      CAST(LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
+  TNode<Map> map = CAST(LoadObjectField(
+      object_function, JSFunction::kPrototypeOrInitialMapOffset));
   // Ensure that slack tracking is disabled for the map.
   STATIC_ASSERT(Map::kNoSlackTracking == 0);
   CSA_ASSERT(
       this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
-  Node* empty_fixed_array = EmptyFixedArrayConstant();
-  Node* result =
+  TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
+  TNode<JSObject> result =
       AllocateJSObjectFromMap(map, empty_fixed_array, empty_fixed_array);
   return result;
 }
@@ -634,22 +639,22 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
 // ES #sec-object-constructor
 TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
   int const kValueArg = 0;
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
   Node* context = Parameter(Descriptor::kContext);
-  Node* new_target = Parameter(Descriptor::kJSNewTarget);
+  TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
 
   VARIABLE(var_result, MachineRepresentation::kTagged);
   Label if_subclass(this, Label::kDeferred), if_notsubclass(this),
       return_result(this);
   GotoIf(IsUndefined(new_target), &if_notsubclass);
   TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
-  Branch(WordEqual(new_target, target), &if_notsubclass, &if_subclass);
+  Branch(TaggedEqual(new_target, target), &if_notsubclass, &if_subclass);
 
   BIND(&if_subclass);
   {
-    Node* result =
+    TNode<Object> result =
         CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
     var_result.Bind(result);
     Goto(&return_result);
@@ -659,9 +664,9 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
   {
     Label if_newobject(this, Label::kDeferred), if_toobject(this);
 
-    Node* value_index = IntPtrConstant(kValueArg);
+    TNode<IntPtrT> value_index = IntPtrConstant(kValueArg);
     GotoIf(UintPtrGreaterThanOrEqual(value_index, argc), &if_newobject);
-    Node* value = args.AtIndex(value_index);
+    TNode<Object> value = args.AtIndex(value_index);
     GotoIf(IsNull(value), &if_newobject);
     Branch(IsUndefined(value), &if_newobject, &if_toobject);
 
@@ -674,7 +679,7 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
 
     BIND(&if_toobject);
     {
-      Node* result = CallBuiltin(Builtins::kToObject, context, value);
+      TNode<Object> result = CallBuiltin(Builtins::kToObject, context, value);
       var_result.Bind(result);
       Goto(&return_result);
     }
@@ -687,20 +692,20 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
 // ES #sec-number-constructor
 TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
   Node* context = Parameter(Descriptor::kContext);
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
   // 1. If no arguments were passed to this function invocation, let n be +0.
   VARIABLE(var_n, MachineRepresentation::kTagged, SmiConstant(0));
   Label if_nloaded(this, &var_n);
-  GotoIf(WordEqual(argc, IntPtrConstant(0)), &if_nloaded);
+  GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &if_nloaded);
 
   // 2. Else,
   //    a. Let prim be ? ToNumeric(value).
   //    b. If Type(prim) is BigInt, let n be the Number value for prim.
   //    c. Otherwise, let n be prim.
-  Node* value = args.AtIndex(0);
+  TNode<Object> value = args.AtIndex(0);
   var_n.Bind(ToNumber(context, value, BigIntHandling::kConvertToNumber));
   Goto(&if_nloaded);
 
@@ -726,7 +731,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
       // from the current frame here in order to reduce register pressure on the
       // fast path.
       TNode<JSFunction> target = LoadTargetFromFrame();
-      Node* result =
+      TNode<Object> result =
           CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
       StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, n_value);
       args.PopAndReturn(result);
@@ -739,66 +744,5 @@ TF_BUILTIN(GenericLazyDeoptContinuation, ConstructorBuiltinsAssembler) {
   Return(result);
 }
 
-// https://tc39.github.io/ecma262/#sec-string-constructor
-TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
-  Node* argc =
-      ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
-  CodeStubArguments args(this, argc);
-
-  TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
-
-  // 1. If no arguments were passed to this function invocation, let s be "".
-  VARIABLE(var_s, MachineRepresentation::kTagged, EmptyStringConstant());
-  Label if_sloaded(this, &var_s);
-  GotoIf(WordEqual(argc, IntPtrConstant(0)), &if_sloaded);
-
-  // 2. Else,
-  //    a. If NewTarget is undefined [...]
-  Node* value = args.AtIndex(0);
-  Label if_tostring(this, &var_s);
-  GotoIfNot(IsUndefined(new_target), &if_tostring);
-
-  // 2a. [...] and Type(value) is Symbol, return SymbolDescriptiveString(value).
-  GotoIf(TaggedIsSmi(value), &if_tostring);
-  GotoIfNot(IsSymbol(value), &if_tostring);
-  {
-    Node* result =
-        CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
-    args.PopAndReturn(result);
-  }
-
-  // 2b. Let s be ? ToString(value).
-  BIND(&if_tostring);
-  {
-    var_s.Bind(CallBuiltin(Builtins::kToString, context, value));
-    Goto(&if_sloaded);
-  }
-
-  // 3. If NewTarget is undefined, return s.
-  BIND(&if_sloaded);
-  {
-    Node* s_value = var_s.value();
-    Label return_s(this), constructstring(this, Label::kDeferred);
-    Branch(IsUndefined(new_target), &return_s, &constructstring);
-
-    BIND(&return_s);
-    { args.PopAndReturn(s_value); }
-
-    BIND(&constructstring);
-    {
-      // We are not using Parameter(Descriptor::kJSTarget) and loading the value
-      // from the current frame here in order to reduce register pressure on the
-      // fast path.
-      TNode<JSFunction> target = LoadTargetFromFrame();
-
-      Node* result =
-          CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
-      StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, s_value);
-      args.PopAndReturn(result);
-    }
-  }
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 71a9cbf1452836..8a0c73b29288af 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -29,7 +29,7 @@ class ConversionBuiltinsAssembler : public CodeStubAssembler {
 void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
     Node* context, Node* input, ToPrimitiveHint hint) {
   // Lookup the @@toPrimitive property on the {input}.
-  Node* exotic_to_prim =
+  TNode<Object> exotic_to_prim =
       GetProperty(context, input, factory()->to_primitive_symbol());
 
   // Check if {exotic_to_prim} is neither null nor undefined.
@@ -40,7 +40,8 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
     // representation of the {hint}.
     Callable callable =
         CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined);
-    Node* hint_string = HeapConstant(factory()->ToPrimitiveHintString(hint));
+    TNode<String> hint_string =
+        HeapConstant(factory()->ToPrimitiveHintString(hint));
     Node* result =
         CallJS(callable, context, exotic_to_prim, input, hint_string);
 
@@ -48,7 +49,7 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
     Label if_resultisprimitive(this),
         if_resultisnotprimitive(this, Label::kDeferred);
     GotoIf(TaggedIsSmi(result), &if_resultisprimitive);
-    Node* result_instance_type = LoadInstanceType(result);
+    TNode<Uint16T> result_instance_type = LoadInstanceType(result);
     Branch(IsPrimitiveInstanceType(result_instance_type), &if_resultisprimitive,
            &if_resultisnotprimitive);
 
@@ -119,7 +120,7 @@ TF_BUILTIN(ToName, CodeStubAssembler) {
     Label if_inputisbigint(this), if_inputisname(this), if_inputisnumber(this),
         if_inputisoddball(this), if_inputisreceiver(this, Label::kDeferred);
     GotoIf(TaggedIsSmi(input), &if_inputisnumber);
-    Node* input_instance_type = LoadInstanceType(input);
+    TNode<Uint16T> input_instance_type = LoadInstanceType(input);
     STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
     GotoIf(IsNameInstanceType(input_instance_type), &if_inputisname);
     GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver);
@@ -230,13 +231,13 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
   }
   for (Handle<String> name : method_names) {
     // Lookup the {name} on the {input}.
-    Node* method = GetProperty(context, input, name);
+    TNode<Object> method = GetProperty(context, input, name);
 
     // Check if the {method} is callable.
     Label if_methodiscallable(this),
         if_methodisnotcallable(this, Label::kDeferred);
     GotoIf(TaggedIsSmi(method), &if_methodisnotcallable);
-    Node* method_map = LoadMap(method);
+    TNode<Map> method_map = LoadMap(CAST(method));
     Branch(IsCallableMap(method_map), &if_methodiscallable,
            &if_methodisnotcallable);
 
@@ -250,7 +251,7 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
 
       // Return the {result} if it is a primitive.
       GotoIf(TaggedIsSmi(result), &return_result);
-      Node* result_instance_type = LoadInstanceType(result);
+      TNode<Uint16T> result_instance_type = LoadInstanceType(result);
       GotoIf(IsPrimitiveInstanceType(result_instance_type), &return_result);
     }
 
@@ -340,7 +341,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
     BIND(&if_lenisheapnumber);
     {
       // Load the floating-point value of {len}.
-      Node* len_value = LoadHeapNumberValue(len);
+      TNode<Float64T> len_value = LoadHeapNumberValue(len);
 
       // Check if {len} is not greater than zero.
       GotoIfNot(Float64GreaterThan(len_value, Float64Constant(0.0)),
@@ -352,8 +353,8 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
              &return_two53minus1);
 
       // Round the {len} towards -Infinity.
-      Node* value = Float64Floor(len_value);
-      Node* result = ChangeFloat64ToTagged(value);
+      TNode<Float64T> value = Float64Floor(len_value);
+      TNode<Number> result = ChangeFloat64ToTagged(value);
       Return(result);
     }
 
@@ -403,11 +404,12 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
 
   GotoIf(TaggedIsSmi(object), &if_smi);
 
-  Node* map = LoadMap(object);
-  Node* instance_type = LoadMapInstanceType(map);
+  TNode<Map> map = LoadMap(object);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(map);
   GotoIf(IsJSReceiverInstanceType(instance_type), &if_jsreceiver);
 
-  Node* constructor_function_index = LoadMapConstructorFunctionIndex(map);
+  TNode<IntPtrT> constructor_function_index =
+      LoadMapConstructorFunctionIndex(map);
   GotoIf(WordEqual(constructor_function_index,
                    IntPtrConstant(Map::kNoConstructorFunctionIndex)),
          &if_noconstructor);
@@ -420,12 +422,12 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
   Goto(&if_wrapjs_primitive_wrapper);
 
   BIND(&if_wrapjs_primitive_wrapper);
-  TNode<Context> native_context = LoadNativeContext(context);
-  Node* constructor = LoadContextElement(
-      native_context, constructor_function_index_var.value());
-  Node* initial_map =
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<JSFunction> constructor = CAST(LoadContextElement(
+      native_context, constructor_function_index_var.value()));
+  TNode<Object> initial_map =
       LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
-  Node* js_primitive_wrapper = Allocate(JSPrimitiveWrapper::kSize);
+  TNode<HeapObject> js_primitive_wrapper = Allocate(JSPrimitiveWrapper::kSize);
   StoreMapNoWriteBarrier(js_primitive_wrapper, initial_map);
   StoreObjectFieldRoot(js_primitive_wrapper,
                        JSPrimitiveWrapper::kPropertiesOrHashOffset,
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index ca84948d48eabb..97600efaa49098 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -28,7 +28,7 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
   Label receiver_not_date(this, Label::kDeferred);
 
   GotoIf(TaggedIsSmi(receiver), &receiver_not_date);
-  Node* receiver_instance_type = LoadInstanceType(receiver);
+  TNode<Uint16T> receiver_instance_type = LoadInstanceType(receiver);
   GotoIfNot(InstanceTypeEqual(receiver_instance_type, JS_DATE_TYPE),
             &receiver_not_date);
 
@@ -38,20 +38,20 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
   } else {
     if (field_index < JSDate::kFirstUncachedField) {
       Label stamp_mismatch(this, Label::kDeferred);
-      Node* date_cache_stamp = Load(
-          MachineType::AnyTagged(),
+      TNode<Object> date_cache_stamp = Load<Object>(
           ExternalConstant(ExternalReference::date_cache_stamp(isolate())));
 
-      Node* cache_stamp = LoadObjectField(receiver, JSDate::kCacheStampOffset);
-      GotoIf(WordNotEqual(date_cache_stamp, cache_stamp), &stamp_mismatch);
+      TNode<Object> cache_stamp =
+          LoadObjectField(receiver, JSDate::kCacheStampOffset);
+      GotoIf(TaggedNotEqual(date_cache_stamp, cache_stamp), &stamp_mismatch);
       Return(LoadObjectField(receiver,
                              JSDate::kValueOffset + field_index * kTaggedSize));
 
       BIND(&stamp_mismatch);
     }
 
-    Node* field_index_smi = SmiConstant(field_index);
-    Node* function =
+    TNode<Smi> field_index_smi = SmiConstant(field_index);
+    TNode<ExternalReference> function =
         ExternalConstant(ExternalReference::get_date_field_function());
     Node* result = CallCFunction(
         function, MachineType::AnyTagged(),
@@ -182,7 +182,7 @@ TF_BUILTIN(DatePrototypeValueOf, DateBuiltinsAssembler) {
 TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
   Node* context = Parameter(Descriptor::kContext);
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* hint = Parameter(Descriptor::kHint);
+  TNode<Object> hint = CAST(Parameter(Descriptor::kHint));
 
   // Check if the {receiver} is actually a JSReceiver.
   Label receiver_is_invalid(this, Label::kDeferred);
@@ -194,25 +194,25 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
       hint_is_invalid(this, Label::kDeferred);
 
   // Fast cases for internalized strings.
-  Node* number_string = LoadRoot(RootIndex::knumber_string);
-  GotoIf(WordEqual(hint, number_string), &hint_is_number);
-  Node* default_string = LoadRoot(RootIndex::kdefault_string);
-  GotoIf(WordEqual(hint, default_string), &hint_is_string);
-  Node* string_string = LoadRoot(RootIndex::kstring_string);
-  GotoIf(WordEqual(hint, string_string), &hint_is_string);
+  TNode<String> number_string = NumberStringConstant();
+  GotoIf(TaggedEqual(hint, number_string), &hint_is_number);
+  TNode<String> default_string = DefaultStringConstant();
+  GotoIf(TaggedEqual(hint, default_string), &hint_is_string);
+  TNode<String> string_string = StringStringConstant();
+  GotoIf(TaggedEqual(hint, string_string), &hint_is_string);
 
   // Slow-case with actual string comparisons.
   GotoIf(TaggedIsSmi(hint), &hint_is_invalid);
-  GotoIfNot(IsString(hint), &hint_is_invalid);
-  GotoIf(WordEqual(
+  GotoIfNot(IsString(CAST(hint)), &hint_is_invalid);
+  GotoIf(TaggedEqual(
              CallBuiltin(Builtins::kStringEqual, context, hint, number_string),
              TrueConstant()),
          &hint_is_number);
-  GotoIf(WordEqual(
+  GotoIf(TaggedEqual(
              CallBuiltin(Builtins::kStringEqual, context, hint, default_string),
              TrueConstant()),
          &hint_is_string);
-  GotoIf(WordEqual(
+  GotoIf(TaggedEqual(
              CallBuiltin(Builtins::kStringEqual, context, hint, string_string),
              TrueConstant()),
          &hint_is_string);
@@ -223,7 +223,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
   {
     Callable callable = CodeFactory::OrdinaryToPrimitive(
         isolate(), OrdinaryToPrimitiveHint::kNumber);
-    Node* result = CallStub(callable, context, receiver);
+    TNode<Object> result = CallStub(callable, context, receiver);
     Return(result);
   }
 
@@ -232,7 +232,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
   {
     Callable callable = CodeFactory::OrdinaryToPrimitive(
         isolate(), OrdinaryToPrimitiveHint::kString);
-    Node* result = CallStub(callable, context, receiver);
+    TNode<Object> result = CallStub(callable, context, receiver);
     Return(result);
   }
 
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index d33387354283d5..c3e7601832148f 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -111,24 +111,23 @@ const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
 // ES6 section 20.3.1.16 Date Time String Format
 double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
   str = String::Flatten(isolate, str);
-  // TODO(bmeurer): Change DateParser to not use the FixedArray.
-  Handle<FixedArray> tmp =
-      isolate->factory()->NewFixedArray(DateParser::OUTPUT_SIZE);
+  double out[DateParser::OUTPUT_SIZE];
   DisallowHeapAllocation no_gc;
   String::FlatContent str_content = str->GetFlatContent(no_gc);
   bool result;
   if (str_content.IsOneByte()) {
-    result = DateParser::Parse(isolate, str_content.ToOneByteVector(), *tmp);
+    result = DateParser::Parse(isolate, str_content.ToOneByteVector(), out);
   } else {
-    result = DateParser::Parse(isolate, str_content.ToUC16Vector(), *tmp);
+    result = DateParser::Parse(isolate, str_content.ToUC16Vector(), out);
   }
   if (!result) return std::numeric_limits<double>::quiet_NaN();
-  double const day =
-      MakeDay(tmp->get(0).Number(), tmp->get(1).Number(), tmp->get(2).Number());
-  double const time = MakeTime(tmp->get(3).Number(), tmp->get(4).Number(),
-                               tmp->get(5).Number(), tmp->get(6).Number());
+  double const day = MakeDay(out[DateParser::YEAR], out[DateParser::MONTH],
+                             out[DateParser::DAY]);
+  double const time =
+      MakeTime(out[DateParser::HOUR], out[DateParser::MINUTE],
+               out[DateParser::SECOND], out[DateParser::MILLISECOND]);
   double date = MakeDate(day, time);
-  if (tmp->get(7).IsNull(isolate)) {
+  if (std::isnan(out[DateParser::UTC_OFFSET])) {
     if (date >= -DateCache::kMaxTimeBeforeUTCInMs &&
         date <= DateCache::kMaxTimeBeforeUTCInMs) {
       date = isolate->date_cache()->ToUTC(static_cast<int64_t>(date));
@@ -136,7 +135,7 @@ double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
       return std::numeric_limits<double>::quiet_NaN();
     }
   } else {
-    date -= tmp->get(7).Number() * 1000.0;
+    date -= out[DateParser::UTC_OFFSET] * 1000.0;
   }
   return DateCache::TimeClip(date);
 }
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 23ab4a88ca14ff..95f5273f14f7fc 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -207,7 +207,9 @@ namespace internal {
   TFC(Typeof, Typeof)                                                          \
   TFC(GetSuperConstructor, Typeof)                                             \
   TFC(BigIntToI64, BigIntToI64)                                                \
+  TFC(BigIntToI32Pair, BigIntToI32Pair)                                        \
   TFC(I64ToBigInt, I64ToBigInt)                                                \
+  TFC(I32PairToBigInt, I32PairToBigInt)                                        \
                                                                                \
   /* Type conversions continuations */                                         \
   TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter)            \
@@ -222,9 +224,9 @@ namespace internal {
   TFH(LoadIC_Slow, LoadWithVector)                                             \
   TFH(LoadIC_StringLength, LoadWithVector)                                     \
   TFH(LoadIC_StringWrapperLength, LoadWithVector)                              \
-  TFH(LoadIC_Uninitialized, LoadWithVector)                                    \
+  TFH(LoadIC_NoFeedback, Load)                                                 \
   TFH(StoreGlobalIC_Slow, StoreWithVector)                                     \
-  TFH(StoreIC_Uninitialized, StoreWithVector)                                  \
+  TFH(StoreIC_NoFeedback, Store)                                               \
   TFH(StoreInArrayLiteralIC_Slow, StoreWithVector)                             \
   TFH(KeyedLoadIC_SloppyArguments, LoadWithVector)                             \
   TFH(LoadIndexedInterceptorIC, LoadWithVector)                                \
@@ -644,8 +646,6 @@ namespace internal {
   TFJ(MathCeil, 1, kReceiver, kX)                                              \
   /* ES6 #sec-math.floor */                                                    \
   TFJ(MathFloor, 1, kReceiver, kX)                                             \
-  /* ES6 #sec-math.hypot */                                                    \
-  CPP(MathHypot)                                                               \
   /* ES6 #sec-math.imul */                                                     \
   TFJ(MathImul, 2, kReceiver, kX, kY)                                          \
   /* ES6 #sec-math.max */                                                      \
@@ -847,32 +847,11 @@ namespace internal {
   TFJ(RegExpPrototypeCompile, 2, kReceiver, kPattern, kFlags)                  \
   /* ES #sec-regexp.prototype.exec */                                          \
   TFJ(RegExpPrototypeExec, 1, kReceiver, kString)                              \
-  /* ES #sec-get-regexp.prototype.dotAll */                                    \
-  TFJ(RegExpPrototypeDotAllGetter, 0, kReceiver)                               \
-  /* ES #sec-get-regexp.prototype.flags */                                     \
-  TFJ(RegExpPrototypeFlagsGetter, 0, kReceiver)                                \
-  /* ES #sec-get-regexp.prototype.global */                                    \
-  TFJ(RegExpPrototypeGlobalGetter, 0, kReceiver)                               \
-  /* ES #sec-get-regexp.prototype.ignorecase */                                \
-  TFJ(RegExpPrototypeIgnoreCaseGetter, 0, kReceiver)                           \
-  /* ES #sec-regexp.prototype-@@match */                                       \
-  TFJ(RegExpPrototypeMatch, 1, kReceiver, kString)                             \
   /* https://tc39.github.io/proposal-string-matchall/ */                       \
   TFJ(RegExpPrototypeMatchAll, 1, kReceiver, kString)                          \
-  /* ES #sec-get-regexp.prototype.multiline */                                 \
-  TFJ(RegExpPrototypeMultilineGetter, 0, kReceiver)                            \
   /* ES #sec-regexp.prototype-@@search */                                      \
   TFJ(RegExpPrototypeSearch, 1, kReceiver, kString)                            \
-  /* ES #sec-get-regexp.prototype.source */                                    \
-  TFJ(RegExpPrototypeSourceGetter, 0, kReceiver)                               \
-  /* ES #sec-get-regexp.prototype.sticky */                                    \
-  TFJ(RegExpPrototypeStickyGetter, 0, kReceiver)                               \
-  /* ES #sec-regexp.prototype.test */                                          \
-  TFJ(RegExpPrototypeTest, 1, kReceiver, kString)                              \
-  TFS(RegExpPrototypeTestFast, kReceiver, kString)                             \
   CPP(RegExpPrototypeToString)                                                 \
-  /* ES #sec-get-regexp.prototype.unicode */                                   \
-  TFJ(RegExpPrototypeUnicodeGetter, 0, kReceiver)                              \
   CPP(RegExpRightContextGetter)                                                \
                                                                                \
   /* ES #sec-regexp.prototype-@@split */                                       \
@@ -880,7 +859,7 @@ namespace internal {
   /* RegExp helpers */                                                         \
   TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo)                \
   TFS(RegExpExecInternal, kRegExp, kString, kLastIndex, kMatchInfo)            \
-  TFS(RegExpMatchFast, kReceiver, kPattern)                                    \
+  ASM(RegExpInterpreterTrampoline, CCall)                                      \
   TFS(RegExpPrototypeExecSlow, kReceiver, kString)                             \
   TFS(RegExpSearchFast, kReceiver, kPattern)                                   \
   TFS(RegExpSplit, kRegExp, kString, kLimit)                                   \
@@ -926,8 +905,6 @@ namespace internal {
   CPP(AtomicsWake)                                                             \
                                                                                \
   /* String */                                                                 \
-  /* ES #sec-string-constructor */                                             \
-  TFJ(StringConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel)      \
   /* ES #sec-string.fromcodepoint */                                           \
   CPP(StringFromCodePoint)                                                     \
   /* ES6 #sec-string.fromcharcode */                                           \
@@ -945,11 +922,6 @@ namespace internal {
   TFJ(StringPrototypeMatchAll, 1, kReceiver, kRegexp)                          \
   /* ES6 #sec-string.prototype.localecompare */                                \
   CPP(StringPrototypeLocaleCompare)                                            \
-  /* ES6 #sec-string.prototype.padEnd */                                       \
-  TFJ(StringPrototypePadEnd, SharedFunctionInfo::kDontAdaptArgumentsSentinel)  \
-  /* ES6 #sec-string.prototype.padStart */                                     \
-  TFJ(StringPrototypePadStart,                                                 \
-      SharedFunctionInfo::kDontAdaptArgumentsSentinel)                         \
   /* ES6 #sec-string.prototype.replace */                                      \
   TFJ(StringPrototypeReplace, 2, kReceiver, kSearch, kReplace)                 \
   /* ES6 #sec-string.prototype.search */                                       \
@@ -1028,7 +1000,6 @@ namespace internal {
   TFC(WasmAtomicNotify, WasmAtomicNotify)                                      \
   TFC(WasmI32AtomicWait, WasmI32AtomicWait)                                    \
   TFC(WasmI64AtomicWait, WasmI64AtomicWait)                                    \
-  TFC(WasmCallJavaScript, CallTrampoline)                                      \
   TFC(WasmMemoryGrow, WasmMemoryGrow)                                          \
   TFC(WasmTableGet, WasmTableGet)                                              \
   TFC(WasmTableSet, WasmTableSet)                                              \
@@ -1051,7 +1022,9 @@ namespace internal {
   TFS(ThrowWasmTrapElemSegmentDropped)                                         \
   TFS(ThrowWasmTrapTableOutOfBounds)                                           \
   TFC(WasmI64ToBigInt, I64ToBigInt)                                            \
+  TFC(WasmI32PairToBigInt, I32PairToBigInt)                                    \
   TFC(WasmBigIntToI64, BigIntToI64)                                            \
+  TFC(WasmBigIntToI32Pair, BigIntToI32Pair)                                    \
                                                                                \
   /* WeakMap */                                                                \
   TFJ(WeakMapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel)     \
@@ -1134,8 +1107,6 @@ namespace internal {
                                                                                \
   /* String helpers */                                                         \
   TFS(StringAdd_CheckNone, kLeft, kRight)                                      \
-  TFS(StringAdd_ConvertLeft, kLeft, kRight)                                    \
-  TFS(StringAdd_ConvertRight, kLeft, kRight)                                   \
   TFS(SubString, kString, kFrom, kTo)                                          \
                                                                                \
   /* Miscellaneous */                                                          \
@@ -1344,7 +1315,6 @@ namespace internal {
   V(WasmAtomicNotify)                    \
   V(WasmI32AtomicWait)                   \
   V(WasmI64AtomicWait)                   \
-  V(WasmCallJavaScript)                  \
   V(WasmMemoryGrow)                      \
   V(WasmTableGet)                        \
   V(WasmTableSet)                        \
@@ -1356,7 +1326,9 @@ namespace internal {
   V(WasmRethrow)                         \
   V(DoubleToI)                           \
   V(WasmI64ToBigInt)                     \
-  V(WasmBigIntToI64)
+  V(WasmI32PairToBigInt)                 \
+  V(WasmBigIntToI64)                     \
+  V(WasmBigIntToI32Pair)
 
 // The exceptions thrown in the following builtins are caught internally and
 // will not be propagated further or re-thrown.
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 411d9a6930bc8b..ee1f67d43428b6 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -17,19 +17,20 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
 
   // TODO(ishell): use constants from Descriptor once the JSFunction linkage
   // arguments are reordered.
-  Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
+  TNode<Int32T> argc =
+      UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
   Node* context = Parameter(Descriptor::kContext);
   Node* new_target = Parameter(Descriptor::kJSNewTarget);
 
   CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
 
   // Check that receiver has instance type of JS_FUNCTION_TYPE
-  Node* receiver = args.GetReceiver();
+  TNode<Object> receiver = args.GetReceiver();
   GotoIf(TaggedIsSmi(receiver), &slow);
 
-  Node* receiver_map = LoadMap(receiver);
+  TNode<Map> receiver_map = LoadMap(CAST(receiver));
   {
-    Node* instance_type = LoadMapInstanceType(receiver_map);
+    TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
     GotoIfNot(
         Word32Or(InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE),
                  InstanceTypeEqual(instance_type, JS_BOUND_FUNCTION_TYPE)),
@@ -45,35 +46,34 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
   // AccessorInfo objects. In that case, their value can be recomputed even if
   // the actual value on the object changes.
   Comment("Check descriptor array length");
-  TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
   // Minimum number of own descriptors required for the fast path.
   const int min_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex,
-                                         JSFunction::kNameDescriptorIndex);
-  TNode<Int32T> nof_descriptors = LoadNumberOfDescriptors(descriptors);
-  GotoIf(
-      Int32LessThanOrEqual(nof_descriptors, Int32Constant(min_nof_descriptors)),
-      &slow);
+                                         JSFunction::kNameDescriptorIndex) +
+                                  1;
+  TNode<Int32T> nof_descriptors = LoadNumberOfOwnDescriptors(receiver_map);
+  GotoIf(Int32LessThan(nof_descriptors, Int32Constant(min_nof_descriptors)),
+         &slow);
 
   // Check whether the length and name properties are still present as
   // AccessorInfo objects. In that case, their value can be recomputed even if
   // the actual value on the object changes.
   Comment("Check name and length properties");
   {
+    TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
     const int length_index = JSFunction::kLengthDescriptorIndex;
     TNode<Name> maybe_length =
         LoadKeyByDescriptorEntry(descriptors, length_index);
-    GotoIf(WordNotEqual(maybe_length, LoadRoot(RootIndex::klength_string)),
-           &slow);
+    GotoIf(TaggedNotEqual(maybe_length, LengthStringConstant()), &slow);
 
     TNode<Object> maybe_length_accessor =
         LoadValueByDescriptorEntry(descriptors, length_index);
     GotoIf(TaggedIsSmi(maybe_length_accessor), &slow);
-    Node* length_value_map = LoadMap(CAST(maybe_length_accessor));
+    TNode<Map> length_value_map = LoadMap(CAST(maybe_length_accessor));
     GotoIfNot(IsAccessorInfoMap(length_value_map), &slow);
 
     const int name_index = JSFunction::kNameDescriptorIndex;
     TNode<Name> maybe_name = LoadKeyByDescriptorEntry(descriptors, name_index);
-    GotoIf(WordNotEqual(maybe_name, LoadRoot(RootIndex::kname_string)), &slow);
+    GotoIf(TaggedNotEqual(maybe_name, NameStringConstant()), &slow);
 
     TNode<Object> maybe_name_accessor =
         LoadValueByDescriptorEntry(descriptors, name_index);
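     // Illustrative example (not part of the original code): a call such as
     //   Object.defineProperty(f, "length", { value: 1 });
     // replaces the AccessorInfo descriptor with a plain data property, so
     // the descriptor checks in this block route f.bind(...) to &slow.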
@@ -89,7 +89,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
   {
     Label with_constructor(this);
     VariableList vars({&bound_function_map}, zone());
-    Node* native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
 
     Label map_done(this, vars);
     GotoIf(IsConstructorMap(receiver_map), &with_constructor);
@@ -108,9 +108,10 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
 
   // Verify that __proto__ matches that of the target bound function.
   Comment("Verify that __proto__ matches target bound function");
-  Node* prototype = LoadMapPrototype(receiver_map);
-  Node* expected_prototype = LoadMapPrototype(bound_function_map.value());
-  GotoIf(WordNotEqual(prototype, expected_prototype), &slow);
+  TNode<HeapObject> prototype = LoadMapPrototype(receiver_map);
+  TNode<HeapObject> expected_prototype =
+      LoadMapPrototype(bound_function_map.value());
+  GotoIf(TaggedNotEqual(prototype, expected_prototype), &slow);
 
   // Allocate the arguments array.
   Comment("Allocate the arguments array");
@@ -126,12 +127,13 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
     VARIABLE(index, MachineType::PointerRepresentation());
     index.Bind(IntPtrConstant(0));
     VariableList foreach_vars({&index}, zone());
-    args.ForEach(foreach_vars,
-                 [this, elements, &index](Node* arg) {
-                   StoreFixedArrayElement(elements, index.value(), arg);
-                   Increment(&index);
-                 },
-                 IntPtrConstant(1));
+    args.ForEach(
+        foreach_vars,
+        [this, elements, &index](Node* arg) {
+          StoreFixedArrayElement(elements, index.value(), arg);
+          Increment(&index);
+        },
+        IntPtrConstant(1));
     argument_array.Bind(elements);
     Goto(&arguments_done);
 
@@ -162,7 +164,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
   // Allocate the resulting bound function.
   Comment("Allocate the resulting bound function");
   {
-    Node* bound_function = Allocate(JSBoundFunction::kSize);
+    TNode<HeapObject> bound_function = Allocate(JSBoundFunction::kSize);
     StoreMapNoWriteBarrier(bound_function, bound_function_map.value());
     StoreObjectFieldNoWriteBarrier(
         bound_function, JSBoundFunction::kBoundTargetFunctionOffset, receiver);
@@ -172,7 +174,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
     StoreObjectFieldNoWriteBarrier(bound_function,
                                    JSBoundFunction::kBoundArgumentsOffset,
                                    argument_array.value());
-    Node* empty_fixed_array = EmptyFixedArrayConstant();
+    TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
     StoreObjectFieldNoWriteBarrier(
         bound_function, JSObject::kPropertiesOrHashOffset, empty_fixed_array);
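     // Explanatory note (assuming the allocation above is a regular new-space
     // allocation): stores that initialize a freshly allocated object need no
     // write barrier, since no old-to-new pointer can be created this way.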
     StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kElementsOffset,
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index f9a356f94bf6f6..f75014d0346266 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -93,6 +93,17 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
     function->shared().set_name_should_print_as_anonymous(true);
   }
 
+  // The spec says that we have to wrap code created via the function
+  // constructor in e.g. 'function anonymous(' as above, complete with extra
+  // line breaks. This is confusing when reporting stack traces from the eval'd
+  // code as the line number of the error is always reported with 2 extra line
+  // breaks e.g. line 1 is reported as line 3. We fix this up here by setting
+  // line_offset which is read by stack trace code.
+  Handle<Script> script(Script::cast(function->shared().script()), isolate);
+  if (script->line_offset() == 0) {
+    script->set_line_offset(-2);
+  }
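+  // A sketch of the effect (illustrative JS, not part of this change):
+  //   new Function('throw new Error()');
+  // compiles a script whose source is
+  //   function anonymous(
+  //   ) {
+  //   throw new Error()
+  //   }
+  // so an error on the user's line 1 would otherwise surface as line 3;
+  // the -2 offset cancels the two wrapper line breaks.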
+
   // If new.target is equal to target then the function created
   // is already correctly set up and nothing else should be done
   // here. But if new.target is not equal to target then we are
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index 7e75bbcee01bb1..d884c417fc04a0 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -50,8 +50,8 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
   // Resume the {receiver} using our trampoline.
   VARIABLE(var_exception, MachineRepresentation::kTagged, UndefinedConstant());
   Label if_exception(this, Label::kDeferred), if_final_return(this);
-  Node* result = CallStub(CodeFactory::ResumeGenerator(isolate()), context,
-                          value, receiver);
+  TNode<Object> result = CallStub(CodeFactory::ResumeGenerator(isolate()),
+                                  context, value, receiver);
   // Make sure we close the generator if there was an exception.
   GotoIfException(result, &if_exception, &var_exception);
 
@@ -115,12 +115,12 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
 TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
   const int kValueArg = 0;
 
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
-  Node* receiver = args.GetReceiver();
-  Node* value = args.GetOptionalArgumentValue(kValueArg);
+  TNode<Object> receiver = args.GetReceiver();
+  TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
   Node* context = Parameter(Descriptor::kContext);
 
   GeneratorPrototypeResume(&args, receiver, value, context,
@@ -132,12 +132,12 @@ TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
 TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
   const int kValueArg = 0;
 
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
-  Node* receiver = args.GetReceiver();
-  Node* value = args.GetOptionalArgumentValue(kValueArg);
+  TNode<Object> receiver = args.GetReceiver();
+  TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
   Node* context = Parameter(Descriptor::kContext);
 
   GeneratorPrototypeResume(&args, receiver, value, context,
@@ -149,12 +149,12 @@ TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
 TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
   const int kExceptionArg = 0;
 
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
-  Node* receiver = args.GetReceiver();
-  Node* exception = args.GetOptionalArgumentValue(kExceptionArg);
+  TNode<Object> receiver = args.GetReceiver();
+  TNode<Object> exception = args.GetOptionalArgumentValue(kExceptionArg);
   Node* context = Parameter(Descriptor::kContext);
 
   GeneratorPrototypeResume(&args, receiver, exception, context,
diff --git a/deps/v8/src/builtins/builtins-global-gen.cc b/deps/v8/src/builtins/builtins-global-gen.cc
index fa21f81650d61e..ca29ab3cd2220a 100644
--- a/deps/v8/src/builtins/builtins-global-gen.cc
+++ b/deps/v8/src/builtins/builtins-global-gen.cc
@@ -35,7 +35,7 @@ TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
     BIND(&if_numisheapnumber);
     {
       // Check if {num} contains a finite, non-NaN value.
-      Node* num_value = LoadHeapNumberValue(num);
+      TNode<Float64T> num_value = LoadHeapNumberValue(num);
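+      // Note: x - x is 0.0 for every finite x but NaN when x is NaN or
+      // +/-Infinity, so the single NaN test below covers all non-finite
+      // cases.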
       BranchIfFloat64IsNaN(Float64Sub(num_value, num_value), &return_false,
                            &return_true);
     }
@@ -81,7 +81,7 @@ TF_BUILTIN(GlobalIsNaN, CodeStubAssembler) {
     BIND(&if_numisheapnumber);
     {
       // Check if {num} contains a NaN.
-      Node* num_value = LoadHeapNumberValue(num);
+      TNode<Float64T> num_value = LoadHeapNumberValue(num);
       BranchIfFloat64IsNaN(num_value, &return_true, &return_false);
     }
 
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 973356f569cb67..eae8690f1facd0 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -66,9 +66,9 @@ void Builtins::Generate_KeyedStoreIC_Megamorphic(
   KeyedStoreGenericGenerator::Generate(state);
 }
 
-void Builtins::Generate_StoreIC_Uninitialized(
+void Builtins::Generate_StoreIC_NoFeedback(
     compiler::CodeAssemblerState* state) {
-  StoreICUninitializedGenerator::Generate(state);
+  StoreICNoFeedbackGenerator::Generate(state);
 }
 
 // TODO(mythria): Check if we can remove feedback vector and slot parameters in
@@ -180,7 +180,7 @@ void HandlerBuiltinsAssembler::DispatchForElementsKindTransition(
   STATIC_ASSERT(arraysize(combined_elements_kinds) ==
                 arraysize(elements_kind_labels));
 
-  TNode<Word32T> combined_elements_kind =
+  TNode<Int32T> combined_elements_kind =
       Word32Or(Word32Shl(from_kind, Int32Constant(kBitsPerByte)), to_kind);
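   // Worked example (values for illustration only): with kBitsPerByte == 8,
   // the pair (from, to) = (2, 3) becomes the key (2 << 8) | 3 == 515, so a
   // single Switch can dispatch on both kinds at once.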
 
   Switch(combined_elements_kind, &if_unknown_type, combined_elements_kinds,
@@ -259,25 +259,27 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW,
 
 // All elements kinds handled by EmitElementStore. Specifically, this includes
 // fast elements and fixed typed array elements.
-#define ELEMENTS_KINDS(V)   \
-  V(PACKED_SMI_ELEMENTS)    \
-  V(HOLEY_SMI_ELEMENTS)     \
-  V(PACKED_ELEMENTS)        \
-  V(PACKED_SEALED_ELEMENTS) \
-  V(HOLEY_ELEMENTS)         \
-  V(HOLEY_SEALED_ELEMENTS)  \
-  V(PACKED_DOUBLE_ELEMENTS) \
-  V(HOLEY_DOUBLE_ELEMENTS)  \
-  V(UINT8_ELEMENTS)         \
-  V(INT8_ELEMENTS)          \
-  V(UINT16_ELEMENTS)        \
-  V(INT16_ELEMENTS)         \
-  V(UINT32_ELEMENTS)        \
-  V(INT32_ELEMENTS)         \
-  V(FLOAT32_ELEMENTS)       \
-  V(FLOAT64_ELEMENTS)       \
-  V(UINT8_CLAMPED_ELEMENTS) \
-  V(BIGUINT64_ELEMENTS)     \
+#define ELEMENTS_KINDS(V)          \
+  V(PACKED_SMI_ELEMENTS)           \
+  V(HOLEY_SMI_ELEMENTS)            \
+  V(PACKED_ELEMENTS)               \
+  V(PACKED_NONEXTENSIBLE_ELEMENTS) \
+  V(PACKED_SEALED_ELEMENTS)        \
+  V(HOLEY_ELEMENTS)                \
+  V(HOLEY_NONEXTENSIBLE_ELEMENTS)  \
+  V(HOLEY_SEALED_ELEMENTS)         \
+  V(PACKED_DOUBLE_ELEMENTS)        \
+  V(HOLEY_DOUBLE_ELEMENTS)         \
+  V(UINT8_ELEMENTS)                \
+  V(INT8_ELEMENTS)                 \
+  V(UINT16_ELEMENTS)               \
+  V(INT16_ELEMENTS)                \
+  V(UINT32_ELEMENTS)               \
+  V(INT32_ELEMENTS)                \
+  V(FLOAT32_ELEMENTS)              \
+  V(FLOAT64_ELEMENTS)              \
+  V(UINT8_CLAMPED_ELEMENTS)        \
+  V(BIGUINT64_ELEMENTS)            \
   V(BIGINT64_ELEMENTS)
 
 void HandlerBuiltinsAssembler::DispatchByElementsKind(
@@ -311,7 +313,7 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind(
   BIND(&if_##KIND);                                              \
   {                                                              \
     if (!FLAG_enable_sealed_frozen_elements_kind &&              \
-        IsFrozenOrSealedElementsKindUnchecked(KIND)) {           \
+        IsAnyNonextensibleElementsKindUnchecked(KIND)) {         \
       /* Disable support for frozen or sealed elements kinds. */ \
       Unreachable();                                             \
     } else if (!handle_typed_elements_kind &&                    \
@@ -403,7 +405,7 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
   Node* context = Parameter(Descriptor::kContext);
 
   Label miss(this, Label::kDeferred);
-  Return(LoadJSFunctionPrototype(receiver, &miss));
+  Return(LoadJSFunctionPrototype(CAST(receiver), &miss));
 
   BIND(&miss);
   TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index ce944784eadf77..605b2a6b1a3aed 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -23,7 +23,7 @@ namespace internal {
 IC_BUILTIN(LoadIC)
 IC_BUILTIN(LoadIC_Megamorphic)
 IC_BUILTIN(LoadIC_Noninlined)
-IC_BUILTIN(LoadIC_Uninitialized)
+IC_BUILTIN(LoadIC_NoFeedback)
 IC_BUILTIN(LoadICTrampoline)
 IC_BUILTIN(LoadICTrampoline_Megamorphic)
 IC_BUILTIN(KeyedLoadIC)
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 8d22767b587d94..445c8c951732c8 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -35,7 +35,7 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
   Node* object = Parameter(Descriptor::kObject);
 
   // Load the {object}'s elements.
-  Node* source = LoadObjectField(object, JSObject::kElementsOffset);
+  TNode<Object> source = LoadObjectField(object, JSObject::kElementsOffset);
   Node* target = CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays);
   StoreObjectField(object, JSObject::kElementsOffset, target);
   Return(target);
@@ -104,7 +104,7 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
       // the mapped elements (i.e. the first {mapped_count}) with the hole, but
       // make sure not to overshoot the {length} if some arguments are missing.
       TNode<IntPtrT> number_of_holes = IntPtrMin(mapped_count, length);
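       // Illustrative arithmetic: with 3 mapped parameters but only 2 actual
       // arguments, number_of_holes = min(3, 2) = 2, so the loop below never
       // writes past the {length} arguments actually present.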
-      Node* the_hole = TheHoleConstant();
+      TNode<Oddball> the_hole = TheHoleConstant();
 
       // Fill the first elements up to {number_of_holes} with the hole.
       TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
@@ -116,7 +116,7 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
         TNode<IntPtrT> index = var_index.value();
 
         // Check if we are done.
-        GotoIf(WordEqual(index, number_of_holes), &done_loop1);
+        GotoIf(IntPtrEqual(index, number_of_holes), &done_loop1);
 
         // Store the hole into the {result}.
         StoreFixedArrayElement(result, index, the_hole, SKIP_WRITE_BARRIER);
@@ -139,7 +139,7 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
         TNode<IntPtrT> index = var_index.value();
 
         // Check if we are done.
-        GotoIf(WordEqual(index, length), &done_loop2);
+        GotoIf(IntPtrEqual(index, length), &done_loop2);
 
         // Load the parameter at the given {index}.
         TNode<Object> value = BitcastWordToTagged(
@@ -213,7 +213,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
       : CodeStubAssembler(state) {}
 
   Node* IsMarking() {
-    Node* is_marking_addr = ExternalConstant(
+    TNode<ExternalReference> is_marking_addr = ExternalConstant(
         ExternalReference::heap_is_marking_flag_address(this->isolate()));
     return Load(MachineType::Uint8(), is_marking_addr);
   }
@@ -266,12 +266,12 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
     }
   }
 
-  Node* ShouldSkipFPRegs(Node* mode) {
-    return WordEqual(mode, SmiConstant(kDontSaveFPRegs));
+  Node* ShouldSkipFPRegs(SloppyTNode<Object> mode) {
+    return TaggedEqual(mode, SmiConstant(kDontSaveFPRegs));
   }
 
-  Node* ShouldEmitRememberSet(Node* remembered_set) {
-    return WordEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
+  Node* ShouldEmitRememberSet(SloppyTNode<Object> remembered_set) {
+    return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
   }
 
   void CallCFunction1WithCallerSavedRegistersMode(MachineType return_type,
@@ -323,26 +323,27 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
 
   void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Node* mode,
                                   Label* next) {
-    Node* store_buffer_top_addr =
+    TNode<ExternalReference> store_buffer_top_addr =
         ExternalConstant(ExternalReference::store_buffer_top(this->isolate()));
     Node* store_buffer_top =
         Load(MachineType::Pointer(), store_buffer_top_addr);
     StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top,
                         slot);
-    Node* new_store_buffer_top =
+    TNode<WordT> new_store_buffer_top =
         IntPtrAdd(store_buffer_top, IntPtrConstant(kSystemPointerSize));
     StoreNoWriteBarrier(MachineType::PointerRepresentation(),
                         store_buffer_top_addr, new_store_buffer_top);
 
-    Node* test = WordAnd(new_store_buffer_top,
-                         IntPtrConstant(Heap::store_buffer_mask_constant()));
+    TNode<WordT> test =
+        WordAnd(new_store_buffer_top,
+                IntPtrConstant(Heap::store_buffer_mask_constant()));
 
     Label overflow(this);
-    Branch(WordEqual(test, IntPtrConstant(0)), &overflow, next);
+    Branch(IntPtrEqual(test, IntPtrConstant(0)), &overflow, next);
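+    // Explanatory note (assuming the buffer is aligned to its power-of-two
+    // size): masking the bumped top yields 0 exactly when the top has
+    // reached the end of the buffer, i.e. the store buffer is full.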
 
     BIND(&overflow);
     {
-      Node* function =
+      TNode<ExternalReference> function =
           ExternalConstant(ExternalReference::store_buffer_overflow_function());
       CallCFunction1WithCallerSavedRegistersMode(MachineType::Int32(),
                                                  MachineType::Pointer(),
@@ -395,7 +396,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
 
     BIND(&store_buffer_exit);
     {
-      Node* isolate_constant =
+      TNode<ExternalReference> isolate_constant =
           ExternalConstant(ExternalReference::isolate_address(isolate()));
       Node* fp_mode = Parameter(Descriptor::kFPMode);
       InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit);
@@ -403,7 +404,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
 
     BIND(&store_buffer_incremental_wb);
     {
-      Node* isolate_constant =
+      TNode<ExternalReference> isolate_constant =
           ExternalConstant(ExternalReference::isolate_address(isolate()));
       Node* fp_mode = Parameter(Descriptor::kFPMode);
       InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode,
@@ -435,9 +436,9 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
 
     BIND(&call_incremental_wb);
     {
-      Node* function = ExternalConstant(
+      TNode<ExternalReference> function = ExternalConstant(
           ExternalReference::incremental_marking_record_write_function());
-      Node* isolate_constant =
+      TNode<ExternalReference> isolate_constant =
           ExternalConstant(ExternalReference::isolate_address(isolate()));
       Node* fp_mode = Parameter(Descriptor::kFPMode);
       TNode<IntPtrT> object =
@@ -457,12 +458,12 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
 TF_BUILTIN(EphemeronKeyBarrier, RecordWriteCodeStubAssembler) {
   Label exit(this);
 
-  Node* function = ExternalConstant(
+  TNode<ExternalReference> function = ExternalConstant(
       ExternalReference::ephemeron_key_write_barrier_function());
-  Node* isolate_constant =
+  TNode<ExternalReference> isolate_constant =
       ExternalConstant(ExternalReference::isolate_address(isolate()));
   Node* address = Parameter(Descriptor::kSlotAddress);
-  Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
+  TNode<IntPtrT> object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
   Node* fp_mode = Parameter(Descriptor::kFPMode);
   CallCFunction3WithCallerSavedRegistersMode(
       MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
@@ -495,7 +496,7 @@ class DeletePropertyBaseAssembler : public AccessorAssembler {
     GotoIf(IsSetWord32(details, PropertyDetails::kAttributesDontDeleteMask),
            dont_delete);
     // Overwrite the entry itself (see NameDictionary::SetEntry).
-    TNode<HeapObject> filler = TheHoleConstant();
+    TNode<Oddball> filler = TheHoleConstant();
     DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kTheHoleValue));
     StoreFixedArrayElement(properties, key_index, filler, SKIP_WRITE_BARRIER);
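     // SKIP_WRITE_BARRIER is safe per the DCHECK above: the hole is an
     // immortal, immovable root, so this store never creates a pointer the
     // GC has to track.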
     StoreValueByKeyIndex<NameDictionary>(properties, key_index, filler,
@@ -534,11 +535,12 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
   VARIABLE(var_index, MachineType::PointerRepresentation());
   VARIABLE(var_unique, MachineRepresentation::kTagged, key);
   Label if_index(this), if_unique_name(this), if_notunique(this),
-      if_notfound(this), slow(this);
+      if_notfound(this), slow(this), if_proxy(this);
 
   GotoIf(TaggedIsSmi(receiver), &slow);
   TNode<Map> receiver_map = LoadMap(CAST(receiver));
-  TNode<Int32T> instance_type = LoadMapInstanceType(receiver_map);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
+  GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), &if_proxy);
   GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &slow);
   TryToName(key, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
             &if_notunique);
@@ -592,6 +594,14 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
   BIND(&if_notfound);
   Return(TrueConstant());
 
+  BIND(&if_proxy);
+  {
+    TNode<Name> name = CAST(CallBuiltin(Builtins::kToName, context, key));
+    GotoIf(IsPrivateSymbol(name), &slow);
+    TailCallBuiltin(Builtins::kProxyDeleteProperty, context, receiver, name,
+                    language_mode);
+  }
+
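+  // Illustrative JS for the new fast path (an example, not from the patch):
+  //   delete new Proxy({}, { deleteProperty() { return true; } }).x;
+  // now dispatches straight to ProxyDeleteProperty, while private symbols
+  // still fall back to the runtime via &slow.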
   BIND(&slow);
   {
     TailCallRuntime(Runtime::kDeleteProperty, context, receiver, key,
@@ -622,7 +632,7 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
     // Otherwise check if {source} is a proper JSObject, and if not, defer
     // to testing for non-empty strings below.
     TNode<Map> source_map = LoadMap(CAST(source));
-    TNode<Int32T> source_instance_type = LoadMapInstanceType(source_map);
+    TNode<Uint16T> source_instance_type = LoadMapInstanceType(source_map);
     GotoIfNot(IsJSObjectInstanceType(source_instance_type),
               &if_sourcenotjsobject);
 
@@ -670,7 +680,8 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
       // handled explicitly by Object.assign() and CopyDataProperties.
       GotoIfNot(IsStringInstanceType(source_instance_type), &if_done);
       TNode<IntPtrT> source_length = LoadStringLengthAsWord(CAST(source));
-      Branch(WordEqual(source_length, IntPtrConstant(0)), &if_done, if_runtime);
+      Branch(IntPtrEqual(source_length, IntPtrConstant(0)), &if_done,
+             if_runtime);
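+      // Rationale (explanatory note): an empty string has no own enumerable
+      // properties, so copying from it is a no-op, while a non-empty string
+      // exposes its characters as indexed properties and needs the runtime.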
     }
 
     BIND(&if_done);
@@ -686,7 +697,7 @@ TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
   TNode<Object> source = CAST(Parameter(Descriptor::kSource));
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
-  CSA_ASSERT(this, WordNotEqual(target, source));
+  CSA_ASSERT(this, TaggedNotEqual(target, source));
 
   Label if_runtime(this, Label::kDeferred);
   Return(SetOrCopyDataProperties(context, target, source, &if_runtime, false));
@@ -980,7 +991,7 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
   BIND(&if_proxy);
   {
     // Convert the {key} to a Name first.
-    Node* name = CallBuiltin(Builtins::kToName, context, key);
+    TNode<Object> name = CallBuiltin(Builtins::kToName, context, key);
 
     // The {object} is a JSProxy instance, look up the {name} on it, passing
     // {object} both as receiver and holder. If {name} is absent we can safely
@@ -996,7 +1007,7 @@ TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
   Node* key = Parameter(Descriptor::kKey);
   Node* context = Parameter(Descriptor::kContext);
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* on_non_existent = Parameter(Descriptor::kOnNonExistent);
+  TNode<Object> on_non_existent = CAST(Parameter(Descriptor::kOnNonExistent));
   Label if_notfound(this), if_proxy(this, Label::kDeferred),
       if_slow(this, Label::kDeferred);
 
@@ -1028,11 +1039,11 @@ TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
 
   BIND(&if_notfound);
   Label throw_reference_error(this);
-  GotoIf(WordEqual(on_non_existent,
-                   SmiConstant(OnNonExistent::kThrowReferenceError)),
+  GotoIf(TaggedEqual(on_non_existent,
+                     SmiConstant(OnNonExistent::kThrowReferenceError)),
          &throw_reference_error);
-  CSA_ASSERT(this, WordEqual(on_non_existent,
-                             SmiConstant(OnNonExistent::kReturnUndefined)));
+  CSA_ASSERT(this, TaggedEqual(on_non_existent,
+                               SmiConstant(OnNonExistent::kReturnUndefined)));
   Return(UndefinedConstant());
 
   BIND(&throw_reference_error);
@@ -1045,7 +1056,7 @@ TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
   BIND(&if_proxy);
   {
     // Convert the {key} to a Name first.
-    Node* name = CallBuiltin(Builtins::kToName, context, key);
+    TNode<Name> name = CAST(CallBuiltin(Builtins::kToName, context, key));
 
     // Proxies cannot handle private symbols, so bail out.
     GotoIf(IsPrivateSymbol(name), &if_slow);
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 991790b49007ca..1a9a3b7fd9a822 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -33,10 +33,7 @@ class IntlBuiltinsAssembler : public CodeStubAssembler {
 };
 
 TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
-  Node* const string = Parameter(Descriptor::kString);
-  Node* const context = Parameter(Descriptor::kContext);
-
-  CSA_ASSERT(this, IsString(string));
+  TNode<String> const string = CAST(Parameter(Descriptor::kString));
 
   Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
 
@@ -50,14 +47,14 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
       state(), string, ToDirectStringAssembler::kDontUnpackSlicedStrings);
   to_direct.TryToDirect(&runtime);
 
-  Node* const instance_type = to_direct.instance_type();
+  TNode<Int32T> const instance_type = to_direct.instance_type();
   CSA_ASSERT(this,
              Word32BinaryNot(IsIndirectStringInstanceType(instance_type)));
   GotoIfNot(IsOneByteStringInstanceType(instance_type), &runtime);
 
   // For short strings, do the conversion in CSA through the lookup table.
 
-  Node* const dst = AllocateSeqOneByteString(context, length);
+  TNode<String> const dst = AllocateSeqOneByteString(length);
 
   const int kMaxShortStringLength = 24;  // Determined empirically.
   GotoIf(Uint32GreaterThan(length, Uint32Constant(kMaxShortStringLength)),
@@ -68,31 +65,31 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
     VARIABLE(var_cursor, MachineType::PointerRepresentation(),
              IntPtrConstant(0));
 
-    Node* const start_address = to_direct.PointerToData(&call_c);
+    TNode<RawPtrT> const start_address = to_direct.PointerToData(&call_c);
     TNode<IntPtrT> const end_address =
         Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length)));
 
-    Node* const to_lower_table_addr =
+    TNode<ExternalReference> const to_lower_table_addr =
         ExternalConstant(ExternalReference::intl_to_latin1_lower_table());
 
     VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0));
 
     VariableList push_vars({&var_cursor, &var_did_change}, zone());
-    BuildFastLoop(push_vars, start_address, end_address,
-                  [=, &var_cursor, &var_did_change](Node* current) {
-                    Node* c = Load(MachineType::Uint8(), current);
-                    Node* lower =
-                        Load(MachineType::Uint8(), to_lower_table_addr,
-                             ChangeInt32ToIntPtr(c));
-                    StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr,
-                                        var_cursor.value(), lower);
-
-                    var_did_change.Bind(Word32Or(Word32NotEqual(c, lower),
-                                                 var_did_change.value()));
-
-                    Increment(&var_cursor);
-                  },
-                  kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+    BuildFastLoop(
+        push_vars, start_address, end_address,
+        [=, &var_cursor, &var_did_change](Node* current) {
+          TNode<Uint8T> c = Load<Uint8T>(current);
+          TNode<Uint8T> lower =
+              Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c));
+          StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr,
+                              var_cursor.value(), lower);
+
+          var_did_change.Bind(
+              Word32Or(Word32NotEqual(c, lower), var_did_change.value()));
+
+          Increment(&var_cursor);
+        },
+        kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
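+    // Behaviour sketch (illustrative): lowering "ABC" sets var_did_change
+    // and returns the fresh copy, while lowering "abc" leaves it clear so
+    // the original string object is handed back below.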
 
     // Return the original string if it remained unchanged in order to preserve
     // e.g. internalization and private symbols (such as the preserved object
@@ -106,9 +103,9 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
   // String ConvertOneByteToLower(String src, String dst);
   BIND(&call_c);
   {
-    Node* const src = to_direct.string();
+    TNode<String> const src = to_direct.string();
 
-    Node* const function_addr =
+    TNode<ExternalReference> const function_addr =
         ExternalConstant(ExternalReference::intl_convert_one_byte_to_lower());
 
     MachineType type_tagged = MachineType::AnyTagged();
@@ -125,8 +122,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
 
   BIND(&runtime);
   {
-    Node* const result = CallRuntime(Runtime::kStringToLowerCaseIntl,
-                                     NoContextConstant(), string);
+    TNode<Object> const result = CallRuntime(Runtime::kStringToLowerCaseIntl,
+                                             NoContextConstant(), string);
     Return(result);
   }
 }
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index b3d8e27dbc3a7d..7bd5acfdcda845 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -27,7 +27,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
                                                       Node* object,
                                                       Label* if_exception,
                                                       Variable* exception) {
-  Node* method = GetIteratorMethod(context, object);
+  TNode<Object> method = GetIteratorMethod(context, object);
   return GetIterator(context, object, method, if_exception, exception);
 }
 
@@ -44,7 +44,8 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
 
   BIND(&if_not_callable);
   {
-    Node* ret = CallRuntime(Runtime::kThrowIteratorError, context, object);
+    TNode<Object> ret =
+        CallRuntime(Runtime::kThrowIteratorError, context, object);
     GotoIfException(ret, if_exception, exception);
     Unreachable();
   }
@@ -61,13 +62,15 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
 
     BIND(&if_notobject);
     {
-      Node* ret = CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
+      TNode<Object> ret =
+          CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
       GotoIfException(ret, if_exception, exception);
       Unreachable();
     }
 
     BIND(&get_next);
-    Node* const next = GetProperty(context, iterator, factory()->next_string());
+    TNode<Object> const next =
+        GetProperty(context, iterator, factory()->next_string());
     GotoIfException(next, if_exception, exception);
 
     return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
@@ -76,8 +79,9 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
 }
 
 TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
-    Node* context, const IteratorRecord& iterator, Label* if_done,
-    Node* fast_iterator_result_map, Label* if_exception, Variable* exception) {
+    TNode<Context> context, const IteratorRecord& iterator, Label* if_done,
+    base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_exception,
+    Variable* exception) {
   DCHECK_NOT_NULL(if_done);
   // 1. a. Let result be ? Invoke(iterator, "next", « »).
   Callable callable = CodeFactory::Call(isolate());
@@ -87,18 +91,18 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
   // 3. If Type(result) is not Object, throw a TypeError exception.
   Label if_notobject(this, Label::kDeferred), return_result(this);
   GotoIf(TaggedIsSmi(result), &if_notobject);
-  Node* result_map = LoadMap(result);
+  TNode<Map> result_map = LoadMap(result);
 
-  if (fast_iterator_result_map != nullptr) {
+  if (fast_iterator_result_map) {
     // Fast iterator result case:
     Label if_generic(this);
 
     // 4. Return result.
-    GotoIfNot(WordEqual(result_map, fast_iterator_result_map), &if_generic);
+    GotoIfNot(TaggedEqual(result_map, *fast_iterator_result_map), &if_generic);
 
     // IteratorComplete
     // 2. Return ToBoolean(? Get(iterResult, "done")).
-    Node* done = LoadObjectField(result, JSIteratorResult::kDoneOffset);
+    TNode<Object> done = LoadObjectField(result, JSIteratorResult::kDoneOffset);
     BranchIfToBooleanIsTrue(done, if_done, &return_result);
 
     BIND(&if_generic);
@@ -111,14 +115,14 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
 
     // IteratorComplete
     // 2. Return ToBoolean(? Get(iterResult, "done")).
-    Node* done = GetProperty(context, result, factory()->done_string());
+    TNode<Object> done = GetProperty(context, result, factory()->done_string());
     GotoIfException(done, if_exception, exception);
     BranchIfToBooleanIsTrue(done, if_done, &return_result);
   }
 
   BIND(&if_notobject);
   {
-    Node* ret =
+    TNode<Object> ret =
         CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
     GotoIfException(ret, if_exception, exception);
     Unreachable();
@@ -137,8 +141,8 @@ TNode<Object> IteratorBuiltinsAssembler::IteratorValue(
   if (fast_iterator_result_map) {
     // Fast iterator result case:
     Label if_generic(this);
-    Node* map = LoadMap(result);
-    GotoIfNot(WordEqual(map, *fast_iterator_result_map), &if_generic);
+    TNode<Map> map = LoadMap(result);
+    GotoIfNot(TaggedEqual(map, *fast_iterator_result_map), &if_generic);
     var_value = LoadObjectField(result, JSIteratorResult::kValueOffset);
     Goto(&exit);
 
@@ -169,7 +173,7 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(
   CSA_ASSERT(this, IsJSReceiver(iterator.object));
 
   // Let return be ? GetMethod(iterator, "return").
-  Node* method =
+  TNode<Object> method =
       GetProperty(context, iterator.object, factory()->return_string());
   GotoIfException(method, if_exception, exception);
 
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index db86c653857f52..2a0a510f738782 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -36,15 +36,14 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
   // result.
   // `fast_iterator_result_map` refers to the map for the JSIteratorResult
   // object, loaded from the native context.
-  TNode<JSReceiver> IteratorStep(Node* context, const IteratorRecord& iterator,
-                                 Label* if_done,
-                                 Node* fast_iterator_result_map = nullptr,
-                                 Label* if_exception = nullptr,
-                                 Variable* exception = nullptr);
-
-  TNode<JSReceiver> IteratorStep(Node* context, const IteratorRecord& iterator,
-                                 Node* fast_iterator_result_map,
-                                 Label* if_done) {
+  TNode<JSReceiver> IteratorStep(
+      TNode<Context> context, const IteratorRecord& iterator, Label* if_done,
+      base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt,
+      Label* if_exception = nullptr, Variable* exception = nullptr);
+
+  TNode<JSReceiver> IteratorStep(
+      TNode<Context> context, const IteratorRecord& iterator,
+      base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_done) {
     return IteratorStep(context, iterator, if_done, fast_iterator_result_map);
   }
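 
   // Hypothetical call sites (for illustration; names are invented):
   //   IteratorStep(context, record, &done);            // generic protocol
   //   IteratorStep(context, record, &done, fast_map);  // fast result map
   // base::nullopt now plays the role of the old nullptr default.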
 
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index c73cbee1bca031..95d5229974d5be 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -146,8 +146,8 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
 
   BIND(&use_sfi_code);
   // If not, install the SFI's code entry and jump to that.
-  CSA_ASSERT(this, WordNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
-                                              isolate(), CompileLazy))));
+  CSA_ASSERT(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
+                                                isolate(), CompileLazy))));
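+  // Explanatory note: were sfi_code CompileLazy itself, the tail call below
+  // would re-enter this builtin indefinitely; the assert excludes that.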
   StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
   GenerateTailCallToJSCode(sfi_code, function);
 
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index 46195e74ed6373..42d0162f388d33 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -8,6 +8,7 @@
 #include "src/builtins/builtins.h"
 #include "src/codegen/code-factory.h"
 #include "src/codegen/code-stub-assembler.h"
+#include "src/objects/fixed-array.h"
 
 namespace v8 {
 namespace internal {
@@ -39,7 +40,7 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
 
       // Check whether an abs-with-overflow machine operation is supported.
       if (IsIntPtrAbsWithOverflowSupported()) {
-        Node* pair = IntPtrAbsWithOverflow(x);
+        TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAbsWithOverflow(x);
         Node* overflow = Projection(1, pair);
         GotoIf(overflow, &if_overflow);
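         // Note: the abs of the most negative intptr value has no positive
         // representation, which is the only way {overflow} can be set here.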
 
@@ -79,9 +80,9 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
 
       BIND(&if_xisheapnumber);
       {
-        Node* x_value = LoadHeapNumberValue(x);
-        Node* value = Float64Abs(x_value);
-        Node* result = AllocateHeapNumberWithValue(value);
+        TNode<Float64T> x_value = LoadHeapNumberValue(x);
+        TNode<Float64T> value = Float64Abs(x_value);
+        TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
         Return(result);
       }
 
@@ -125,9 +126,9 @@ void MathBuiltinsAssembler::MathRoundingOperation(
 
       BIND(&if_xisheapnumber);
       {
-        Node* x_value = LoadHeapNumberValue(x);
-        Node* value = (this->*float64op)(x_value);
-        Node* result = ChangeFloat64ToTagged(value);
+        TNode<Float64T> x_value = LoadHeapNumberValue(x);
+        TNode<Float64T> value = (this->*float64op)(x_value);
+        TNode<Number> result = ChangeFloat64ToTagged(value);
         Return(result);
       }
 
@@ -182,8 +183,8 @@ TF_BUILTIN(MathImul, CodeStubAssembler) {
   Node* y = Parameter(Descriptor::kY);
   Node* x_value = TruncateTaggedToWord32(context, x);
   Node* y_value = TruncateTaggedToWord32(context, y);
-  Node* value = Int32Mul(x_value, y_value);
-  Node* result = ChangeInt32ToTagged(value);
+  TNode<Int32T> value = Signed(Int32Mul(x_value, y_value));
+  TNode<Number> result = ChangeInt32ToTagged(value);
   Return(result);
 }
 
@@ -192,7 +193,7 @@ CodeStubAssembler::Node* MathBuiltinsAssembler::MathPow(Node* context,
                                                         Node* exponent) {
   Node* base_value = TruncateTaggedToFloat64(context, base);
   Node* exponent_value = TruncateTaggedToFloat64(context, exponent);
-  Node* value = Float64Pow(base_value, exponent_value);
+  TNode<Float64T> value = Float64Pow(base_value, exponent_value);
   return ChangeFloat64ToTagged(value);
 }
 
@@ -205,7 +206,7 @@ TF_BUILTIN(MathPow, MathBuiltinsAssembler) {
 // ES6 #sec-math.random
 TF_BUILTIN(MathRandom, CodeStubAssembler) {
   Node* context = Parameter(Descriptor::kContext);
-  Node* native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
 
   // Load cache index.
   TVARIABLE(Smi, smi_index);
@@ -217,9 +218,9 @@ TF_BUILTIN(MathRandom, CodeStubAssembler) {
   GotoIf(SmiAbove(smi_index.value(), SmiConstant(0)), &if_cached);
 
   // Cache exhausted, populate the cache. Return value is the new index.
-  Node* const refill_math_random =
+  TNode<ExternalReference> const refill_math_random =
       ExternalConstant(ExternalReference::refill_math_random());
-  Node* const isolate_ptr =
+  TNode<ExternalReference> const isolate_ptr =
       ExternalConstant(ExternalReference::isolate_address(isolate()));
   MachineType type_tagged = MachineType::AnyTagged();
   MachineType type_ptr = MachineType::Pointer();
@@ -236,9 +237,9 @@ TF_BUILTIN(MathRandom, CodeStubAssembler) {
                       new_smi_index);
 
   // Load and return next cached random number.
-  Node* array =
-      LoadContextElement(native_context, Context::MATH_RANDOM_CACHE_INDEX);
-  Node* random = LoadFixedDoubleArrayElement(
+  TNode<FixedDoubleArray> array = CAST(
+      LoadContextElement(native_context, Context::MATH_RANDOM_CACHE_INDEX));
+  TNode<Float64T> random = LoadFixedDoubleArrayElement(
       array, new_smi_index, MachineType::Float64(), 0, SMI_PARAMETERS);
   Return(AllocateHeapNumberWithValue(random));
 }
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
deleted file mode 100644
index cce780ab9f6a25..00000000000000
--- a/deps/v8/src/builtins/builtins-math.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils.h"
-#include "src/builtins/builtins.h"
-#include "src/logging/counters.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// ES6 section 20.2.2 Function Properties of the Math Object
-
-// ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values )
-BUILTIN(MathHypot) {
-  HandleScope scope(isolate);
-  int const length = args.length() - 1;
-  if (length == 0) return Smi::kZero;
-  DCHECK_LT(0, length);
-  double max = 0;
-  std::vector<double> abs_values;
-  abs_values.reserve(length);
-  for (int i = 0; i < length; i++) {
-    Handle<Object> x = args.at(i + 1);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
-                                       Object::ToNumber(isolate, x));
-    double abs_value = std::abs(x->Number());
-    abs_values.push_back(abs_value);
-    // Use negation here to make sure that {max} is NaN
-    // in the end in case any of the arguments was NaN.
-    if (!(abs_value <= max)) {
-      max = abs_value;
-    }
-  }
-
-  if (max == 0) {
-    return Smi::kZero;
-  } else if (max == V8_INFINITY) {
-    return ReadOnlyRoots(isolate).infinity_value();
-  }
-  DCHECK(!(max <= 0));
-
-  // Kahan summation to avoid rounding errors.
-  // Normalize the numbers to the largest one to avoid overflow.
-  double sum = 0;
-  double compensation = 0;
-  for (int i = 0; i < length; i++) {
-    double n = abs_values[i] / max;
-    double summand = n * n - compensation;
-    double preliminary = sum + summand;
-    compensation = (preliminary - sum) - summand;
-    sum = preliminary;
-  }
-
-  return *isolate->factory()->NewNumber(std::sqrt(sum) * max);
-}
-
-}  // namespace internal
-}  // namespace v8
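
(Aside on the deleted MathHypot, with made-up inputs for illustration:
hypot(3e200, 4e200) would overflow if the squares were formed directly, but
normalizing by max = 4e200 gives 0.75^2 + 1^2 = 1.5625, whose square root
1.25 scaled back by max yields 5e200. The Kahan compensation only tightens
rounding error and does not change this arithmetic.)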
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 4987787c359292..427fd6edb65f71 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -123,7 +123,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
   StoreRoot(RootIndex::kCurrentMicrotask, microtask);
   TNode<IntPtrT> saved_entered_context_count = GetEnteredContextCount();
   TNode<Map> microtask_map = LoadMap(microtask);
-  TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map);
+  TNode<Uint16T> microtask_type = LoadMapInstanceType(microtask_map);
 
   VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
   Label if_exception(this, Label::kDeferred);
@@ -131,21 +131,15 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
       is_promise_fulfill_reaction_job(this),
       is_promise_reject_reaction_job(this),
       is_promise_resolve_thenable_job(this),
-      is_finalization_group_cleanup_job(this),
       is_unreachable(this, Label::kDeferred), done(this);
 
-  int32_t case_values[] = {CALLABLE_TASK_TYPE,
-                           CALLBACK_TASK_TYPE,
+  int32_t case_values[] = {CALLABLE_TASK_TYPE, CALLBACK_TASK_TYPE,
                            PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
                            PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
-                           PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
-                           FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE};
-  Label* case_labels[] = {&is_callable,
-                          &is_callback,
-                          &is_promise_fulfill_reaction_job,
-                          &is_promise_reject_reaction_job,
-                          &is_promise_resolve_thenable_job,
-                          &is_finalization_group_cleanup_job};
+                           PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE};
+  Label* case_labels[] = {
+      &is_callable, &is_callback, &is_promise_fulfill_reaction_job,
+      &is_promise_reject_reaction_job, &is_promise_resolve_thenable_job};
   static_assert(arraysize(case_values) == arraysize(case_labels), "");
   Switch(microtask_type, &is_unreachable, case_values, case_labels,
          arraysize(case_labels));
@@ -155,7 +149,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
     // Enter the context of the {microtask}.
     TNode<Context> microtask_context =
         LoadObjectField<Context>(microtask, CallableTask::kContextOffset);
-    TNode<Context> native_context = LoadNativeContext(microtask_context);
+    TNode<NativeContext> native_context = LoadNativeContext(microtask_context);
     PrepareForContext(native_context, &done);
 
     TNode<JSReceiver> callable =
@@ -171,9 +165,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
 
   BIND(&is_callback);
   {
-    Node* const microtask_callback =
+    TNode<Object> const microtask_callback =
         LoadObjectField(microtask, CallbackTask::kCallbackOffset);
-    Node* const microtask_data =
+    TNode<Object> const microtask_data =
         LoadObjectField(microtask, CallbackTask::kDataOffset);
 
     // If this turns out to become a bottleneck because of the calls
@@ -186,7 +180,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
     // But from our current measurements it doesn't seem to be a
     // serious performance problem, even if the microtask is full
     // of CallHandlerTasks (which is not a realistic use case anyways).
-    Node* const result =
+    TNode<Object> const result =
         CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
                     microtask_callback, microtask_data);
     GotoIfException(result, &if_exception, &var_exception);
@@ -198,17 +192,17 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
     // Enter the context of the {microtask}.
     TNode<Context> microtask_context = LoadObjectField<Context>(
         microtask, PromiseResolveThenableJobTask::kContextOffset);
-    TNode<Context> native_context = LoadNativeContext(microtask_context);
+    TNode<NativeContext> native_context = LoadNativeContext(microtask_context);
     PrepareForContext(native_context, &done);
 
-    Node* const promise_to_resolve = LoadObjectField(
+    TNode<Object> const promise_to_resolve = LoadObjectField(
         microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset);
-    Node* const then =
+    TNode<Object> const then =
         LoadObjectField(microtask, PromiseResolveThenableJobTask::kThenOffset);
-    Node* const thenable = LoadObjectField(
+    TNode<Object> const thenable = LoadObjectField(
         microtask, PromiseResolveThenableJobTask::kThenableOffset);
 
-    Node* const result =
+    TNode<Object> const result =
         CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
                     promise_to_resolve, thenable, then);
     GotoIfException(result, &if_exception, &var_exception);
@@ -222,21 +216,21 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
     // Enter the context of the {microtask}.
     TNode<Context> microtask_context = LoadObjectField<Context>(
         microtask, PromiseReactionJobTask::kContextOffset);
-    TNode<Context> native_context = LoadNativeContext(microtask_context);
+    TNode<NativeContext> native_context = LoadNativeContext(microtask_context);
     PrepareForContext(native_context, &done);
 
-    Node* const argument =
+    TNode<Object> const argument =
         LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
-    Node* const handler =
+    TNode<Object> const handler =
         LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
-    Node* const promise_or_capability = LoadObjectField(
-        microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);
+    TNode<HeapObject> const promise_or_capability = CAST(LoadObjectField(
+        microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset));
 
     // Run the promise before/debug hook if enabled.
     RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
                    promise_or_capability);
 
-    Node* const result =
+    TNode<Object> const result =
         CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
                     argument, handler, promise_or_capability);
     GotoIfException(result, &if_exception, &var_exception);
@@ -255,21 +249,21 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
     // Enter the context of the {microtask}.
     TNode<Context> microtask_context = LoadObjectField<Context>(
         microtask, PromiseReactionJobTask::kContextOffset);
-    TNode<Context> native_context = LoadNativeContext(microtask_context);
+    TNode<NativeContext> native_context = LoadNativeContext(microtask_context);
     PrepareForContext(native_context, &done);
 
-    Node* const argument =
+    TNode<Object> const argument =
         LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
-    Node* const handler =
+    TNode<Object> const handler =
         LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
-    Node* const promise_or_capability = LoadObjectField(
-        microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);
+    TNode<HeapObject> const promise_or_capability = CAST(LoadObjectField(
+        microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset));
 
     // Run the promise before/debug hook if enabled.
     RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
                    promise_or_capability);
 
-    Node* const result =
+    TNode<Object> const result =
         CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
                     argument, handler, promise_or_capability);
     GotoIfException(result, &if_exception, &var_exception);
@@ -283,26 +277,6 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
     Goto(&done);
   }
 
-  BIND(&is_finalization_group_cleanup_job);
-  {
-    // Enter the context of the {finalization_group}.
-    TNode<JSFinalizationGroup> finalization_group =
-        LoadObjectField<JSFinalizationGroup>(
-            microtask,
-            FinalizationGroupCleanupJobTask::kFinalizationGroupOffset);
-    TNode<Context> native_context = LoadObjectField<Context>(
-        finalization_group, JSFinalizationGroup::kNativeContextOffset);
-    PrepareForContext(native_context, &done);
-
-    Node* const result = CallRuntime(Runtime::kFinalizationGroupCleanupJob,
-                                     native_context, finalization_group);
-
-    GotoIfException(result, &if_exception, &var_exception);
-    RewindEnteredContext(saved_entered_context_count);
-    SetCurrentContext(current_context);
-    Goto(&done);
-  }
-
   BIND(&is_unreachable);
   Unreachable();
 
@@ -407,7 +381,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
 
   BIND(&if_grow);
   {
-    Node* function =
+    TNode<ExternalReference> function =
         ExternalConstant(ExternalReference::call_enter_context_function());
     CallCFunction(function, MachineType::Int32(),
                   std::make_pair(MachineType::Pointer(), hsi),
@@ -475,7 +449,7 @@ TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) {
   TNode<Microtask> microtask =
       UncheckedCast<Microtask>(Parameter(Descriptor::kMicrotask));
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<RawPtrT> microtask_queue = GetMicrotaskQueue(native_context);
 
   // Do not store the microtask if MicrotaskQueue is not available, that may
@@ -506,9 +480,9 @@ TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) {
   // implementation to grow the buffer.
   BIND(&if_grow);
   {
-    Node* isolate_constant =
+    TNode<ExternalReference> isolate_constant =
         ExternalConstant(ExternalReference::isolate_address(isolate()));
-    Node* function =
+    TNode<ExternalReference> function =
         ExternalConstant(ExternalReference::call_enqueue_microtask_function());
     CallCFunction(function, MachineType::AnyTagged(),
                   std::make_pair(MachineType::Pointer(), isolate_constant),
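
Throughout the hunks above, untyped compiler::Node* locals become typed TNode<T> handles (TNode<Object>, TNode<NativeContext>, TNode<ExternalReference>, ...). The benefit is compile-time checking of value categories in CodeStubAssembler graphs: passing a float node where a map is expected becomes a build error instead of a graph-verifier or runtime failure. A minimal standalone sketch of the phantom-type idea, using simplified stand-ins rather than V8's real classes:

    #include <cstdio>

    struct RawNode { int id; };  // stand-in for compiler::Node

    // Typed handle: the template parameter is a compile-time tag only,
    // so the wrapper costs nothing at run time.
    template <typename T>
    struct TNode {
      RawNode* node;
      explicit TNode(RawNode* n) : node(n) {}
    };

    struct Map {};       // phantom tag types
    struct Float64T {};

    void BranchIfFloat64IsNaN(TNode<Float64T> value) {
      std::printf("checking node %d for NaN\n", value.node->id);
    }

    int main() {
      RawNode n{42};
      TNode<Float64T> number_value{&n};
      BranchIfFloat64IsNaN(number_value);  // OK
      // TNode<Map> map_node{&n};
      // BranchIfFloat64IsNaN(map_node);   // would not compile
    }
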
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index f5c4477c23df95..2aa996eba0dc2f 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -83,7 +83,7 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
   GotoIfNot(IsHeapNumber(number), &return_false);
 
   // Check if {number} contains a finite, non-NaN value.
-  Node* number_value = LoadHeapNumberValue(number);
+  TNode<Float64T> number_value = LoadHeapNumberValue(number);
   BranchIfFloat64IsNaN(Float64Sub(number_value, number_value), &return_false,
                        &return_true);
 
@@ -95,7 +95,7 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
 }
 
 TF_BUILTIN(AllocateHeapNumber, CodeStubAssembler) {
-  Node* result = AllocateHeapNumber();
+  TNode<HeapNumber> result = AllocateHeapNumber();
   Return(result);
 }
 
@@ -118,7 +118,7 @@ TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
   GotoIfNot(IsHeapNumber(number), &return_false);
 
   // Check if {number} contains a NaN value.
-  Node* number_value = LoadHeapNumberValue(number);
+  TNode<Float64T> number_value = LoadHeapNumberValue(number);
   BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
 
   BIND(&return_true);
@@ -162,8 +162,8 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
     {
       // The {input} is a HeapObject, check if it's already a String.
       Label if_inputisstring(this), if_inputisnotstring(this);
-      Node* input_map = LoadMap(input);
-      Node* input_instance_type = LoadMapInstanceType(input_map);
+      TNode<Map> input_map = LoadMap(input);
+      TNode<Uint16T> input_instance_type = LoadMapInstanceType(input_map);
       Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
              &if_inputisnotstring);
 
@@ -172,7 +172,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
         // The {input} is already a String, check if {input} contains
         // a cached array index.
         Label if_inputcached(this), if_inputnotcached(this);
-        Node* input_hash = LoadNameHashField(input);
+        TNode<Uint32T> input_hash = LoadNameHashField(input);
         Branch(IsClearWord32(input_hash,
                              Name::kDoesNotContainCachedArrayIndexMask),
                &if_inputcached, &if_inputnotcached);
@@ -180,9 +180,9 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
         BIND(&if_inputcached);
         {
           // Just return the {input}'s cached array index.
-          Node* input_array_index =
+          TNode<UintPtrT> input_array_index =
               DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
-          Return(SmiTag(input_array_index));
+          Return(SmiTag(Signed(input_array_index)));
         }
 
         BIND(&if_inputnotcached);
@@ -204,7 +204,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
         {
           // The {input} is already a Number, take care of -0.
           Label if_inputiszero(this), if_inputisnotzero(this);
-          Node* input_value = LoadHeapNumberValue(input);
+          TNode<Float64T> input_value = LoadHeapNumberValue(input);
           Branch(Float64Equal(input_value, Float64Constant(0.0)),
                  &if_inputiszero, &if_inputisnotzero);
 
@@ -229,15 +229,15 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
 
 // ES6 #sec-number.parseint
 TF_BUILTIN(ParseInt, CodeStubAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
-  Node* input = Parameter(Descriptor::kString);
-  Node* radix = Parameter(Descriptor::kRadix);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+  TNode<Object> input = CAST(Parameter(Descriptor::kString));
+  TNode<Object> radix = CAST(Parameter(Descriptor::kRadix));
 
   // Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
   Label if_radix10(this), if_generic(this, Label::kDeferred);
   GotoIf(IsUndefined(radix), &if_radix10);
-  GotoIf(WordEqual(radix, SmiConstant(10)), &if_radix10);
-  GotoIf(WordEqual(radix, SmiConstant(0)), &if_radix10);
+  GotoIf(TaggedEqual(radix, SmiConstant(10)), &if_radix10);
+  GotoIf(TaggedEqual(radix, SmiConstant(0)), &if_radix10);
   Goto(&if_generic);
 
   BIND(&if_radix10);
@@ -246,9 +246,9 @@ TF_BUILTIN(ParseInt, CodeStubAssembler) {
     Label if_inputissmi(this), if_inputisheapnumber(this),
         if_inputisstring(this);
     GotoIf(TaggedIsSmi(input), &if_inputissmi);
-    Node* input_map = LoadMap(input);
+    TNode<Map> input_map = LoadMap(CAST(input));
     GotoIf(IsHeapNumberMap(input_map), &if_inputisheapnumber);
-    Node* input_instance_type = LoadMapInstanceType(input_map);
+    TNode<Uint16T> input_instance_type = LoadMapInstanceType(input_map);
     Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
            &if_generic);
 
@@ -262,15 +262,16 @@ TF_BUILTIN(ParseInt, CodeStubAssembler) {
     {
       // Check if the {input} value is in Signed32 range.
       Label if_inputissigned32(this);
-      Node* input_value = LoadHeapNumberValue(input);
-      Node* input_value32 = TruncateFloat64ToWord32(input_value);
+      TNode<Float64T> input_value = LoadHeapNumberValue(CAST(input));
+      TNode<Int32T> input_value32 =
+          Signed(TruncateFloat64ToWord32(input_value));
       GotoIf(Float64Equal(input_value, ChangeInt32ToFloat64(input_value32)),
              &if_inputissigned32);
 
       // Check if the absolute {input} value is in the [1,1<<31[ range.
       // Take the generic path for the range [0,1[ because the result
       // could be -0.
-      Node* input_value_abs = Float64Abs(input_value);
+      TNode<Float64T> input_value_abs = Float64Abs(input_value);
 
       GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1u << 31)),
                 &if_generic);
@@ -279,28 +280,29 @@ TF_BUILTIN(ParseInt, CodeStubAssembler) {
 
       // Return the truncated int32 value, and return the tagged result.
       BIND(&if_inputissigned32);
-      Node* result = ChangeInt32ToTagged(input_value32);
+      TNode<Number> result = ChangeInt32ToTagged(input_value32);
       Return(result);
     }
 
     BIND(&if_inputisstring);
     {
       // Check if the String {input} has a cached array index.
-      Node* input_hash = LoadNameHashField(input);
+      TNode<Uint32T> input_hash = LoadNameHashField(CAST(input));
       GotoIf(IsSetWord32(input_hash, Name::kDoesNotContainCachedArrayIndexMask),
              &if_generic);
 
       // Return the cached array index as result.
-      Node* input_index =
+      TNode<UintPtrT> input_index =
           DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
-      Node* result = SmiTag(input_index);
+      TNode<Smi> result = SmiTag(Signed(input_index));
       Return(result);
     }
   }
 
   BIND(&if_generic);
   {
-    Node* result = CallRuntime(Runtime::kStringParseInt, context, input, radix);
+    TNode<Object> result =
+        CallRuntime(Runtime::kStringParseInt, context, input, radix);
     Return(result);
   }
 }
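
Two recurring changes in this ParseInt hunk are worth spelling out. WordEqual on tagged values becomes TaggedEqual, the comparison that stays correct when tagged values are 32-bit compressed pointers; and SmiTag now receives Signed(input_index) because DecodeWordFromWord32 yields an unsigned TNode<UintPtrT> while the typed SmiTag overload expects a signed word. Smi tagging itself is only a shift; a standalone sketch with illustrative constants (real layouts differ, e.g. under pointer compression):

    #include <cstdint>
    #include <cstdio>

    // On 64-bit V8 a small integer lives shifted into the upper half of
    // the word, with the low tag bit clear marking it as a Smi, so
    // tagging allocates nothing.
    constexpr int kSmiShift = 32;

    int64_t SmiTag(int64_t value) { return value << kSmiShift; }
    int64_t SmiUntag(int64_t smi) { return smi >> kSmiShift; }
    bool TaggedIsSmi(int64_t tagged) { return (tagged & 1) == 0; }

    int main() {
      int64_t index = 123;  // e.g. a cached array index
      int64_t tagged = SmiTag(index);
      std::printf("tagged=%#llx smi?=%d untagged=%lld\n",
                  (unsigned long long)tagged, TaggedIsSmi(tagged),
                  (long long)SmiUntag(tagged));
    }
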
@@ -318,8 +320,8 @@ TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) {
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
 
-  Node* result = ToThisValue(context, receiver, PrimitiveType::kNumber,
-                             "Number.prototype.valueOf");
+  TNode<Object> result = ToThisValue(context, receiver, PrimitiveType::kNumber,
+                                     "Number.prototype.valueOf");
   Return(result);
 }
 
@@ -406,7 +408,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
 
       BIND(&if_right_heapobject);
       {
-        Node* right_map = LoadMap(right);
+        TNode<Map> right_map = LoadMap(right);
 
         Label if_right_not_number(this, Label::kDeferred);
         GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number);
@@ -418,7 +420,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
 
         BIND(&if_right_not_number);
         {
-          Node* right_instance_type = LoadMapInstanceType(right_map);
+          TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
           GotoIf(IsStringInstanceType(right_instance_type),
                  &string_add_convert_left);
           GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
@@ -429,7 +431,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
 
     BIND(&if_left_heapobject);
     {
-      Node* left_map = LoadMap(left);
+      TNode<Map> left_map = LoadMap(left);
       Label if_right_smi(this), if_right_heapobject(this);
       Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject);
 
@@ -445,7 +447,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
 
         BIND(&if_left_not_number);
         {
-          Node* left_instance_type = LoadMapInstanceType(left_map);
+          TNode<Uint16T> left_instance_type = LoadMapInstanceType(left_map);
           GotoIf(IsStringInstanceType(left_instance_type),
                  &string_add_convert_right);
           GotoIf(IsBigIntInstanceType(left_instance_type), &do_bigint_add);
@@ -456,7 +458,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
 
       BIND(&if_right_heapobject);
       {
-        Node* right_map = LoadMap(right);
+        TNode<Map> right_map = LoadMap(right);
 
         Label if_left_number(this), if_left_not_number(this, Label::kDeferred);
         Branch(IsHeapNumberMap(left_map), &if_left_number, &if_left_not_number);
@@ -473,7 +475,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
 
           BIND(&if_right_not_number);
           {
-            Node* right_instance_type = LoadMapInstanceType(right_map);
+            TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
             GotoIf(IsStringInstanceType(right_instance_type),
                    &string_add_convert_left);
             GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
@@ -485,10 +487,10 @@ TF_BUILTIN(Add, AddStubAssembler) {
         BIND(&if_left_not_number);
         {
           Label if_left_bigint(this);
-          Node* left_instance_type = LoadMapInstanceType(left_map);
+          TNode<Uint16T> left_instance_type = LoadMapInstanceType(left_map);
           GotoIf(IsStringInstanceType(left_instance_type),
                  &string_add_convert_right);
-          Node* right_instance_type = LoadMapInstanceType(right_map);
+          TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
           GotoIf(IsStringInstanceType(right_instance_type),
                  &string_add_convert_left);
           GotoIf(IsBigIntInstanceType(left_instance_type), &if_left_bigint);
@@ -525,15 +527,15 @@ TF_BUILTIN(Add, AddStubAssembler) {
   BIND(&string_add_convert_left);
   {
     // Convert {left} to a String and concatenate it with the String {right}.
-    TailCallBuiltin(Builtins::kStringAdd_ConvertLeft, context, var_left.value(),
+    TailCallBuiltin(Builtins::kStringAddConvertLeft, context, var_left.value(),
                     var_right.value());
   }
 
   BIND(&string_add_convert_right);
   {
     // Convert {right} to a String and concatenate it with the String {left}.
-    TailCallBuiltin(Builtins::kStringAdd_ConvertRight, context,
-                    var_left.value(), var_right.value());
+    TailCallBuiltin(Builtins::kStringAddConvertRight, context, var_left.value(),
+                    var_right.value());
   }
 
   BIND(&do_bigint_add);
@@ -544,7 +546,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
 
   BIND(&do_double_add);
   {
-    Node* value = Float64Add(var_left_double.value(), var_right_double.value());
+    TNode<Float64T> value =
+        Float64Add(var_left_double.value(), var_right_double.value());
     Return(AllocateHeapNumberWithValue(value));
   }
 }
@@ -696,7 +699,8 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
 
   BIND(&do_double_sub);
   {
-    Node* value = Float64Sub(var_left_double.value(), var_right_double.value());
+    TNode<Float64T> value =
+        Float64Sub(var_left_double.value(), var_right_double.value());
     Return(AllocateHeapNumberWithValue(value));
   }
 
@@ -780,7 +784,8 @@ TF_BUILTIN(Negate, NumberBuiltinsAssembler) {
 
   BIND(&do_double);
   {
-    Node* value = Float64Mul(var_input_double.value(), Float64Constant(-1));
+    TNode<Float64T> value =
+        Float64Mul(var_input_double.value(), Float64Constant(-1));
     Return(AllocateHeapNumberWithValue(value));
   }
 
@@ -807,7 +812,8 @@ TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
   Return(SmiMul(CAST(var_left.value()), CAST(var_right.value())));
 
   BIND(&do_double_mul);
-  Node* value = Float64Mul(var_left_double.value(), var_right_double.value());
+  TNode<Float64T> value =
+      Float64Mul(var_left_double.value(), var_right_double.value());
   Return(AllocateHeapNumberWithValue(value));
 
   BIND(&do_bigint_mul);
@@ -851,8 +857,8 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
     }
     BIND(&dividend_is_not_zero);
 
-    Node* untagged_divisor = SmiToInt32(divisor);
-    Node* untagged_dividend = SmiToInt32(dividend);
+    TNode<Int32T> untagged_divisor = SmiToInt32(divisor);
+    TNode<Int32T> untagged_dividend = SmiToInt32(dividend);
 
     // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
     // if the Smi size is 31) and {divisor} is -1.
@@ -872,8 +878,9 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
 
     // TODO(epertoso): consider adding a machine instruction that returns
     // both the result and the remainder.
-    Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
-    Node* truncated = Int32Mul(untagged_result, untagged_divisor);
+    TNode<Int32T> untagged_result =
+        Int32Div(untagged_dividend, untagged_divisor);
+    TNode<Int32T> truncated = Int32Mul(untagged_result, untagged_divisor);
     // Do floating point division if the remainder is not 0.
     GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
     Return(SmiFromInt32(untagged_result));
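
All of the bailouts in this Smi fast path mark cases where int32 division cannot produce the JavaScript answer: a zero dividend with a negative divisor must yield -0 (handled just above this hunk), kMinInt / -1 overflows two's-complement int32, and a nonzero remainder means `/` must return the exact double. A standalone sketch of the same checks (hypothetical helper, not the V8 code itself):

    #include <cstdint>
    #include <cstdio>

    bool TrySmiDivide(int32_t dividend, int32_t divisor, int32_t* result) {
      if (divisor == 0) return false;                  // +/-Infinity or NaN
      if (dividend == 0 && divisor < 0) return false;  // 0 / -n is -0
      if (dividend == INT32_MIN && divisor == -1) return false;  // 2^31
      int32_t quotient = dividend / divisor;
      if (quotient * divisor != dividend) return false;  // remainder left
      *result = quotient;
      return true;
    }

    int main() {
      int32_t r = 0;
      bool ok = TrySmiDivide(6, 3, &r);
      std::printf("6/3 fast? %d (r=%d)\n", ok, r);
      std::printf("7/2 fast? %d\n", TrySmiDivide(7, 2, &r));
      std::printf("INT32_MIN/-1 fast? %d\n", TrySmiDivide(INT32_MIN, -1, &r));
    }
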
@@ -890,7 +897,8 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
 
   BIND(&do_double_div);
   {
-    Node* value = Float64Div(var_left_double.value(), var_right_double.value());
+    TNode<Float64T> value =
+        Float64Div(var_left_double.value(), var_right_double.value());
     Return(AllocateHeapNumberWithValue(value));
   }
 
@@ -916,7 +924,8 @@ TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
   Return(SmiMod(CAST(var_left.value()), CAST(var_right.value())));
 
   BIND(&do_double_mod);
-  Node* value = Float64Mod(var_left_double.value(), var_right_double.value());
+  TNode<Float64T> value =
+      Float64Mod(var_left_double.value(), var_right_double.value());
   Return(AllocateHeapNumberWithValue(value));
 
   BIND(&do_bigint_mod);
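
Every double path in this file ends with Return(AllocateHeapNumberWithValue(value)): a result that does not fit a Smi is boxed as a heap number. A sketch of the Smi-versus-HeapNumber decision (simplified from what V8's ChangeFloat64ToTagged does; a 32-bit Smi payload is assumed):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    bool FitsInSmi(double v) {
      if (std::trunc(v) != v) return false;           // must be an integer
      if (v == 0.0 && std::signbit(v)) return false;  // -0 needs a box
      return v >= INT32_MIN && v <= INT32_MAX;        // assumed Smi range
    }

    int main() {
      for (double v : {3.0, -0.0, 0.5, 4e9}) {
        std::printf("%g -> %s\n", v, FitsInSmi(v) ? "Smi" : "HeapNumber");
      }
    }
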
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 8d59ee3bd107cf..db9d4ed6579244 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -11,6 +11,7 @@
 #include "src/ic/accessor-assembler.h"
 #include "src/ic/keyed-store-generic.h"
 #include "src/objects/js-generator.h"
+#include "src/objects/js-objects.h"
 #include "src/objects/property-descriptor-object.h"
 #include "src/objects/property-details.h"
 #include "src/objects/shared-function-info.h"
@@ -44,10 +45,6 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
   Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable,
                                 Node* enumerable, Node* configurable);
   Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout);
-
-  Node* IsSpecialReceiverMap(SloppyTNode<Map> map);
-
-  TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
 };
 
 class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
@@ -72,8 +69,6 @@ class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
   void GetOwnValuesOrEntries(TNode<Context> context, TNode<Object> maybe_object,
                              CollectType collect_type);
 
-  void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
-
   TNode<JSArray> FastGetOwnValuesOrEntries(
       TNode<Context> context, TNode<JSObject> object,
       Label* if_call_runtime_with_fast_path, Label* if_no_properties,
@@ -86,8 +81,8 @@ class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
 
 void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
                                                    Node* string) {
-  Node* lhs = StringConstant("[object ");
-  Node* rhs = StringConstant("]");
+  TNode<String> lhs = StringConstant("[object ");
+  TNode<String> rhs = StringConstant("]");
 
   Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE);
 
@@ -100,10 +95,10 @@ Node* ObjectBuiltinsAssembler::ConstructAccessorDescriptor(Node* context,
                                                            Node* setter,
                                                            Node* enumerable,
                                                            Node* configurable) {
-  Node* native_context = LoadNativeContext(context);
-  Node* map = LoadContextElement(
-      native_context, Context::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX);
-  Node* js_desc = AllocateJSObjectFromMap(map);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<Map> map = CAST(LoadContextElement(
+      native_context, Context::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX));
+  TNode<JSObject> js_desc = AllocateJSObjectFromMap(map);
 
   StoreObjectFieldNoWriteBarrier(
       js_desc, JSAccessorPropertyDescriptor::kGetOffset, getter);
@@ -124,10 +119,10 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
                                                        Node* writable,
                                                        Node* enumerable,
                                                        Node* configurable) {
-  Node* native_context = LoadNativeContext(context);
-  Node* map = LoadContextElement(native_context,
-                                 Context::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX);
-  Node* js_desc = AllocateJSObjectFromMap(map);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<Map> map = CAST(LoadContextElement(
+      native_context, Context::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX));
+  TNode<JSObject> js_desc = AllocateJSObjectFromMap(map);
 
   StoreObjectFieldNoWriteBarrier(js_desc,
                                  JSDataPropertyDescriptor::kValueOffset, value);
@@ -144,28 +139,6 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
   return js_desc;
 }
 
-Node* ObjectBuiltinsAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
-  CSA_SLOW_ASSERT(this, IsMap(map));
-  TNode<BoolT> is_special =
-      IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
-  uint32_t mask =
-      Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
-  USE(mask);
-  // Interceptors or access checks imply special receiver.
-  CSA_ASSERT(this,
-             SelectConstant<BoolT>(IsSetWord32(LoadMapBitField(map), mask),
-                                   is_special, Int32TrueConstant()));
-  return is_special;
-}
-
-TNode<Word32T> ObjectBuiltinsAssembler::IsStringWrapperElementsKind(
-    TNode<Map> map) {
-  Node* kind = LoadMapElementsKind(map);
-  return Word32Or(
-      Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)),
-      Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS)));
-}
-
 TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyEnumerable(
     TNode<Uint32T> details) {
   TNode<Uint32T> attributes =
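
Note that IsSpecialReceiverMap and IsStringWrapperElementsKind are deleted here while ObjectCreate below still calls IsSpecialReceiverMap, so the helpers have presumably been hoisted into the shared CodeStubAssembler rather than removed outright. The deleted CSA_ASSERT encoded the invariant that a named interceptor or a needed access check implies a special receiver; a sketch of that style of bit-field test, with hypothetical bit positions (V8 packs these into Map::bit_field):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kHasNamedInterceptorBit = 1u << 1;
    constexpr uint32_t kIsAccessCheckNeededBit = 1u << 2;

    bool IsSetWord32(uint32_t word, uint32_t mask) {
      return (word & mask) != 0;
    }

    int main() {
      uint32_t bit_field = kHasNamedInterceptorBit;  // has an interceptor
      uint32_t mask = kHasNamedInterceptorBit | kIsAccessCheckNeededBit;
      std::printf("must be special receiver: %d\n",
                  IsSetWord32(bit_field, mask));
    }
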
@@ -209,7 +182,7 @@ void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
 
   BIND(&if_no_properties);
   {
-    Node* native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
     TNode<Map> array_map =
         LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
     TNode<JSArray> empty_array = AllocateJSArray(
@@ -242,18 +215,11 @@ void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
   }
 }
 
-void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties(
-    TNode<Map> map, Label* if_slow) {
-  GotoIf(IsStringWrapperElementsKind(map), if_slow);
-  GotoIf(IsSpecialReceiverMap(map), if_slow);
-  GotoIf(IsDictionaryMap(map), if_slow);
-}
-
 TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
     TNode<Context> context, TNode<JSObject> object,
     Label* if_call_runtime_with_fast_path, Label* if_no_properties,
     CollectType collect_type) {
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<Map> array_map =
       LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
   TNode<Map> map = LoadMap(object);
@@ -308,9 +274,10 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
     {
       // Currently, we will not invoke getters,
       // so the map will not be changed.
-      CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
+      CSA_ASSERT(this, TaggedEqual(map, LoadMap(object)));
       TNode<IntPtrT> descriptor_entry = var_descriptor_number.value();
-      Node* next_key = LoadKeyByDescriptorEntry(descriptors, descriptor_entry);
+      TNode<Name> next_key =
+          LoadKeyByDescriptorEntry(descriptors, descriptor_entry);
 
       // Skip Symbols.
       GotoIf(IsSymbol(next_key), &next_descriptor);
@@ -378,7 +345,7 @@ ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
   CSA_ASSERT(this, IsJSArrayMap(array_map));
 
   GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
-  Node* array = AllocateJSArray(array_map, result, SmiTag(size));
+  TNode<JSArray> array = AllocateJSArray(array_map, result, SmiTag(size));
   return TNode<JSArray>::UncheckedCast(array);
 }
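
This helper also shows the two cast flavors used all through the diff: CAST verifies the V8 type (in debug builds) before converting, while UncheckedCast is a trusted, zero-cost reinterpretation. A plain C++ analogy (hypothetical classes, not the actual CSA macros):

    #include <cassert>
    #include <cstdio>

    struct Object { virtual ~Object() = default; };
    struct JSArray : Object { int length = 0; };

    template <typename T>
    T* Cast(Object* o) {           // like CAST: checked while debugging
      T* t = dynamic_cast<T*>(o);
      assert(t != nullptr && "type check failed");
      return t;
    }

    template <typename T>
    T* UncheckedCast(Object* o) {  // trusted, no runtime check
      return static_cast<T*>(o);
    }

    int main() {
      JSArray arr;
      Object* o = &arr;
      std::printf("len=%d\n", Cast<JSArray>(o)->length);
      std::printf("len=%d\n", UncheckedCast<JSArray>(o)->length);
    }
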
 
@@ -412,8 +379,8 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
   Branch(TaggedIsSmi(object), &to_primitive, &if_objectisnotsmi);
   BIND(&if_objectisnotsmi);
 
-  Node* map = LoadMap(object);
-  TNode<Int32T> instance_type = LoadMapInstanceType(map);
+  TNode<Map> map = LoadMap(object);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(map);
 
   {
     VARIABLE(var_index, MachineType::PointerRepresentation());
@@ -510,9 +477,9 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
 
   // Check if the {object} has a usable enum cache.
   GotoIf(TaggedIsSmi(object), &if_slow);
-  Node* object_map = LoadMap(object);
-  Node* object_bit_field3 = LoadMapBitField3(object_map);
-  Node* object_enum_length =
+  TNode<Map> object_map = LoadMap(object);
+  TNode<Uint32T> object_bit_field3 = LoadMapBitField3(object_map);
+  TNode<UintPtrT> object_enum_length =
       DecodeWordFromWord32<Map::EnumLengthBits>(object_bit_field3);
   GotoIf(
       WordEqual(object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel)),
@@ -520,7 +487,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
 
   // Ensure that the {object} doesn't have any elements.
   CSA_ASSERT(this, IsJSObjectMap(object_map));
-  Node* object_elements = LoadElements(object);
+  TNode<FixedArrayBase> object_elements = LoadElements(object);
   GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
   Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
          &if_slow);
@@ -532,19 +499,19 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
   BIND(&if_fast);
   {
     // The {object} has a usable enum cache, use that.
-    Node* object_descriptors = LoadMapDescriptors(object_map);
-    Node* object_enum_cache =
-        LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset);
-    Node* object_enum_keys =
+    TNode<DescriptorArray> object_descriptors = LoadMapDescriptors(object_map);
+    TNode<EnumCache> object_enum_cache = CAST(
+        LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset));
+    TNode<Object> object_enum_keys =
         LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
 
     // Allocate a JSArray and copy the elements from the {object_enum_keys}.
     Node* array = nullptr;
     Node* elements = nullptr;
-    Node* native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
     TNode<Map> array_map =
         LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
-    TNode<Smi> array_length = SmiTag(object_enum_length);
+    TNode<Smi> array_length = SmiTag(Signed(object_enum_length));
     std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
         PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
         INTPTR_PARAMETERS);
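
This fast path never inspects the object's own properties: once EnumLengthBits is valid, the keys come straight from the descriptor array's EnumCache, which every object sharing the map also shares. A rough analogy in plain C++ (hypothetical types, not V8's):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Shape {                          // stand-in for a v8 Map
      std::vector<std::string> enum_cache;  // keys, computed once per shape
    };

    std::vector<std::string> ObjectKeys(const Shape& shape) {
      return shape.enum_cache;              // plain copy, no property walk
    }

    int main() {
      Shape point{{"x", "y"}};              // shared by every {x, y} object
      for (const auto& k : ObjectKeys(point)) std::printf("%s\n", k.c_str());
    }
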
@@ -564,7 +531,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
   BIND(&if_slow);
   {
     // Let the runtime compute the elements.
-    Node* elements = CallRuntime(Runtime::kObjectKeys, context, object);
+    TNode<FixedArray> elements =
+        CAST(CallRuntime(Runtime::kObjectKeys, context, object));
     var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset));
     var_elements.Bind(elements);
     Goto(&if_join);
@@ -573,7 +541,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
   BIND(&if_join);
   {
     // Wrap the elements into a proper JSArray and return that.
-    Node* native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
     TNode<Map> array_map =
         LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
     TNode<JSArray> array = AllocateJSArray(
@@ -596,25 +564,25 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
   // Take the slow path if the {object} IsCustomElementsReceiverInstanceType or
   // has any elements.
   GotoIf(TaggedIsSmi(object), &if_slow);
-  Node* object_map = LoadMap(object);
-  TNode<Int32T> instance_type = LoadMapInstanceType(object_map);
+  TNode<Map> object_map = LoadMap(object);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(object_map);
   GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_slow);
-  Node* object_elements = LoadElements(object);
+  TNode<FixedArrayBase> object_elements = LoadElements(object);
   GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
   Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
          &if_slow);
 
   // Check if the {object} has a usable enum cache.
   BIND(&if_empty_elements);
-  Node* object_bit_field3 = LoadMapBitField3(object_map);
-  Node* object_enum_length =
+  TNode<Uint32T> object_bit_field3 = LoadMapBitField3(object_map);
+  TNode<UintPtrT> object_enum_length =
       DecodeWordFromWord32<Map::EnumLengthBits>(object_bit_field3);
   GotoIf(
       WordEqual(object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel)),
       &try_fast);
 
   // Check whether all own properties are enumerable.
-  Node* number_descriptors =
+  TNode<UintPtrT> number_descriptors =
       DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(object_bit_field3);
   GotoIfNot(WordEqual(object_enum_length, number_descriptors), &if_slow);
 
@@ -625,19 +593,19 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
   {
     // The {object} has a usable enum cache and all own properties are
     // enumerable, use that.
-    Node* object_descriptors = LoadMapDescriptors(object_map);
-    Node* object_enum_cache =
-        LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset);
-    Node* object_enum_keys =
+    TNode<DescriptorArray> object_descriptors = LoadMapDescriptors(object_map);
+    TNode<EnumCache> object_enum_cache = CAST(
+        LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset));
+    TNode<Object> object_enum_keys =
         LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
 
     // Allocate a JSArray and copy the elements from the {object_enum_keys}.
     Node* array = nullptr;
     Node* elements = nullptr;
-    Node* native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
     TNode<Map> array_map =
         LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
-    TNode<Smi> array_length = SmiTag(object_enum_length);
+    TNode<Smi> array_length = SmiTag(Signed(object_enum_length));
     std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
         PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
         INTPTR_PARAMETERS);
@@ -649,8 +617,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
   BIND(&try_fast);
   {
     // Let the runtime compute the elements and try initializing enum cache.
-    Node* elements = CallRuntime(Runtime::kObjectGetOwnPropertyNamesTryFast,
-                                 context, object);
+    TNode<FixedArray> elements = CAST(CallRuntime(
+        Runtime::kObjectGetOwnPropertyNamesTryFast, context, object));
     var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset));
     var_elements.Bind(elements);
     Goto(&if_join);
@@ -667,8 +635,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
   BIND(&if_slow);
   {
     // Let the runtime compute the elements.
-    Node* elements =
-        CallRuntime(Runtime::kObjectGetOwnPropertyNames, context, object);
+    TNode<FixedArray> elements =
+        CAST(CallRuntime(Runtime::kObjectGetOwnPropertyNames, context, object));
     var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset));
     var_elements.Bind(elements);
     Goto(&if_join);
@@ -677,7 +645,7 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
   BIND(&if_join);
   {
     // Wrap the elements into a proper JSArray and return that.
-    Node* native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
     TNode<Map> array_map =
         LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
     TNode<JSArray> array = AllocateJSArray(
@@ -770,8 +738,8 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
   VARIABLE(var_default, MachineRepresentation::kTagged);
   VARIABLE(var_holder, MachineRepresentation::kTagged, receiver);
   GotoIf(TaggedIsSmi(receiver), &if_number);
-  Node* receiver_map = LoadMap(receiver);
-  Node* receiver_instance_type = LoadMapInstanceType(receiver_map);
+  TNode<Map> receiver_map = LoadMap(receiver);
+  TNode<Uint16T> receiver_instance_type = LoadMapInstanceType(receiver_map);
   GotoIf(IsPrimitiveInstanceType(receiver_instance_type), &if_primitive);
   const struct {
     InstanceType value;
@@ -818,58 +786,58 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
 
   BIND(&if_arguments);
   {
-    var_default.Bind(LoadRoot(RootIndex::karguments_to_string));
+    var_default.Bind(ArgumentsToStringConstant());
     Goto(&checkstringtag);
   }
 
   BIND(&if_array);
   {
-    var_default.Bind(LoadRoot(RootIndex::karray_to_string));
+    var_default.Bind(ArrayToStringConstant());
     Goto(&checkstringtag);
   }
 
   BIND(&if_boolean);
   {
-    Node* native_context = LoadNativeContext(context);
-    Node* boolean_constructor =
-        LoadContextElement(native_context, Context::BOOLEAN_FUNCTION_INDEX);
-    Node* boolean_initial_map = LoadObjectField(
-        boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset);
-    Node* boolean_prototype =
+    TNode<NativeContext> native_context = LoadNativeContext(context);
+    TNode<JSFunction> boolean_constructor = CAST(
+        LoadContextElement(native_context, Context::BOOLEAN_FUNCTION_INDEX));
+    TNode<Map> boolean_initial_map = CAST(LoadObjectField(
+        boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset));
+    TNode<Object> boolean_prototype =
         LoadObjectField(boolean_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(RootIndex::kboolean_to_string));
+    var_default.Bind(BooleanToStringConstant());
     var_holder.Bind(boolean_prototype);
     Goto(&checkstringtag);
   }
 
   BIND(&if_date);
   {
-    var_default.Bind(LoadRoot(RootIndex::kdate_to_string));
+    var_default.Bind(DateToStringConstant());
     Goto(&checkstringtag);
   }
 
   BIND(&if_error);
   {
-    var_default.Bind(LoadRoot(RootIndex::kerror_to_string));
+    var_default.Bind(ErrorToStringConstant());
     Goto(&checkstringtag);
   }
 
   BIND(&if_function);
   {
-    var_default.Bind(LoadRoot(RootIndex::kfunction_to_string));
+    var_default.Bind(FunctionToStringConstant());
     Goto(&checkstringtag);
   }
 
   BIND(&if_number);
   {
-    Node* native_context = LoadNativeContext(context);
-    Node* number_constructor =
-        LoadContextElement(native_context, Context::NUMBER_FUNCTION_INDEX);
-    Node* number_initial_map = LoadObjectField(
-        number_constructor, JSFunction::kPrototypeOrInitialMapOffset);
-    Node* number_prototype =
+    TNode<NativeContext> native_context = LoadNativeContext(context);
+    TNode<JSFunction> number_constructor = CAST(
+        LoadContextElement(native_context, Context::NUMBER_FUNCTION_INDEX));
+    TNode<Map> number_initial_map = CAST(LoadObjectField(
+        number_constructor, JSFunction::kPrototypeOrInitialMapOffset));
+    TNode<Object> number_prototype =
         LoadObjectField(number_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(RootIndex::knumber_to_string));
+    var_default.Bind(NumberToStringConstant());
     var_holder.Bind(number_prototype);
     Goto(&checkstringtag);
   }
@@ -877,7 +845,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
   BIND(&if_object);
   {
     CSA_ASSERT(this, IsJSReceiver(receiver));
-    var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
+    var_default.Bind(ObjectToStringConstant());
     Goto(&checkstringtag);
   }
 
@@ -892,10 +860,10 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
     GotoIf(IsSymbolMap(receiver_map), &if_symbol);
     GotoIf(IsUndefined(receiver), &return_undefined);
     CSA_ASSERT(this, IsNull(receiver));
-    Return(LoadRoot(RootIndex::knull_to_string));
+    Return(NullToStringConstant());
 
     BIND(&return_undefined);
-    Return(LoadRoot(RootIndex::kundefined_to_string));
+    Return(UndefinedToStringConstant());
   }
 
   BIND(&if_proxy);
@@ -905,16 +873,15 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
     // depending on whether the {receiver} is callable. The order matters here,
     // i.e. we need to execute the %ArrayIsArray check before the [[Get]] below,
     // as the exception is observable.
-    Node* receiver_is_array =
+    TNode<Object> receiver_is_array =
         CallRuntime(Runtime::kArrayIsArray, context, receiver);
     TNode<String> builtin_tag = Select<String>(
-        IsTrue(receiver_is_array),
-        [=] { return CAST(LoadRoot(RootIndex::kArray_string)); },
+        IsTrue(receiver_is_array), [=] { return ArrayStringConstant(); },
         [=] {
           return Select<String>(
               IsCallableMap(receiver_map),
-              [=] { return CAST(LoadRoot(RootIndex::kFunction_string)); },
-              [=] { return CAST(LoadRoot(RootIndex::kObject_string)); });
+              [=] { return FunctionStringConstant(); },
+              [=] { return ObjectStringConstant(); });
         });
 
     // Lookup the @@toStringTag property on the {receiver}.
@@ -935,48 +902,48 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
 
   BIND(&if_regexp);
   {
-    var_default.Bind(LoadRoot(RootIndex::kregexp_to_string));
+    var_default.Bind(RegexpToStringConstant());
     Goto(&checkstringtag);
   }
 
   BIND(&if_string);
   {
-    Node* native_context = LoadNativeContext(context);
-    Node* string_constructor =
-        LoadContextElement(native_context, Context::STRING_FUNCTION_INDEX);
-    Node* string_initial_map = LoadObjectField(
-        string_constructor, JSFunction::kPrototypeOrInitialMapOffset);
-    Node* string_prototype =
+    TNode<NativeContext> native_context = LoadNativeContext(context);
+    TNode<JSFunction> string_constructor = CAST(
+        LoadContextElement(native_context, Context::STRING_FUNCTION_INDEX));
+    TNode<Map> string_initial_map = CAST(LoadObjectField(
+        string_constructor, JSFunction::kPrototypeOrInitialMapOffset));
+    TNode<Object> string_prototype =
         LoadObjectField(string_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(RootIndex::kstring_to_string));
+    var_default.Bind(StringToStringConstant());
     var_holder.Bind(string_prototype);
     Goto(&checkstringtag);
   }
 
   BIND(&if_symbol);
   {
-    Node* native_context = LoadNativeContext(context);
-    Node* symbol_constructor =
-        LoadContextElement(native_context, Context::SYMBOL_FUNCTION_INDEX);
-    Node* symbol_initial_map = LoadObjectField(
-        symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset);
-    Node* symbol_prototype =
+    TNode<NativeContext> native_context = LoadNativeContext(context);
+    TNode<JSFunction> symbol_constructor = CAST(
+        LoadContextElement(native_context, Context::SYMBOL_FUNCTION_INDEX));
+    TNode<Map> symbol_initial_map = CAST(LoadObjectField(
+        symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset));
+    TNode<Object> symbol_prototype =
         LoadObjectField(symbol_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
+    var_default.Bind(ObjectToStringConstant());
     var_holder.Bind(symbol_prototype);
     Goto(&checkstringtag);
   }
 
   BIND(&if_bigint);
   {
-    Node* native_context = LoadNativeContext(context);
-    Node* bigint_constructor =
-        LoadContextElement(native_context, Context::BIGINT_FUNCTION_INDEX);
-    Node* bigint_initial_map = LoadObjectField(
-        bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset);
-    Node* bigint_prototype =
+    TNode<NativeContext> native_context = LoadNativeContext(context);
+    TNode<JSFunction> bigint_constructor = CAST(
+        LoadContextElement(native_context, Context::BIGINT_FUNCTION_INDEX));
+    TNode<Map> bigint_initial_map = CAST(LoadObjectField(
+        bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset));
+    TNode<Object> bigint_prototype =
         LoadObjectField(bigint_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
+    var_default.Bind(ObjectToStringConstant());
     var_holder.Bind(bigint_prototype);
     Goto(&checkstringtag);
   }
@@ -994,11 +961,11 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
     // which might have interesting properties.
     var_holder.Bind(receiver);
     GotoIf(TaggedIsSmi(receiver_value), &if_value_is_number);
-    Node* receiver_value_map = LoadMap(receiver_value);
+    TNode<Map> receiver_value_map = LoadMap(receiver_value);
     GotoIf(IsHeapNumberMap(receiver_value_map), &if_value_is_number);
     GotoIf(IsBooleanMap(receiver_value_map), &if_value_is_boolean);
     GotoIf(IsSymbolMap(receiver_value_map), &if_value_is_symbol);
-    Node* receiver_value_instance_type =
+    TNode<Uint16T> receiver_value_instance_type =
         LoadMapInstanceType(receiver_value_map);
     GotoIf(IsBigIntInstanceType(receiver_value_instance_type),
            &if_value_is_bigint);
@@ -1007,31 +974,31 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
 
     BIND(&if_value_is_number);
     {
-      var_default.Bind(LoadRoot(RootIndex::knumber_to_string));
+      var_default.Bind(NumberToStringConstant());
       Goto(&checkstringtag);
     }
 
     BIND(&if_value_is_boolean);
     {
-      var_default.Bind(LoadRoot(RootIndex::kboolean_to_string));
+      var_default.Bind(BooleanToStringConstant());
       Goto(&checkstringtag);
     }
 
     BIND(&if_value_is_string);
     {
-      var_default.Bind(LoadRoot(RootIndex::kstring_to_string));
+      var_default.Bind(StringToStringConstant());
       Goto(&checkstringtag);
     }
 
     BIND(&if_value_is_bigint);
     {
-      var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
+      var_default.Bind(ObjectToStringConstant());
       Goto(&checkstringtag);
     }
 
     BIND(&if_value_is_symbol);
     {
-      var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
+      var_default.Bind(ObjectToStringConstant());
       Goto(&checkstringtag);
     }
   }
@@ -1048,8 +1015,8 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
     {
       Node* holder = var_holder.value();
       GotoIf(IsNull(holder), &return_default);
-      Node* holder_map = LoadMap(holder);
-      Node* holder_bit_field3 = LoadMapBitField3(holder_map);
+      TNode<Map> holder_map = LoadMap(holder);
+      TNode<Uint32T> holder_bit_field3 = LoadMapBitField3(holder_map);
       GotoIf(IsSetWord32<Map::MayHaveInterestingSymbolsBit>(holder_bit_field3),
              &return_generic);
       var_holder.Bind(LoadMapPrototype(holder_map));
@@ -1058,10 +1025,10 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
 
     BIND(&return_generic);
     {
-      Node* tag = GetProperty(context, ToObject(context, receiver),
-                              LoadRoot(RootIndex::kto_string_tag_symbol));
+      TNode<Object> tag = GetProperty(context, ToObject(context, receiver),
+                                      ToStringTagSymbolConstant());
       GotoIf(TaggedIsSmi(tag), &return_default);
-      GotoIfNot(IsString(tag), &return_default);
+      GotoIfNot(IsString(CAST(tag)), &return_default);
       ReturnToStringFormat(context, tag);
     }
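
This loop is why Object.prototype.toString usually stays cheap: it walks the prototype chain and performs the observable @@toStringTag [[Get]] only when some map along the way is flagged as possibly carrying interesting symbols. A sketch of the walk (hypothetical structs, not V8's):

    #include <cstdio>

    struct Map {
      bool may_have_interesting_symbols;
      const Map* prototype;         // nullptr plays the role of null
    };

    // True when the generic (@@toStringTag lookup) path is required.
    bool NeedsGenericToString(const Map* holder_map) {
      for (const Map* m = holder_map; m != nullptr; m = m->prototype) {
        if (m->may_have_interesting_symbols) return true;
      }
      return false;                 // use the default "[object Xxx]" tag
    }

    int main() {
      Map object_proto{false, nullptr};
      Map tagged{true, &object_proto};
      std::printf("%d %d\n", NeedsGenericToString(&object_proto),
                  NeedsGenericToString(&tagged));
    }
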
 
@@ -1080,9 +1047,9 @@ TF_BUILTIN(ObjectPrototypeValueOf, CodeStubAssembler) {
 
 // ES #sec-object.create
 TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) {
-  Node* const prototype = Parameter(Descriptor::kPrototypeArg);
-  Node* const context = Parameter(Descriptor::kContext);
-  Node* const native_context = LoadNativeContext(context);
+  TNode<Object> const prototype = CAST(Parameter(Descriptor::kPrototypeArg));
+  TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   Label call_runtime(this, Label::kDeferred), prototype_null(this),
       prototype_jsreceiver(this);
   {
@@ -1108,16 +1075,16 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) {
   {
     Comment("Prototype is JSReceiver");
     properties.Bind(EmptyFixedArrayConstant());
-    Node* object_function =
-        LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX);
-    Node* object_function_map = LoadObjectField(
+    TNode<HeapObject> object_function = CAST(
+        LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
+    TNode<Object> object_function_map = LoadObjectField(
         object_function, JSFunction::kPrototypeOrInitialMapOffset);
     map.Bind(object_function_map);
-    GotoIf(WordEqual(prototype, LoadMapPrototype(map.value())),
+    GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())),
            &instantiate_map);
     Comment("Try loading the prototype info");
-    Node* prototype_info =
-        LoadMapPrototypeInfo(LoadMap(prototype), &call_runtime);
+    TNode<PrototypeInfo> prototype_info =
+        LoadMapPrototypeInfo(LoadMap(CAST(prototype)), &call_runtime);
     TNode<MaybeObject> maybe_map = LoadMaybeWeakObjectField(
         prototype_info, PrototypeInfo::kObjectCreateMapOffset);
     GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()), &call_runtime);
@@ -1128,15 +1095,16 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) {
   BIND(&instantiate_map);
   {
     Comment("Instantiate map");
-    Node* instance = AllocateJSObjectFromMap(map.value(), properties.value());
+    TNode<JSObject> instance =
+        AllocateJSObjectFromMap(map.value(), properties.value());
     Return(instance);
   }
 
   BIND(&call_runtime);
   {
     Comment("Call Runtime (prototype is not null/jsreceiver)");
-    Node* result = CallRuntime(Runtime::kObjectCreate, context, prototype,
-                               UndefinedConstant());
+    TNode<Object> result = CallRuntime(Runtime::kObjectCreate, context,
+                                       prototype, UndefinedConstant());
     Return(result);
   }
 }
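
The fast path in CreateObjectWithoutProperties hinges on the map cached at PrototypeInfo::kObjectCreateMapOffset: repeated Object.create(p) calls with the same prototype reuse one map instead of minting a fresh hidden class each time. A rough analogy (hypothetical types; V8 holds the cached map weakly, this sketch holds it strongly and leaks it for brevity):

    #include <cstdio>

    struct Map;
    struct PrototypeInfo {
      Map* object_create_map = nullptr;  // cache slot
    };
    struct Map {
      PrototypeInfo info;
    };

    Map* GetOrCreateObjectCreateMap(Map* prototype_map) {
      if (!prototype_map->info.object_create_map) {
        prototype_map->info.object_create_map = new Map();  // slow path, once
      }
      return prototype_map->info.object_create_map;  // fast path afterwards
    }

    int main() {
      Map proto_map;
      Map* m1 = GetOrCreateObjectCreateMap(&proto_map);
      Map* m2 = GetOrCreateObjectCreateMap(&proto_map);
      std::printf("same map reused: %d\n", m1 == m2);
    }
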
@@ -1146,13 +1114,13 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
   int const kPrototypeArg = 0;
   int const kPropertiesArg = 1;
 
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
-  Node* prototype = args.GetOptionalArgumentValue(kPrototypeArg);
-  Node* properties = args.GetOptionalArgumentValue(kPropertiesArg);
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Object> prototype = args.GetOptionalArgumentValue(kPrototypeArg);
+  TNode<Object> properties = args.GetOptionalArgumentValue(kPropertiesArg);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
   Label call_runtime(this, Label::kDeferred), prototype_valid(this),
       no_properties(this);
@@ -1169,14 +1137,14 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
     GotoIf(TaggedIsSmi(properties), &call_runtime);
     // Undefined implies no properties.
     GotoIf(IsUndefined(properties), &no_properties);
-    Node* properties_map = LoadMap(properties);
+    TNode<Map> properties_map = LoadMap(CAST(properties));
     GotoIf(IsSpecialReceiverMap(properties_map), &call_runtime);
     // Stay on the fast path only if there are no elements.
-    GotoIfNot(WordEqual(LoadElements(properties),
-                        LoadRoot(RootIndex::kEmptyFixedArray)),
-              &call_runtime);
+    GotoIfNot(
+        TaggedEqual(LoadElements(CAST(properties)), EmptyFixedArrayConstant()),
+        &call_runtime);
     // Handle dictionary objects or fast objects with properties in runtime.
-    Node* bit_field3 = LoadMapBitField3(properties_map);
+    TNode<Uint32T> bit_field3 = LoadMapBitField3(properties_map);
     GotoIf(IsSetWord32<Map::IsDictionaryMapBit>(bit_field3), &call_runtime);
     Branch(IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
            &call_runtime, &no_properties);
@@ -1202,16 +1170,16 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
     BIND(&non_null_proto);
     {
       properties.Bind(EmptyFixedArrayConstant());
-      Node* object_function =
-          LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX);
-      Node* object_function_map = LoadObjectField(
+      TNode<HeapObject> object_function =
+          CAST(LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX));
+      TNode<Object> object_function_map = LoadObjectField(
           object_function, JSFunction::kPrototypeOrInitialMapOffset);
       map.Bind(object_function_map);
-      GotoIf(WordEqual(prototype, LoadMapPrototype(map.value())),
+      GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())),
              &instantiate_map);
       // Try loading the prototype info.
-      Node* prototype_info =
-          LoadMapPrototypeInfo(LoadMap(prototype), &call_runtime);
+      TNode<PrototypeInfo> prototype_info =
+          LoadMapPrototypeInfo(LoadMap(CAST(prototype)), &call_runtime);
       Comment("Load ObjectCreateMap from PrototypeInfo");
       TNode<MaybeObject> maybe_map = LoadMaybeWeakObjectField(
           prototype_info, PrototypeInfo::kObjectCreateMapOffset);
@@ -1223,14 +1191,15 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
 
     BIND(&instantiate_map);
     {
-      Node* instance = AllocateJSObjectFromMap(map.value(), properties.value());
+      TNode<JSObject> instance =
+          AllocateJSObjectFromMap(map.value(), properties.value());
       args.PopAndReturn(instance);
     }
   }
 
   BIND(&call_runtime);
   {
-    Node* result =
+    TNode<Object> result =
         CallRuntime(Runtime::kObjectCreate, context, prototype, properties);
     args.PopAndReturn(result);
   }
@@ -1256,11 +1225,11 @@ TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
   Node* const done = Parameter(Descriptor::kDone);
   Node* const context = Parameter(Descriptor::kContext);
 
-  Node* const native_context = LoadNativeContext(context);
-  Node* const map =
-      LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Map> const map = CAST(
+      LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
 
-  Node* const result = AllocateJSObjectFromMap(map);
+  TNode<JSObject> const result = AllocateJSObjectFromMap(map);
 
   StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value);
   StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done);
@@ -1309,27 +1278,31 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
   // have one.
   Label done(this), runtime(this);
   GotoIfNot(IsFunctionWithPrototypeSlotMap(LoadMap(closure)), &runtime);
-  Node* maybe_map =
-      LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset);
+  TNode<HeapObject> maybe_map =
+      CAST(LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset));
   GotoIf(DoesntHaveInstanceType(maybe_map, MAP_TYPE), &runtime);
+  TNode<Map> map = CAST(maybe_map);
 
-  Node* shared =
-      LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
-  Node* bytecode_array = LoadSharedFunctionInfoBytecodeArray(shared);
+  TNode<SharedFunctionInfo> shared =
+      CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
+  TNode<BytecodeArray> bytecode_array =
+      LoadSharedFunctionInfoBytecodeArray(shared);
 
-  Node* formal_parameter_count = ChangeInt32ToIntPtr(
+  TNode<IntPtrT> formal_parameter_count = ChangeInt32ToIntPtr(
       LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
                       MachineType::Uint16()));
-  Node* frame_size = ChangeInt32ToIntPtr(LoadObjectField(
+  TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(LoadObjectField(
       bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
-  Node* size = IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
-                         formal_parameter_count);
-  Node* parameters_and_registers = AllocateFixedArray(HOLEY_ELEMENTS, size);
+  TNode<WordT> size =
+      IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
+                formal_parameter_count);
+  TNode<FixedArrayBase> parameters_and_registers =
+      AllocateFixedArray(HOLEY_ELEMENTS, size);
   FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
                           IntPtrConstant(0), size, RootIndex::kUndefinedValue);
   // TODO(cbruni): support start_offset to avoid double initialization.
-  Node* result = AllocateJSObjectFromMap(maybe_map, nullptr, nullptr, kNone,
-                                         kWithSlackTracking);
+  TNode<JSObject> result =
+      AllocateJSObjectFromMap(map, nullptr, nullptr, kNone, kWithSlackTracking);
   StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kFunctionOffset,
                                  closure);
   StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContextOffset,
@@ -1339,13 +1312,13 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
   StoreObjectFieldNoWriteBarrier(
       result, JSGeneratorObject::kParametersAndRegistersOffset,
       parameters_and_registers);
-  Node* resume_mode = SmiConstant(JSGeneratorObject::ResumeMode::kNext);
+  TNode<Smi> resume_mode = SmiConstant(JSGeneratorObject::ResumeMode::kNext);
   StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kResumeModeOffset,
                                  resume_mode);
-  Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
+  TNode<Smi> executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
   StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset,
                                  executing);
-  GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(maybe_map),
+  GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(map),
                               JS_ASYNC_GENERATOR_OBJECT_TYPE),
             &done);
   StoreObjectFieldNoWriteBarrier(
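
The size computation above mixes units: frame_size is in bytes, so the WordSar by kTaggedSizeLog2 converts it to a count of tagged slots before formal_parameter_count is added. A worked example (kTaggedSizeLog2 = 3 assumes 8-byte tagged values; with pointer compression it would be 2):

    #include <cstdio>

    int main() {
      const int kTaggedSizeLog2 = 3;
      int frame_size = 64;               // bytes of interpreter registers
      int formal_parameter_count = 2;    // from the SharedFunctionInfo
      int size = (frame_size >> kTaggedSizeLog2) + formal_parameter_count;
      std::printf("fixed array slots: %d\n", size);  // 8 registers + 2 params
    }
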
@@ -1369,11 +1342,11 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
   CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
 
   CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
-  Node* object = args.GetOptionalArgumentValue(0);
-  Node* key = args.GetOptionalArgumentValue(1);
+  TNode<Object> object_input = args.GetOptionalArgumentValue(0);
+  TNode<Object> key = args.GetOptionalArgumentValue(1);
 
   // 1. Let obj be ? ToObject(O).
-  object = ToObject_Inline(CAST(context), CAST(object));
+  TNode<JSReceiver> object = ToObject_Inline(CAST(context), object_input);
 
   // 2. Let key be ? ToPropertyKey(P).
   key = CallBuiltin(Builtins::kToName, context, key);
@@ -1382,8 +1355,8 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
   Label if_keyisindex(this), if_iskeyunique(this),
       call_runtime(this, Label::kDeferred),
       return_undefined(this, Label::kDeferred), if_notunique_name(this);
-  Node* map = LoadMap(object);
-  TNode<Int32T> instance_type = LoadMapInstanceType(map);
+  TNode<Map> map = LoadMap(object);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(map);
   GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime);
   {
     VARIABLE(var_index, MachineType::PointerRepresentation(),
@@ -1440,15 +1413,15 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
 
   BIND(&call_runtime);
   {
-    Node* desc =
+    TNode<Object> desc =
         CallRuntime(Runtime::kGetOwnPropertyDescriptor, context, object, key);
 
     GotoIf(IsUndefined(desc), &return_undefined);
 
-    CSA_ASSERT(this, IsFixedArray(desc));
+    TNode<FixedArray> desc_array = CAST(desc);
 
     // 4. Return FromPropertyDescriptor(desc).
-    Node* js_desc = FromPropertyDescriptor(context, desc);
+    Node* js_desc = FromPropertyDescriptor(context, desc_array);
     args.PopAndReturn(js_desc);
   }
   BIND(&return_undefined);
@@ -1471,10 +1444,10 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context,
                                                       Node* desc) {
   VARIABLE(js_descriptor, MachineRepresentation::kTagged);
 
-  Node* flags = LoadAndUntagToWord32ObjectField(
+  TNode<Int32T> flags = LoadAndUntagToWord32ObjectField(
       desc, PropertyDescriptorObject::kFlagsOffset);
 
-  Node* has_flags =
+  TNode<Word32T> has_flags =
       Word32And(flags, Int32Constant(PropertyDescriptorObject::kHasMask));
 
   Label if_accessor_desc(this), if_data_desc(this), if_generic_desc(this),
@@ -1512,13 +1485,13 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context,
 
   BIND(&if_generic_desc);
   {
-    Node* native_context = LoadNativeContext(context);
-    Node* map = LoadContextElement(
-        native_context, Context::SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
+    TNode<Map> map = CAST(LoadContextElement(
+        native_context, Context::SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP));
     // We want to preallocate the slots for value, writable, get, set,
     // enumerable and configurable - a total of 6.
     TNode<NameDictionary> properties = AllocateNameDictionary(6);
-    Node* js_desc = AllocateJSObjectFromMap(map, properties);
+    TNode<JSObject> js_desc = AllocateJSObjectFromMap(map, properties);
 
     Label bailout(this, Label::kDeferred);
 
@@ -1579,8 +1552,10 @@ Node* ObjectBuiltinsAssembler::FromPropertyDetails(Node* context,
 
   BIND(&if_accessor_desc);
   {
-    Node* getter = LoadObjectField(raw_value, AccessorPair::kGetterOffset);
-    Node* setter = LoadObjectField(raw_value, AccessorPair::kSetterOffset);
+    TNode<Object> getter =
+        LoadObjectField(raw_value, AccessorPair::kGetterOffset);
+    TNode<Object> setter =
+        LoadObjectField(raw_value, AccessorPair::kSetterOffset);
     js_descriptor.Bind(ConstructAccessorDescriptor(
         context, GetAccessorOrUndefined(getter, if_bailout),
         GetAccessorOrUndefined(setter, if_bailout),
@@ -1610,7 +1585,7 @@ Node* ObjectBuiltinsAssembler::GetAccessorOrUndefined(Node* accessor,
 
   GotoIf(IsNull(accessor), &bind_undefined);
   result.Bind(accessor);
-  Node* map = LoadMap(accessor);
+  TNode<Map> map = LoadMap(accessor);
   // TODO(ishell): probe template instantiations cache.
   GotoIf(IsFunctionTemplateInfoMap(map), if_bailout);
   Goto(&return_result);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 1ca5fffd8db556..93f011ffa1efe4 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -156,8 +156,11 @@ Object ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
       case LookupIterator::ACCESSOR: {
         Handle<Object> maybe_pair = it.GetAccessors();
         if (maybe_pair->IsAccessorPair()) {
+          Handle<NativeContext> native_context =
+              it.GetHolder<JSReceiver>()->GetCreationContext();
           return *AccessorPair::GetComponent(
-              isolate, Handle<AccessorPair>::cast(maybe_pair), component);
+              isolate, native_context, Handle<AccessorPair>::cast(maybe_pair),
+              component);
         }
       }
     }
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 1339e2dccd788e..a1da55e0d931e3 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -11,6 +11,8 @@
 #include "src/builtins/builtins.h"
 #include "src/codegen/code-factory.h"
 #include "src/codegen/code-stub-assembler.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/js-objects.h"
 #include "src/objects/js-promise.h"
 #include "src/objects/objects-inl.h"
 #include "src/objects/smi.h"
@@ -24,13 +26,14 @@ using TNode = CodeStubAssembler::TNode<T>;
 using IteratorRecord = TorqueStructIteratorRecord;
 
 Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
-  Node* const native_context = LoadNativeContext(context);
-  Node* const promise_fun =
-      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<JSFunction> const promise_fun =
+      CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
   CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
-  Node* const promise_map =
+  TNode<Object> const promise_map =
       LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
-  Node* const promise = Allocate(JSPromise::kSizeWithEmbedderFields);
+  TNode<HeapObject> const promise =
+      Allocate(JSPromise::kSizeWithEmbedderFields);
   StoreMapNoWriteBarrier(promise, promise_map);
   StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
                        RootIndex::kEmptyFixedArray);
@@ -99,14 +102,14 @@ PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions(
     Node* promise, Node* debug_event, Node* native_context) {
   Node* const promise_context = CreatePromiseResolvingFunctionsContext(
       promise, debug_event, native_context);
-  Node* const map = LoadContextElement(
+  TNode<Object> const map = LoadContextElement(
       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
-  Node* const resolve_info = LoadContextElement(
+  TNode<Object> const resolve_info = LoadContextElement(
       native_context,
       Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);
   Node* const resolve =
       AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
-  Node* const reject_info = LoadContextElement(
+  TNode<Object> const reject_info = LoadContextElement(
       native_context,
       Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX);
   Node* const reject =
@@ -137,7 +140,7 @@ void PromiseBuiltinsAssembler::ExtractHandlerContext(Node* handler,
     };
     static_assert(arraysize(case_values) == arraysize(case_labels), "");
     TNode<Map> handler_map = LoadMap(var_handler.value());
-    TNode<Int32T> handler_type = LoadMapInstanceType(handler_map);
+    TNode<Uint16T> handler_type = LoadMapInstanceType(handler_map);
     Switch(handler_type, &done, case_values, case_labels,
            arraysize(case_labels));
 
@@ -162,7 +165,7 @@ void PromiseBuiltinsAssembler::ExtractHandlerContext(Node* handler,
     BIND(&if_function);
     {
       // Use the function's context.
-      Node* handler_context =
+      TNode<Object> handler_context =
           LoadObjectField(var_handler.value(), JSFunction::kContextOffset);
       var_context->Bind(LoadNativeContext(CAST(handler_context)));
       Goto(&done);
@@ -176,19 +179,19 @@ void PromiseBuiltinsAssembler::ExtractHandlerContext(Node* handler,
 
 // ES #sec-newpromisecapability
 TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
-  Node* const context = Parameter(Descriptor::kContext);
-  Node* const constructor = Parameter(Descriptor::kConstructor);
-  Node* const debug_event = Parameter(Descriptor::kDebugEvent);
-  TNode<Context> const native_context = LoadNativeContext(context);
+  TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
+  TNode<Object> const constructor = CAST(Parameter(Descriptor::kConstructor));
+  TNode<Object> const debug_event = CAST(Parameter(Descriptor::kDebugEvent));
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
 
   Label if_not_constructor(this, Label::kDeferred),
       if_notcallable(this, Label::kDeferred), if_fast_promise_capability(this),
       if_slow_promise_capability(this, Label::kDeferred);
   GotoIf(TaggedIsSmi(constructor), &if_not_constructor);
-  GotoIfNot(IsConstructorMap(LoadMap(constructor)), &if_not_constructor);
-  Branch(WordEqual(constructor,
-                   LoadContextElement(native_context,
-                                      Context::PROMISE_FUNCTION_INDEX)),
+  GotoIfNot(IsConstructorMap(LoadMap(CAST(constructor))), &if_not_constructor);
+  Branch(TaggedEqual(constructor,
+                     LoadContextElement(native_context,
+                                        Context::PROMISE_FUNCTION_INDEX)),
          &if_fast_promise_capability, &if_slow_promise_capability);
 
   BIND(&if_fast_promise_capability);
@@ -201,7 +204,7 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
     std::tie(resolve, reject) =
         CreatePromiseResolvingFunctions(promise, debug_event, native_context);
 
-    Node* capability = Allocate(PromiseCapability::kSize);
+    TNode<HeapObject> capability = Allocate(PromiseCapability::kSize);
     StoreMapNoWriteBarrier(capability, RootIndex::kPromiseCapabilityMap);
     StoreObjectFieldNoWriteBarrier(capability,
                                    PromiseCapability::kPromiseOffset, promise);
@@ -214,7 +217,7 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
 
   BIND(&if_slow_promise_capability);
   {
-    Node* capability = Allocate(PromiseCapability::kSize);
+    TNode<HeapObject> capability = Allocate(PromiseCapability::kSize);
     StoreMapNoWriteBarrier(capability, RootIndex::kPromiseCapabilityMap);
     StoreObjectFieldRoot(capability, PromiseCapability::kPromiseOffset,
                          RootIndex::kUndefinedValue);
@@ -225,25 +228,26 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
 
     Node* executor_context =
         CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
-    Node* executor_info = LoadContextElement(
+    TNode<Object> executor_info = LoadContextElement(
         native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN);
-    Node* function_map = LoadContextElement(
+    TNode<Object> function_map = LoadContextElement(
         native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
     TNode<JSFunction> executor = CAST(AllocateFunctionWithMapAndContext(
         function_map, executor_info, executor_context));
 
-    Node* promise = Construct(native_context, CAST(constructor), executor);
+    TNode<JSReceiver> promise =
+        Construct(native_context, CAST(constructor), executor);
     StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
 
-    Node* resolve =
+    TNode<Object> resolve =
         LoadObjectField(capability, PromiseCapability::kResolveOffset);
     GotoIf(TaggedIsSmi(resolve), &if_notcallable);
-    GotoIfNot(IsCallable(resolve), &if_notcallable);
+    GotoIfNot(IsCallable(CAST(resolve)), &if_notcallable);
 
-    Node* reject =
+    TNode<Object> reject =
         LoadObjectField(capability, PromiseCapability::kRejectOffset);
     GotoIf(TaggedIsSmi(reject), &if_notcallable);
-    GotoIfNot(IsCallable(reject), &if_notcallable);
+    GotoIfNot(IsCallable(CAST(reject)), &if_notcallable);
     Return(capability);
   }
 
@@ -258,7 +262,8 @@ Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
                                                      int slots) {
   DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
 
-  Node* const context = AllocateInNewSpace(FixedArray::SizeFor(slots));
+  TNode<HeapObject> const context =
+      AllocateInNewSpace(FixedArray::SizeFor(slots));
   InitializeFunctionContext(native_context, context, slots);
   return context;
 }
@@ -296,9 +301,10 @@ PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction(
                        index, SmiConstant(PropertyArray::HashField::kMax)));
   CSA_ASSERT(this, IsNativeContext(native_context));
 
-  Node* const map = LoadContextElement(
+  TNode<Object> const map = LoadContextElement(
       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
-  Node* const resolve_info = LoadContextElement(native_context, slot_index);
+  TNode<Object> const resolve_info =
+      LoadContextElement(native_context, slot_index);
   TNode<JSFunction> resolve =
       Cast(AllocateFunctionWithMapAndContext(map, resolve_info, context));
 
@@ -332,7 +338,8 @@ Node* PromiseBuiltinsAssembler::CreatePromiseGetCapabilitiesExecutorContext(
 }
 
 Node* PromiseBuiltinsAssembler::PromiseHasHandler(Node* promise) {
-  Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
+  TNode<Smi> const flags =
+      CAST(LoadObjectField(promise, JSPromise::kFlagsOffset));
   return IsSetWord(SmiUntag(flags), 1 << JSPromise::kHasHandlerBit);
 }
 
@@ -344,12 +351,12 @@ void PromiseBuiltinsAssembler::PromiseSetHasHandler(Node* promise) {
   StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
 }
 
-Node* PromiseBuiltinsAssembler::IsPromiseStatus(
-    Node* actual, v8::Promise::PromiseState expected) {
+TNode<BoolT> PromiseBuiltinsAssembler::IsPromiseStatus(
+    TNode<Word32T> actual, v8::Promise::PromiseState expected) {
   return Word32Equal(actual, Int32Constant(expected));
 }
 
-Node* PromiseBuiltinsAssembler::PromiseStatus(Node* promise) {
+TNode<Word32T> PromiseBuiltinsAssembler::PromiseStatus(Node* promise) {
   STATIC_ASSERT(JSPromise::kStatusShift == 0);
   TNode<Smi> const flags =
       CAST(LoadObjectField(promise, JSPromise::kFlagsOffset));
@@ -394,7 +401,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
                IsUndefined(result_promise_or_capability)));
 
   Label if_pending(this), if_notpending(this), done(this);
-  Node* const status = PromiseStatus(promise);
+  TNode<Word32T> const status = PromiseStatus(promise);
   Branch(IsPromiseStatus(status, v8::Promise::kPending), &if_pending,
          &if_notpending);
 
@@ -404,7 +411,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
     // PromiseReaction holding both the onFulfilled and onRejected callbacks.
     // Once the {promise} is resolved we decide on the concrete handler to
     // push onto the microtask queue.
-    Node* const promise_reactions =
+    TNode<Object> const promise_reactions =
         LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
     Node* const reaction =
         AllocatePromiseReaction(promise_reactions, result_promise_or_capability,
@@ -426,7 +433,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
 
     BIND(&if_fulfilled);
     {
-      var_map.Bind(LoadRoot(RootIndex::kPromiseFulfillReactionJobTaskMap));
+      var_map.Bind(PromiseFulfillReactionJobTaskMapConstant());
       var_handler.Bind(on_fulfilled);
 
       Label use_fallback(this, Label::kDeferred), done(this);
@@ -445,7 +452,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
     BIND(&if_rejected);
     {
       CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
-      var_map.Bind(LoadRoot(RootIndex::kPromiseRejectReactionJobTaskMap));
+      var_map.Bind(PromiseRejectReactionJobTaskMapConstant());
       var_handler.Bind(on_rejected);
 
       Label use_fallback(this, Label::kDeferred), done(this);
@@ -465,7 +472,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
 
     BIND(&enqueue);
     {
-      Node* argument =
+      TNode<Object> argument =
           LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
       Node* microtask = AllocatePromiseReactionJobTask(
           var_map.value(), var_handler_context.value(), argument,
@@ -500,7 +507,7 @@ TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
 Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(
     Node* next, Node* promise_or_capability, Node* fulfill_handler,
     Node* reject_handler) {
-  Node* const reaction = Allocate(PromiseReaction::kSize);
+  TNode<HeapObject> const reaction = Allocate(PromiseReaction::kSize);
   StoreMapNoWriteBarrier(reaction, RootIndex::kPromiseReactionMap);
   StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kNextOffset, next);
   StoreObjectFieldNoWriteBarrier(reaction,
@@ -516,7 +523,7 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(
 Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
     Node* map, Node* context, Node* argument, Node* handler,
     Node* promise_or_capability) {
-  Node* const microtask =
+  TNode<HeapObject> const microtask =
       Allocate(PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks);
   StoreMapNoWriteBarrier(microtask, map);
   StoreObjectFieldNoWriteBarrier(
@@ -531,19 +538,10 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
   return microtask;
 }
 
-Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
-    RootIndex map_root_index, Node* context, Node* argument, Node* handler,
-    Node* promise_or_capability) {
-  DCHECK(map_root_index == RootIndex::kPromiseFulfillReactionJobTaskMap ||
-         map_root_index == RootIndex::kPromiseRejectReactionJobTaskMap);
-  Node* const map = LoadRoot(map_root_index);
-  return AllocatePromiseReactionJobTask(map, context, argument, handler,
-                                        promise_or_capability);
-}
-
 Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask(
     Node* promise_to_resolve, Node* then, Node* thenable, Node* context) {
-  Node* const microtask = Allocate(PromiseResolveThenableJobTask::kSize);
+  TNode<HeapObject> const microtask =
+      Allocate(PromiseResolveThenableJobTask::kSize);
   StoreMapNoWriteBarrier(microtask,
                          RootIndex::kPromiseResolveThenableJobTaskMap);
   StoreObjectFieldNoWriteBarrier(
@@ -574,8 +572,7 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
     // PromiseReaction instances and not actual JavaScript values (which
     // would indicate that we're rejecting or resolving an already settled
     // promise), see https://crbug.com/931640 for details on this.
-    TNode<Map> promise_reaction_map =
-        CAST(LoadRoot(RootIndex::kPromiseReactionMap));
+    TNode<Map> promise_reaction_map = PromiseReactionMapConstant();
 
     Label loop(this, {&var_current, &var_reversed}), done_loop(this);
     Goto(&loop);
@@ -583,7 +580,8 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
     {
       Node* current = var_current.value();
       GotoIf(TaggedIsSmi(current), &done_loop);
-      CSA_CHECK(this, WordEqual(LoadMap(CAST(current)), promise_reaction_map));
+      CSA_CHECK(this,
+                TaggedEqual(LoadMap(CAST(current)), promise_reaction_map));
       var_current.Bind(LoadObjectField(current, PromiseReaction::kNextOffset));
       StoreObjectField(current, PromiseReaction::kNextOffset,
                        var_reversed.value());
@@ -690,7 +688,7 @@ Node* PromiseBuiltinsAssembler::InvokeThen(Node* native_context, Node* receiver,
   VARIABLE(var_result, MachineRepresentation::kTagged);
   Label if_fast(this), if_slow(this, Label::kDeferred), done(this, &var_result);
   GotoIf(TaggedIsSmi(receiver), &if_slow);
-  Node* const receiver_map = LoadMap(receiver);
+  TNode<Map> const receiver_map = LoadMap(receiver);
   // We can skip the "then" lookup on {receiver} if it's [[Prototype]]
   // is the (initial) Promise.prototype and the Promise#then protector
   // is intact, as that guards the lookup path for the "then" property
@@ -700,7 +698,7 @@ Node* PromiseBuiltinsAssembler::InvokeThen(Node* native_context, Node* receiver,
 
   BIND(&if_fast);
   {
-    Node* const then =
+    TNode<Object> const then =
         LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
     Node* const result =
         CallJS(CodeFactory::CallFunction(
@@ -712,8 +710,8 @@ Node* PromiseBuiltinsAssembler::InvokeThen(Node* native_context, Node* receiver,
 
   BIND(&if_slow);
   {
-    Node* const then = GetProperty(native_context, receiver,
-                                   isolate()->factory()->then_string());
+    TNode<Object> const then = GetProperty(native_context, receiver,
+                                           isolate()->factory()->then_string());
     Node* const result = CallJS(
         CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
         native_context, then, receiver, args...);
@@ -740,8 +738,8 @@ Node* PromiseBuiltinsAssembler::CallResolve(Node* native_context,
 
   BIND(&if_fast);
   {
-    Node* const result = CallBuiltin(Builtins::kPromiseResolve, native_context,
-                                     constructor, value);
+    TNode<Object> const result = CallBuiltin(
+        Builtins::kPromiseResolve, native_context, constructor, value);
     GotoIfException(result, if_exception, var_exception);
 
     var_result.Bind(result);
@@ -766,18 +764,19 @@ Node* PromiseBuiltinsAssembler::CallResolve(Node* native_context,
 }
 
 void PromiseBuiltinsAssembler::BranchIfPromiseResolveLookupChainIntact(
-    Node* native_context, Node* constructor, Label* if_fast, Label* if_slow) {
+    Node* native_context, SloppyTNode<Object> constructor, Label* if_fast,
+    Label* if_slow) {
   CSA_ASSERT(this, IsNativeContext(native_context));
 
   GotoIfForceSlowPath(if_slow);
-  Node* const promise_fun =
+  TNode<Object> promise_fun =
       LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
-  GotoIfNot(WordEqual(promise_fun, constructor), if_slow);
+  GotoIfNot(TaggedEqual(promise_fun, constructor), if_slow);
   Branch(IsPromiseResolveProtectorCellInvalid(), if_slow, if_fast);
 }
 
 void PromiseBuiltinsAssembler::GotoIfNotPromiseResolveLookupChainIntact(
-    Node* native_context, Node* constructor, Label* if_slow) {
+    Node* native_context, SloppyTNode<Object> constructor, Label* if_slow) {
   Label if_fast(this);
   BranchIfPromiseResolveLookupChainIntact(native_context, constructor, &if_fast,
                                           if_slow);
@@ -789,10 +788,10 @@ void PromiseBuiltinsAssembler::BranchIfPromiseSpeciesLookupChainIntact(
   CSA_ASSERT(this, IsNativeContext(native_context));
   CSA_ASSERT(this, IsJSPromiseMap(promise_map));
 
-  Node* const promise_prototype =
+  TNode<Object> promise_prototype =
       LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
   GotoIfForceSlowPath(if_slow);
-  GotoIfNot(WordEqual(LoadMapPrototype(promise_map), promise_prototype),
+  GotoIfNot(TaggedEqual(LoadMapPrototype(promise_map), promise_prototype),
             if_slow);
   Branch(IsPromiseSpeciesProtectorCellInvalid(), if_slow, if_fast);
 }
@@ -804,16 +803,16 @@ void PromiseBuiltinsAssembler::BranchIfPromiseThenLookupChainIntact(
 
   GotoIfForceSlowPath(if_slow);
   GotoIfNot(IsJSPromiseMap(receiver_map), if_slow);
-  Node* const promise_prototype =
+  TNode<Object> const promise_prototype =
       LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
-  GotoIfNot(WordEqual(LoadMapPrototype(receiver_map), promise_prototype),
+  GotoIfNot(TaggedEqual(LoadMapPrototype(receiver_map), promise_prototype),
             if_slow);
   Branch(IsPromiseThenProtectorCellInvalid(), if_slow, if_fast);
 }
 
 void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
-    Node* context, Node* native_context, Node* promise_constructor,
-    Node* executor, Label* if_noaccess) {
+    SloppyTNode<Context> context, SloppyTNode<Context> native_context,
+    Node* promise_constructor, Node* executor, Label* if_noaccess) {
   VARIABLE(var_executor, MachineRepresentation::kTagged);
   var_executor.Bind(executor);
   Label has_access(this), call_runtime(this, Label::kDeferred);
@@ -824,7 +823,7 @@ void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
   Goto(&loop_over_bound_function);
   BIND(&loop_over_bound_function);
   {
-    Node* executor_type = LoadInstanceType(var_executor.value());
+    TNode<Uint16T> executor_type = LoadInstanceType(var_executor.value());
     GotoIf(InstanceTypeEqual(executor_type, JS_FUNCTION_TYPE), &found_function);
     GotoIfNot(InstanceTypeEqual(executor_type, JS_BOUND_FUNCTION_TYPE),
               &call_runtime);
@@ -838,18 +837,19 @@ void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
   // out to the runtime.
   BIND(&found_function);
   {
-    Node* function_context =
-        LoadObjectField(var_executor.value(), JSFunction::kContextOffset);
-    Node* native_function_context = LoadNativeContext(function_context);
-    Branch(WordEqual(native_context, native_function_context), &has_access,
+    TNode<Context> function_context =
+        CAST(LoadObjectField(var_executor.value(), JSFunction::kContextOffset));
+    TNode<NativeContext> native_function_context =
+        LoadNativeContext(function_context);
+    Branch(TaggedEqual(native_context, native_function_context), &has_access,
            &call_runtime);
   }
 
   BIND(&call_runtime);
   {
-    Branch(WordEqual(CallRuntime(Runtime::kAllowDynamicFunction, context,
-                                 promise_constructor),
-                     TrueConstant()),
+    Branch(TaggedEqual(CallRuntime(Runtime::kAllowDynamicFunction, context,
+                                   promise_constructor),
+                       TrueConstant()),
            &has_access, if_noaccess);
   }
 
@@ -888,12 +888,12 @@ TF_BUILTIN(PromiseCapabilityDefaultReject, PromiseBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
 
   // 2. Let promise be F.[[Promise]].
-  Node* const promise =
+  TNode<Object> const promise =
       LoadContextElement(context, PromiseBuiltins::kPromiseSlot);
 
   // 3. Let alreadyResolved be F.[[AlreadyResolved]].
   Label if_already_resolved(this, Label::kDeferred);
-  Node* const already_resolved =
+  TNode<Object> const already_resolved =
       LoadContextElement(context, PromiseBuiltins::kAlreadyResolvedSlot);
 
   // 4. If alreadyResolved.[[Value]] is true, return undefined.
@@ -904,7 +904,7 @@ TF_BUILTIN(PromiseCapabilityDefaultReject, PromiseBuiltinsAssembler) {
       context, PromiseBuiltins::kAlreadyResolvedSlot, TrueConstant());
 
   // 6. Return RejectPromise(promise, reason).
-  Node* const debug_event =
+  TNode<Object> const debug_event =
       LoadContextElement(context, PromiseBuiltins::kDebugEventSlot);
   Return(CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
                      debug_event));
@@ -922,12 +922,12 @@ TF_BUILTIN(PromiseCapabilityDefaultResolve, PromiseBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
 
   // 2. Let promise be F.[[Promise]].
-  Node* const promise =
+  TNode<Object> const promise =
       LoadContextElement(context, PromiseBuiltins::kPromiseSlot);
 
   // 3. Let alreadyResolved be F.[[AlreadyResolved]].
   Label if_already_resolved(this, Label::kDeferred);
-  Node* const already_resolved =
+  TNode<Object> const already_resolved =
       LoadContextElement(context, PromiseBuiltins::kAlreadyResolvedSlot);
 
   // 4. If alreadyResolved.[[Value]] is true, return undefined.
@@ -967,9 +967,9 @@ TF_BUILTIN(PromiseConstructorLazyDeoptContinuation, PromiseBuiltinsAssembler) {
 
 // ES6 #sec-promise-executor
 TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
-  Node* const executor = Parameter(Descriptor::kExecutor);
-  Node* const new_target = Parameter(Descriptor::kJSNewTarget);
-  Node* const context = Parameter(Descriptor::kContext);
+  TNode<Object> executor = CAST(Parameter(Descriptor::kExecutor));
+  TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   Isolate* isolate = this->isolate();
 
   Label if_targetisundefined(this, Label::kDeferred);
@@ -980,12 +980,12 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
 
   GotoIf(TaggedIsSmi(executor), &if_notcallable);
 
-  Node* const executor_map = LoadMap(executor);
+  TNode<Map> const executor_map = LoadMap(CAST(executor));
   GotoIfNot(IsCallableMap(executor_map), &if_notcallable);
 
-  Node* const native_context = LoadNativeContext(context);
-  Node* const promise_fun =
-      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<JSFunction> const promise_fun =
+      CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
   Node* const is_debug_active = IsDebugActive();
   Label if_targetisnotmodified(this),
       if_targetismodified(this, Label::kDeferred), run_executor(this),
@@ -994,7 +994,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
   BranchIfAccessCheckFailed(context, native_context, promise_fun, executor,
                             &if_noaccess);
 
-  Branch(WordEqual(promise_fun, new_target), &if_targetisnotmodified,
+  Branch(TaggedEqual(promise_fun, new_target), &if_targetisnotmodified,
          &if_targetismodified);
 
   VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -1011,8 +1011,8 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
   BIND(&if_targetismodified);
   {
     ConstructorBuiltinsAssembler constructor_assembler(this->state());
-    Node* const instance = constructor_assembler.EmitFastNewObject(
-        context, promise_fun, new_target);
+    TNode<JSObject> instance = constructor_assembler.EmitFastNewObject(
+        context, promise_fun, CAST(new_target));
     PromiseInit(instance);
     var_result.Bind(instance);
 
@@ -1071,7 +1071,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
   // Silently fail if the stack looks fishy.
   BIND(&if_noaccess);
   {
-    Node* const counter_id =
+    TNode<Smi> const counter_id =
         SmiConstant(v8::Isolate::kPromiseConstructorReturnedUndefined);
     CallRuntime(Runtime::kIncrementUseCounter, context, counter_id);
     Return(UndefinedConstant());
@@ -1139,17 +1139,17 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
   // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
   Label fast_promise_capability(this), slow_constructor(this, Label::kDeferred),
       slow_promise_capability(this, Label::kDeferred);
-  Node* const native_context = LoadNativeContext(context);
-  Node* const promise_fun =
-      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
-  Node* const promise_map = LoadMap(promise);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<JSFunction> promise_fun =
+      CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
+  TNode<Map> const promise_map = LoadMap(promise);
   BranchIfPromiseSpeciesLookupChainIntact(
       native_context, promise_map, &fast_promise_capability, &slow_constructor);
 
   BIND(&slow_constructor);
-  Node* const constructor =
+  TNode<JSReceiver> constructor =
       SpeciesConstructor(native_context, promise, promise_fun);
-  Branch(WordEqual(constructor, promise_fun), &fast_promise_capability,
+  Branch(TaggedEqual(constructor, promise_fun), &fast_promise_capability,
          &slow_promise_capability);
 
   // 4. Let resultCapability be ? NewPromiseCapability(C).
@@ -1167,9 +1167,9 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
 
   BIND(&slow_promise_capability);
   {
-    Node* const debug_event = TrueConstant();
-    Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
-                                         context, constructor, debug_event);
+    TNode<Oddball> const debug_event = TrueConstant();
+    TNode<PromiseCapability> const capability = CAST(CallBuiltin(
+        Builtins::kNewPromiseCapability, context, constructor, debug_event));
     var_result_promise.Bind(
         LoadObjectField(capability, PromiseCapability::kPromiseOffset));
     var_result_promise_or_capability.Bind(capability);
@@ -1221,26 +1221,22 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
 TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) {
   // 1. Let promise be the this value.
   Node* const receiver = Parameter(Descriptor::kReceiver);
-  Node* const on_fulfilled = UndefinedConstant();
+  TNode<Oddball> const on_fulfilled = UndefinedConstant();
   Node* const on_rejected = Parameter(Descriptor::kOnRejected);
   Node* const context = Parameter(Descriptor::kContext);
 
   // 2. Return ? Invoke(promise, "then", « undefined, onRejected »).
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   Return(InvokeThen(native_context, receiver, on_fulfilled, on_rejected));
 }
 
 // ES #sec-promiseresolvethenablejob
 TF_BUILTIN(PromiseResolveThenableJob, PromiseBuiltinsAssembler) {
-  Node* const native_context = Parameter(Descriptor::kContext);
-  Node* const promise_to_resolve = Parameter(Descriptor::kPromiseToResolve);
-  Node* const thenable = Parameter(Descriptor::kThenable);
-  Node* const then = Parameter(Descriptor::kThen);
-
-  CSA_ASSERT(this, TaggedIsNotSmi(thenable));
-  CSA_ASSERT(this, IsJSReceiver(thenable));
-  CSA_ASSERT(this, IsJSPromise(promise_to_resolve));
-  CSA_ASSERT(this, IsNativeContext(native_context));
+  TNode<NativeContext> native_context = CAST(Parameter(Descriptor::kContext));
+  TNode<JSPromise> promise_to_resolve =
+      CAST(Parameter(Descriptor::kPromiseToResolve));
+  TNode<JSReceiver> thenable = CAST(Parameter(Descriptor::kThenable));
+  TNode<Object> then = CAST(Parameter(Descriptor::kThen));
 
   // We can use a simple optimization here if we know that {then} is the initial
   // Promise.prototype.then method, and {thenable} is a JSPromise whose
@@ -1251,10 +1247,10 @@ TF_BUILTIN(PromiseResolveThenableJob, PromiseBuiltinsAssembler) {
   // We take the generic (slow-)path if a PromiseHook is enabled or the debugger
   // is active, to make sure we expose spec compliant behavior.
   Label if_fast(this), if_slow(this, Label::kDeferred);
-  Node* const promise_then =
+  TNode<Object> promise_then =
       LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
-  GotoIfNot(WordEqual(then, promise_then), &if_slow);
-  Node* const thenable_map = LoadMap(thenable);
+  GotoIfNot(TaggedEqual(then, promise_then), &if_slow);
+  TNode<Map> const thenable_map = LoadMap(thenable);
   GotoIfNot(IsJSPromiseMap(thenable_map), &if_slow);
   GotoIf(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
          &if_slow);
@@ -1379,8 +1375,8 @@ void PromiseBuiltinsAssembler::PromiseReactionJob(Node* context, Node* argument,
     {
       // In the general case we need to call the (user provided)
       // promiseCapability.[[Resolve]] function.
-      Node* const resolve = LoadObjectField(promise_or_capability,
-                                            PromiseCapability::kResolveOffset);
+      TNode<Object> const resolve = LoadObjectField(
+          promise_or_capability, PromiseCapability::kResolveOffset);
       Node* const result = CallJS(
           CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
           context, resolve, UndefinedConstant(), value);
@@ -1412,8 +1408,8 @@ void PromiseBuiltinsAssembler::PromiseReactionJob(Node* context, Node* argument,
       Label if_exception(this, Label::kDeferred);
       VARIABLE(var_exception, MachineRepresentation::kTagged,
                TheHoleConstant());
-      Node* const reject = LoadObjectField(promise_or_capability,
-                                           PromiseCapability::kRejectOffset);
+      TNode<Object> const reject = LoadObjectField(
+          promise_or_capability, PromiseCapability::kRejectOffset);
       Node* const result = CallJS(
           CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
           context, reject, UndefinedConstant(), reason);
@@ -1463,64 +1459,63 @@ TF_BUILTIN(PromiseResolveTrampoline, PromiseBuiltinsAssembler) {
   //  1. Let C be the this value.
   Node* receiver = Parameter(Descriptor::kReceiver);
   Node* value = Parameter(Descriptor::kValue);
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
   // 2. If Type(C) is not Object, throw a TypeError exception.
-  ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
-                       "PromiseResolve");
+  ThrowIfNotJSReceiver(context, CAST(receiver),
+                       MessageTemplate::kCalledOnNonObject, "PromiseResolve");
 
   // 3. Return ? PromiseResolve(C, x).
   Return(CallBuiltin(Builtins::kPromiseResolve, context, receiver, value));
 }
 
 TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
-  Node* constructor = Parameter(Descriptor::kConstructor);
-  Node* value = Parameter(Descriptor::kValue);
-  Node* context = Parameter(Descriptor::kContext);
-
-  CSA_ASSERT(this, IsJSReceiver(constructor));
+  TNode<JSReceiver> constructor = CAST(Parameter(Descriptor::kConstructor));
+  TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
-  Node* const native_context = LoadNativeContext(context);
-  Node* const promise_fun =
-      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<JSFunction> promise_fun =
+      CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
 
   Label if_slow_constructor(this, Label::kDeferred), if_need_to_allocate(this);
 
   // Check if {value} is a JSPromise.
   GotoIf(TaggedIsSmi(value), &if_need_to_allocate);
-  Node* const value_map = LoadMap(value);
+  TNode<Map> const value_map = LoadMap(CAST(value));
   GotoIfNot(IsJSPromiseMap(value_map), &if_need_to_allocate);
 
   // We can skip the "constructor" lookup on {value} if it's [[Prototype]]
   // is the (initial) Promise.prototype and the @@species protector is
   // intact, as that guards the lookup path for "constructor" on
   // JSPromise instances which have the (initial) Promise.prototype.
-  Node* const promise_prototype =
+  TNode<Object> promise_prototype =
       LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
-  GotoIfNot(WordEqual(LoadMapPrototype(value_map), promise_prototype),
+  GotoIfNot(TaggedEqual(LoadMapPrototype(value_map), promise_prototype),
             &if_slow_constructor);
   GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow_constructor);
 
   // If the {constructor} is the Promise function, we just immediately
   // return the {value} here and don't bother wrapping it into a
   // native Promise.
-  GotoIfNot(WordEqual(promise_fun, constructor), &if_slow_constructor);
+  GotoIfNot(TaggedEqual(promise_fun, constructor), &if_slow_constructor);
   Return(value);
 
   // At this point, value and/or constructor are not native promises, but
   // they could be of the same subclass.
   BIND(&if_slow_constructor);
   {
-    Node* const value_constructor =
+    TNode<Object> value_constructor =
         GetProperty(context, value, isolate()->factory()->constructor_string());
-    GotoIfNot(WordEqual(value_constructor, constructor), &if_need_to_allocate);
+    GotoIfNot(TaggedEqual(value_constructor, constructor),
+              &if_need_to_allocate);
     Return(value);
   }
 
   BIND(&if_need_to_allocate);
   {
     Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
-    Branch(WordEqual(promise_fun, constructor), &if_nativepromise,
+    Branch(TaggedEqual(promise_fun, constructor), &if_nativepromise,
            &if_notnativepromise);
 
     // This adds a fast path for native promises that don't need to
@@ -1534,17 +1529,17 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
 
     BIND(&if_notnativepromise);
     {
-      Node* const debug_event = TrueConstant();
-      Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
-                                           context, constructor, debug_event);
+      TNode<Oddball> const debug_event = TrueConstant();
+      TNode<PromiseCapability> const capability = CAST(CallBuiltin(
+          Builtins::kNewPromiseCapability, context, constructor, debug_event));
 
-      Node* const resolve =
+      TNode<Object> const resolve =
           LoadObjectField(capability, PromiseCapability::kResolveOffset);
       CallJS(
           CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
           context, resolve, UndefinedConstant(), value);
 
-      Node* const result =
+      TNode<Object> const result =
           LoadObjectField(capability, PromiseCapability::kPromiseOffset);
       Return(result);
     }
@@ -1557,8 +1552,8 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
   Node* const reject = Parameter(Descriptor::kReject);
   Node* const context = Parameter(Descriptor::kContext);
 
-  Node* const capability =
-      LoadContextElement(context, PromiseBuiltins::kCapabilitySlot);
+  TNode<PromiseCapability> const capability =
+      CAST(LoadContextElement(context, PromiseBuiltins::kCapabilitySlot));
 
   Label if_alreadyinvoked(this, Label::kDeferred);
   GotoIfNot(IsUndefined(
@@ -1579,20 +1574,20 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
 
 TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
   // 1. Let C be the this value.
-  Node* const receiver = Parameter(Descriptor::kReceiver);
-  Node* const reason = Parameter(Descriptor::kReason);
-  Node* const context = Parameter(Descriptor::kContext);
+  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+  TNode<Object> reason = CAST(Parameter(Descriptor::kReason));
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
   // 2. If Type(C) is not Object, throw a TypeError exception.
   ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
                        "PromiseReject");
 
   Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
 
-  Node* const promise_fun =
+  TNode<Object> promise_fun =
       LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
-  Branch(WordEqual(promise_fun, receiver), &if_nativepromise,
+  Branch(TaggedEqual(promise_fun, receiver), &if_nativepromise,
          &if_custompromise);
 
   BIND(&if_nativepromise);
@@ -1607,18 +1602,18 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
   BIND(&if_custompromise);
   {
     // 3. Let promiseCapability be ? NewPromiseCapability(C).
-    Node* const debug_event = TrueConstant();
-    Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
-                                         context, receiver, debug_event);
+    TNode<Oddball> const debug_event = TrueConstant();
+    TNode<PromiseCapability> const capability = CAST(CallBuiltin(
+        Builtins::kNewPromiseCapability, context, receiver, debug_event));
 
     // 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
-    Node* const reject =
+    TNode<Object> const reject =
         LoadObjectField(capability, PromiseCapability::kRejectOffset);
     CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
            context, reject, UndefinedConstant(), reason);
 
     // 5. Return promiseCapability.[[Promise]].
-    Node* const promise =
+    TNode<Object> const promise =
         LoadObjectField(capability, PromiseCapability::kPromiseOffset);
     Return(promise);
   }
@@ -1632,13 +1627,13 @@ std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
       promise_context, PromiseBuiltins::kOnFinallySlot, on_finally);
   StoreContextElementNoWriteBarrier(
       promise_context, PromiseBuiltins::kConstructorSlot, constructor);
-  Node* const map = LoadContextElement(
+  TNode<Object> const map = LoadContextElement(
       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
-  Node* const then_finally_info = LoadContextElement(
+  TNode<Object> const then_finally_info = LoadContextElement(
       native_context, Context::PROMISE_THEN_FINALLY_SHARED_FUN);
   Node* const then_finally = AllocateFunctionWithMapAndContext(
       map, then_finally_info, promise_context);
-  Node* const catch_finally_info = LoadContextElement(
+  TNode<Object> const catch_finally_info = LoadContextElement(
       native_context, Context::PROMISE_CATCH_FINALLY_SHARED_FUN);
   Node* const catch_finally = AllocateFunctionWithMapAndContext(
       map, catch_finally_info, promise_context);
@@ -1648,7 +1643,8 @@ std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
 TF_BUILTIN(PromiseValueThunkFinally, PromiseBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
 
-  Node* const value = LoadContextElement(context, PromiseBuiltins::kValueSlot);
+  TNode<Object> const value =
+      LoadContextElement(context, PromiseBuiltins::kValueSlot);
   Return(value);
 }
 
@@ -1658,9 +1654,9 @@ Node* PromiseBuiltinsAssembler::CreateValueThunkFunction(Node* value,
       native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength);
   StoreContextElementNoWriteBarrier(value_thunk_context,
                                     PromiseBuiltins::kValueSlot, value);
-  Node* const map = LoadContextElement(
+  TNode<Object> const map = LoadContextElement(
       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
-  Node* const value_thunk_info = LoadContextElement(
+  TNode<Object> const value_thunk_info = LoadContextElement(
       native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN);
   Node* const value_thunk = AllocateFunctionWithMapAndContext(
       map, value_thunk_info, value_thunk_context);
@@ -1674,8 +1670,8 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
 
   // 1. Let onFinally be F.[[OnFinally]].
-  Node* const on_finally =
-      LoadContextElement(context, PromiseBuiltins::kOnFinallySlot);
+  TNode<HeapObject> const on_finally =
+      CAST(LoadContextElement(context, PromiseBuiltins::kOnFinallySlot));
 
   // 2.  Assert: IsCallable(onFinally) is true.
   CSA_ASSERT(this, IsCallable(on_finally));
@@ -1686,18 +1682,18 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
       context, on_finally, UndefinedConstant());
 
   // 4. Let C be F.[[Constructor]].
-  Node* const constructor =
-      LoadContextElement(context, PromiseBuiltins::kConstructorSlot);
+  TNode<JSFunction> const constructor =
+      CAST(LoadContextElement(context, PromiseBuiltins::kConstructorSlot));
 
   // 5. Assert: IsConstructor(C) is true.
   CSA_ASSERT(this, IsConstructor(constructor));
 
   // 6. Let promise be ? PromiseResolve(C, result).
-  Node* const promise =
-    CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
+  TNode<Object> const promise =
+      CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
 
   // 7. Let valueThunk be equivalent to a function that returns value.
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   Node* const value_thunk = CreateValueThunkFunction(value, native_context);
 
   // 8. Return ? Invoke(promise, "then", « valueThunk »).
@@ -1707,7 +1703,8 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
 TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
 
-  Node* const reason = LoadContextElement(context, PromiseBuiltins::kValueSlot);
+  TNode<Object> const reason =
+      LoadContextElement(context, PromiseBuiltins::kValueSlot);
   CallRuntime(Runtime::kThrow, context, reason);
   Unreachable();
 }
@@ -1718,9 +1715,9 @@ Node* PromiseBuiltinsAssembler::CreateThrowerFunction(Node* reason,
       native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength);
   StoreContextElementNoWriteBarrier(thrower_context,
                                     PromiseBuiltins::kValueSlot, reason);
-  Node* const map = LoadContextElement(
+  TNode<Object> const map = LoadContextElement(
       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
-  Node* const thrower_info = LoadContextElement(
+  TNode<Object> const thrower_info = LoadContextElement(
       native_context, Context::PROMISE_THROWER_FINALLY_SHARED_FUN);
   Node* const thrower =
       AllocateFunctionWithMapAndContext(map, thrower_info, thrower_context);
@@ -1734,8 +1731,8 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
   Node* const context = Parameter(Descriptor::kContext);
 
   // 1. Let onFinally be F.[[OnFinally]].
-  Node* const on_finally =
-      LoadContextElement(context, PromiseBuiltins::kOnFinallySlot);
+  TNode<HeapObject> const on_finally =
+      CAST(LoadContextElement(context, PromiseBuiltins::kOnFinallySlot));
 
   // 2. Assert: IsCallable(onFinally) is true.
   CSA_ASSERT(this, IsCallable(on_finally));
@@ -1746,18 +1743,18 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
       context, on_finally, UndefinedConstant());
 
   // 4. Let C be F.[[Constructor]].
-  Node* const constructor =
-      LoadContextElement(context, PromiseBuiltins::kConstructorSlot);
+  TNode<JSFunction> const constructor =
+      CAST(LoadContextElement(context, PromiseBuiltins::kConstructorSlot));
 
   // 5. Assert: IsConstructor(C) is true.
   CSA_ASSERT(this, IsConstructor(constructor));
 
   // 6. Let promise be ? PromiseResolve(C, result).
-  Node* const promise =
-    CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
+  TNode<Object> const promise =
+      CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
 
   // 7. Let thrower be equivalent to a function that throws reason.
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   Node* const thrower = CreateThrowerFunction(reason, native_context);
 
   // 8. Return ? Invoke(promise, "then", « thrower »).
@@ -1770,26 +1767,27 @@ TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
   // 1.  Let promise be the this value.
   Node* const receiver = Parameter(Descriptor::kReceiver);
   Node* const on_finally = Parameter(Descriptor::kOnFinally);
-  Node* const context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
   // 2. If Type(promise) is not Object, throw a TypeError exception.
-  ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+  ThrowIfNotJSReceiver(context, CAST(receiver),
+                       MessageTemplate::kCalledOnNonObject,
                        "Promise.prototype.finally");
 
   // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
-  Node* const native_context = LoadNativeContext(context);
-  Node* const promise_fun =
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Object> const promise_fun =
       LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
   VARIABLE(var_constructor, MachineRepresentation::kTagged, promise_fun);
   Label slow_constructor(this, Label::kDeferred), done_constructor(this);
-  Node* const receiver_map = LoadMap(receiver);
+  TNode<Map> const receiver_map = LoadMap(receiver);
   GotoIfNot(IsJSPromiseMap(receiver_map), &slow_constructor);
   BranchIfPromiseSpeciesLookupChainIntact(native_context, receiver_map,
                                           &done_constructor, &slow_constructor);
   BIND(&slow_constructor);
   {
-    Node* const constructor =
-        SpeciesConstructor(context, receiver, promise_fun);
+    TNode<JSReceiver> const constructor =
+        SpeciesConstructor(context, receiver, CAST(promise_fun));
     var_constructor.Bind(constructor);
     Goto(&done_constructor);
   }
@@ -1819,7 +1817,7 @@ TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
   Node* then_finally = nullptr;
   Node* catch_finally = nullptr;
   std::tie(then_finally, catch_finally) =
-    CreatePromiseFinallyFunctions(on_finally, constructor, native_context);
+      CreatePromiseFinallyFunctions(on_finally, constructor, native_context);
   var_then_finally.Bind(then_finally);
   var_catch_finally.Bind(catch_finally);
   Goto(&perform_finally);
@@ -1850,7 +1848,7 @@ TF_BUILTIN(FulfillPromise, PromiseBuiltinsAssembler) {
   CSA_ASSERT(this, IsJSPromise(promise));
 
   // 2. Let reactions be promise.[[PromiseFulfillReactions]].
-  Node* const reactions =
+  TNode<Object> const reactions =
       LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
 
   // 3. Set promise.[[PromiseResult]] to value.
@@ -1892,7 +1890,7 @@ TF_BUILTIN(RejectPromise, PromiseBuiltinsAssembler) {
   GotoIfNot(PromiseHasHandler(promise), &if_runtime);
 
   // 2. Let reactions be promise.[[PromiseRejectReactions]].
-  Node* reactions =
+  TNode<Object> reactions =
       LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
 
   // 3. Set promise.[[PromiseResult]] to reason.
@@ -1914,17 +1912,14 @@ TF_BUILTIN(RejectPromise, PromiseBuiltinsAssembler) {
 
 // ES #sec-promise-resolve-functions
 TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
-  Node* const promise = Parameter(Descriptor::kPromise);
-  Node* const resolution = Parameter(Descriptor::kResolution);
-  Node* const context = Parameter(Descriptor::kContext);
-
-  CSA_ASSERT(this, TaggedIsNotSmi(promise));
-  CSA_ASSERT(this, IsJSPromise(promise));
+  TNode<JSPromise> const promise = CAST(Parameter(Descriptor::kPromise));
+  TNode<Object> const resolution = CAST(Parameter(Descriptor::kResolution));
+  TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
 
   Label do_enqueue(this), if_fulfill(this), if_reject(this, Label::kDeferred),
       if_runtime(this, Label::kDeferred);
-  VARIABLE(var_reason, MachineRepresentation::kTagged);
-  VARIABLE(var_then, MachineRepresentation::kTagged);
+  TVARIABLE(Object, var_reason);
+  TVARIABLE(Object, var_then);
 
   // If the promise hook is enabled or the debugger is active, let
   // the runtime handle this operation, which greatly reduces
@@ -1936,11 +1931,11 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
   // 6. If SameValue(resolution, promise) is true, then
   // We can use pointer comparison here, since the {promise} is guaranteed
   // to be a JSPromise inside this function and thus is reference comparable.
-  GotoIf(WordEqual(promise, resolution), &if_runtime);
+  GotoIf(TaggedEqual(promise, resolution), &if_runtime);
 
   // 7. If Type(resolution) is not Object, then
   GotoIf(TaggedIsSmi(resolution), &if_fulfill);
-  Node* const resolution_map = LoadMap(resolution);
+  TNode<Map> resolution_map = LoadMap(CAST(resolution));
   GotoIfNot(IsJSReceiverMap(resolution_map), &if_fulfill);
 
   // We can skip the "then" lookup on {resolution} if its [[Prototype]]
@@ -1948,21 +1943,19 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
   // is intact, as that guards the lookup path for the "then" property
   // on JSPromise instances which have the (initial) %PromisePrototype%.
   Label if_fast(this), if_receiver(this), if_slow(this, Label::kDeferred);
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   GotoIfForceSlowPath(&if_slow);
   GotoIf(IsPromiseThenProtectorCellInvalid(), &if_slow);
   GotoIfNot(IsJSPromiseMap(resolution_map), &if_receiver);
-  Node* const promise_prototype =
+  TNode<Object> const promise_prototype =
       LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
-  Branch(WordEqual(LoadMapPrototype(resolution_map), promise_prototype),
+  Branch(TaggedEqual(LoadMapPrototype(resolution_map), promise_prototype),
          &if_fast, &if_slow);
 
   BIND(&if_fast);
   {
     // The {resolution} is a native Promise in this case.
-    Node* const then =
-        LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
-    var_then.Bind(then);
+    var_then = LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
     Goto(&do_enqueue);
   }
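
The WordEqual-to-TaggedEqual switches above compare tagged object references as tagged values rather than as raw machine words, which matters once tagged values and machine words can differ in width (e.g. under pointer compression). For the SameValue(resolution, promise) step this is plain reference identity, since a JSPromise is reference comparable. A small sketch under that assumption:

    #include <cstdint>
    #include <iostream>

    // Assumption for illustration: tagged values are 32-bit compressed
    // offsets into the heap cage, narrower than a machine word.
    using Tagged = uint32_t;

    // TaggedEqual compares the tagged representation: true iff both are
    // references to the same heap object.
    bool TaggedEqual(Tagged a, Tagged b) { return a == b; }

    int main() {
      Tagged promise = 0x1234;
      Tagged resolution = promise;  // resolving a promise with itself
      std::cout << TaggedEqual(promise, resolution) << "\n";  // 1 -> if_runtime
      std::cout << TaggedEqual(promise, 0x5678) << "\n";      // 0 -> continue
    }
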
 
@@ -1975,16 +1968,16 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
     // results from async generators.
     CSA_ASSERT(this, IsJSReceiverMap(resolution_map));
     CSA_ASSERT(this, Word32BinaryNot(IsPromiseThenProtectorCellInvalid()));
-    Node* const iterator_result_map =
+    TNode<Object> const iterator_result_map =
         LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
-    Branch(WordEqual(resolution_map, iterator_result_map), &if_fulfill,
+    Branch(TaggedEqual(resolution_map, iterator_result_map), &if_fulfill,
            &if_slow);
   }
 
   BIND(&if_slow);
   {
     // 8. Let then be Get(resolution, "then").
-    Node* const then =
+    TNode<Object> then =
         GetProperty(context, resolution, isolate()->factory()->then_string());
 
     // 9. If then is an abrupt completion, then
@@ -1992,9 +1985,9 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
 
     // 11. If IsCallable(thenAction) is false, then
     GotoIf(TaggedIsSmi(then), &if_fulfill);
-    Node* const then_map = LoadMap(then);
+    TNode<Map> const then_map = LoadMap(CAST(then));
     GotoIfNot(IsCallableMap(then_map), &if_fulfill);
-    var_then.Bind(then);
+    var_then = then;
     Goto(&do_enqueue);
   }
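
Both branches above feed var_then: the fast path reads %PromisePrototype%.then straight out of the native context, guarded by the then-protector cell, while the slow path performs a generic "then" lookup that can run user code. A self-contained sketch of the protector pattern (names and shapes are illustrative, not V8's):

    #include <functional>
    #include <iostream>
    #include <utility>

    struct ProtectorCell { bool valid = true; };

    ProtectorCell promise_then_protector;
    std::function<void()> context_then = [] { std::cout << "builtin then\n"; };

    // The first modification of Promise.prototype.then invalidates the cell.
    void PatchPromiseThen(std::function<void()> replacement) {
      promise_then_protector.valid = false;
      context_then = std::move(replacement);
    }

    void ResolveWithThenable() {
      if (promise_then_protector.valid) {
        context_then();  // fast: skip the property lookup entirely
      } else {
        std::cout << "slow: GetProperty(resolution, \"then\")\n";
      }
    }

    int main() {
      ResolveWithThenable();                                // fast path
      PatchPromiseThen([] { std::cout << "user then\n"; });
      ResolveWithThenable();                                // slow path
    }
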
 
@@ -2032,7 +2025,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
     Label* if_exception, Variable* var_exception) {
   IteratorBuiltinsAssembler iter_assembler(state());
 
-  TNode<NativeContext> native_context = Cast(LoadNativeContext(context));
+  TNode<NativeContext> native_context = LoadNativeContext(context);
 
   // For catch prediction, don't treat the .then calls as handling it;
   // instead, recurse outwards.
@@ -2120,9 +2113,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
     // Set resolveElement.[[Values]] to values.
     // Set resolveElement.[[Capability]] to resultCapability.
     // Set resolveElement.[[RemainingElements]] to remainingElementsCount.
-    Node* const resolve_element_fun = create_resolve_element_function(
+    TNode<Object> const resolve_element_fun = create_resolve_element_function(
         resolve_element_context, index, native_context, Cast(capability));
-    Node* const reject_element_fun = create_reject_element_function(
+    TNode<Object> const reject_element_fun = create_reject_element_function(
         resolve_element_context, index, native_context, Cast(capability));
 
     // We can skip the "resolve" lookup on the {constructor} as well as the
@@ -2148,7 +2141,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
            &if_slow);
     GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow);
     GotoIf(TaggedIsSmi(next_value), &if_slow);
-    Node* const next_value_map = LoadMap(CAST(next_value));
+    TNode<Map> const next_value_map = LoadMap(CAST(next_value));
     BranchIfPromiseThenLookupChainIntact(native_context, next_value_map,
                                          &if_fast, &if_slow);
 
@@ -2172,7 +2165,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
 
       // Perform ? Invoke(nextPromise, "then", « resolveElement,
       //                  resultCapability.[[Reject]] »).
-      Node* const then =
+      TNode<Object> const then =
           GetProperty(native_context, next_promise, factory()->then_string());
       GotoIfException(then, &close_iterator, var_exception);
 
@@ -2205,7 +2198,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
     // function and pass the larger indices via a separate context, but it
     // doesn't seem likely that we need this, and it's unclear how the rest
     // of the system deals with 2**21 live Promises anyway.
-    Node* const result =
+    TNode<Object> const result =
         CallRuntime(Runtime::kThrowRangeError, native_context,
                     SmiConstant(MessageTemplate::kTooManyElementsInPromiseAll));
     GotoIfException(result, &close_iterator, var_exception);
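
A worked version of the guard above, taking only the 2**21 bound from the comment (the encoding that imposes it is simplified away):

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>

    constexpr uint32_t kMaxElements = 1u << 21;  // limit quoted in the comment

    uint32_t BumpIndex(uint32_t index) {
      if (index >= kMaxElements)  // kTooManyElementsInPromiseAll
        throw std::range_error("Too many elements in Promise.all");
      return index + 1;
    }

    int main() {
      uint32_t index = kMaxElements - 1;
      index = BumpIndex(index);  // fine: reaches the limit exactly
      try {
        BumpIndex(index);        // one past the limit: RangeError
      } catch (const std::range_error& e) {
        std::cout << e.what() << "\n";
      }
    }
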
@@ -2241,14 +2234,14 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
     // capacity here. We may already have elements in case some fancy
     // Thenable calls the resolve callback immediately, so we need to
     // handle that correctly here.
-    Node* const values_array = LoadContextElement(
+    TNode<JSArray> const values_array = CAST(LoadContextElement(
         resolve_element_context,
-        PromiseBuiltins::kPromiseAllResolveElementValuesArraySlot);
-    Node* const old_elements = LoadElements(values_array);
+        PromiseBuiltins::kPromiseAllResolveElementValuesArraySlot));
+    TNode<FixedArrayBase> const old_elements = LoadElements(values_array);
     TNode<Smi> const old_capacity = LoadFixedArrayBaseLength(old_elements);
     TNode<Smi> const new_capacity = var_index.value();
     GotoIf(SmiGreaterThanOrEqual(old_capacity, new_capacity), &return_promise);
-    Node* const new_elements =
+    TNode<FixedArrayBase> const new_elements =
         AllocateFixedArray(PACKED_ELEMENTS, new_capacity, SMI_PARAMETERS,
                            AllocationFlag::kAllowLargeObjectAllocation);
     CopyFixedArrayElements(PACKED_ELEMENTS, old_elements, PACKED_ELEMENTS,
@@ -2263,9 +2256,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
     //                    « valuesArray »).
     BIND(&resolve_promise);
     {
-      Node* const resolve =
+      TNode<Object> const resolve =
           LoadObjectField(capability, PromiseCapability::kResolveOffset);
-      Node* const values_array = LoadContextElement(
+      TNode<Object> const values_array = LoadContextElement(
           resolve_element_context,
           PromiseBuiltins::kPromiseAllResolveElementValuesArraySlot);
       Node* const resolve_call = CallJS(
@@ -2279,7 +2272,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
     BIND(&return_promise);
   }
 
-  Node* const promise =
+  TNode<Object> const promise =
       LoadObjectField(capability, PromiseCapability::kPromiseOffset);
   return promise;
 }
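
The values-array growth earlier in this function (AllocateFixedArray followed by CopyFixedArrayElements) is the usual grow-and-copy pattern: if the required capacity exceeds the current backing store, allocate a larger store and copy the old elements across. A sketch of just that pattern:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Grow `values` to `new_capacity` slots, as AllocateFixedArray +
    // CopyFixedArrayElements do above; a no-op when already large enough.
    std::vector<int> EnsureCapacity(std::vector<int> values, size_t new_capacity) {
      if (values.size() >= new_capacity) return values;  // old_capacity >= new
      std::vector<int> grown(new_capacity);              // AllocateFixedArray
      std::copy(values.begin(), values.end(), grown.begin());
      return grown;
    }

    int main() {
      std::vector<int> values(4);
      values = EnsureCapacity(std::move(values), 9);
      assert(values.size() == 9);
    }
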
@@ -2298,9 +2291,9 @@ void PromiseBuiltinsAssembler::Generate_PromiseAll(
   // Let promiseCapability be ? NewPromiseCapability(C).
   // Don't fire debugEvent so that forwarding the rejection through Promise.all
   // does not trigger redundant ExceptionEvents.
-  Node* const debug_event = FalseConstant();
-  Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability, context,
-                                       receiver, debug_event);
+  TNode<Oddball> const debug_event = FalseConstant();
+  TNode<PromiseCapability> const capability = CAST(CallBuiltin(
+      Builtins::kNewPromiseCapability, context, receiver, debug_event));
 
   VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
   Label reject_promise(this, &var_exception, Label::kDeferred);
@@ -2325,12 +2318,12 @@ void PromiseBuiltinsAssembler::Generate_PromiseAll(
   {
     // Exception must be bound to a JS value.
     CSA_SLOW_ASSERT(this, IsNotTheHole(var_exception.value()));
-    Node* const reject =
+    TNode<Object> const reject =
         LoadObjectField(capability, PromiseCapability::kRejectOffset);
     CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
            context, reject, UndefinedConstant(), var_exception.value());
 
-    Node* const promise =
+    TNode<Object> const promise =
         LoadObjectField(capability, PromiseCapability::kPromiseOffset);
     Return(promise);
   }
@@ -2397,7 +2390,7 @@ void PromiseBuiltinsAssembler::Generate_PromiseAllResolveElementClosure(
       this,
       SmiEqual(LoadObjectField<Smi>(context, Context::kLengthOffset),
                SmiConstant(PromiseBuiltins::kPromiseAllResolveElementLength)));
-  TNode<NativeContext> native_context = Cast(LoadNativeContext(context));
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   StoreObjectField(function, JSFunction::kContextOffset, native_context);
 
   // Update the value depending on whether Promise.all or
@@ -2578,19 +2571,19 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
 
   Node* const receiver = Parameter(Descriptor::kReceiver);
   TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
-  ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
-                       "Promise.race");
+  ThrowIfNotJSReceiver(context, CAST(receiver),
+                       MessageTemplate::kCalledOnNonObject, "Promise.race");
 
   // Let promiseCapability be ? NewPromiseCapability(C).
   // Don't fire debugEvent so that forwarding the rejection through Promise.race
   // does not trigger redundant ExceptionEvents.
-  Node* const debug_event = FalseConstant();
-  Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability, context,
-                                       receiver, debug_event);
+  TNode<Oddball> const debug_event = FalseConstant();
+  TNode<PromiseCapability> const capability = CAST(CallBuiltin(
+      Builtins::kNewPromiseCapability, context, receiver, debug_event));
 
-  Node* const resolve =
+  TNode<Object> const resolve =
       LoadObjectField(capability, PromiseCapability::kResolveOffset);
-  Node* const reject =
+  TNode<Object> const reject =
       LoadObjectField(capability, PromiseCapability::kRejectOffset);
 
   Label close_iterator(this, Label::kDeferred);
@@ -2613,7 +2606,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
     // as that guards the lookup path for the "resolve" property on the
     // Promise constructor.
     Label loop(this), break_loop(this), if_slow(this, Label::kDeferred);
-    Node* const native_context = LoadNativeContext(context);
+    TNode<NativeContext> const native_context = LoadNativeContext(context);
     TVARIABLE(Object, var_promise_resolve_function, UndefinedConstant());
     GotoIfNotPromiseResolveLookupChainIntact(native_context, receiver,
                                              &if_slow);
@@ -2662,7 +2655,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
 
       // Perform ? Invoke(nextPromise, "then", « resolveElement,
       //                  resultCapability.[[Reject]] »).
-      Node* const then =
+      TNode<Object> const then =
           GetProperty(context, next_promise, factory()->then_string());
       GotoIfException(then, &close_iterator, &var_exception);
 
@@ -2694,12 +2687,12 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
 
   BIND(&reject_promise);
   {
-    Node* const reject =
+    TNode<Object> const reject =
         LoadObjectField(capability, PromiseCapability::kRejectOffset);
     CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
            context, reject, UndefinedConstant(), var_exception.value());
 
-    Node* const promise =
+    TNode<Object> const promise =
         LoadObjectField(capability, PromiseCapability::kPromiseOffset);
     Return(promise);
   }
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 71443ca920c330..633e3321aa17d3 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -35,9 +35,6 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
   Node* AllocatePromiseReaction(Node* next, Node* promise_or_capability,
                                 Node* fulfill_handler, Node* reject_handler);
 
-  Node* AllocatePromiseReactionJobTask(RootIndex map_root_index, Node* context,
-                                       Node* argument, Node* handler,
-                                       Node* promise_or_capability);
   Node* AllocatePromiseReactionJobTask(Node* map, Node* context, Node* argument,
                                        Node* handler,
                                        Node* promise_or_capability);
@@ -91,10 +88,10 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
   // that guards the lookup path for the "resolve" property on the %Promise%
   // intrinsic object.
   void BranchIfPromiseResolveLookupChainIntact(Node* native_context,
-                                               Node* constructor,
+                                               SloppyTNode<Object> constructor,
                                                Label* if_fast, Label* if_slow);
   void GotoIfNotPromiseResolveLookupChainIntact(Node* native_context,
-                                                Node* constructor,
+                                                SloppyTNode<Object> constructor,
                                                 Label* if_slow);
 
   // We can shortcut the SpeciesConstructor on {promise_map} if it's
@@ -120,7 +117,8 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
   template <typename... TArgs>
   Node* InvokeThen(Node* native_context, Node* receiver, TArgs... args);
 
-  void BranchIfAccessCheckFailed(Node* context, Node* native_context,
+  void BranchIfAccessCheckFailed(SloppyTNode<Context> context,
+                                 SloppyTNode<Context> native_context,
                                  Node* promise_constructor, Node* executor,
                                  Label* if_noaccess);
 
@@ -153,13 +151,14 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
   void SetPromiseHandledByIfTrue(Node* context, Node* condition, Node* promise,
                                  const NodeGenerator& handled_by);
 
-  Node* PromiseStatus(Node* promise);
+  TNode<Word32T> PromiseStatus(Node* promise);
 
   void PromiseReactionJob(Node* context, Node* argument, Node* handler,
                           Node* promise_or_capability,
                           PromiseReaction::Type type);
 
-  Node* IsPromiseStatus(Node* actual, v8::Promise::PromiseState expected);
+  TNode<BoolT> IsPromiseStatus(TNode<Word32T> actual,
+                               v8::Promise::PromiseState expected);
   void PromiseSetStatus(Node* promise, v8::Promise::PromiseState status);
 
   Node* AllocateJSPromise(Node* context);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 948540ea5f1d1d..bb1137735cdcf3 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -8,6 +8,7 @@
 #include "src/builtins/builtins.h"
 
 #include "src/logging/counters.h"
+#include "src/objects/js-proxy.h"
 #include "src/objects/objects-inl.h"
 
 namespace v8 {
@@ -21,7 +22,7 @@ compiler::TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy(
   Label callable_target(this), constructor_target(this), none_target(this),
       create_proxy(this);
 
-  Node* nativeContext = LoadNativeContext(context);
+  TNode<NativeContext> nativeContext = LoadNativeContext(context);
 
   Branch(IsCallable(target), &callable_target, &none_target);
 
@@ -47,7 +48,7 @@ compiler::TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy(
   }
 
   BIND(&create_proxy);
-  Node* proxy = Allocate(JSProxy::kSize);
+  TNode<HeapObject> proxy = Allocate(JSProxy::kSize);
   StoreMapNoWriteBarrier(proxy, map.value());
   StoreObjectFieldRoot(proxy, JSProxy::kPropertiesOrHashOffset,
                        RootIndex::kEmptyPropertyDictionary);
@@ -69,8 +70,8 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
   GotoIf(SmiEqual(length, SmiConstant(0)), &if_empty_array);
   {
     Label if_large_object(this, Label::kDeferred);
-    Node* allocated_elements = AllocateFixedArray(PACKED_ELEMENTS, argc, mode,
-                                                  kAllowLargeObjectAllocation);
+    TNode<FixedArrayBase> allocated_elements = AllocateFixedArray(
+        PACKED_ELEMENTS, argc, mode, kAllowLargeObjectAllocation);
     elements.Bind(allocated_elements);
 
     TVARIABLE(IntPtrT, offset,
@@ -104,7 +105,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
 
   BIND(&allocate_js_array);
   // Allocate the result JSArray.
-  Node* native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<Map> array_map =
       LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
   TNode<JSArray> array =
@@ -115,23 +116,24 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
 
 Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
     Node* proxy, Node* native_context) {
-  Node* const context = Allocate(FixedArray::SizeFor(kProxyContextLength));
+  TNode<HeapObject> const context =
+      Allocate(FixedArray::SizeFor(kProxyContextLength));
   StoreMapNoWriteBarrier(context, RootIndex::kFunctionContextMap);
   InitializeFunctionContext(native_context, context, kProxyContextLength);
-  StoreContextElementNoWriteBarrier(context, kProxySlot, proxy);
+  StoreContextElementNoWriteBarrier(CAST(context), kProxySlot, proxy);
   return context;
 }
 
 compiler::TNode<JSFunction>
 ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(TNode<Context> context,
                                                       TNode<JSProxy> proxy) {
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
 
   Node* const proxy_context =
       CreateProxyRevokeFunctionContext(proxy, native_context);
-  Node* const revoke_map = LoadContextElement(
+  TNode<Object> const revoke_map = LoadContextElement(
       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
-  Node* const revoke_info =
+  TNode<Object> const revoke_info =
       LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN);
 
   return CAST(AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
@@ -140,11 +142,10 @@ ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(TNode<Context> context,
 
 TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
   Node* argc = Parameter(Descriptor::kActualArgumentsCount);
-  Node* argc_ptr = ChangeInt32ToIntPtr(argc);
-  Node* proxy = Parameter(Descriptor::kFunction);
+  TNode<IntPtrT> argc_ptr = ChangeInt32ToIntPtr(argc);
+  TNode<JSProxy> proxy = CAST(Parameter(Descriptor::kFunction));
   Node* context = Parameter(Descriptor::kContext);
 
-  CSA_ASSERT(this, IsJSProxy(proxy));
   CSA_ASSERT(this, IsCallable(proxy));
 
   PerformStackCheck(CAST(context));
@@ -153,7 +154,8 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
       trap_undefined(this);
 
   // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
-  Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
+  TNode<HeapObject> handler =
+      CAST(LoadObjectField(proxy, JSProxy::kHandlerOffset));
 
   // 2. If handler is null, throw a TypeError exception.
   CSA_ASSERT(this, IsNullOrJSReceiver(handler));
@@ -163,7 +165,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
   CSA_ASSERT(this, IsJSReceiver(handler));
 
   // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
-  Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
+  TNode<Object> target = LoadObjectField(proxy, JSProxy::kTargetOffset);
 
   // 5. Let trap be ? GetMethod(handler, "apply").
   // 6. If trap is undefined, then
@@ -171,7 +173,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
   Node* trap = GetMethod(context, handler, trap_name, &trap_undefined);
 
   CodeStubArguments args(this, argc_ptr);
-  Node* receiver = args.GetReceiver();
+  TNode<Object> receiver = args.GetReceiver();
 
   // 7. Let argArray be CreateArrayFromList(argumentsList).
   Node* array = AllocateJSArrayForCodeStubArguments(context, args, argc_ptr,
@@ -194,7 +196,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
 
 TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
   Node* argc = Parameter(Descriptor::kActualArgumentsCount);
-  Node* argc_ptr = ChangeInt32ToIntPtr(argc);
+  TNode<IntPtrT> argc_ptr = ChangeInt32ToIntPtr(argc);
   Node* proxy = Parameter(Descriptor::kTarget);
   Node* new_target = Parameter(Descriptor::kNewTarget);
   Node* context = Parameter(Descriptor::kContext);
@@ -206,7 +208,8 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
       trap_undefined(this), not_an_object(this, Label::kDeferred);
 
   // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
-  Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
+  TNode<HeapObject> handler =
+      CAST(LoadObjectField(proxy, JSProxy::kHandlerOffset));
 
   // 2. If handler is null, throw a TypeError exception.
   CSA_ASSERT(this, IsNullOrJSReceiver(handler));
@@ -216,7 +219,7 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
   CSA_ASSERT(this, IsJSReceiver(handler));
 
   // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
-  Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
+  TNode<Object> target = LoadObjectField(proxy, JSProxy::kTargetOffset);
 
   // 5. Let trap be ? GetMethod(handler, "construct").
   // 6. If trap is undefined, then
@@ -248,7 +251,7 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
   BIND(&trap_undefined);
   {
     // 6.a. Assert: target has a [[Construct]] internal method.
-    CSA_ASSERT(this, IsConstructor(target));
+    CSA_ASSERT(this, IsConstructor(CAST(target)));
 
     // 6.b. Return ? Construct(target, argumentsList, newTarget).
     TailCallStub(CodeFactory::Construct(isolate()), context, target, new_target,
@@ -264,7 +267,7 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
     TNode<Name> name, TNode<Object> trap_result,
     JSProxy::AccessKind access_kind) {
   // TODO(mslekova): Think of a better name for the trap_result param.
-  Node* map = LoadMap(target);
+  TNode<Map> map = LoadMap(target);
   VARIABLE(var_value, MachineRepresentation::kTagged);
   VARIABLE(var_details, MachineRepresentation::kWord32);
   VARIABLE(var_raw_value, MachineRepresentation::kTagged);
@@ -273,7 +276,7 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
       check_passed(this);
 
   GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
-  Node* instance_type = LoadInstanceType(target);
+  TNode<Uint16T> instance_type = LoadInstanceType(target);
   TryGetOwnProperty(context, target, target, map, instance_type, name,
                     &if_found_value, &var_value, &var_details, &var_raw_value,
                     &check_passed, &check_in_runtime, kReturnAccessorPair);
@@ -296,8 +299,8 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
 
     BIND(&check_data);
     {
-      Node* read_only = IsSetWord32(var_details.value(),
-                                    PropertyDetails::kAttributesReadOnlyMask);
+      TNode<BoolT> read_only = IsSetWord32(
+          var_details.value(), PropertyDetails::kAttributesReadOnlyMask);
       GotoIfNot(read_only, &check_passed);
 
       // If SameValue(trapResult, targetDesc.[[Value]]) is false,
@@ -314,7 +317,7 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
         Label continue_check(this, Label::kDeferred);
         // 10.b. If IsAccessorDescriptor(targetDesc) is true and
         // targetDesc.[[Get]] is undefined, then:
-        Node* getter =
+        TNode<Object> getter =
             LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
         // We check for null as well because a getter that was never
         // defined is stored as null.
@@ -328,7 +331,7 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
       } else {
         // 11.b.i. If targetDesc.[[Set]] is undefined, throw a TypeError
         // exception.
-        Node* setter =
+        TNode<Object> setter =
             LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
         GotoIf(IsUndefined(setter), &throw_non_configurable_accessor);
         GotoIf(IsNull(setter), &throw_non_configurable_accessor);
@@ -372,7 +375,7 @@ void ProxiesCodeStubAssembler::CheckHasTrapResult(TNode<Context> context,
                                                   TNode<JSReceiver> target,
                                                   TNode<JSProxy> proxy,
                                                   TNode<Name> name) {
-  Node* target_map = LoadMap(target);
+  TNode<Map> target_map = LoadMap(target);
   VARIABLE(var_value, MachineRepresentation::kTagged);
   VARIABLE(var_details, MachineRepresentation::kWord32);
   VARIABLE(var_raw_value, MachineRepresentation::kTagged);
@@ -384,7 +387,7 @@ void ProxiesCodeStubAssembler::CheckHasTrapResult(TNode<Context> context,
 
   // 9.a. Let targetDesc be ? target.[[GetOwnProperty]](P).
   GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
-  Node* instance_type = LoadInstanceType(target);
+  TNode<Uint16T> instance_type = LoadInstanceType(target);
   TryGetOwnProperty(context, target, target, target_map, instance_type, name,
                     &if_found_value, &var_value, &var_details, &var_raw_value,
                     &check_passed, &check_in_runtime, kReturnAccessorPair);
@@ -394,12 +397,12 @@ void ProxiesCodeStubAssembler::CheckHasTrapResult(TNode<Context> context,
   {
     // 9.b.i. If targetDesc.[[Configurable]] is false, throw a TypeError
     // exception.
-    Node* non_configurable = IsSetWord32(
+    TNode<BoolT> non_configurable = IsSetWord32(
         var_details.value(), PropertyDetails::kAttributesDontDeleteMask);
     GotoIf(non_configurable, &throw_non_configurable);
 
     // 9.b.ii. Let extensibleTarget be ? IsExtensible(target).
-    Node* target_extensible = IsExtensibleMap(target_map);
+    TNode<BoolT> target_extensible = IsExtensibleMap(target_map);
 
     // 9.b.iii. If extensibleTarget is false, throw a TypeError exception.
     GotoIfNot(target_extensible, &throw_non_extensible);
@@ -437,7 +440,7 @@ void ProxiesCodeStubAssembler::CheckDeleteTrapResult(TNode<Context> context,
 
   // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
   GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
-  TNode<Int32T> instance_type = LoadInstanceType(target);
+  TNode<Uint16T> instance_type = LoadInstanceType(target);
   TryGetOwnProperty(context, target, target, target_map, instance_type, name,
                     &if_found_value, &var_value, &var_details, &var_raw_value,
                     &check_passed, &check_in_runtime, kReturnAccessorPair);
diff --git a/deps/v8/src/builtins/builtins-reflect-gen.cc b/deps/v8/src/builtins/builtins-reflect-gen.cc
index dade25b7c7f501..744a443ecc2dc1 100644
--- a/deps/v8/src/builtins/builtins-reflect-gen.cc
+++ b/deps/v8/src/builtins/builtins-reflect-gen.cc
@@ -13,10 +13,10 @@ namespace internal {
 TF_BUILTIN(ReflectHas, CodeStubAssembler) {
   Node* target = Parameter(Descriptor::kTarget);
   Node* key = Parameter(Descriptor::kKey);
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
-  ThrowIfNotJSReceiver(context, target, MessageTemplate::kCalledOnNonObject,
-                       "Reflect.has");
+  ThrowIfNotJSReceiver(context, CAST(target),
+                       MessageTemplate::kCalledOnNonObject, "Reflect.has");
 
   Return(CallBuiltin(Builtins::kHasProperty, context, target, key));
 }
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index d53518ff7ee094..f879d70c676329 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -10,6 +10,8 @@
 #include "src/builtins/growable-fixed-array-gen.h"
 #include "src/codegen/code-factory.h"
 #include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/protectors.h"
 #include "src/heap/factory-inl.h"
 #include "src/logging/counters.h"
 #include "src/objects/js-regexp-string-iterator.h"
@@ -24,12 +26,55 @@ using compiler::Node;
 template <class T>
 using TNode = compiler::TNode<T>;
 
+// Tail calls the regular expression interpreter.
+// static
+void Builtins::Generate_RegExpInterpreterTrampoline(MacroAssembler* masm) {
+  ExternalReference interpreter_code_entry =
+      ExternalReference::re_match_for_call_from_js(masm->isolate());
+  masm->Jump(interpreter_code_entry);
+}
+
 TNode<Smi> RegExpBuiltinsAssembler::SmiZero() { return SmiConstant(0); }
 
 TNode<IntPtrT> RegExpBuiltinsAssembler::IntPtrZero() {
   return IntPtrConstant(0);
 }
 
+// If code is a builtin, return the address to the (possibly embedded) builtin
+// code entry, otherwise return the entry of the code object itself.
+TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<Code> code) {
+  TVARIABLE(RawPtrT, var_result);
+
+  Label if_code_is_off_heap(this), out(this);
+  TNode<Int32T> builtin_index = UncheckedCast<Int32T>(
+      LoadObjectField(code, Code::kBuiltinIndexOffset, MachineType::Int32()));
+  {
+    GotoIfNot(Word32Equal(builtin_index, Int32Constant(Builtins::kNoBuiltinId)),
+              &if_code_is_off_heap);
+    var_result = ReinterpretCast<RawPtrT>(
+        IntPtrAdd(BitcastTaggedToWord(code),
+                  IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
+    Goto(&out);
+  }
+
+  BIND(&if_code_is_off_heap);
+  {
+    TNode<IntPtrT> builtin_entry_offset_from_isolate_root =
+        IntPtrAdd(IntPtrConstant(IsolateData::builtin_entry_table_offset()),
+                  ChangeInt32ToIntPtr(Word32Shl(
+                      builtin_index, Int32Constant(kSystemPointerSizeLog2))));
+
+    var_result = ReinterpretCast<RawPtrT>(
+        Load(MachineType::Pointer(),
+             ExternalConstant(ExternalReference::isolate_root(isolate())),
+             builtin_entry_offset_from_isolate_root));
+    Goto(&out);
+  }
+
+  BIND(&out);
+  return var_result.value();
+}
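
In words, LoadCodeObjectEntry distinguishes two cases: a regular on-heap Code object's entry point sits just past its header, while a builtin's entry is fetched from a per-isolate table indexed by builtin id. A standalone sketch with stand-in constants (the real offsets come from V8's object layout):

    #include <cstdint>

    constexpr int kNoBuiltinId = -1;     // Builtins::kNoBuiltinId
    constexpr int kCodeHeaderSize = 64;  // stand-in for Code::kHeaderSize
    constexpr int kHeapObjectTag = 1;    // heap pointers carry tag bit 1

    uintptr_t LoadCodeObjectEntry(uintptr_t tagged_code, int builtin_index,
                                  const uint8_t* isolate_root,
                                  int builtin_entry_table_offset) {
      if (builtin_index == kNoBuiltinId) {
        // On-heap code: the entry directly follows the object header.
        return tagged_code + kCodeHeaderSize - kHeapObjectTag;
      }
      // Builtin (possibly embedded off-heap): read the entry from the
      // isolate's builtin entry table, one pointer per builtin index.
      auto* table = reinterpret_cast<const uintptr_t*>(
          isolate_root + builtin_entry_table_offset);
      return table[builtin_index];
    }

    int main() {
      uintptr_t entries[] = {0x1000, 0x2000};
      auto* root = reinterpret_cast<const uint8_t*>(entries);
      return LoadCodeObjectEntry(0x41, 1, root, 0) == 0x2000 ? 0 : 1;
    }
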
+
 // -----------------------------------------------------------------------------
 // ES6 section 21.2 RegExp Objects
 
@@ -131,8 +176,8 @@ void RegExpBuiltinsAssembler::FastStoreLastIndex(TNode<JSRegExp> regexp,
 
 void RegExpBuiltinsAssembler::SlowStoreLastIndex(SloppyTNode<Context> context,
                                                  SloppyTNode<Object> regexp,
-                                                 SloppyTNode<Number> value) {
-  TNode<Name> name = HeapConstant(isolate()->factory()->lastIndex_string());
+                                                 SloppyTNode<Object> value) {
+  TNode<String> name = HeapConstant(isolate()->factory()->lastIndex_string());
   SetPropertyStrict(context, regexp, name, value);
 }
 
@@ -236,7 +281,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
     // index at odd indices.
     TNode<Object> maybe_names =
         LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureNameMapIndex);
-    GotoIf(WordEqual(maybe_names, SmiZero()), &out);
+    GotoIf(TaggedEqual(maybe_names, SmiZero()), &out);
 
     // One or more named captures exist, add a property for each one.
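
The capture-name map unpacked here is one flat array alternating names and capture indices, which is why half its length is the number of properties to add. Modeled directly (a sketch, not V8's FixedArray):

    #include <cassert>
    #include <string>
    #include <variant>
    #include <vector>

    using Slot = std::variant<std::string, int>;

    int main() {
      // [name0, index0, name1, index1, ...]
      std::vector<Slot> names = {std::string("year"), 1, std::string("month"), 2};
      size_t num_properties = names.size() >> 1;  // WordSar(names_length, 1)
      assert(num_properties == 2);
      for (size_t i = 0; i < names.size(); i += 2) {
        const auto& name = std::get<std::string>(names[i]);  // even slot: name
        int capture = std::get<int>(names[i + 1]);           // odd slot: index
        (void)name;
        (void)capture;  // each pair becomes one property on the result
      }
    }
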
 
@@ -249,7 +294,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
     // root list.
 
     TNode<IntPtrT> num_properties = WordSar(names_length, 1);
-    TNode<Context> native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
     TNode<Map> map = CAST(LoadContextElement(
         native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
     TNode<NameDictionary> properties =
@@ -320,11 +365,11 @@ void RegExpBuiltinsAssembler::GetStringPointers(
                                 ? UINT8_ELEMENTS
                                 : UINT16_ELEMENTS;
 
-  Node* const from_offset = ElementOffsetFromIndex(
+  TNode<IntPtrT> const from_offset = ElementOffsetFromIndex(
       IntPtrAdd(offset, last_index), kind, INTPTR_PARAMETERS);
   var_string_start->Bind(IntPtrAdd(string_data, from_offset));
 
-  Node* const to_offset = ElementOffsetFromIndex(
+  TNode<IntPtrT> const to_offset = ElementOffsetFromIndex(
       IntPtrAdd(offset, string_length), kind, INTPTR_PARAMETERS);
   var_string_end->Bind(IntPtrAdd(string_data, to_offset));
 }
@@ -340,9 +385,8 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
   // External constants.
   TNode<ExternalReference> isolate_address =
       ExternalConstant(ExternalReference::isolate_address(isolate()));
-  TNode<ExternalReference> regexp_stack_memory_address_address =
-      ExternalConstant(
-          ExternalReference::address_of_regexp_stack_memory_address(isolate()));
+  TNode<ExternalReference> regexp_stack_memory_top_address = ExternalConstant(
+      ExternalReference::address_of_regexp_stack_memory_top_address(isolate()));
   TNode<ExternalReference> regexp_stack_memory_size_address = ExternalConstant(
       ExternalReference::address_of_regexp_stack_memory_size(isolate()));
   TNode<ExternalReference> static_offsets_vector_address = ExternalConstant(
@@ -374,7 +418,9 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
           data, IntPtrConstant(JSRegExp::kTagIndex));
 
       int32_t values[] = {
-          JSRegExp::IRREGEXP, JSRegExp::ATOM, JSRegExp::NOT_COMPILED,
+          JSRegExp::IRREGEXP,
+          JSRegExp::ATOM,
+          JSRegExp::NOT_COMPILED,
       };
       Label* labels[] = {&next, &atom, &runtime};
 
@@ -398,24 +444,17 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
            &runtime);
   }
 
-  // Ensure that a RegExp stack is allocated. This check is after branching off
-  // for ATOM regexps to avoid unnecessary trips to runtime.
-  {
-    TNode<IntPtrT> stack_size = UncheckedCast<IntPtrT>(
-        Load(MachineType::IntPtr(), regexp_stack_memory_size_address));
-    GotoIf(IntPtrEqual(stack_size, IntPtrZero()), &runtime);
-  }
-
   // Unpack the string if possible.
 
   to_direct.TryToDirect(&runtime);
 
-  // Load the irregexp code object and offsets into the subject string. Both
-  // depend on whether the string is one- or two-byte.
+  // Load the irregexp code or bytecode object and offsets into the subject
+  // string. Both depend on whether the string is one- or two-byte.
 
   TVARIABLE(RawPtrT, var_string_start);
   TVARIABLE(RawPtrT, var_string_end);
   TVARIABLE(Object, var_code);
+  TVARIABLE(Object, var_bytecode);
 
   {
     TNode<RawPtrT> direct_string_data = to_direct.PointerToData(&runtime);
@@ -431,6 +470,8 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
                         &var_string_start, &var_string_end);
       var_code =
           UnsafeLoadFixedArrayElement(data, JSRegExp::kIrregexpLatin1CodeIndex);
+      var_bytecode = UnsafeLoadFixedArrayElement(
+          data, JSRegExp::kIrregexpLatin1BytecodeIndex);
       Goto(&next);
     }
 
@@ -441,6 +482,8 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
                         &var_string_start, &var_string_end);
       var_code =
           UnsafeLoadFixedArrayElement(data, JSRegExp::kIrregexpUC16CodeIndex);
+      var_bytecode = UnsafeLoadFixedArrayElement(
+          data, JSRegExp::kIrregexpUC16BytecodeIndex);
       Goto(&next);
     }
 
@@ -462,9 +505,32 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
 #endif
 
   GotoIf(TaggedIsSmi(var_code.value()), &runtime);
-  GotoIfNot(IsCode(CAST(var_code.value())), &runtime);
   TNode<Code> code = CAST(var_code.value());
 
+  // Tier up in the runtime if ticks are non-zero and tier-up hasn't yet
+  // happened; ensure a RegExp stack is allocated for compiled Irregexp.
+  {
+    Label next(this), check_tier_up(this);
+    GotoIfNot(TaggedIsSmi(var_bytecode.value()), &check_tier_up);
+    CSA_ASSERT(this, SmiEqual(CAST(var_bytecode.value()),
+                              SmiConstant(JSRegExp::kUninitializedValue)));
+
+    // Ensure RegExp stack is allocated.
+    TNode<IntPtrT> stack_size = UncheckedCast<IntPtrT>(
+        Load(MachineType::IntPtr(), regexp_stack_memory_size_address));
+    GotoIf(IntPtrEqual(stack_size, IntPtrZero()), &runtime);
+    Goto(&next);
+
+    // Check if tier-up is requested.
+    BIND(&check_tier_up);
+    TNode<Smi> ticks = CAST(
+        UnsafeLoadFixedArrayElement(data, JSRegExp::kIrregexpTierUpTicksIndex));
+    GotoIf(SmiToInt32(ticks), &runtime);
+
+    Goto(&next);
+    BIND(&next);
+  }
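
Restating the block above as plain control flow: an uninitialized bytecode slot (a Smi) means the regexp has already tiered up to native code, so only the RegExp stack needs to exist; otherwise a non-zero tick count routes execution to the runtime so it can tier up. A sketch of just the decision:

    #include <cstddef>

    enum class Path { kNative, kRuntime };

    Path ChooseExecutionPath(bool bytecode_is_uninitialized,
                             int tier_up_ticks, size_t regexp_stack_size) {
      if (bytecode_is_uninitialized) {
        // Already compiled: bail to the runtime only if no stack exists.
        return regexp_stack_size == 0 ? Path::kRuntime : Path::kNative;
      }
      // Still interpreting bytecode: pending ticks request a tier-up.
      return tier_up_ticks != 0 ? Path::kRuntime : Path::kNative;
    }

    int main() {
      // Ticks pending while bytecode is live: tier up in the runtime.
      return ChooseExecutionPath(false, 1, 4096) == Path::kRuntime ? 0 : 1;
    }
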
+
   Label if_success(this), if_exception(this, Label::kDeferred);
   {
     IncrementCounter(isolate()->counters()->regexp_entry_native(), 1);
@@ -486,11 +552,13 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
     MachineType arg1_type = type_int32;
     TNode<Int32T> arg1 = TruncateIntPtrToInt32(int_last_index);
 
-    // Argument 2: Start of string data.
+    // Argument 2: Start of string data. This argument is ignored in the
+    // interpreter.
     MachineType arg2_type = type_ptr;
     TNode<RawPtrT> arg2 = var_string_start.value();
 
-    // Argument 3: End of string data.
+    // Argument 3: End of string data. This argument is ignored in the
+    // interpreter.
     MachineType arg3_type = type_ptr;
     TNode<RawPtrT> arg3 = var_string_end.value();
 
@@ -498,41 +566,50 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
     MachineType arg4_type = type_ptr;
     TNode<ExternalReference> arg4 = static_offsets_vector_address;
 
-    // Argument 5: Set the number of capture registers to zero to force global
-    // regexps to behave as non-global.  This does not affect non-global
-    // regexps.
+    // Argument 5: Number of capture registers.
+    // Setting this to the number of registers required to store all captures
+    // forces global regexps to behave as non-global.
+    TNode<Smi> capture_count = CAST(UnsafeLoadFixedArrayElement(
+        data, JSRegExp::kIrregexpCaptureCountIndex));
+    // capture_count is the number of captures without the match itself.
+    // Required registers = (capture_count + 1) * 2.
+    STATIC_ASSERT(Internals::IsValidSmi((JSRegExp::kMaxCaptures + 1) << 1));
+    TNode<Smi> register_count =
+        SmiShl(SmiAdd(capture_count, SmiConstant(1)), 1);
+
     MachineType arg5_type = type_int32;
-    TNode<Int32T> arg5 = Int32Constant(0);
+    TNode<Int32T> arg5 = SmiToInt32(register_count);
 
-    // Argument 6: Start (high end) of backtracking stack memory area.
-    TNode<RawPtrT> stack_start = UncheckedCast<RawPtrT>(
-        Load(MachineType::Pointer(), regexp_stack_memory_address_address));
-    TNode<IntPtrT> stack_size = UncheckedCast<IntPtrT>(
-        Load(MachineType::IntPtr(), regexp_stack_memory_size_address));
-    TNode<RawPtrT> stack_end =
-        ReinterpretCast<RawPtrT>(IntPtrAdd(stack_start, stack_size));
+    // Argument 6: Start (high end) of backtracking stack memory area. This
+    // argument is ignored in the interpreter.
+    TNode<RawPtrT> stack_top = UncheckedCast<RawPtrT>(
+        Load(MachineType::Pointer(), regexp_stack_memory_top_address));
 
     MachineType arg6_type = type_ptr;
-    TNode<RawPtrT> arg6 = stack_end;
+    TNode<RawPtrT> arg6 = stack_top;
 
     // Argument 7: Indicate that this is a direct call from JavaScript.
     MachineType arg7_type = type_int32;
-    TNode<Int32T> arg7 = Int32Constant(1);
+    TNode<Int32T> arg7 = Int32Constant(RegExp::CallOrigin::kFromJs);
 
     // Argument 8: Pass current isolate address.
     MachineType arg8_type = type_ptr;
     TNode<ExternalReference> arg8 = isolate_address;
 
-    TNode<RawPtrT> code_entry = ReinterpretCast<RawPtrT>(
-        IntPtrAdd(BitcastTaggedToWord(code),
-                  IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
+    // Argument 9: Regular expression object. This argument is ignored in native
+    // irregexp code.
+    MachineType arg9_type = type_tagged;
+    TNode<JSRegExp> arg9 = regexp;
+
+    TNode<RawPtrT> code_entry = LoadCodeObjectEntry(code);
 
     TNode<Int32T> result = UncheckedCast<Int32T>(CallCFunction(
         code_entry, retval_type, std::make_pair(arg0_type, arg0),
         std::make_pair(arg1_type, arg1), std::make_pair(arg2_type, arg2),
         std::make_pair(arg3_type, arg3), std::make_pair(arg4_type, arg4),
         std::make_pair(arg5_type, arg5), std::make_pair(arg6_type, arg6),
-        std::make_pair(arg7_type, arg7), std::make_pair(arg8_type, arg8)));
+        std::make_pair(arg7_type, arg7), std::make_pair(arg8_type, arg8),
+        std::make_pair(arg9_type, arg9)));
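
The register-count arithmetic for argument 5 is worth spelling out: capture_count excludes the match itself, and every capture needs two registers (start and end offsets), hence (capture_count + 1) * 2:

    #include <cassert>

    int RequiredRegisters(int capture_count) {
      return (capture_count + 1) << 1;  // SmiShl(SmiAdd(capture_count, 1), 1)
    }

    int main() {
      assert(RequiredRegisters(0) == 2);  // /abc/    -> match only
      assert(RequiredRegisters(2) == 6);  // /(a)(b)/ -> match + two captures
    }
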
 
     // Check the result.
     // We expect exactly one result since we force the called regexp to behave
@@ -734,7 +811,7 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
   BIND(&run_exec);
   {
     // Get last match info from the context.
-    TNode<Context> native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
     TNode<RegExpMatchInfo> last_match_info = CAST(LoadContextElement(
         native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX));
 
@@ -771,8 +848,8 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
 
 TNode<RegExpMatchInfo>
 RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast(
-    TNode<Context> context, TNode<JSReceiver> maybe_regexp,
-    TNode<String> string, Label* if_didnotmatch) {
+    TNode<Context> context, TNode<JSRegExp> maybe_regexp, TNode<String> string,
+    Label* if_didnotmatch) {
   return RegExpPrototypeExecBodyWithoutResult(context, maybe_regexp, string,
                                               if_didnotmatch, true);
 }
@@ -805,36 +882,21 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpPrototypeExecBody(
   return var_result.value();
 }
 
-Node* RegExpBuiltinsAssembler::ThrowIfNotJSReceiver(
-    Node* context, Node* maybe_receiver, MessageTemplate msg_template,
-    char const* method_name) {
-  Label out(this), throw_exception(this, Label::kDeferred);
-  VARIABLE(var_value_map, MachineRepresentation::kTagged);
-
-  GotoIf(TaggedIsSmi(maybe_receiver), &throw_exception);
-
-  // Load the instance type of the {value}.
-  var_value_map.Bind(LoadMap(maybe_receiver));
-  Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
-
-  Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
-
-  // The {value} is not a compatible receiver for this method.
-  BIND(&throw_exception);
-  {
-    Node* const value_str =
-        CallBuiltin(Builtins::kToString, context, maybe_receiver);
-    ThrowTypeError(context, msg_template, StringConstant(method_name),
-                   value_str);
-  }
-
-  BIND(&out);
-  return var_value_map.value();
+TNode<BoolT> RegExpBuiltinsAssembler::IsReceiverInitialRegExpPrototype(
+    SloppyTNode<Context> context, SloppyTNode<Object> receiver) {
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<JSFunction> const regexp_fun =
+      CAST(LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
+  TNode<Object> const initial_map =
+      LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+  TNode<HeapObject> const initial_prototype =
+      LoadMapPrototype(CAST(initial_map));
+  return TaggedEqual(receiver, initial_prototype);
 }
 
-Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
-                                                       Node* const object,
-                                                       Node* const map) {
+Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
+    SloppyTNode<Context> context, SloppyTNode<Object> object,
+    SloppyTNode<Map> map) {
   Label out(this);
   VARIABLE(var_result, MachineRepresentation::kWord32);
 
@@ -843,12 +905,12 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
   GotoIfForceSlowPath(&out);
 #endif
 
-  Node* const native_context = LoadNativeContext(context);
-  Node* const regexp_fun =
-      LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
-  Node* const initial_map =
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<HeapObject> const regexp_fun =
+      CAST(LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
+  TNode<Object> const initial_map =
       LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
-  Node* const has_initialmap = WordEqual(map, initial_map);
+  TNode<BoolT> const has_initialmap = TaggedEqual(map, initial_map);
 
   var_result.Bind(has_initialmap);
   GotoIfNot(has_initialmap, &out);
@@ -882,14 +944,14 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec(
   var_result = is_regexp;
   GotoIfNot(is_regexp, &out);
 
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<Object> original_exec =
       LoadContextElement(native_context, Context::REGEXP_EXEC_FUNCTION_INDEX);
 
   TNode<Object> regexp_exec =
       GetProperty(context, object, isolate()->factory()->exec_string());
 
-  TNode<BoolT> has_initialexec = WordEqual(regexp_exec, original_exec);
+  TNode<BoolT> has_initialexec = TaggedEqual(regexp_exec, original_exec);
   var_result = has_initialexec;
   GotoIf(has_initialexec, &check_last_index);
   TNode<BoolT> is_undefined = IsUndefined(regexp_exec);
@@ -908,85 +970,80 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec(
   return var_result.value();
 }
 
-Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
-                                                       Node* const object) {
+Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
+    SloppyTNode<Context> context, SloppyTNode<Object> object) {
   CSA_ASSERT(this, TaggedIsNotSmi(object));
-  return IsFastRegExpNoPrototype(context, object, LoadMap(object));
+  return IsFastRegExpNoPrototype(context, object, LoadMap(CAST(object)));
 }
 
-// RegExp fast path implementations rely on unmodified JSRegExp instances.
-// We use a fairly coarse granularity for this and simply check whether both
-// the regexp itself is unmodified (i.e. its map has not changed), its
-// prototype is unmodified, and lastIndex is a non-negative smi.
 void RegExpBuiltinsAssembler::BranchIfFastRegExp(
-    Node* const context, Node* const object, Node* const map,
-    base::Optional<DescriptorIndexAndName> additional_property_to_check,
-    Label* const if_isunmodified, Label* const if_ismodified) {
-  CSA_ASSERT(this, WordEqual(LoadMap(object), map));
+    TNode<Context> context, TNode<HeapObject> object, TNode<Map> map,
+    PrototypeCheckAssembler::Flags prototype_check_flags,
+    base::Optional<DescriptorIndexNameValue> additional_property_to_check,
+    Label* if_isunmodified, Label* if_ismodified) {
+  CSA_ASSERT(this, TaggedEqual(LoadMap(object), map));
 
   GotoIfForceSlowPath(if_ismodified);
 
   // This should only be needed for String.p.(split||matchAll), but we are
   // conservative here.
-  TNode<Context> native_context = LoadNativeContext(context);
+  // Note: we are using the current native context here, which may or may not
+  // match the object's native context. That's fine: in case of a mismatch, we
+  // will bail in the next step when comparing the object's map against the
+  // current native context's initial regexp map.
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   GotoIf(IsRegExpSpeciesProtectorCellInvalid(native_context), if_ismodified);
 
-  Node* const regexp_fun =
-      LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
-  Node* const initial_map =
-      LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
-  Node* const has_initialmap = WordEqual(map, initial_map);
+  TNode<JSFunction> regexp_fun =
+      CAST(LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
+  TNode<Map> initial_map = CAST(
+      LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset));
+  TNode<BoolT> has_initialmap = TaggedEqual(map, initial_map);
 
   GotoIfNot(has_initialmap, if_ismodified);
 
-  Node* const initial_proto_initial_map =
-      LoadContextElement(native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
+  // The smi check is required to omit ToLength(lastIndex) calls with possible
+  // user-code execution on the fast path.
+  TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object));
+  GotoIfNot(TaggedIsPositiveSmi(last_index), if_ismodified);
+
+  // Verify the prototype.
+
+  TNode<Map> initial_proto_initial_map = CAST(
+      LoadContextElement(native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX));
 
-  DescriptorIndexAndName properties_to_check[2];
+  DescriptorIndexNameValue properties_to_check[2];
   int property_count = 0;
-  properties_to_check[property_count++] = DescriptorIndexAndName{
-      JSRegExp::kExecFunctionDescriptorIndex, RootIndex::kexec_string};
+  properties_to_check[property_count++] = DescriptorIndexNameValue{
+      JSRegExp::kExecFunctionDescriptorIndex, RootIndex::kexec_string,
+      Context::REGEXP_EXEC_FUNCTION_INDEX};
   if (additional_property_to_check) {
     properties_to_check[property_count++] = *additional_property_to_check;
   }
 
-  GotoIfInitialPrototypePropertiesModified(
-      CAST(map), CAST(initial_proto_initial_map),
-      Vector<DescriptorIndexAndName>(properties_to_check, property_count),
-      if_ismodified);
+  PrototypeCheckAssembler prototype_check_assembler(
+      state(), prototype_check_flags, native_context, initial_proto_initial_map,
+      Vector<DescriptorIndexNameValue>(properties_to_check, property_count));
 
-  // The smi check is required to omit ToLength(lastIndex) calls with possible
-  // user-code execution on the fast path.
-  TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object));
-  Branch(TaggedIsPositiveSmi(last_index), if_isunmodified, if_ismodified);
+  TNode<HeapObject> prototype = LoadMapPrototype(map);
+  prototype_check_assembler.CheckAndBranch(prototype, if_isunmodified,
+                                           if_ismodified);
 }
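
Collapsed into plain C++, the fast-path test above requires all of: the slow path isn't forced, the species protector is intact, the instance still has the initial JSRegExp map, lastIndex is a non-negative Smi, and the prototype (its "exec" descriptor included) passes the check at the requested strictness. A condensed sketch:

    // Each flag stands for one check the assembler emits; the real code
    // branches to if_ismodified as soon as any of them fails.
    struct FastRegExpChecks {
      bool species_protector_valid;
      bool has_initial_regexp_map;
      bool last_index_is_positive_smi;
      bool prototype_unmodified;  // PrototypeCheckAssembler's verdict
    };

    bool IsFastRegExp(const FastRegExpChecks& c, bool force_slow = false) {
      if (force_slow) return false;  // GotoIfForceSlowPath
      return c.species_protector_valid && c.has_initial_regexp_map &&
             c.last_index_is_positive_smi && c.prototype_unmodified;
    }

    int main() {
      return IsFastRegExp({true, true, true, true}) ? 0 : 1;
    }
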
 
-void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* const context,
-                                                 Node* const object,
-                                                 Label* const if_isunmodified,
-                                                 Label* const if_ismodified) {
-  CSA_ASSERT(this, TaggedIsNotSmi(object));
-  BranchIfFastRegExp(context, object, LoadMap(object), base::nullopt,
-                     if_isunmodified, if_ismodified);
+void RegExpBuiltinsAssembler::BranchIfFastRegExp_Strict(
+    TNode<Context> context, TNode<HeapObject> object, Label* if_isunmodified,
+    Label* if_ismodified) {
+  BranchIfFastRegExp(context, object, LoadMap(object),
+                     PrototypeCheckAssembler::kCheckPrototypePropertyConstness,
+                     base::nullopt, if_isunmodified, if_ismodified);
 }
 
-TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExp(SloppyTNode<Context> context,
-                                                   SloppyTNode<Object> object) {
-  Label yup(this), nope(this), out(this);
-  TVARIABLE(BoolT, var_result);
-
-  BranchIfFastRegExp(context, object, &yup, &nope);
-
-  BIND(&yup);
-  var_result = Int32TrueConstant();
-  Goto(&out);
-
-  BIND(&nope);
-  var_result = Int32FalseConstant();
-  Goto(&out);
-
-  BIND(&out);
-  return var_result.value();
+void RegExpBuiltinsAssembler::BranchIfFastRegExp_Permissive(
+    TNode<Context> context, TNode<HeapObject> object, Label* if_isunmodified,
+    Label* if_ismodified) {
+  BranchIfFastRegExp(context, object, LoadMap(object),
+                     PrototypeCheckAssembler::kCheckFull, base::nullopt,
+                     if_isunmodified, if_ismodified);
 }
 
 void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context,
@@ -994,13 +1051,13 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context,
                                                        Label* if_isunmodified,
                                                        Label* if_ismodified) {
   // Could be a Smi.
-  Node* const map = LoadReceiverMap(object);
+  TNode<Map> const map = LoadReceiverMap(object);
 
-  Node* const native_context = LoadNativeContext(context);
-  Node* const initial_regexp_result_map =
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Object> const initial_regexp_result_map =
       LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
 
-  Branch(WordEqual(map, initial_regexp_result_map), if_isunmodified,
+  Branch(TaggedEqual(map, initial_regexp_result_map), if_isunmodified,
          if_ismodified);
 }
 
@@ -1036,9 +1093,8 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
              UintPtrLessThanOrEqual(SmiUntag(last_index),
                                     LoadStringLengthAsWord(subject_string)));
 
-  Node* const needle_string =
-      UnsafeLoadFixedArrayElement(data, JSRegExp::kAtomPatternIndex);
-  CSA_ASSERT(this, IsString(needle_string));
+  TNode<String> const needle_string =
+      CAST(UnsafeLoadFixedArrayElement(data, JSRegExp::kAtomPatternIndex));
 
   TNode<Smi> const match_from =
       CAST(CallBuiltin(Builtins::kStringIndexOf, context, subject_string,
@@ -1120,9 +1176,9 @@ TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
                      string));
 }
 
-Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
-                                           Node* const regexp,
-                                           bool is_fastpath) {
+TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
+                                                   TNode<Object> regexp,
+                                                   bool is_fastpath) {
   Isolate* isolate = this->isolate();
 
   TNode<IntPtrT> const int_one = IntPtrConstant(1);
@@ -1134,8 +1190,9 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
 
   if (is_fastpath) {
     // Refer to JSRegExp's flag property on the fast-path.
-    CSA_ASSERT(this, IsJSRegExp(regexp));
-    Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+    CSA_ASSERT(this, IsJSRegExp(CAST(regexp)));
+    TNode<Smi> const flags_smi =
+        CAST(LoadObjectField(CAST(regexp), JSRegExp::kFlagsOffset));
     var_flags = SmiUntag(flags_smi);
 
 #define CASE_FOR_FLAG(FLAG)                                        \
@@ -1163,7 +1220,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
 #define CASE_FOR_FLAG(NAME, FLAG)                                          \
   do {                                                                     \
     Label next(this);                                                      \
-    Node* const flag = GetProperty(                                        \
+    TNode<Object> const flag = GetProperty(                                \
         context, regexp, isolate->factory()->InternalizeUtf8String(NAME)); \
     Label if_isflagset(this);                                              \
     BranchIfToBooleanIsTrue(flag, &if_isflagset, &next);                   \
@@ -1187,7 +1244,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
   // char for each set flag.
 
   {
-    Node* const result = AllocateSeqOneByteString(context, var_length.value());
+    TNode<String> const result = AllocateSeqOneByteString(var_length.value());
 
     VARIABLE(var_offset, MachineType::PointerRepresentation(),
              IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
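
The fast path builds the flags string in two passes: count the set flags to size a one-byte string, then write one character per set flag. The same shape in standalone form (bit values illustrative, not JSRegExp's):

    #include <iostream>
    #include <string>

    enum Flag : int { kGlobal = 1 << 0, kIgnoreCase = 1 << 1, kMultiline = 1 << 2 };

    std::string FlagsGetter(int flags) {
      std::string result;  // the builtin presizes; std::string appends
      if (flags & kGlobal) result += 'g';      // CASE_FOR_FLAG equivalents
      if (flags & kIgnoreCase) result += 'i';
      if (flags & kMultiline) result += 'm';
      return result;
    }

    int main() { std::cout << FlagsGetter(kGlobal | kMultiline) << "\n"; }  // gm
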
@@ -1196,7 +1253,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
   do {                                                         \
     Label next(this);                                          \
     GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next);      \
-    Node* const value = Int32Constant(CHAR);                   \
+    TNode<Int32T> const value = Int32Constant(CHAR);           \
     StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
                         var_offset.value(), value);            \
     var_offset.Bind(IntPtrAdd(var_offset.value(), int_one));   \
@@ -1290,27 +1347,6 @@ Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
                      pattern, flags);
 }
 
-// ES #sec-get-regexp.prototype.flags
-TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
-  TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
-  TNode<Map> map = CAST(ThrowIfNotJSReceiver(context, maybe_receiver,
-                                             MessageTemplate::kRegExpNonObject,
-                                             "RegExp.prototype.flags"));
-  TNode<JSReceiver> receiver = CAST(maybe_receiver);
-
-  Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
-  BranchIfFastRegExp(context, receiver, map, base::nullopt, &if_isfastpath,
-                     &if_isslowpath);
-
-  BIND(&if_isfastpath);
-  Return(FlagsGetter(context, receiver, true));
-
-  BIND(&if_isslowpath);
-  Return(FlagsGetter(context, receiver, false));
-}
-
 // ES#sec-regexp-pattern-flags
 // RegExp ( pattern, flags )
 TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
@@ -1321,13 +1357,13 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
 
   Isolate* isolate = this->isolate();
 
-  VARIABLE(var_flags, MachineRepresentation::kTagged, flags);
-  VARIABLE(var_pattern, MachineRepresentation::kTagged, pattern);
-  VARIABLE(var_new_target, MachineRepresentation::kTagged, new_target);
+  TVARIABLE(Object, var_flags, flags);
+  TVARIABLE(Object, var_pattern, pattern);
+  TVARIABLE(Object, var_new_target, new_target);
 
-  Node* const native_context = LoadNativeContext(context);
-  Node* const regexp_function =
-      LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<JSFunction> regexp_function =
+      CAST(LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
 
   TNode<BoolT> pattern_is_regexp = IsRegExp(context, pattern);
 
@@ -1335,15 +1371,15 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
     Label next(this);
 
     GotoIfNot(IsUndefined(new_target), &next);
-    var_new_target.Bind(regexp_function);
+    var_new_target = regexp_function;
 
     GotoIfNot(pattern_is_regexp, &next);
     GotoIfNot(IsUndefined(flags), &next);
 
-    Node* const value =
+    TNode<Object> value =
         GetProperty(context, pattern, isolate->factory()->constructor_string());
 
-    GotoIfNot(WordEqual(value, regexp_function), &next);
+    GotoIfNot(TaggedEqual(value, regexp_function), &next);
     Return(pattern);
 
     BIND(&next);
@@ -1360,16 +1396,15 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
 
     BIND(&if_patternisfastregexp);
     {
-      Node* const source =
+      TNode<Object> source =
           LoadObjectField(CAST(pattern), JSRegExp::kSourceOffset);
-      var_pattern.Bind(source);
+      var_pattern = source;
 
       {
         Label inner_next(this);
         GotoIfNot(IsUndefined(flags), &inner_next);
 
-        Node* const value = FlagsGetter(context, pattern, true);
-        var_flags.Bind(value);
+        var_flags = FlagsGetter(context, pattern, true);
         Goto(&inner_next);
 
         BIND(&inner_next);
@@ -1380,19 +1415,15 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
 
     BIND(&if_patternisslowregexp);
     {
-      {
-        Node* const value =
-            GetProperty(context, pattern, isolate->factory()->source_string());
-        var_pattern.Bind(value);
-      }
+      var_pattern =
+          GetProperty(context, pattern, isolate->factory()->source_string());
 
       {
         Label inner_next(this);
         GotoIfNot(IsUndefined(flags), &inner_next);
 
-        Node* const value =
+        var_flags =
             GetProperty(context, pattern, isolate->factory()->flags_string());
-        var_flags.Bind(value);
         Goto(&inner_next);
 
         BIND(&inner_next);
@@ -1410,14 +1441,14 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
   {
     Label allocate_jsregexp(this), allocate_generic(this, Label::kDeferred),
         next(this);
-    Branch(WordEqual(var_new_target.value(), regexp_function),
+    Branch(TaggedEqual(var_new_target.value(), regexp_function),
            &allocate_jsregexp, &allocate_generic);
 
     BIND(&allocate_jsregexp);
     {
-      Node* const initial_map = LoadObjectField(
-          regexp_function, JSFunction::kPrototypeOrInitialMapOffset);
-      Node* const regexp = AllocateJSObjectFromMap(initial_map);
+      TNode<Map> const initial_map = CAST(LoadObjectField(
+          regexp_function, JSFunction::kPrototypeOrInitialMapOffset));
+      TNode<JSObject> const regexp = AllocateJSObjectFromMap(initial_map);
       var_regexp.Bind(regexp);
       Goto(&next);
     }
@@ -1425,8 +1456,8 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
     BIND(&allocate_generic);
     {
       ConstructorBuiltinsAssembler constructor_assembler(this->state());
-      Node* const regexp = constructor_assembler.EmitFastNewObject(
-          context, regexp_function, var_new_target.value());
+      TNode<JSObject> const regexp = constructor_assembler.EmitFastNewObject(
+          context, regexp_function, CAST(var_new_target.value()));
       var_regexp.Bind(regexp);
       Goto(&next);
     }
@@ -1473,8 +1504,9 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
       BIND(&next);
     }
 
-    Node* const new_flags = FlagsGetter(context, pattern, true);
-    Node* const new_pattern = LoadObjectField(pattern, JSRegExp::kSourceOffset);
+    TNode<String> const new_flags = FlagsGetter(context, CAST(pattern), true);
+    TNode<Object> const new_pattern =
+        LoadObjectField(pattern, JSRegExp::kSourceOffset);
 
     var_flags.Bind(new_flags);
     var_pattern.Bind(new_pattern);
@@ -1488,69 +1520,22 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
   Return(result);
 }
 
-// ES6 21.2.5.10.
-// ES #sec-get-regexp.prototype.source
-TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
-  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
-  // Check whether we have an unmodified regexp instance.
-  Label if_isjsregexp(this), if_isnotjsregexp(this, Label::kDeferred);
-
-  GotoIf(TaggedIsSmi(receiver), &if_isnotjsregexp);
-  Branch(IsJSRegExp(CAST(receiver)), &if_isjsregexp, &if_isnotjsregexp);
-
-  BIND(&if_isjsregexp);
-  Return(LoadObjectField(CAST(receiver), JSRegExp::kSourceOffset));
-
-  BIND(&if_isnotjsregexp);
-  {
-    Isolate* isolate = this->isolate();
-    Node* const native_context = LoadNativeContext(context);
-    Node* const regexp_fun =
-        LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
-    Node* const initial_map =
-        LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
-    Node* const initial_prototype = LoadMapPrototype(initial_map);
-
-    Label if_isprototype(this), if_isnotprototype(this);
-    Branch(WordEqual(receiver, initial_prototype), &if_isprototype,
-           &if_isnotprototype);
-
-    BIND(&if_isprototype);
-    {
-      const int counter = v8::Isolate::kRegExpPrototypeSourceGetter;
-      Node* const counter_smi = SmiConstant(counter);
-      CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
-
-      Node* const result =
-          HeapConstant(isolate->factory()->NewStringFromAsciiChecked("(?:)"));
-      Return(result);
-    }
-
-    BIND(&if_isnotprototype);
-    {
-      ThrowTypeError(context, MessageTemplate::kRegExpNonRegExp,
-                     "RegExp.prototype.source");
-    }
-  }
-}
-
 // Fast-path implementation for flag checks on an unmodified JSRegExp instance.
-TNode<Int32T> RegExpBuiltinsAssembler::FastFlagGetter(TNode<JSRegExp> regexp,
-                                                      JSRegExp::Flag flag) {
+TNode<BoolT> RegExpBuiltinsAssembler::FastFlagGetter(TNode<JSRegExp> regexp,
+                                                     JSRegExp::Flag flag) {
   TNode<Smi> flags = CAST(LoadObjectField(regexp, JSRegExp::kFlagsOffset));
   TNode<Smi> mask = SmiConstant(flag);
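+  // Worked example (illustrative; assumes the sticky flag occupies bit 3):
+  // for flags == 0b01011, (flags & 0b01000) >> 3 == 1, which is then
+  // reinterpreted as the BoolT 'true'.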
-  return SmiToInt32(SmiShr(SmiAnd(flags, mask), base::bits::CountTrailingZeros(
-                                                    static_cast<int>(flag))));
+  return ReinterpretCast<BoolT>(SmiToInt32(
+      SmiShr(SmiAnd(flags, mask),
+             base::bits::CountTrailingZeros(static_cast<int>(flag)))));
 }
 
 // Load through the GetProperty stub.
-TNode<Int32T> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
-                                                      TNode<Object> regexp,
-                                                      JSRegExp::Flag flag) {
+TNode<BoolT> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
+                                                     TNode<Object> regexp,
+                                                     JSRegExp::Flag flag) {
   Label out(this);
-  TVARIABLE(Int32T, var_result);
+  TVARIABLE(BoolT, var_result);
 
   Handle<String> name;
   switch (flag) {
@@ -1582,140 +1567,36 @@ TNode<Int32T> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
   BranchIfToBooleanIsTrue(value, &if_true, &if_false);
 
   BIND(&if_true);
-  var_result = Int32Constant(1);
+  var_result = BoolConstant(true);
   Goto(&out);
 
   BIND(&if_false);
-  var_result = Int32Constant(0);
+  var_result = BoolConstant(false);
   Goto(&out);
 
   BIND(&out);
   return var_result.value();
 }
 
-TNode<Int32T> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
-                                                  TNode<Object> regexp,
-                                                  JSRegExp::Flag flag,
-                                                  bool is_fastpath) {
+TNode<BoolT> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
+                                                 TNode<Object> regexp,
+                                                 JSRegExp::Flag flag,
+                                                 bool is_fastpath) {
   return is_fastpath ? FastFlagGetter(CAST(regexp), flag)
                      : SlowFlagGetter(context, regexp, flag);
 }
 
-void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
-                                         JSRegExp::Flag flag, int counter,
-                                         const char* method_name) {
-  // Check whether we have an unmodified regexp instance.
-  Label if_isunmodifiedjsregexp(this),
-      if_isnotunmodifiedjsregexp(this, Label::kDeferred);
-
-  GotoIf(TaggedIsSmi(receiver), &if_isnotunmodifiedjsregexp);
-  Branch(IsJSRegExp(receiver), &if_isunmodifiedjsregexp,
-         &if_isnotunmodifiedjsregexp);
-
-  BIND(&if_isunmodifiedjsregexp);
-  {
-    // Refer to JSRegExp's flag property on the fast-path.
-    Node* const is_flag_set = FastFlagGetter(CAST(receiver), flag);
-    Return(SelectBooleanConstant(is_flag_set));
-  }
-
-  BIND(&if_isnotunmodifiedjsregexp);
-  {
-    Node* const native_context = LoadNativeContext(context);
-    Node* const regexp_fun =
-        LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
-    Node* const initial_map =
-        LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
-    Node* const initial_prototype = LoadMapPrototype(initial_map);
-
-    Label if_isprototype(this), if_isnotprototype(this);
-    Branch(WordEqual(receiver, initial_prototype), &if_isprototype,
-           &if_isnotprototype);
-
-    BIND(&if_isprototype);
-    {
-      if (counter != -1) {
-        Node* const counter_smi = SmiConstant(counter);
-        CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
-      }
-      Return(UndefinedConstant());
-    }
-
-    BIND(&if_isnotprototype);
-    { ThrowTypeError(context, MessageTemplate::kRegExpNonRegExp, method_name); }
-  }
-}
-
-// ES6 21.2.5.4.
-// ES #sec-get-regexp.prototype.global
-TF_BUILTIN(RegExpPrototypeGlobalGetter, RegExpBuiltinsAssembler) {
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-  FlagGetter(context, receiver, JSRegExp::kGlobal,
-             v8::Isolate::kRegExpPrototypeOldFlagGetter,
-             "RegExp.prototype.global");
-}
-
-// ES6 21.2.5.5.
-// ES #sec-get-regexp.prototype.ignorecase
-TF_BUILTIN(RegExpPrototypeIgnoreCaseGetter, RegExpBuiltinsAssembler) {
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-  FlagGetter(context, receiver, JSRegExp::kIgnoreCase,
-             v8::Isolate::kRegExpPrototypeOldFlagGetter,
-             "RegExp.prototype.ignoreCase");
-}
-
-// ES6 21.2.5.7.
-// ES #sec-get-regexp.prototype.multiline
-TF_BUILTIN(RegExpPrototypeMultilineGetter, RegExpBuiltinsAssembler) {
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-  FlagGetter(context, receiver, JSRegExp::kMultiline,
-             v8::Isolate::kRegExpPrototypeOldFlagGetter,
-             "RegExp.prototype.multiline");
-}
-
-// ES #sec-get-regexp.prototype.dotAll
-TF_BUILTIN(RegExpPrototypeDotAllGetter, RegExpBuiltinsAssembler) {
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-  static const int kNoCounter = -1;
-  FlagGetter(context, receiver, JSRegExp::kDotAll, kNoCounter,
-             "RegExp.prototype.dotAll");
-}
-
-// ES6 21.2.5.12.
-// ES #sec-get-regexp.prototype.sticky
-TF_BUILTIN(RegExpPrototypeStickyGetter, RegExpBuiltinsAssembler) {
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-  FlagGetter(context, receiver, JSRegExp::kSticky,
-             v8::Isolate::kRegExpPrototypeStickyGetter,
-             "RegExp.prototype.sticky");
-}
-
-// ES6 21.2.5.15.
-// ES #sec-get-regexp.prototype.unicode
-TF_BUILTIN(RegExpPrototypeUnicodeGetter, RegExpBuiltinsAssembler) {
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-  FlagGetter(context, receiver, JSRegExp::kUnicode,
-             v8::Isolate::kRegExpPrototypeUnicodeGetter,
-             "RegExp.prototype.unicode");
-}
-
 // ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
-Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
-                                          Node* string) {
-  VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Object> RegExpBuiltinsAssembler::RegExpExec(TNode<Context> context,
+                                                  Node* regexp, Node* string) {
+  TVARIABLE(Object, var_result);
   Label out(this);
 
   // Take the slow path of fetching the exec property, calling it, and
   // verifying its return value.
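+  // Roughly, in spec-style JS (an illustrative sketch of ES#sec-regexpexec):
+  //
+  //   const exec = R.exec;
+  //   if (IsCallable(exec)) {
+  //     const result = Call(exec, R, [S]);
+  //     if (result !== null && Type(result) !== Object) throw TypeError;
+  //     return result;
+  //   }
+  //   // Otherwise R must be a JSRegExp; fall back to the builtin exec.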
 
   // Get the exec property.
-  Node* const exec =
+  TNode<Object> const exec =
       GetProperty(context, regexp, isolate()->factory()->exec_string());
 
   // Is {exec} callable?
@@ -1723,18 +1604,17 @@ Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
 
   GotoIf(TaggedIsSmi(exec), &if_isnotcallable);
 
-  Node* const exec_map = LoadMap(exec);
+  TNode<Map> const exec_map = LoadMap(CAST(exec));
   Branch(IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
 
   BIND(&if_iscallable);
   {
     Callable call_callable = CodeFactory::Call(isolate());
-    Node* const result = CallJS(call_callable, context, exec, regexp, string);
+    var_result = CAST(CallJS(call_callable, context, exec, regexp, string));
 
-    var_result.Bind(result);
-    GotoIf(IsNull(result), &out);
+    GotoIf(IsNull(var_result.value()), &out);
 
-    ThrowIfNotJSReceiver(context, result,
+    ThrowIfNotJSReceiver(context, var_result.value(),
                          MessageTemplate::kInvalidRegExpExecResult, "");
 
     Goto(&out);
@@ -1745,9 +1625,8 @@ Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
     ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
                            "RegExp.prototype.exec");
 
-    Node* const result = CallBuiltin(Builtins::kRegExpPrototypeExecSlow,
-                                     context, regexp, string);
-    var_result.Bind(result);
+    var_result = CallBuiltin(Builtins::kRegExpPrototypeExecSlow, context,
+                             regexp, string);
     Goto(&out);
   }
 
@@ -1755,62 +1634,6 @@ Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
   return var_result.value();
 }
 
-// ES#sec-regexp.prototype.test
-// RegExp.prototype.test ( S )
-TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
-  TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
-  TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
-  // Ensure {maybe_receiver} is a JSReceiver.
-  ThrowIfNotJSReceiver(context, maybe_receiver,
-                       MessageTemplate::kIncompatibleMethodReceiver,
-                       "RegExp.prototype.test");
-  TNode<JSReceiver> receiver = CAST(maybe_receiver);
-
-  // Convert {maybe_string} to a String.
-  TNode<String> string = ToString_Inline(context, maybe_string);
-
-  Label fast_path(this), slow_path(this);
-  BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
-
-  BIND(&fast_path);
-  {
-    Label if_didnotmatch(this);
-    RegExpPrototypeExecBodyWithoutResult(context, receiver, string,
-                                         &if_didnotmatch, true);
-    Return(TrueConstant());
-
-    BIND(&if_didnotmatch);
-    Return(FalseConstant());
-  }
-
-  BIND(&slow_path);
-  {
-    // Call exec.
-    TNode<HeapObject> match_indices =
-        CAST(RegExpExec(context, receiver, string));
-
-    // Return true iff exec matched successfully.
-    Return(SelectBooleanConstant(IsNotNull(match_indices)));
-  }
-}
-
-TF_BUILTIN(RegExpPrototypeTestFast, RegExpBuiltinsAssembler) {
-  TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kReceiver));
-  TNode<String> string = CAST(Parameter(Descriptor::kString));
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
-  Label if_didnotmatch(this);
-  CSA_ASSERT(this, IsFastRegExpWithOriginalExec(context, regexp));
-  RegExpPrototypeExecBodyWithoutResult(context, regexp, string, &if_didnotmatch,
-                                       true);
-  Return(TrueConstant());
-
-  BIND(&if_didnotmatch);
-  Return(FalseConstant());
-}
-
 TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
     SloppyTNode<String> string, SloppyTNode<Number> index,
     SloppyTNode<BoolT> is_unicode, bool is_fastpath) {
@@ -1852,12 +1675,12 @@ TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
     TNode<IntPtrT> untagged_plus_one = SmiUntag(CAST(index_plus_one));
     GotoIfNot(IntPtrLessThan(untagged_plus_one, string_length), &out);
 
-    Node* const lead = StringCharCodeAt(string, SmiUntag(CAST(index)));
+    TNode<Int32T> const lead = StringCharCodeAt(string, SmiUntag(CAST(index)));
     GotoIfNot(Word32Equal(Word32And(lead, Int32Constant(0xFC00)),
                           Int32Constant(0xD800)),
               &out);
 
-    Node* const trail = StringCharCodeAt(string, untagged_plus_one);
+    TNode<Int32T> const trail = StringCharCodeAt(string, untagged_plus_one);
     GotoIfNot(Word32Equal(Word32And(trail, Int32Constant(0xFC00)),
                           Int32Constant(0xDC00)),
               &out);
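+    // Worked example (illustrative): U+1D4B3 is stored as the surrogate pair
+    // 0xD835/0xDCB3; 0xD835 & 0xFC00 == 0xD800 and 0xDCB3 & 0xFC00 == 0xDC00,
+    // so both checks pass and the index advances past the full code point.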
@@ -1873,29 +1696,34 @@ TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
   return var_result.value();
 }
 
-void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode<Context> context,
-                                                       TNode<Object> regexp,
-                                                       TNode<String> string,
-                                                       const bool is_fastpath) {
-  if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp));
+TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(
+    TNode<Context> context, TNode<Object> regexp, TNode<String> string,
+    const bool is_fastpath) {
+  if (is_fastpath) {
+    CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) {
+      BranchIfFastRegExp_Strict(context, CAST(regexp), ok, not_ok);
+    });
+  }
 
-  Node* const is_global =
+  TVARIABLE(Object, var_result);
+
+  TNode<BoolT> const is_global =
       FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
 
-  Label if_isglobal(this), if_isnotglobal(this);
+  Label if_isglobal(this), if_isnotglobal(this), done(this);
   Branch(is_global, &if_isglobal, &if_isnotglobal);
 
   BIND(&if_isnotglobal);
   {
-    Node* const result = is_fastpath ? RegExpPrototypeExecBody(
-                                           context, CAST(regexp), string, true)
-                                     : RegExpExec(context, regexp, string);
-    Return(result);
+    var_result = is_fastpath ? RegExpPrototypeExecBody(context, CAST(regexp),
+                                                       string, true)
+                             : RegExpExec(context, regexp, string);
+    Goto(&done);
   }
 
   BIND(&if_isglobal);
   {
-    Node* const is_unicode =
+    TNode<BoolT> const is_unicode =
         FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath);
 
     StoreLastIndex(context, regexp, SmiZero(), is_fastpath);
@@ -1939,15 +1767,14 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode<Context> context,
         TNode<RegExpMatchInfo> match_indices =
             RegExpPrototypeExecBodyWithoutResult(context, CAST(regexp), string,
                                                  &if_didnotmatch, true);
-
         Label dosubstring(this), donotsubstring(this);
         Branch(var_atom.value(), &donotsubstring, &dosubstring);
 
         BIND(&dosubstring);
         {
-          Node* const match_from = UnsafeLoadFixedArrayElement(
+          TNode<Object> const match_from = UnsafeLoadFixedArrayElement(
               match_indices, RegExpMatchInfo::kFirstCaptureIndex);
-          Node* const match_to = UnsafeLoadFixedArrayElement(
+          TNode<Object> const match_to = UnsafeLoadFixedArrayElement(
               match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
           var_match.Bind(CallBuiltin(Builtins::kSubString, context, string,
                                      match_from, match_to));
@@ -1959,7 +1786,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode<Context> context,
         Goto(&if_didmatch);
       } else {
         DCHECK(!is_fastpath);
-        Node* const result = RegExpExec(context, regexp, string);
+        TNode<Object> const result = RegExpExec(context, regexp, string);
 
         Label load_match(this);
         Branch(IsNull(result), &if_didnotmatch, &load_match);
@@ -1974,7 +1801,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode<Context> context,
       {
         // Return null if there were no matches, otherwise just exit the loop.
         GotoIfNot(IntPtrEqual(array.length(), IntPtrZero()), &out);
-        Return(NullConstant());
+        var_result = NullConstant();
+        Goto(&done);
       }
 
       BIND(&if_didmatch);
@@ -1990,15 +1818,15 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode<Context> context,
         TNode<Smi> const match_length = LoadStringLengthAsSmi(match);
         GotoIfNot(SmiEqual(match_length, SmiZero()), &loop);
 
-        Node* last_index = LoadLastIndex(context, regexp, is_fastpath);
+        TNode<Object> last_index = LoadLastIndex(context, regexp, is_fastpath);
         if (is_fastpath) {
           CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
         } else {
           last_index = ToLength_Inline(context, last_index);
         }
 
-        TNode<Number> new_last_index =
-            AdvanceStringIndex(string, last_index, is_unicode, is_fastpath);
+        TNode<Number> new_last_index = AdvanceStringIndex(
+            string, CAST(last_index), is_unicode, is_fastpath);
 
         if (is_fastpath) {
           // On the fast path, we can be certain that lastIndex can never be
@@ -2018,38 +1846,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode<Context> context,
     {
       // Wrap the match in a JSArray.
 
-      Node* const result = array.ToJSArray(context);
-      Return(result);
+      var_result = array.ToJSArray(context);
+      Goto(&done);
     }
   }
-}
-
-// ES#sec-regexp.prototype-@@match
-// RegExp.prototype [ @@match ] ( string )
-TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
-  TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
-  TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
-  // Ensure {maybe_receiver} is a JSReceiver.
-  ThrowIfNotJSReceiver(context, maybe_receiver,
-                       MessageTemplate::kIncompatibleMethodReceiver,
-                       "RegExp.prototype.@@match");
-  TNode<JSReceiver> receiver = CAST(maybe_receiver);
-
-  // Convert {maybe_string} to a String.
-  TNode<String> const string = ToString_Inline(context, maybe_string);
-
-  Label fast_path(this), slow_path(this);
-  BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
-
-  BIND(&fast_path);
-  // TODO(pwong): Could be optimized to remove the overhead of calling the
-  //              builtin (at the cost of a larger builtin).
-  Return(CallBuiltin(Builtins::kRegExpMatchFast, context, receiver, string));
-
-  BIND(&slow_path);
-  RegExpPrototypeMatchBody(context, receiver, string, false);
+  BIND(&done);
+  return var_result.value();
 }
 
 void RegExpMatchAllAssembler::Generate(TNode<Context> context,
@@ -2066,12 +1869,17 @@ void RegExpMatchAllAssembler::Generate(TNode<Context> context,
   TNode<String> string = ToString_Inline(context, maybe_string);
 
   TVARIABLE(Object, var_matcher);
-  TVARIABLE(Int32T, var_global);
-  TVARIABLE(Int32T, var_unicode);
+  TVARIABLE(BoolT, var_global);
+  TVARIABLE(BoolT, var_unicode);
   Label create_iterator(this), if_fast_regexp(this),
       if_slow_regexp(this, Label::kDeferred);
 
-  BranchIfFastRegExp(context, receiver, &if_fast_regexp, &if_slow_regexp);
+  // Strict, because the following code uses the flags property.
+  // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+  // permissive.
+  BranchIfFastRegExp_Strict(context, CAST(receiver), &if_fast_regexp,
+                            &if_slow_regexp);
+
   BIND(&if_fast_regexp);
   {
     TNode<JSRegExp> fast_regexp = CAST(receiver);
@@ -2081,9 +1889,10 @@ void RegExpMatchAllAssembler::Generate(TNode<Context> context,
     // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
     // 5. Let flags be ? ToString(? Get(R, "flags")).
     // 6. Let matcher be ? Construct(C, « R, flags »).
-    TNode<String> flags = CAST(FlagsGetter(context, fast_regexp, true));
+    TNode<String> flags = FlagsGetter(context, fast_regexp, true);
     var_matcher = RegExpCreate(context, native_context, source, flags);
-    CSA_ASSERT(this, IsFastRegExp(context, var_matcher.value()));
+    CSA_ASSERT(this,
+               IsFastRegExpPermissive(context, CAST(var_matcher.value())));
 
     // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
     // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
@@ -2130,8 +1939,7 @@ void RegExpMatchAllAssembler::Generate(TNode<Context> context,
     TNode<Smi> global_ix =
         CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string,
                          global_char_string, SmiZero()));
-    var_global =
-        SelectInt32Constant(SmiEqual(global_ix, SmiConstant(-1)), 0, 1);
+    var_global = SmiNotEqual(global_ix, SmiConstant(-1));
 
     // 11. If flags contains "u", let fullUnicode be true.
     // 12. Else, let fullUnicode be false.
@@ -2139,13 +1947,23 @@ void RegExpMatchAllAssembler::Generate(TNode<Context> context,
     TNode<Smi> unicode_ix =
         CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string,
                          unicode_char_string, SmiZero()));
-    var_unicode =
-        SelectInt32Constant(SmiEqual(unicode_ix, SmiConstant(-1)), 0, 1);
+    var_unicode = SmiNotEqual(unicode_ix, SmiConstant(-1));
     Goto(&create_iterator);
   }
 
   BIND(&create_iterator);
   {
+    {
+      // UseCounter for matchAll with non-g RegExp.
+      // https://crbug.com/v8/9551
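+      // E.g. (illustrative): "abab".matchAll(/a/) reaches the runtime call
+      // below, while "abab".matchAll(/a/g) branches past it.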
+      Label next(this);
+      GotoIf(var_global.value(), &next);
+      CallRuntime(Runtime::kIncrementUseCounter, context,
+                  SmiConstant(v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp));
+      Goto(&next);
+      BIND(&next);
+    }
+
     // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
     TNode<Object> iterator =
         CreateRegExpStringIterator(native_context, var_matcher.value(), string,
@@ -2158,7 +1976,7 @@ void RegExpMatchAllAssembler::Generate(TNode<Context> context,
 // CreateRegExpStringIterator ( R, S, global, fullUnicode )
 TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
     TNode<Context> native_context, TNode<Object> regexp, TNode<String> string,
-    TNode<Int32T> global, TNode<Int32T> full_unicode) {
+    TNode<BoolT> global, TNode<BoolT> full_unicode) {
   TNode<Map> map = CAST(LoadContextElement(
       native_context,
       Context::INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX));
@@ -2166,7 +1984,7 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
   // 4. Let iterator be ObjectCreate(%RegExpStringIteratorPrototype%, «
   // [[IteratingRegExp]], [[IteratedString]], [[Global]], [[Unicode]],
   // [[Done]] »).
-  TNode<Object> iterator = Allocate(JSRegExpStringIterator::kSize);
+  TNode<HeapObject> iterator = Allocate(JSRegExpStringIterator::kSize);
   StoreMapNoWriteBarrier(iterator, map);
   StoreObjectFieldRoot(iterator,
                        JSRegExpStringIterator::kPropertiesOrHashOffset,
@@ -2182,26 +2000,18 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
   StoreObjectFieldNoWriteBarrier(
       iterator, JSRegExpStringIterator::kIteratedStringOffset, string);
 
-#ifdef DEBUG
-  // Verify global and full_unicode can be bitwise shifted without masking.
-  TNode<Int32T> zero = Int32Constant(0);
-  TNode<Int32T> one = Int32Constant(1);
-  CSA_ASSERT(this,
-             Word32Or(Word32Equal(global, zero), Word32Equal(global, one)));
-  CSA_ASSERT(this, Word32Or(Word32Equal(full_unicode, zero),
-                            Word32Equal(full_unicode, one)));
-#endif  // DEBUG
-
   // 7. Set iterator.[[Global]] to global.
   // 8. Set iterator.[[Unicode]] to fullUnicode.
   // 9. Set iterator.[[Done]] to false.
-  TNode<Word32T> global_flag =
-      Word32Shl(global, Int32Constant(JSRegExpStringIterator::kGlobalBit));
-  TNode<Word32T> unicode_flag = Word32Shl(
-      full_unicode, Int32Constant(JSRegExpStringIterator::kUnicodeBit));
-  TNode<Word32T> iterator_flags = Word32Or(global_flag, unicode_flag);
+  TNode<Int32T> global_flag =
+      Word32Shl(ReinterpretCast<Int32T>(global),
+                Int32Constant(JSRegExpStringIterator::kGlobalBit));
+  TNode<Int32T> unicode_flag =
+      Word32Shl(ReinterpretCast<Int32T>(full_unicode),
+                Int32Constant(JSRegExpStringIterator::kUnicodeBit));
+  TNode<Int32T> iterator_flags = Word32Or(global_flag, unicode_flag);
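+  // E.g. (illustrative): with global and full_unicode both true, this packs
+  // (1 << kGlobalBit) | (1 << kUnicodeBit). The casts are safe since a BoolT
+  // is always 0 or 1 and thus needs no masking before the shift.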
   StoreObjectFieldNoWriteBarrier(iterator, JSRegExpStringIterator::kFlagsOffset,
-                                 SmiFromInt32(Signed(iterator_flags)));
+                                 SmiFromInt32(iterator_flags));
 
   return iterator;
 }
@@ -2210,26 +2020,15 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
 // RegExp.prototype [ @@matchAll ] ( string )
 TF_BUILTIN(RegExpPrototypeMatchAll, RegExpMatchAllAssembler) {
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
   TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
   Generate(context, native_context, receiver, maybe_string);
 }
 
-// Helper that skips a few initial checks. and assumes...
-// 1) receiver is a "fast" RegExp
-// 2) pattern is a string
-TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) {
-  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-  TNode<String> string = CAST(Parameter(Descriptor::kPattern));
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
-  RegExpPrototypeMatchBody(context, receiver, string, true);
-}
-
 void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
     TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string) {
-  CSA_ASSERT(this, IsFastRegExp(context, regexp));
+  CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp));
 
   // Grab the initial value of last index.
   TNode<Smi> previous_last_index = FastLoadLastIndex(regexp);
@@ -2248,7 +2047,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
     FastStoreLastIndex(regexp, previous_last_index);
 
     // Return the index of the match.
-    Node* const index = LoadFixedArrayElement(
+    TNode<Object> const index = LoadFixedArrayElement(
         match_indices, RegExpMatchInfo::kFirstCaptureIndex);
     Return(index);
   }
@@ -2262,17 +2061,17 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
 }
 
 void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
-    Node* const context, Node* const regexp, Node* const string) {
+    TNode<Context> context, Node* const regexp, Node* const string) {
   CSA_ASSERT(this, IsJSReceiver(regexp));
   CSA_ASSERT(this, IsString(string));
 
   Isolate* const isolate = this->isolate();
 
-  Node* const smi_zero = SmiZero();
+  TNode<Smi> const smi_zero = SmiZero();
 
   // Grab the initial value of last index.
-  Node* const previous_last_index =
-      SlowLoadLastIndex(CAST(context), CAST(regexp));
+  TNode<Object> const previous_last_index =
+      SlowLoadLastIndex(context, CAST(regexp));
 
   // Ensure last index is 0.
   {
@@ -2286,13 +2085,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
   }
 
   // Call exec.
-  Node* const exec_result = RegExpExec(context, regexp, string);
+  TNode<Object> const exec_result = RegExpExec(context, regexp, string);
 
   // Reset last index if necessary.
   {
     Label next(this), slow(this, Label::kDeferred);
-    Node* const current_last_index =
-        SlowLoadLastIndex(CAST(context), CAST(regexp));
+    TNode<Object> const current_last_index =
+        SlowLoadLastIndex(context, CAST(regexp));
 
     BranchIfSameValue(current_last_index, previous_last_index, &next, &slow);
 
@@ -2317,8 +2116,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
 
     BIND(&fast_result);
     {
-      Node* const index =
-          LoadObjectField(exec_result, JSRegExpResult::kIndexOffset);
+      TNode<Object> const index =
+          LoadObjectField(CAST(exec_result), JSRegExpResult::kIndexOffset);
       Return(index);
     }
 
@@ -2341,13 +2140,13 @@ TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
   ThrowIfNotJSReceiver(context, maybe_receiver,
                        MessageTemplate::kIncompatibleMethodReceiver,
                        "RegExp.prototype.@@search");
-  Node* const receiver = maybe_receiver;
+  TNode<JSReceiver> receiver = CAST(maybe_receiver);
 
   // Convert {maybe_string} to a String.
   TNode<String> const string = ToString_Inline(context, maybe_string);
 
   Label fast_path(this), slow_path(this);
-  BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
+  BranchIfFastRegExp_Permissive(context, receiver, &fast_path, &slow_path);
 
   BIND(&fast_path);
   // TODO(pwong): Could be optimized to remove the overhead of calling the
@@ -2371,13 +2170,12 @@ TF_BUILTIN(RegExpSearchFast, RegExpBuiltinsAssembler) {
 
 // Generates the fast path for @@split. {regexp} is an unmodified, non-sticky
 // JSRegExp, {string} is a String, and {limit} is a Smi.
-void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
-                                                       Node* const regexp,
+void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context,
+                                                       TNode<JSRegExp> regexp,
                                                        TNode<String> string,
                                                        TNode<Smi> const limit) {
-  CSA_ASSERT(this, IsFastRegExp(context, regexp));
-  CSA_ASSERT(this,
-             Word32BinaryNot(FastFlagGetter(CAST(regexp), JSRegExp::kSticky)));
+  CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp));
+  CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
 
   TNode<IntPtrT> const int_limit = SmiUntag(limit);
 
@@ -2385,7 +2183,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
   const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
 
   Node* const allocation_site = nullptr;
-  Node* const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
 
   Label return_empty_array(this, Label::kDeferred);
@@ -2407,10 +2205,10 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
 
     BIND(&if_stringisempty);
     {
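+      // E.g. (illustrative): "".split(/(?:)/) yields [] because the regexp
+      // matches the empty string, while "".split(/x/) yields [""].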
-      Node* const last_match_info = LoadContextElement(
+      TNode<Object> const last_match_info = LoadContextElement(
           native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
 
-      Node* const match_indices =
+      TNode<Object> const match_indices =
           CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
                       SmiZero(), last_match_info);
 
@@ -2464,7 +2262,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
 
     // Search for the given {regexp}.
 
-    Node* const last_match_info = LoadContextElement(
+    TNode<Object> const last_match_info = LoadContextElement(
         native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
 
     TNode<HeapObject> const match_indices_ho =
@@ -2499,8 +2297,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
       GotoIfNot(SmiEqual(match_to, next_search_from), &next);
       GotoIfNot(SmiEqual(match_to, last_matched_until), &next);
 
-      Node* const is_unicode = FastFlagGetter(CAST(regexp), JSRegExp::kUnicode);
-      Node* const new_next_search_from =
+      TNode<BoolT> const is_unicode =
+          FastFlagGetter(regexp, JSRegExp::kUnicode);
+      TNode<Number> const new_next_search_from =
           AdvanceStringIndex(string, next_search_from, is_unicode, true);
       var_next_search_from = CAST(new_next_search_from);
       Goto(&loop);
@@ -2518,9 +2317,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
 
     // Add all captures to the array.
     {
-      Node* const num_registers = LoadFixedArrayElement(
-          match_indices, RegExpMatchInfo::kNumberOfCapturesIndex);
-      Node* const int_num_registers = SmiUntag(num_registers);
+      TNode<Smi> const num_registers = CAST(LoadFixedArrayElement(
+          match_indices, RegExpMatchInfo::kNumberOfCapturesIndex));
+      TNode<IntPtrT> const int_num_registers = SmiUntag(num_registers);
 
       VARIABLE(var_reg, MachineType::PointerRepresentation());
       var_reg.Bind(IntPtrConstant(2));
@@ -2535,7 +2334,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
       BIND(&nested_loop);
       {
         Node* const reg = var_reg.value();
-        Node* const from = LoadFixedArrayElement(
+        TNode<Object> const from = LoadFixedArrayElement(
             match_indices, reg,
             RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize, mode);
         TNode<Smi> const to = CAST(LoadFixedArrayElement(
@@ -2565,7 +2364,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
           array.Push(CAST(var_value.value()));
           GotoIf(WordEqual(array.length(), int_limit), &out);
 
-          Node* const new_reg = IntPtrAdd(reg, IntPtrConstant(2));
+          TNode<WordT> const new_reg = IntPtrAdd(reg, IntPtrConstant(2));
           var_reg.Bind(new_reg);
 
           Branch(IntPtrLessThan(new_reg, int_num_registers), &nested_loop,
@@ -2583,7 +2382,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
 
   BIND(&push_suffix_and_out);
   {
-    Node* const from = var_last_matched_until.value();
+    TNode<Smi> const from = var_last_matched_until.value();
     Node* const to = string_length;
     array.Push(CallBuiltin(Builtins::kSubString, context, string, from, to));
     Goto(&out);
@@ -2591,7 +2390,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
 
   BIND(&out);
   {
-    Node* const result = array.ToJSArray(CAST(context));
+    TNode<JSArray> const result = array.ToJSArray(context);
     Return(result);
   }
 
@@ -2612,11 +2411,9 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
   TNode<Object> maybe_limit = CAST(Parameter(Descriptor::kLimit));
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
-  CSA_ASSERT(this, IsFastRegExp(context, regexp));
-
-  // TODO(jgruber): Even if map checks send us to the fast path, we still need
-  // to verify the constructor property and jump to the slow path if it has
-  // been changed.
+  CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) {
+    BranchIfFastRegExp_Strict(context, regexp, ok, not_ok);
+  });
 
   // Verify {maybe_limit}.
 
@@ -2679,13 +2476,16 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
   ThrowIfNotJSReceiver(context, maybe_receiver,
                        MessageTemplate::kIncompatibleMethodReceiver,
                        "RegExp.prototype.@@split");
-  Node* const receiver = maybe_receiver;
+  TNode<JSReceiver> receiver = CAST(maybe_receiver);
 
   // Convert {maybe_string} to a String.
-  TNode<String> const string = ToString_Inline(context, maybe_string);
+  TNode<String> string = ToString_Inline(context, maybe_string);
 
+  // Strict: Reads the flags property.
+  // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+  // permissive.
   Label stub(this), runtime(this, Label::kDeferred);
-  BranchIfFastRegExp(context, receiver, &stub, &runtime);
+  BranchIfFastRegExp_Strict(context, receiver, &stub, &runtime);
 
   BIND(&stub);
   args.PopAndReturn(CallBuiltin(Builtins::kRegExpSplit, context, receiver,
@@ -2753,12 +2553,11 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
   GotoIf(HasDoneFlag(flags), &return_empty_done_result);
 
   // 5. Let R be O.[[IteratingRegExp]].
-  TNode<Object> iterating_regexp =
-      LoadObjectField(receiver, JSRegExpStringIterator::kIteratingRegExpOffset);
+  TNode<JSReceiver> iterating_regexp = CAST(LoadObjectField(
+      receiver, JSRegExpStringIterator::kIteratingRegExpOffset));
 
-  // TODO(jgruber): Verify that this is guaranteed.
-  CSA_CHECK(this, TaggedIsNotSmi(iterating_regexp));
-  CSA_CHECK(this, IsJSReceiver(CAST(iterating_regexp)));
+  // For extra safety, also check the type in release mode.
+  CSA_CHECK(this, IsJSReceiver(iterating_regexp));
 
   // 6. Let S be O.[[IteratedString]].
   TNode<String> iterating_string = CAST(
@@ -2775,23 +2574,23 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
   TVARIABLE(BoolT, var_is_fast_regexp);
   {
     Label if_fast(this), if_slow(this, Label::kDeferred);
-    BranchIfFastRegExp(context, iterating_regexp, &if_fast, &if_slow);
+    BranchIfFastRegExp_Permissive(context, iterating_regexp, &if_fast,
+                                  &if_slow);
 
     BIND(&if_fast);
     {
       TNode<RegExpMatchInfo> match_indices =
-          RegExpPrototypeExecBodyWithoutResult(context, CAST(iterating_regexp),
-                                               iterating_string, &if_no_match,
-                                               true);
+          RegExpPrototypeExecBodyWithoutResult(
+              context, iterating_regexp, iterating_string, &if_no_match, true);
       var_match = ConstructNewResultFromMatchInfo(
-          context, CAST(iterating_regexp), match_indices, iterating_string);
+          context, iterating_regexp, match_indices, iterating_string);
       var_is_fast_regexp = Int32TrueConstant();
       Goto(&if_match);
     }
 
     BIND(&if_slow);
     {
-      var_match = CAST(RegExpExec(context, iterating_regexp, iterating_string));
+      var_match = RegExpExec(context, iterating_regexp, iterating_string);
       var_is_fast_regexp = Int32FalseConstant();
       Branch(IsNull(var_match.value()), &if_no_match, &if_match);
     }
@@ -2836,7 +2635,7 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
 
         // When iterating_regexp is fast, we assume it stays fast even after
         // accessing the first match from the RegExp result.
-        CSA_ASSERT(this, IsFastRegExp(context, iterating_regexp));
+        CSA_ASSERT(this, IsFastRegExpPermissive(context, iterating_regexp));
         GotoIfNot(IsEmptyString(match_str), &return_result);
 
         // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 3677314f195ead..de841f57b292f0 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -17,11 +17,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
   explicit RegExpBuiltinsAssembler(compiler::CodeAssemblerState* state)
       : CodeStubAssembler(state) {}
 
-  void BranchIfFastRegExp(
-      Node* const context, Node* const object, Node* const map,
-      base::Optional<DescriptorIndexAndName> additional_property_to_check,
-      Label* const if_isunmodified, Label* const if_ismodified);
-
   // Create and initialize a RegExp object.
   TNode<Object> RegExpCreate(TNode<Context> context,
                              TNode<Context> native_context,
@@ -35,6 +30,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
   TNode<Smi> SmiZero();
   TNode<IntPtrT> IntPtrZero();
 
+  TNode<RawPtrT> LoadCodeObjectEntry(TNode<Code> code);
+
   // Allocate a RegExpResult with the given length (the number of captures,
   // including the match itself), index (the index where the match starts),
   // and input string.
@@ -53,7 +50,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
   void FastStoreLastIndex(TNode<JSRegExp> regexp, TNode<Smi> value);
   void SlowStoreLastIndex(SloppyTNode<Context> context,
                           SloppyTNode<Object> regexp,
-                          SloppyTNode<Number> value);
+                          SloppyTNode<Object> value);
   void StoreLastIndex(TNode<Context> context, TNode<Object> regexp,
                       TNode<Number> value, bool is_fastpath);
 
@@ -79,7 +76,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
       TNode<Context> context, TNode<JSReceiver> maybe_regexp,
       TNode<String> string, Label* if_didnotmatch, const bool is_fastpath);
   TNode<RegExpMatchInfo> RegExpPrototypeExecBodyWithoutResultFast(
-      TNode<Context> context, TNode<JSReceiver> maybe_regexp,
+      TNode<Context> context, TNode<JSRegExp> maybe_regexp,
       TNode<String> string, Label* if_didnotmatch);
 
   TNode<HeapObject> RegExpPrototypeExecBody(TNode<Context> context,
@@ -87,50 +84,90 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
                                             TNode<String> string,
                                             const bool is_fastpath);
 
-  Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
-                             MessageTemplate msg_template,
-                             char const* method_name);
-
-  // Analogous to BranchIfFastRegExp, for use in asserts.
-  TNode<BoolT> IsFastRegExp(SloppyTNode<Context> context,
-                            SloppyTNode<Object> object);
+  TNode<BoolT> IsReceiverInitialRegExpPrototype(SloppyTNode<Context> context,
+                                                SloppyTNode<Object> receiver);
+
+  // Fast path check logic.
+  //
+  // Are you afraid? If not, you should be.
+  //
+  // It's complicated. Fast path checks protect certain assumptions, e.g. that
+  // relevant properties on the regexp prototype (such as exec, @@split, global)
+  // are unmodified.
+  //
+  // These assumptions differ by callsite. For example, RegExpPrototypeExec
+  // cares whether the exec property has been modified; but it's totally fine
+  // to modify other prototype properties. On the other hand,
+  // StringPrototypeSplit does care very much whether @@split has been changed.
+  //
+  // We want to keep regexp execution on the fast path as much as possible.
+  // Ideally, we could simply check if the regexp prototype has been modified;
+  // yet common web frameworks routinely mutate it for various reasons. But most
+  // of these mutations should happen in a way that still allows us to remain
+  // on the fast path. To support this, the fast path check logic necessarily
+  // becomes more involved.
+  //
+  // There are multiple knobs to twiddle for regexp fast path checks. We support
+  // checks that completely ignore the prototype, checks that verify specific
+  // properties on the prototype (the caller must ensure it passes in the right
+  // ones), and strict checks that additionally ensure the prototype is
+  // unchanged (we use these when we'd have to check multiple properties we
+  // don't care too much about, e.g. all individual flag getters).
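+  //
+  // For example (illustrative JS; 'myHelper' is a hypothetical name):
+  //
+  //   RegExp.prototype.myHelper = function () {};  // strict fails,
+  //                                                // permissive still passes.
+  //   RegExp.prototype.exec = function () {};      // both checks fail.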
+
+  using DescriptorIndexNameValue =
+      PrototypeCheckAssembler::DescriptorIndexNameValue;
 
-  void BranchIfFastRegExp(Node* const context, Node* const object,
-                          Label* const if_isunmodified,
-                          Label* const if_ismodified);
+  void BranchIfFastRegExp(
+      TNode<Context> context, TNode<HeapObject> object, TNode<Map> map,
+      PrototypeCheckAssembler::Flags prototype_check_flags,
+      base::Optional<DescriptorIndexNameValue> additional_property_to_check,
+      Label* if_isunmodified, Label* if_ismodified);
+
+  // Strict: Does not tolerate any changes to the prototype map.
+  // Permissive: Allows changes to the prototype map except for the exec
+  //             property.
+  void BranchIfFastRegExp_Strict(TNode<Context> context,
+                                 TNode<HeapObject> object,
+                                 Label* if_isunmodified, Label* if_ismodified);
+  void BranchIfFastRegExp_Permissive(TNode<Context> context,
+                                     TNode<HeapObject> object,
+                                     Label* if_isunmodified,
+                                     Label* if_ismodified);
 
   // Performs fast path checks on the given object itself, but omits prototype
   // checks.
-  Node* IsFastRegExpNoPrototype(Node* const context, Node* const object);
+  Node* IsFastRegExpNoPrototype(SloppyTNode<Context> context,
+                                SloppyTNode<Object> object);
+  Node* IsFastRegExpNoPrototype(SloppyTNode<Context> context,
+                                SloppyTNode<Object> object,
+                                SloppyTNode<Map> map);
+
+  // For debugging only. Uses a slow GetProperty call to fetch object.exec.
   TNode<BoolT> IsFastRegExpWithOriginalExec(TNode<Context> context,
                                             TNode<JSRegExp> object);
-  Node* IsFastRegExpNoPrototype(Node* const context, Node* const object,
-                                Node* const map);
 
   void BranchIfFastRegExpResult(Node* const context, Node* const object,
                                 Label* if_isunmodified, Label* if_ismodified);
 
-  Node* FlagsGetter(Node* const context, Node* const regexp, bool is_fastpath);
+  TNode<String> FlagsGetter(TNode<Context> context, TNode<Object> regexp,
+                            const bool is_fastpath);
 
-  TNode<Int32T> FastFlagGetter(TNode<JSRegExp> regexp, JSRegExp::Flag flag);
+  TNode<BoolT> FastFlagGetter(TNode<JSRegExp> regexp, JSRegExp::Flag flag);
   TNode<BoolT> FastFlagGetterGlobal(TNode<JSRegExp> regexp) {
-    return ReinterpretCast<BoolT>(FastFlagGetter(regexp, JSRegExp::kGlobal));
+    return FastFlagGetter(regexp, JSRegExp::kGlobal);
   }
   TNode<BoolT> FastFlagGetterUnicode(TNode<JSRegExp> regexp) {
-    return ReinterpretCast<BoolT>(FastFlagGetter(regexp, JSRegExp::kUnicode));
+    return FastFlagGetter(regexp, JSRegExp::kUnicode);
   }
-  TNode<Int32T> SlowFlagGetter(TNode<Context> context, TNode<Object> regexp,
-                               JSRegExp::Flag flag);
-  TNode<Int32T> FlagGetter(TNode<Context> context, TNode<Object> regexp,
-                           JSRegExp::Flag flag, bool is_fastpath);
-
-  void FlagGetter(Node* context, Node* receiver, JSRegExp::Flag flag,
-                  int counter, const char* method_name);
+  TNode<BoolT> SlowFlagGetter(TNode<Context> context, TNode<Object> regexp,
+                              JSRegExp::Flag flag);
+  TNode<BoolT> FlagGetter(TNode<Context> context, TNode<Object> regexp,
+                          JSRegExp::Flag flag, bool is_fastpath);
 
   Node* RegExpInitialize(Node* const context, Node* const regexp,
                          Node* const maybe_pattern, Node* const maybe_flags);
 
-  Node* RegExpExec(Node* context, Node* regexp, Node* string);
+  TNode<Object> RegExpExec(TNode<Context> context, Node* regexp, Node* string);
 
   TNode<Number> AdvanceStringIndex(SloppyTNode<String> string,
                                    SloppyTNode<Number> index,
@@ -142,17 +179,18 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
     return CAST(AdvanceStringIndex(string, index, is_unicode, true));
   }
 
-  void RegExpPrototypeMatchBody(TNode<Context> context, TNode<Object> regexp,
-                                TNode<String> const string,
-                                const bool is_fastpath);
+  TNode<Object> RegExpPrototypeMatchBody(TNode<Context> context,
+                                         TNode<Object> regexp,
+                                         TNode<String> const string,
+                                         const bool is_fastpath);
 
   void RegExpPrototypeSearchBodyFast(TNode<Context> context,
                                      TNode<JSRegExp> regexp,
                                      TNode<String> string);
-  void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp,
+  void RegExpPrototypeSearchBodySlow(TNode<Context> context, Node* const regexp,
                                      Node* const string);
 
-  void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
+  void RegExpPrototypeSplitBody(TNode<Context> context, TNode<JSRegExp> regexp,
                                 TNode<String> const string,
                                 TNode<Smi> const limit);
 };
@@ -165,8 +203,8 @@ class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler {
   TNode<Object> CreateRegExpStringIterator(TNode<Context> native_context,
                                            TNode<Object> regexp,
                                            TNode<String> string,
-                                           TNode<Int32T> global,
-                                           TNode<Int32T> full_unicode);
+                                           TNode<BoolT> global,
+                                           TNode<BoolT> full_unicode);
   void Generate(TNode<Context> context, TNode<Context> native_context,
                 TNode<Object> receiver, TNode<Object> maybe_string);
 };
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 1e9ac8377c84a4..8ae89187ecbc67 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -26,7 +26,7 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
                                                      Node* value,
                                                      Node* value_high);
   void ValidateSharedTypedArray(Node* tagged, Node* context,
-                                Node** out_elements_kind,
+                                TNode<Int32T>* out_elements_kind,
                                 Node** out_backing_store);
   Node* ConvertTaggedAtomicIndexToWord32(Node* tagged, Node* context,
                                          Node** number_index);
@@ -46,7 +46,7 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
 };
 
 void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
-    Node* tagged, Node* context, Node** out_elements_kind,
+    Node* tagged, Node* context, TNode<Int32T>* out_elements_kind,
     Node** out_backing_store) {
   Label not_float_or_clamped(this), invalid(this);
 
@@ -54,7 +54,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
   GotoIf(TaggedIsSmi(tagged), &invalid);
 
   // Fail if the array's instance type is not JSTypedArray.
-  Node* tagged_map = LoadMap(tagged);
+  TNode<Map> tagged_map = LoadMap(tagged);
   GotoIfNot(IsJSTypedArrayMap(tagged_map), &invalid);
 
   // Fail if the array's JSArrayBuffer is not shared.
@@ -69,7 +69,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
   STATIC_ASSERT(UINT8_ELEMENTS < FLOAT32_ELEMENTS);
   STATIC_ASSERT(UINT16_ELEMENTS < FLOAT32_ELEMENTS);
   STATIC_ASSERT(UINT32_ELEMENTS < FLOAT32_ELEMENTS);
-  Node* elements_kind = LoadMapElementsKind(tagged_map);
+  TNode<Int32T> elements_kind = LoadMapElementsKind(tagged_map);
   GotoIf(Int32LessThan(elements_kind, Int32Constant(FLOAT32_ELEMENTS)),
          &not_float_or_clamped);
   STATIC_ASSERT(BIGINT64_ELEMENTS > UINT8_CLAMPED_ELEMENTS);
@@ -167,7 +167,7 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   Node* index = Parameter(Descriptor::kIndex);
   Node* context = Parameter(Descriptor::kContext);
 
-  Node* elements_kind;
+  TNode<Int32T> elements_kind;
   Node* backing_store;
   ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
 
@@ -175,7 +175,7 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   Node* index_word32 =
       ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
   ValidateAtomicIndex(array, index_word32, context);
-  Node* index_word = ChangeUint32ToWord(index_word32);
+  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
 
   Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
       i64(this), u64(this), other(this);
@@ -239,7 +239,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   Node* value = Parameter(Descriptor::kValue);
   Node* context = Parameter(Descriptor::kContext);
 
-  Node* elements_kind;
+  TNode<Int32T> elements_kind;
   Node* backing_store;
   ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
 
@@ -247,14 +247,14 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   Node* index_word32 =
       ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
   ValidateAtomicIndex(array, index_word32, context);
-  Node* index_word = ChangeUint32ToWord(index_word32);
+  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
 
   Label u8(this), u16(this), u32(this), u64(this), other(this);
   STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
   STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
   GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &u64);
 
-  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
+  TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
   Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
 
 #if DEBUG
@@ -313,7 +313,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   Node* value = Parameter(Descriptor::kValue);
   Node* context = Parameter(Descriptor::kContext);
 
-  Node* elements_kind;
+  TNode<Int32T> elements_kind;
   Node* backing_store;
   ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
 
@@ -326,7 +326,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_integer,
                      value));
 #else
-  Node* index_word = ChangeUint32ToWord(index_word32);
+  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
 
   Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
       i64(this), u64(this), big(this), other(this);
@@ -334,7 +334,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
   GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);
 
-  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
+  TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
 #if DEBUG
   DebugSanityCheckAtomicIndex(array, index_word32, context);
 #endif
@@ -415,7 +415,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
   Node* new_value = Parameter(Descriptor::kNewValue);
   Node* context = Parameter(Descriptor::kContext);
 
-  Node* elements_kind;
+  TNode<Int32T> elements_kind;
   Node* backing_store;
   ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
 
@@ -429,7 +429,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
   Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
                      index_integer, old_value, new_value));
 #else
-  Node* index_word = ChangeUint32ToWord(index_word32);
+  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
 
   Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
       i64(this), u64(this), big(this), other(this);
@@ -437,8 +437,10 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
   STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
   GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);
 
-  Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
-  Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
+  TNode<Number> old_value_integer =
+      ToInteger_Inline(CAST(context), CAST(old_value));
+  TNode<Number> new_value_integer =
+      ToInteger_Inline(CAST(context), CAST(new_value));
 #if DEBUG
   DebugSanityCheckAtomicIndex(array, index_word32, context);
 #endif
@@ -543,7 +545,7 @@ BINOP_BUILTIN(Xor)
 void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
     Node* array, Node* index, Node* value, Node* context,
     AssemblerFunction function, Runtime::FunctionId runtime_function) {
-  Node* elements_kind;
+  TNode<Int32T> elements_kind;
   Node* backing_store;
   ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
 
@@ -556,7 +558,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
     V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
   Return(CallRuntime(runtime_function, context, array, index_integer, value));
 #else
-  Node* index_word = ChangeUint32ToWord(index_word32);
+  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
 
   Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
       i64(this), u64(this), big(this), other(this);
@@ -565,7 +567,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
   STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
   GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);
 
-  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
+  TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
 #if DEBUG
   DebugSanityCheckAtomicIndex(array, index_word32, context);
 #endif
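
The hunks in this file repeat one pattern: the validated atomic index arrives as a uint32 and is zero-extended to pointer width (ChangeUint32ToWord, now typed TNode<UintPtrT>) before it addresses the backing store. A plain-C++ sketch of that computation, with illustrative names:

    #include <cstdint>

    // Zero-extend a validated 32-bit element index, then scale by the element
    // size to get a byte offset into the backing store (illustrative name).
    uintptr_t ElementByteOffset(uint32_t index, unsigned elem_size_log2) {
      return static_cast<uintptr_t>(index) << elem_size_log2;
    }

    int main() {
      return ElementByteOffset(3, 2) == 12 ? 0 : 1;  // 3 * sizeof(int32_t)
    }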
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index fa6534d463f406..e6251c9480fbcb 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -22,8 +22,23 @@ namespace internal {
 // See builtins-arraybuffer.cc for implementations of
 // SharedArrayBuffer.prototype.byteLength and SharedArrayBuffer.prototype.slice
 
+// #sec-atomics.islockfree
 inline bool AtomicIsLockFree(double size) {
-  return size == 1 || size == 2 || size == 4;
+  // According to the standard, 1, 2, and 4 byte atomics are supposed to be
+  // 'lock free' on every platform. But what exactly does 'lock free' mean?
+  // For example, on x64 V8 uses a lock prefix to implement the semantics of
+  // many atomic operations. Is that considered a lock? Probably not.
+  //
+  // On the other hand, V8 emits a few instructions for some arm atomics which
+  // do appear to be a low level form of a spin lock. With an abundance of
+  // caution, we only claim to have 'true lock free' support for 8 byte sizes
+  // on x64 platforms. If people care about this function returning true, then
+  // we need to clarify exactly what 'lock free' means at the standard level.
+  bool is_lock_free = size == 1 || size == 2 || size == 4;
+#if V8_TARGET_ARCH_X64
+  is_lock_free |= size == 8;
+#endif
+  return is_lock_free;
 }
 
 // ES #sec-atomics.islockfree
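
For quick testing outside V8, the predicate above can be restated standalone; this sketch substitutes generic compiler macros for V8's build-time V8_TARGET_ARCH_X64 define:

    #include <cstdio>

    // Mirror of AtomicIsLockFree above: 1, 2, and 4 bytes everywhere,
    // plus 8 bytes on x64 only (macro spelling is an assumption here).
    bool AtomicIsLockFreeSketch(double size) {
      bool is_lock_free = size == 1 || size == 2 || size == 4;
    #if defined(__x86_64__) || defined(_M_X64)
      is_lock_free |= size == 8;
    #endif
      return is_lock_free;
    }

    int main() {
      std::printf("isLockFree(8) -> %d\n", AtomicIsLockFreeSketch(8));
    }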
@@ -37,12 +52,16 @@ BUILTIN(AtomicsIsLockFree) {
 
 // ES #sec-validatesharedintegertypedarray
 V8_WARN_UNUSED_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray(
-    Isolate* isolate, Handle<Object> object, bool only_int32 = false) {
+    Isolate* isolate, Handle<Object> object,
+    bool only_int32_and_big_int64 = false) {
   if (object->IsJSTypedArray()) {
     Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object);
     if (typed_array->GetBuffer()->is_shared()) {
-      if (only_int32) {
-        if (typed_array->type() == kExternalInt32Array) return typed_array;
+      if (only_int32_and_big_int64) {
+        if (typed_array->type() == kExternalInt32Array ||
+            typed_array->type() == kExternalBigInt64Array) {
+          return typed_array;
+        }
       } else {
         if (typed_array->type() != kExternalFloat32Array &&
             typed_array->type() != kExternalFloat64Array &&
@@ -54,8 +73,9 @@ V8_WARN_UNUSED_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray(
 
   THROW_NEW_ERROR(
       isolate,
-      NewTypeError(only_int32 ? MessageTemplate::kNotInt32SharedTypedArray
-                              : MessageTemplate::kNotIntegerSharedTypedArray,
+      NewTypeError(only_int32_and_big_int64
+                       ? MessageTemplate::kNotInt32OrBigInt64SharedTypedArray
+                       : MessageTemplate::kNotIntegerSharedTypedArray,
                    object),
       JSTypedArray);
 }
@@ -83,6 +103,15 @@ V8_WARN_UNUSED_RESULT Maybe<size_t> ValidateAtomicAccess(
 }
 
 namespace {
+
+inline size_t GetAddress64(size_t index, size_t byte_offset) {
+  return (index << 3) + byte_offset;
+}
+
+inline size_t GetAddress32(size_t index, size_t byte_offset) {
+  return (index << 2) + byte_offset;
+}
+
 MaybeHandle<Object> AtomicsWake(Isolate* isolate, Handle<Object> array,
                                 Handle<Object> index, Handle<Object> count) {
   Handle<JSTypedArray> sta;
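
The two helpers introduced above encode the element widths of Int32Array (4 bytes, shift 2) and BigInt64Array (8 bytes, shift 3). A compile-time check of that correspondence:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t GetAddress64(size_t index, size_t byte_offset) {
      return (index << 3) + byte_offset;
    }
    constexpr size_t GetAddress32(size_t index, size_t byte_offset) {
      return (index << 2) + byte_offset;
    }

    static_assert(GetAddress64(5, 16) == 5 * sizeof(int64_t) + 16,
                  "shift 3 == x8, the BigInt64Array element size");
    static_assert(GetAddress32(5, 16) == 5 * sizeof(int32_t) + 16,
                  "shift 2 == x4, the Int32Array element size");

    int main() { return 0; }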
@@ -109,9 +138,19 @@ MaybeHandle<Object> AtomicsWake(Isolate* isolate, Handle<Object> array,
   }
 
   Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
-  size_t addr = (i << 2) + sta->byte_offset();
 
-  return Handle<Object>(FutexEmulation::Wake(array_buffer, addr, c), isolate);
+  if (sta->type() == kExternalBigInt64Array) {
+    return Handle<Object>(
+        FutexEmulation::Wake(array_buffer, GetAddress64(i, sta->byte_offset()),
+                             c),
+        isolate);
+  } else {
+    DCHECK(sta->type() == kExternalInt32Array);
+    return Handle<Object>(
+        FutexEmulation::Wake(array_buffer, GetAddress32(i, sta->byte_offset()),
+                             c),
+        isolate);
+  }
 }
 
 }  // namespace
@@ -157,9 +196,16 @@ BUILTIN(AtomicsWait) {
   if (maybe_index.IsNothing()) return ReadOnlyRoots(isolate).exception();
   size_t i = maybe_index.FromJust();
 
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
-                                     Object::ToInt32(isolate, value));
-  int32_t value_int32 = NumberToInt32(*value);
+  // According to the spec, we have to check value's type before
+  // looking at the timeout.
+  if (sta->type() == kExternalBigInt64Array) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+                                       BigInt::FromObject(isolate, value));
+  } else {
+    DCHECK(sta->type() == kExternalInt32Array);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+                                       Object::ToInt32(isolate, value));
+  }
 
   double timeout_number;
   if (timeout->IsUndefined(isolate)) {
@@ -180,10 +226,17 @@ BUILTIN(AtomicsWait) {
   }
 
   Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
-  size_t addr = (i << 2) + sta->byte_offset();
 
-  return FutexEmulation::WaitJs(isolate, array_buffer, addr, value_int32,
-                                timeout_number);
+  if (sta->type() == kExternalBigInt64Array) {
+    return FutexEmulation::WaitJs64(
+        isolate, array_buffer, GetAddress64(i, sta->byte_offset()),
+        Handle<BigInt>::cast(value)->AsInt64(), timeout_number);
+  } else {
+    DCHECK(sta->type() == kExternalInt32Array);
+    return FutexEmulation::WaitJs32(isolate, array_buffer,
+                                    GetAddress32(i, sta->byte_offset()),
+                                    NumberToInt32(*value), timeout_number);
+  }
 }
 
 }  // namespace internal
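
The wait path above now forks on the array's element type: the value operand is coerced first (BigInt::FromObject vs. Object::ToInt32, matching the spec's ordering before the timeout is read), then the width-matched futex wait runs at the width-correct address. A condensed sketch of that shape, with stubbed stand-ins for FutexEmulation::WaitJs32/WaitJs64:

    #include <cstddef>
    #include <cstdint>

    enum class ElementType { kInt32, kBigInt64 };

    // Stubs standing in for the futex waits (illustrative only).
    static int WaitJs32(std::size_t, std::int32_t, double) { return 0; }
    static int WaitJs64(std::size_t, std::int64_t, double) { return 0; }

    int AtomicsWaitSketch(ElementType type, std::size_t index,
                          std::size_t byte_offset, std::int64_t value,
                          double timeout_ms) {
      if (type == ElementType::kBigInt64) {
        return WaitJs64((index << 3) + byte_offset, value, timeout_ms);
      }
      return WaitJs32((index << 2) + byte_offset,
                      static_cast<std::int32_t>(value), timeout_ms);
    }

    int main() {
      return AtomicsWaitSketch(ElementType::kInt32, 0, 0, 42, 0.0);
    }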
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 97dc8ca895b8ae..fc2745ed0a4ae5 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -65,11 +65,13 @@ void StringBuiltinsAssembler::DispatchOnStringEncodings(
 
   // First combine the encodings.
 
-  Node* const encoding_mask = Int32Constant(kStringEncodingMask);
-  Node* const lhs_encoding = Word32And(lhs_instance_type, encoding_mask);
-  Node* const rhs_encoding = Word32And(rhs_instance_type, encoding_mask);
+  TNode<Int32T> const encoding_mask = Int32Constant(kStringEncodingMask);
+  TNode<Word32T> const lhs_encoding =
+      Word32And(lhs_instance_type, encoding_mask);
+  TNode<Word32T> const rhs_encoding =
+      Word32And(rhs_instance_type, encoding_mask);
 
-  Node* const combined_encodings =
+  TNode<Word32T> const combined_encodings =
       Word32Or(lhs_encoding, Word32Shr(rhs_encoding, 1));
 
   // Then dispatch on the combined encoding.
@@ -99,9 +101,9 @@ Node* StringBuiltinsAssembler::CallSearchStringRaw(Node* const subject_ptr,
                                                    Node* const search_ptr,
                                                    Node* const search_length,
                                                    Node* const start_position) {
-  Node* const function_addr = ExternalConstant(
+  TNode<ExternalReference> const function_addr = ExternalConstant(
       ExternalReference::search_string_raw<SubjectChar, PatternChar>());
-  Node* const isolate_ptr =
+  TNode<ExternalReference> const isolate_ptr =
       ExternalConstant(ExternalReference::isolate_address(isolate()));
 
   MachineType type_ptr = MachineType::Pointer();
@@ -118,20 +120,20 @@ Node* StringBuiltinsAssembler::CallSearchStringRaw(Node* const subject_ptr,
   return result;
 }
 
-Node* StringBuiltinsAssembler::PointerToStringDataAtIndex(
+TNode<IntPtrT> StringBuiltinsAssembler::PointerToStringDataAtIndex(
     Node* const string_data, Node* const index, String::Encoding encoding) {
   const ElementsKind kind = (encoding == String::ONE_BYTE_ENCODING)
                                 ? UINT8_ELEMENTS
                                 : UINT16_ELEMENTS;
-  Node* const offset_in_bytes =
+  TNode<IntPtrT> const offset_in_bytes =
       ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS);
-  return IntPtrAdd(string_data, offset_in_bytes);
+  return Signed(IntPtrAdd(string_data, offset_in_bytes));
 }
 
-void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
-                                                  Node* right) {
-  VARIABLE(var_left, MachineRepresentation::kTagged, left);
-  VARIABLE(var_right, MachineRepresentation::kTagged, right);
+void StringBuiltinsAssembler::GenerateStringEqual(TNode<String> left,
+                                                  TNode<String> right) {
+  TVARIABLE(String, var_left, left);
+  TVARIABLE(String, var_right, right);
   Label if_equal(this), if_notequal(this), if_indirect(this, Label::kDeferred),
       restart(this, {&var_left, &var_right});
 
@@ -143,14 +145,14 @@ void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
 
   Goto(&restart);
   BIND(&restart);
-  Node* lhs = var_left.value();
-  Node* rhs = var_right.value();
+  TNode<String> lhs = var_left.value();
+  TNode<String> rhs = var_right.value();
 
-  Node* lhs_instance_type = LoadInstanceType(lhs);
-  Node* rhs_instance_type = LoadInstanceType(rhs);
+  TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
+  TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
 
-  StringEqual_Core(context, lhs, lhs_instance_type, rhs, rhs_instance_type,
-                   lhs_length, &if_equal, &if_notequal, &if_indirect);
+  StringEqual_Core(lhs, lhs_instance_type, rhs, rhs_instance_type, lhs_length,
+                   &if_equal, &if_notequal, &if_indirect);
 
   BIND(&if_indirect);
   {
@@ -158,7 +160,7 @@ void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
     MaybeDerefIndirectStrings(&var_left, lhs_instance_type, &var_right,
                               rhs_instance_type, &restart);
 
-    TailCallRuntime(Runtime::kStringEqual, context, lhs, rhs);
+    TailCallRuntime(Runtime::kStringEqual, NoContextConstant(), lhs, rhs);
   }
 
   BIND(&if_equal);
@@ -169,19 +171,17 @@ void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
 }
 
 void StringBuiltinsAssembler::StringEqual_Core(
-    Node* context, Node* lhs, Node* lhs_instance_type, Node* rhs,
+    SloppyTNode<String> lhs, Node* lhs_instance_type, SloppyTNode<String> rhs,
     Node* rhs_instance_type, TNode<IntPtrT> length, Label* if_equal,
     Label* if_not_equal, Label* if_indirect) {
-  CSA_ASSERT(this, IsString(lhs));
-  CSA_ASSERT(this, IsString(rhs));
   CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(lhs), length));
   CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(rhs), length));
   // Fast check to see if {lhs} and {rhs} refer to the same String object.
-  GotoIf(WordEqual(lhs, rhs), if_equal);
+  GotoIf(TaggedEqual(lhs, rhs), if_equal);
 
   // Combine the instance types into a single 16-bit value, so we can check
   // both of them at once.
-  Node* both_instance_types = Word32Or(
+  TNode<Word32T> both_instance_types = Word32Or(
       lhs_instance_type, Word32Shl(rhs_instance_type, Int32Constant(8)));
 
   // Check if both {lhs} and {rhs} are internalized. Since we already know
@@ -214,7 +214,7 @@ void StringBuiltinsAssembler::StringEqual_Core(
   int const kOneTwoByteStringTag = kOneByteStringTag | (kTwoByteStringTag << 8);
   Label if_oneonebytestring(this), if_twotwobytestring(this),
       if_onetwobytestring(this), if_twoonebytestring(this);
-  Node* masked_instance_types =
+  TNode<Word32T> masked_instance_types =
       Word32And(both_instance_types, Int32Constant(kBothStringEncodingMask));
   GotoIf(
       Word32Equal(masked_instance_types, Int32Constant(kOneOneByteStringTag)),
@@ -271,14 +271,14 @@ void StringBuiltinsAssembler::StringEqual_Loop(
     GotoIf(WordEqual(var_offset.value(), length), if_equal);
 
     // Load the next characters from {lhs} and {rhs}.
-    Node* lhs_value =
+    TNode<Word32T> lhs_value = UncheckedCast<Word32T>(
         Load(lhs_type, lhs_data,
              WordShl(var_offset.value(),
-                     ElementSizeLog2Of(lhs_type.representation())));
-    Node* rhs_value =
+                     ElementSizeLog2Of(lhs_type.representation()))));
+    TNode<Word32T> rhs_value = UncheckedCast<Word32T>(
         Load(rhs_type, rhs_data,
              WordShl(var_offset.value(),
-                     ElementSizeLog2Of(rhs_type.representation())));
+                     ElementSizeLog2Of(rhs_type.representation()))));
 
     // Check if the characters match.
     GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
@@ -296,28 +296,6 @@ TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) {
   Return(StringAdd(context, left, right));
 }
 
-TF_BUILTIN(StringAdd_ConvertLeft, StringBuiltinsAssembler) {
-  TNode<Object> left = CAST(Parameter(Descriptor::kLeft));
-  TNode<String> right = CAST(Parameter(Descriptor::kRight));
-  Node* context = Parameter(Descriptor::kContext);
-  // TODO(danno): The ToString and JSReceiverToPrimitive below could be
-  // combined to avoid duplicate smi and instance type checks.
-  left =
-      ToStringImpl(CAST(context), CAST(JSReceiverToPrimitive(context, left)));
-  TailCallBuiltin(Builtins::kStringAdd_CheckNone, context, left, right);
-}
-
-TF_BUILTIN(StringAdd_ConvertRight, StringBuiltinsAssembler) {
-  TNode<String> left = CAST(Parameter(Descriptor::kLeft));
-  TNode<Object> right = CAST(Parameter(Descriptor::kRight));
-  Node* context = Parameter(Descriptor::kContext);
-  // TODO(danno): The ToString and JSReceiverToPrimitive below could be
-  // combined to avoid duplicate smi and instance type checks.
-  right =
-      ToStringImpl(CAST(context), CAST(JSReceiverToPrimitive(context, right)));
-  TailCallBuiltin(Builtins::kStringAdd_CheckNone, context, left, right);
-}
-
 TF_BUILTIN(SubString, StringBuiltinsAssembler) {
   TNode<String> string = CAST(Parameter(Descriptor::kString));
   TNode<Smi> from = CAST(Parameter(Descriptor::kFrom));
@@ -325,12 +303,10 @@ TF_BUILTIN(SubString, StringBuiltinsAssembler) {
   Return(SubString(string, SmiUntag(from), SmiUntag(to)));
 }
 
-void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
-                                                                 Node* left,
-                                                                 Node* right,
-                                                                 Operation op) {
-  VARIABLE(var_left, MachineRepresentation::kTagged, left);
-  VARIABLE(var_right, MachineRepresentation::kTagged, right);
+void StringBuiltinsAssembler::GenerateStringRelationalComparison(
+    TNode<String> left, TNode<String> right, Operation op) {
+  TVARIABLE(String, var_left, left);
+  TVARIABLE(String, var_right, right);
 
   Variable* input_vars[2] = {&var_left, &var_right};
   Label if_less(this), if_equal(this), if_greater(this);
@@ -338,18 +314,18 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
   Goto(&restart);
   BIND(&restart);
 
-  Node* lhs = var_left.value();
-  Node* rhs = var_right.value();
+  TNode<String> lhs = var_left.value();
+  TNode<String> rhs = var_right.value();
   // Fast check to see if {lhs} and {rhs} refer to the same String object.
-  GotoIf(WordEqual(lhs, rhs), &if_equal);
+  GotoIf(TaggedEqual(lhs, rhs), &if_equal);
 
   // Load instance types of {lhs} and {rhs}.
-  Node* lhs_instance_type = LoadInstanceType(lhs);
-  Node* rhs_instance_type = LoadInstanceType(rhs);
+  TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
+  TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
 
   // Combine the instance types into a single 16-bit value, so we can check
   // both of them at once.
-  Node* both_instance_types = Word32Or(
+  TNode<Int32T> both_instance_types = Word32Or(
       lhs_instance_type, Word32Shl(rhs_instance_type, Int32Constant(8)));
 
   // Check that both {lhs} and {rhs} are flat one-byte strings.
@@ -394,8 +370,8 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
       BIND(&if_notdone);
       {
         // Load the next characters from {lhs} and {rhs}.
-        Node* lhs_value = Load(MachineType::Uint8(), lhs, var_offset.value());
-        Node* rhs_value = Load(MachineType::Uint8(), rhs, var_offset.value());
+        TNode<Uint8T> lhs_value = Load<Uint8T>(lhs, var_offset.value());
+        TNode<Uint8T> rhs_value = Load<Uint8T>(rhs, var_offset.value());
 
         // Check if the characters match.
         Label if_valueissame(this), if_valueisnotsame(this);
@@ -431,16 +407,20 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
     // TODO(bmeurer): Add support for two byte string relational comparisons.
     switch (op) {
       case Operation::kLessThan:
-        TailCallRuntime(Runtime::kStringLessThan, context, lhs, rhs);
+        TailCallRuntime(Runtime::kStringLessThan, NoContextConstant(), lhs,
+                        rhs);
         break;
       case Operation::kLessThanOrEqual:
-        TailCallRuntime(Runtime::kStringLessThanOrEqual, context, lhs, rhs);
+        TailCallRuntime(Runtime::kStringLessThanOrEqual, NoContextConstant(),
+                        lhs, rhs);
         break;
       case Operation::kGreaterThan:
-        TailCallRuntime(Runtime::kStringGreaterThan, context, lhs, rhs);
+        TailCallRuntime(Runtime::kStringGreaterThan, NoContextConstant(), lhs,
+                        rhs);
         break;
       case Operation::kGreaterThanOrEqual:
-        TailCallRuntime(Runtime::kStringGreaterThanOrEqual, context, lhs, rhs);
+        TailCallRuntime(Runtime::kStringGreaterThanOrEqual, NoContextConstant(),
+                        lhs, rhs);
         break;
       default:
         UNREACHABLE();
@@ -494,41 +474,33 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
 }
 
 TF_BUILTIN(StringEqual, StringBuiltinsAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
-  Node* left = Parameter(Descriptor::kLeft);
-  Node* right = Parameter(Descriptor::kRight);
-  GenerateStringEqual(context, left, right);
+  TNode<String> left = CAST(Parameter(Descriptor::kLeft));
+  TNode<String> right = CAST(Parameter(Descriptor::kRight));
+  GenerateStringEqual(left, right);
 }
 
 TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
-  Node* left = Parameter(Descriptor::kLeft);
-  Node* right = Parameter(Descriptor::kRight);
-  GenerateStringRelationalComparison(context, left, right,
-                                     Operation::kLessThan);
+  TNode<String> left = CAST(Parameter(Descriptor::kLeft));
+  TNode<String> right = CAST(Parameter(Descriptor::kRight));
+  GenerateStringRelationalComparison(left, right, Operation::kLessThan);
 }
 
 TF_BUILTIN(StringLessThanOrEqual, StringBuiltinsAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
-  Node* left = Parameter(Descriptor::kLeft);
-  Node* right = Parameter(Descriptor::kRight);
-  GenerateStringRelationalComparison(context, left, right,
-                                     Operation::kLessThanOrEqual);
+  TNode<String> left = CAST(Parameter(Descriptor::kLeft));
+  TNode<String> right = CAST(Parameter(Descriptor::kRight));
+  GenerateStringRelationalComparison(left, right, Operation::kLessThanOrEqual);
 }
 
 TF_BUILTIN(StringGreaterThan, StringBuiltinsAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
-  Node* left = Parameter(Descriptor::kLeft);
-  Node* right = Parameter(Descriptor::kRight);
-  GenerateStringRelationalComparison(context, left, right,
-                                     Operation::kGreaterThan);
+  TNode<String> left = CAST(Parameter(Descriptor::kLeft));
+  TNode<String> right = CAST(Parameter(Descriptor::kRight));
+  GenerateStringRelationalComparison(left, right, Operation::kGreaterThan);
 }
 
 TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
-  Node* left = Parameter(Descriptor::kLeft);
-  Node* right = Parameter(Descriptor::kRight);
-  GenerateStringRelationalComparison(context, left, right,
+  TNode<String> left = CAST(Parameter(Descriptor::kLeft));
+  TNode<String> right = CAST(Parameter(Descriptor::kRight));
+  GenerateStringRelationalComparison(left, right,
                                      Operation::kGreaterThanOrEqual);
 }
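
For reference, the flat one-byte loop that GenerateStringRelationalComparison runs (compare bytes up to the shorter length, then break ties on length) corresponds to this plain-C++ restatement, illustrative only:

    #include <cstddef>
    #include <cstdint>

    // Byte-wise relational compare of two flat one-byte strings: the first
    // differing character decides; otherwise the shorter string orders first.
    int CompareOneByte(const uint8_t* lhs, size_t lhs_len,
                       const uint8_t* rhs, size_t rhs_len) {
      size_t n = lhs_len < rhs_len ? lhs_len : rhs_len;
      for (size_t i = 0; i < n; ++i) {
        if (lhs[i] != rhs[i]) return lhs[i] < rhs[i] ? -1 : 1;
      }
      if (lhs_len == rhs_len) return 0;
      return lhs_len < rhs_len ? -1 : 1;
    }

    int main() {
      const uint8_t a[] = "abc", b[] = "abd";
      return CompareOneByte(a, 3, b, 3) < 0 ? 0 : 1;
    }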
 
@@ -598,11 +570,11 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
     // Single argument case, perform fast single character string cache lookup
     // for one-byte code units, or fall back to creating a single character
     // string on the fly otherwise.
-    Node* code = arguments.AtIndex(0);
+    TNode<Object> code = arguments.AtIndex(0);
     Node* code32 = TruncateTaggedToWord32(context, code);
     TNode<Int32T> code16 =
         Signed(Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)));
-    Node* result = StringFromSingleCharCode(code16);
+    TNode<String> result = StringFromSingleCharCode(code16);
     arguments.PopAndReturn(result);
   }
 
@@ -611,7 +583,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
   {
     Label two_byte(this);
     // Assume that the resulting string contains only one-byte characters.
-    Node* one_byte_result = AllocateSeqOneByteString(context, Unsigned(argc));
+    TNode<String> one_byte_result = AllocateSeqOneByteString(Unsigned(argc));
 
     TVARIABLE(IntPtrT, var_max_index);
     var_max_index = IntPtrConstant(0);
@@ -630,7 +602,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
           &two_byte);
 
       // The {code16} fits into the SeqOneByteString {one_byte_result}.
-      Node* offset = ElementOffsetFromIndex(
+      TNode<IntPtrT> offset = ElementOffsetFromIndex(
           var_max_index.value(), UINT8_ELEMENTS,
           CodeStubAssembler::INTPTR_PARAMETERS,
           SeqOneByteString::kHeaderSize - kHeapObjectTag);
@@ -645,7 +617,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
     // At least one of the characters in the string requires a 16-bit
     // representation.  Allocate a SeqTwoByteString to hold the resulting
     // string.
-    Node* two_byte_result = AllocateSeqTwoByteString(context, Unsigned(argc));
+    TNode<String> two_byte_result = AllocateSeqTwoByteString(Unsigned(argc));
 
     // Copy the characters that have already been put in the 8-bit string into
     // their corresponding positions in the new 16-bit string.
@@ -655,7 +627,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
                          String::TWO_BYTE_ENCODING);
 
     // Write the character that caused the 8-bit to 16-bit fault.
-    Node* max_index_offset =
+    TNode<IntPtrT> max_index_offset =
         ElementOffsetFromIndex(var_max_index.value(), UINT16_ELEMENTS,
                                CodeStubAssembler::INTPTR_PARAMETERS,
                                SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -670,10 +642,10 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
         vars,
         [this, context, two_byte_result, &var_max_index](Node* arg) {
           Node* code32 = TruncateTaggedToWord32(context, arg);
-          Node* code16 =
+          TNode<Word32T> code16 =
               Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
 
-          Node* offset = ElementOffsetFromIndex(
+          TNode<IntPtrT> offset = ElementOffsetFromIndex(
               var_max_index.value(), UINT16_ELEMENTS,
               CodeStubAssembler::INTPTR_PARAMETERS,
               SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -688,12 +660,9 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
 }
 
 void StringBuiltinsAssembler::StringIndexOf(
-    Node* const subject_string, Node* const search_string, Node* const position,
-    const std::function<void(Node*)>& f_return) {
-  CSA_ASSERT(this, IsString(subject_string));
-  CSA_ASSERT(this, IsString(search_string));
-  CSA_ASSERT(this, TaggedIsSmi(position));
-
+    TNode<String> const subject_string, TNode<String> const search_string,
+    TNode<Smi> const position,
+    const std::function<void(TNode<Smi>)>& f_return) {
   TNode<IntPtrT> const int_zero = IntPtrConstant(0);
   TNode<IntPtrT> const search_length = LoadStringLengthAsWord(search_string);
   TNode<IntPtrT> const subject_length = LoadStringLengthAsWord(subject_string);
@@ -712,7 +681,7 @@ void StringBuiltinsAssembler::StringIndexOf(
   // If the string pointers are identical, we can just return 0. Note that this
   // implies {start_position} == 0 since we've passed the check above.
   Label return_zero(this);
-  GotoIf(WordEqual(subject_string, search_string), &return_zero);
+  GotoIf(TaggedEqual(subject_string, search_string), &return_zero);
 
   // Try to unpack subject and search strings. Bail to runtime if either needs
   // to be flattened.
@@ -725,13 +694,13 @@ void StringBuiltinsAssembler::StringIndexOf(
   search_to_direct.TryToDirect(&call_runtime_unchecked);
 
   // Load pointers to string data.
-  Node* const subject_ptr =
+  TNode<RawPtrT> const subject_ptr =
       subject_to_direct.PointerToData(&call_runtime_unchecked);
-  Node* const search_ptr =
+  TNode<RawPtrT> const search_ptr =
       search_to_direct.PointerToData(&call_runtime_unchecked);
 
-  Node* const subject_offset = subject_to_direct.offset();
-  Node* const search_offset = search_to_direct.offset();
+  TNode<IntPtrT> const subject_offset = subject_to_direct.offset();
+  TNode<IntPtrT> const search_offset = search_to_direct.offset();
 
   // Like String::IndexOf, the actual matching is done by the optimized
   // SearchString method in string-search.h. Dispatch based on string instance
@@ -754,9 +723,9 @@ void StringBuiltinsAssembler::StringIndexOf(
 
   BIND(&one_one);
   {
-    Node* const adjusted_subject_ptr = PointerToStringDataAtIndex(
+    TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
         subject_ptr, subject_offset, String::ONE_BYTE_ENCODING);
-    Node* const adjusted_search_ptr = PointerToStringDataAtIndex(
+    TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
         search_ptr, search_offset, String::ONE_BYTE_ENCODING);
 
     Label direct_memchr_call(this), generic_fast_path(this);
@@ -767,20 +736,22 @@ void StringBuiltinsAssembler::StringIndexOf(
     // search strings.
     BIND(&direct_memchr_call);
     {
-      Node* const string_addr = IntPtrAdd(adjusted_subject_ptr, start_position);
-      Node* const search_length = IntPtrSub(subject_length, start_position);
-      Node* const search_byte =
+      TNode<IntPtrT> const string_addr =
+          IntPtrAdd(adjusted_subject_ptr, start_position);
+      TNode<IntPtrT> const search_length =
+          IntPtrSub(subject_length, start_position);
+      TNode<IntPtrT> const search_byte =
           ChangeInt32ToIntPtr(Load(MachineType::Uint8(), adjusted_search_ptr));
 
-      Node* const memchr =
+      TNode<ExternalReference> const memchr =
           ExternalConstant(ExternalReference::libc_memchr_function());
-      Node* const result_address =
+      TNode<IntPtrT> const result_address = UncheckedCast<IntPtrT>(
           CallCFunction(memchr, MachineType::Pointer(),
                         std::make_pair(MachineType::Pointer(), string_addr),
                         std::make_pair(MachineType::IntPtr(), search_byte),
-                        std::make_pair(MachineType::UintPtr(), search_length));
+                        std::make_pair(MachineType::UintPtr(), search_length)));
       GotoIf(WordEqual(result_address, int_zero), &return_minus_1);
-      Node* const result_index =
+      TNode<IntPtrT> const result_index =
           IntPtrAdd(IntPtrSub(result_address, string_addr), start_position);
       f_return(SmiTag(result_index));
     }
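
The memchr fast path above, restated as plain C++: advance the base pointer by the start position, search the remaining bytes, and rebase the hit back to an index in the subject (names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Index of search_byte in subject[start..subject_len), or -1 if absent.
    ptrdiff_t IndexOfByte(const uint8_t* subject, size_t subject_len,
                          uint8_t search_byte, size_t start) {
      const void* hit =
          memchr(subject + start, search_byte, subject_len - start);
      if (hit == nullptr) return -1;
      return static_cast<const uint8_t*>(hit) - subject;
    }

    int main() {
      const uint8_t s[] = "hello";
      return IndexOfByte(s, 5, 'l', 3) == 3 ? 0 : 1;
    }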
@@ -796,9 +767,9 @@ void StringBuiltinsAssembler::StringIndexOf(
 
   BIND(&one_two);
   {
-    Node* const adjusted_subject_ptr = PointerToStringDataAtIndex(
+    TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
         subject_ptr, subject_offset, String::ONE_BYTE_ENCODING);
-    Node* const adjusted_search_ptr = PointerToStringDataAtIndex(
+    TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
         search_ptr, search_offset, String::TWO_BYTE_ENCODING);
 
     Node* const result = CallSearchStringRaw<onebyte_t, twobyte_t>(
@@ -809,9 +780,9 @@ void StringBuiltinsAssembler::StringIndexOf(
 
   BIND(&two_one);
   {
-    Node* const adjusted_subject_ptr = PointerToStringDataAtIndex(
+    TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
         subject_ptr, subject_offset, String::TWO_BYTE_ENCODING);
-    Node* const adjusted_search_ptr = PointerToStringDataAtIndex(
+    TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
         search_ptr, search_offset, String::ONE_BYTE_ENCODING);
 
     Node* const result = CallSearchStringRaw<twobyte_t, onebyte_t>(
@@ -822,9 +793,9 @@ void StringBuiltinsAssembler::StringIndexOf(
 
   BIND(&two_two);
   {
-    Node* const adjusted_subject_ptr = PointerToStringDataAtIndex(
+    TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex(
         subject_ptr, subject_offset, String::TWO_BYTE_ENCODING);
-    Node* const adjusted_search_ptr = PointerToStringDataAtIndex(
+    TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex(
         search_ptr, search_offset, String::TWO_BYTE_ENCODING);
 
     Node* const result = CallSearchStringRaw<twobyte_t, twobyte_t>(
@@ -850,9 +821,9 @@ void StringBuiltinsAssembler::StringIndexOf(
     // Simplified version of the runtime call where the types of the arguments
     // are already known due to type checks in this stub.
     Comment("Call Runtime Unchecked");
-    Node* result =
-        CallRuntime(Runtime::kStringIndexOfUnchecked, NoContextConstant(),
-                    subject_string, search_string, position);
+    TNode<Smi> result =
+        CAST(CallRuntime(Runtime::kStringIndexOfUnchecked, NoContextConstant(),
+                         subject_string, search_string, position));
     f_return(result);
   }
 }
@@ -861,11 +832,11 @@ void StringBuiltinsAssembler::StringIndexOf(
 // #sec-string.prototype.indexof
 // Unchecked helper for builtins lowering.
 TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
-  Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* search_string = Parameter(Descriptor::kSearchString);
-  Node* position = Parameter(Descriptor::kPosition);
+  TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
+  TNode<String> search_string = CAST(Parameter(Descriptor::kSearchString));
+  TNode<Smi> position = CAST(Parameter(Descriptor::kPosition));
   StringIndexOf(receiver, search_string, position,
-                [this](Node* result) { this->Return(result); });
+                [this](TNode<Smi> result) { this->Return(result); });
 }
 
 // ES6 String.prototype.includes(searchString [, position])
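
As the shared Generate body below shows, includes is implemented on top of the same search as indexOf; the only difference is whether the raw index or index >= 0 is returned. The relationship in plain C++ terms, using std::string purely for illustration:

    #include <string>

    // includes(subject, search, pos) is indexOf(subject, search, pos) >= 0.
    bool Includes(const std::string& subject, const std::string& search,
                  size_t pos) {
      return subject.find(search, pos) != std::string::npos;
    }

    int main() { return Includes("hello", "ell", 0) ? 0 : 1; }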
@@ -890,10 +861,10 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant,
                                               TNode<IntPtrT> argc,
                                               TNode<Context> context) {
   CodeStubArguments arguments(this, argc);
-  Node* const receiver = arguments.GetReceiver();
+  TNode<Object> const receiver = arguments.GetReceiver();
 
-  VARIABLE(var_search_string, MachineRepresentation::kTagged);
-  VARIABLE(var_position, MachineRepresentation::kTagged);
+  TVARIABLE(Object, var_search_string);
+  TVARIABLE(Object, var_position);
   Label argc_1(this), argc_2(this), call_runtime(this, Label::kDeferred),
       fast_path(this);
 
@@ -902,43 +873,45 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant,
   {
     Comment("0 Argument case");
     CSA_ASSERT(this, IntPtrEqual(argc, IntPtrConstant(0)));
-    Node* const undefined = UndefinedConstant();
-    var_search_string.Bind(undefined);
-    var_position.Bind(undefined);
+    TNode<Oddball> undefined = UndefinedConstant();
+    var_search_string = undefined;
+    var_position = undefined;
     Goto(&call_runtime);
   }
   BIND(&argc_1);
   {
     Comment("1 Argument case");
-    var_search_string.Bind(arguments.AtIndex(0));
-    var_position.Bind(SmiConstant(0));
+    var_search_string = arguments.AtIndex(0);
+    var_position = SmiConstant(0);
     Goto(&fast_path);
   }
   BIND(&argc_2);
   {
     Comment("2 Argument case");
-    var_search_string.Bind(arguments.AtIndex(0));
-    var_position.Bind(arguments.AtIndex(1));
+    var_search_string = arguments.AtIndex(0);
+    var_position = arguments.AtIndex(1);
     GotoIfNot(TaggedIsSmi(var_position.value()), &call_runtime);
     Goto(&fast_path);
   }
   BIND(&fast_path);
   {
     Comment("Fast Path");
-    Node* const search = var_search_string.value();
-    Node* const position = var_position.value();
+    TNode<Object> const search = var_search_string.value();
+    TNode<Smi> const position = CAST(var_position.value());
     GotoIf(TaggedIsSmi(receiver), &call_runtime);
     GotoIf(TaggedIsSmi(search), &call_runtime);
-    GotoIfNot(IsString(receiver), &call_runtime);
-    GotoIfNot(IsString(search), &call_runtime);
-
-    StringIndexOf(receiver, search, position, [&](Node* result) {
-      CSA_ASSERT(this, TaggedIsSmi(result));
-      arguments.PopAndReturn((variant == kIndexOf)
-                                 ? result
-                                 : SelectBooleanConstant(SmiGreaterThanOrEqual(
-                                       CAST(result), SmiConstant(0))));
-    });
+    GotoIfNot(IsString(CAST(receiver)), &call_runtime);
+    GotoIfNot(IsString(CAST(search)), &call_runtime);
+
+    StringIndexOf(CAST(receiver), CAST(search), position,
+                  [&](TNode<Smi> result) {
+                    if (variant == kIndexOf) {
+                      arguments.PopAndReturn(result);
+                    } else {
+                      arguments.PopAndReturn(SelectBooleanConstant(
+                          SmiGreaterThanOrEqual(result, SmiConstant(0))));
+                    }
+                  });
   }
   BIND(&call_runtime);
   {
@@ -946,7 +919,7 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant,
     Runtime::FunctionId runtime = variant == kIndexOf
                                       ? Runtime::kStringIndexOf
                                       : Runtime::kStringIncludes;
-    Node* const result =
+    TNode<Object> const result =
         CallRuntime(runtime, context, receiver, var_search_string.value(),
                     var_position.value());
     arguments.PopAndReturn(result);
@@ -955,7 +928,8 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant,
 
 void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
     Node* const context, Node* const object, Node* const maybe_string,
-    Handle<Symbol> symbol, DescriptorIndexAndName symbol_index,
+    Handle<Symbol> symbol,
+    DescriptorIndexNameValue additional_property_to_check,
     const NodeFunction0& regexp_call, const NodeFunction1& generic_call) {
   Label out(this);
 
@@ -972,9 +946,17 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
     GotoIf(TaggedIsSmi(maybe_string), &slow_lookup);
     GotoIfNot(IsString(maybe_string), &slow_lookup);
 
+    // Note we don't run a full (= permissive) check here, because passing the
+    // check implies calling the fast variants of target builtins, which assume
+    // we've already made their appropriate fast path checks. This is not the
+// case though; e.g., some of the target builtins access flag getters.
+    // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+    // permissive.
     RegExpBuiltinsAssembler regexp_asm(state());
-    regexp_asm.BranchIfFastRegExp(context, object, LoadMap(object),
-                                  symbol_index, &stub_call, &slow_lookup);
+    regexp_asm.BranchIfFastRegExp(
+        CAST(context), CAST(object), LoadMap(object),
+        PrototypeCheckAssembler::kCheckPrototypePropertyConstness,
+        additional_property_to_check, &stub_call, &slow_lookup);
 
     BIND(&stub_call);
     // TODO(jgruber): Add a no-JS scope once it exists.
@@ -993,7 +975,7 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
   // We handle the former by jumping to {out} for null values as well, while
   // the latter is already handled by the Call({maybe_func}) operation.
 
-  Node* const maybe_func = GetProperty(context, object, symbol);
+  TNode<Object> const maybe_func = GetProperty(context, object, symbol);
   GotoIf(IsUndefined(maybe_func), &out);
   GotoIf(IsNull(maybe_func), &out);
 
@@ -1041,10 +1023,10 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution(
   {
     CSA_ASSERT(this, TaggedIsPositiveSmi(dollar_index));
 
-    Node* const matched =
+    TNode<Object> const matched =
         CallBuiltin(Builtins::kStringSubstring, context, subject_string,
                     SmiUntag(match_start_index), SmiUntag(match_end_index));
-    Node* const replacement_string =
+    TNode<Object> const replacement_string =
         CallRuntime(Runtime::kGetSubstitution, context, matched, subject_string,
                     match_start_index, replace_string, dollar_index);
     var_result.Bind(replacement_string);
@@ -1073,8 +1055,9 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
 
   MaybeCallFunctionAtSymbol(
       context, search, receiver, isolate()->factory()->replace_symbol(),
-      DescriptorIndexAndName{JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
-                             RootIndex::kreplace_symbol},
+      DescriptorIndexNameValue{JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
+                               RootIndex::kreplace_symbol,
+                               Context::REGEXP_REPLACE_FUNCTION_INDEX},
       [=]() {
         Return(CallBuiltin(Builtins::kRegExpReplace, context, search, receiver,
                            replace));
@@ -1102,7 +1085,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
     GotoIf(TaggedIsSmi(replace), &next);
     GotoIfNot(IsString(replace), &next);
 
-    Node* const subject_instance_type = LoadInstanceType(subject_string);
+    TNode<Uint16T> const subject_instance_type =
+        LoadInstanceType(subject_string);
     GotoIfNot(IsConsStringInstanceType(subject_instance_type), &next);
 
     GotoIf(TaggedIsPositiveSmi(IndexOfDollarChar(context, replace)), &next);
@@ -1160,7 +1144,7 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
     Label next(this);
 
     GotoIf(SmiEqual(match_start_index, smi_zero), &next);
-    Node* const prefix =
+    TNode<Object> const prefix =
         CallBuiltin(Builtins::kStringSubstring, context, subject_string,
                     IntPtrConstant(0), SmiUntag(match_start_index));
     var_result.Bind(prefix);
@@ -1182,7 +1166,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
     Node* const replacement =
         CallJS(call_callable, context, replace, UndefinedConstant(),
                search_string, match_start_index, subject_string);
-    Node* const replacement_string = ToString_Inline(context, replacement);
+    TNode<String> const replacement_string =
+        ToString_Inline(context, replacement);
     var_result.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
                                 var_result.value(), replacement_string));
     Goto(&out);
@@ -1190,7 +1175,7 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
 
   BIND(&if_notcallablereplace);
   {
-    Node* const replace_string = ToString_Inline(context, replace);
+    TNode<String> const replace_string = ToString_Inline(context, replace);
     Node* const replacement =
         GetSubstitution(context, subject_string, match_start_index,
                         match_end_index, replace_string);
@@ -1201,11 +1186,11 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
 
   BIND(&out);
   {
-    Node* const suffix =
+    TNode<Object> const suffix =
         CallBuiltin(Builtins::kStringSubstring, context, subject_string,
                     SmiUntag(match_end_index), subject_length);
-    Node* const result = CallBuiltin(Builtins::kStringAdd_CheckNone, context,
-                                     var_result.value(), suffix);
+    TNode<Object> const result = CallBuiltin(
+        Builtins::kStringAdd_CheckNone, context, var_result.value(), suffix);
     Return(result);
   }
 }
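
The tail of StringPrototypeReplace stitches the result together as unmatched prefix + replacement + suffix. The same composition over std::string, as a sketch of the shape only:

    #include <string>

    // subject[0, match_start) + replacement + subject[match_end, end)
    std::string ReplaceOnce(const std::string& subject, size_t match_start,
                            size_t match_end, const std::string& replacement) {
      return subject.substr(0, match_start) + replacement +
             subject.substr(match_end);
    }

    int main() { return ReplaceOnce("aXc", 1, 2, "b") == "abc" ? 0 : 1; }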
@@ -1225,19 +1210,19 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
 
     Builtins::Name builtin;
     Handle<Symbol> symbol;
-    DescriptorIndexAndName property_to_check;
+    DescriptorIndexNameValue property_to_check;
     if (variant == kMatch) {
       builtin = Builtins::kRegExpMatchFast;
       symbol = isolate()->factory()->match_symbol();
-      property_to_check =
-          DescriptorIndexAndName{JSRegExp::kSymbolMatchFunctionDescriptorIndex,
-                                 RootIndex::kmatch_symbol};
+      property_to_check = DescriptorIndexNameValue{
+          JSRegExp::kSymbolMatchFunctionDescriptorIndex,
+          RootIndex::kmatch_symbol, Context::REGEXP_MATCH_FUNCTION_INDEX};
     } else {
       builtin = Builtins::kRegExpSearchFast;
       symbol = isolate()->factory()->search_symbol();
-      property_to_check =
-          DescriptorIndexAndName{JSRegExp::kSymbolSearchFunctionDescriptorIndex,
-                                 RootIndex::ksearch_symbol};
+      property_to_check = DescriptorIndexNameValue{
+          JSRegExp::kSymbolSearchFunctionDescriptorIndex,
+          RootIndex::ksearch_symbol, Context::REGEXP_SEARCH_FUNCTION_INDEX};
     }
 
     RequireObjectCoercible(context, receiver, method_name);
@@ -1255,7 +1240,7 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
       RegExpBuiltinsAssembler regexp_asm(state());
 
       TNode<String> receiver_string = ToString_Inline(context, receiver);
-      TNode<Context> native_context = LoadNativeContext(context);
+      TNode<NativeContext> native_context = LoadNativeContext(context);
       TNode<HeapObject> regexp_function = CAST(
           LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
       TNode<Map> initial_map = CAST(LoadObjectField(
@@ -1263,9 +1248,13 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
       TNode<Object> regexp = regexp_asm.RegExpCreate(
           context, initial_map, maybe_regexp, EmptyStringConstant());
 
+      // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+      // permissive.
       Label fast_path(this), slow_path(this);
-      regexp_asm.BranchIfFastRegExp(context, regexp, initial_map,
-                                    property_to_check, &fast_path, &slow_path);
+      regexp_asm.BranchIfFastRegExp(
+          context, CAST(regexp), initial_map,
+          PrototypeCheckAssembler::kCheckPrototypePropertyConstness,
+          property_to_check, &fast_path, &slow_path);
 
       BIND(&fast_path);
       Return(CallBuiltin(builtin, context, regexp, receiver_string));
@@ -1297,7 +1286,7 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
   TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
 
   // 1. Let O be ? RequireObjectCoercible(this value).
   RequireObjectCoercible(context, receiver, method_name);
@@ -1320,8 +1309,9 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
   };
   MaybeCallFunctionAtSymbol(
       context, maybe_regexp, receiver, isolate()->factory()->match_all_symbol(),
-      DescriptorIndexAndName{JSRegExp::kSymbolMatchAllFunctionDescriptorIndex,
-                             RootIndex::kmatch_all_symbol},
+      DescriptorIndexNameValue{JSRegExp::kSymbolMatchAllFunctionDescriptorIndex,
+                               RootIndex::kmatch_all_symbol,
+                               Context::REGEXP_MATCH_ALL_FUNCTION_INDEX},
       if_regexp_call, if_generic_call);
 
   RegExpMatchAllAssembler regexp_asm(state());
@@ -1340,141 +1330,6 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
   Return(CallJS(callable, context, match_all_func, rx, s));
 }
 
-class StringPadAssembler : public StringBuiltinsAssembler {
- public:
-  explicit StringPadAssembler(compiler::CodeAssemblerState* state)
-      : StringBuiltinsAssembler(state) {}
-
- protected:
-  enum Variant { kStart, kEnd };
-
-  void Generate(Variant variant, const char* method_name, TNode<IntPtrT> argc,
-                TNode<Context> context) {
-    CodeStubArguments arguments(this, argc);
-    TNode<Object> receiver = arguments.GetReceiver();
-    TNode<String> receiver_string =
-        ToThisString(context, receiver, method_name);
-    TNode<Smi> const string_length = LoadStringLengthAsSmi(receiver_string);
-
-    TVARIABLE(String, var_fill_string, StringConstant(" "));
-    TVARIABLE(IntPtrT, var_fill_length, IntPtrConstant(1));
-
-    Label check_fill(this), dont_pad(this), invalid_string_length(this),
-        pad(this);
-
-    // If no max_length was provided, return the string.
-    GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &dont_pad);
-
-    TNode<Number> const max_length =
-        ToLength_Inline(context, arguments.AtIndex(0));
-    CSA_ASSERT(this, IsNumberNormalized(max_length));
-
-    // If max_length <= string_length, return the string.
-    GotoIfNot(TaggedIsSmi(max_length), &check_fill);
-    Branch(SmiLessThanOrEqual(CAST(max_length), string_length), &dont_pad,
-           &check_fill);
-
-    BIND(&check_fill);
-    {
-      GotoIf(IntPtrEqual(argc, IntPtrConstant(1)), &pad);
-      Node* const fill = arguments.AtIndex(1);
-      GotoIf(IsUndefined(fill), &pad);
-
-      var_fill_string = ToString_Inline(context, fill);
-      var_fill_length = LoadStringLengthAsWord(var_fill_string.value());
-      Branch(WordEqual(var_fill_length.value(), IntPtrConstant(0)), &dont_pad,
-             &pad);
-    }
-
-    BIND(&pad);
-    {
-      CSA_ASSERT(this,
-                 IntPtrGreaterThan(var_fill_length.value(), IntPtrConstant(0)));
-
-      // Throw if max_length is greater than String::kMaxLength.
-      GotoIfNot(TaggedIsSmi(max_length), &invalid_string_length);
-      TNode<Smi> smi_max_length = CAST(max_length);
-      GotoIfNot(
-          SmiLessThanOrEqual(smi_max_length, SmiConstant(String::kMaxLength)),
-          &invalid_string_length);
-
-      CSA_ASSERT(this, SmiGreaterThan(smi_max_length, string_length));
-      TNode<Smi> const pad_length = SmiSub(smi_max_length, string_length);
-
-      VARIABLE(var_pad, MachineRepresentation::kTagged);
-      Label single_char_fill(this), multi_char_fill(this), return_result(this);
-      Branch(IntPtrEqual(var_fill_length.value(), IntPtrConstant(1)),
-             &single_char_fill, &multi_char_fill);
-
-      // Fast path for a single character fill.  No need to calculate number of
-      // repetitions or remainder.
-      BIND(&single_char_fill);
-      {
-        var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
-                                 static_cast<Node*>(var_fill_string.value()),
-                                 pad_length));
-        Goto(&return_result);
-      }
-      BIND(&multi_char_fill);
-      {
-        TNode<Int32T> const fill_length_word32 =
-            TruncateIntPtrToInt32(var_fill_length.value());
-        TNode<Int32T> const pad_length_word32 = SmiToInt32(pad_length);
-        TNode<Int32T> const repetitions_word32 =
-            Int32Div(pad_length_word32, fill_length_word32);
-        TNode<Int32T> const remaining_word32 =
-            Int32Mod(pad_length_word32, fill_length_word32);
-
-        var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
-                                 var_fill_string.value(),
-                                 SmiFromInt32(repetitions_word32)));
-
-        GotoIfNot(remaining_word32, &return_result);
-        {
-          Node* const remainder_string = CallBuiltin(
-              Builtins::kStringSubstring, context, var_fill_string.value(),
-              IntPtrConstant(0), ChangeInt32ToIntPtr(remaining_word32));
-          var_pad.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
-                                   var_pad.value(), remainder_string));
-          Goto(&return_result);
-        }
-      }
-      BIND(&return_result);
-      CSA_ASSERT(this,
-                 SmiEqual(pad_length, LoadStringLengthAsSmi(var_pad.value())));
-      arguments.PopAndReturn(
-          variant == kStart
-              ? CallBuiltin(Builtins::kStringAdd_CheckNone, context,
-                            var_pad.value(), receiver_string)
-              : CallBuiltin(Builtins::kStringAdd_CheckNone, context,
-                            receiver_string, var_pad.value()));
-    }
-    BIND(&dont_pad);
-    arguments.PopAndReturn(receiver_string);
-    BIND(&invalid_string_length);
-    {
-      CallRuntime(Runtime::kThrowInvalidStringLength, context);
-      Unreachable();
-    }
-  }
-};
-
-TF_BUILTIN(StringPrototypePadEnd, StringPadAssembler) {
-  TNode<IntPtrT> argc =
-      ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
-  Generate(kEnd, "String.prototype.padEnd", argc, context);
-}
-
-TF_BUILTIN(StringPrototypePadStart, StringPadAssembler) {
-  TNode<IntPtrT> argc =
-      ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
-  Generate(kStart, "String.prototype.padStart", argc, context);
-}
-
 // ES6 #sec-string.prototype.search
 TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
   TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
@@ -1484,7 +1339,7 @@ TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
 }
 
 TNode<JSArray> StringBuiltinsAssembler::StringToArray(
-    TNode<Context> context, TNode<String> subject_string,
+    TNode<NativeContext> context, TNode<String> subject_string,
     TNode<Smi> subject_length, TNode<Number> limit_number) {
   CSA_ASSERT(this, SmiGreaterThan(subject_length, SmiConstant(0)));
 
@@ -1492,7 +1347,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
       fill_thehole_and_call_runtime(this, Label::kDeferred);
   TVARIABLE(JSArray, result_array);
 
-  TNode<Int32T> instance_type = LoadInstanceType(subject_string);
+  TNode<Uint16T> instance_type = LoadInstanceType(subject_string);
   GotoIfNot(IsOneByteStringInstanceType(instance_type), &call_runtime);
 
   // Try to use cached one byte characters.
@@ -1508,10 +1363,10 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
     TNode<FixedArray> elements = CAST(AllocateFixedArray(
         PACKED_ELEMENTS, length, AllocationFlag::kAllowLargeObjectAllocation));
     // Don't allocate anything while {string_data} is live!
-    TNode<RawPtrT> string_data = UncheckedCast<RawPtrT>(
-        to_direct.PointerToData(&fill_thehole_and_call_runtime));
+    TNode<RawPtrT> string_data =
+        to_direct.PointerToData(&fill_thehole_and_call_runtime);
     TNode<IntPtrT> string_data_offset = to_direct.offset();
-    TNode<Object> cache = LoadRoot(RootIndex::kSingleCharacterStringCache);
+    TNode<FixedArray> cache = SingleCharacterStringCacheConstant();
 
     BuildFastLoop(
         IntPtrConstant(0), length,
@@ -1523,8 +1378,8 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
           TNode<Int32T> char_code =
               UncheckedCast<Int32T>(Load(MachineType::Uint8(), string_data,
                                          IntPtrAdd(index, string_data_offset)));
-          Node* code_index = ChangeUint32ToWord(char_code);
-          TNode<Object> entry = LoadFixedArrayElement(CAST(cache), code_index);
+          TNode<UintPtrT> code_index = ChangeUint32ToWord(char_code);
+          TNode<Object> entry = LoadFixedArrayElement(cache, code_index);
 
           // If we cannot find a char in the cache, fill the hole for the fixed
           // array, and call runtime.
@@ -1562,14 +1417,14 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
   const int kSeparatorArg = 0;
   const int kLimitArg = 1;
 
-  Node* const argc =
+  TNode<IntPtrT> const argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
   TNode<Object> receiver = args.GetReceiver();
-  Node* const separator = args.GetOptionalArgumentValue(kSeparatorArg);
-  Node* const limit = args.GetOptionalArgumentValue(kLimitArg);
-  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+  TNode<Object> const separator = args.GetOptionalArgumentValue(kSeparatorArg);
+  TNode<Object> const limit = args.GetOptionalArgumentValue(kLimitArg);
+  TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
 
   TNode<Smi> smi_zero = SmiConstant(0);
 
@@ -1579,8 +1434,9 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
 
   MaybeCallFunctionAtSymbol(
       context, separator, receiver, isolate()->factory()->split_symbol(),
-      DescriptorIndexAndName{JSRegExp::kSymbolSplitFunctionDescriptorIndex,
-                             RootIndex::ksplit_symbol},
+      DescriptorIndexNameValue{JSRegExp::kSymbolSplitFunctionDescriptorIndex,
+                               RootIndex::ksplit_symbol,
+                               Context::REGEXP_SPLIT_FUNCTION_INDEX},
       [&]() {
         args.PopAndReturn(CallBuiltin(Builtins::kRegExpSplit, context,
                                       separator, receiver, limit));
@@ -1597,13 +1453,12 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
   TNode<Number> limit_number = Select<Number>(
       IsUndefined(limit), [=] { return NumberConstant(kMaxUInt32); },
       [=] { return ToUint32(context, limit); });
-  Node* const separator_string = ToString_Inline(context, separator);
+  TNode<String> const separator_string = ToString_Inline(context, separator);
 
   Label return_empty_array(this);
 
   // Shortcut for {limit} == 0.
-  GotoIf(WordEqual<Object, Object>(limit_number, smi_zero),
-         &return_empty_array);
+  GotoIf(TaggedEqual(limit_number, smi_zero), &return_empty_array);
 
   // ECMA-262 says that if {separator} is undefined, the result should
   // be an array of size 1 containing the entire string.
@@ -1612,7 +1467,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
     GotoIfNot(IsUndefined(separator), &next);
 
     const ElementsKind kind = PACKED_ELEMENTS;
-    Node* const native_context = LoadNativeContext(context);
+    TNode<NativeContext> const native_context = LoadNativeContext(context);
     TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
 
     TNode<Smi> length = SmiConstant(1);
@@ -1642,7 +1497,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
     BIND(&next);
   }
 
-  Node* const result =
+  TNode<Object> const result =
       CallRuntime(Runtime::kStringSplit, context, subject_string,
                   separator_string, limit_number);
   args.PopAndReturn(result);
@@ -1650,7 +1505,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
   BIND(&return_empty_array);
   {
     const ElementsKind kind = PACKED_ELEMENTS;
-    Node* const native_context = LoadNativeContext(context);
+    TNode<NativeContext> const native_context = LoadNativeContext(context);
     TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
 
     TNode<Smi> length = smi_zero;
@@ -1666,7 +1521,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
   const int kStartArg = 0;
   const int kLengthArg = 1;
 
-  Node* const argc =
+  TNode<IntPtrT> const argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
 
@@ -1808,10 +1663,11 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
 
   ToDirectStringAssembler to_direct(state(), string);
   to_direct.TryToDirect(&if_runtime);
-  Node* const string_data = to_direct.PointerToData(&if_runtime);
-  Node* const instance_type = to_direct.instance_type();
-  Node* const is_stringonebyte = IsOneByteStringInstanceType(instance_type);
-  Node* const string_data_offset = to_direct.offset();
+  TNode<RawPtrT> const string_data = to_direct.PointerToData(&if_runtime);
+  TNode<Int32T> const instance_type = to_direct.instance_type();
+  TNode<BoolT> const is_stringonebyte =
+      IsOneByteStringInstanceType(instance_type);
+  TNode<IntPtrT> const string_data_offset = to_direct.offset();
 
   TVARIABLE(IntPtrT, var_start, IntPtrConstant(0));
   TVARIABLE(IntPtrT, var_end, IntPtrSub(string_length, IntPtrConstant(1)));
@@ -1841,8 +1697,8 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
 
 void StringTrimAssembler::ScanForNonWhiteSpaceOrLineTerminator(
     Node* const string_data, Node* const string_data_offset,
-    Node* const is_stringonebyte, Variable* const var_index, Node* const end,
-    int increment, Label* const if_none_found) {
+    Node* const is_stringonebyte, TVariable<IntPtrT>* const var_index,
+    TNode<IntPtrT> const end, int increment, Label* const if_none_found) {
   Label if_stringisonebyte(this), out(this);
 
   GotoIf(is_stringonebyte, &if_stringisonebyte);
@@ -1866,14 +1722,14 @@ void StringTrimAssembler::ScanForNonWhiteSpaceOrLineTerminator(
 }
 
 void StringTrimAssembler::BuildLoop(
-    Variable* const var_index, Node* const end, int increment,
-    Label* const if_none_found, Label* const out,
+    TVariable<IntPtrT>* const var_index, TNode<IntPtrT> const end,
+    int increment, Label* const if_none_found, Label* const out,
     const std::function<Node*(Node*)>& get_character) {
   Label loop(this, var_index);
   Goto(&loop);
   BIND(&loop);
   {
-    Node* const index = var_index->value();
+    TNode<IntPtrT> index = var_index->value();
     GotoIf(IntPtrEqual(index, end), if_none_found);
     GotoIfNotWhiteSpaceOrLineTerminator(
         UncheckedCast<Uint32T>(get_character(index)), out);
@@ -1883,7 +1739,7 @@ void StringTrimAssembler::BuildLoop(
 }
 
 void StringTrimAssembler::GotoIfNotWhiteSpaceOrLineTerminator(
-    Node* const char_code, Label* const if_not_whitespace) {
+    TNode<Word32T> const char_code, Label* const if_not_whitespace) {
   Label out(this);
 
   // 0x0020 - SPACE (Intentionally out of order to fast path a common case)
@@ -2010,11 +1866,12 @@ void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration(
 
   // Check that the String iterator hasn't been modified in a way that would
   // affect iteration.
-  Node* protector_cell = LoadRoot(RootIndex::kStringIteratorProtector);
+  TNode<PropertyCell> protector_cell = StringIteratorProtectorConstant();
   DCHECK(isolate()->heap()->string_iterator_protector().IsPropertyCell());
-  Branch(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
-                   SmiConstant(Isolate::kProtectorValid)),
-         if_true, if_false);
+  Branch(
+      TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+                  SmiConstant(Isolate::kProtectorValid)),
+      if_true, if_false);
 }
 
 }  // namespace internal
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 679ce0e17fe8ee..64d5a77615d8f6 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -19,8 +19,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
   Node* GetSubstitution(Node* context, Node* subject_string,
                         Node* match_start_index, Node* match_end_index,
                         Node* replace_string);
-  void StringEqual_Core(Node* context, Node* lhs, Node* lhs_instance_type,
-                        Node* rhs, Node* rhs_instance_type,
+  void StringEqual_Core(SloppyTNode<String> lhs, Node* lhs_instance_type,
+                        SloppyTNode<String> rhs, Node* rhs_instance_type,
                         TNode<IntPtrT> length, Label* if_equal,
                         Label* if_not_equal, Label* if_indirect);
   void BranchIfStringPrimitiveWithNoCustomIteration(TNode<Object> object,
@@ -51,27 +51,29 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
                             Node* const search_ptr, Node* const search_length,
                             Node* const start_position);
 
-  Node* PointerToStringDataAtIndex(Node* const string_data, Node* const index,
-                                   String::Encoding encoding);
+  TNode<IntPtrT> PointerToStringDataAtIndex(Node* const string_data,
+                                            Node* const index,
+                                            String::Encoding encoding);
 
   // substr and slice have a common way of handling the {start} argument.
   void ConvertAndBoundsCheckStartArgument(Node* context, Variable* var_start,
                                           Node* start, Node* string_length);
 
-  void GenerateStringEqual(Node* context, Node* left, Node* right);
-  void GenerateStringRelationalComparison(Node* context, Node* left,
-                                          Node* right, Operation op);
+  void GenerateStringEqual(TNode<String> left, TNode<String> right);
+  void GenerateStringRelationalComparison(TNode<String> left,
+                                          TNode<String> right, Operation op);
 
   using StringAtAccessor = std::function<TNode<Object>(
       TNode<String> receiver, TNode<IntPtrT> length, TNode<IntPtrT> index)>;
 
-  void StringIndexOf(Node* const subject_string, Node* const search_string,
-                     Node* const position,
-                     const std::function<void(Node*)>& f_return);
+  void StringIndexOf(TNode<String> const subject_string,
+                     TNode<String> const search_string,
+                     TNode<Smi> const position,
+                     const std::function<void(TNode<Smi>)>& f_return);
 
   TNode<Smi> IndexOfDollarChar(Node* const context, Node* const string);
 
-  TNode<JSArray> StringToArray(TNode<Context> context,
+  TNode<JSArray> StringToArray(TNode<NativeContext> context,
                                TNode<String> subject_string,
                                TNode<Smi> subject_length,
                                TNode<Number> limit_number);
@@ -94,12 +96,13 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
   // Important: {regexp_call} may not contain any code that can call into JS.
   using NodeFunction0 = std::function<void()>;
   using NodeFunction1 = std::function<void(Node* fn)>;
-  void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
-                                 Node* const maybe_string,
-                                 Handle<Symbol> symbol,
-                                 DescriptorIndexAndName symbol_index,
-                                 const NodeFunction0& regexp_call,
-                                 const NodeFunction1& generic_call);
+  using DescriptorIndexNameValue =
+      PrototypeCheckAssembler::DescriptorIndexNameValue;
+  void MaybeCallFunctionAtSymbol(
+      Node* const context, Node* const object, Node* const maybe_string,
+      Handle<Symbol> symbol,
+      DescriptorIndexNameValue additional_property_to_check,
+      const NodeFunction0& regexp_call, const NodeFunction1& generic_call);
 };
 
 class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
@@ -120,21 +123,19 @@ class StringTrimAssembler : public StringBuiltinsAssembler {
       : StringBuiltinsAssembler(state) {}
 
   V8_EXPORT_PRIVATE void GotoIfNotWhiteSpaceOrLineTerminator(
-      Node* const char_code, Label* const if_not_whitespace);
+      TNode<Word32T> const char_code, Label* const if_not_whitespace);
 
  protected:
   void Generate(String::TrimMode mode, const char* method, TNode<IntPtrT> argc,
                 TNode<Context> context);
 
-  void ScanForNonWhiteSpaceOrLineTerminator(Node* const string_data,
-                                            Node* const string_data_offset,
-                                            Node* const is_stringonebyte,
-                                            Variable* const var_index,
-                                            Node* const end, int increment,
-                                            Label* const if_none_found);
+  void ScanForNonWhiteSpaceOrLineTerminator(
+      Node* const string_data, Node* const string_data_offset,
+      Node* const is_stringonebyte, TVariable<IntPtrT>* const var_index,
+      TNode<IntPtrT> const end, int increment, Label* const if_none_found);
 
-  void BuildLoop(Variable* const var_index, Node* const end, int increment,
-                 Label* const if_none_found, Label* const out,
+  void BuildLoop(TVariable<IntPtrT>* const var_index, TNode<IntPtrT> const end,
+                 int increment, Label* const if_none_found, Label* const out,
                  const std::function<Node*(Node*)>& get_character);
 };
 
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 74b15cf99b1587..04a96c7e46d020 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -208,14 +208,10 @@ BUILTIN(StringPrototypeNormalize) {
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, form,
                                      Object::ToString(isolate, form_input));
 
-  if (!(String::Equals(isolate, form,
-                       isolate->factory()->NewStringFromStaticChars("NFC")) ||
-        String::Equals(isolate, form,
-                       isolate->factory()->NewStringFromStaticChars("NFD")) ||
-        String::Equals(isolate, form,
-                       isolate->factory()->NewStringFromStaticChars("NFKC")) ||
-        String::Equals(isolate, form,
-                       isolate->factory()->NewStringFromStaticChars("NFKD")))) {
+  if (!(String::Equals(isolate, form, isolate->factory()->NFC_string()) ||
+        String::Equals(isolate, form, isolate->factory()->NFD_string()) ||
+        String::Equals(isolate, form, isolate->factory()->NFKC_string()) ||
+        String::Equals(isolate, form, isolate->factory()->NFKD_string()))) {
     Handle<String> valid_forms =
         isolate->factory()->NewStringFromStaticChars("NFC, NFD, NFKC, NFKD");
     THROW_NEW_ERROR_RETURN_FAILURE(
diff --git a/deps/v8/src/builtins/builtins-symbol-gen.cc b/deps/v8/src/builtins/builtins-symbol-gen.cc
index 610a8baeb314f2..cb1ee28501a705 100644
--- a/deps/v8/src/builtins/builtins-symbol-gen.cc
+++ b/deps/v8/src/builtins/builtins-symbol-gen.cc
@@ -16,9 +16,10 @@ TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) {
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
 
-  Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
-                            "Symbol.prototype.description");
-  Node* result = LoadObjectField(value, Symbol::kNameOffset);
+  TNode<Symbol> value =
+      CAST(ToThisValue(context, receiver, PrimitiveType::kSymbol,
+                       "Symbol.prototype.description"));
+  TNode<Object> result = LoadObjectField(value, Symbol::kNameOffset);
   Return(result);
 }
 
@@ -27,8 +28,8 @@ TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) {
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
 
-  Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
-                             "Symbol.prototype [ @@toPrimitive ]");
+  TNode<Object> result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
+                                     "Symbol.prototype [ @@toPrimitive ]");
   Return(result);
 }
 
@@ -37,9 +38,10 @@ TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) {
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
 
-  Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
-                            "Symbol.prototype.toString");
-  Node* result = CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
+  TNode<Object> value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
+                                    "Symbol.prototype.toString");
+  TNode<Object> result =
+      CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
   Return(result);
 }
 
@@ -48,8 +50,8 @@ TF_BUILTIN(SymbolPrototypeValueOf, CodeStubAssembler) {
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
 
-  Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
-                             "Symbol.prototype.valueOf");
+  TNode<Object> result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
+                                     "Symbol.prototype.valueOf");
   Return(result);
 }
 
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index ea6e5947062fac..e705aec8b073d4 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -54,7 +54,7 @@ BUILTIN(SymbolKeyFor) {
   Handle<Symbol> symbol = Handle<Symbol>::cast(obj);
   DisallowHeapAllocation no_gc;
   Object result;
-  if (symbol->is_public()) {
+  if (symbol->is_in_public_symbol_table()) {
     result = symbol->name();
     DCHECK(result.IsString());
   } else {
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 857d33988f32f5..448ff66603f94e 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -35,11 +35,10 @@ void TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
 // TODO(bmeurer,v8:4153): Rename this and maybe fix up the implementation a bit.
 TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
     TNode<Context> context, TNode<UintPtrT> byte_length) {
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<Map> map =
       CAST(LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX));
-  TNode<FixedArray> empty_fixed_array =
-      CAST(LoadRoot(RootIndex::kEmptyFixedArray));
+  TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
 
   TNode<JSArrayBuffer> buffer = UncheckedCast<JSArrayBuffer>(
       Allocate(JSArrayBuffer::kSizeWithEmbedderFields));
@@ -90,12 +89,12 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
   TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
-  Node* argc =
+  TNode<IntPtrT> argc =
       ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
   CodeStubArguments args(this, argc);
-  Node* arg1 = args.GetOptionalArgumentValue(0);
-  Node* arg2 = args.GetOptionalArgumentValue(1);
-  Node* arg3 = args.GetOptionalArgumentValue(2);
+  TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
+  TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
+  TNode<Object> arg3 = args.GetOptionalArgumentValue(2);
 
   // If NewTarget is undefined, throw a TypeError exception.
   // All the TypedArray constructors have this as the first step:
@@ -103,8 +102,8 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
   Label throwtypeerror(this, Label::kDeferred);
   GotoIf(IsUndefined(new_target), &throwtypeerror);
 
-  Node* result = CallBuiltin(Builtins::kCreateTypedArray, context, target,
-                             new_target, arg1, arg2, arg3);
+  TNode<Object> result = CallBuiltin(Builtins::kCreateTypedArray, context,
+                                     target, new_target, arg1, arg2, arg3);
   args.PopAndReturn(result);
 
   BIND(&throwtypeerror);
@@ -221,7 +220,7 @@ TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(TNode<Map> map) {
 TNode<JSFunction> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
     TNode<Context> context, TNode<JSTypedArray> exemplar) {
   TVARIABLE(IntPtrT, context_slot);
-  TNode<Word32T> elements_kind = LoadElementsKind(exemplar);
+  TNode<Int32T> elements_kind = LoadElementsKind(exemplar);
 
   DispatchTypedArrayByElementsKind(
       elements_kind,
@@ -322,8 +321,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
   TNode<RawPtrT> target_data_ptr = LoadJSTypedArrayBackingStore(target);
   TNode<RawPtrT> source_data_ptr = LoadJSTypedArrayBackingStore(source);
 
-  TNode<Word32T> source_el_kind = LoadElementsKind(source);
-  TNode<Word32T> target_el_kind = LoadElementsKind(target);
+  TNode<Int32T> source_el_kind = LoadElementsKind(source);
+  TNode<Int32T> target_el_kind = LoadElementsKind(target);
 
   TNode<IntPtrT> source_el_size = GetTypedArrayElementSize(source_el_kind);
   TNode<IntPtrT> target_el_size = GetTypedArrayElementSize(target_el_kind);
@@ -650,7 +649,7 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
   // that this can be turned into a non-sparse table switch for ideal
   // performance.
   BIND(&if_receiverisheapobject);
-  Node* elements_kind =
+  TNode<Int32T> elements_kind =
       Int32Sub(LoadElementsKind(receiver),
                Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
   Switch(elements_kind, &return_undefined, elements_kinds, elements_kind_labels,
@@ -727,7 +726,7 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
   TNode<JSTypedArray> new_typed_array = TypedArrayCreateByLength(
       context, receiver, SmiTag(length), "%TypedArray%.of");
 
-  TNode<Word32T> elements_kind = LoadElementsKind(new_typed_array);
+  TNode<Int32T> elements_kind = LoadElementsKind(new_typed_array);
 
   // 6. Let k be 0.
   // 7. Repeat, while k < len
@@ -858,17 +857,16 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
     TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
         CAST(iterator_fn), JSFunction::kSharedFunctionInfoOffset);
     GotoIfNot(
-        WordEqual(LoadObjectField(shared_info,
-                                  SharedFunctionInfo::kFunctionDataOffset),
-                  SmiConstant(Builtins::kTypedArrayPrototypeValues)),
+        TaggedEqual(LoadObjectField(shared_info,
+                                    SharedFunctionInfo::kFunctionDataOffset),
+                    SmiConstant(Builtins::kTypedArrayPrototypeValues)),
         &check_iterator);
     // Check that the ArrayIterator prototype's "next" method hasn't been
     // overridden
-    TNode<PropertyCell> protector_cell =
-        CAST(LoadRoot(RootIndex::kArrayIteratorProtector));
+    TNode<PropertyCell> protector_cell = ArrayIteratorProtectorConstant();
     GotoIfNot(
-        WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
-                  SmiConstant(Isolate::kProtectorValid)),
+        TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+                    SmiConstant(Isolate::kProtectorValid)),
         &check_iterator);
 
     // Source is a TypedArray with unmodified iterator behavior. Use the
@@ -895,7 +893,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
 
     // This is not a spec'd limit, so it doesn't particularly matter when we
     // throw the range error for typed array length > MaxSmi.
-    TNode<Object> raw_length = LoadJSArrayLength(values);
+    TNode<Number> raw_length = LoadJSArrayLength(values);
     GotoIfNot(TaggedIsSmi(raw_length), &if_length_not_smi);
 
     final_length = CAST(raw_length);
@@ -949,7 +947,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
   }
 
   BIND(&slow_path);
-  TNode<Word32T> elements_kind = LoadElementsKind(target_obj.value());
+  TNode<Int32T> elements_kind = LoadElementsKind(target_obj.value());
 
   // 7e/13 : Copy the elements
   BuildFastLoop(
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 0b86d3585368c0..12270495c133e9 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -4,6 +4,7 @@
 
 #include "src/builtins/builtins-utils-gen.h"
 #include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/interface-descriptors.h"
 #include "src/objects/objects-inl.h"
 #include "src/wasm/wasm-objects.h"
 #include "src/wasm/wasm-opcodes.h"
@@ -46,10 +47,16 @@ class WasmBuiltinsAssembler : public CodeStubAssembler {
   }
 
   TNode<Code> LoadCEntryFromInstance(TNode<Object> instance) {
-    return UncheckedCast<Code>(
-        Load(MachineType::AnyTagged(), instance,
-             IntPtrConstant(WasmInstanceObject::kCEntryStubOffset -
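+    // The CEntry stub is no longer stored on the instance itself; load the
+    // isolate root from the instance and fetch the builtin out of the
+    // isolate's builtins table instead.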
+    TNode<IntPtrT> isolate_root = UncheckedCast<IntPtrT>(
+        Load(MachineType::Pointer(), instance,
+             IntPtrConstant(WasmInstanceObject::kIsolateRootOffset -
                             kHeapObjectTag)));
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+    TNode<Code> target = UncheckedCast<Code>(
+        Load(MachineType::TaggedPointer(), isolate_root,
+             IntPtrConstant(IsolateData::builtin_slot_offset(centry_id))));
+    return target;
   }
 };
 
@@ -58,14 +65,6 @@ TF_BUILTIN(WasmAllocateHeapNumber, WasmBuiltinsAssembler) {
   TailCallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant());
 }
 
-TF_BUILTIN(WasmCallJavaScript, WasmBuiltinsAssembler) {
-  TNode<Object> context = UncheckedParameter(Descriptor::kContext);
-  TNode<Object> function = UncheckedParameter(Descriptor::kFunction);
-  TNode<Object> argc = UncheckedParameter(Descriptor::kActualArgumentsCount);
-  TNode<Code> target = LoadBuiltinFromFrame(Builtins::kCall_ReceiverIsAny);
-  TailCallStub(CallTrampolineDescriptor{}, target, context, function, argc);
-}
-
 TF_BUILTIN(WasmRecordWrite, WasmBuiltinsAssembler) {
   TNode<Object> object = UncheckedParameter(Descriptor::kObject);
   TNode<Object> slot = UncheckedParameter(Descriptor::kSlot);
@@ -299,6 +298,20 @@ TF_BUILTIN(WasmI64ToBigInt, WasmBuiltinsAssembler) {
   TailCallStub(I64ToBigIntDescriptor(), target, NoContextConstant(), argument);
 }
 
+TF_BUILTIN(WasmI32PairToBigInt, WasmBuiltinsAssembler) {
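+  // On 32-bit platforms a Wasm i64 is passed as two i32 halves; this stub
+  // forwards them to the I32PairToBigInt builtin to box them as a BigInt.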
+  if (!Is32()) {
+    Unreachable();
+    return;
+  }
+
+  TNode<Code> target = LoadBuiltinFromFrame(Builtins::kI32PairToBigInt);
+  TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Parameter(Descriptor::kLow));
+  TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Parameter(Descriptor::kHigh));
+
+  TailCallStub(I32PairToBigIntDescriptor(), target, NoContextConstant(), low,
+               high);
+}
+
 TF_BUILTIN(WasmBigIntToI64, WasmBuiltinsAssembler) {
   if (!Is64()) {
     Unreachable();
@@ -314,6 +327,21 @@ TF_BUILTIN(WasmBigIntToI64, WasmBuiltinsAssembler) {
   TailCallStub(BigIntToI64Descriptor(), target, context, argument);
 }
 
+TF_BUILTIN(WasmBigIntToI32Pair, WasmBuiltinsAssembler) {
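+  // Inverse of WasmI32PairToBigInt: converts a BigInt to an i64, returned as
+  // two i32 halves (32-bit platforms only).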
+  if (!Is32()) {
+    Unreachable();
+    return;
+  }
+
+  TNode<Object> context =
+      UncheckedCast<Object>(Parameter(Descriptor::kContext));
+  TNode<Code> target = LoadBuiltinFromFrame(Builtins::kBigIntToI32Pair);
+  TNode<IntPtrT> argument =
+      UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgument));
+
+  TailCallStub(BigIntToI32PairDescriptor(), target, context, argument);
+}
+
 #define DECLARE_ENUM(name)                                                \
   TF_BUILTIN(ThrowWasm##name, WasmBuiltinsAssembler) {                    \
     TNode<Object> instance = LoadInstanceFromFrame();                     \
diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc
index 18738d2c487703..28fb9c9cbdfaf4 100644
--- a/deps/v8/src/builtins/builtins-weak-refs.cc
+++ b/deps/v8/src/builtins/builtins-weak-refs.cc
@@ -151,8 +151,11 @@ BUILTIN(FinalizationGroupCleanupSome) {
   // Don't do set_scheduled_for_cleanup(false); we still have the microtask
   // scheduled and don't want to schedule another one in case the user never
   // executes microtasks.
-  JSFinalizationGroup::Cleanup(isolate, finalization_group, callback);
-
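+  // Cleanup returns Nothing if the cleanup callback throws; propagate the
+  // pending exception in that case.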
+  if (JSFinalizationGroup::Cleanup(isolate, finalization_group, callback)
+          .IsNothing()) {
+    DCHECK(isolate->has_pending_exception());
+    return ReadOnlyRoots(isolate).exception();
+  }
   return ReadOnlyRoots(isolate).undefined_value();
 }
 
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index ed4a844c9850e3..e5829dd1b34977 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -158,8 +158,7 @@ int Builtins::GetStackParameterCount(Name name) {
 }
 
 // static
-Callable Builtins::CallableFor(Isolate* isolate, Name name) {
-  Handle<Code> code = isolate->builtins()->builtin_handle(name);
+CallInterfaceDescriptor Builtins::CallInterfaceDescriptorFor(Name name) {
   CallDescriptors::Key key;
   switch (name) {
 // This macro is deliberately crafted so as to emit very little code,
@@ -176,12 +175,17 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
       Builtins::Kind kind = Builtins::KindOf(name);
       DCHECK_NE(BCH, kind);
       if (kind == TFJ || kind == CPP) {
-        return Callable(code, JSTrampolineDescriptor{});
+        return JSTrampolineDescriptor{};
       }
       UNREACHABLE();
   }
-  CallInterfaceDescriptor descriptor(key);
-  return Callable(code, descriptor);
+  return CallInterfaceDescriptor{key};
+}
+
+// static
+Callable Builtins::CallableFor(Isolate* isolate, Name name) {
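+  // The descriptor lookup is shared with CallInterfaceDescriptorFor, for
+  // callers that do not need the code handle.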
+  Handle<Code> code = isolate->builtins()->builtin_handle(name);
+  return Callable{code, CallInterfaceDescriptorFor(name)};
 }
 
 // static
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index f885c6f29fb121..0b9f2a2b57d016 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -13,6 +13,7 @@ namespace v8 {
 namespace internal {
 
 class ByteArray;
+class CallInterfaceDescriptor;
 class Callable;
 template <typename T>
 class Handle;
@@ -92,6 +93,7 @@ class Builtins {
   V8_EXPORT_PRIVATE Code builtin(int index);
   V8_EXPORT_PRIVATE Handle<Code> builtin_handle(int index);
 
+  static CallInterfaceDescriptor CallInterfaceDescriptorFor(Name name);
   V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate, Name name);
 
   static int GetStackParameterCount(Name name);
diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq
index b83906d109b2da..47d9ec664a65f4 100644
--- a/deps/v8/src/builtins/collections.tq
+++ b/deps/v8/src/builtins/collections.tq
@@ -6,7 +6,7 @@
 
 namespace collections {
   @export
-  macro LoadKeyValuePairNoSideEffects(implicit context: Context)(o: Object):
+  macro LoadKeyValuePairNoSideEffects(implicit context: Context)(o: JSAny):
       KeyValuePair labels MayHaveSideEffects {
     typeswitch (o) {
       case (a: FastJSArray): {
@@ -28,7 +28,7 @@ namespace collections {
                                   Undefined
             };
           }
-          case (Object): deferred {
+          case (FixedArrayBase): deferred {
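+            // {elements} is a FixedArrayBase; any variant not handled above
+            // cannot occur for a FastJSArray.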
             unreachable;
           }
         }
@@ -36,14 +36,14 @@ namespace collections {
       case (JSReceiver): {
         goto MayHaveSideEffects;
       }
-      case (o: Object): deferred {
+      case (o: JSAny): deferred {
         ThrowTypeError(kIteratorValueNotAnObject, o);
       }
     }
   }
 
   @export
-  transitioning macro LoadKeyValuePair(implicit context: Context)(o: Object):
+  transitioning macro LoadKeyValuePair(implicit context: Context)(o: JSAny):
       KeyValuePair {
     try {
       return LoadKeyValuePairNoSideEffects(o) otherwise Generic;
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 62a0cc31c3c34a..2cf06095611bdf 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -58,11 +58,11 @@ namespace data_view {
     }
   }
 
-  macro WasNeutered(view: JSArrayBufferView): bool {
+  macro WasDetached(view: JSArrayBufferView): bool {
     return IsDetachedBuffer(view.buffer);
   }
 
-  macro ValidateDataView(context: Context, o: Object, method: String):
+  macro ValidateDataView(context: Context, o: JSAny, method: String):
       JSDataView {
     try {
       return Cast<JSDataView>(o) otherwise CastError;
@@ -75,7 +75,7 @@ namespace data_view {
   // ES6 section 24.2.4.1 get DataView.prototype.buffer
   javascript builtin DataViewPrototypeGetBuffer(
       js-implicit context: Context,
-      receiver: Object)(...arguments): JSArrayBuffer {
+      receiver: JSAny)(...arguments): JSArrayBuffer {
     const dataView: JSDataView =
         ValidateDataView(context, receiver, 'get DataView.prototype.buffer');
     return dataView.buffer;
@@ -83,12 +83,12 @@ namespace data_view {
 
   // ES6 section 24.2.4.2 get DataView.prototype.byteLength
   javascript builtin DataViewPrototypeGetByteLength(
-      js-implicit context: Context, receiver: Object)(...arguments): Number {
+      js-implicit context: Context, receiver: JSAny)(...arguments): Number {
     const dataView: JSDataView = ValidateDataView(
         context, receiver, 'get DataView.prototype.byte_length');
-    if (WasNeutered(dataView)) {
+    if (WasDetached(dataView)) {
       // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
-      // here if the JSArrayBuffer of the {dataView} was neutered.
+      // here if the JSArrayBuffer of the {dataView} was detached.
       return 0;
     }
     return Convert<Number>(dataView.byte_length);
@@ -96,12 +96,12 @@ namespace data_view {
 
   // ES6 section 24.2.4.3 get DataView.prototype.byteOffset
   javascript builtin DataViewPrototypeGetByteOffset(
-      js-implicit context: Context, receiver: Object)(...arguments): Number {
+      js-implicit context: Context, receiver: JSAny)(...arguments): Number {
     const dataView: JSDataView = ValidateDataView(
         context, receiver, 'get DataView.prototype.byte_offset');
-    if (WasNeutered(dataView)) {
+    if (WasDetached(dataView)) {
       // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
-      // here if the JSArrayBuffer of the {dataView} was neutered.
+      // here if the JSArrayBuffer of the {dataView} was detached.
       return 0;
     }
     return Convert<Number>(dataView.byte_offset);
@@ -351,14 +351,14 @@ namespace data_view {
     return MakeBigInt(lowWord, highWord, signed);
   }
 
-  extern macro ToSmiIndex(Object, Context): Smi
+  extern macro ToSmiIndex(JSAny, Context): Smi
       labels RangeError;
   extern macro DataViewBuiltinsAssembler::DataViewElementSize(
       constexpr ElementsKind): constexpr int31;
 
   transitioning macro DataViewGet(
-      context: Context, receiver: Object, offset: Object,
-      requestedLittleEndian: Object, kind: constexpr ElementsKind): Numeric {
+      context: Context, receiver: JSAny, offset: JSAny,
+      requestedLittleEndian: JSAny, kind: constexpr ElementsKind): Numeric {
     const dataView: JSDataView =
         ValidateDataView(context, receiver, MakeDataViewGetterNameString(kind));
 
@@ -416,91 +416,91 @@ namespace data_view {
   }
 
   transitioning javascript builtin DataViewPrototypeGetUint8(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
     return DataViewGet(context, receiver, offset, Undefined, UINT8_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeGetInt8(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
     return DataViewGet(context, receiver, offset, Undefined, INT8_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeGetUint16(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewGet(
         context, receiver, offset, isLittleEndian, UINT16_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeGetInt16(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewGet(
         context, receiver, offset, isLittleEndian, INT16_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeGetUint32(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewGet(
         context, receiver, offset, isLittleEndian, UINT32_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeGetInt32(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewGet(
         context, receiver, offset, isLittleEndian, INT32_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeGetFloat32(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewGet(
         context, receiver, offset, isLittleEndian, FLOAT32_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeGetFloat64(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewGet(
         context, receiver, offset, isLittleEndian, FLOAT64_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeGetBigUint64(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewGet(
         context, receiver, offset, isLittleEndian, BIGUINT64_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeGetBigInt64(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewGet(
         context, receiver, offset, isLittleEndian, BIGINT64_ELEMENTS);
   }
 
-  extern macro ToNumber(Context, Object): Number;
-  extern macro ToBigInt(Context, Object): BigInt;
+  extern macro ToNumber(Context, JSAny): Number;
+  extern macro ToBigInt(Context, JSAny): BigInt;
   extern macro TruncateFloat64ToWord32(float64): uint32;
 
   extern macro DataViewBuiltinsAssembler::StoreWord8(RawPtr, uintptr, uint32):
@@ -632,8 +632,8 @@ namespace data_view {
   }
 
   transitioning macro DataViewSet(
-      context: Context, receiver: Object, offset: Object, value: Object,
-      requestedLittleEndian: Object, kind: constexpr ElementsKind): Object {
+      context: Context, receiver: JSAny, offset: JSAny, value: JSAny,
+      requestedLittleEndian: JSAny, kind: constexpr ElementsKind): JSAny {
     const dataView: JSDataView =
         ValidateDataView(context, receiver, MakeDataViewSetterNameString(kind));
 
@@ -718,96 +718,96 @@ namespace data_view {
   }
 
   transitioning javascript builtin DataViewPrototypeSetUint8(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, Undefined, UINT8_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeSetInt8(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, Undefined, INT8_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeSetUint16(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 2 ? arguments[2] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, isLittleEndian, UINT16_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeSetInt16(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 2 ? arguments[2] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, isLittleEndian, INT16_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeSetUint32(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 2 ? arguments[2] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, isLittleEndian, UINT32_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeSetInt32(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 2 ? arguments[2] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, isLittleEndian, INT32_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeSetFloat32(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 2 ? arguments[2] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, isLittleEndian, FLOAT32_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeSetFloat64(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 2 ? arguments[2] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, isLittleEndian, FLOAT64_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeSetBigUint64(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 2 ? arguments[2] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, isLittleEndian, BIGUINT64_ELEMENTS);
   }
 
   transitioning javascript builtin DataViewPrototypeSetBigInt64(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
-    const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
-    const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
-    const isLittleEndian: Object =
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
+    const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
+    const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+    const isLittleEndian: JSAny =
         arguments.length > 2 ? arguments[2] : Undefined;
     return DataViewSet(
         context, receiver, offset, value, isLittleEndian, BIGINT64_ELEMENTS);
diff --git a/deps/v8/src/builtins/extras-utils.tq b/deps/v8/src/builtins/extras-utils.tq
index 3675fda19165f4..0b48b962e2c46f 100644
--- a/deps/v8/src/builtins/extras-utils.tq
+++ b/deps/v8/src/builtins/extras-utils.tq
@@ -3,23 +3,22 @@
 // found in the LICENSE file.
 
 namespace extras_utils {
-  extern runtime CreatePrivateSymbol(Context, Object): HeapObject;
-  extern runtime PromiseMarkAsHandled(Context, Object): Undefined;
-  extern runtime PromiseStatus(Context, Object): Smi;
+  extern runtime CreatePrivateSymbol(Context, JSAny): PrivateSymbol;
+  extern runtime PromiseMarkAsHandled(Context, JSAny): Undefined;
+  extern runtime PromiseStatus(Context, JSAny): Smi;
 
   javascript builtin ExtrasUtilsCreatePrivateSymbol(
-      js-implicit context: Context,
-      receiver: Object)(...arguments): HeapObject {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     return CreatePrivateSymbol(context, arguments[0]);
   }
 
   javascript builtin ExtrasUtilsMarkPromiseAsHandled(
-      js-implicit context: Context, receiver: Object)(...arguments): Undefined {
+      js-implicit context: Context, receiver: JSAny)(...arguments): Undefined {
     return PromiseMarkAsHandled(context, arguments[0]);
   }
 
   javascript builtin ExtrasUtilsPromiseState(
-      js-implicit context: Context, receiver: Object)(...arguments): Smi {
+      js-implicit context: Context, receiver: JSAny)(...arguments): Smi {
     return PromiseStatus(context, arguments[0]);
   }
 }
diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq
index 55591883479519..7467381690e21d 100644
--- a/deps/v8/src/builtins/frames.tq
+++ b/deps/v8/src/builtins/frames.tq
@@ -102,7 +102,7 @@ macro LoadLengthFromAdapterFrame(implicit context: Context)(
 }
 
 operator '==' macro FrameTypeEquals(f1: FrameType, f2: FrameType): bool {
-  return WordEqual(f1, f2);
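+  // TaggedEqual compares tagged values and stays correct when pointer
+  // compression is enabled, unlike a raw word comparison.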
+  return TaggedEqual(f1, f2);
 }
 
 macro Cast<A: type>(implicit context: Context)(o: Frame): A labels CastError;
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
index dd23ac5b5a6519..4fc051eb14de55 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.cc
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -36,7 +36,7 @@ void GrowableFixedArray::Push(TNode<Object> const value) {
 TNode<JSArray> GrowableFixedArray::ToJSArray(TNode<Context> const context) {
   const ElementsKind kind = PACKED_ELEMENTS;
 
-  TNode<Context> const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   TNode<Map> const array_map = LoadJSArrayElementsMap(kind, native_context);
 
   // Shrink to fit if necessary.
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 995be77f754ee0..feabac3b66abbe 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -72,7 +72,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   ExternalReference real_stack_limit =
-      ExternalReference::address_of_real_stack_limit(masm->isolate());
+      ExternalReference::address_of_real_jslimit(masm->isolate());
   // Compute the space that is left as a negative number in scratch. If
   // we already overflowed, this will be a positive number.
   __ mov(scratch, __ ExternalReferenceAsOperand(real_stack_limit, scratch));
@@ -2676,7 +2676,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ Push(kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ mov(ecx, FieldOperand(kWasmInstanceRegister,
-                             WasmInstanceObject::kCEntryStubOffset));
+                             WasmInstanceObject::kIsolateRootOffset));
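+    // The CEntry builtin now lives in the isolate's builtins table, reached
+    // via the isolate root that is cached on the instance object.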
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+    __ mov(ecx, MemOperand(ecx, IsolateData::builtin_slot_offset(centry_id)));
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(kContextRegister, Smi::zero());
diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq
index d96fa924ab0418..41ec0c36e42e0e 100644
--- a/deps/v8/src/builtins/internal-coverage.tq
+++ b/deps/v8/src/builtins/internal-coverage.tq
@@ -51,7 +51,7 @@ namespace internal_coverage {
   }
 
   builtin IncBlockCounter(implicit context: Context)(
-      function: JSFunction, coverageArraySlotIndex: Smi): Object {
+      function: JSFunction, coverageArraySlotIndex: Smi): Undefined {
     // It's quite possible that a function contains IncBlockCounter bytecodes,
     // but no coverage info exists. This happens e.g. by selecting the
     // best-effort coverage collection mode, which triggers deletion of all
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index b770f1b6528378..06e8ea539c0dc2 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -11,13 +11,13 @@ namespace iterator {
     object: JSReceiver;
 
     // iteratorRecord.[[NextMethod]]
-    next: Object;
+    next: JSAny;
   }
 
   extern macro IteratorBuiltinsAssembler::GetIteratorMethod(
-      implicit context: Context)(Object): Object;
+      implicit context: Context)(JSAny): JSAny;
   extern macro IteratorBuiltinsAssembler::GetIterator(
-      implicit context: Context)(Object): IteratorRecord;
+      implicit context: Context)(JSAny): IteratorRecord;
 
   extern macro IteratorBuiltinsAssembler::IteratorStep(
       implicit context: Context)(IteratorRecord): JSReceiver
@@ -27,18 +27,33 @@ namespace iterator {
       labels Done;
 
   extern macro IteratorBuiltinsAssembler::IteratorValue(
-      implicit context: Context)(JSReceiver): Object;
+      implicit context: Context)(JSReceiver): JSAny;
   extern macro IteratorBuiltinsAssembler::IteratorValue(
-      implicit context: Context)(JSReceiver, Map): Object;
+      implicit context: Context)(JSReceiver, Map): JSAny;
 
   extern macro IteratorBuiltinsAssembler::IteratorCloseOnException(
-      implicit context: Context)(IteratorRecord, Object): never;
+      implicit context: Context)(IteratorRecord, JSAny): never;
 
   extern macro IteratorBuiltinsAssembler::IterableToList(
-      implicit context: Context)(Object, Object): JSArray;
+      implicit context: Context)(JSAny, JSAny): JSArray;
 
   extern builtin IterableToListMayPreserveHoles(implicit context:
-                                                    Context)(Object, Object);
+                                                    Context)(JSAny, JSAny);
   extern builtin IterableToListWithSymbolLookup(implicit context:
-                                                    Context)(Object);
+                                                    Context)(JSAny);
+
+  transitioning builtin GetIteratorWithFeedback(
+      context: Context, receiver: JSAny, feedbackSlot: Smi,
+      feedback: Undefined | FeedbackVector): JSAny {
+    typeswitch (feedback) {
+      case (Undefined): {
+        return GetProperty(receiver, IteratorSymbolConstant());
+      }
+      case (feedback: FeedbackVector): {
+        return LoadIC(
+            context, receiver, IteratorSymbolConstant(), feedbackSlot,
+            feedback);
+      }
+    }
+  }
 }
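
Most of the Object → JSAny churn in this and the following Torque files is one mechanical tightening: Object admits any tagged value, including V8-internal types, while JSAny covers exactly the values JavaScript code can observe, so the narrower parameter and return types turn internal-type leaks into Torque type errors. The new GetIteratorWithFeedback also shows the feedback idiom: with no FeedbackVector it does a plain @@iterator lookup, otherwise it routes the same lookup through LoadIC so the slot records map feedback. A toy C++ hierarchy showing what the narrowing buys (illustrative names, not V8's actual types):

struct Object {};               // any tagged value, internals included
struct FixedArray : Object {};  // internal: never visible to JS
struct JSAny : Object {};       // exactly the JS-exposed values
struct Smi : JSAny {};
struct JSReceiver : JSAny {};

void IteratorValue(const JSAny&) {}

int main() {
  IteratorValue(Smi{});           // fine
  // IteratorValue(FixedArray{}); // compile error, as desired
}
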
diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq
index df43b30efc4e0b..99a29bd5cb6651 100644
--- a/deps/v8/src/builtins/math.tq
+++ b/deps/v8/src/builtins/math.tq
@@ -7,7 +7,7 @@ namespace math {
   extern macro Float64Acos(float64): float64;
 
   transitioning javascript builtin
-  MathAcos(context: Context, _receiver: Object, x: Object): Number {
+  MathAcos(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Acos(value));
   }
@@ -16,7 +16,7 @@ namespace math {
   extern macro Float64Acosh(float64): float64;
 
   transitioning javascript builtin
-  MathAcosh(context: Context, _receiver: Object, x: Object): Number {
+  MathAcosh(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Acosh(value));
   }
@@ -25,7 +25,7 @@ namespace math {
   extern macro Float64Asin(float64): float64;
 
   transitioning javascript builtin
-  MathAsin(context: Context, _receiver: Object, x: Object): Number {
+  MathAsin(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Asin(value));
   }
@@ -34,7 +34,7 @@ namespace math {
   extern macro Float64Asinh(float64): float64;
 
   transitioning javascript builtin
-  MathAsinh(context: Context, _receiver: Object, x: Object): Number {
+  MathAsinh(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Asinh(value));
   }
@@ -43,7 +43,7 @@ namespace math {
   extern macro Float64Atan(float64): float64;
 
   transitioning javascript builtin
-  MathAtan(context: Context, _receiver: Object, x: Object): Number {
+  MathAtan(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Atan(value));
   }
@@ -52,7 +52,7 @@ namespace math {
   extern macro Float64Atan2(float64, float64): float64;
 
   transitioning javascript builtin
-  MathAtan2(context: Context, _receiver: Object, y: Object, x: Object): Number {
+  MathAtan2(js-implicit context: Context)(y: JSAny, x: JSAny): Number {
     const yValue = Convert<float64>(ToNumber_Inline(context, y));
     const xValue = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Atan2(yValue, xValue));
@@ -62,7 +62,7 @@ namespace math {
   extern macro Float64Atanh(float64): float64;
 
   transitioning javascript builtin
-  MathAtanh(context: Context, _receiver: Object, x: Object): Number {
+  MathAtanh(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Atanh(value));
   }
@@ -71,7 +71,7 @@ namespace math {
   extern macro Float64Cbrt(float64): float64;
 
   transitioning javascript builtin
-  MathCbrt(context: Context, _receiver: Object, x: Object): Number {
+  MathCbrt(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Cbrt(value));
   }
@@ -80,7 +80,7 @@ namespace math {
   extern macro Word32Clz(int32): int32;
 
   transitioning javascript builtin
-  MathClz32(context: Context, _receiver: Object, x: Object): Number {
+  MathClz32(js-implicit context: Context)(x: JSAny): Number {
     const num = ToNumber_Inline(context, x);
 
     let value: int32;
@@ -100,7 +100,7 @@ namespace math {
   extern macro Float64Cos(float64): float64;
 
   transitioning javascript builtin
-  MathCos(context: Context, _receiver: Object, x: Object): Number {
+  MathCos(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Cos(value));
   }
@@ -109,7 +109,7 @@ namespace math {
   extern macro Float64Cosh(float64): float64;
 
   transitioning javascript builtin
-  MathCosh(context: Context, _receiver: Object, x: Object): Number {
+  MathCosh(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Cosh(value));
   }
@@ -118,7 +118,7 @@ namespace math {
   extern macro Float64Exp(float64): float64;
 
   transitioning javascript builtin
-  MathExp(context: Context, _receiver: Object, x: Object): Number {
+  MathExp(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Exp(value));
   }
@@ -127,14 +127,14 @@ namespace math {
   extern macro Float64Expm1(float64): float64;
 
   transitioning javascript builtin
-  MathExpm1(context: Context, _receiver: Object, x: Object): Number {
+  MathExpm1(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Expm1(value));
   }
 
   // ES6 #sec-math.fround
   transitioning javascript builtin
-  MathFround(context: Context, _receiver: Object, x: Object): Number {
+  MathFround(js-implicit context: Context)(x: JSAny): Number {
     const x32 = Convert<float32>(ToNumber_Inline(context, x));
     const x64 = Convert<float64>(x32);
     return Convert<Number>(x64);
@@ -144,7 +144,7 @@ namespace math {
   extern macro Float64Log(float64): float64;
 
   transitioning javascript builtin
-  MathLog(context: Context, _receiver: Object, x: Object): Number {
+  MathLog(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Log(value));
   }
@@ -153,7 +153,7 @@ namespace math {
   extern macro Float64Log1p(float64): float64;
 
   transitioning javascript builtin
-  MathLog1p(context: Context, _receiver: Object, x: Object): Number {
+  MathLog1p(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Log1p(value));
   }
@@ -162,7 +162,7 @@ namespace math {
   extern macro Float64Log10(float64): float64;
 
   transitioning javascript builtin
-  MathLog10(context: Context, _receiver: Object, x: Object): Number {
+  MathLog10(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Log10(value));
   }
@@ -171,7 +171,7 @@ namespace math {
   extern macro Float64Log2(float64): float64;
 
   transitioning javascript builtin
-  MathLog2(context: Context, _receiver: Object, x: Object): Number {
+  MathLog2(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Log2(value));
   }
@@ -180,14 +180,14 @@ namespace math {
   extern macro Float64Sin(float64): float64;
 
   transitioning javascript builtin
-  MathSin(context: Context, _receiver: Object, x: Object): Number {
+  MathSin(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Sin(value));
   }
 
   // ES6 #sec-math.sign
   transitioning javascript builtin
-  MathSign(context: Context, _receiver: Object, x: Object): Number {
+  MathSign(js-implicit context: Context)(x: JSAny): Number {
     const num = ToNumber_Inline(context, x);
     const value = Convert<float64>(num);
 
@@ -204,7 +204,7 @@ namespace math {
   extern macro Float64Sinh(float64): float64;
 
   transitioning javascript builtin
-  MathSinh(context: Context, _receiver: Object, x: Object): Number {
+  MathSinh(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Sinh(value));
   }
@@ -213,7 +213,7 @@ namespace math {
   extern macro Float64Sqrt(float64): float64;
 
   transitioning javascript builtin
-  MathSqrt(context: Context, _receiver: Object, x: Object): Number {
+  MathSqrt(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Sqrt(value));
   }
@@ -222,7 +222,7 @@ namespace math {
   extern macro Float64Tan(float64): float64;
 
   transitioning javascript builtin
-  MathTan(context: Context, _receiver: Object, x: Object): Number {
+  MathTan(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Tan(value));
   }
@@ -231,8 +231,56 @@ namespace math {
   extern macro Float64Tanh(float64): float64;
 
   transitioning javascript builtin
-  MathTanh(context: Context, _receiver: Object, x: Object): Number {
+  MathTanh(js-implicit context: Context)(x: JSAny): Number {
     const value = Convert<float64>(ToNumber_Inline(context, x));
     return Convert<Number>(Float64Tanh(value));
   }
+
+  extern macro Float64Abs(float64): float64;
+
+  // ES6 #sec-math.hypot
+  transitioning javascript builtin
+  MathHypot(js-implicit context: Context, receiver: JSAny)(...arguments):
+      Number {
+    const length = arguments.length;
+    if (length == 0) {
+      return 0;
+    }
+    const absValues = AllocateZeroedFixedDoubleArray(length);
+    let oneArgIsNaN: bool = false;
+    let max: float64 = 0;
+    for (let i: intptr = 0; i < length; ++i) {
+      const value = Convert<float64>(ToNumber_Inline(context, arguments[i]));
+      if (Float64IsNaN(value)) {
+        oneArgIsNaN = true;
+      } else {
+        const absValue = Float64Abs(value);
+        absValues.floats[i] = absValue;
+        if (absValue > max) {
+          max = absValue;
+        }
+      }
+    }
+    if (max == V8_INFINITY) {
+      return V8_INFINITY;
+    } else if (oneArgIsNaN) {
+      return kNaN;
+    } else if (max == 0) {
+      return 0;
+    }
+    assert(max > 0);
+
+    // Kahan summation to avoid rounding errors.
+    // Normalize the numbers to the largest one to avoid overflow.
+    let sum: float64 = 0;
+    let compensation: float64 = 0;
+    for (let i: intptr = 0; i < length; ++i) {
+      const n = absValues.floats[i] / max;
+      const summand = n * n - compensation;
+      const preliminary = sum + summand;
+      compensation = (preliminary - sum) - summand;
+      sum = preliminary;
+    }
+    return Convert<Number>(Float64Sqrt(sum) * max);
+  }
 }
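
The comments in MathHypot compress two numerical tricks: dividing by the largest magnitude keeps every squared term in [0, 1], so n * n cannot overflow to infinity even when the true result is finite, and Kahan compensation re-injects the low-order bits that each floating-point addition would otherwise discard. A freestanding C++ rendering of the same loop, minus the NaN/infinity special cases handled above:

#include <algorithm>
#include <cmath>
#include <vector>

double Hypot(const std::vector<double>& xs) {
  double max = 0;
  for (double x : xs) max = std::max(max, std::fabs(x));
  if (max == 0) return 0;
  double sum = 0;
  double compensation = 0;
  for (double x : xs) {
    double n = std::fabs(x) / max;          // normalized: n * n is in [0, 1]
    double summand = n * n - compensation;  // re-add bits lost last round
    double preliminary = sum + summand;
    compensation = (preliminary - sum) - summand;  // bits lost this round
    sum = preliminary;
  }
  return std::sqrt(sum) * max;  // undo the normalization
}
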
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index a359b2436f1818..d3237a1c381c9d 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -84,6 +84,17 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
+void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  __ Lw(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
@@ -156,7 +167,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch1);
   // Make scratch1 the space we have left. The stack might already be overflowed
   // here which will cause scratch1 to become negative.
   __ subu(scratch1, sp, scratch1);
@@ -368,7 +379,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   Label okay;
-  __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch1);
   // Make scratch1 the space we have left. The stack might already be
   // overflowed here which will cause scratch1 to become negative.
   __ Subu(scratch1, sp, scratch1);
@@ -715,7 +726,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, kScratchReg);
   __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
 
   // Push receiver.
@@ -1082,7 +1093,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     // Do a stack check to ensure we don't go over the limit.
     Label ok;
     __ Subu(t1, sp, Operand(t0));
-    __ LoadRoot(a2, RootIndex::kRealStackLimit);
+    LoadRealStackLimit(masm, a2);
     __ Branch(&ok, hs, t1, Operand(a2));
     __ CallRuntime(Runtime::kThrowStackOverflow);
     __ bind(&ok);
@@ -2061,7 +2072,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
     __ Subu(sp, sp, Operand(t1));
     // Check the stack for overflow. We are not trying to catch interruptions
     // (i.e. debug break and preemption) here, so check the "real stack limit".
-    __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+    LoadRealStackLimit(masm, kScratchReg);
     __ Branch(&done, hs, sp, Operand(kScratchReg));
     // Restore the stack pointer.
     __ Addu(sp, sp, Operand(t1));
@@ -2219,7 +2230,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     __ Subu(sp, sp, Operand(t1));
     // Check the stack for overflow. We are not trying to catch interruptions
     // (i.e. debug break and preemption) here, so check the "real stack limit".
-    __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+    LoadRealStackLimit(masm, kScratchReg);
     __ Branch(&done, hs, sp, Operand(kScratchReg));
     // Restore the stack pointer.
     __ Addu(sp, sp, Operand(t1));
@@ -2488,7 +2499,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ lw(a2, FieldMemOperand(kWasmInstanceRegister,
-                              WasmInstanceObject::kCEntryStubOffset));
+                              WasmInstanceObject::kIsolateRootOffset));
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+    __ lw(a2, MemOperand(a2, IsolateData::builtin_slot_offset(centry_id)));
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(kContextRegister, Smi::zero());
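
LoadRealStackLimit (duplicated in the mips64 and ppc files below) replaces a heap-root load: the "real" JS stack limit now lives in IsolateData rather than in the roots table, so it sits at a fixed signed offset from kRootRegister and a single root-relative load fetches it. The overflow test around it is unchanged: sp minus the limit goes negative exactly when the stack has already grown past the limit. A standalone sketch of the offset the DCHECK/CHECK pair guards (toy version of RootRegisterOffsetForExternalReference):

#include <cstdint>

intptr_t RootRegisterOffsetFor(uintptr_t reference_address,
                               uintptr_t isolate_root) {
  // Must fit the load instruction's displacement; the 64-bit ports CHECK
  // that this delta is a valid int32.
  return static_cast<intptr_t>(reference_address) -
         static_cast<intptr_t>(isolate_root);
}
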
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index c5565b90de7a9d..7cb66470a34e36 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -83,6 +83,18 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
+void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  CHECK(is_int32(offset));
+  __ Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
@@ -156,7 +168,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch1);
   // Make scratch1 the space we have left. The stack might already be overflowed
   // here which will cause scratch1 to become negative.
   __ dsubu(scratch1, sp, scratch1);
@@ -407,7 +419,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, kScratchReg);
   __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
 
   // Push receiver.
@@ -514,7 +526,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   Label okay;
-  __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch1);
   // Make scratch1 the space we have left. The stack might already be
   // overflowed here which will cause scratch1 to become negative.
   __ dsubu(scratch1, sp, scratch1);
@@ -1099,7 +1111,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     // Do a stack check to ensure we don't go over the limit.
     Label ok;
     __ Dsubu(a5, sp, Operand(a4));
-    __ LoadRoot(a2, RootIndex::kRealStackLimit);
+    LoadRealStackLimit(masm, a2);
     __ Branch(&ok, hs, a5, Operand(a2));
     __ CallRuntime(Runtime::kThrowStackOverflow);
     __ bind(&ok);
@@ -2100,7 +2112,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
     __ Dsubu(sp, sp, Operand(a5));
     // Check the stack for overflow. We are not trying to catch interruptions
     // (i.e. debug break and preemption) here, so check the "real stack limit".
-    __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+    LoadRealStackLimit(masm, kScratchReg);
     __ Branch(&done, hs, sp, Operand(kScratchReg));
     // Restore the stack pointer.
     __ Daddu(sp, sp, Operand(a5));
@@ -2254,7 +2266,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     __ Dsubu(sp, sp, Operand(a5));
     // Check the stack for overflow. We are not trying to catch interruptions
     // (i.e. debug break and preemption) here, so check the "real stack limit".
-    __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+    LoadRealStackLimit(masm, kScratchReg);
     __ Branch(&done, hs, sp, Operand(kScratchReg));
     // Restore the stack pointer.
     __ Daddu(sp, sp, Operand(a5));
@@ -2525,7 +2537,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ Ld(a2, FieldMemOperand(kWasmInstanceRegister,
-                              WasmInstanceObject::kCEntryStubOffset));
+                              WasmInstanceObject::kIsolateRootOffset));
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+    __ Ld(a2, MemOperand(a2, IsolateData::builtin_slot_offset(centry_id)));
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(kContextRegister, Smi::zero());
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
index 32115e78eab250..fc36e34038d96f 100644
--- a/deps/v8/src/builtins/object-fromentries.tq
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -5,7 +5,7 @@
 namespace object {
 
   transitioning macro ObjectFromEntriesFastCase(implicit context: Context)(
-      iterable: Object): JSObject labels IfSlow {
+      iterable: JSAny): JSObject labels IfSlow {
     typeswitch (iterable) {
       case (array: FastJSArrayWithNoCustomIteration): {
         const elements: FixedArray =
@@ -14,7 +14,7 @@ namespace object {
         const result: JSObject = NewJSObject();
 
         for (let k: Smi = 0; k < length; ++k) {
-          const value: Object = array::LoadElementOrUndefined(elements, k);
+          const value: JSAny = array::LoadElementOrUndefined(elements, k);
           const pair: KeyValuePair =
               collections::LoadKeyValuePairNoSideEffects(value)
               otherwise IfSlow;
@@ -26,16 +26,16 @@ namespace object {
         }
         return result;
       }
-      case (Object): {
+      case (JSAny): {
         goto IfSlow;
       }
     }
   }
 
   transitioning javascript builtin
-  ObjectFromEntries(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
-    const iterable: Object = arguments[0];
+  ObjectFromEntries(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
+    const iterable: JSAny = arguments[0];
     try {
       if (IsNullOrUndefined(iterable)) goto Throw;
       return ObjectFromEntriesFastCase(iterable) otherwise IfSlow;
@@ -50,7 +50,7 @@ namespace object {
           const step: JSReceiver =
               iterator::IteratorStep(i, fastIteratorResultMap)
               otherwise return result;
-          const iteratorValue: Object =
+          const iteratorValue: JSAny =
               iterator::IteratorValue(step, fastIteratorResultMap);
           const pair: KeyValuePair =
               collections::LoadKeyValuePair(iteratorValue);
diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq
index 6706a8f943399f..3dc973cf4067dc 100644
--- a/deps/v8/src/builtins/object.tq
+++ b/deps/v8/src/builtins/object.tq
@@ -4,31 +4,31 @@
 
 namespace runtime {
   extern transitioning runtime
-  ObjectIsExtensible(implicit context: Context)(Object): Object;
+  ObjectIsExtensible(implicit context: Context)(JSAny): JSAny;
 
   extern transitioning runtime
   JSReceiverPreventExtensionsThrow(implicit context: Context)(JSReceiver):
-      Object;
+      JSAny;
 
   extern transitioning runtime
   JSReceiverPreventExtensionsDontThrow(implicit context: Context)(JSReceiver):
-      Object;
+      JSAny;
 
   extern transitioning runtime
-  JSReceiverGetPrototypeOf(implicit context: Context)(JSReceiver): Object;
+  JSReceiverGetPrototypeOf(implicit context: Context)(JSReceiver): JSAny;
 
   extern transitioning runtime
-  JSReceiverSetPrototypeOfThrow(implicit context: Context)(JSReceiver, Object):
-      Object;
+  JSReceiverSetPrototypeOfThrow(implicit context: Context)(JSReceiver, JSAny):
+      JSAny;
 
   extern transitioning runtime
   JSReceiverSetPrototypeOfDontThrow(implicit context:
-                                        Context)(JSReceiver, Object): Object;
+                                        Context)(JSReceiver, JSAny): JSAny;
 }  // namespace runtime
 
 namespace object {
   transitioning macro
-  ObjectIsExtensible(implicit context: Context)(object: Object): Object {
+  ObjectIsExtensible(implicit context: Context)(object: JSAny): JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
     const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
         otherwise return runtime::ObjectIsExtensible(objectJSReceiver);
@@ -36,8 +36,8 @@ namespace object {
   }
 
   transitioning macro
-  ObjectPreventExtensionsThrow(implicit context: Context)(object: Object):
-      Object {
+  ObjectPreventExtensionsThrow(implicit context: Context)(object: JSAny):
+      JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
     const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
         otherwise return runtime::JSReceiverPreventExtensionsThrow(
@@ -47,8 +47,8 @@ namespace object {
   }
 
   transitioning macro
-  ObjectPreventExtensionsDontThrow(implicit context: Context)(object: Object):
-      Object {
+  ObjectPreventExtensionsDontThrow(implicit context: Context)(object: JSAny):
+      JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
     const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
         otherwise return runtime::JSReceiverPreventExtensionsDontThrow(
@@ -57,14 +57,14 @@ namespace object {
   }
 
   transitioning macro
-  ObjectGetPrototypeOf(implicit context: Context)(object: Object): Object {
+  ObjectGetPrototypeOf(implicit context: Context)(object: JSAny): JSAny {
     const objectJSReceiver: JSReceiver = ToObject_Inline(context, object);
     return object::JSReceiverGetPrototypeOf(objectJSReceiver);
   }
 
   transitioning macro
   JSReceiverGetPrototypeOf(implicit context: Context)(object: JSReceiver):
-      Object {
+      JSAny {
     const objectJSProxy = Cast<JSProxy>(object)
         otherwise return runtime::JSReceiverGetPrototypeOf(object);
     return proxy::ProxyGetPrototypeOf(objectJSProxy);
@@ -72,7 +72,7 @@ namespace object {
 
   transitioning macro
   ObjectSetPrototypeOfThrow(implicit context: Context)(
-      object: Object, proto: Object): Object {
+      object: JSAny, proto: JSReceiver | Null): JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
     const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
         otherwise return runtime::JSReceiverSetPrototypeOfThrow(
@@ -83,7 +83,7 @@ namespace object {
 
   transitioning macro
   ObjectSetPrototypeOfDontThrow(implicit context: Context)(
-      object: Object, proto: Object): Object {
+      object: JSAny, proto: JSReceiver | Null): JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
     const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
         otherwise return runtime::JSReceiverSetPrototypeOfDontThrow(
@@ -94,24 +94,24 @@ namespace object {
 
 namespace object_isextensible {
   // ES6 section 19.1.2.11 Object.isExtensible ( O )
-  transitioning javascript builtin ObjectIsExtensible(
-      js-implicit context: Context)(_receiver: Object, object: Object): Object {
+  transitioning javascript builtin
+  ObjectIsExtensible(js-implicit context: Context)(object: JSAny): JSAny {
     return object::ObjectIsExtensible(object);
   }
 }  // namespace object_isextensible
 
 namespace object_preventextensions {
   // ES6 section 19.1.2.15 Object.preventExtensions ( O )
-  transitioning javascript builtin ObjectPreventExtensions(
-      js-implicit context: Context)(_receiver: Object, object: Object): Object {
+  transitioning javascript builtin
+  ObjectPreventExtensions(js-implicit context: Context)(object: JSAny): JSAny {
     return object::ObjectPreventExtensionsThrow(object);
   }
 }  // namespace object_preventextensions
 
 namespace object_getprototypeof {
   // ES6 section 19.1.2.9 Object.getPrototypeOf ( O )
-  transitioning javascript builtin ObjectGetPrototypeOf(
-      js-implicit context: Context)(_receiver: Object, object: Object): Object {
+  transitioning javascript builtin
+  ObjectGetPrototypeOf(js-implicit context: Context)(object: JSAny): JSAny {
     return object::ObjectGetPrototypeOf(object);
   }
 }  // namespace object_getprototypeof
@@ -119,8 +119,7 @@ namespace object_getprototypeof {
 namespace object_setprototypeof {
   // ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto )
   transitioning javascript builtin ObjectSetPrototypeOf(
-      js-implicit context:
-          Context)(_receiver: Object, object: Object, proto: Object): Object {
+      js-implicit context: Context)(object: JSAny, proto: JSAny): JSAny {
     // 1. Set O to ? RequireObjectCoercible(O).
     RequireObjectCoercible(object, 'Object.setPrototypeOf');
 
@@ -130,9 +129,13 @@ namespace object_setprototypeof {
     // 4. Let status be ? O.[[SetPrototypeOf]](proto).
     // 5. If status is false, throw a TypeError exception.
     // 6. Return O.
-    if (proto == Null || Is<JSReceiver>(proto)) {
-      return object::ObjectSetPrototypeOfThrow(object, proto);
+    typeswitch (proto) {
+      case (proto: JSReceiver | Null): {
+        return object::ObjectSetPrototypeOfThrow(object, proto);
+      }
+      case (JSAny): {
+        ThrowTypeError(kProtoObjectOrNull, proto);
+      }
     }
-    ThrowTypeError(kProtoObjectOrNull, proto);
   }
 }  // namespace object_setprototypeof
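
ObjectSetPrototypeOf trades the untyped `proto == Null || Is<JSReceiver>(proto)` test for a typeswitch because the callee's parameter was tightened to `JSReceiver | Null`: inside the first case, `proto` is statically known to carry that union type, so no unchecked cast is needed. A rough C++ analogue of the flow-sensitive narrowing, using std::variant purely for illustration:

#include <variant>

struct Null {};
struct JSReceiver {};
struct Primitive {};  // stands in for every other JSAny case
using JSAny = std::variant<Null, JSReceiver, Primitive>;
using ProtoArg = std::variant<Null, JSReceiver>;  // the narrowed type

void SetPrototypeOfThrow(const ProtoArg&) {}  // callee wants the union

bool SetPrototypeOf(const JSAny& proto) {
  if (const auto* n = std::get_if<Null>(&proto)) {
    SetPrototypeOfThrow(*n);  // statically a valid ProtoArg
    return true;
  }
  if (const auto* r = std::get_if<JSReceiver>(&proto)) {
    SetPrototypeOfThrow(*r);
    return true;
  }
  return false;  // the JSAny case: ThrowTypeError(kProtoObjectOrNull, ...)
}
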
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index a42cb9bebd2824..485b793395240a 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -81,12 +81,24 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
+void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  CHECK(is_int32(offset));
+  __ LoadP(destination, MemOperand(kRootRegister, offset));
+}
+
 void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                  Register scratch, Label* stack_overflow) {
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch);
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
   __ sub(scratch, sp, scratch);
@@ -437,7 +449,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ CompareRoot(sp, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch);
+  __ cmpl(sp, scratch);
   __ blt(&stack_overflow);
 
   // Push receiver.
@@ -729,7 +742,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   Label okay;
-  __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch1);
   // Make scratch1 the space we have left. The stack might already be overflowed
   // here which will cause scratch1 to become negative.
   __ sub(scratch1, sp, scratch1);
@@ -1144,7 +1157,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     // Do a stack check to ensure we don't go over the limit.
     Label ok;
     __ sub(r8, sp, r5);
-    __ LoadRoot(r0, RootIndex::kRealStackLimit);
+    LoadRealStackLimit(masm, r0);
     __ cmpl(r8, r0);
     __ bge(&ok);
     __ CallRuntime(Runtime::kThrowStackOverflow);
@@ -2163,7 +2176,12 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       // Check the stack for overflow. We are not trying to catch interruptions
       // (i.e. debug break and preemption) here, so check the "real stack
       // limit".
-      __ CompareRoot(sp, RootIndex::kRealStackLimit);
+      {
+        UseScratchRegisterScope temps(masm);
+        Register scratch = temps.Acquire();
+        LoadRealStackLimit(masm, scratch);
+        __ cmpl(sp, scratch);
+      }
       __ bgt(&done);  // Signed comparison.
       // Restore the stack pointer.
       __ mr(sp, scratch);
@@ -2599,7 +2617,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ LoadP(r5, FieldMemOperand(kWasmInstanceRegister,
-                                 WasmInstanceObject::kCEntryStubOffset));
+                                 WasmInstanceObject::kIsolateRootOffset));
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+    __ LoadP(r5, MemOperand(r5, IsolateData::builtin_slot_offset(centry_id)));
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ LoadSmiLiteral(cp, Smi::zero());
diff --git a/deps/v8/src/builtins/proxy-constructor.tq b/deps/v8/src/builtins/proxy-constructor.tq
index ad60c20e2c2ea3..cfb11046a7ab64 100644
--- a/deps/v8/src/builtins/proxy-constructor.tq
+++ b/deps/v8/src/builtins/proxy-constructor.tq
@@ -10,8 +10,8 @@ namespace proxy {
   // https://tc39.github.io/ecma262/#sec-proxy-constructor
   transitioning javascript builtin
   ProxyConstructor(
-      js-implicit context: Context, receiver: Object,
-      newTarget: Object)(target: Object, handler: Object): JSProxy {
+      js-implicit context: Context, receiver: JSAny,
+      newTarget: JSAny)(target: JSAny, handler: JSAny): JSProxy {
     try {
       // 1. If NewTarget is undefined, throw a TypeError exception.
       if (newTarget == Undefined) {
diff --git a/deps/v8/src/builtins/proxy-delete-property.tq b/deps/v8/src/builtins/proxy-delete-property.tq
index 759de766efbed1..3054c0d07acbab 100644
--- a/deps/v8/src/builtins/proxy-delete-property.tq
+++ b/deps/v8/src/builtins/proxy-delete-property.tq
@@ -10,8 +10,10 @@ namespace proxy {
   // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-delete-p
   transitioning builtin
   ProxyDeleteProperty(implicit context: Context)(
-      proxy: JSProxy, name: Name, languageMode: LanguageMode): Object {
+      proxy: JSProxy, name: PropertyKey, languageMode: LanguageMode): JSAny {
     const kTrapName: constexpr string = 'deleteProperty';
+    // Handle deeply nested proxies.
+    PerformStackCheck();
     // 1. Assert: IsPropertyKey(P) is true.
     assert(TaggedIsNotSmi(name));
     assert(IsName(name));
@@ -38,7 +40,7 @@ namespace proxy {
       const trapResult = Call(context, trap, handler, target, name);
 
       // 9. If booleanTrapResult is false, return false.
-      if (BranchIfToBooleanIsFalse(trapResult)) {
+      if (!ToBoolean(trapResult)) {
         if (languageMode == SmiConstant(kStrict)) {
           ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName, name);
         }
@@ -56,7 +58,7 @@ namespace proxy {
       // 15. Return true.
       return True;
     }
-    label TrapUndefined(target: Object) {
+    label TrapUndefined(target: JSAny) {
       // 7.a. Return ? target.[[Delete]](P).
       return DeleteProperty(target, name, languageMode);
     }
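
Two idioms in this hunk repeat across the proxy files below: the branching macros BranchIfToBooleanIsTrue/False give way to a bool-returning ToBoolean that composes with an ordinary `if`, and PerformStackCheck guards the trap-less path because proxies can wrap proxies to a user-controlled depth, so [[Delete]] must surface exhaustion as a catchable stack-overflow error instead of smashing the real stack. A toy illustration of the recursion hazard, with an explicit depth guard standing in for the real stack check:

#include <stdexcept>

struct TaggedObject {
  const TaggedObject* proxy_target = nullptr;  // non-null: this is a proxy
};

bool DeleteProperty(const TaggedObject* obj, int depth = 0) {
  if (depth > 100000)  // PerformStackCheck() analogue
    throw std::range_error("Maximum call stack size exceeded");
  if (obj->proxy_target == nullptr) return true;  // ordinary [[Delete]]
  // 7.a. Return ? target.[[Delete]](P) -- one frame per proxy layer.
  return DeleteProperty(obj->proxy_target, depth + 1);
}
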
diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq
index bac07f550c3eb9..54b8cde24339ff 100644
--- a/deps/v8/src/builtins/proxy-get-property.tq
+++ b/deps/v8/src/builtins/proxy-get-property.tq
@@ -7,14 +7,14 @@
 namespace proxy {
 
   extern transitioning builtin GetPropertyWithReceiver(
-      implicit context: Context)(Object, Name, Object, Smi): Object;
+      implicit context: Context)(JSAny, Name, JSAny, Smi): JSAny;
 
   // ES #sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
   // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
   transitioning builtin
   ProxyGetProperty(implicit context: Context)(
-      proxy: JSProxy, name: Name, receiverValue: Object,
-      onNonExistent: Smi): Object {
+      proxy: JSProxy, name: PropertyKey, receiverValue: JSAny,
+      onNonExistent: Smi): JSAny {
     PerformStackCheck();
     // 1. Assert: IsPropertyKey(P) is true.
     assert(TaggedIsNotSmi(name));
diff --git a/deps/v8/src/builtins/proxy-get-prototype-of.tq b/deps/v8/src/builtins/proxy-get-prototype-of.tq
index 2418eaf4230cb3..653d4503d16477 100644
--- a/deps/v8/src/builtins/proxy-get-prototype-of.tq
+++ b/deps/v8/src/builtins/proxy-get-prototype-of.tq
@@ -9,7 +9,7 @@ namespace proxy {
   // ES #sec-proxy-object-internal-methods-and-internal-slots-getprototypeof
   // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-getprototypeof
   transitioning builtin
-  ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): Object {
+  ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): JSAny {
     PerformStackCheck();
     const kTrapName: constexpr string = 'getPrototypeOf';
     try {
@@ -39,7 +39,7 @@ namespace proxy {
 
       // 9. Let extensibleTarget be ? IsExtensible(target).
       // 10. If extensibleTarget is true, return handlerProto.
-      const extensibleTarget: Object = object::ObjectIsExtensible(target);
+      const extensibleTarget: JSAny = object::ObjectIsExtensible(target);
       assert(extensibleTarget == True || extensibleTarget == False);
       if (extensibleTarget == True) {
         return handlerProto;
@@ -51,12 +51,12 @@ namespace proxy {
       // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError
       // exception.
       // 13. Return handlerProto.
-      if (BranchIfSameValue(targetProto, handlerProto)) {
+      if (SameValue(targetProto, handlerProto)) {
         return handlerProto;
       }
       ThrowTypeError(kProxyGetPrototypeOfNonExtensible);
     }
-    label TrapUndefined(target: Object) {
+    label TrapUndefined(target: JSAny) {
       // 6.a. Return ? target.[[GetPrototypeOf]]().
       return object::ObjectGetPrototypeOf(target);
     }
diff --git a/deps/v8/src/builtins/proxy-has-property.tq b/deps/v8/src/builtins/proxy-has-property.tq
index ee394c5d847d89..1f14c68903c838 100644
--- a/deps/v8/src/builtins/proxy-has-property.tq
+++ b/deps/v8/src/builtins/proxy-has-property.tq
@@ -9,7 +9,7 @@ namespace proxy {
   // ES #sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
   // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
   transitioning builtin ProxyHasProperty(implicit context: Context)(
-      proxy: JSProxy, name: Name): Object {
+      proxy: JSProxy, name: PropertyKey): JSAny {
     assert(IsJSProxy(proxy));
 
     PerformStackCheck();
@@ -40,13 +40,13 @@ namespace proxy {
       // CheckHasTrapResult).
       // 10. Return booleanTrapResult.
       const trapResult = Call(context, trap, handler, target, name);
-      if (BranchIfToBooleanIsTrue(trapResult)) {
+      if (ToBoolean(trapResult)) {
         return True;
       }
       CheckHasTrapResult(target, proxy, name);
       return False;
     }
-    label TrapUndefined(target: Object) {
+    label TrapUndefined(target: JSAny) {
       // 7.a. Return ? target.[[HasProperty]](P).
       tail HasProperty(target, name);
     }
diff --git a/deps/v8/src/builtins/proxy-is-extensible.tq b/deps/v8/src/builtins/proxy-is-extensible.tq
index 82f4a5b955c297..dfbdc6f734625f 100644
--- a/deps/v8/src/builtins/proxy-is-extensible.tq
+++ b/deps/v8/src/builtins/proxy-is-extensible.tq
@@ -9,7 +9,7 @@ namespace proxy {
   // ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible
   // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible
   transitioning builtin ProxyIsExtensible(implicit context:
-                                              Context)(proxy: JSProxy): Object {
+                                              Context)(proxy: JSProxy): JSAny {
     PerformStackCheck();
     const kTrapName: constexpr string = 'isExtensible';
     try {
@@ -45,7 +45,7 @@ namespace proxy {
       // 10. Return booleanTrapResult.
       return SelectBooleanConstant(trapResult);
     }
-    label TrapUndefined(target: Object) {
+    label TrapUndefined(target: JSAny) {
       // 6.a. Return ? IsExtensible(target).
       return object::ObjectIsExtensible(target);
     }
diff --git a/deps/v8/src/builtins/proxy-prevent-extensions.tq b/deps/v8/src/builtins/proxy-prevent-extensions.tq
index 6d5d2569fb8645..ab75cfc4cb3707 100644
--- a/deps/v8/src/builtins/proxy-prevent-extensions.tq
+++ b/deps/v8/src/builtins/proxy-prevent-extensions.tq
@@ -9,8 +9,8 @@ namespace proxy {
   // ES #sec-proxy-object-internal-methods-and-internal-slots-preventextensions
   // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-preventextensions
   transitioning builtin
-  ProxyPreventExtensions(implicit context: Context)(
-      proxy: JSProxy, doThrow: Boolean): Object {
+  ProxyPreventExtensions(implicit context:
+                             Context)(proxy: JSProxy, doThrow: Boolean): JSAny {
     PerformStackCheck();
     const kTrapName: constexpr string = 'preventExtensions';
     try {
@@ -36,8 +36,8 @@ namespace proxy {
       // 8. If booleanTrapResult is true, then
       //    8.a. Let extensibleTarget be ? IsExtensible(target).
       //    8.b If extensibleTarget is true, throw a TypeError exception.
-      if (BranchIfToBooleanIsTrue(trapResult)) {
-        const extensibleTarget: Object = object::ObjectIsExtensible(target);
+      if (ToBoolean(trapResult)) {
+        const extensibleTarget: JSAny = object::ObjectIsExtensible(target);
         assert(extensibleTarget == True || extensibleTarget == False);
         if (extensibleTarget == True) {
           ThrowTypeError(kProxyPreventExtensionsExtensible);
@@ -52,7 +52,7 @@ namespace proxy {
       // 9. Return booleanTrapResult.
       return True;
     }
-    label TrapUndefined(target: Object) {
+    label TrapUndefined(target: JSAny) {
       // 6.a. Return ? target.[[PreventExtensions]]().
       if (doThrow == True) {
         return object::ObjectPreventExtensionsThrow(target);
diff --git a/deps/v8/src/builtins/proxy-revocable.tq b/deps/v8/src/builtins/proxy-revocable.tq
index b09baab9cf1913..465b1aef964037 100644
--- a/deps/v8/src/builtins/proxy-revocable.tq
+++ b/deps/v8/src/builtins/proxy-revocable.tq
@@ -12,9 +12,8 @@ namespace proxy {
   // Proxy.revocable(target, handler)
   // https://tc39.github.io/ecma262/#sec-proxy.revocable
   transitioning javascript builtin
-  ProxyRevocable(
-      context: Context, _receiver: Object, target: Object,
-      handler: Object): JSProxyRevocableResult {
+  ProxyRevocable(js-implicit context: Context)(target: JSAny, handler: JSAny):
+      JSProxyRevocableResult {
     try {
       const targetJSReceiver =
           Cast<JSReceiver>(target) otherwise ThrowProxyNonObject;
diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq
index d0411a8e894e9a..2d9636c88173c3 100644
--- a/deps/v8/src/builtins/proxy-set-property.tq
+++ b/deps/v8/src/builtins/proxy-set-property.tq
@@ -19,15 +19,21 @@ namespace proxy {
   // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-set-p-v-receiver
   transitioning builtin
   ProxySetProperty(implicit context: Context)(
-      proxy: JSProxy, name: Name, value: Object,
-      receiverValue: Object): Object {
+      proxy: JSProxy, name: PropertyKey | PrivateSymbol, value: JSAny,
+      receiverValue: JSAny): JSAny {
     // 1. Assert: IsPropertyKey(P) is true.
     assert(TaggedIsNotSmi(name));
     assert(IsName(name));
 
-    if (IsPrivateSymbol(name)) {
-      CallThrowTypeErrorIfStrict(kProxyPrivate);
-      return Undefined;
+    let key: PropertyKey;
+    typeswitch (name) {
+      case (PrivateSymbol): {
+        CallThrowTypeErrorIfStrict(kProxyPrivate);
+        return Undefined;
+      }
+      case (name: PropertyKey): {
+        key = name;
+      }
     }
 
     try {
@@ -61,8 +67,8 @@ namespace proxy {
       //      exception.
       // 12. Return true.
       const trapResult =
-          Call(context, trap, handler, target, name, value, receiverValue);
-      if (BranchIfToBooleanIsTrue(trapResult)) {
+          Call(context, trap, handler, target, key, value, receiverValue);
+      if (ToBoolean(trapResult)) {
         CheckGetSetTrapResult(target, proxy, name, value, kProxySet);
         return value;
       }
diff --git a/deps/v8/src/builtins/proxy-set-prototype-of.tq b/deps/v8/src/builtins/proxy-set-prototype-of.tq
index bbd99be4117eaa..355c258ab8f24b 100644
--- a/deps/v8/src/builtins/proxy-set-prototype-of.tq
+++ b/deps/v8/src/builtins/proxy-set-prototype-of.tq
@@ -10,7 +10,7 @@ namespace proxy {
   // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
   transitioning builtin
   ProxySetPrototypeOf(implicit context: Context)(
-      proxy: JSProxy, proto: Object, doThrow: Boolean): Object {
+      proxy: JSProxy, proto: Null | JSReceiver, doThrow: Boolean): JSAny {
     PerformStackCheck();
     const kTrapName: constexpr string = 'setPrototypeOf';
     try {
@@ -37,7 +37,7 @@ namespace proxy {
       const trapResult = Call(context, trap, handler, target, proto);
 
       // 9. If booleanTrapResult is false, return false.
-      if (BranchIfToBooleanIsFalse(trapResult)) {
+      if (!ToBoolean(trapResult)) {
         if (doThrow == True) {
           ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName);
         }
@@ -58,12 +58,12 @@ namespace proxy {
       // 13. If SameValue(V, targetProto) is false, throw a TypeError
       // exception.
       // 14. Return true.
-      if (BranchIfSameValue(proto, targetProto)) {
+      if (SameValue(proto, targetProto)) {
         return True;
       }
       ThrowTypeError(kProxySetPrototypeOfNonExtensible);
     }
-    label TrapUndefined(target: Object, proto: Object) {
+    label TrapUndefined(target: JSAny, proto: JSReceiver | Null) {
       // 7.a. Return ? target.[[SetPrototypeOf]](V).
       if (doThrow == True) {
         return object::ObjectSetPrototypeOfThrow(target, proto);
diff --git a/deps/v8/src/builtins/reflect.tq b/deps/v8/src/builtins/reflect.tq
index 4c25e8338f8883..97c6ec81a74ec0 100644
--- a/deps/v8/src/builtins/reflect.tq
+++ b/deps/v8/src/builtins/reflect.tq
@@ -8,24 +8,24 @@ namespace reflect {
   generates 'MessageTemplate::kCalledOnNonObject';
 
   // ES6 section 26.1.10 Reflect.isExtensible
-  transitioning javascript builtin ReflectIsExtensible(
-      js-implicit context: Context)(_receiver: Object, object: Object): Object {
+  transitioning javascript builtin
+  ReflectIsExtensible(js-implicit context: Context)(object: JSAny): JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object)
         otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.isExtensible');
     return object::ObjectIsExtensible(objectJSReceiver);
   }
 
   // ES6 section 26.1.12 Reflect.preventExtensions
-  transitioning javascript builtin ReflectPreventExtensions(
-      js-implicit context: Context)(_receiver: Object, object: Object): Object {
+  transitioning javascript builtin
+  ReflectPreventExtensions(js-implicit context: Context)(object: JSAny): JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object)
         otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.preventExtensions');
     return object::ObjectPreventExtensionsDontThrow(objectJSReceiver);
   }
 
   // ES6 section 26.1.8 Reflect.getPrototypeOf
-  transitioning javascript builtin ReflectGetPrototypeOf(
-      js-implicit context: Context)(_receiver: Object, object: Object): Object {
+  transitioning javascript builtin
+  ReflectGetPrototypeOf(js-implicit context: Context)(object: JSAny): JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object)
         otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.getPrototypeOf');
     return object::JSReceiverGetPrototypeOf(objectJSReceiver);
@@ -33,50 +33,47 @@ namespace reflect {
 
   // ES6 section 26.1.14 Reflect.setPrototypeOf
   transitioning javascript builtin ReflectSetPrototypeOf(
-      js-implicit context:
-          Context)(_receiver: Object, object: Object, proto: Object): Object {
+      js-implicit context: Context)(object: JSAny, proto: JSAny): JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object)
         otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.setPrototypeOf');
-    if (proto == Null || Is<JSReceiver>(proto)) {
-      return object::ObjectSetPrototypeOfDontThrow(objectJSReceiver, proto);
+    typeswitch (proto) {
+      case (proto: JSReceiver | Null): {
+        return object::ObjectSetPrototypeOfDontThrow(objectJSReceiver, proto);
+      }
+      case (JSAny): {
+        ThrowTypeError(kProtoObjectOrNull, proto);
+      }
     }
-    ThrowTypeError(kProtoObjectOrNull, proto);
   }
 
-  extern transitioning builtin ToName(implicit context: Context)(Object): Name;
+  extern transitioning builtin ToName(implicit context: Context)(JSAny):
+      AnyName;
   type OnNonExistent constexpr 'OnNonExistent';
   const kReturnUndefined: constexpr OnNonExistent
   generates 'OnNonExistent::kReturnUndefined';
   extern macro SmiConstant(constexpr OnNonExistent): Smi;
   extern transitioning builtin GetPropertyWithReceiver(
-      implicit context: Context)(Object, Name, Object, Smi): Object;
+      implicit context: Context)(JSAny, Name, JSAny, Smi): JSAny;
 
   // ES6 section 26.1.6 Reflect.get
   transitioning javascript builtin
-  ReflectGet(js-implicit context: Context)(...arguments): Object {
+  ReflectGet(js-implicit context: Context)(...arguments): JSAny {
     const length = arguments.length;
-    const object: Object = length > 0 ? arguments[0] : Undefined;
+    const object: JSAny = length > 0 ? arguments[0] : Undefined;
     const objectJSReceiver = Cast<JSReceiver>(object)
         otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.get');
-    const propertyKey: Object = length > 1 ? arguments[1] : Undefined;
-    const name: Name = ToName(propertyKey);
-    const receiver: Object = length > 2 ? arguments[2] : objectJSReceiver;
+    const propertyKey: JSAny = length > 1 ? arguments[1] : Undefined;
+    const name: AnyName = ToName(propertyKey);
+    const receiver: JSAny = length > 2 ? arguments[2] : objectJSReceiver;
     return GetPropertyWithReceiver(
         objectJSReceiver, name, receiver, SmiConstant(kReturnUndefined));
   }
 
   // ES6 section 26.1.4 Reflect.deleteProperty
   transitioning javascript builtin ReflectDeleteProperty(
-      js-implicit context:
-          Context)(_receiver: Object, object: Object, key: Object): Object {
+      js-implicit context: Context)(object: JSAny, key: JSAny): JSAny {
     const objectJSReceiver = Cast<JSReceiver>(object)
         otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.deleteProperty');
-    const name: Name = ToName(key);
-    if (IsPrivateSymbol(name)) {
-      return DeleteProperty(objectJSReceiver, name, kSloppy);
-    }
-    const proxy = Cast<JSProxy>(objectJSReceiver)
-        otherwise return DeleteProperty(objectJSReceiver, name, kSloppy);
-    return proxy::ProxyDeleteProperty(proxy, name, kSloppy);
+    return DeleteProperty(objectJSReceiver, key, kSloppy);
   }
 }  // namespace reflect
diff --git a/deps/v8/src/builtins/regexp-match.tq b/deps/v8/src/builtins/regexp-match.tq
new file mode 100644
index 00000000000000..dbee8a616ad564
--- /dev/null
+++ b/deps/v8/src/builtins/regexp-match.tq
@@ -0,0 +1,49 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-regexp-gen.h'
+
+namespace regexp {
+
+  extern transitioning macro RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(
+      implicit context: Context)(Object, String, constexpr bool): JSAny;
+
+  transitioning macro FastRegExpPrototypeMatchBody(implicit context: Context)(
+      receiver: FastJSRegExp, string: String): JSAny {
+    return RegExpPrototypeMatchBody(receiver, string, true);
+  }
+
+  transitioning macro SlowRegExpPrototypeMatchBody(implicit context: Context)(
+      receiver: Object, string: String): JSAny {
+    return RegExpPrototypeMatchBody(receiver, string, false);
+  }
+
+  // Helper that skips a few initial checks and assumes:
+  // 1) receiver is a "fast" RegExp
+  // 2) pattern is a string
+  transitioning builtin RegExpMatchFast(implicit context: Context)(
+      receiver: FastJSRegExp, string: String): JSAny {
+    return FastRegExpPrototypeMatchBody(receiver, string);
+  }
+
+  // ES#sec-regexp.prototype-@@match
+  // RegExp.prototype [ @@match ] ( string )
+  transitioning javascript builtin RegExpPrototypeMatch(
+      js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny {
+    ThrowIfNotJSReceiver(
+        receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@match');
+    const receiver = UnsafeCast<JSReceiver>(receiver);
+    const string: String = ToString_Inline(context, string);
+
+    // Strict: Reads global and unicode properties.
+    // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+    // permissive.
+    const fastRegExp = Cast<FastJSRegExp>(receiver)
+        otherwise return SlowRegExpPrototypeMatchBody(receiver, string);
+
+    // TODO(pwong): Could be optimized to remove the overhead of calling the
+    //              builtin (at the cost of a larger builtin).
+    return RegExpMatchFast(fastRegExp, string);
+  }
+}
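
The new file follows the fast/slow split used throughout these regexp builtins: Cast<FastJSRegExp> succeeds only while the receiver and its flag accessors are still in their unmodified initial state (hence the "Strict" note about reading the global and unicode properties), and any failure drops to the fully generic spec path. Schematic of the dispatch with toy types:

#include <optional>

struct JSRegExp { bool unmodified = true; };
struct FastJSRegExp { const JSRegExp* re; };

// Cast<FastJSRegExp> analogue: certify that the cheap path is still valid.
std::optional<FastJSRegExp> CastFast(const JSRegExp& re) {
  if (re.unmodified) return FastJSRegExp{&re};
  return std::nullopt;
}

int Match(const JSRegExp& re) {
  if (CastFast(re)) {
    return 1;  // RegExpMatchFast: skips receiver and flag re-checks
  }
  return 0;    // SlowRegExpPrototypeMatchBody: full spec semantics
}
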
diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq
index cb0038c6b61722..f13724b476ce5d 100644
--- a/deps/v8/src/builtins/regexp-replace.tq
+++ b/deps/v8/src/builtins/regexp-replace.tq
@@ -4,7 +4,7 @@
 
 #include 'src/builtins/builtins-regexp-gen.h'
 
-namespace regexp_replace {
+namespace regexp {
 
   extern builtin
   StringIndexOf(implicit context: Context)(String, String, Smi): Smi;
@@ -23,10 +23,6 @@ namespace regexp_replace {
 
   extern macro
   RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi;
-  extern macro
-  RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast(
-      implicit context: Context)(JSReceiver, String):
-      RegExpMatchInfo labels IfDidNotMatch;
 
   transitioning macro RegExpReplaceCallableNoExplicitCaptures(implicit context:
                                                                   Context)(
@@ -57,7 +53,7 @@ namespace regexp_replace {
         // Element represents the matched substring, which is then passed to the
         // replace function.
         case (elString: String): {
-          const replacementObj: Object =
+          const replacementObj: JSAny =
               Call(context, replaceFn, Undefined, elString, matchStart, string);
           const replacement: String = ToString_Inline(context, replacementObj);
           matchesElements.objects[i] = replacement;
@@ -79,7 +75,7 @@ namespace regexp_replace {
 
       // The JSArray is expanded into the function args by Reflect.apply().
       // TODO(jgruber): Remove indirection through Call->ReflectApply.
-      const replacementObj: Object = Call(
+      const replacementObj: JSAny = Call(
           context, GetReflectApply(), Undefined, replaceFn, Undefined, elArray);
 
       // Overwrite the i'th element in the results with the string
@@ -146,8 +142,9 @@ namespace regexp_replace {
     }
 
     while (true) {
-      const match: RegExpMatchInfo = RegExpPrototypeExecBodyWithoutResultFast(
-          regexp, string) otherwise break;
+      const match: RegExpMatchInfo =
+          regexp::RegExpPrototypeExecBodyWithoutResultFast(regexp, string)
+          otherwise break;
       const matchStart: Smi = match.GetStartOfCapture(0);
       const matchEnd: Smi = match.GetEndOfCapture(0);
 
@@ -172,7 +169,7 @@ namespace regexp_replace {
   }
 
   transitioning builtin RegExpReplace(implicit context: Context)(
-      regexp: FastJSRegExp, string: String, replaceValue: Object): String {
+      regexp: FastJSRegExp, string: String, replaceValue: JSAny): String {
     // TODO(pwong): Remove assert when all callers (StringPrototypeReplace) are
     // from Torque.
     assert(Is<FastJSRegExp>(regexp));
@@ -184,7 +181,7 @@ namespace regexp_replace {
             RegExpReplaceFastGlobalCallable(regexp, string, replaceFn) :
             StringReplaceNonGlobalRegExpWithFunction(string, regexp, replaceFn);
       }
-      case (Object): {
+      case (JSAny): {
         const stableRegexp: JSRegExp = regexp;
         const replaceString: String = ToString_Inline(context, replaceValue);
 
@@ -208,7 +205,7 @@ namespace regexp_replace {
   }
 
   transitioning javascript builtin RegExpPrototypeReplace(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     const methodName: constexpr string = 'RegExp.prototype.@@replace';
 
     // RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
@@ -229,8 +226,8 @@ namespace regexp_replace {
     //   }
     // }
 
-    const string: Object = arguments[0];
-    const replaceValue: Object = arguments[1];
+    const string: JSAny = arguments[0];
+    const replaceValue: JSAny = arguments[1];
 
     // Let rx be the this value.
     // If Type(rx) is not Object, throw a TypeError exception.
diff --git a/deps/v8/src/builtins/regexp-source.tq b/deps/v8/src/builtins/regexp-source.tq
new file mode 100644
index 00000000000000..c1ce1c5e9a6935
--- /dev/null
+++ b/deps/v8/src/builtins/regexp-source.tq
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-regexp-gen.h'
+
+namespace regexp {
+
+  const kRegExpPrototypeSourceGetter: constexpr int31
+  generates 'v8::Isolate::kRegExpPrototypeSourceGetter';
+
+  // ES6 21.2.5.10.
+  // ES #sec-get-regexp.prototype.source
+  transitioning javascript builtin RegExpPrototypeSourceGetter(
+      js-implicit context: Context, receiver: JSAny)(): JSAny {
+    typeswitch (receiver) {
+      case (receiver: JSRegExp): {
+        return receiver.source;
+      }
+      case (Object): {
+      }
+    }
+    if (!IsReceiverInitialRegExpPrototype(receiver)) {
+      const methodName: constexpr string = 'RegExp.prototype.source';
+      ThrowTypeError(kRegExpNonRegExp, methodName);
+    }
+    IncrementUseCounter(context, SmiConstant(kRegExpPrototypeSourceGetter));
+    return '(?:)';
+  }
+}
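
The getter's three-way dispatch (a real JSRegExp, the initial RegExp.prototype, anything else) is easiest to see flattened into ordinary control flow. A hedged TypeScript sketch, with `isInitialRegExpPrototype` standing in for the V8-internal check:

```ts
// Sketch of the RegExp.prototype.source getter's dispatch (illustrative).
function regExpSourceGetter(
    receiver: unknown,
    isInitialRegExpPrototype: (o: unknown) => boolean): string {
  if (receiver instanceof RegExp) {
    return receiver.source;   // fast case: a real JSRegExp
  }
  if (!isInitialRegExpPrototype(receiver)) {
    throw new TypeError("RegExp.prototype.source getter called on non-RegExp object");
  }
  // Web compatibility: the initial prototype itself reports '(?:)';
  // V8 also bumps the kRegExpPrototypeSourceGetter use counter here.
  return "(?:)";
}
```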
diff --git a/deps/v8/src/builtins/regexp-test.tq b/deps/v8/src/builtins/regexp-test.tq
new file mode 100644
index 00000000000000..938dfa51f391f5
--- /dev/null
+++ b/deps/v8/src/builtins/regexp-test.tq
@@ -0,0 +1,37 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-regexp-gen.h'
+
+namespace regexp {
+
+  // ES#sec-regexp.prototype.test
+  // RegExp.prototype.test ( S )
+  transitioning javascript builtin RegExpPrototypeTest(
+      js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny {
+    const methodName: constexpr string = 'RegExp.prototype.test';
+    const receiver = Cast<JSReceiver>(receiver)
+        otherwise ThrowTypeError(kIncompatibleMethodReceiver, methodName);
+    const str: String = ToString_Inline(context, string);
+    if (IsFastRegExpPermissive(receiver)) {
+      RegExpPrototypeExecBodyWithoutResultFast(
+          UnsafeCast<JSRegExp>(receiver), str)
+          otherwise return False;
+      return True;
+    }
+    const matchIndices = RegExpExec(context, receiver, str);
+    return SelectBooleanConstant(matchIndices != Null);
+  }
+
+  extern macro RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec(
+      implicit context: Context)(JSRegExp): bool;
+
+  transitioning builtin RegExpPrototypeTestFast(implicit context: Context)(
+      receiver: JSRegExp, string: String): Object {
+    assert(IsFastRegExpWithOriginalExec(receiver));
+    RegExpPrototypeExecBodyWithoutResultFast(receiver, string)
+        otherwise return False;
+    return True;
+  }
+}
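
Both the fast and generic paths of `test` reduce to "did exec match?"; the fast path merely avoids materializing the result array. A one-function TypeScript sketch of that equivalence:

```ts
// Sketch of RegExp.prototype.test's observable behavior (illustrative).
function regExpPrototypeTest(rx: RegExp, str: unknown): boolean {
  const s = String(str);       // ToString_Inline
  return rx.exec(s) !== null;  // SelectBooleanConstant(matchIndices != Null)
}
```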
diff --git a/deps/v8/src/builtins/regexp.tq b/deps/v8/src/builtins/regexp.tq
index a36e1a14eb6861..7352d2738fa4a4 100644
--- a/deps/v8/src/builtins/regexp.tq
+++ b/deps/v8/src/builtins/regexp.tq
@@ -6,8 +6,159 @@
 
 namespace regexp {
 
-  extern macro RegExpBuiltinsAssembler::BranchIfFastRegExp(
+  extern macro RegExpBuiltinsAssembler::BranchIfFastRegExp_Strict(
       implicit context: Context)(HeapObject): never labels IsFast,
       IsSlow;
+  macro IsFastRegExpStrict(implicit context: Context)(o: HeapObject): bool {
+    BranchIfFastRegExp_Strict(o) otherwise return true, return false;
+  }
 
+  extern macro RegExpBuiltinsAssembler::BranchIfFastRegExp_Permissive(
+      implicit context: Context)(HeapObject): never labels IsFast,
+      IsSlow;
+
+  @export
+  macro IsFastRegExpPermissive(implicit context: Context)(o: HeapObject): bool {
+    BranchIfFastRegExp_Permissive(o) otherwise return true, return false;
+  }
+
+  extern macro RegExpBuiltinsAssembler::RegExpExec(Context, Object, Object):
+      Object;
+
+  extern macro
+  RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast(
+      implicit context: Context)(JSRegExp, String):
+      RegExpMatchInfo labels IfDidNotMatch;
+
+  extern macro RegExpBuiltinsAssembler::IsReceiverInitialRegExpPrototype(
+      implicit context: Context)(Object): bool;
+
+  type Flag constexpr 'JSRegExp::Flag';
+  const kGlobal: constexpr Flag
+  generates 'JSRegExp::kGlobal';
+  const kIgnoreCase: constexpr Flag
+  generates 'JSRegExp::kIgnoreCase';
+  const kMultiline: constexpr Flag
+  generates 'JSRegExp::kMultiline';
+  const kDotAll: constexpr Flag
+  generates 'JSRegExp::kDotAll';
+  const kSticky: constexpr Flag
+  generates 'JSRegExp::kSticky';
+  const kUnicode: constexpr Flag
+  generates 'JSRegExp::kUnicode';
+
+  const kRegExpPrototypeOldFlagGetter: constexpr int31
+  generates 'v8::Isolate::kRegExpPrototypeOldFlagGetter';
+  const kRegExpPrototypeStickyGetter: constexpr int31
+  generates 'v8::Isolate::kRegExpPrototypeStickyGetter';
+  const kRegExpPrototypeUnicodeGetter: constexpr int31
+  generates 'v8::Isolate::kRegExpPrototypeUnicodeGetter';
+
+  extern macro RegExpBuiltinsAssembler::FastFlagGetter(
+      JSRegExp, constexpr Flag): bool;
+  const kRegExpNonRegExp: constexpr MessageTemplate
+  generates 'MessageTemplate::kRegExpNonRegExp';
+  extern runtime IncrementUseCounter(Context, Smi): void;
+
+  macro FlagGetter(implicit context: Context)(
+      receiver: Object, flag: constexpr Flag, counter: constexpr int31,
+      methodName: constexpr string): JSAny {
+    typeswitch (receiver) {
+      case (receiver: JSRegExp): {
+        return SelectBooleanConstant(FastFlagGetter(receiver, flag));
+      }
+      case (Object): {
+      }
+    }
+    if (!IsReceiverInitialRegExpPrototype(receiver)) {
+      ThrowTypeError(kRegExpNonRegExp, methodName);
+    }
+    if constexpr (counter != -1) {
+      IncrementUseCounter(context, SmiConstant(counter));
+    }
+    return Undefined;
+  }
+
+  // ES6 21.2.5.4.
+  // ES #sec-get-regexp.prototype.global
+  transitioning javascript builtin RegExpPrototypeGlobalGetter(
+      js-implicit context: Context, receiver: JSAny)(): JSAny {
+    return FlagGetter(
+        receiver, kGlobal, kRegExpPrototypeOldFlagGetter,
+        'RegExp.prototype.global');
+  }
+
+  // ES6 21.2.5.5.
+  // ES #sec-get-regexp.prototype.ignorecase
+  transitioning javascript builtin RegExpPrototypeIgnoreCaseGetter(
+      js-implicit context: Context, receiver: JSAny)(): JSAny {
+    return FlagGetter(
+        receiver, kIgnoreCase, kRegExpPrototypeOldFlagGetter,
+        'RegExp.prototype.ignoreCase');
+  }
+
+  // ES6 21.2.5.7.
+  // ES #sec-get-regexp.prototype.multiline
+  transitioning javascript builtin RegExpPrototypeMultilineGetter(
+      js-implicit context: Context, receiver: JSAny)(): JSAny {
+    return FlagGetter(
+        receiver, kMultiline, kRegExpPrototypeOldFlagGetter,
+        'RegExp.prototype.multiline');
+  }
+
+  // ES #sec-get-regexp.prototype.dotAll
+  transitioning javascript builtin RegExpPrototypeDotAllGetter(
+      js-implicit context: Context, receiver: JSAny)(): JSAny {
+    const kNoCounter: constexpr int31 = -1;
+    return FlagGetter(receiver, kDotAll, kNoCounter, 'RegExp.prototype.dotAll');
+  }
+
+  // ES6 21.2.5.12.
+  // ES #sec-get-regexp.prototype.sticky
+  transitioning javascript builtin RegExpPrototypeStickyGetter(
+      js-implicit context: Context, receiver: JSAny)(): JSAny {
+    return FlagGetter(
+        receiver, kSticky, kRegExpPrototypeStickyGetter,
+        'RegExp.prototype.sticky');
+  }
+
+  // ES6 21.2.5.15.
+  // ES #sec-get-regexp.prototype.unicode
+  transitioning javascript builtin RegExpPrototypeUnicodeGetter(
+      js-implicit context: Context, receiver: JSAny)(): JSAny {
+    return FlagGetter(
+        receiver, kUnicode, kRegExpPrototypeUnicodeGetter,
+        'RegExp.prototype.unicode');
+  }
+
+  extern transitioning macro
+  RegExpBuiltinsAssembler::FlagsGetter(implicit context: Context)(
+      Object, constexpr bool): String;
+
+  transitioning macro
+  FastFlagsGetter(implicit context: Context)(receiver: FastJSRegExp): String {
+    return FlagsGetter(receiver, true);
+  }
+
+  transitioning macro SlowFlagsGetter(implicit context:
+                                          Context)(receiver: JSAny): String {
+    return FlagsGetter(receiver, false);
+  }
+
+  const kRegExpNonObject: constexpr MessageTemplate
+  generates 'MessageTemplate::kRegExpNonObject';
+
+  // ES #sec-get-regexp.prototype.flags
+  // TFJ(RegExpPrototypeFlagsGetter, 0, kReceiver)
+  transitioning javascript builtin RegExpPrototypeFlagsGetter(
+      js-implicit context: Context, receiver: JSAny)(): String {
+    ThrowIfNotJSReceiver(receiver, kRegExpNonObject, 'RegExp.prototype.flags');
+
+    // The check is strict because the following code relies on individual flag
+    // getters on the regexp prototype (e.g.: global, sticky, ...). We don't
+    // bother to check these individually.
+    const fastRegexp = Cast<FastJSRegExp>(receiver)
+        otherwise return SlowFlagsGetter(receiver);
+    return FastFlagsGetter(fastRegexp);
+  }
 }
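
All six flag getters share FlagGetter's shape: read the flag off a real JSRegExp, return undefined (optionally bumping a use counter) on the initial prototype, and throw for anything else. A TypeScript sketch under those assumptions (not V8 API):

```ts
// Sketch of the shared FlagGetter (illustrative).
type RegExpFlag = "global" | "ignoreCase" | "multiline" | "dotAll" | "sticky" | "unicode";

function flagGetter(
    receiver: unknown, flag: RegExpFlag,
    isInitialRegExpPrototype: (o: unknown) => boolean,
    incrementUseCounter: (() => void) | null): boolean | undefined {
  if (receiver instanceof RegExp) {
    return receiver[flag];  // FastFlagGetter on a real JSRegExp
  }
  if (!isInitialRegExpPrototype(receiver)) {
    throw new TypeError(`RegExp.prototype.${flag} getter called on non-RegExp object`);
  }
  incrementUseCounter?.();  // a counter of -1 in the Torque code means "none"
  return undefined;         // the initial prototype reports undefined
}
```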
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 854f31cece3cd8..7dca12d17e44e4 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -81,12 +81,24 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
+MemOperand RealStackLimitAsMemOperand(MacroAssembler* masm) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  CHECK(is_int32(offset));
+  return MemOperand(kRootRegister, offset);
+}
+
 void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                  Register scratch, Label* stack_overflow) {
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+  __ LoadP(scratch, RealStackLimitAsMemOperand(masm));
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
   __ SubP(scratch, sp, scratch);
@@ -429,7 +441,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ CompareRoot(sp, RootIndex::kRealStackLimit);
+  __ LoadP(scratch, RealStackLimitAsMemOperand(masm));
+  __ CmpLogicalP(sp, scratch);
   __ blt(&stack_overflow);
 
   // Push receiver.
@@ -772,7 +785,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   Label okay;
-  __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+  __ LoadP(scratch1, RealStackLimitAsMemOperand(masm));
   // Make scratch1 the space we have left. The stack might already be overflowed
   // here which will cause scratch1 to become negative.
   __ SubP(scratch1, sp, scratch1);
@@ -1197,8 +1210,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     // Do a stack check to ensure we don't go over the limit.
     Label ok;
     __ SubP(r8, sp, r4);
-    __ LoadRoot(r0, RootIndex::kRealStackLimit);
-    __ CmpLogicalP(r8, r0);
+    __ CmpLogicalP(r8, RealStackLimitAsMemOperand(masm));
     __ bge(&ok);
     __ CallRuntime(Runtime::kThrowStackOverflow);
     __ bind(&ok);
@@ -2219,7 +2231,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       // Check the stack for overflow. We are not trying to catch interruptions
       // (i.e. debug break and preemption) here, so check the "real stack
       // limit".
-      __ CompareRoot(sp, RootIndex::kRealStackLimit);
+      __ CmpLogicalP(sp, RealStackLimitAsMemOperand(masm));
       __ bgt(&done);  // Signed comparison.
       // Restore the stack pointer.
       __ LoadRR(sp, scratch);
@@ -2657,7 +2669,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     __ Push(kWasmInstanceRegister, r7);
     // Load the correct CEntry builtin from the instance object.
     __ LoadP(r4, FieldMemOperand(kWasmInstanceRegister,
-                                 WasmInstanceObject::kCEntryStubOffset));
+                                 WasmInstanceObject::kIsolateRootOffset));
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+    __ LoadP(r4, MemOperand(r4, IsolateData::builtin_slot_offset(centry_id)));
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ LoadSmiLiteral(cp, Smi::zero());
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 3c637db63683d6..99ac0d6b1face4 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -119,9 +119,9 @@ Code BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
                           .set_self_reference(masm.CodeObject())
                           .set_builtin_index(builtin_index)
                           .Build();
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   isolate->SetBuiltinUnwindData(builtin_index, masm.GetUnwindInfo());
-#endif
+#endif  // V8_OS_WIN64
   return *code;
 }
 
@@ -276,10 +276,6 @@ Code GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
 
 }  // namespace
 
-#ifdef _MSC_VER
-#pragma optimize( "", off )
-#endif
-
 // static
 void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
   Builtins* builtins = isolate->builtins();
@@ -357,10 +353,5 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
   builtins->MarkInitialized();
 }
 
-#ifdef _MSC_VER
-#pragma optimize( "", on )
-#endif
-
-
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq
index 8b9fe84dfb759b..c3cc7d949b716c 100644
--- a/deps/v8/src/builtins/string-endswith.tq
+++ b/deps/v8/src/builtins/string-endswith.tq
@@ -28,13 +28,13 @@ namespace string {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.endswith
   transitioning javascript builtin StringPrototypeEndsWith(
-      js-implicit context: Context, receiver: Object)(...arguments): Boolean {
-    const searchString: Object = arguments[0];
-    const endPosition: Object = arguments[1];
+      js-implicit context: Context, receiver: JSAny)(...arguments): Boolean {
+    const searchString: JSAny = arguments[0];
+    const endPosition: JSAny = arguments[1];
     const kBuiltinName: constexpr string = 'String.prototype.endsWith';
 
     // 1. Let O be ? RequireObjectCoercible(this value).
-    const object: Object = RequireObjectCoercible(receiver, kBuiltinName);
+    const object: JSAny = RequireObjectCoercible(receiver, kBuiltinName);
 
     // 2. Let S be ? ToString(O).
     const string: String = ToString_Inline(context, object);
diff --git a/deps/v8/src/builtins/string-html.tq b/deps/v8/src/builtins/string-html.tq
index 80b5f778877bd4..0b0fdfeaef5c51 100644
--- a/deps/v8/src/builtins/string-html.tq
+++ b/deps/v8/src/builtins/string-html.tq
@@ -7,8 +7,8 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-createhtml
   transitioning builtin CreateHTML(implicit context: Context)(
-      receiver: Object, methodName: String, tagName: String, attr: String,
-      attrValue: Object): String {
+      receiver: JSAny, methodName: String, tagName: String, attr: String,
+      attrValue: JSAny): String {
     const tagContents: String = ToThisString(receiver, methodName);
     let result = '<' + tagName;
     if (attr != kEmptyString) {
@@ -22,14 +22,14 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.anchor
   transitioning javascript builtin StringPrototypeAnchor(
-      js-implicit context: Context, receiver: Object)(...arguments): String {
+      js-implicit context: Context, receiver: JSAny)(...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.anchor', 'a', 'name', arguments[0]);
   }
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.big
   transitioning javascript builtin
-  StringPrototypeBig(js-implicit context: Context, receiver: Object)(
+  StringPrototypeBig(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.big', 'big', kEmptyString, kEmptyString);
@@ -37,7 +37,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.blink
   transitioning javascript builtin
-  StringPrototypeBlink(js-implicit context: Context, receiver: Object)(
+  StringPrototypeBlink(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.blink', 'blink', kEmptyString,
@@ -46,7 +46,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.bold
   transitioning javascript builtin
-  StringPrototypeBold(js-implicit context: Context, receiver: Object)(
+  StringPrototypeBold(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.bold', 'b', kEmptyString, kEmptyString);
@@ -54,7 +54,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.fontcolor
   transitioning javascript builtin
-  StringPrototypeFontcolor(js-implicit context: Context, receiver: Object)(
+  StringPrototypeFontcolor(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.fontcolor', 'font', 'color', arguments[0]);
@@ -62,7 +62,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.fontsize
   transitioning javascript builtin
-  StringPrototypeFontsize(js-implicit context: Context, receiver: Object)(
+  StringPrototypeFontsize(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.fontsize', 'font', 'size', arguments[0]);
@@ -70,7 +70,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.fixed
   transitioning javascript builtin
-  StringPrototypeFixed(js-implicit context: Context, receiver: Object)(
+  StringPrototypeFixed(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.fixed', 'tt', kEmptyString, kEmptyString);
@@ -78,7 +78,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.italics
   transitioning javascript builtin
-  StringPrototypeItalics(js-implicit context: Context, receiver: Object)(
+  StringPrototypeItalics(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.italics', 'i', kEmptyString, kEmptyString);
@@ -86,7 +86,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.link
   transitioning javascript builtin
-  StringPrototypeLink(js-implicit context: Context, receiver: Object)(
+  StringPrototypeLink(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.link', 'a', 'href', arguments[0]);
@@ -94,7 +94,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.small
   transitioning javascript builtin
-  StringPrototypeSmall(js-implicit context: Context, receiver: Object)(
+  StringPrototypeSmall(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.small', 'small', kEmptyString,
@@ -103,7 +103,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.strike
   transitioning javascript builtin
-  StringPrototypeStrike(js-implicit context: Context, receiver: Object)(
+  StringPrototypeStrike(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.strike', 'strike', kEmptyString,
@@ -112,7 +112,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.sub
   transitioning javascript builtin
-  StringPrototypeSub(js-implicit context: Context, receiver: Object)(
+  StringPrototypeSub(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.sub', 'sub', kEmptyString, kEmptyString);
@@ -120,7 +120,7 @@ namespace string_html {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.sup
   transitioning javascript builtin
-  StringPrototypeSup(js-implicit context: Context, receiver: Object)(
+  StringPrototypeSup(js-implicit context: Context, receiver: JSAny)(
       ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.sup', 'sup', kEmptyString, kEmptyString);
diff --git a/deps/v8/src/builtins/string-iterator.tq b/deps/v8/src/builtins/string-iterator.tq
index 5b8f864661295a..d36a44fa97d05e 100644
--- a/deps/v8/src/builtins/string-iterator.tq
+++ b/deps/v8/src/builtins/string-iterator.tq
@@ -17,7 +17,7 @@ namespace string_iterator {
 
   // ES6 #sec-string.prototype-@@iterator
   transitioning javascript builtin StringPrototypeIterator(
-      js-implicit context: Context)(receiver: Object): JSStringIterator {
+      js-implicit context: Context, receiver: JSAny)(): JSStringIterator {
     const name: String =
         ToThisString(receiver, 'String.prototype[Symbol.iterator]');
     const index: Smi = 0;
@@ -26,7 +26,7 @@ namespace string_iterator {
 
   // ES6 #sec-%stringiteratorprototype%.next
   transitioning javascript builtin StringIteratorPrototypeNext(
-      js-implicit context: Context)(receiver: Object): JSObject {
+      js-implicit context: Context, receiver: JSAny)(): JSObject {
     const iterator = Cast<JSStringIterator>(receiver) otherwise ThrowTypeError(
         kIncompatibleMethodReceiver, 'String Iterator.prototype.next',
         receiver);
diff --git a/deps/v8/src/builtins/string-pad.tq b/deps/v8/src/builtins/string-pad.tq
new file mode 100644
index 00000000000000..2368067c4e3bc9
--- /dev/null
+++ b/deps/v8/src/builtins/string-pad.tq
@@ -0,0 +1,111 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-string-gen.h'
+
+namespace string {
+
+  extern transitioning builtin
+  StringSubstring(implicit context: Context)(String, intptr, intptr): String;
+
+  const kStringPadStart: constexpr int31 = 0;
+  const kStringPadEnd: constexpr int31 = 1;
+
+  transitioning macro StringPad(implicit context: Context)(
+      receiver: JSAny, arguments: Arguments, methodName: constexpr string,
+      variant: constexpr int31): String {
+    const receiverString: String = ToThisString(receiver, methodName);
+    const stringLength: Smi = receiverString.length_smi;
+
+    if (arguments.length == 0) {
+      return receiverString;
+    }
+    const maxLength: Number = ToLength_Inline(context, arguments[0]);
+    assert(IsNumberNormalized(maxLength));
+
+    typeswitch (maxLength) {
+      case (smiMaxLength: Smi): {
+        if (smiMaxLength <= stringLength) {
+          return receiverString;
+        }
+      }
+      case (Number): {
+      }
+    }
+
+    let fillString: String = ' ';
+    let fillLength: intptr = 1;
+
+    if (arguments.length != 1) {
+      const fill = arguments[1];
+      if (fill != Undefined) {
+        fillString = ToString_Inline(context, fill);
+        fillLength = fillString.length_intptr;
+        if (fillLength == 0) {
+          return receiverString;
+        }
+      }
+    }
+
+    // Pad.
+    assert(fillLength > 0);
+    // Throw if maxLength is greater than String::kMaxLength.
+    if (!TaggedIsSmi(maxLength)) {
+      ThrowInvalidStringLength(context);
+    }
+
+    const smiMaxLength: Smi = UnsafeCast<Smi>(maxLength);
+    if (smiMaxLength > SmiConstant(kStringMaxLength)) {
+      ThrowInvalidStringLength(context);
+    }
+    assert(smiMaxLength > stringLength);
+    const padLength: Smi = smiMaxLength - stringLength;
+
+    let padding: String;
+    if (fillLength == 1) {
+      // Fast path for a single-character fill: no need to compute the number
+      // of repetitions or the remainder.
+      padding = StringRepeat(context, fillString, padLength);
+    } else {
+      // Multi char fill.
+      const fillLengthWord32: int32 = TruncateIntPtrToInt32(fillLength);
+      const padLengthWord32: int32 = Convert<int32>(padLength);
+      const repetitionsWord32: int32 = padLengthWord32 / fillLengthWord32;
+      const remainingWord32: int32 = padLengthWord32 % fillLengthWord32;
+      padding =
+          StringRepeat(context, fillString, Convert<Smi>(repetitionsWord32));
+
+      if (remainingWord32 != 0) {
+        const remainderString =
+            StringSubstring(fillString, 0, Convert<intptr>(remainingWord32));
+        padding = padding + remainderString;
+      }
+    }
+
+    // Return result.
+    assert(padLength == padding.length_smi);
+    if (variant == kStringPadStart) {
+      return padding + receiverString;
+    }
+    assert(variant == kStringPadEnd);
+    return receiverString + padding;
+  }
+
+  // ES6 #sec-string.prototype.padstart
+  transitioning javascript builtin
+  StringPrototypePadStart(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): String {
+    const methodName: constexpr string = 'String.prototype.padStart';
+    return StringPad(receiver, arguments, methodName, kStringPadStart);
+  }
+
+  // ES6 #sec-string.prototype.padend
+  transitioning javascript builtin
+  StringPrototypePadEnd(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): String {
+    const methodName: constexpr string = 'String.prototype.padEnd';
+    return StringPad(receiver, arguments, methodName, kStringPadEnd);
+  }
+}
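
The multi-character fill above is plain integer arithmetic: `padLength` splits into whole repetitions of the fill plus a prefix remainder. A compact TypeScript sketch of StringPad's core, eliding the Smi/int32 distinctions and the kStringMaxLength throw:

```ts
// Sketch of StringPad (padStart variant by default); illustrative only.
function stringPad(receiverString: string, maxLength: number,
                   fillString = " ", padStart = true): string {
  if (maxLength <= receiverString.length || fillString.length === 0) {
    return receiverString;
  }
  const padLength = maxLength - receiverString.length;
  const repetitions = Math.floor(padLength / fillString.length);
  const remaining = padLength % fillString.length;
  const padding = fillString.repeat(repetitions) + fillString.slice(0, remaining);
  return padStart ? padding + receiverString : receiverString + padding;
}

// stringPad("7", 5, "ab") === "abab7"    (2 repetitions, no remainder)
// stringPad("7", 6, "ab") === "ababa7"   (2 repetitions, remainder "a")
```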
diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq
index 0d9d4ee4982d50..f341ed4336baaf 100644
--- a/deps/v8/src/builtins/string-repeat.tq
+++ b/deps/v8/src/builtins/string-repeat.tq
@@ -28,7 +28,7 @@ namespace string_repeat {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.repeat
   transitioning javascript builtin StringPrototypeRepeat(
-      js-implicit context: Context, receiver: Object)(count: Object): String {
+      js-implicit context: Context, receiver: JSAny)(count: JSAny): String {
     // 1. Let O be ? RequireObjectCoercible(this value).
     // 2. Let S be ? ToString(O).
     const s: String = ToThisString(receiver, kBuiltinName);
diff --git a/deps/v8/src/builtins/string-slice.tq b/deps/v8/src/builtins/string-slice.tq
index b066fb76692ce7..661cc264c50418 100644
--- a/deps/v8/src/builtins/string-slice.tq
+++ b/deps/v8/src/builtins/string-slice.tq
@@ -9,7 +9,7 @@ namespace string_slice {
   // ES6 #sec-string.prototype.slice ( start, end )
   // https://tc39.github.io/ecma262/#sec-string.prototype.slice
   transitioning javascript builtin StringPrototypeSlice(
-      js-implicit context: Context, receiver: Object)(...arguments): String {
+      js-implicit context: Context, receiver: JSAny)(...arguments): String {
     // 1. Let O be ? RequireObjectCoercible(this value).
     // 2. Let S be ? ToString(O).
     const string: String = ToThisString(receiver, 'String.prototype.slice');
diff --git a/deps/v8/src/builtins/string-startswith.tq b/deps/v8/src/builtins/string-startswith.tq
index b03e67ecf5446c..7fa7ec6d5ce952 100644
--- a/deps/v8/src/builtins/string-startswith.tq
+++ b/deps/v8/src/builtins/string-startswith.tq
@@ -10,13 +10,13 @@ namespace string {
 
   // https://tc39.github.io/ecma262/#sec-string.prototype.startswith
   transitioning javascript builtin StringPrototypeStartsWith(
-      js-implicit context: Context, receiver: Object)(...arguments): Boolean {
-    const searchString: Object = arguments[0];
-    const position: Object = arguments[1];
+      js-implicit context: Context, receiver: JSAny)(...arguments): Boolean {
+    const searchString: JSAny = arguments[0];
+    const position: JSAny = arguments[1];
     const kBuiltinName: constexpr string = 'String.prototype.startsWith';
 
     // 1. Let O be ? RequireObjectCoercible(this value).
-    const object: Object = RequireObjectCoercible(receiver, kBuiltinName);
+    const object: JSAny = RequireObjectCoercible(receiver, kBuiltinName);
 
     // 2. Let S be ? ToString(O).
     const string: String = ToString_Inline(context, object);
diff --git a/deps/v8/src/builtins/string-substring.tq b/deps/v8/src/builtins/string-substring.tq
index 1fafb8af4367ca..c97b294a34fedd 100644
--- a/deps/v8/src/builtins/string-substring.tq
+++ b/deps/v8/src/builtins/string-substring.tq
@@ -7,7 +7,7 @@ namespace string_substring {
   extern macro SubString(String, intptr, intptr): String;
 
   transitioning macro ToSmiBetweenZeroAnd(implicit context: Context)(
-      value: Object, limit: Smi): Smi {
+      value: JSAny, limit: Smi): Smi {
     const valueInt: Number =
         ToInteger_Inline(context, value, kTruncateMinusZero);
     typeswitch (valueInt) {
@@ -28,7 +28,7 @@ namespace string_substring {
 
   // ES6 #sec-string.prototype.substring
   transitioning javascript builtin StringPrototypeSubstring(
-      js-implicit context: Context, receiver: Object)(...arguments): String {
+      js-implicit context: Context, receiver: JSAny)(...arguments): String {
     // Check that {receiver} is coercible to Object and convert it to a String.
     const string: String = ToThisString(receiver, 'String.prototype.substring');
     const length = string.length_smi;
diff --git a/deps/v8/src/builtins/string.tq b/deps/v8/src/builtins/string.tq
index dbcc5799e1063d..7f007680e93b73 100644
--- a/deps/v8/src/builtins/string.tq
+++ b/deps/v8/src/builtins/string.tq
@@ -7,15 +7,15 @@
 namespace string {
   // ES6 #sec-string.prototype.tostring
   transitioning javascript builtin
-  StringPrototypeToString(js-implicit context: Context)(receiver: Object):
-      Object {
+  StringPrototypeToString(js-implicit context: Context, receiver: JSAny)():
+      JSAny {
     return ToThisValue(receiver, kString, 'String.prototype.toString');
   }
 
   // ES6 #sec-string.prototype.valueof
   transitioning javascript builtin
-  StringPrototypeValueOf(js-implicit context: Context)(receiver: Object):
-      Object {
+  StringPrototypeValueOf(js-implicit context: Context, receiver: JSAny)():
+      JSAny {
     return ToThisValue(receiver, kString, 'String.prototype.valueOf');
   }
 
@@ -29,7 +29,8 @@ namespace string {
     const kind = PACKED_ELEMENTS;
     const stringLength: intptr = string.length_intptr;
 
-    const map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context));
+    const nativeContext = LoadNativeContext(context);
+    const map: Map = LoadJSArrayElementsMap(kind, nativeContext);
     const array: JSArray = AllocateJSArray(
         kind, map, stringLength, SmiTag(stringLength),
         kAllowLargeObjectAllocation);
@@ -53,7 +54,7 @@ namespace string {
   }
 
   transitioning macro GenerateStringAt(implicit context: Context)(
-      receiver: Object, position: Object,
+      receiver: JSAny, position: JSAny,
       methodName: constexpr string): never labels
   IfInBounds(String, intptr, intptr), IfOutOfBounds {
     // Check that {receiver} is coercible to Object and convert it to a String.
@@ -71,8 +72,7 @@ namespace string {
 
   // ES6 #sec-string.prototype.charat
   transitioning javascript builtin StringPrototypeCharAt(
-      js-implicit context: Context,
-      receiver: Object)(position: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(position: JSAny): JSAny {
     try {
       GenerateStringAt(receiver, position, 'String.prototype.charAt')
           otherwise IfInBounds, IfOutOfBounds;
@@ -88,8 +88,7 @@ namespace string {
 
   // ES6 #sec-string.prototype.charcodeat
   transitioning javascript builtin StringPrototypeCharCodeAt(
-      js-implicit context: Context,
-      receiver: Object)(position: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(position: JSAny): JSAny {
     try {
       GenerateStringAt(receiver, position, 'String.prototype.charCodeAt')
           otherwise IfInBounds, IfOutOfBounds;
@@ -105,8 +104,7 @@ namespace string {
 
   // ES6 #sec-string.prototype.codepointat
   transitioning javascript builtin StringPrototypeCodePointAt(
-      js-implicit context: Context,
-      receiver: Object)(position: Object): Object {
+      js-implicit context: Context, receiver: JSAny)(position: JSAny): JSAny {
     try {
       GenerateStringAt(receiver, position, 'String.prototype.codePointAt')
           otherwise IfInBounds, IfOutOfBounds;
@@ -125,7 +123,7 @@ namespace string {
   // ES6 String.prototype.concat(...args)
   // ES6 #sec-string.prototype.concat
   transitioning javascript builtin StringPrototypeConcat(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     // Check that {receiver} is coercible to Object and convert it to a String.
     let string: String = ToThisString(receiver, 'String.prototype.concat');
 
@@ -137,4 +135,56 @@ namespace string {
     }
     return string;
   }
+
+  extern transitioning runtime
+  SymbolDescriptiveString(implicit context: Context)(Symbol): String;
+
+  // ES #sec-string-constructor
+  // https://tc39.github.io/ecma262/#sec-string-constructor
+  transitioning javascript builtin StringConstructor(
+      js-implicit context: Context, receiver: JSAny, newTarget: JSAny,
+      target: JSFunction)(...arguments): JSAny {
+    const length: intptr = Convert<intptr>(arguments.length);
+    let s: String;
+    // 1. If no arguments were passed to this function invocation, let s be "".
+    if (length == 0) {
+      s = EmptyStringConstant();
+    } else {
+      // 2. Else,
+      // 2. a. If NewTarget is undefined and Type(value) is Symbol, return
+      // SymbolDescriptiveString(value).
+      if (newTarget == Undefined) {
+        typeswitch (arguments[0]) {
+          case (value: Symbol): {
+            return SymbolDescriptiveString(value);
+          }
+          case (JSAny): {
+          }
+        }
+      }
+      // 2. b. Let s be ? ToString(value).
+      s = ToString_Inline(context, arguments[0]);
+    }
+    // 3. If NewTarget is undefined, return s.
+    if (newTarget == Undefined) {
+      return s;
+    }
+    // 4. Return ! StringCreate(s, ? GetPrototypeFromConstructor(NewTarget,
+    // "%String.prototype%")).
+    const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
+    const obj =
+        UnsafeCast<JSPrimitiveWrapper>(AllocateFastOrSlowJSObjectFromMap(map));
+    obj.value = s;
+    return obj;
+  }
+
+  transitioning builtin StringAddConvertLeft(implicit context: Context)(
+      left: JSAny, right: String): String {
+    return ToStringImpl(context, ToPrimitiveDefault(left)) + right;
+  }
+
+  transitioning builtin StringAddConvertRight(implicit context: Context)(
+      left: String, right: JSAny): String {
+    return left + ToStringImpl(context, ToPrimitiveDefault(right));
+  }
 }
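
The new Torque StringConstructor follows the spec steps literally; a TypeScript sketch of the same decision tree, with a boolean modeling whether NewTarget is undefined (a plain call):

```ts
// Spec-faithful ToString: throws on Symbol, unlike a bare String() call.
function toStringSpec(v: unknown): string {
  if (typeof v === "symbol") throw new TypeError("Cannot convert a Symbol to a string");
  return String(v);
}

// Sketch of the String constructor's spec steps (illustrative).
function stringConstructor(newTargetUndefined: boolean, ...args: unknown[]): string | String {
  let s: string;
  if (args.length === 0) {
    s = "";                                 // 1. no arguments -> ""
  } else if (newTargetUndefined && typeof args[0] === "symbol") {
    return (args[0] as symbol).toString();  // 2a. SymbolDescriptiveString(value)
  } else {
    s = toStringSpec(args[0]);              // 2b. ToString(value)
  }
  if (newTargetUndefined) return s;         // 3. NewTarget undefined: return the primitive
  return new String(s);                     // 4. wrapper object (JSPrimitiveWrapper)
}
```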
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
new file mode 100644
index 00000000000000..48a08deb0aa207
--- /dev/null
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -0,0 +1,106 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace torque_internal {
+  // TODO(gsps): Synthesize SizeOf<T> in the compiler
+
+  macro SizeOf<T: type>(): constexpr int31;
+  SizeOf<Object>(): constexpr int31 {
+    return kTaggedSize;
+  }
+  SizeOf<float64>(): constexpr int31 {
+    return kDoubleSize;
+  }
+
+  // Unsafe is a marker that we require to be passed when calling internal APIs
+  // that might lead to unsoundness when used incorrectly. Unsafe markers should
+  // therefore not be instantiated anywhere outside of this namespace.
+  struct Unsafe {}
+
+  struct Reference<T: type> {
+    const object: HeapObject;
+    const offset: intptr;
+    unsafeMarker: Unsafe;
+  }
+
+  macro UnsafeNewReference<T: type>(object: HeapObject, offset: intptr):&T {
+    return Reference<T>{
+      object: object,
+      offset: offset,
+      unsafeMarker: Unsafe {}
+    };
+  }
+
+  struct Slice<T: type> {
+    TryAtIndex(index: intptr):&T labels OutOfBounds {
+      if (Convert<uintptr>(index) < Convert<uintptr>(this.length)) {
+        return UnsafeNewReference<T>(
+            this.object, this.offset + index * SizeOf<T>());
+      } else {
+        goto OutOfBounds;
+      }
+    }
+
+    AtIndex(index: intptr):&T {
+      return this.TryAtIndex(index) otherwise unreachable;
+    }
+
+    AtIndex(index: constexpr int31):&T {
+      const i: intptr = Convert<intptr>(index);
+      return this.TryAtIndex(i) otherwise unreachable;
+    }
+
+    AtIndex(index: Smi):&T {
+      const i: intptr = Convert<intptr>(index);
+      return this.TryAtIndex(i) otherwise unreachable;
+    }
+
+    Iterator(): SliceIterator<T> {
+      const end = this.offset + this.length * SizeOf<T>();
+      return SliceIterator<T>{
+        object: this.object,
+        start: this.offset,
+        end: end,
+        unsafeMarker: Unsafe {}
+      };
+    }
+
+    const object: HeapObject;
+    const offset: intptr;
+    const length: intptr;
+    unsafeMarker: Unsafe;
+  }
+
+  macro UnsafeNewSlice<T: type>(
+      object: HeapObject, offset: intptr, length: intptr): Slice<T> {
+    return Slice<T>{
+      object: object,
+      offset: offset,
+      length: length,
+      unsafeMarker: Unsafe {}
+    };
+  }
+
+  struct SliceIterator<T: type> {
+    Empty(): bool {
+      return this.start == this.end;
+    }
+
+    Next():&T labels NoMore {
+      if (this.Empty()) {
+        goto NoMore;
+      } else {
+        const result = UnsafeNewReference<T>(this.object, this.start);
+        this.start += SizeOf<T>();
+        return result;
+      }
+    }
+
+    object: HeapObject;
+    start: intptr;
+    end: intptr;
+    unsafeMarker: Unsafe;
+  }
+
+}  // namespace torque_internal
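
The Slice/Reference machinery above is a bounds-checked view over an object's body. Its shape translates naturally to TypeScript; in this sketch offsets become plain indices and the `Unsafe` marker has no analogue:

```ts
// TypeScript shape of torque_internal's Slice/SliceIterator (illustrative).
class Slice<T> {
  constructor(private backing: T[], private offset: number, readonly length: number) {}

  // TryAtIndex: the Torque code compares as uintptr so that one unsigned
  // check rejects both negative and too-large indices.
  tryAtIndex(index: number): T | undefined {
    return index >= 0 && index < this.length
        ? this.backing[this.offset + index]
        : undefined;
  }

  // Iterator(): walk from start to end, one element at a time.
  *[Symbol.iterator](): Generator<T> {
    for (let i = 0; i < this.length; i++) yield this.backing[this.offset + i];
  }
}
```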
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index f6ab289e12c166..a476739861684f 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -122,7 +122,7 @@ namespace typed_array_createtypedarray {
   // 22.2.4.2 TypedArray ( length )
   // ES #sec-typedarray-length
   transitioning macro ConstructByLength(implicit context: Context)(
-      map: Map, length: Object,
+      map: Map, length: JSAny,
       elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
     const convertedLength: Number =
         ToInteger_Inline(context, length, kTruncateMinusZero);
@@ -141,7 +141,7 @@ namespace typed_array_createtypedarray {
   // 22.2.4.4 TypedArray ( object )
   // ES #sec-typedarray-object
   transitioning macro ConstructByArrayLike(implicit context: Context)(
-      map: Map, arrayLike: HeapObject, initialLength: Object,
+      map: Map, arrayLike: HeapObject, initialLength: JSAny,
       elementsInfo: typed_array::TypedArrayElementsInfo,
       bufferConstructor: JSReceiver): JSTypedArray {
     // The caller has looked up length on arrayLike, which is observable.
@@ -178,7 +178,7 @@ namespace typed_array_createtypedarray {
   // ES #sec-typedarray-object
   transitioning macro ConstructByIterable(implicit context: Context)(
       iterable: JSReceiver, iteratorFn: Callable): never
-      labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
+      labels IfConstructByArrayLike(JSArray, Number, JSReceiver) {
     const array: JSArray =
         IterableToListMayPreserveHoles(context, iterable, iteratorFn);
     goto IfConstructByArrayLike(array, array.length, GetArrayBufferFunction());
@@ -188,7 +188,7 @@ namespace typed_array_createtypedarray {
   // ES #sec-typedarray-typedarray
   transitioning macro ConstructByTypedArray(implicit context: Context)(
       srcTypedArray: JSTypedArray): never
-      labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
+      labels IfConstructByArrayLike(JSTypedArray, Number, JSReceiver) {
     let bufferConstructor: JSReceiver = GetArrayBufferFunction();
     const srcBuffer: JSArrayBuffer = srcTypedArray.buffer;
     // TODO(petermarshall): Throw on detached typedArray.
@@ -210,7 +210,7 @@ namespace typed_array_createtypedarray {
   // 22.2.4.5 TypedArray ( buffer, byteOffset, length )
   // ES #sec-typedarray-buffer-byteoffset-length
   transitioning macro ConstructByArrayBuffer(implicit context: Context)(
-      map: Map, buffer: JSArrayBuffer, byteOffset: Object, length: Object,
+      map: Map, buffer: JSArrayBuffer, byteOffset: JSAny, length: JSAny,
       elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
     try {
       let offset: uintptr = 0;
@@ -294,7 +294,7 @@ namespace typed_array_createtypedarray {
 
   transitioning macro ConstructByJSReceiver(implicit context:
                                                 Context)(obj: JSReceiver): never
-      labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
+      labels IfConstructByArrayLike(JSReceiver, Number, JSReceiver) {
     try {
       const iteratorMethod: Object =
           GetIteratorMethod(obj) otherwise IfIteratorUndefined;
@@ -304,7 +304,7 @@ namespace typed_array_createtypedarray {
           otherwise IfConstructByArrayLike;
     }
     label IfIteratorUndefined {
-      const lengthObj: Object = GetProperty(obj, kLengthString);
+      const lengthObj: JSAny = GetProperty(obj, kLengthString);
       const length: Smi = ToSmiLength(lengthObj)
           otherwise goto IfInvalidLength(lengthObj);
       goto IfConstructByArrayLike(obj, length, GetArrayBufferFunction());
@@ -317,8 +317,8 @@ namespace typed_array_createtypedarray {
   // 22.2.4 The TypedArray Constructors
   // ES #sec-typedarray-constructors
   transitioning builtin CreateTypedArray(
-      context: Context, target: JSFunction, newTarget: JSReceiver, arg1: Object,
-      arg2: Object, arg3: Object): JSTypedArray {
+      context: Context, target: JSFunction, newTarget: JSReceiver, arg1: JSAny,
+      arg2: JSAny, arg3: JSAny): JSTypedArray {
     assert(IsConstructor(target));
     // 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
     // "%TypedArrayPrototype%").
@@ -345,16 +345,16 @@ namespace typed_array_createtypedarray {
         }
         // The first argument was a number or fell through and is treated as
         // a number. https://tc39.github.io/ecma262/#sec-typedarray-length
-        case (lengthObj: HeapObject): {
+        case (lengthObj: JSAny): {
           goto IfConstructByLength(lengthObj);
         }
       }
     }
-    label IfConstructByLength(length: Object) {
+    label IfConstructByLength(length: JSAny) {
       return ConstructByLength(map, length, elementsInfo);
     }
     label IfConstructByArrayLike(
-        arrayLike: HeapObject, length: Object, bufferConstructor: JSReceiver) {
+        arrayLike: JSReceiver, length: Number, bufferConstructor: JSReceiver) {
       return ConstructByArrayLike(
           map, arrayLike, length, elementsInfo, bufferConstructor);
     }
@@ -362,8 +362,8 @@ namespace typed_array_createtypedarray {
 
   transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
       methodName: constexpr string, numArgs: constexpr int31,
-      exemplar: JSTypedArray, arg0: Object, arg1: Object,
-      arg2: Object): JSTypedArray {
+      exemplar: JSTypedArray, arg0: JSAny, arg1: JSAny,
+      arg2: JSAny): JSTypedArray {
     const defaultConstructor = GetDefaultConstructor(exemplar);
 
     try {
@@ -386,7 +386,7 @@ namespace typed_array_createtypedarray {
 
       // TODO(pwong): Simplify and remove numArgs when varargs are supported in
       // macros.
-      let newObj: Object = Undefined;
+      let newObj: JSAny = Undefined;
       if constexpr (numArgs == 1) {
         newObj = Construct(constructor, arg0);
       } else {
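
CreateTypedArray's typeswitch dispatches on the first argument; a TypeScript sketch of that dispatch under the assumption that Float64Array stands in for the per-type constructors:

```ts
// Sketch of CreateTypedArray's argument dispatch (illustrative; not V8 API).
function createTypedArray(arg1: unknown, arg2?: unknown, arg3?: unknown): Float64Array {
  if (arg1 instanceof ArrayBuffer) {
    // ConstructByArrayBuffer: (buffer, byteOffset, length)
    return new Float64Array(arg1, Number(arg2 ?? 0),
                            arg3 === undefined ? undefined : Number(arg3));
  }
  if (typeof arg1 === "object" && arg1 !== null) {
    // ConstructByTypedArray / ConstructByIterable / ConstructByJSReceiver all
    // funnel into the shared (arrayLike, length) path.
    return Float64Array.from(arg1 as ArrayLike<number>);
  }
  // ConstructByLength: everything else is coerced to a length.
  return new Float64Array(Number(arg1 ?? 0));
}
```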
diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq
index 221814cb79d706..4b4fe72eb18e9f 100644
--- a/deps/v8/src/builtins/typed-array-every.tq
+++ b/deps/v8/src/builtins/typed-array-every.tq
@@ -9,7 +9,7 @@ namespace typed_array_every {
 
   transitioning macro EveryAllElements(implicit context: Context)(
       array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-      thisArg: Object): Boolean {
+      thisArg: JSAny): Boolean {
     let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
     // TODO(v8:4153): Support huge TypedArrays here.
     const length =
@@ -17,7 +17,7 @@ namespace typed_array_every {
     for (let k: Smi = 0; k < length; k++) {
       // BUG(4895): We should throw on detached buffers rather than simply exit.
       witness.Recheck() otherwise break;
-      const value: Object = witness.Load(k);
+      const value: JSAny = witness.Load(k);
       const result =
           Call(context, callbackfn, thisArg, value, k, witness.GetStable());
       if (!ToBoolean(result)) {
@@ -29,8 +29,8 @@ namespace typed_array_every {
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
   transitioning javascript builtin
-  TypedArrayPrototypeEvery(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  TypedArrayPrototypeEvery(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     // arguments[0] = callback
     // arguments[1] = thisArg
     try {
diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq
index 3937699c731ad3..66823f29e1ffb8 100644
--- a/deps/v8/src/builtins/typed-array-filter.tq
+++ b/deps/v8/src/builtins/typed-array-filter.tq
@@ -10,7 +10,7 @@ namespace typed_array_filter {
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.filter
   transitioning javascript builtin TypedArrayPrototypeFilter(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     // arguments[0] = callback
     // arguments[1] = thisArg
     try {
@@ -29,7 +29,7 @@ namespace typed_array_filter {
           otherwise ThrowTypeError(kCalledNonCallable, arguments[0]);
 
       // 5. If thisArg is present, let T be thisArg; else let T be undefined.
-      const thisArg: Object = arguments[1];
+      const thisArg: JSAny = arguments[1];
 
       // 6. Let kept be a new empty List.
       let kept = growable_fixed_array::NewGrowableFixedArray();
@@ -43,17 +43,17 @@ namespace typed_array_filter {
 
         // a. Let Pk be ! ToString(k).
         // b. Let kValue be ? Get(O, Pk).
-        const value: Object = witness.Load(k);
+        const value: JSAny = witness.Load(k);
 
         // c. Let selected be ToBoolean(? Call(callbackfn, T, « kValue, k, O
         // »)).
-        const selected: Object =
+        const selected: JSAny =
             Call(context, callbackfn, thisArg, value, k, witness.GetStable());
 
         // d. If selected is true, then
         //    i. Append kValue to the end of kept.
         //   ii. Increase captured by 1.
-        if (BranchIfToBooleanIsTrue(selected)) kept.Push(value);
+        if (ToBoolean(selected)) kept.Push(value);
 
         // e.Increase k by 1.
       }
diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq
index be1943ccf48ce9..9922abdc171c62 100644
--- a/deps/v8/src/builtins/typed-array-find.tq
+++ b/deps/v8/src/builtins/typed-array-find.tq
@@ -9,7 +9,7 @@ namespace typed_array_find {
 
   transitioning macro FindAllElements(implicit context: Context)(
       array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-      thisArg: Object): Object {
+      thisArg: JSAny): JSAny {
     let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
     // TODO(v8:4153): Support huge TypedArrays here.
     const length =
@@ -17,7 +17,7 @@ namespace typed_array_find {
     for (let k: Smi = 0; k < length; k++) {
       // BUG(4895): We should throw on detached buffers rather than simply exit.
       witness.Recheck() otherwise break;
-      const value: Object = witness.Load(k);
+      const value: JSAny = witness.Load(k);
       const result =
           Call(context, callbackfn, thisArg, value, k, witness.GetStable());
       if (ToBoolean(result)) {
@@ -29,8 +29,8 @@ namespace typed_array_find {
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find
   transitioning javascript builtin
-  TypedArrayPrototypeFind(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  TypedArrayPrototypeFind(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     // arguments[0] = callback
     // arguments[1] = thisArg
     try {
diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq
index a5ee7897d3c62d..5438f3dbfe32e3 100644
--- a/deps/v8/src/builtins/typed-array-findindex.tq
+++ b/deps/v8/src/builtins/typed-array-findindex.tq
@@ -9,7 +9,7 @@ namespace typed_array_findindex {
 
   transitioning macro FindIndexAllElements(implicit context: Context)(
       array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-      thisArg: Object): Number {
+      thisArg: JSAny): Number {
     let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
     // TODO(v8:4153): Support huge TypedArrays here.
     const length =
@@ -17,7 +17,7 @@ namespace typed_array_findindex {
     for (let k: Smi = 0; k < length; k++) {
       // BUG(4895): We should throw on detached buffers rather than simply exit.
       witness.Recheck() otherwise break;
-      const value: Object = witness.Load(k);
+      const value: JSAny = witness.Load(k);
       const result =
           Call(context, callbackfn, thisArg, value, k, witness.GetStable());
       if (ToBoolean(result)) {
@@ -29,8 +29,8 @@ namespace typed_array_findindex {
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.findIndex
   transitioning javascript builtin
-  TypedArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  TypedArrayPrototypeFindIndex(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     // arguments[0] = callback
     // arguments[1] = thisArg.
     try {
diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq
index 656a22e07d362a..b1ad894122fa68 100644
--- a/deps/v8/src/builtins/typed-array-foreach.tq
+++ b/deps/v8/src/builtins/typed-array-foreach.tq
@@ -9,7 +9,7 @@ namespace typed_array_foreach {
 
   transitioning macro ForEachAllElements(implicit context: Context)(
       array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-      thisArg: Object): Object {
+      thisArg: JSAny): Undefined {
     let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
     // TODO(v8:4153): Support huge TypedArrays here.
     const length =
@@ -17,7 +17,7 @@ namespace typed_array_foreach {
     for (let k: Smi = 0; k < length; k++) {
       // BUG(4895): We should throw on detached buffers rather than simply exit.
       witness.Recheck() otherwise break;
-      const value: Object = witness.Load(k);
+      const value: JSAny = witness.Load(k);
       Call(context, callbackfn, thisArg, value, k, witness.GetStable());
     }
     return Undefined;
@@ -25,8 +25,8 @@ namespace typed_array_foreach {
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.foreach
   transitioning javascript builtin
-  TypedArrayPrototypeForEach(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  TypedArrayPrototypeForEach(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): Undefined {
     // arguments[0] = callback
     // arguments[1] = this_arg.
 
diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq
index d69dc9a98d840a..7f8eeb7de5a9db 100644
--- a/deps/v8/src/builtins/typed-array-reduce.tq
+++ b/deps/v8/src/builtins/typed-array-reduce.tq
@@ -9,7 +9,7 @@ namespace typed_array_reduce {
 
   transitioning macro ReduceAllElements(implicit context: Context)(
       array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-      initialValue: Object): Object {
+      initialValue: JSAny | TheHole): JSAny {
     let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
     // TODO(v8:4153): Support huge TypedArrays here.
     const length =
@@ -18,25 +18,32 @@ namespace typed_array_reduce {
     for (let k: Smi = 0; k < length; k++) {
       // BUG(4895): We should throw on detached buffers rather than simply exit.
       witness.Recheck() otherwise break;
-      const value: Object = witness.Load(k);
-      if (accumulator == TheHole) {
-        accumulator = value;
-      } else {
-        accumulator = Call(
-            context, callbackfn, Undefined, accumulator, value, k,
-            witness.GetStable());
+      const value: JSAny = witness.Load(k);
+      typeswitch (accumulator) {
+        case (TheHole): {
+          accumulator = value;
+        }
+        case (accumulatorNotHole: JSAny): {
+          accumulator = Call(
+              context, callbackfn, Undefined, accumulatorNotHole, value, k,
+              witness.GetStable());
+        }
       }
     }
-    if (accumulator == TheHole) {
-      ThrowTypeError(kReduceNoInitial, kBuiltinName);
+    typeswitch (accumulator) {
+      case (TheHole): {
+        ThrowTypeError(kReduceNoInitial, kBuiltinName);
+      }
+      case (accumulator: JSAny): {
+        return accumulator;
+      }
     }
-    return accumulator;
   }
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduce
   transitioning javascript builtin
-  TypedArrayPrototypeReduce(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  TypedArrayPrototypeReduce(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     // arguments[0] = callback
     // arguments[1] = initialValue.
     try {
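
The typeswitch rewrite above makes the TheHole sentinel explicit in the type system: the accumulator has type JSAny | TheHole and stays "empty" until the first element is seen. A minimal C++ sketch of the same control flow, using std::optional as a stand-in for the sentinel (illustrative only, not V8 code):

    #include <functional>
    #include <optional>
    #include <stdexcept>
    #include <vector>

    double ReduceNoInitial(const std::vector<double>& elements,
                           const std::function<double(double, double)>& fn) {
      std::optional<double> acc;  // nullopt plays the role of TheHole
      for (double value : elements) {
        if (!acc.has_value()) {
          acc = value;            // case (TheHole): adopt the first element
        } else {
          acc = fn(*acc, value);  // case (JSAny): fold in the next one
        }
      }
      if (!acc.has_value()) {
        // Mirrors ThrowTypeError(kReduceNoInitial, ...): empty input, no seed.
        throw std::invalid_argument("reduce of empty array with no initial value");
      }
      return *acc;
    }
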
diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq
index 99a84401ed1c90..c8e4fe83d367fd 100644
--- a/deps/v8/src/builtins/typed-array-reduceright.tq
+++ b/deps/v8/src/builtins/typed-array-reduceright.tq
@@ -9,7 +9,7 @@ namespace typed_array_reduceright {
 
   transitioning macro ReduceRightAllElements(implicit context: Context)(
       array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-      initialValue: Object): Object {
+      initialValue: JSAny | TheHole): JSAny {
     let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
     // TODO(v8:4153): Support huge TypedArrays here.
     const length =
@@ -18,25 +18,32 @@ namespace typed_array_reduceright {
     for (let k: Smi = length - 1; k >= 0; k--) {
       // BUG(4895): We should throw on detached buffers rather than simply exit.
       witness.Recheck() otherwise break;
-      const value: Object = witness.Load(k);
-      if (accumulator == TheHole) {
-        accumulator = value;
-      } else {
-        accumulator = Call(
-            context, callbackfn, Undefined, accumulator, value, k,
-            witness.GetStable());
+      const value: JSAny = witness.Load(k);
+      typeswitch (accumulator) {
+        case (TheHole): {
+          accumulator = value;
+        }
+        case (accumulatorNotHole: JSAny): {
+          accumulator = Call(
+              context, callbackfn, Undefined, accumulatorNotHole, value, k,
+              witness.GetStable());
+        }
       }
     }
-    if (accumulator == TheHole) {
-      ThrowTypeError(kReduceNoInitial, kBuiltinName);
+    typeswitch (accumulator) {
+      case (TheHole): {
+        ThrowTypeError(kReduceNoInitial, kBuiltinName);
+      }
+      case (accumulator: JSAny): {
+        return accumulator;
+      }
     }
-    return accumulator;
   }
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
   transitioning javascript builtin
-  TypedArrayPrototypeReduceRight(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+  TypedArrayPrototypeReduceRight(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     // arguments[0] = callback
     // arguments[1] = initialValue.
     try {
diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq
index c0087ae1be0276..dc13865590e051 100644
--- a/deps/v8/src/builtins/typed-array-slice.tq
+++ b/deps/v8/src/builtins/typed-array-slice.tq
@@ -13,7 +13,7 @@ namespace typed_array_slice {
   macro FastCopy(
       src: typed_array::AttachedJSTypedArray, dest: JSTypedArray, k: intptr,
       count: PositiveSmi) labels IfSlow {
-    GotoIfForceSlowPath() otherwise IfSlow;
+    if (IsForceSlowPath()) goto IfSlow;
 
     const srcKind: ElementsKind = src.elements_kind;
     const destInfo = typed_array::GetTypedArrayElementsInfo(dest);
@@ -53,7 +53,7 @@ namespace typed_array_slice {
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.slice
   transitioning javascript builtin TypedArrayPrototypeSlice(
-      js-implicit context: Context, receiver: Object)(...arguments): Object {
+      js-implicit context: Context, receiver: JSAny)(...arguments): JSAny {
     // arguments[0] = start
     // arguments[1] = end
 
diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq
index 7056650fba824e..7d08b1433b5d69 100644
--- a/deps/v8/src/builtins/typed-array-some.tq
+++ b/deps/v8/src/builtins/typed-array-some.tq
@@ -9,7 +9,7 @@ namespace typed_array_some {
 
   transitioning macro SomeAllElements(implicit context: Context)(
       array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-      thisArg: Object): Boolean {
+      thisArg: JSAny): Boolean {
     let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
     // TODO(v8:4153): Support huge TypedArrays here.
     const length =
@@ -17,7 +17,7 @@ namespace typed_array_some {
     for (let k: Smi = 0; k < length; k++) {
       // BUG(4895): We should throw on detached buffers rather than simply exit.
       witness.Recheck() otherwise break;
-      const value: Object = witness.Load(k);
+      const value: JSAny = witness.Load(k);
       const result =
           Call(context, callbackfn, thisArg, value, k, witness.GetStable());
       if (ToBoolean(result)) {
@@ -29,8 +29,8 @@ namespace typed_array_some {
 
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.some
   transitioning javascript builtin
-  TypedArrayPrototypeSome(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  TypedArrayPrototypeSome(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     // arguments[0] = callback
     // arguments[1] = thisArg.
     try {
diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq
index 4f98123f823a19..589e67230c8dc8 100644
--- a/deps/v8/src/builtins/typed-array-subarray.tq
+++ b/deps/v8/src/builtins/typed-array-subarray.tq
@@ -6,7 +6,7 @@ namespace typed_array_subarray {
   // ES %TypedArray%.prototype.subarray
   transitioning javascript builtin TypedArrayPrototypeSubArray(
       js-implicit context: Context,
-      receiver: Object)(...arguments): JSTypedArray {
+      receiver: JSAny)(...arguments): JSTypedArray {
     const methodName: constexpr string = '%TypedArray%.prototype.subarray';
 
     // 1. Let O be the this value.
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index d03c1a0be977e3..59100736a5dc7b 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -51,9 +51,9 @@ namespace typed_array {
     sizeLog2: uintptr;
     kind: ElementsKind;
   }
-  extern runtime TypedArraySortFast(Context, Object): JSTypedArray;
+  extern runtime TypedArraySortFast(Context, JSAny): JSTypedArray;
   extern macro TypedArrayBuiltinsAssembler::ValidateTypedArray(
-      Context, Object, constexpr string): JSTypedArray;
+      Context, JSAny, constexpr string): JSTypedArray;
 
   extern macro TypedArrayBuiltinsAssembler::CallCMemcpy(
       RawPtr, RawPtr, uintptr): void;
@@ -72,10 +72,10 @@ namespace typed_array {
   extern macro LoadFixedTypedArrayElementAsTagged(
       RawPtr, Smi, constexpr ElementsKind): Numeric;
   extern macro StoreJSTypedArrayElementFromTagged(
-      Context, JSTypedArray, Smi, Object, constexpr ElementsKind);
+      Context, JSTypedArray, Smi, JSAny, constexpr ElementsKind);
 
-  type LoadFn = builtin(Context, JSTypedArray, Smi) => Object;
-  type StoreFn = builtin(Context, JSTypedArray, Smi, Object) => Object;
+  type LoadFn = builtin(Context, JSTypedArray, Smi) => JSAny;
+  type StoreFn = builtin(Context, JSTypedArray, Smi, JSAny) => JSAny;
 
   // AttachedJSTypedArray guards that the array's buffer is not detached.
   transient type AttachedJSTypedArray extends JSTypedArray;
@@ -100,7 +100,7 @@ namespace typed_array {
       this.unstable = %RawDownCast<AttachedJSTypedArray>(this.stable);
     }
 
-    Load(implicit context: Context)(k: Smi): Object {
+    Load(implicit context: Context)(k: Smi): JSAny {
       const lf: LoadFn = this.loadfn;
       return lf(context, this.unstable, k);
     }
@@ -190,14 +190,14 @@ namespace typed_array {
   }
 
   builtin LoadFixedElement<T: type>(
-      _context: Context, array: JSTypedArray, index: Smi): Object {
+      _context: Context, array: JSTypedArray, index: Smi): JSAny {
     return LoadFixedTypedArrayElementAsTagged(
         array.data_ptr, index, KindForArrayType<T>());
   }
 
   builtin StoreFixedElement<T: type>(
       context: Context, typedArray: JSTypedArray, index: Smi,
-      value: Object): Object {
+      value: JSAny): JSAny {
     StoreJSTypedArrayElementFromTagged(
         context, typedArray, index, value, KindForArrayType<T>());
     return Undefined;
@@ -205,7 +205,7 @@ namespace typed_array {
 
   transitioning macro CallCompare(
       implicit context: Context, array: JSTypedArray,
-      comparefn: Callable)(a: Object, b: Object): Number {
+      comparefn: Callable)(a: JSAny, b: JSAny): Number {
     // a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
     const v: Number =
         ToNumber_Inline(context, Call(context, comparefn, Undefined, a, b));
@@ -238,8 +238,8 @@ namespace typed_array {
         target.objects[targetIndex] = source.objects[left++];
       } else if (left < middle) {
         // If both have elements, we need to compare.
-        const leftElement: Object = source.objects[left];
-        const rightElement: Object = source.objects[right];
+        const leftElement = UnsafeCast<JSAny>(source.objects[left]);
+        const rightElement = UnsafeCast<JSAny>(source.objects[right]);
         if (CallCompare(leftElement, rightElement) <= 0) {
           target.objects[targetIndex] = leftElement;
           left++;
@@ -259,7 +259,7 @@ namespace typed_array {
   transitioning builtin
   TypedArrayMergeSort(
       implicit context: Context, array: JSTypedArray, comparefn: Callable)(
-      source: FixedArray, from: Smi, to: Smi, target: FixedArray): Object {
+      source: FixedArray, from: Smi, to: Smi, target: FixedArray): JSAny {
     assert(to - from > 1);
     const middle: Smi = from + ((to - from) >> 1);
 
@@ -277,17 +277,16 @@ namespace typed_array {
   // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort
   transitioning javascript builtin TypedArrayPrototypeSort(
       js-implicit context: Context,
-      receiver: Object)(...arguments): JSTypedArray {
+      receiver: JSAny)(...arguments): JSTypedArray {
     // 1. If comparefn is not undefined and IsCallable(comparefn) is false,
     //    throw a TypeError exception.
-    const comparefnObj: Object =
-        arguments.length > 0 ? arguments[0] : Undefined;
+    const comparefnObj: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
     if (comparefnObj != Undefined && !TaggedIsCallable(comparefnObj)) {
       ThrowTypeError(kBadSortComparisonFunction, comparefnObj);
     }
 
     // 2. Let obj be the this value.
-    const obj: Object = receiver;
+    const obj: JSAny = receiver;
 
     // 3. Let buffer be ? ValidateTypedArray(obj).
     //    ValidateTypedArray currently returns the array, not the ViewBuffer.
@@ -363,7 +362,7 @@ namespace typed_array {
     const work2: FixedArray = AllocateZeroedFixedArray(Convert<intptr>(len));
 
     for (let i: Smi = 0; i < len; ++i) {
-      const element: Object = loadfn(context, array, i);
+      const element: JSAny = loadfn(context, array, i);
       work1.objects[i] = element;
       work2.objects[i] = element;
     }
@@ -372,7 +371,7 @@ namespace typed_array {
 
     // work1 contains the sorted numbers. Write them back.
     for (let i: Smi = 0; i < len; ++i)
-      storefn(context, array, i, work1.objects[i]);
+      storefn(context, array, i, UnsafeCast<JSAny>(work1.objects[i]));
 
     return array;
   }
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index f15c8ba29f251a..b6b407fb3322ef 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -64,6 +64,18 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
+Operand RealStackLimitAsOperand(MacroAssembler* masm) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  CHECK(is_int32(offset));
+  return Operand(kRootRegister, static_cast<int32_t>(offset));
+}
+
 void Generate_StackOverflowCheck(
     MacroAssembler* masm, Register num_args, Register scratch,
     Label* stack_overflow,
@@ -71,7 +83,7 @@ void Generate_StackOverflowCheck(
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadRoot(kScratchRegister, RootIndex::kRealStackLimit);
+  __ movq(kScratchRegister, RealStackLimitAsOperand(masm));
   __ movq(scratch, rsp);
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
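
The net effect of RealStackLimitAsOperand is the same check as before, but the limit is read directly off kRootRegister instead of via LoadRoot. A hedged sketch of the arithmetic the generated code performs (names are illustrative):

    #include <cstdint>

    // headroom = rsp - real_jslimit must cover num_args pointer-sized slots.
    // The emitted code subtracts first and lets the comparison handle an
    // already-overflowed (conceptually negative) result.
    bool StackHasRoom(uintptr_t rsp, uintptr_t real_jslimit, int num_args) {
      if (rsp <= real_jslimit) return false;  // already at or past the limit
      uintptr_t headroom = rsp - real_jslimit;
      return headroom > static_cast<uintptr_t>(num_args) * sizeof(void*);
    }
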
@@ -735,7 +747,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ CompareRoot(rsp, RootIndex::kRealStackLimit);
+  __ cmpq(rsp, RealStackLimitAsOperand(masm));
   __ j(below, &stack_overflow);
 
   // Pop return address.
@@ -1134,7 +1146,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     Label ok;
     __ movq(rax, rsp);
     __ subq(rax, rcx);
-    __ CompareRoot(rax, RootIndex::kRealStackLimit);
+    __ cmpq(rax, RealStackLimitAsOperand(masm));
     __ j(above_equal, &ok, Label::kNear);
     __ CallRuntime(Runtime::kThrowStackOverflow);
     __ bind(&ok);
@@ -2339,9 +2351,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       __ shlq(rbx, Immediate(kSystemPointerSizeLog2));
       __ movq(kScratchRegister, rsp);
       __ subq(kScratchRegister, rbx);
+
       // We are not trying to catch interruptions (i.e. debug break and
       // preemption) here, so check the "real stack limit".
-      __ CompareRoot(kScratchRegister, RootIndex::kRealStackLimit);
+      __ cmpq(kScratchRegister, RealStackLimitAsOperand(masm));
       __ j(above_equal, &done, Label::kNear);
       {
         FrameScope scope(masm, StackFrame::MANUAL);
@@ -2663,9 +2676,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Push the function index as second argument.
     __ Push(r11);
     // Load the correct CEntry builtin from the instance object.
+    __ movq(rcx, FieldOperand(kWasmInstanceRegister,
+                              WasmInstanceObject::kIsolateRootOffset));
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
     __ LoadTaggedPointerField(
-        rcx, FieldOperand(kWasmInstanceRegister,
-                          WasmInstanceObject::kCEntryStubOffset));
+        rcx, MemOperand(rcx, IsolateData::builtin_slot_offset(centry_id)));
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Move(kContextRegister, Smi::zero());
diff --git a/deps/v8/src/codegen/DEPS b/deps/v8/src/codegen/DEPS
index f3715e6ad01f2a..ca53b615417e61 100644
--- a/deps/v8/src/codegen/DEPS
+++ b/deps/v8/src/codegen/DEPS
@@ -4,6 +4,8 @@
 
 specific_include_rules = {
   "external-reference.cc": [
+    # Required to call IrregexpInterpreter::NativeMatch from builtin.
+    "+src/regexp/regexp-interpreter.h",
     "+src/regexp/regexp-macro-assembler-arch.h",
   ],
 }
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 7ca49a3f9fc0ed..9c46063537d62d 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -4258,6 +4258,24 @@ void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
 
 enum NeonShiftOp { VSHL, VSHR, VSLI, VSRI };
 
+static Instr EncodeNeonShiftRegisterOp(NeonShiftOp op, NeonDataType dt,
+                                       NeonRegType reg_type, int dst_code,
+                                       int src_code, int shift_code) {
+  DCHECK_EQ(op, VSHL);
+  int op_encoding = 0;
+  int vd, d;
+  NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
+  int vm, m;
+  NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
+  int vn, n;
+  NeonSplitCode(reg_type, shift_code, &vn, &n, &op_encoding);
+  int size = NeonSz(dt);
+  int u = NeonU(dt);
+
+  return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
+         0x4 * B8 | n * B7 | m * B5 | vm | op_encoding;
+}
+
 static Instr EncodeNeonShiftOp(NeonShiftOp op, NeonSize size, bool is_unsigned,
                                NeonRegType reg_type, int dst_code, int src_code,
                                int shift) {
@@ -4315,6 +4333,15 @@ void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
                          dst.code(), src.code(), shift));
 }
 
+void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
+                     QwNeonRegister shift) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vshl(Qm, Qn) SIMD shift left by register.
+  // Instruction details available in ARM DDI 0487A.a, F8-3340.
+  emit(EncodeNeonShiftRegisterOp(VSHL, dt, NEON_Q, dst.code(), src.code(),
+                                 shift.code()));
+}
+
 void Assembler::vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
                      int shift) {
   DCHECK(IsEnabled(NEON));
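
A hypothetical usage sketch for the new register-shift variant (assuming NEON has been enabled on the assembler; q0..q2 and NeonS32 are the existing register and data-type names):

    // Each signed 32-bit lane of q1 is shifted left by the amount held in
    // the corresponding lane of q2; the result lands in q0.
    //   __ vshl(NeonS32, q0, q1, q2);
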
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index f383632f73aee8..f669943f34edd6 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -899,6 +899,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
              DwVfpRegister src2);
   void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
+  void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
+            QwNeonRegister shift);
   void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
   void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
   void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index ba334cd0b65af2..7f6d82518ec1dc 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -217,6 +217,13 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   Jump(static_cast<intptr_t>(code.address()), rmode, cond);
 }
 
+void TurboAssembler::Jump(const ExternalReference& reference) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Move(scratch, reference);
+  Jump(scratch);
+}
+
 void TurboAssembler::Call(Register target, Condition cond) {
   // Block constant pool for the call instruction sequence.
   BlockConstPoolScope block_const_pool(this);
@@ -289,13 +296,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   } else if (target_is_isolate_independent_builtin &&
              options().inline_offheap_trampolines) {
     // Inline the trampoline.
-    RecordCommentForOffHeapTrampoline(builtin_index);
-    EmbeddedData d = EmbeddedData::FromBlob();
-    Address entry = d.InstructionStartOfBuiltin(builtin_index);
-    // Use ip directly instead of using UseScratchRegisterScope, as we do not
-    // preserve scratch registers across calls.
-    mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-    Call(ip, cond);
+    CallBuiltin(builtin_index);
     return;
   }
 
@@ -323,6 +324,18 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
   Call(builtin_index);
 }
 
+void TurboAssembler::CallBuiltin(int builtin_index, Condition cond) {
+  DCHECK(Builtins::IsBuiltinId(builtin_index));
+  DCHECK(FLAG_embedded_builtins);
+  RecordCommentForOffHeapTrampoline(builtin_index);
+  EmbeddedData d = EmbeddedData::FromBlob();
+  Address entry = d.InstructionStartOfBuiltin(builtin_index);
+  // Use ip directly instead of using UseScratchRegisterScope, as we do not
+  // preserve scratch registers across calls.
+  mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+  Call(ip, cond);
+}
+
 void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                          Register code_object) {
   // Code objects are called differently depending on whether we are generating
@@ -795,8 +808,9 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
   }
 
-  if (remembered_set_action == OMIT_REMEMBERED_SET &&
-      !FLAG_incremental_marking) {
+  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+       !FLAG_incremental_marking) ||
+      FLAG_disable_write_barriers) {
     return;
   }
 
@@ -1832,6 +1846,8 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
 
   if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
     Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+  } else if (options().inline_offheap_trampolines) {
+    CallBuiltin(Builtins::kDoubleToI);
   } else {
     Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
   }
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index e4ce734f52a37f..bbea40b9a628cc 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -304,6 +304,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // register.
   void LoadEntryFromBuiltinIndex(Register builtin_index);
   void CallBuiltinByIndex(Register builtin_index) override;
+  void CallBuiltin(int builtin_index, Condition cond = al);
 
   void LoadCodeObjectEntry(Register destination, Register code_object) override;
   void CallCodeObject(Register code_object) override;
@@ -408,6 +409,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Jump(Register target, Condition cond = al);
   void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Jump(const ExternalReference& reference) override;
 
   // Perform a floating-point min or max operation with the
   // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 159e763ba26026..c798d3a8a03ed9 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -327,6 +327,12 @@ Assembler::Assembler(const AssemblerOptions& options,
       constpool_(this) {
   veneer_pool_blocked_nesting_ = 0;
   Reset();
+
+#if defined(V8_OS_WIN)
+  if (options.collect_win64_unwind_info) {
+    xdata_encoder_ = std::make_unique<win64_unwindinfo::XdataEncoder>(*this);
+  }
+#endif
 }
 
 Assembler::~Assembler() {
@@ -349,6 +355,14 @@ void Assembler::Reset() {
   next_veneer_pool_check_ = kMaxInt;
 }
 
+#if defined(V8_OS_WIN)
+win64_unwindinfo::BuiltinUnwindInfo Assembler::GetUnwindInfo() const {
+  DCHECK(options().collect_win64_unwind_info);
+  DCHECK_NOT_NULL(xdata_encoder_);
+  return xdata_encoder_->unwinding_info();
+}
+#endif
+
 void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
   DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
   for (auto& request : heap_object_requests_) {
@@ -1166,6 +1180,11 @@ void Assembler::cls(const Register& rd, const Register& rn) {
   DataProcessing1Source(rd, rn, CLS);
 }
 
+void Assembler::pacia1716() { Emit(PACIA1716); }
+void Assembler::autia1716() { Emit(AUTIA1716); }
+void Assembler::paciasp() { Emit(PACIASP); }
+void Assembler::autiasp() { Emit(AUTIASP); }
+
 void Assembler::ldp(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& src) {
   LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
@@ -1174,6 +1193,12 @@ void Assembler::ldp(const CPURegister& rt, const CPURegister& rt2,
 void Assembler::stp(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& dst) {
   LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
+
+#if defined(V8_OS_WIN)
+  if (xdata_encoder_ && rt == x29 && rt2 == lr && dst.base().IsSP()) {
+    xdata_encoder_->onSaveFpLr();
+  }
+#endif
 }
 
 void Assembler::ldpsw(const Register& rt, const Register& rt2,
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 6a6bf633c13ec6..04ee6d8b750e05 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -25,6 +25,10 @@
 #undef mvn
 #endif
 
+#if defined(V8_OS_WIN)
+#include "src/diagnostics/unwinding-info-win64.h"
+#endif  // V8_OS_WIN
+
 namespace v8 {
 namespace internal {
 
@@ -786,6 +790,22 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void clz(const Register& rd, const Register& rn);
   void cls(const Register& rd, const Register& rn);
 
+  // Pointer Authentication Code for Instruction address, using key A, with
+  // address in x17 and modifier in x16 [Armv8.3].
+  void pacia1716();
+
+  // Pointer Authentication Code for Instruction address, using key A, with
+  // address in LR and modifier in SP [Armv8.3].
+  void paciasp();
+
+  // Authenticate Instruction address, using key A, with address in x17 and
+  // modifier in x16 [Armv8.3].
+  void autia1716();
+
+  // Authenticate Instruction address, using key A, with address in LR and
+  // modifier in SP [Armv8.3].
+  void autiasp();
+
   // Memory instructions.
 
   // Load integer or FP register.
@@ -2400,6 +2420,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
   };
 
+#if defined(V8_OS_WIN)
+  win64_unwindinfo::XdataEncoder* GetXdataEncoder() {
+    return xdata_encoder_.get();
+  }
+
+  win64_unwindinfo::BuiltinUnwindInfo GetUnwindInfo() const;
+#endif
+
  protected:
   inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
 
@@ -2670,6 +2698,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   // veneer margin (or kMaxInt if there are no unresolved branches).
   int next_veneer_pool_check_;
 
+#if defined(V8_OS_WIN)
+  std::unique_ptr<win64_unwindinfo::XdataEncoder> xdata_encoder_;
+#endif
+
  private:
   // Avoid overflows for displacements etc.
   static const int kMaximalBufferSize = 512 * MB;
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index a1e962452b7cb2..914268644a6d54 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -130,6 +130,8 @@ const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1)
 static_assert(kAddressTagMask == UINT64_C(0xff00000000000000),
               "AddressTagMask must represent most-significant eight bits.");
 
+const uint64_t kTTBRMask = UINT64_C(1) << 55;
+
 // AArch64 floating-point specifics. These match IEEE-754.
 const unsigned kDoubleMantissaBits = 52;
 const unsigned kDoubleExponentBits = 11;
@@ -760,6 +762,16 @@ enum MemBarrierOp : uint32_t {
   ISB = MemBarrierFixed | 0x00000040
 };
 
+enum SystemPAuthOp : uint32_t {
+  SystemPAuthFixed = 0xD503211F,
+  SystemPAuthFMask = 0xFFFFFD1F,
+  SystemPAuthMask = 0xFFFFFFFF,
+  PACIA1716 = SystemPAuthFixed | 0x00000100,
+  AUTIA1716 = SystemPAuthFixed | 0x00000180,
+  PACIASP = SystemPAuthFixed | 0x00000320,
+  AUTIASP = SystemPAuthFixed | 0x000003A0
+};
+
 // Any load or store (including pair).
 enum LoadStoreAnyOp : uint32_t {
   LoadStoreAnyFMask = 0x0a000000,
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.cc b/deps/v8/src/codegen/arm64/instructions-arm64.cc
index dfc2ef13236b0f..05f3654da99d34 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.cc
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.cc
@@ -211,7 +211,8 @@ Instruction* Instruction::ImmPCOffsetTarget() {
 
 bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                      ptrdiff_t offset) {
-  return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
+  DCHECK_EQ(offset % kInstrSize, 0);
+  return is_intn(offset / kInstrSize, ImmBranchRangeBitwidth(branch_type));
 }
 
 bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
@@ -251,8 +252,7 @@ void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
 
 void Instruction::SetBranchImmTarget(Instruction* target) {
   DCHECK(IsAligned(DistanceTo(target), kInstrSize));
-  DCHECK(
-      IsValidImmPCOffset(BranchType(), DistanceTo(target) >> kInstrSizeLog2));
+  DCHECK(IsValidImmPCOffset(BranchType(), DistanceTo(target)));
   int offset = static_cast<int>(DistanceTo(target) >> kInstrSizeLog2);
   Instr branch_imm = 0;
   uint32_t imm_mask = 0;
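
IsValidImmPCOffset now takes an unscaled byte offset and divides by the instruction size itself, rather than trusting callers to pre-scale; SetBranchImmTarget is adjusted to match. A small sketch of the repaired check (kInstrSize is 4 on arm64; is_intn semantics approximated):

    #include <cassert>
    #include <cstdint>

    constexpr int64_t kInstrSize = 4;

    bool IsIntN(int64_t x, unsigned n) {
      int64_t limit = int64_t{1} << (n - 1);
      return -limit <= x && x < limit;
    }

    bool IsValidBranchByteOffset(int64_t byte_offset, unsigned range_bits) {
      assert(byte_offset % kInstrSize == 0);  // mirrors the DCHECK_EQ
      return IsIntN(byte_offset / kInstrSize, range_bits);
    }
    // e.g. an unconditional B encodes a signed 26-bit instruction count,
    // giving a reach of +/-(1 << 25) * 4 bytes = +/-128 MB.
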
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h
index a73c3feed74396..1132ba39db2d8d 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.h
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.h
@@ -390,6 +390,8 @@ class Instruction {
   // PC-relative addressing instruction.
   V8_EXPORT_PRIVATE Instruction* ImmPCOffsetTarget();
 
+  // Check whether the offset is within the range of the given branch type.
+  // The offset is a byte offset, unscaled.
   static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset);
   bool IsTargetInImmPCOffsetRange(Instruction* target);
   // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 792a8637f698d3..0a721b06474987 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -13,6 +13,7 @@
 #include "src/codegen/macro-assembler-inl.h"
 #include "src/codegen/register-configuration.h"
 #include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
 #include "src/execution/frame-constants.h"
 #include "src/execution/frames-inl.h"
 #include "src/heap/heap-inl.h"  // For MemoryChunk.
@@ -1138,43 +1139,28 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
   UseScratchRegisterScope temps(this);
   Register temp = temps.AcquireSameSizeAs(count);
 
-  if (FLAG_optimize_for_size) {
-    Label loop, done;
+  Label loop, leftover2, leftover1, done;
 
-    Subs(temp, count, 1);
-    B(mi, &done);
+  Subs(temp, count, 4);
+  B(mi, &leftover2);
 
-    // Push all registers individually, to save code size.
-    Bind(&loop);
-    Subs(temp, temp, 1);
-    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
-    B(pl, &loop);
-
-    Bind(&done);
-  } else {
-    Label loop, leftover2, leftover1, done;
-
-    Subs(temp, count, 4);
-    B(mi, &leftover2);
-
-    // Push groups of four first.
-    Bind(&loop);
-    Subs(temp, temp, 4);
-    PushHelper(4, src.SizeInBytes(), src, src, src, src);
-    B(pl, &loop);
+  // Push groups of four first.
+  Bind(&loop);
+  Subs(temp, temp, 4);
+  PushHelper(4, src.SizeInBytes(), src, src, src, src);
+  B(pl, &loop);
 
-    // Push groups of two.
-    Bind(&leftover2);
-    Tbz(count, 1, &leftover1);
-    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
+  // Push groups of two.
+  Bind(&leftover2);
+  Tbz(count, 1, &leftover1);
+  PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
 
-    // Push the last one (if required).
-    Bind(&leftover1);
-    Tbz(count, 0, &done);
-    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
+  // Push the last one (if required).
+  Bind(&leftover1);
+  Tbz(count, 0, &done);
+  PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
 
-    Bind(&done);
-  }
+  Bind(&done);
 }
 
 void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
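
The rewrite drops the optimize_for_size slow path and always uses the unrolled form: push in groups of four while the count allows, then consume bits 1 and 0 of the original count for the two- and one-register tails (the Tbz tests). The shape of the loop, as a hedged C++ sketch:

    #include <cstddef>

    void PushMultipleTimes(size_t count, void (*push_n)(int)) {
      size_t remaining = count;
      while (remaining >= 4) {   // Subs(temp, temp, 4); B(pl, &loop)
        push_n(4);
        remaining -= 4;
      }
      if (count & 2) push_n(2);  // Tbz(count, 1, &leftover1)
      if (count & 1) push_n(1);  // Tbz(count, 0, &done)
    }
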
@@ -1301,6 +1287,14 @@ void MacroAssembler::PushCalleeSavedRegisters() {
   stp(d8, d9, tos);
 
   stp(x29, x30, tos);
+#if defined(V8_OS_WIN)
+  // kFramePointerOffsetInPushCalleeSavedRegisters is the offset from tos at
+  // the end of this function to the saved caller's fp/x29 pointer. It spans
+  // the ten registers x19 to x28, which are saved by the stp instructions
+  // below.
+  STATIC_ASSERT(kFramePointerOffsetInPushCalleeSavedRegisters ==
+                10 * kSystemPointerSize);
+#endif  // defined(V8_OS_WIN)
   stp(x27, x28, tos);
   stp(x25, x26, tos);
   stp(x23, x24, tos);
@@ -1873,6 +1867,13 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   }
 }
 
+void TurboAssembler::Jump(const ExternalReference& reference) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.AcquireX();
+  Mov(scratch, reference);
+  Jump(scratch);
+}
+
 void TurboAssembler::Call(Register target) {
   BlockPoolsScope scope(this);
   Blr(target);
@@ -1900,14 +1901,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
     if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
         Builtins::IsIsolateIndependent(builtin_index)) {
       // Inline the trampoline.
-      RecordCommentForOffHeapTrampoline(builtin_index);
-      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-      UseScratchRegisterScope temps(this);
-      Register scratch = temps.AcquireX();
-      EmbeddedData d = EmbeddedData::FromBlob();
-      Address entry = d.InstructionStartOfBuiltin(builtin_index);
-      Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-      Call(scratch);
+      CallBuiltin(builtin_index);
       return;
     }
   }
@@ -1951,6 +1945,19 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
   Call(builtin_index);
 }
 
+void TurboAssembler::CallBuiltin(int builtin_index) {
+  DCHECK(Builtins::IsBuiltinId(builtin_index));
+  DCHECK(FLAG_embedded_builtins);
+  RecordCommentForOffHeapTrampoline(builtin_index);
+  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.AcquireX();
+  EmbeddedData d = EmbeddedData::FromBlob();
+  Address entry = d.InstructionStartOfBuiltin(builtin_index);
+  Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+  Call(scratch);
+}
+
 void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                          Register code_object) {
   // Code objects are called differently depending on whether we are generating
@@ -2051,22 +2058,17 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
 
 void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
   BlockPoolsScope scope(this);
-  NoRootArrayScope no_root_array(this);
-
 #ifdef DEBUG
   Label start;
-  Bind(&start);
+  bind(&start);
 #endif
-  // Make sure that the deopt id can be encoded in 16 bits, so can be encoded
-  // in a single movz instruction with a zero shift.
-  DCHECK(is_uint16(deopt_id));
-  movz(x26, deopt_id);
   int64_t offset = static_cast<int64_t>(target) -
                    static_cast<int64_t>(options().code_range_start);
   DCHECK_EQ(offset % kInstrSize, 0);
   offset = offset / static_cast<int>(kInstrSize);
   DCHECK(IsNearCallOffset(offset));
   near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
+  DCHECK_EQ(SizeOfCodeGeneratedSince(&start), Deoptimizer::kDeoptExitSize);
 }
 
 void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
@@ -2374,6 +2376,8 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
   // DoubleToI preserves any registers it needs to clobber.
   if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
     Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+  } else if (options().inline_offheap_trampolines) {
+    CallBuiltin(Builtins::kDoubleToI);
   } else {
     Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
   }
@@ -3002,6 +3006,12 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
   }
 
+  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+       !FLAG_incremental_marking) ||
+      FLAG_disable_write_barriers) {
+    return;
+  }
+
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index d4e9c3055b0989..94091e862489c5 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -83,6 +83,12 @@ inline MemOperand FieldMemOperand(Register object, int offset);
 // ----------------------------------------------------------------------------
 // MacroAssembler
 
+#if defined(V8_OS_WIN)
+// This offset originates from PushCalleeSavedRegisters.
+static constexpr int kFramePointerOffsetInPushCalleeSavedRegisters =
+    10 * kSystemPointerSize;
+#endif  // V8_OS_WIN
+
 enum BranchType {
   // Copies of architectural conditions.
   // The associated conditions can be used in place of those, the code will
@@ -515,6 +521,46 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Cbnz(const Register& rt, Label* label);
   void Cbz(const Register& rt, Label* label);
 
+  void Paciasp() {
+    DCHECK(allow_macro_instructions_);
+    paciasp();
+  }
+  void Autiasp() {
+    DCHECK(allow_macro_instructions_);
+    autiasp();
+  }
+
+  // The 1716 pac and aut instructions encourage people to use x16 and x17
+  // directly, perhaps without realising that this is forbidden. For example:
+  //
+  //     UseScratchRegisterScope temps(&masm);
+  //     Register temp = temps.AcquireX();  // temp will be x16
+  //     __ Mov(x17, ptr);
+  //     __ Mov(x16, modifier);  // Will override temp!
+  //     __ Pacia1716();
+  //
+  // To work around this issue, you must exclude x16 and x17 from the scratch
+  // register list. You may need to replace them with other registers:
+  //
+  //     UseScratchRegisterScope temps(&masm);
+  //     temps.Exclude(x16, x17);
+  //     temps.Include(x10, x11);
+  //     __ Mov(x17, ptr);
+  //     __ Mov(x16, modifier);
+  //     __ Pacia1716();
+  void Pacia1716() {
+    DCHECK(allow_macro_instructions_);
+    DCHECK(!TmpList()->IncludesAliasOf(x16));
+    DCHECK(!TmpList()->IncludesAliasOf(x17));
+    pacia1716();
+  }
+  void Autia1716() {
+    DCHECK(allow_macro_instructions_);
+    DCHECK(!TmpList()->IncludesAliasOf(x16));
+    DCHECK(!TmpList()->IncludesAliasOf(x17));
+    autia1716();
+  }
+
   inline void Dmb(BarrierDomain domain, BarrierType type);
   inline void Dsb(BarrierDomain domain, BarrierType type);
   inline void Isb();
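
Paciasp and Autiasp are intended to bracket a function's use of the saved return address: sign LR (with SP as modifier) before spilling it, authenticate it again just before returning. A hypothetical pairing (sketch only; the actual frame setup varies):

    // Prologue: sign LR before it is saved to the stack.
    //   __ Paciasp();
    //   __ Push(lr, fp);
    //   ...
    // Epilogue: reload LR, authenticate, then return.
    //   __ Pop(lr, fp);
    //   __ Autiasp();
    //   __ Ret();
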
@@ -843,6 +889,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Jump(Register target, Condition cond = al);
   void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Jump(const ExternalReference& reference) override;
 
   void Call(Register target);
   void Call(Address target, RelocInfo::Mode rmode);
@@ -856,6 +903,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // register.
   void LoadEntryFromBuiltinIndex(Register builtin_index);
   void CallBuiltinByIndex(Register builtin_index) override;
+  void CallBuiltin(int builtin_index);
 
   void LoadCodeObjectEntry(Register destination, Register code_object) override;
   void CallCodeObject(Register code_object) override;
@@ -1997,6 +2045,26 @@ class UseScratchRegisterScope {
   Register AcquireSameSizeAs(const Register& reg);
   V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg);
 
+  void Include(const CPURegList& list) { available_->Combine(list); }
+  void Exclude(const CPURegList& list) {
+#if DEBUG
+    CPURegList copy(list);
+    while (!copy.IsEmpty()) {
+      const CPURegister& reg = copy.PopHighestIndex();
+      DCHECK(available_->IncludesAliasOf(reg));
+    }
+#endif
+    available_->Remove(list);
+  }
+  void Include(const Register& reg1, const Register& reg2) {
+    CPURegList list(reg1, reg2);
+    Include(list);
+  }
+  void Exclude(const Register& reg1, const Register& reg2) {
+    CPURegList list(reg1, reg2);
+    Exclude(list);
+  }
+
  private:
   V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
       CPURegList* available);
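
A usage sketch for the new Include/Exclude controls, matching the Pacia1716 comment earlier in this file (register choices here are illustrative):

    // UseScratchRegisterScope temps(&masm);
    // temps.Exclude(x16, x17);           // the 1716 pac/aut instructions own these
    // temps.Include(x10, x11);           // donate replacements to the scratch pool
    // Register temp = temps.AcquireX();  // now guaranteed not to be x16 or x17
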
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index b429786aa95050..7b938579f409a9 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -151,13 +151,21 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
   }
   bool IsValid() const { return reg_type_ != kNoRegister; }
   bool IsNone() const { return reg_type_ == kNoRegister; }
-  bool Is(const CPURegister& other) const {
+  constexpr bool Is(const CPURegister& other) const {
     return Aliases(other) && (reg_size_ == other.reg_size_);
   }
-  bool Aliases(const CPURegister& other) const {
+  constexpr bool Aliases(const CPURegister& other) const {
     return (reg_code_ == other.reg_code_) && (reg_type_ == other.reg_type_);
   }
 
+  constexpr bool operator==(const CPURegister& other) const {
+    return Is(other);
+  }
+
+  constexpr bool operator!=(const CPURegister& other) const {
+    return !(*this == other);
+  }
+
   bool IsZero() const;
   bool IsSP() const;
 
@@ -559,8 +567,6 @@ using Simd128Register = VRegister;
 // Lists of registers.
 class V8_EXPORT_PRIVATE CPURegList {
  public:
-  CPURegList() = default;
-
   template <typename... CPURegisters>
   explicit CPURegList(CPURegister reg0, CPURegisters... regs)
       : list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index 931b7837303549..c8838b05666463 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -267,9 +267,9 @@ Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags) {
     case STRING_ADD_CHECK_NONE:
       return Builtins::CallableFor(isolate, Builtins::kStringAdd_CheckNone);
     case STRING_ADD_CONVERT_LEFT:
-      return Builtins::CallableFor(isolate, Builtins::kStringAdd_ConvertLeft);
+      return Builtins::CallableFor(isolate, Builtins::kStringAddConvertLeft);
     case STRING_ADD_CONVERT_RIGHT:
-      return Builtins::CallableFor(isolate, Builtins::kStringAdd_ConvertRight);
+      return Builtins::CallableFor(isolate, Builtins::kStringAddConvertRight);
   }
   UNREACHABLE();
 }
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index e4f35ddcc88472..7dad8cb95e00a2 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -4,7 +4,10 @@
 
 #include "src/codegen/code-stub-assembler.h"
 
+#include "include/v8-internal.h"
+#include "src/base/macros.h"
 #include "src/codegen/code-factory.h"
+#include "src/common/globals.h"
 #include "src/execution/frames-inl.h"
 #include "src/execution/frames.h"
 #include "src/heap/heap-inl.h"  // For Page/MemoryChunk. TODO(jkummerow): Drop.
@@ -81,6 +84,16 @@ void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
 #endif
 }
 
+void CodeStubAssembler::Assert(SloppyTNode<Word32T> condition_node,
+                               const char* message, const char* file, int line,
+                               std::initializer_list<ExtraNode> extra_nodes) {
+#if defined(DEBUG)
+  if (FLAG_debug_code) {
+    Check(condition_node, message, file, line, extra_nodes);
+  }
+#endif
+}
+
 void CodeStubAssembler::Check(const BranchGenerator& branch,
                               const char* message, const char* file, int line,
                               std::initializer_list<ExtraNode> extra_nodes) {
@@ -112,6 +125,16 @@ void CodeStubAssembler::Check(const NodeGenerator& condition_body,
   Check(branch, message, file, line, extra_nodes);
 }
 
+void CodeStubAssembler::Check(SloppyTNode<Word32T> condition_node,
+                              const char* message, const char* file, int line,
+                              std::initializer_list<ExtraNode> extra_nodes) {
+  BranchGenerator branch = [=](Label* ok, Label* not_ok) {
+    Branch(condition_node, ok, not_ok);
+  };
+
+  Check(branch, message, file, line, extra_nodes);
+}
+
 void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
   Label ok(this), not_ok(this, Label::kDeferred);
   Branch(condition, &ok, &not_ok);
@@ -132,7 +155,7 @@ void CodeStubAssembler::FailAssert(
     SNPrintF(chars, "%s [%s:%d]", message, file, line);
     message = chars.begin();
   }
-  Node* message_node = StringConstant(message);
+  TNode<String> message_node = StringConstant(message);
 
 #ifdef DEBUG
   // Only print the extra nodes in debug builds.
@@ -222,15 +245,25 @@ HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
 #define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
   compiler::TNode<BoolT> CodeStubAssembler::Is##name(             \
       SloppyTNode<Object> value) {                                \
-    return WordEqual(value, name##Constant());                    \
+    return TaggedEqual(value, name##Constant());                  \
   }                                                               \
   compiler::TNode<BoolT> CodeStubAssembler::IsNot##name(          \
       SloppyTNode<Object> value) {                                \
-    return WordNotEqual(value, name##Constant());                 \
+    return TaggedNotEqual(value, name##Constant());               \
   }
 HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
 #undef HEAP_CONSTANT_TEST
 
+TNode<BInt> CodeStubAssembler::BIntConstant(int value) {
+#if defined(BINT_IS_SMI)
+  return SmiConstant(value);
+#elif defined(BINT_IS_INTPTR)
+  return IntPtrConstant(value);
+#else
+#error Unknown architecture.
+#endif
+}
+
 Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
   if (mode == SMI_PARAMETERS) {
     return SmiConstant(value);
@@ -240,12 +273,34 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
   }
 }
 
+TNode<BoolT> CodeStubAssembler::IntPtrOrSmiEqual(Node* left, Node* right,
+                                                 ParameterMode mode) {
+  if (mode == SMI_PARAMETERS) {
+    return SmiEqual(CAST(left), CAST(right));
+  } else {
+    DCHECK_EQ(INTPTR_PARAMETERS, mode);
+    return IntPtrEqual(UncheckedCast<IntPtrT>(left),
+                       UncheckedCast<IntPtrT>(right));
+  }
+}
+
+TNode<BoolT> CodeStubAssembler::IntPtrOrSmiNotEqual(Node* left, Node* right,
+                                                    ParameterMode mode) {
+  if (mode == SMI_PARAMETERS) {
+    return SmiNotEqual(CAST(left), CAST(right));
+  } else {
+    DCHECK_EQ(INTPTR_PARAMETERS, mode);
+    return WordNotEqual(UncheckedCast<IntPtrT>(left),
+                        UncheckedCast<IntPtrT>(right));
+  }
+}
+
 bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
                                                   ParameterMode mode) {
   int32_t constant_test;
   Smi smi_test;
   if (mode == INTPTR_PARAMETERS) {
-    if (ToInt32Constant(test, constant_test) && constant_test == 0) {
+    if (ToInt32Constant(test, &constant_test) && constant_test == 0) {
       return true;
     }
   } else {
@@ -262,7 +317,7 @@ bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
                                                        ParameterMode mode) {
   int32_t int32_constant;
   if (mode == INTPTR_PARAMETERS) {
-    if (ToInt32Constant(maybe_constant, int32_constant)) {
+    if (ToInt32Constant(maybe_constant, &int32_constant)) {
       *value = int32_constant;
       return true;
     }
@@ -298,17 +353,17 @@ Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) {
 
 TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) {
   // value && !(value & (value - 1))
-  return WordEqual(
+  return IntPtrEqual(
       Select<IntPtrT>(
-          WordEqual(value, IntPtrConstant(0)),
+          IntPtrEqual(value, IntPtrConstant(0)),
           [=] { return IntPtrConstant(1); },
           [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); }),
       IntPtrConstant(0));
 }
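
The retyped comparison still computes value && !(value & (value - 1)); the Select routes value == 0 through a nonzero constant so that the final equality-with-zero test fails. A plain C++ rendering of the same logic:

    #include <cstdint>

    bool WordIsPowerOfTwo(intptr_t value) {
      intptr_t masked = (value == 0)
                            ? 1                       // force "not a power of two"
                            : (value & (value - 1));  // 0 iff exactly one bit set
      return masked == 0;
    }
    // 8 -> 8 & 7 == 0 (true); 12 -> 12 & 11 != 0 (false); 0 -> false.
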
 
 TNode<Float64T> CodeStubAssembler::Float64Round(SloppyTNode<Float64T> x) {
-  Node* one = Float64Constant(1.0);
-  Node* one_half = Float64Constant(0.5);
+  TNode<Float64T> one = Float64Constant(1.0);
+  TNode<Float64T> one_half = Float64Constant(0.5);
 
   Label return_x(this);
 
@@ -329,10 +384,10 @@ TNode<Float64T> CodeStubAssembler::Float64Ceil(SloppyTNode<Float64T> x) {
     return Float64RoundUp(x);
   }
 
-  Node* one = Float64Constant(1.0);
-  Node* zero = Float64Constant(0.0);
-  Node* two_52 = Float64Constant(4503599627370496.0E0);
-  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+  TNode<Float64T> one = Float64Constant(1.0);
+  TNode<Float64T> zero = Float64Constant(0.0);
+  TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0);
+  TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0);
 
   VARIABLE(var_x, MachineRepresentation::kFloat64, x);
   Label return_x(this), return_minus_x(this);
@@ -361,7 +416,7 @@ TNode<Float64T> CodeStubAssembler::Float64Ceil(SloppyTNode<Float64T> x) {
     GotoIfNot(Float64LessThan(x, zero), &return_x);
 
     // Round negated {x} towards Infinity and return the result negated.
-    Node* minus_x = Float64Neg(x);
+    TNode<Float64T> minus_x = Float64Neg(x);
     var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
     GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
     var_x.Bind(Float64Sub(var_x.value(), one));
@@ -381,10 +436,10 @@ TNode<Float64T> CodeStubAssembler::Float64Floor(SloppyTNode<Float64T> x) {
     return Float64RoundDown(x);
   }
 
-  Node* one = Float64Constant(1.0);
-  Node* zero = Float64Constant(0.0);
-  Node* two_52 = Float64Constant(4503599627370496.0E0);
-  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+  TNode<Float64T> one = Float64Constant(1.0);
+  TNode<Float64T> zero = Float64Constant(0.0);
+  TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0);
+  TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0);
 
   VARIABLE(var_x, MachineRepresentation::kFloat64, x);
   Label return_x(this), return_minus_x(this);
@@ -413,7 +468,7 @@ TNode<Float64T> CodeStubAssembler::Float64Floor(SloppyTNode<Float64T> x) {
     GotoIfNot(Float64LessThan(x, zero), &return_x);
 
     // Round negated {x} towards -Infinity and return the result negated.
-    Node* minus_x = Float64Neg(x);
+    TNode<Float64T> minus_x = Float64Neg(x);
     var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
     GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
     var_x.Bind(Float64Add(var_x.value(), one));
@@ -433,8 +488,8 @@ TNode<Float64T> CodeStubAssembler::Float64RoundToEven(SloppyTNode<Float64T> x) {
     return Float64RoundTiesEven(x);
   }
   // See ES#sec-touint8clamp for details.
-  Node* f = Float64Floor(x);
-  Node* f_and_half = Float64Add(f, Float64Constant(0.5));
+  TNode<Float64T> f = Float64Floor(x);
+  TNode<Float64T> f_and_half = Float64Add(f, Float64Constant(0.5));
 
   VARIABLE(var_result, MachineRepresentation::kFloat64);
   Label return_f(this), return_f_plus_one(this), done(this);
@@ -442,7 +497,7 @@ TNode<Float64T> CodeStubAssembler::Float64RoundToEven(SloppyTNode<Float64T> x) {
   GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one);
   GotoIf(Float64LessThan(x, f_and_half), &return_f);
   {
-    Node* f_mod_2 = Float64Mod(f, Float64Constant(2.0));
+    TNode<Float64T> f_mod_2 = Float64Mod(f, Float64Constant(2.0));
     Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f,
            &return_f_plus_one);
   }
@@ -464,10 +519,10 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
     return Float64RoundTruncate(x);
   }
 
-  Node* one = Float64Constant(1.0);
-  Node* zero = Float64Constant(0.0);
-  Node* two_52 = Float64Constant(4503599627370496.0E0);
-  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+  TNode<Float64T> one = Float64Constant(1.0);
+  TNode<Float64T> zero = Float64Constant(0.0);
+  TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0);
+  TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0);
 
   VARIABLE(var_x, MachineRepresentation::kFloat64, x);
   Label return_x(this), return_minus_x(this);
@@ -504,7 +559,7 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
       GotoIfNot(Float64LessThan(x, zero), &return_x);
 
       // Round negated {x} towards -Infinity and return result negated.
-      Node* minus_x = Float64Neg(x);
+      TNode<Float64T> minus_x = Float64Neg(x);
       var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
       GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
       var_x.Bind(Float64Sub(var_x.value(), one));
@@ -521,10 +576,10 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
 }
 
 TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
-  if (SmiValuesAre31Bits() && kSystemPointerSize == kInt64Size) {
-    // Check that the Smi value is properly sign-extended.
-    TNode<IntPtrT> value = Signed(BitcastTaggedSignedToWord(smi));
-    return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value)));
+  if (SmiValuesAre32Bits() && kSystemPointerSize == kInt64Size) {
+    // Check that the Smi value is zero in the lower bits.
+    TNode<IntPtrT> value = BitcastTaggedSignedToWord(smi);
+    return Word32Equal(Int32Constant(0), TruncateIntPtrToInt32(value));
   }
   return Int32TrueConstant();
 }
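
IsValidSmi now validates the 32-bit Smi layout on 64-bit targets: the payload occupies the upper word, so the low 32 bits of a well-formed tagged Smi (tag included) are zero. A hedged sketch of the layout being checked:

    #include <cstdint>

    int64_t TagSmi32(int32_t value) {
      return static_cast<int64_t>(value) << 32;  // payload above, zeros below
    }

    bool IsValidSmi32(int64_t tagged) {
      return static_cast<int32_t>(tagged) == 0;  // low 32 bits must be zero
    }
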
@@ -542,7 +597,7 @@ TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
 
 TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
   intptr_t constant_value;
-  if (ToIntPtrConstant(value, constant_value)) {
+  if (ToIntPtrConstant(value, &constant_value)) {
     return (static_cast<uintptr_t>(constant_value) <=
             static_cast<uintptr_t>(Smi::kMaxValue))
                ? Int32TrueConstant()
@@ -554,7 +609,7 @@ TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
 
 TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
   int32_t constant_value;
-  if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
+  if (ToInt32Constant(value, &constant_value) && Smi::IsValid(constant_value)) {
     return SmiConstant(constant_value);
   }
   TNode<Smi> smi =
@@ -564,7 +619,7 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
 
 TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
   intptr_t constant_value;
-  if (ToIntPtrConstant(value, constant_value)) {
+  if (ToIntPtrConstant(value, &constant_value)) {
     return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
   }
   return Signed(
@@ -799,11 +854,11 @@ TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) {
   Label return_result(this, &var_result);
 
   // Both {a} and {b} are Smis. Convert them to integers and multiply.
-  Node* lhs32 = SmiToInt32(a);
-  Node* rhs32 = SmiToInt32(b);
-  Node* pair = Int32MulWithOverflow(lhs32, rhs32);
+  TNode<Int32T> lhs32 = SmiToInt32(a);
+  TNode<Int32T> rhs32 = SmiToInt32(b);
+  auto pair = Int32MulWithOverflow(lhs32, rhs32);
 
-  Node* overflow = Projection(1, pair);
+  TNode<BoolT> overflow = Projection<1>(pair);
 
   // Check if the multiplication overflowed.
   Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
@@ -813,8 +868,8 @@ TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) {
     // If the answer is zero, we may need to return -0.0, depending on the
     // input.
     Label answer_zero(this), answer_not_zero(this);
-    Node* answer = Projection(0, pair);
-    Node* zero = Int32Constant(0);
+    TNode<Int32T> answer = Projection<0>(pair);
+    TNode<Int32T> zero = Int32Constant(0);
     Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero);
     BIND(&answer_not_zero);
     {
@@ -823,7 +878,7 @@ TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) {
     }
     BIND(&answer_zero);
     {
-      Node* or_result = Word32Or(lhs32, rhs32);
+      TNode<Word32T> or_result = Word32Or(lhs32, rhs32);
       Label if_should_be_negative_zero(this), if_should_be_zero(this);
       Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero,
              &if_should_be_zero);
@@ -843,7 +898,8 @@ TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) {
   {
     var_lhs_float64.Bind(SmiToFloat64(a));
     var_rhs_float64.Bind(SmiToFloat64(b));
-    Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
+    TNode<Float64T> value =
+        Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
     var_result = AllocateHeapNumberWithValue(value);
     Goto(&return_result);
   }
@@ -856,12 +912,12 @@ TNode<Smi> CodeStubAssembler::TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor,
                                         Label* bailout) {
   // Both {dividend} and {divisor} are Smis. Bail out to floating point
   // division if {divisor} is zero.
-  GotoIf(WordEqual(divisor, SmiConstant(0)), bailout);
+  GotoIf(TaggedEqual(divisor, SmiConstant(0)), bailout);
 
   // Do floating point division if {dividend} is zero and {divisor} is
   // negative.
   Label dividend_is_zero(this), dividend_is_not_zero(this);
-  Branch(WordEqual(dividend, SmiConstant(0)), &dividend_is_zero,
+  Branch(TaggedEqual(dividend, SmiConstant(0)), &dividend_is_zero,
          &dividend_is_not_zero);
 
   BIND(&dividend_is_zero);
@@ -911,6 +967,13 @@ TNode<Smi> CodeStubAssembler::SmiLexicographicCompare(TNode<Smi> x,
                             std::make_pair(MachineType::AnyTagged(), y)));
 }
 
+TNode<Int32T> CodeStubAssembler::TruncateWordToInt32(SloppyTNode<WordT> value) {
+  if (Is64()) {
+    return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value));
+  }
+  return ReinterpretCast<Int32T>(value);
+}
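+// Mirrors TruncateIntPtrToInt32 below but accepts an arbitrary WordT, so
+// callers holding an untyped machine word avoid an explicit ReinterpretCast.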
+
 TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
     SloppyTNode<IntPtrT> value) {
   if (Is64()) {
@@ -920,14 +983,18 @@ TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
 }
 
 TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<Object> a) {
-  return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
-                   IntPtrConstant(0));
+  STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
+  return Word32Equal(Word32And(TruncateIntPtrToInt32(BitcastTaggedToWord(a)),
+                               Int32Constant(kSmiTagMask)),
+                     Int32Constant(0));
 }
 
 TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
-  return WordEqual(
-      WordAnd(BitcastMaybeObjectToWord(a), IntPtrConstant(kSmiTagMask)),
-      IntPtrConstant(0));
+  STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
+  return Word32Equal(
+      Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(a)),
+                Int32Constant(kSmiTagMask)),
+      Int32Constant(0));
 }
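+// Sketch of the tag encoding these checks rely on (assuming kSmiTag == 0 and
+// kSmiTagMask == 1): the discriminating bit is the lowest bit of the tagged
+// word, so a 32-bit AND/compare suffices regardless of pointer width:
+//
+//   Smi:        ...xxx0  ->  (bits & kSmiTagMask) == 0
+//   HeapObject: ...xxx1  ->  (bits & kSmiTagMask) == 1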
 
 TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
@@ -935,21 +1002,34 @@ TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
   // can nonetheless use it to inspect the Smi tag. The assumption here is that
   // the GC will not exchange Smis for HeapObjects or vice-versa.
   TNode<IntPtrT> a_bitcast = BitcastTaggedSignedToWord(UncheckedCast<Smi>(a));
-  return WordNotEqual(WordAnd(a_bitcast, IntPtrConstant(kSmiTagMask)),
-                      IntPtrConstant(0));
+  STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
+  return Word32NotEqual(
+      Word32And(TruncateIntPtrToInt32(a_bitcast), Int32Constant(kSmiTagMask)),
+      Int32Constant(0));
 }
 
 TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
+#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+  return Word32Equal(
+      Word32And(
+          TruncateIntPtrToInt32(BitcastTaggedToWord(a)),
+          Uint32Constant(kSmiTagMask | static_cast<int32_t>(kSmiSignMask))),
+      Int32Constant(0));
+#else
   return WordEqual(WordAnd(BitcastTaggedToWord(a),
                            IntPtrConstant(kSmiTagMask | kSmiSignMask)),
                    IntPtrConstant(0));
+#endif
 }
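+// A positive Smi must have both the tag bit and the sign bit clear, hence
+// the combined (kSmiTagMask | kSmiSignMask) test against zero; the 32-bit
+// form applies whenever the Smi payload fits in the low word.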
 
 TNode<BoolT> CodeStubAssembler::WordIsAligned(SloppyTNode<WordT> word,
                                               size_t alignment) {
   DCHECK(base::bits::IsPowerOfTwo(alignment));
-  return WordEqual(IntPtrConstant(0),
-                   WordAnd(word, IntPtrConstant(alignment - 1)));
+  DCHECK_LE(alignment, kMaxUInt32);
+  return Word32Equal(
+      Int32Constant(0),
+      Word32And(TruncateWordToInt32(word),
+                Uint32Constant(static_cast<uint32_t>(alignment) - 1)));
 }
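+// Worked example, assuming alignment == 8 (mask 7):
+//   0x1000 & 7 == 0  ->  aligned
+//   0x1004 & 7 == 4  ->  not aligned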
 
 #if DEBUG
@@ -978,18 +1058,18 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
   CSA_SLOW_ASSERT(this, IsMap(receiver_map));
   VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map);
   Label loop_body(this, &var_map);
-  Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray);
-  Node* empty_slow_element_dictionary =
-      LoadRoot(RootIndex::kEmptySlowElementDictionary);
+  TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
+  TNode<NumberDictionary> empty_slow_element_dictionary =
+      EmptySlowElementDictionaryConstant();
   Goto(&loop_body);
 
   BIND(&loop_body);
   {
     Node* map = var_map.value();
-    Node* prototype = LoadMapPrototype(map);
+    TNode<HeapObject> prototype = LoadMapPrototype(map);
     GotoIf(IsNull(prototype), definitely_no_elements);
-    Node* prototype_map = LoadMap(prototype);
-    TNode<Int32T> prototype_instance_type = LoadMapInstanceType(prototype_map);
+    TNode<Map> prototype_map = LoadMap(prototype);
+    TNode<Uint16T> prototype_instance_type = LoadMapInstanceType(prototype_map);
 
     // Pessimistically assume elements if a Proxy, Special API Object,
     // or JSPrimitiveWrapper is found on the prototype chain. After this
@@ -1012,25 +1092,25 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
 
     BIND(&if_notcustom);
     {
-      Node* prototype_elements = LoadElements(prototype);
+      TNode<FixedArrayBase> prototype_elements = LoadElements(CAST(prototype));
       var_map.Bind(prototype_map);
-      GotoIf(WordEqual(prototype_elements, empty_fixed_array), &loop_body);
-      Branch(WordEqual(prototype_elements, empty_slow_element_dictionary),
+      GotoIf(TaggedEqual(prototype_elements, empty_fixed_array), &loop_body);
+      Branch(TaggedEqual(prototype_elements, empty_slow_element_dictionary),
              &loop_body, possibly_elements);
     }
   }
 }
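+// The loop above walks the prototype chain and only concludes "definitely no
+// elements" if every prototype's backing store is one of the canonical
+// empties (empty_fixed_array or empty_slow_element_dictionary), compared by
+// identity via TaggedEqual.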
 
-void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true,
-                                           Label* if_false) {
+void CodeStubAssembler::BranchIfJSReceiver(SloppyTNode<Object> object,
+                                           Label* if_true, Label* if_false) {
   GotoIf(TaggedIsSmi(object), if_false);
   STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
-  Branch(IsJSReceiver(object), if_true, if_false);
+  Branch(IsJSReceiver(CAST(object)), if_true, if_false);
 }
 
 void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
 #ifdef V8_ENABLE_FORCE_SLOW_PATH
-  Node* const force_slow_path_addr =
+  TNode<ExternalReference> const force_slow_path_addr =
       ExternalConstant(ExternalReference::force_slow_path(isolate()));
   Node* const force_slow = Load(MachineType::Uint8(), force_slow_path_addr);
 
@@ -1065,7 +1145,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
 
   intptr_t size_in_bytes_constant;
   bool size_in_bytes_is_constant = false;
-  if (ToIntPtrConstant(size_in_bytes, size_in_bytes_constant)) {
+  if (ToIntPtrConstant(size_in_bytes, &size_in_bytes_constant)) {
     size_in_bytes_is_constant = true;
     CHECK(Internals::IsValidSmi(size_in_bytes_constant));
     CHECK_GT(size_in_bytes_constant, 0);
@@ -1155,7 +1235,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
 
       // Store a filler and increase the address by 4.
       StoreNoWriteBarrier(MachineRepresentation::kTagged, top,
-                          LoadRoot(RootIndex::kOnePointerFillerMap));
+                          OnePointerFillerMapConstant());
       address = IntPtrAdd(UncheckedCast<IntPtrT>(top), IntPtrConstant(4));
       Goto(&next);
 
@@ -1224,7 +1304,7 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
       !new_space || !allow_large_objects || FLAG_young_generation_large_objects;
   if (!allow_large_objects) {
     intptr_t size_constant;
-    if (ToIntPtrConstant(size_in_bytes, size_constant)) {
+    if (ToIntPtrConstant(size_in_bytes, &size_constant)) {
       CHECK_LE(size_constant, kMaxRegularHeapObjectSize);
     } else {
       CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
@@ -1294,12 +1374,13 @@ TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) {
                                 IntPtrConstant(kMaxRegularHeapObjectSize));
 }
 
-void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
+void CodeStubAssembler::BranchIfToBooleanIsTrue(SloppyTNode<Object> value,
+                                                Label* if_true,
                                                 Label* if_false) {
   Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred),
       if_bigint(this, Label::kDeferred);
   // Rule out false {value}.
-  GotoIf(WordEqual(value, FalseConstant()), if_false);
+  GotoIf(TaggedEqual(value, FalseConstant()), if_false);
 
   // Check if {value} is a Smi or a HeapObject.
   Branch(TaggedIsSmi(value), &if_smi, &if_notsmi);
@@ -1312,11 +1393,13 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
 
   BIND(&if_notsmi);
   {
+    TNode<HeapObject> value_heapobject = CAST(value);
+
     // Check if {value} is the empty string.
-    GotoIf(IsEmptyString(value), if_false);
+    GotoIf(IsEmptyString(value_heapobject), if_false);
 
     // The {value} is a HeapObject, load its map.
-    Node* value_map = LoadMap(value);
+    TNode<Map> value_map = LoadMap(value_heapobject);
 
     // Only null, undefined and document.all have the undetectable bit set,
     // so we can return false immediately when that bit is set.
@@ -1325,13 +1408,13 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
     // We still need to handle numbers specially, but all other {value}s
     // that make it here yield true.
     GotoIf(IsHeapNumberMap(value_map), &if_heapnumber);
-    Branch(IsBigInt(value), &if_bigint, if_true);
+    Branch(IsBigInt(value_heapobject), &if_bigint, if_true);
 
     BIND(&if_heapnumber);
     {
       // Load the floating point value of {value}.
-      Node* value_value = LoadObjectField(value, HeapNumber::kValueOffset,
-                                          MachineType::Float64());
+      Node* value_value = LoadObjectField(
+          value_heapobject, HeapNumber::kValueOffset, MachineType::Float64());
 
       // Check if the floating point {value} is neither 0.0, -0.0 nor NaN.
       Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
@@ -1349,7 +1432,7 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
 }
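+// Net effect of the branches above: false, Smi 0, the empty string,
+// undetectable maps (null, undefined, document.all) and HeapNumbers whose
+// absolute value is not greater than 0.0 (i.e. +/-0.0 and NaN) reach
+// {if_false}; BigInts take a deferred path; everything else is truthy.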
 
 Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType type) {
-  Node* frame_pointer = LoadParentFramePointer();
+  TNode<RawPtrT> frame_pointer = LoadParentFramePointer();
   return Load(type, frame_pointer, IntPtrConstant(offset));
 }
 
@@ -1382,12 +1465,12 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
         LoadObjectField(object, offset, MachineType::Int32()));
   } else {
     return SmiToIntPtr(
-        LoadObjectField(object, offset, MachineType::AnyTagged()));
+        LoadObjectField(object, offset, MachineType::TaggedSigned()));
   }
 }
 
-TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
-                                                                 int offset) {
+TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
+    SloppyTNode<HeapObject> object, int offset) {
   if (SmiValuesAre32Bits()) {
 #if V8_TARGET_LITTLE_ENDIAN
     offset += 4;
@@ -1396,43 +1479,14 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
         LoadObjectField(object, offset, MachineType::Int32()));
   } else {
     return SmiToInt32(
-        LoadObjectField(object, offset, MachineType::AnyTagged()));
-  }
-}
-
-TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
-  if (SmiValuesAre32Bits()) {
-#if V8_TARGET_LITTLE_ENDIAN
-    index += 4;
-#endif
-    return ChangeInt32ToIntPtr(
-        Load(MachineType::Int32(), base, IntPtrConstant(index)));
-  } else {
-    return SmiToIntPtr(
-        Load(MachineType::AnyTagged(), base, IntPtrConstant(index)));
-  }
-}
-
-void CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
-  if (SmiValuesAre32Bits()) {
-    int zero_offset = offset + 4;
-    int payload_offset = offset;
-#if V8_TARGET_LITTLE_ENDIAN
-    std::swap(zero_offset, payload_offset);
-#endif
-    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
-                        IntPtrConstant(zero_offset), Int32Constant(0));
-    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
-                        IntPtrConstant(payload_offset),
-                        TruncateInt64ToInt32(value));
-  } else {
-    StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
-                        IntPtrConstant(offset), SmiTag(value));
+        LoadObjectField(object, offset, MachineType::TaggedSigned()));
   }
 }
 
 TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
-    SloppyTNode<HeapNumber> object) {
+    SloppyTNode<HeapObject> object) {
+  CSA_ASSERT(this, Word32Or(IsHeapNumber(object), IsOddball(object)));
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
   return TNode<Float64T>::UncheckedCast(LoadObjectField(
       object, HeapNumber::kValueOffset, MachineType::Float64()));
 }
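+// The STATIC_ASSERT above is what makes the widened HeapObject parameter
+// safe: Oddball::kToNumberRawOffset aliases HeapNumber::kValueOffset, so one
+// unchecked float64 load serves both shapes.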
@@ -1444,6 +1498,8 @@ TNode<Map> CodeStubAssembler::GetStructMap(InstanceType instance_type) {
 }
 
 TNode<Map> CodeStubAssembler::LoadMap(SloppyTNode<HeapObject> object) {
+  // TODO(v8:9637): Do a proper LoadObjectField<Map> and remove UncheckedCast
+  // when we can avoid making Large code objects due to TNodification.
   return UncheckedCast<Map>(LoadObjectField(object, HeapObject::kMapOffset,
                                             MachineType::TaggedPointer()));
 }
@@ -1472,6 +1528,34 @@ TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType(
       [=]() { return DoesntHaveInstanceType(any_tagged, type); });
 }
 
+TNode<BoolT> CodeStubAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
+  TNode<BoolT> is_special =
+      IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+  uint32_t mask =
+      Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
+  USE(mask);
+  // Interceptors or access checks imply special receiver.
+  CSA_ASSERT(this,
+             SelectConstant<BoolT>(IsSetWord32(LoadMapBitField(map), mask),
+                                   is_special, Int32TrueConstant()));
+  return is_special;
+}
+
+TNode<Word32T> CodeStubAssembler::IsStringWrapperElementsKind(TNode<Map> map) {
+  TNode<Int32T> kind = LoadMapElementsKind(map);
+  return Word32Or(
+      Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)),
+      Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS)));
+}
+
+void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map,
+                                                   Label* if_slow) {
+  GotoIf(IsStringWrapperElementsKind(map), if_slow);
+  GotoIf(IsSpecialReceiverMap(map), if_slow);
+  GotoIf(IsDictionaryMap(map), if_slow);
+}
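+// Hypothetical usage sketch: bail to a deferred slow path before attempting
+// fast property access on a receiver:
+//
+//   Label slow(this, Label::kDeferred);
+//   GotoIfMapHasSlowProperties(LoadMap(receiver), &slow);
+//   // ... fast path assuming linear descriptor lookup ...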
+
 TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
     SloppyTNode<JSObject> object) {
   CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
@@ -1503,11 +1587,12 @@ TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectWithLength(
 
 TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
     SloppyTNode<JSArray> array) {
-  TNode<Object> length = LoadJSArrayLength(array);
+  TNode<Number> length = LoadJSArrayLength(array);
   CSA_ASSERT(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)),
-                            IsElementsKindInRange(LoadElementsKind(array),
-                                                  PACKED_SEALED_ELEMENTS,
-                                                  HOLEY_FROZEN_ELEMENTS)));
+                            IsElementsKindInRange(
+                                LoadElementsKind(array),
+                                FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND,
+                                LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)));
   // JSArray length is always a positive Smi for fast arrays.
   CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
   return UncheckedCast<Smi>(length);
@@ -1532,7 +1617,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadFeedbackVectorLength(
 
 TNode<Smi> CodeStubAssembler::LoadWeakFixedArrayLength(
     TNode<WeakFixedArray> array) {
-  return CAST(LoadObjectField(array, WeakFixedArray::kLengthOffset));
+  return LoadObjectField<Smi>(array, WeakFixedArray::kLengthOffset);
 }
 
 TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength(
@@ -1547,6 +1632,12 @@ TNode<Int32T> CodeStubAssembler::LoadNumberOfDescriptors(
                       MachineType::Int16()));
 }
 
+TNode<Int32T> CodeStubAssembler::LoadNumberOfOwnDescriptors(TNode<Map> map) {
+  TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+  return UncheckedCast<Int32T>(
+      DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3));
+}
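+// NumberOfOwnDescriptorsBits is packed into bit_field3, so the count is a
+// shift-and-mask decode of that word rather than a separate map field load.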
+
 TNode<Int32T> CodeStubAssembler::LoadMapBitField(SloppyTNode<Map> map) {
   CSA_SLOW_ASSERT(this, IsMap(map));
   return UncheckedCast<Int32T>(
@@ -1566,13 +1657,12 @@ TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(SloppyTNode<Map> map) {
 }
 
 TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
-  return UncheckedCast<Uint16T>(
-      LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16()));
+  return LoadObjectField<Uint16T>(map, Map::kInstanceTypeOffset);
 }
 
 TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(SloppyTNode<Map> map) {
   CSA_SLOW_ASSERT(this, IsMap(map));
-  Node* bit_field2 = LoadMapBitField2(map);
+  TNode<Int32T> bit_field2 = LoadMapBitField2(map);
   return Signed(DecodeWord32<Map::ElementsKindBits>(bit_field2));
 }
 
@@ -1584,12 +1674,12 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind(
 TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(
     SloppyTNode<Map> map) {
   CSA_SLOW_ASSERT(this, IsMap(map));
-  return CAST(LoadObjectField(map, Map::kInstanceDescriptorsOffset));
+  return LoadObjectField<DescriptorArray>(map, Map::kInstanceDescriptorsOffset);
 }
 
 TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
   CSA_SLOW_ASSERT(this, IsMap(map));
-  return CAST(LoadObjectField(map, Map::kPrototypeOffset));
+  return LoadObjectField<HeapObject>(map, Map::kPrototypeOffset);
 }
 
 TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo(
@@ -1604,8 +1694,8 @@ TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo(
                       &prototype_info);
 
   BIND(&if_strong_heap_object);
-  GotoIfNot(WordEqual(LoadMap(CAST(prototype_info.value())),
-                      LoadRoot(RootIndex::kPrototypeInfoMap)),
+  GotoIfNot(TaggedEqual(LoadMap(CAST(prototype_info.value())),
+                        PrototypeInfoMapConstant()),
             if_no_proto_info);
   return CAST(prototype_info.value());
 }
@@ -1647,7 +1737,7 @@ TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
   BIND(&loop);
   {
     GotoIf(TaggedIsSmi(result.value()), &done);
-    Node* is_map_type =
+    TNode<BoolT> is_map_type =
         InstanceTypeEqual(LoadInstanceType(CAST(result.value())), MAP_TYPE);
     GotoIfNot(is_map_type, &done);
     result = LoadObjectField(CAST(result.value()),
@@ -1658,9 +1748,9 @@ TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
   return result.value();
 }
 
-Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
+TNode<WordT> CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
   CSA_SLOW_ASSERT(this, IsMap(map));
-  Node* bit_field3 = LoadMapBitField3(map);
+  TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
   return DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
 }
 
@@ -1697,7 +1787,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
 
   TNode<HeapObject> properties =
       TNode<HeapObject>::UncheckedCast(properties_or_hash);
-  TNode<Int32T> properties_instance_type = LoadInstanceType(properties);
+  TNode<Uint16T> properties_instance_type = LoadInstanceType(properties);
 
   GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE),
          &if_property_array);
@@ -1818,9 +1908,10 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
 }
 
 TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) {
-  return WordEqual(WordAnd(BitcastMaybeObjectToWord(value),
-                           IntPtrConstant(kHeapObjectTagMask)),
-                   IntPtrConstant(kHeapObjectTag));
+  return Word32Equal(
+      Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
+                Int32Constant(kHeapObjectTagMask)),
+      Int32Constant(kHeapObjectTag));
 }
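+// Assuming kHeapObjectTag == 1 and kWeakHeapObjectTag == 3: strong
+// references carry tag bits 01 and weak ones 11, so masking with
+// kHeapObjectTagMask and comparing against kHeapObjectTag in 32-bit
+// arithmetic cleanly separates the two.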
 
 TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong(
@@ -1862,22 +1953,41 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
 
 TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(TNode<MaybeObject> object,
                                                   TNode<Object> value) {
+#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
+  STATIC_ASSERT(kTaggedSize == kInt32Size);
+  return Word32Equal(
+      Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)),
+                Uint32Constant(
+                    static_cast<uint32_t>(~kWeakHeapObjectMask & kMaxUInt32))),
+      TruncateWordToInt32(BitcastTaggedToWord(value)));
+#else
   return WordEqual(WordAnd(BitcastMaybeObjectToWord(object),
                            IntPtrConstant(~kWeakHeapObjectMask)),
                    BitcastTaggedToWord(value));
+#endif
 }
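+// Clearing the weak bit (~kWeakHeapObjectMask) turns a weak reference back
+// into the strong pointer it wraps, which can then be compared with {value}
+// directly; the 32-bit variant applies when kTaggedSize == kInt32Size.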
 
 TNode<BoolT> CodeStubAssembler::IsStrongReferenceTo(TNode<MaybeObject> object,
                                                     TNode<Object> value) {
-  return WordEqual(BitcastMaybeObjectToWord(object),
-                   BitcastTaggedToWord(value));
+  return TaggedEqual(BitcastWordToTagged(BitcastMaybeObjectToWord(object)),
+                     value);
 }
 
 TNode<BoolT> CodeStubAssembler::IsNotWeakReferenceTo(TNode<MaybeObject> object,
                                                      TNode<Object> value) {
+#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
+  return Word32NotEqual(
+      Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)),
+                Uint32Constant(
+                    static_cast<uint32_t>(~kWeakHeapObjectMask & kMaxUInt32))),
+      TruncateWordToInt32(BitcastTaggedToWord(value)));
+#else
   return WordNotEqual(WordAnd(BitcastMaybeObjectToWord(object),
                               IntPtrConstant(~kWeakHeapObjectMask)),
                       BitcastTaggedToWord(value));
+#endif
 }
 
 TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) {
@@ -2019,7 +2129,7 @@ TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayBackingStore(
   Node* external_pointer =
       LoadObjectField(typed_array, JSTypedArray::kExternalPointerOffset,
                       MachineType::Pointer());
-  Node* base_pointer =
+  TNode<Object> base_pointer =
       LoadObjectField(typed_array, JSTypedArray::kBasePointerOffset);
   return UncheckedCast<RawPtrT>(
       IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
@@ -2062,12 +2172,12 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low,
   Label high_zero(this), negative(this), allocate_one_digit(this),
       allocate_two_digits(this), if_zero(this), done(this);
 
-  GotoIf(WordEqual(var_high.value(), IntPtrConstant(0)), &high_zero);
+  GotoIf(IntPtrEqual(var_high.value(), IntPtrConstant(0)), &high_zero);
   Branch(IntPtrLessThan(var_high.value(), IntPtrConstant(0)), &negative,
          &allocate_two_digits);
 
   BIND(&high_zero);
-  Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &if_zero,
+  Branch(IntPtrEqual(var_low.value(), IntPtrConstant(0)), &if_zero,
          &allocate_one_digit);
 
   BIND(&negative);
@@ -2078,7 +2188,7 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low,
     // of the carry bit (which is 1 iff low != 0).
     var_high = IntPtrSub(IntPtrConstant(0), var_high.value());
     Label carry(this), no_carry(this);
-    Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry);
+    Branch(IntPtrEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry);
     BIND(&carry);
     var_high = IntPtrSub(var_high.value(), IntPtrConstant(1));
     Goto(&no_carry);
@@ -2086,8 +2196,8 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low,
     var_low = IntPtrSub(IntPtrConstant(0), var_low.value());
     // var_high was non-zero going into this block, but subtracting the
     // carry bit from it could bring us back onto the "one digit" path.
-    Branch(WordEqual(var_high.value(), IntPtrConstant(0)), &allocate_one_digit,
-           &allocate_two_digits);
+    Branch(IntPtrEqual(var_high.value(), IntPtrConstant(0)),
+           &allocate_one_digit, &allocate_two_digits);
   }
 
   BIND(&allocate_one_digit);
@@ -2123,7 +2233,7 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) {
   DCHECK(Is64());
   TVARIABLE(BigInt, var_result);
   Label done(this), if_positive(this), if_negative(this), if_zero(this);
-  GotoIf(WordEqual(value, IntPtrConstant(0)), &if_zero);
+  GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
   var_result = AllocateRawBigInt(IntPtrConstant(1));
   Branch(IntPtrGreaterThan(value, IntPtrConstant(0)), &if_positive,
          &if_negative);
@@ -2192,14 +2302,14 @@ TNode<BigInt> CodeStubAssembler::BigIntFromUint32Pair(TNode<UintPtrT> low,
   TVARIABLE(BigInt, var_result);
   Label high_zero(this), if_zero(this), done(this);
 
-  GotoIf(WordEqual(high, IntPtrConstant(0)), &high_zero);
+  GotoIf(IntPtrEqual(high, IntPtrConstant(0)), &high_zero);
   var_result = AllocateBigInt(IntPtrConstant(2));
   StoreBigIntDigit(var_result.value(), 0, low);
   StoreBigIntDigit(var_result.value(), 1, high);
   Goto(&done);
 
   BIND(&high_zero);
-  GotoIf(WordEqual(low, IntPtrConstant(0)), &if_zero);
+  GotoIf(IntPtrEqual(low, IntPtrConstant(0)), &if_zero);
   var_result = AllocateBigInt(IntPtrConstant(1));
   StoreBigIntDigit(var_result.value(), 0, low);
   Goto(&done);
@@ -2216,7 +2326,7 @@ TNode<BigInt> CodeStubAssembler::BigIntFromUint64(TNode<UintPtrT> value) {
   DCHECK(Is64());
   TVARIABLE(BigInt, var_result);
   Label done(this), if_zero(this);
-  GotoIf(WordEqual(value, IntPtrConstant(0)), &if_zero);
+  GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
   var_result = AllocateBigInt(IntPtrConstant(1));
   StoreBigIntDigit(var_result.value(), 0, value);
   Goto(&done);
@@ -2350,8 +2460,8 @@ TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
   CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
   int32_t header_size =
       FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
-  Node* offset = ElementOffsetFromIndex(slot_index_node, HOLEY_ELEMENTS,
-                                        parameter_mode, header_size);
+  TNode<IntPtrT> offset = ElementOffsetFromIndex(
+      slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size);
   CSA_SLOW_ASSERT(
       this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)),
                              FeedbackVector::kHeaderSize));
@@ -2371,14 +2481,14 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
 #endif
   int32_t header_size = array_header_size + additional_offset - kHeapObjectTag +
                         endian_correction;
-  Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
-                                        parameter_mode, header_size);
+  TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
+                                                 parameter_mode, header_size);
   CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(object),
                                     array_header_size + endian_correction));
   if (SmiValuesAre32Bits()) {
     return UncheckedCast<Int32T>(Load(MachineType::Int32(), object, offset));
   } else {
-    return SmiToInt32(Load(MachineType::AnyTagged(), object, offset));
+    return SmiToInt32(Load(MachineType::TaggedSigned(), object, offset));
   }
 }
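+// With 32-bit Smis in a 64-bit tagged slot the payload occupies the upper
+// half of the word, so on little-endian targets endian_correction bumps the
+// byte offset by 4 to address it; big-endian targets read at offset 0.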
 
@@ -2422,20 +2532,21 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
   Label done(this), if_packed(this), if_holey(this), if_packed_double(this),
       if_holey_double(this), if_dictionary(this, Label::kDeferred);
 
-  int32_t kinds[] = {// Handled by if_packed.
-                     PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
-                     PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
-                     // Handled by if_holey.
-                     HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS, HOLEY_SEALED_ELEMENTS,
-                     HOLEY_FROZEN_ELEMENTS,
-                     // Handled by if_packed_double.
-                     PACKED_DOUBLE_ELEMENTS,
-                     // Handled by if_holey_double.
-                     HOLEY_DOUBLE_ELEMENTS};
+  int32_t kinds[] = {
+      // Handled by if_packed.
+      PACKED_SMI_ELEMENTS, PACKED_ELEMENTS, PACKED_NONEXTENSIBLE_ELEMENTS,
+      PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
+      // Handled by if_holey.
+      HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS, HOLEY_NONEXTENSIBLE_ELEMENTS,
+      HOLEY_SEALED_ELEMENTS, HOLEY_FROZEN_ELEMENTS,
+      // Handled by if_packed_double.
+      PACKED_DOUBLE_ELEMENTS,
+      // Handled by if_holey_double.
+      HOLEY_DOUBLE_ELEMENTS};
   Label* labels[] = {// PACKED_{SMI,}_ELEMENTS
-                     &if_packed, &if_packed, &if_packed, &if_packed,
+                     &if_packed, &if_packed, &if_packed, &if_packed, &if_packed,
                      // HOLEY_{SMI,}_ELEMENTS
-                     &if_holey, &if_holey, &if_holey, &if_holey,
+                     &if_holey, &if_holey, &if_holey, &if_holey, &if_holey,
                      // PACKED_DOUBLE_ELEMENTS
                      &if_packed_double,
                      // HOLEY_DOUBLE_ELEMENTS
@@ -2451,7 +2562,7 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
   BIND(&if_holey);
   {
     var_result = LoadFixedArrayElement(CAST(elements), index);
-    Branch(WordEqual(var_result.value(), TheHoleConstant()), if_hole, &done);
+    Branch(TaggedEqual(var_result.value(), TheHoleConstant()), if_hole, &done);
   }
 
   BIND(&if_packed_double);
@@ -2489,11 +2600,11 @@ TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
     // compiler is able to fold addition of already complex |offset| with
     // |kIeeeDoubleExponentWordOffset| into one addressing mode.
     if (Is64()) {
-      Node* element = Load(MachineType::Uint64(), base, offset);
+      TNode<Uint64T> element = Load<Uint64T>(base, offset);
       GotoIf(Word64Equal(element, Int64Constant(kHoleNanInt64)), if_hole);
     } else {
-      Node* element_upper = Load(
-          MachineType::Uint32(), base,
+      TNode<Uint32T> element_upper = Load<Uint32T>(
+          base,
           IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset)));
       GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
              if_hole);
@@ -2515,15 +2626,15 @@ TNode<Object> CodeStubAssembler::LoadContextElement(
 
 TNode<Object> CodeStubAssembler::LoadContextElement(
     SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) {
-  Node* offset = ElementOffsetFromIndex(
+  TNode<IntPtrT> offset = ElementOffsetFromIndex(
       slot_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::SlotOffset(0));
   return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
 }
 
 TNode<Object> CodeStubAssembler::LoadContextElement(TNode<Context> context,
                                                     TNode<Smi> slot_index) {
-  Node* offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS,
-                                        SMI_PARAMETERS, Context::SlotOffset(0));
+  TNode<IntPtrT> offset = ElementOffsetFromIndex(
+      slot_index, PACKED_ELEMENTS, SMI_PARAMETERS, Context::SlotOffset(0));
   return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
 }
 
@@ -2537,8 +2648,8 @@ void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
 void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
                                             SloppyTNode<IntPtrT> slot_index,
                                             SloppyTNode<Object> value) {
-  Node* offset = IntPtrAdd(TimesTaggedSize(slot_index),
-                           IntPtrConstant(Context::SlotOffset(0)));
+  TNode<IntPtrT> offset = IntPtrAdd(TimesTaggedSize(slot_index),
+                                    IntPtrConstant(Context::SlotOffset(0)));
   Store(context, offset, value);
 }
 
@@ -2549,15 +2660,15 @@ void CodeStubAssembler::StoreContextElementNoWriteBarrier(
                       IntPtrConstant(offset), value);
 }
 
-TNode<Context> CodeStubAssembler::LoadNativeContext(
+TNode<NativeContext> CodeStubAssembler::LoadNativeContext(
     SloppyTNode<Context> context) {
-  return UncheckedCast<Context>(
+  return UncheckedCast<NativeContext>(
       LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX));
 }
 
 TNode<Context> CodeStubAssembler::LoadModuleContext(
     SloppyTNode<Context> context) {
-  Node* module_map = LoadRoot(RootIndex::kModuleContextMap);
+  TNode<Map> module_map = ModuleContextMapConstant();
   Variable cur_context(this, MachineRepresentation::kTaggedPointer);
   cur_context.Bind(context);
 
@@ -2571,7 +2682,8 @@ TNode<Context> CodeStubAssembler::LoadModuleContext(
   BIND(&context_search);
   {
     CSA_ASSERT(this, Word32BinaryNot(IsNativeContext(cur_context.value())));
-    GotoIf(WordEqual(LoadMap(cur_context.value()), module_map), &context_found);
+    GotoIf(TaggedEqual(LoadMap(cur_context.value()), module_map),
+           &context_found);
 
     cur_context.Bind(
         LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
@@ -2583,17 +2695,16 @@ TNode<Context> CodeStubAssembler::LoadModuleContext(
 }
 
 TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
-    SloppyTNode<Int32T> kind, SloppyTNode<Context> native_context) {
+    SloppyTNode<Int32T> kind, SloppyTNode<NativeContext> native_context) {
   CSA_ASSERT(this, IsFastElementsKind(kind));
-  CSA_ASSERT(this, IsNativeContext(native_context));
-  Node* offset = IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
-                           ChangeInt32ToIntPtr(kind));
+  TNode<IntPtrT> offset =
+      IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
+                ChangeInt32ToIntPtr(kind));
   return UncheckedCast<Map>(LoadContextElement(native_context, offset));
 }
 
 TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
-    ElementsKind kind, SloppyTNode<Context> native_context) {
-  CSA_ASSERT(this, IsNativeContext(native_context));
+    ElementsKind kind, SloppyTNode<NativeContext> native_context) {
   return UncheckedCast<Map>(
       LoadContextElement(native_context, Context::ArrayMapIndex(kind)));
 }
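+// Both overloads index the per-ElementsKind JSArray map table in the native
+// context; the dynamic variant computes the slot as, assuming the maps sit
+// in consecutive slots starting at FIRST_JS_ARRAY_MAP_SLOT:
+//
+//   slot = Context::FIRST_JS_ARRAY_MAP_SLOT + static_cast<intptr_t>(kind)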
@@ -2601,7 +2712,8 @@ TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
 TNode<BoolT> CodeStubAssembler::IsGeneratorFunction(
     TNode<JSFunction> function) {
   TNode<SharedFunctionInfo> const shared_function_info =
-      CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
+      LoadObjectField<SharedFunctionInfo>(
+          function, JSFunction::kSharedFunctionInfoOffset);
 
   TNode<Uint32T> const function_kind =
       DecodeWord32<SharedFunctionInfo::FunctionKindBits>(LoadObjectField(
@@ -2646,22 +2758,20 @@ void CodeStubAssembler::GotoIfPrototypeRequiresRuntimeLookup(
          runtime);
 }
 
-Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
+Node* CodeStubAssembler::LoadJSFunctionPrototype(TNode<JSFunction> function,
                                                  Label* if_bailout) {
-  CSA_ASSERT(this, TaggedIsNotSmi(function));
-  CSA_ASSERT(this, IsJSFunction(function));
   CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(function)));
   CSA_ASSERT(this, IsClearWord32<Map::HasNonInstancePrototypeBit>(
                        LoadMapBitField(LoadMap(function))));
-  Node* proto_or_map =
-      LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
+  TNode<HeapObject> proto_or_map = LoadObjectField<HeapObject>(
+      function, JSFunction::kPrototypeOrInitialMapOffset);
   GotoIf(IsTheHole(proto_or_map), if_bailout);
 
-  VARIABLE(var_result, MachineRepresentation::kTagged, proto_or_map);
+  TVARIABLE(HeapObject, var_result, proto_or_map);
   Label done(this, &var_result);
   GotoIfNot(IsMap(proto_or_map), &done);
 
-  var_result.Bind(LoadMapPrototype(proto_or_map));
+  var_result = LoadMapPrototype(CAST(proto_or_map));
   Goto(&done);
 
   BIND(&done);
@@ -2670,15 +2780,15 @@ Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
 
 TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
     SloppyTNode<SharedFunctionInfo> shared) {
-  Node* function_data =
+  TNode<Object> function_data =
       LoadObjectField(shared, SharedFunctionInfo::kFunctionDataOffset);
 
   VARIABLE(var_result, MachineRepresentation::kTagged, function_data);
   Label done(this, &var_result);
 
-  GotoIfNot(HasInstanceType(function_data, INTERPRETER_DATA_TYPE), &done);
-  Node* bytecode_array =
-      LoadObjectField(function_data, InterpreterData::kBytecodeArrayOffset);
+  GotoIfNot(HasInstanceType(CAST(function_data), INTERPRETER_DATA_TYPE), &done);
+  TNode<Object> bytecode_array = LoadObjectField(
+      CAST(function_data), InterpreterData::kBytecodeArrayOffset);
   var_result.Bind(bytecode_array);
   Goto(&done);
 
@@ -2699,12 +2809,6 @@ void CodeStubAssembler::StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
                                  MachineRepresentation::kFloat64);
 }
 
-void CodeStubAssembler::StoreMutableHeapNumberValue(
-    SloppyTNode<MutableHeapNumber> object, SloppyTNode<Float64T> value) {
-  StoreObjectFieldNoWriteBarrier(object, MutableHeapNumber::kValueOffset, value,
-                                 MachineRepresentation::kFloat64);
-}
-
 void CodeStubAssembler::StoreObjectField(Node* object, int offset,
                                          Node* value) {
   DCHECK_NE(HeapObject::kMapOffset, offset);  // Use StoreMap instead.
@@ -2716,7 +2820,7 @@ void CodeStubAssembler::StoreObjectField(Node* object, int offset,
 void CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
                                          Node* value) {
   int const_offset;
-  if (ToInt32Constant(offset, const_offset)) {
+  if (ToInt32Constant(offset, &const_offset)) {
     StoreObjectField(object, const_offset, value);
   } else {
     Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
@@ -2744,7 +2848,7 @@ void CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
     Node* object, SloppyTNode<IntPtrT> offset, Node* value,
     MachineRepresentation rep) {
   int const_offset;
-  if (ToInt32Constant(offset, const_offset)) {
+  if (ToInt32Constant(offset, &const_offset)) {
     return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep);
   }
   StoreNoWriteBarrier(rep, object,
@@ -2776,16 +2880,6 @@ void CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
   }
 }
 
-void CodeStubAssembler::StoreJSArrayLength(TNode<JSArray> array,
-                                           TNode<Smi> length) {
-  StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
-}
-
-void CodeStubAssembler::StoreElements(TNode<Object> object,
-                                      TNode<FixedArrayBase> elements) {
-  StoreObjectField(object, JSObject::kElementsOffset, elements);
-}
-
 void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
     Node* object, Node* index_node, Node* value, WriteBarrierMode barrier_mode,
     int additional_offset, ParameterMode parameter_mode) {
@@ -2801,8 +2895,8 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
                 static_cast<int>(PropertyArray::kHeaderSize));
   int header_size =
       FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
-  Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
-                                        parameter_mode, header_size);
+  TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
+                                                 parameter_mode, header_size);
   STATIC_ASSERT(static_cast<int>(FixedArrayBase::kLengthOffset) ==
                 static_cast<int>(WeakFixedArray::kLengthOffset));
   STATIC_ASSERT(static_cast<int>(FixedArrayBase::kLengthOffset) ==
@@ -2846,7 +2940,7 @@ void CodeStubAssembler::StoreFixedDoubleArrayElement(
   if (NeedsBoundsCheck(check_bounds)) {
     FixedArrayBoundsCheck(object, index_node, 0, parameter_mode);
   }
-  Node* offset =
+  TNode<IntPtrT> offset =
       ElementOffsetFromIndex(index_node, PACKED_DOUBLE_ELEMENTS, parameter_mode,
                              FixedArray::kHeaderSize - kHeapObjectTag);
   MachineRepresentation rep = MachineRepresentation::kFloat64;
@@ -2869,8 +2963,8 @@ void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
          barrier_mode == UPDATE_WRITE_BARRIER);
   int header_size =
       FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
-  Node* offset = ElementOffsetFromIndex(slot_index_node, HOLEY_ELEMENTS,
-                                        parameter_mode, header_size);
+  TNode<IntPtrT> offset = ElementOffsetFromIndex(
+      slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size);
   // Check that slot_index_node <= object.length.
   CSA_ASSERT(this,
              IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)),
@@ -2899,8 +2993,7 @@ void CodeStubAssembler::EnsureArrayLengthWritable(TNode<Map> map,
 #ifdef DEBUG
   TNode<Name> maybe_length =
       LoadKeyByDescriptorEntry(descriptors, length_index);
-  CSA_ASSERT(this,
-             WordEqual(maybe_length, LoadRoot(RootIndex::klength_string)));
+  CSA_ASSERT(this, TaggedEqual(maybe_length, LengthStringConstant()));
 #endif
 
   TNode<Uint32T> details =
@@ -2988,7 +3081,7 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
   {
     TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
     var_tagged_length = length;
-    Node* diff = SmiSub(length, LoadFastJSArrayLength(array));
+    TNode<Smi> diff = SmiSub(length, LoadFastJSArrayLength(array));
     StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
     *arg_index = IntPtrAdd(arg_index->value(), SmiUntag(diff));
     Goto(bailout);
@@ -3033,13 +3126,13 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
                        var_length.value(), value);
   Increment(&var_length, 1, mode);
 
-  Node* length = ParameterToTagged(var_length.value(), mode);
+  TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
   StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
 }
 
 Node* CodeStubAssembler::AllocateCellWithValue(Node* value,
                                                WriteBarrierMode mode) {
-  Node* result = Allocate(Cell::kSize, kNone);
+  TNode<HeapObject> result = Allocate(Cell::kSize, kNone);
   StoreMapNoWriteBarrier(result, RootIndex::kCellMap);
   StoreCellValue(result, value, mode);
   return result;
@@ -3063,7 +3156,7 @@ void CodeStubAssembler::StoreCellValue(Node* cell, Node* value,
 }
 
 TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber() {
-  Node* result = Allocate(HeapNumber::kSize, kNone);
+  TNode<HeapObject> result = Allocate(HeapNumber::kSize, kNone);
   RootIndex heap_map_index = RootIndex::kHeapNumberMap;
   StoreMapNoWriteBarrier(result, heap_map_index);
   return UncheckedCast<HeapNumber>(result);
@@ -3076,24 +3169,19 @@ TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
   return result;
 }
 
-TNode<MutableHeapNumber> CodeStubAssembler::AllocateMutableHeapNumber() {
-  Node* result = Allocate(MutableHeapNumber::kSize, kNone);
-  RootIndex heap_map_index = RootIndex::kMutableHeapNumberMap;
-  StoreMapNoWriteBarrier(result, heap_map_index);
-  return UncheckedCast<MutableHeapNumber>(result);
-}
-
 TNode<Object> CodeStubAssembler::CloneIfMutablePrimitive(TNode<Object> object) {
   TVARIABLE(Object, result, object);
   Label done(this);
 
   GotoIf(TaggedIsSmi(object), &done);
-  GotoIfNot(IsMutableHeapNumber(UncheckedCast<HeapObject>(object)), &done);
+  // TODO(leszeks): Read the field descriptor to decide if this heap number is
+  // mutable or not.
+  GotoIfNot(IsHeapNumber(UncheckedCast<HeapObject>(object)), &done);
   {
     // Potentially mutable HeapNumber found; allocate a clone.
     TNode<Float64T> value =
         LoadHeapNumberValue(UncheckedCast<HeapNumber>(object));
-    result = AllocateMutableHeapNumberWithValue(value);
+    result = AllocateHeapNumberWithValue(value);
     Goto(&done);
   }
 
@@ -3101,13 +3189,6 @@ TNode<Object> CodeStubAssembler::CloneIfMutablePrimitive(TNode<Object> object) {
   return result.value();
 }
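+// With the MutableHeapNumber type gone, every HeapNumber reaching this point
+// is conservatively treated as a mutable field value and cloned; the TODO
+// above tracks consulting the field descriptor to skip provably immutable
+// numbers.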
 
-TNode<MutableHeapNumber> CodeStubAssembler::AllocateMutableHeapNumberWithValue(
-    SloppyTNode<Float64T> value) {
-  TNode<MutableHeapNumber> result = AllocateMutableHeapNumber();
-  StoreMutableHeapNumberValue(result, value);
-  return result;
-}
-
 TNode<BigInt> CodeStubAssembler::AllocateBigInt(TNode<IntPtrT> length) {
   TNode<BigInt> result = AllocateRawBigInt(length);
   StoreBigIntBitfield(result,
@@ -3120,7 +3201,7 @@ TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
   TNode<IntPtrT> size =
       IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
                 Signed(WordShl(length, kSystemPointerSizeLog2)));
-  Node* raw_result = Allocate(size, kAllowLargeObjectAllocation);
+  TNode<HeapObject> raw_result = Allocate(size, kAllowLargeObjectAllocation);
   StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap);
   if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) {
     DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset));
@@ -3194,7 +3275,7 @@ TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length,
       if_notsizeissmall(this, Label::kDeferred), if_join(this);
   GotoIf(WordEqual(length, UintPtrConstant(0)), &if_lengthiszero);
 
-  Node* raw_size =
+  TNode<IntPtrT> raw_size =
       GetArrayAllocationSize(Signed(length), UINT8_ELEMENTS, INTPTR_PARAMETERS,
                              ByteArray::kHeaderSize + kObjectAlignmentMask);
   TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
@@ -3204,7 +3285,7 @@ TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length,
   BIND(&if_sizeissmall);
   {
     // Just allocate the ByteArray in new space.
-    TNode<Object> result =
+    TNode<HeapObject> result =
         AllocateInNewSpace(UncheckedCast<IntPtrT>(size), flags);
     DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kByteArrayMap));
     StoreMapNoWriteBarrier(result, RootIndex::kByteArrayMap);
@@ -3217,15 +3298,16 @@ TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length,
   BIND(&if_notsizeissmall);
   {
     // We might need to allocate in large object space; go to the runtime.
-    Node* result = CallRuntime(Runtime::kAllocateByteArray, NoContextConstant(),
-                               ChangeUintPtrToTagged(length));
+    TNode<Object> result =
+        CallRuntime(Runtime::kAllocateByteArray, NoContextConstant(),
+                    ChangeUintPtrToTagged(length));
     var_result.Bind(result);
     Goto(&if_join);
   }
 
   BIND(&if_lengthiszero);
   {
-    var_result.Bind(LoadRoot(RootIndex::kEmptyByteArray));
+    var_result.Bind(EmptyByteArrayConstant());
     Goto(&if_join);
   }
 
@@ -3237,9 +3319,9 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
     uint32_t length, AllocationFlags flags) {
   Comment("AllocateSeqOneByteString");
   if (length == 0) {
-    return CAST(LoadRoot(RootIndex::kempty_string));
+    return EmptyStringConstant();
   }
-  Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
+  TNode<HeapObject> result = Allocate(SeqOneByteString::SizeFor(length), flags);
   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kOneByteStringMap));
   StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap);
   StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
@@ -3253,14 +3335,13 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
 
 TNode<BoolT> CodeStubAssembler::IsZeroOrContext(SloppyTNode<Object> object) {
   return Select<BoolT>(
-      WordEqual(object, SmiConstant(0)), [=] { return Int32TrueConstant(); },
+      TaggedEqual(object, SmiConstant(0)), [=] { return Int32TrueConstant(); },
       [=] { return IsContext(CAST(object)); });
 }
 
 TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
-    Node* context, TNode<Uint32T> length, AllocationFlags flags) {
+    TNode<Uint32T> length, AllocationFlags flags) {
   Comment("AllocateSeqOneByteString");
-  CSA_SLOW_ASSERT(this, IsZeroOrContext(context));
   VARIABLE(var_result, MachineRepresentation::kTagged);
 
   // Compute the SeqOneByteString size and check if it fits into new space.
@@ -3268,7 +3349,7 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
       if_notsizeissmall(this, Label::kDeferred), if_join(this);
   GotoIf(Word32Equal(length, Uint32Constant(0)), &if_lengthiszero);
 
-  Node* raw_size = GetArrayAllocationSize(
+  TNode<IntPtrT> raw_size = GetArrayAllocationSize(
       Signed(ChangeUint32ToWord(length)), UINT8_ELEMENTS, INTPTR_PARAMETERS,
       SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
   TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
@@ -3278,7 +3359,7 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
   BIND(&if_sizeissmall);
   {
     // Just allocate the SeqOneByteString in new space.
-    TNode<Object> result =
+    TNode<HeapObject> result =
         AllocateInNewSpace(UncheckedCast<IntPtrT>(size), flags);
     DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kOneByteStringMap));
     StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap);
@@ -3294,15 +3375,16 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
   BIND(&if_notsizeissmall);
   {
     // We might need to allocate in large object space; go to the runtime.
-    Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
-                               ChangeUint32ToTagged(length));
+    TNode<Object> result =
+        CallRuntime(Runtime::kAllocateSeqOneByteString, NoContextConstant(),
+                    ChangeUint32ToTagged(length));
     var_result.Bind(result);
     Goto(&if_join);
   }
 
   BIND(&if_lengthiszero);
   {
-    var_result.Bind(LoadRoot(RootIndex::kempty_string));
+    var_result.Bind(EmptyStringConstant());
     Goto(&if_join);
   }
 
@@ -3314,9 +3396,9 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
     uint32_t length, AllocationFlags flags) {
   Comment("AllocateSeqTwoByteString");
   if (length == 0) {
-    return CAST(LoadRoot(RootIndex::kempty_string));
+    return EmptyStringConstant();
   }
-  Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
+  TNode<HeapObject> result = Allocate(SeqTwoByteString::SizeFor(length), flags);
   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kStringMap));
   StoreMapNoWriteBarrier(result, RootIndex::kStringMap);
   StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
@@ -3329,8 +3411,7 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
 }
 
 TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
-    Node* context, TNode<Uint32T> length, AllocationFlags flags) {
-  CSA_SLOW_ASSERT(this, IsZeroOrContext(context));
+    TNode<Uint32T> length, AllocationFlags flags) {
   Comment("AllocateSeqTwoByteString");
   VARIABLE(var_result, MachineRepresentation::kTagged);
 
@@ -3339,7 +3420,7 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
       if_notsizeissmall(this, Label::kDeferred), if_join(this);
   GotoIf(Word32Equal(length, Uint32Constant(0)), &if_lengthiszero);
 
-  Node* raw_size = GetArrayAllocationSize(
+  TNode<IntPtrT> raw_size = GetArrayAllocationSize(
       Signed(ChangeUint32ToWord(length)), UINT16_ELEMENTS, INTPTR_PARAMETERS,
       SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
   TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
@@ -3349,7 +3430,7 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
   BIND(&if_sizeissmall);
   {
     // Just allocate the SeqTwoByteString in new space.
-    TNode<Object> result =
+    TNode<HeapObject> result =
         AllocateInNewSpace(UncheckedCast<IntPtrT>(size), flags);
     DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kStringMap));
     StoreMapNoWriteBarrier(result, RootIndex::kStringMap);
@@ -3365,15 +3446,16 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
   BIND(&if_notsizeissmall);
   {
     // We might need to allocate in large object space; go to the runtime.
-    Node* result = CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
-                               ChangeUint32ToTagged(length));
+    TNode<Object> result =
+        CallRuntime(Runtime::kAllocateSeqTwoByteString, NoContextConstant(),
+                    ChangeUint32ToTagged(length));
     var_result.Bind(result);
     Goto(&if_join);
   }
 
   BIND(&if_lengthiszero);
   {
-    var_result.Bind(LoadRoot(RootIndex::kempty_string));
+    var_result.Bind(EmptyStringConstant());
     Goto(&if_join);
   }
 
@@ -3387,7 +3469,7 @@ TNode<String> CodeStubAssembler::AllocateSlicedString(RootIndex map_root_index,
                                                       TNode<Smi> offset) {
   DCHECK(map_root_index == RootIndex::kSlicedOneByteStringMap ||
          map_root_index == RootIndex::kSlicedStringMap);
-  Node* result = Allocate(SlicedString::kSize);
+  TNode<HeapObject> result = Allocate(SlicedString::kSize);
   DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
   StoreMapNoWriteBarrier(result, map_root_index);
   StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
@@ -3419,20 +3501,20 @@ TNode<String> CodeStubAssembler::AllocateConsString(TNode<Uint32T> length,
                                                     TNode<String> right) {
   // Added string can be a cons string.
   Comment("Allocating ConsString");
-  Node* left_instance_type = LoadInstanceType(left);
-  Node* right_instance_type = LoadInstanceType(right);
+  TNode<Int32T> left_instance_type = LoadInstanceType(left);
+  TNode<Int32T> right_instance_type = LoadInstanceType(right);
 
   // Determine the resulting ConsString map to use depending on whether
   // any of {left} or {right} has two byte encoding.
   STATIC_ASSERT(kOneByteStringTag != 0);
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  Node* combined_instance_type =
+  TNode<Int32T> combined_instance_type =
       Word32And(left_instance_type, right_instance_type);
   TNode<Map> result_map = CAST(Select<Object>(
       IsSetWord32(combined_instance_type, kStringEncodingMask),
-      [=] { return LoadRoot(RootIndex::kConsOneByteStringMap); },
-      [=] { return LoadRoot(RootIndex::kConsStringMap); }));
-  Node* result = AllocateInNewSpace(ConsString::kSize);
+      [=] { return ConsOneByteStringMapConstant(); },
+      [=] { return ConsStringMapConstant(); }));
+  TNode<HeapObject> result = AllocateInNewSpace(ConsString::kSize);
   StoreMapNoWriteBarrier(result, result_map);
   StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
                                  MachineRepresentation::kWord32);
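// Because kTwoByteStringTag is 0 and kOneByteStringTag is a nonzero bit,
// Word32And of the two instance types keeps the one-byte bit only when both
// inputs are one-byte, which selects the one-byte cons map. A standalone
// sketch with an illustrative bit position (the real masks come from V8's
// instance-type constants):
#include <cstdint>
constexpr uint32_t kSketchOneByteBit = 1u << 3;  // hypothetical bit
constexpr bool BothOneByte(uint32_t left_type, uint32_t right_type) {
  return ((left_type & right_type) & kSketchOneByteBit) != 0;
}
static_assert(BothOneByte(kSketchOneByteBit, kSketchOneByteBit), "1-byte");
static_assert(!BothOneByte(kSketchOneByteBit, 0u), "mixed encodings");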
@@ -3498,15 +3580,15 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
 
   // Initialize NameDictionary elements.
   {
-    TNode<WordT> result_word = BitcastTaggedToWord(result);
-    TNode<WordT> start_address = IntPtrAdd(
+    TNode<IntPtrT> result_word = BitcastTaggedToWord(result);
+    TNode<IntPtrT> start_address = IntPtrAdd(
         result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt(
                                         NameDictionary::kElementsStartIndex) -
                                     kHeapObjectTag));
-    TNode<WordT> end_address = IntPtrAdd(
+    TNode<IntPtrT> end_address = IntPtrAdd(
         result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag)));
 
-    TNode<HeapObject> filler = UndefinedConstant();
+    TNode<Oddball> filler = UndefinedConstant();
     DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kUndefinedValue));
 
     StoreFieldsNoWriteBarrier(start_address, end_address, filler);
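// The addresses above come from bitcasting the tagged pointer to a word and
// subtracting kHeapObjectTag, since heap pointers carry a tag in their low
// bit; the [start, end) range is then filled word by word. A plain-memory
// sketch under that assumption (illustrative names; V8's tag is 1):
#include <cstdint>
#include <cstddef>
constexpr std::ptrdiff_t kSketchHeapObjectTag = 1;
void FillFieldsSketch(char* tagged_object, std::ptrdiff_t start_offset,
                      std::ptrdiff_t end_offset, std::uintptr_t filler) {
  char* start = tagged_object - kSketchHeapObjectTag + start_offset;
  char* end = tagged_object - kSketchHeapObjectTag + end_offset;
  for (char* p = start; p < end; p += sizeof(std::uintptr_t)) {
    // No write barrier needed: the filler is an immortal, immovable root.
    *reinterpret_cast<std::uintptr_t*>(p) = filler;
  }
}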
@@ -3623,7 +3705,7 @@ TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
   // Allocate the table and add the proper map.
   TNode<Map> small_ordered_hash_map =
       CAST(LoadRoot(CollectionType::GetMapRootIndex()));
-  TNode<Object> table_obj = AllocateInNewSpace(total_size_word_aligned);
+  TNode<HeapObject> table_obj = AllocateInNewSpace(total_size_word_aligned);
   StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map);
   TNode<CollectionType> table = UncheckedCast<CollectionType>(table_obj);
 
@@ -3653,7 +3735,8 @@ TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
       IntPtrAdd(table_address, hash_table_start_offset);
 
   // Initialize the HashTable part.
-  Node* memset = ExternalConstant(ExternalReference::libc_memset_function());
+  TNode<ExternalReference> memset =
+      ExternalConstant(ExternalReference::libc_memset_function());
   CallCFunction(
       memset, MachineType::AnyTagged(),
       std::make_pair(MachineType::Pointer(), hash_table_start_address),
@@ -3661,10 +3744,10 @@ TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
       std::make_pair(MachineType::UintPtr(), hash_table_and_chain_table_size));
 
   // Initialize the DataTable part.
-  TNode<HeapObject> filler = TheHoleConstant();
-  TNode<WordT> data_table_start_address =
+  TNode<Oddball> filler = TheHoleConstant();
+  TNode<IntPtrT> data_table_start_address =
       IntPtrAdd(table_address, data_table_start_offset);
-  TNode<WordT> data_table_end_address =
+  TNode<IntPtrT> data_table_end_address =
       IntPtrAdd(data_table_start_address, data_table_size);
   StoreFieldsNoWriteBarrier(data_table_start_address, data_table_end_address,
                             filler);
@@ -3682,31 +3765,32 @@ CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
 template <typename CollectionType>
 void CodeStubAssembler::FindOrderedHashTableEntry(
     Node* table, Node* hash,
-    const std::function<void(Node*, Label*, Label*)>& key_compare,
+    const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
     Variable* entry_start_position, Label* entry_found, Label* not_found) {
   // Get the index of the bucket.
-  Node* const number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
-      CAST(table), CollectionType::NumberOfBucketsIndex())));
-  Node* const bucket =
+  TNode<IntPtrT> const number_of_buckets =
+      SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+          CAST(table), CollectionType::NumberOfBucketsIndex())));
+  TNode<WordT> const bucket =
       WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
-  Node* const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+  TNode<IntPtrT> const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
       CAST(table), bucket,
       CollectionType::HashTableStartIndex() * kTaggedSize)));
 
   // Walk the bucket chain.
-  Node* entry_start;
+  TNode<IntPtrT> entry_start;
   Label if_key_found(this);
   {
-    VARIABLE(var_entry, MachineType::PointerRepresentation(), first_entry);
+    TVARIABLE(IntPtrT, var_entry, first_entry);
     Label loop(this, {&var_entry, entry_start_position}),
         continue_next_entry(this);
     Goto(&loop);
     BIND(&loop);
 
     // If the entry index is the not-found sentinel, we are done.
-    GotoIf(
-        WordEqual(var_entry.value(), IntPtrConstant(CollectionType::kNotFound)),
-        not_found);
+    GotoIf(IntPtrEqual(var_entry.value(),
+                       IntPtrConstant(CollectionType::kNotFound)),
+           not_found);
 
     // Make sure the entry index is within range.
     CSA_ASSERT(
@@ -3727,7 +3811,7 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
                   number_of_buckets);
 
     // Load the key from the entry.
-    Node* const candidate_key = UnsafeLoadFixedArrayElement(
+    TNode<Object> const candidate_key = UnsafeLoadFixedArrayElement(
         CAST(table), entry_start,
         CollectionType::HashTableStartIndex() * kTaggedSize);
 
@@ -3735,10 +3819,10 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
 
     BIND(&continue_next_entry);
     // Load the index of the next entry in the bucket chain.
-    var_entry.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+    var_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
         CAST(table), entry_start,
         (CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) *
-            kTaggedSize))));
+            kTaggedSize)));
 
     Goto(&loop);
   }
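// The loop above is a textbook chained-bucket lookup: the bucket index is the
// hash masked by (number_of_buckets - 1), the bucket yields the first entry
// index, and each entry links to the next through its chain slot until the
// kNotFound sentinel. A minimal mirror over flat arrays; the layout below is
// illustrative, not the exact OrderedHashTable layout:
#include <cstdint>
#include <vector>
constexpr int kSketchNotFound = -1;
int FindEntrySketch(const std::vector<int>& buckets,  // power-of-two count
                    const std::vector<int>& chain,    // next-entry links
                    const std::vector<int>& keys, std::uint32_t hash,
                    int key) {
  int entry = buckets[hash & (buckets.size() - 1)];
  while (entry != kSketchNotFound) {
    if (keys[entry] == key) return entry;  // the key_compare hook
    entry = chain[entry];                  // walk the bucket chain
  }
  return kSketchNotFound;
}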
@@ -3750,18 +3834,18 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
 
 template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashMap>(
     Node* table, Node* hash,
-    const std::function<void(Node*, Label*, Label*)>& key_compare,
+    const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
     Variable* entry_start_position, Label* entry_found, Label* not_found);
 template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashSet>(
     Node* table, Node* hash,
-    const std::function<void(Node*, Label*, Label*)>& key_compare,
+    const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
     Variable* entry_start_position, Label* entry_found, Label* not_found);
 
 Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
   Comment("AllocateStruct");
   CSA_ASSERT(this, IsMap(map));
   TNode<IntPtrT> size = TimesTaggedSize(LoadMapInstanceSizeInWords(map));
-  TNode<Object> object = Allocate(size, flags);
+  TNode<HeapObject> object = Allocate(size, flags);
   StoreMapNoWriteBarrier(object, map);
   InitializeStructBody(object, map, size, Struct::kHeaderSize);
   return object;
@@ -3771,12 +3855,12 @@ void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
                                              Node* size, int start_offset) {
   CSA_SLOW_ASSERT(this, IsMap(map));
   Comment("InitializeStructBody");
-  Node* filler = UndefinedConstant();
+  TNode<Oddball> filler = UndefinedConstant();
   // Calculate the untagged field addresses.
   object = BitcastTaggedToWord(object);
-  Node* start_address =
+  TNode<WordT> start_address =
       IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
-  Node* end_address =
+  TNode<WordT> end_address =
       IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
   StoreFieldsNoWriteBarrier(start_address, end_address, filler);
 }
@@ -3791,7 +3875,7 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
                                                      JS_GLOBAL_OBJECT_TYPE)));
   TNode<IntPtrT> instance_size =
       TimesTaggedSize(LoadMapInstanceSizeInWords(map));
-  TNode<Object> object = AllocateInNewSpace(instance_size, flags);
+  TNode<HeapObject> object = AllocateInNewSpace(instance_size, flags);
   StoreMapNoWriteBarrier(object, map);
   InitializeJSObjectFromMap(object, map, instance_size, properties, elements,
                             slack_tracking_mode);
@@ -3846,7 +3930,7 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
 
   // Perform in-object slack tracking if requested.
   int start_offset = JSObject::kHeaderSize;
-  Node* bit_field3 = LoadMapBitField3(map);
+  TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
   Label end(this), slack_tracking(this), complete(this, Label::kDeferred);
   STATIC_ASSERT(Map::kNoSlackTracking == 0);
   GotoIf(IsSetWord32<Map::ConstructionCounterBits>(bit_field3),
@@ -3860,8 +3944,8 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
     Comment("Decrease construction counter");
     // Slack tracking is only done on initial maps.
     CSA_ASSERT(this, IsUndefined(LoadMapBackPointer(map)));
-    STATIC_ASSERT(Map::ConstructionCounterBits::kNext == 32);
-    Node* new_bit_field3 = Int32Sub(
+    STATIC_ASSERT(Map::ConstructionCounterBits::kLastUsedBit == 31);
+    TNode<Word32T> new_bit_field3 = Int32Sub(
         bit_field3, Int32Constant(1 << Map::ConstructionCounterBits::kShift));
     StoreObjectFieldNoWriteBarrier(map, Map::kBitField3Offset, new_bit_field3,
                                    MachineRepresentation::kWord32);
@@ -3869,7 +3953,7 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
 
     // The object still has in-object slack, therefore the |used_or_unused|
     // field contains the "used" value.
-    Node* used_size = TimesTaggedSize(ChangeUint32ToWord(
+    TNode<UintPtrT> used_size = TimesTaggedSize(ChangeUint32ToWord(
         LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset,
                         MachineType::Uint8())));
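// The construction counter is a bit field inside the map's bit_field3, so
// "decrease" is a subtraction of 1 at the field's shift position; underflow
// cannot occur because this path only runs while the counter is nonzero.
// Sketch with an illustrative 3-bit counter in the top bits (the real layout
// is Map::ConstructionCounterBits):
#include <cstdint>
constexpr std::uint32_t kSketchCounterShift = 29;
constexpr std::uint32_t kSketchCounterMask = 0x7u << kSketchCounterShift;
constexpr std::uint32_t DecrementCounter(std::uint32_t bit_field3) {
  return bit_field3 - (1u << kSketchCounterShift);  // other bits untouched
}
static_assert((DecrementCounter(2u << kSketchCounterShift) &
               kSketchCounterMask) == 1u << kSketchCounterShift,
              "counter decremented");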
 
@@ -3957,7 +4041,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
   int capacity_int;
   if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_int, capacity_mode)) {
     if (capacity_int == 0) {
-      TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
+      TNode<FixedArray> empty_array = EmptyFixedArrayConstant();
       array = AllocateJSArray(array_map, empty_array, length, allocation_site,
                               array_header_size);
       return {array.value(), empty_array};
@@ -3970,7 +4054,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
 
     BIND(&empty);
     {
-      TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
+      TNode<FixedArray> empty_array = EmptyFixedArrayConstant();
       array = AllocateJSArray(array_map, empty_array, length, allocation_site,
                               array_header_size);
       elements = empty_array;
@@ -4059,7 +4143,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
   CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
 
   // Allocate space for the JSArray and the elements FixedArray in one go.
-  TNode<Object> array = AllocateInNewSpace(size_in_bytes);
+  TNode<HeapObject> array = AllocateInNewSpace(size_in_bytes);
 
   StoreMapNoWriteBarrier(array, array_map);
   StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
@@ -4109,18 +4193,18 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
                                             Node* begin, Node* count,
                                             ParameterMode mode, Node* capacity,
                                             Node* allocation_site) {
-  Node* original_array_map = LoadMap(array);
-  Node* elements_kind = LoadMapElementsKind(original_array_map);
+  TNode<Map> original_array_map = LoadMap(array);
+  TNode<Int32T> elements_kind = LoadMapElementsKind(original_array_map);
 
   // Use the canonical map for the Array's ElementsKind
-  Node* native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<Map> array_map = LoadJSArrayElementsMap(elements_kind, native_context);
 
   TNode<FixedArrayBase> new_elements = ExtractFixedArray(
       LoadElements(array), begin, count, capacity,
       ExtractFixedArrayFlag::kAllFixedArrays, mode, nullptr, elements_kind);
 
-  TNode<Object> result = AllocateJSArray(
+  TNode<JSArray> result = AllocateJSArray(
       array_map, new_elements, ParameterToTagged(count, mode), allocation_site);
   return result;
 }
@@ -4134,7 +4218,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
   // protector is invalid. This function should be renamed to reflect its uses.
   CSA_ASSERT(this, IsJSArray(array));
 
-  Node* length = LoadJSArrayLength(array);
+  TNode<Number> length = LoadJSArrayLength(array);
   Node* new_elements = nullptr;
   VARIABLE(var_new_elements, MachineRepresentation::kTagged);
   TVARIABLE(Int32T, var_elements_kind, LoadMapElementsKind(LoadMap(array)));
@@ -4153,7 +4237,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
   // Simple extraction that preserves holes.
   new_elements =
       ExtractFixedArray(LoadElements(array), IntPtrOrSmiConstant(0, mode),
-                        TaggedToParameter(length, mode), nullptr,
+                        TaggedToParameter(CAST(length), mode), nullptr,
                         ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode,
                         nullptr, var_elements_kind.value());
   var_new_elements.Bind(new_elements);
@@ -4171,7 +4255,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
     // ExtractFixedArrayFlag::kDontCopyCOW.
     new_elements = ExtractFixedArray(
         LoadElements(array), IntPtrOrSmiConstant(0, mode),
-        TaggedToParameter(length, mode), nullptr,
+        TaggedToParameter(CAST(length), mode), nullptr,
         ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted);
     var_new_elements.Bind(new_elements);
     // If the array type didn't change, use the original elements kind.
@@ -4183,9 +4267,10 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
 
   BIND(&allocate_jsarray);
 
-  // Handle sealed, frozen elements kinds
-  CSA_ASSERT(this, IsElementsKindLessThanOrEqual(var_elements_kind.value(),
-                                                 LAST_FROZEN_ELEMENTS_KIND));
+  // Handle any nonextensible elements kinds
+  CSA_ASSERT(this, IsElementsKindLessThanOrEqual(
+                       var_elements_kind.value(),
+                       LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND));
   GotoIf(IsElementsKindLessThanOrEqual(var_elements_kind.value(),
                                        LAST_FAST_ELEMENTS_KIND),
          &allocate_jsarray_main);
@@ -4194,11 +4279,11 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
 
   BIND(&allocate_jsarray_main);
   // Use the canonical map for the chosen elements kind.
-  Node* native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<Map> array_map =
       LoadJSArrayElementsMap(var_elements_kind.value(), native_context);
 
-  TNode<Object> result = AllocateJSArray(
+  TNode<JSArray> result = AllocateJSArray(
       array_map, CAST(var_new_elements.value()), CAST(length), allocation_site);
   return result;
 }
@@ -4236,7 +4321,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
 
   if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment;
   // Allocate both array and elements object, and initialize the JSArray.
-  Node* array = Allocate(total_size, flags);
+  TNode<HeapObject> array = Allocate(total_size, flags);
   if (fixed_array_map != nullptr) {
     // Conservatively only skip the write barrier if there are no allocation
     // flags; this ensures that the object hasn't ended up in LOS. Note that the
@@ -4256,27 +4341,27 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
     DCHECK(RootsTable::IsImmortalImmovable(map_index));
     StoreMapNoWriteBarrier(array, map_index);
   }
-  StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
+  StoreObjectFieldNoWriteBarrier(array, FixedArrayBase::kLengthOffset,
                                  ParameterToTagged(capacity, mode));
-  return UncheckedCast<FixedArray>(array);
+  return UncheckedCast<FixedArrayBase>(array);
 }
 
 TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
-    Node* source, Node* first, Node* count, Node* capacity, Node* source_map,
-    ElementsKind from_kind, AllocationFlags allocation_flags,
-    ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode,
-    HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted,
-    Node* source_elements_kind) {
+    SloppyTNode<FixedArrayBase> source, Node* first, Node* count,
+    Node* capacity, SloppyTNode<Map> source_map, ElementsKind from_kind,
+    AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
+    ParameterMode parameter_mode, HoleConversionMode convert_holes,
+    TVariable<BoolT>* var_holes_converted, Node* source_elements_kind) {
   DCHECK_NE(first, nullptr);
   DCHECK_NE(count, nullptr);
   DCHECK_NE(capacity, nullptr);
   DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays);
-  CSA_ASSERT(this,
-             WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity));
-  CSA_ASSERT(this, WordEqual(source_map, LoadMap(source)));
+  CSA_ASSERT(this, IntPtrOrSmiNotEqual(IntPtrOrSmiConstant(0, parameter_mode),
+                                       capacity, parameter_mode));
+  CSA_ASSERT(this, TaggedEqual(source_map, LoadMap(source)));
 
-  VARIABLE(var_result, MachineRepresentation::kTagged);
-  VARIABLE(var_target_map, MachineRepresentation::kTagged, source_map);
+  TVARIABLE(FixedArrayBase, var_result);
+  TVARIABLE(Map, var_target_map, source_map);
 
   Label done(this, {&var_result}), is_cow(this),
       new_space_check(this, {&var_target_map});
@@ -4286,12 +4371,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
   // source_map as the target map.
   if (IsDoubleElementsKind(from_kind)) {
     CSA_ASSERT(this, IsFixedDoubleArrayMap(source_map));
-    var_target_map.Bind(LoadRoot(RootIndex::kFixedArrayMap));
+    var_target_map = FixedArrayMapConstant();
     Goto(&new_space_check);
   } else {
     CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArrayMap(source_map)));
-    Branch(WordEqual(var_target_map.value(),
-                     LoadRoot(RootIndex::kFixedCOWArrayMap)),
+    Branch(TaggedEqual(var_target_map.value(), FixedCOWArrayMapConstant()),
            &is_cow, &new_space_check);
 
     BIND(&is_cow);
@@ -4301,13 +4385,14 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
       // 1) |extract_flags| forces us to, or
       // 2) we're asked to extract only part of the |source| (|first| != 0).
       if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) {
-        Branch(WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), first),
+        Branch(IntPtrOrSmiNotEqual(IntPtrOrSmiConstant(0, parameter_mode),
+                                   first, parameter_mode),
                &new_space_check, [&] {
-                 var_result.Bind(source);
+                 var_result = source;
                  Goto(&done);
                });
       } else {
-        var_target_map.Bind(LoadRoot(RootIndex::kFixedArrayMap));
+        var_target_map = FixedArrayMapConstant();
         Goto(&new_space_check);
       }
     }
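// Flattened, the COW decision above is: share the existing backing store only
// when the caller permits it and asks for the whole array from index 0; any
// partial extraction, or an extraction without kDontCopyCOW, must materialize
// a fresh FixedArray. A one-line sketch of that predicate:
#include <cstdint>
constexpr bool CanShareCowBackingStore(bool dont_copy_cow_allowed,
                                       std::intptr_t first_index) {
  return dont_copy_cow_allowed && first_index == 0;
}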
@@ -4344,8 +4429,9 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
     TNode<FixedArrayBase> to_elements =
         AllocateFixedArray(to_kind, capacity, parameter_mode, allocation_flags,
                            var_target_map.value());
-    var_result.Bind(to_elements);
+    var_result = to_elements;
 
+#ifndef V8_ENABLE_SINGLE_GENERATION
 #ifdef DEBUG
     TNode<IntPtrT> object_word = BitcastTaggedToWord(to_elements);
     TNode<IntPtrT> object_page = PageFromAddress(object_word);
@@ -4358,6 +4444,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
             WordAnd(page_flags,
                     IntPtrConstant(MemoryChunk::kIsInYoungGenerationMask)),
             IntPtrConstant(0)));
+#endif
 #endif
 
     if (convert_holes == HoleConversionMode::kDontConvert &&
@@ -4367,7 +4454,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
       // will efficiently use memcpy.
       FillFixedArrayWithValue(to_kind, to_elements, count, capacity,
                               RootIndex::kTheHoleValue, parameter_mode);
-      CopyElements(to_kind, to_elements, IntPtrConstant(0), CAST(source),
+      CopyElements(to_kind, to_elements, IntPtrConstant(0), source,
                    ParameterToIntPtr(first, parameter_mode),
                    ParameterToIntPtr(count, parameter_mode),
                    SKIP_WRITE_BARRIER);
@@ -4396,15 +4483,15 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
           to_elements =
               AllocateFixedArray(to_smi_kind, capacity, parameter_mode,
                                  allocation_flags, var_target_map.value());
-          var_result.Bind(to_elements);
+          var_result = to_elements;
 
           FillFixedArrayWithValue(to_smi_kind, to_elements, count, capacity,
                                   RootIndex::kTheHoleValue, parameter_mode);
           // CopyElements will try to use memcpy if it's not conflicting with
           // GC. Otherwise it will copy element by element, but skip write
           // barriers (since we're copying smis to smis).
-          CopyElements(to_smi_kind, to_elements, IntPtrConstant(0),
-                       CAST(source), ParameterToIntPtr(first, parameter_mode),
+          CopyElements(to_smi_kind, to_elements, IntPtrConstant(0), source,
+                       ParameterToIntPtr(first, parameter_mode),
                        ParameterToIntPtr(count, parameter_mode),
                        SKIP_WRITE_BARRIER);
           Goto(&done);
@@ -4417,7 +4504,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
           to_elements =
               AllocateFixedArray(to_kind, capacity, parameter_mode,
                                  allocation_flags, var_target_map.value());
-          var_result.Bind(to_elements);
+          var_result = to_elements;
           CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
                                  count, capacity, UPDATE_WRITE_BARRIER,
                                  parameter_mode, convert_holes,
@@ -4445,8 +4532,8 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
 
   VARIABLE(var_result, MachineRepresentation::kTagged);
   const ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
-  Node* to_elements = AllocateFixedArray(kind, capacity, mode, allocation_flags,
-                                         fixed_array_map);
+  TNode<FixedArrayBase> to_elements = AllocateFixedArray(
+      kind, capacity, mode, allocation_flags, fixed_array_map);
   var_result.Bind(to_elements);
   // We first try to copy the FixedDoubleArray to a new FixedDoubleArray.
   // |var_holes_converted| is set to False preliminarily.
@@ -4466,25 +4553,25 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
                           capacity, RootIndex::kTheHoleValue, mode);
 
   const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
-  Node* first_from_element_offset =
+  TNode<IntPtrT> first_from_element_offset =
       ElementOffsetFromIndex(first, kind, mode, 0);
-  Node* limit_offset = IntPtrAdd(first_from_element_offset,
-                                 IntPtrConstant(first_element_offset));
-  VARIABLE(var_from_offset, MachineType::PointerRepresentation(),
-           ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count, mode), kind,
-                                  mode, first_element_offset));
+  TNode<WordT> limit_offset = IntPtrAdd(first_from_element_offset,
+                                        IntPtrConstant(first_element_offset));
+  TVARIABLE(IntPtrT, var_from_offset,
+            ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count, mode), kind,
+                                   mode, first_element_offset));
 
   Label decrement(this, {&var_from_offset}), done(this);
-  Node* to_array_adjusted =
+  TNode<WordT> to_array_adjusted =
       IntPtrSub(BitcastTaggedToWord(to_elements), first_from_element_offset);
 
   Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement);
 
   BIND(&decrement);
   {
-    Node* from_offset =
+    TNode<IntPtrT> from_offset =
         IntPtrSub(var_from_offset.value(), IntPtrConstant(kDoubleSize));
-    var_from_offset.Bind(from_offset);
+    var_from_offset = from_offset;
 
     Node* to_offset = from_offset;
 
@@ -4496,7 +4583,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
     StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array_adjusted,
                         to_offset, value);
 
-    Node* compare = WordNotEqual(from_offset, limit_offset);
+    TNode<BoolT> compare = WordNotEqual(from_offset, limit_offset);
     Branch(compare, &decrement, &done);
 
     BIND(&if_hole);
@@ -4557,8 +4644,10 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
   }
 
   Label if_fixed_double_array(this), empty(this), done(this, {&var_result});
-  Node* source_map = LoadMap(source);
-  GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity), &empty);
+  TNode<Map> source_map = LoadMap(source);
+  GotoIf(IntPtrOrSmiEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity,
+                          parameter_mode),
+         &empty);
 
   if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
     if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
@@ -4571,7 +4660,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
   if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
     // Here we can only get |source| as FixedArray, never FixedDoubleArray.
     // PACKED_ELEMENTS is used to signify that the source is a FixedArray.
-    Node* to_elements = ExtractToFixedArray(
+    TNode<FixedArray> to_elements = ExtractToFixedArray(
         source, first, count, capacity, source_map, PACKED_ELEMENTS,
         allocation_flags, extract_flags, parameter_mode, convert_holes,
         var_holes_converted, source_runtime_kind);
@@ -4584,7 +4673,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
     Comment("Copy FixedDoubleArray");
 
     if (convert_holes == HoleConversionMode::kConvertToUndefined) {
-      Node* to_elements = ExtractFixedDoubleArrayFillingHoles(
+      TNode<FixedArrayBase> to_elements = ExtractFixedDoubleArrayFillingHoles(
           source, first, count, capacity, source_map, var_holes_converted,
           allocation_flags, extract_flags, parameter_mode);
       var_result.Bind(to_elements);
@@ -4643,7 +4732,7 @@ Node* CodeStubAssembler::AllocatePropertyArray(Node* capacity_node,
   TNode<IntPtrT> total_size =
       GetPropertyArrayAllocationSize(capacity_node, mode);
 
-  TNode<Object> array = Allocate(total_size, flags);
+  TNode<HeapObject> array = Allocate(total_size, flags);
   RootIndex map_index = RootIndex::kPropertyArrayMap;
   DCHECK(RootsTable::IsImmortalImmovable(map_index));
   StoreMapNoWriteBarrier(array, map_index);
@@ -4659,7 +4748,7 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array,
   CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
   CSA_SLOW_ASSERT(this, IsPropertyArray(array));
   ElementsKind kind = PACKED_ELEMENTS;
-  Node* value = UndefinedConstant();
+  TNode<Oddball> value = UndefinedConstant();
   BuildFastFixedArrayForEach(
       array, kind, from_node, to_node,
       [this, value](Node* array, Node* offset) {
@@ -4681,17 +4770,18 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, Node* array,
 
   // Determine the value to initialize the {array} based
   // on the {value_root_index} and the elements {kind}.
-  Node* value = LoadRoot(value_root_index);
+  TNode<Object> value = LoadRoot(value_root_index);
+  TNode<Float64T> float_value;
   if (IsDoubleElementsKind(kind)) {
-    value = LoadHeapNumberValue(value);
+    float_value = LoadHeapNumberValue(CAST(value));
   }
 
   BuildFastFixedArrayForEach(
       array, kind, from_node, to_node,
-      [this, value, kind](Node* array, Node* offset) {
+      [this, value, float_value, kind](Node* array, Node* offset) {
         if (IsDoubleElementsKind(kind)) {
           StoreNoWriteBarrier(MachineRepresentation::kFloat64, array, offset,
-                              value);
+                              float_value);
         } else {
           StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
                               value);
@@ -4703,13 +4793,13 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, Node* array,
 void CodeStubAssembler::StoreFixedDoubleArrayHole(
     TNode<FixedDoubleArray> array, Node* index, ParameterMode parameter_mode) {
   CSA_SLOW_ASSERT(this, MatchesParameterMode(index, parameter_mode));
-  Node* offset =
+  TNode<IntPtrT> offset =
       ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, parameter_mode,
                              FixedArray::kHeaderSize - kHeapObjectTag);
   CSA_ASSERT(this, IsOffsetInBounds(
                        offset, LoadAndUntagFixedArrayBaseLength(array),
                        FixedDoubleArray::kHeaderSize, PACKED_DOUBLE_ELEMENTS));
-  Node* double_hole =
+  TNode<UintPtrT> double_hole =
       Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
              : ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
   // TODO(danno): When we have a Float32/Float64 wrapper class that
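// Holes in a FixedDoubleArray are a dedicated NaN bit pattern, which is why
// the hole can be written with raw 64-bit (or paired 32-bit) stores and must
// be compared bit-for-bit rather than as a double (NaN != NaN). A standalone
// sketch; the constant below is an illustrative NaN payload, V8's actual
// value is kHoleNanInt64:
#include <cstdint>
#include <cstring>
constexpr std::uint64_t kSketchHoleNanBits = 0x7FF7FFFFDEADBEEFull;
inline bool IsHoleSketch(double d) {
  std::uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);  // bit-compare, not double-compare
  return bits == kSketchHoleNanBits;
}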
@@ -4845,7 +4935,7 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
                     IntPtrConstant(ElementsKindToByteSize(kind)));
       auto loop_body = [&](Node* array, Node* offset) {
         Node* const element = Load(MachineType::AnyTagged(), array, offset);
-        Node* const delta_offset = IntPtrAdd(offset, delta);
+        TNode<WordT> const delta_offset = IntPtrAdd(offset, delta);
         Store(array, delta_offset, element);
       };
 
@@ -4894,8 +4984,8 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
   CSA_ASSERT(this, IntPtrLessThanOrEqual(
                        IntPtrAdd(src_index, length),
                        LoadAndUntagFixedArrayBaseLength(src_elements)));
-  CSA_ASSERT(this, Word32Or(WordNotEqual(dst_elements, src_elements),
-                            WordEqual(length, IntPtrConstant(0))));
+  CSA_ASSERT(this, Word32Or(TaggedNotEqual(dst_elements, src_elements),
+                            IntPtrEqual(length, IntPtrConstant(0))));
 
   // The write barrier can be ignored if {dst_elements} is in new space, or if
   // the elements pointer is FixedDoubleArray.
@@ -4938,7 +5028,7 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
           src_elements, kind, begin, end,
           [&](Node* array, Node* offset) {
             Node* const element = Load(MachineType::AnyTagged(), array, offset);
-            Node* const delta_offset = IntPtrAdd(offset, delta);
+            TNode<WordT> const delta_offset = IntPtrAdd(offset, delta);
             if (write_barrier == SKIP_WRITE_BARRIER) {
               StoreNoWriteBarrier(MachineRepresentation::kTagged, dst_elements,
                                   delta_offset, element);
@@ -4984,7 +5074,7 @@ void CodeStubAssembler::CopyFixedArrayElements(
       !needs_write_barrier &&
       (kTaggedSize == kDoubleSize ||
        IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind));
-  Node* double_hole =
+  TNode<UintPtrT> double_hole =
       Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
              : ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
 
@@ -5009,12 +5099,12 @@ void CodeStubAssembler::CopyFixedArrayElements(
                             RootIndex::kTheHoleValue, mode);
   }
 
-  Node* first_from_element_offset =
+  TNode<IntPtrT> first_from_element_offset =
       ElementOffsetFromIndex(first_element, from_kind, mode, 0);
-  Node* limit_offset = IntPtrAdd(first_from_element_offset,
-                                 IntPtrConstant(first_element_offset));
-  VARIABLE(
-      var_from_offset, MachineType::PointerRepresentation(),
+  TNode<IntPtrT> limit_offset = Signed(IntPtrAdd(
+      first_from_element_offset, IntPtrConstant(first_element_offset)));
+  TVARIABLE(
+      IntPtrT, var_from_offset,
       ElementOffsetFromIndex(IntPtrOrSmiAdd(first_element, element_count, mode),
                              from_kind, mode, first_element_offset));
   // This second variable is used only when the element sizes of source and
@@ -5041,10 +5131,10 @@ void CodeStubAssembler::CopyFixedArrayElements(
 
   BIND(&decrement);
   {
-    Node* from_offset = IntPtrSub(
+    TNode<IntPtrT> from_offset = Signed(IntPtrSub(
         var_from_offset.value(),
-        IntPtrConstant(from_double_elements ? kDoubleSize : kTaggedSize));
-    var_from_offset.Bind(from_offset);
+        IntPtrConstant(from_double_elements ? kDoubleSize : kTaggedSize)));
+    var_from_offset = from_offset;
 
     Node* to_offset;
     if (element_offset_matches) {
@@ -5119,7 +5209,7 @@ void CodeStubAssembler::CopyFixedArrayElements(
     }
 
     BIND(&next_iter);
-    Node* compare = WordNotEqual(from_offset, limit_offset);
+    TNode<BoolT> compare = WordNotEqual(from_offset, limit_offset);
     Branch(compare, &decrement, &done);
   }
 
@@ -5131,8 +5221,8 @@ TNode<FixedArray> CodeStubAssembler::HeapObjectToFixedArray(
     TNode<HeapObject> base, Label* cast_fail) {
   Label fixed_array(this);
   TNode<Map> map = LoadMap(base);
-  GotoIf(WordEqual(map, LoadRoot(RootIndex::kFixedArrayMap)), &fixed_array);
-  GotoIf(WordNotEqual(map, LoadRoot(RootIndex::kFixedCOWArrayMap)), cast_fail);
+  GotoIf(TaggedEqual(map, FixedArrayMapConstant()), &fixed_array);
+  GotoIf(TaggedNotEqual(map, FixedCOWArrayMapConstant()), cast_fail);
   Goto(&fixed_array);
   BIND(&fixed_array);
   return UncheckedCast<FixedArray>(base);
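// The WordEqual -> TaggedEqual rewrites in these hunks matter for pointer
// compression: a register holding a compressed tagged value only guarantees
// its low 32 bits, so a full-word compare could disagree on stale upper bits.
// Illustrative sketch of the distinction:
#include <cstdint>
inline bool TaggedEqualSketch(std::uint64_t a, std::uint64_t b) {
  // Compare only the (compressed) tagged bits, like TaggedEqual.
  return static_cast<std::uint32_t>(a) == static_cast<std::uint32_t>(b);
}
inline bool WordEqualSketch(std::uint64_t a, std::uint64_t b) {
  return a == b;  // full-word compare; wrong for compressed tagged values
}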
@@ -5153,8 +5243,8 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
   bool needs_write_barrier = barrier_mode == UPDATE_WRITE_BARRIER;
 
   if (destroy_source == DestroySource::kNo) {
-    // PropertyArray may contain MutableHeapNumbers, which will be cloned on the
-    // heap, requiring a write barrier.
+    // PropertyArray may contain mutable HeapNumbers, which will be cloned on
+    // the heap, requiring a write barrier.
     needs_write_barrier = true;
   }
 
@@ -5213,13 +5303,13 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
   ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
   STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
   int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag;
-  Node* from_offset = ElementOffsetFromIndex(from_index, from_kind,
-                                             INTPTR_PARAMETERS, header_size);
-  Node* to_offset =
+  TNode<IntPtrT> from_offset = ElementOffsetFromIndex(
+      from_index, from_kind, INTPTR_PARAMETERS, header_size);
+  TNode<IntPtrT> to_offset =
       ElementOffsetFromIndex(to_index, to_kind, INTPTR_PARAMETERS, header_size);
-  Node* byte_count =
+  TNode<IntPtrT> byte_count =
       ElementOffsetFromIndex(character_count, from_kind, INTPTR_PARAMETERS);
-  Node* limit_offset = IntPtrAdd(from_offset, byte_count);
+  TNode<WordT> limit_offset = IntPtrAdd(from_offset, byte_count);
 
   // Prepare the fast loop
   MachineType type =
@@ -5234,8 +5324,8 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
   int to_index_constant = 0, from_index_constant = 0;
   bool index_same = (from_encoding == to_encoding) &&
                     (from_index == to_index ||
-                     (ToInt32Constant(from_index, from_index_constant) &&
-                      ToInt32Constant(to_index, to_index_constant) &&
+                     (ToInt32Constant(from_index, &from_index_constant) &&
+                      ToInt32Constant(to_index, &to_index_constant) &&
                       from_index_constant == to_index_constant));
   BuildFastLoop(
       vars, from_offset, limit_offset,
@@ -5259,24 +5349,23 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
                                                        Label* if_hole) {
   CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
   if (IsDoubleElementsKind(from_kind)) {
-    Node* value =
+    TNode<Float64T> value =
         LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64());
     if (!IsDoubleElementsKind(to_kind)) {
-      value = AllocateHeapNumberWithValue(value);
+      return AllocateHeapNumberWithValue(value);
     }
     return value;
 
   } else {
-    Node* value = Load(MachineType::AnyTagged(), array, offset);
+    TNode<Object> value = CAST(Load(MachineType::AnyTagged(), array, offset));
     if (if_hole) {
-      GotoIf(WordEqual(value, TheHoleConstant()), if_hole);
+      GotoIf(TaggedEqual(value, TheHoleConstant()), if_hole);
     }
     if (IsDoubleElementsKind(to_kind)) {
       if (IsSmiElementsKind(from_kind)) {
-        value = SmiToFloat64(value);
-      } else {
-        value = LoadHeapNumberValue(value);
+        return SmiToFloat64(CAST(value));
       }
+      return LoadHeapNumberValue(CAST(value));
     }
     return value;
   }
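// The conversion matrix above: double -> tagged boxes the value in a fresh
// HeapNumber; tagged -> double unboxes, converting a Smi numerically and
// reading a HeapNumber's float64 field; holes jump to |if_hole|. A sketch
// using std::variant as a stand-in for a tagged value (illustrative only):
#include <cstdint>
#include <variant>
using SketchTagged = std::variant<std::int32_t, double>;  // Smi or HeapNumber
inline double PrepareForDoubleStoreSketch(const SketchTagged& value) {
  if (const std::int32_t* smi = std::get_if<std::int32_t>(&value)) {
    return static_cast<double>(*smi);  // SmiToFloat64
  }
  return std::get<double>(value);      // LoadHeapNumberValue
}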
@@ -5298,14 +5387,12 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
   CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
   CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
   CSA_SLOW_ASSERT(this, TaggedIsSmi(key));
-  Node* capacity = LoadFixedArrayBaseLength(elements);
+  TNode<Smi> capacity = LoadFixedArrayBaseLength(elements);
 
   ParameterMode mode = OptimalParameterMode();
-  capacity = TaggedToParameter(capacity, mode);
-  key = TaggedToParameter(key, mode);
-
-  return TryGrowElementsCapacity(object, elements, kind, key, capacity, mode,
-                                 bailout);
+  return TryGrowElementsCapacity(
+      object, elements, kind, TaggedToParameter(key, mode),
+      TaggedToParameter(capacity, mode), mode, bailout);
 }
 
 Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
@@ -5348,7 +5435,8 @@ Node* CodeStubAssembler::GrowElementsCapacity(
          bailout);
 
   // Allocate the new backing store.
-  Node* new_elements = AllocateFixedArray(to_kind, new_capacity, mode);
+  TNode<FixedArrayBase> new_elements =
+      AllocateFixedArray(to_kind, new_capacity, mode);
 
   // Copy the elements from the old elements store to the new.
   // The size-check above guarantees that the |new_elements| is allocated
@@ -5365,7 +5453,7 @@ void CodeStubAssembler::InitializeAllocationMemento(Node* base,
                                                     Node* base_allocation_size,
                                                     Node* allocation_site) {
   Comment("[Initialize AllocationMemento");
-  TNode<Object> memento =
+  TNode<HeapObject> memento =
       InnerAllocate(CAST(base), UncheckedCast<IntPtrT>(base_allocation_size));
   StoreMapNoWriteBarrier(memento, RootIndex::kAllocationMementoMap);
   StoreObjectFieldNoWriteBarrier(
@@ -5509,9 +5597,9 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
     Goto(if_number);
 
     BIND(&not_smi);
-    Node* map = LoadMap(value);
+    TNode<Map> map = LoadMap(value);
     GotoIf(IsHeapNumberMap(map), &is_heap_number);
-    Node* instance_type = LoadMapInstanceType(map);
+    TNode<Uint16T> instance_type = LoadMapInstanceType(map);
     if (conversion == Object::Conversion::kToNumeric) {
       GotoIf(IsBigIntInstanceType(instance_type), &is_bigint);
     }
@@ -5557,7 +5645,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
 
 TNode<Int32T> CodeStubAssembler::TruncateHeapNumberValueToWord32(
     TNode<HeapNumber> object) {
-  Node* value = LoadHeapNumberValue(object);
+  TNode<Float64T> value = LoadHeapNumberValue(object);
   return Signed(TruncateFloat64ToWord32(value));
 }
 
@@ -5715,7 +5803,7 @@ TNode<String> CodeStubAssembler::ToThisString(TNode<Context> context,
   BIND(&if_valueisnotsmi);
   {
     // Load the instance type of the {value}.
-    Node* value_instance_type = LoadInstanceType(CAST(value));
+    TNode<Uint16T> value_instance_type = LoadInstanceType(CAST(value));
 
     // Check if the {value} is already String.
     Label if_valueisnotstring(this, Label::kDeferred);
@@ -5867,16 +5955,16 @@ TNode<Object> CodeStubAssembler::ToThisValue(TNode<Context> context,
     {
       switch (primitive_type) {
         case PrimitiveType::kBoolean:
-          GotoIf(WordEqual(value_map, BooleanMapConstant()), &done_loop);
+          GotoIf(TaggedEqual(value_map, BooleanMapConstant()), &done_loop);
           break;
         case PrimitiveType::kNumber:
-          GotoIf(WordEqual(value_map, HeapNumberMapConstant()), &done_loop);
+          GotoIf(TaggedEqual(value_map, HeapNumberMapConstant()), &done_loop);
           break;
         case PrimitiveType::kString:
           GotoIf(IsStringInstanceType(value_instance_type), &done_loop);
           break;
         case PrimitiveType::kSymbol:
-          GotoIf(WordEqual(value_map, SymbolMapConstant()), &done_loop);
+          GotoIf(TaggedEqual(value_map, SymbolMapConstant()), &done_loop);
           break;
       }
       Goto(&done_throw);
@@ -5921,7 +6009,8 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
 
   // Load the instance type of the {value}.
   var_value_map.Bind(LoadMap(value));
-  Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
+  TNode<Uint16T> const value_instance_type =
+      LoadMapInstanceType(var_value_map.value());
 
   Branch(Word32Equal(value_instance_type, Int32Constant(instance_type)), &out,
          &throw_exception);
@@ -5935,26 +6024,26 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
   return var_value_map.value();
 }
 
-Node* CodeStubAssembler::ThrowIfNotJSReceiver(Node* context, Node* value,
-                                              MessageTemplate msg_template,
-                                              const char* method_name) {
-  Label out(this), throw_exception(this, Label::kDeferred);
-  VARIABLE(var_value_map, MachineRepresentation::kTagged);
+void CodeStubAssembler::ThrowIfNotJSReceiver(TNode<Context> context,
+                                             TNode<Object> value,
+                                             MessageTemplate msg_template,
+                                             const char* method_name) {
+  Label done(this), throw_exception(this, Label::kDeferred);
 
   GotoIf(TaggedIsSmi(value), &throw_exception);
 
   // Load the instance type of the {value}.
-  var_value_map.Bind(LoadMap(value));
-  Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
+  TNode<Map> value_map = LoadMap(CAST(value));
+  TNode<Uint16T> const value_instance_type = LoadMapInstanceType(value_map);
 
-  Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
+  Branch(IsJSReceiverInstanceType(value_instance_type), &done,
+         &throw_exception);
 
   // The {value} is not a compatible receiver for this method.
   BIND(&throw_exception);
-  ThrowTypeError(context, msg_template, method_name);
+  ThrowTypeError(context, msg_template, StringConstant(method_name), value);
 
-  BIND(&out);
-  return var_value_map.value();
+  BIND(&done);
 }
 
 void CodeStubAssembler::ThrowIfNotCallable(TNode<Context> context,
@@ -5974,7 +6063,7 @@ void CodeStubAssembler::ThrowIfNotCallable(TNode<Context> context,
 
 void CodeStubAssembler::ThrowRangeError(Node* context, MessageTemplate message,
                                         Node* arg0, Node* arg1, Node* arg2) {
-  Node* template_index = SmiConstant(static_cast<int>(message));
+  TNode<Smi> template_index = SmiConstant(static_cast<int>(message));
   if (arg0 == nullptr) {
     CallRuntime(Runtime::kThrowRangeError, context, template_index);
   } else if (arg1 == nullptr) {
@@ -5999,7 +6088,7 @@ void CodeStubAssembler::ThrowTypeError(Node* context, MessageTemplate message,
 
 void CodeStubAssembler::ThrowTypeError(Node* context, MessageTemplate message,
                                        Node* arg0, Node* arg1, Node* arg2) {
-  Node* template_index = SmiConstant(static_cast<int>(message));
+  TNode<Smi> template_index = SmiConstant(static_cast<int>(message));
   if (arg0 == nullptr) {
     CallRuntime(Runtime::kThrowTypeError, context, template_index);
   } else if (arg1 == nullptr) {
@@ -6028,13 +6117,6 @@ TNode<BoolT> CodeStubAssembler::IsExtensibleMap(SloppyTNode<Map> map) {
   return IsSetWord32<Map::IsExtensibleBit>(LoadMapBitField3(map));
 }
 
-TNode<BoolT> CodeStubAssembler::IsFrozenOrSealedElementsKindMap(
-    SloppyTNode<Map> map) {
-  CSA_ASSERT(this, IsMap(map));
-  return IsElementsKindInRange(LoadMapElementsKind(map), PACKED_SEALED_ELEMENTS,
-                               HOLEY_FROZEN_ELEMENTS);
-}
-
 TNode<BoolT> CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode<Map> map) {
   int kMask = Map::IsExtensibleBit::kMask | Map::IsPrototypeMapBit::kMask;
   int kExpected = Map::IsExtensibleBit::kMask;
@@ -6062,115 +6144,114 @@ TNode<BoolT> CodeStubAssembler::IsUndetectableMap(SloppyTNode<Map> map) {
 }
 
 TNode<BoolT> CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
-  Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
-  Node* cell = LoadRoot(RootIndex::kNoElementsProtector);
-  Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
-  return WordEqual(cell_value, invalid);
+  TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+  TNode<PropertyCell> cell = NoElementsProtectorConstant();
+  TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+  return TaggedEqual(cell_value, invalid);
 }
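// Every Is*ProtectorCellInvalid predicate follows the same pattern: load the
// protector cell's value and compare it with the kProtectorInvalid sentinel
// Smi; once a protector is invalidated it stays invalid, so guarded fast
// paths simply stop being taken. Minimal sketch of the pattern (types are
// illustrative; V8 uses kProtectorValid == 1 and kProtectorInvalid == 0):
#include <cstdint>
struct SketchPropertyCell { std::int32_t value; };
constexpr std::int32_t kSketchProtectorInvalid = 0;
inline bool IsProtectorInvalidSketch(const SketchPropertyCell& cell) {
  return cell.value == kSketchProtectorInvalid;  // TaggedEqual(value, invalid)
}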
 
 TNode<BoolT> CodeStubAssembler::IsArrayIteratorProtectorCellInvalid() {
-  Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
-  Node* cell = LoadRoot(RootIndex::kArrayIteratorProtector);
-  Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
-  return WordEqual(cell_value, invalid);
+  TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+  TNode<PropertyCell> cell = ArrayIteratorProtectorConstant();
+  TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+  return TaggedEqual(cell_value, invalid);
 }
 
 TNode<BoolT> CodeStubAssembler::IsPromiseResolveProtectorCellInvalid() {
-  Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
-  Node* cell = LoadRoot(RootIndex::kPromiseResolveProtector);
-  Node* cell_value = LoadObjectField(cell, Cell::kValueOffset);
-  return WordEqual(cell_value, invalid);
+  TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+  TNode<Cell> cell = PromiseResolveProtectorConstant();
+  TNode<Object> cell_value = LoadObjectField(cell, Cell::kValueOffset);
+  return TaggedEqual(cell_value, invalid);
 }
 
 TNode<BoolT> CodeStubAssembler::IsPromiseThenProtectorCellInvalid() {
-  Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
-  Node* cell = LoadRoot(RootIndex::kPromiseThenProtector);
-  Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
-  return WordEqual(cell_value, invalid);
+  TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+  TNode<PropertyCell> cell = PromiseThenProtectorConstant();
+  TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+  return TaggedEqual(cell_value, invalid);
 }
 
 TNode<BoolT> CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() {
-  Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
-  Node* cell = LoadRoot(RootIndex::kArraySpeciesProtector);
-  Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
-  return WordEqual(cell_value, invalid);
+  TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+  TNode<PropertyCell> cell = ArraySpeciesProtectorConstant();
+  TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+  return TaggedEqual(cell_value, invalid);
 }
 
 TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
-  Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
-  Node* cell = LoadRoot(RootIndex::kTypedArraySpeciesProtector);
-  Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
-  return WordEqual(cell_value, invalid);
+  TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+  TNode<PropertyCell> cell = TypedArraySpeciesProtectorConstant();
+  TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+  return TaggedEqual(cell_value, invalid);
 }
 
 TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid(
-    TNode<Context> native_context) {
-  CSA_ASSERT(this, IsNativeContext(native_context));
+    TNode<NativeContext> native_context) {
   TNode<PropertyCell> cell = CAST(LoadContextElement(
       native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX));
   TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
   TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
-  return WordEqual(cell_value, invalid);
+  return TaggedEqual(cell_value, invalid);
 }
 
 TNode<BoolT> CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() {
-  Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
-  Node* cell = LoadRoot(RootIndex::kPromiseSpeciesProtector);
-  Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
-  return WordEqual(cell_value, invalid);
+  TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+  TNode<PropertyCell> cell = PromiseSpeciesProtectorConstant();
+  TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+  return TaggedEqual(cell_value, invalid);
 }
 
 TNode<BoolT> CodeStubAssembler::IsPrototypeInitialArrayPrototype(
     SloppyTNode<Context> context, SloppyTNode<Map> map) {
-  Node* const native_context = LoadNativeContext(context);
-  Node* const initial_array_prototype = LoadContextElement(
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
+  TNode<Object> const initial_array_prototype = LoadContextElement(
       native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
-  Node* proto = LoadMapPrototype(map);
-  return WordEqual(proto, initial_array_prototype);
+  TNode<HeapObject> proto = LoadMapPrototype(map);
+  return TaggedEqual(proto, initial_array_prototype);
 }
 
 TNode<BoolT> CodeStubAssembler::IsPrototypeTypedArrayPrototype(
     SloppyTNode<Context> context, SloppyTNode<Map> map) {
-  TNode<Context> const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   TNode<Object> const typed_array_prototype =
       LoadContextElement(native_context, Context::TYPED_ARRAY_PROTOTYPE_INDEX);
   TNode<HeapObject> proto = LoadMapPrototype(map);
   TNode<HeapObject> proto_of_proto = Select<HeapObject>(
       IsJSObject(proto), [=] { return LoadMapPrototype(LoadMap(proto)); },
       [=] { return NullConstant(); });
-  return WordEqual(proto_of_proto, typed_array_prototype);
+  return TaggedEqual(proto_of_proto, typed_array_prototype);
 }
 
 TNode<BoolT> CodeStubAssembler::IsFastAliasedArgumentsMap(
     TNode<Context> context, TNode<Map> map) {
-  TNode<Context> const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   TNode<Object> const arguments_map = LoadContextElement(
       native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-  return WordEqual(arguments_map, map);
+  return TaggedEqual(arguments_map, map);
 }
 
 TNode<BoolT> CodeStubAssembler::IsSlowAliasedArgumentsMap(
     TNode<Context> context, TNode<Map> map) {
-  TNode<Context> const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   TNode<Object> const arguments_map = LoadContextElement(
       native_context, Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX);
-  return WordEqual(arguments_map, map);
+  return TaggedEqual(arguments_map, map);
 }
 
 TNode<BoolT> CodeStubAssembler::IsSloppyArgumentsMap(TNode<Context> context,
                                                      TNode<Map> map) {
-  TNode<Context> const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   TNode<Object> const arguments_map =
       LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  return WordEqual(arguments_map, map);
+  return TaggedEqual(arguments_map, map);
 }
 
 TNode<BoolT> CodeStubAssembler::IsStrictArgumentsMap(TNode<Context> context,
                                                      TNode<Map> map) {
-  TNode<Context> const native_context = LoadNativeContext(context);
+  TNode<NativeContext> const native_context = LoadNativeContext(context);
   TNode<Object> const arguments_map =
       LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
-  return WordEqual(arguments_map, map);
+  return TaggedEqual(arguments_map, map);
 }
 
 TNode<BoolT> CodeStubAssembler::TaggedIsCallable(TNode<Object> object) {
@@ -6186,7 +6267,7 @@ TNode<BoolT> CodeStubAssembler::IsCallable(SloppyTNode<HeapObject> object) {
 }
 
 TNode<BoolT> CodeStubAssembler::IsCell(SloppyTNode<HeapObject> object) {
-  return WordEqual(LoadMap(object), LoadRoot(RootIndex::kCellMap));
+  return TaggedEqual(LoadMap(object), CellMapConstant());
 }
 
 TNode<BoolT> CodeStubAssembler::IsCode(SloppyTNode<HeapObject> object) {
@@ -6389,7 +6470,7 @@ TNode<BoolT> CodeStubAssembler::IsJSAsyncGeneratorObject(
 }
 
 TNode<BoolT> CodeStubAssembler::IsContext(SloppyTNode<HeapObject> object) {
-  Node* instance_type = LoadInstanceType(object);
+  TNode<Uint16T> instance_type = LoadInstanceType(object);
   return UncheckedCast<BoolT>(Word32And(
       Int32GreaterThanOrEqual(instance_type, Int32Constant(FIRST_CONTEXT_TYPE)),
       Int32LessThanOrEqual(instance_type, Int32Constant(LAST_CONTEXT_TYPE))));
@@ -6401,7 +6482,7 @@ TNode<BoolT> CodeStubAssembler::IsFixedArray(SloppyTNode<HeapObject> object) {
 
 TNode<BoolT> CodeStubAssembler::IsFixedArraySubclass(
     SloppyTNode<HeapObject> object) {
-  Node* instance_type = LoadInstanceType(object);
+  TNode<Uint16T> instance_type = LoadInstanceType(object);
   return UncheckedCast<BoolT>(
       Word32And(Int32GreaterThanOrEqual(instance_type,
                                         Int32Constant(FIRST_FIXED_ARRAY_TYPE)),
@@ -6411,7 +6492,7 @@ TNode<BoolT> CodeStubAssembler::IsFixedArraySubclass(
 
 TNode<BoolT> CodeStubAssembler::IsNotWeakFixedArraySubclass(
     SloppyTNode<HeapObject> object) {
-  Node* instance_type = LoadInstanceType(object);
+  TNode<Uint16T> instance_type = LoadInstanceType(object);
   return UncheckedCast<BoolT>(Word32Or(
       Int32LessThan(instance_type, Int32Constant(FIRST_WEAK_FIXED_ARRAY_TYPE)),
       Int32GreaterThan(instance_type,
@@ -6459,7 +6540,8 @@ TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(
   if (IsDoubleElementsKind(kind)) {
     return IsFixedDoubleArray(object);
   } else {
-    DCHECK(IsSmiOrObjectElementsKind(kind) || IsSealedElementsKind(kind));
+    DCHECK(IsSmiOrObjectElementsKind(kind) || IsSealedElementsKind(kind) ||
+           IsNonextensibleElementsKind(kind));
     return IsFixedArraySubclass(object);
   }
 }
@@ -6485,12 +6567,6 @@ TNode<BoolT> CodeStubAssembler::IsAllocationSite(
   return IsAllocationSiteInstanceType(LoadInstanceType(object));
 }
 
-TNode<BoolT> CodeStubAssembler::IsAnyHeapNumber(
-    SloppyTNode<HeapObject> object) {
-  return UncheckedCast<BoolT>(
-      Word32Or(IsMutableHeapNumber(object), IsHeapNumber(object)));
-}
-
 TNode<BoolT> CodeStubAssembler::IsHeapNumber(SloppyTNode<HeapObject> object) {
   return IsHeapNumberMap(LoadMap(object));
 }
@@ -6509,11 +6585,6 @@ TNode<BoolT> CodeStubAssembler::IsOddballInstanceType(
   return InstanceTypeEqual(instance_type, ODDBALL_TYPE);
 }
 
-TNode<BoolT> CodeStubAssembler::IsMutableHeapNumber(
-    SloppyTNode<HeapObject> object) {
-  return IsMutableHeapNumberMap(LoadMap(object));
-}
-
 TNode<BoolT> CodeStubAssembler::IsFeedbackCell(SloppyTNode<HeapObject> object) {
   return HasInstanceType(object, FEEDBACK_CELL_TYPE);
 }
@@ -6555,7 +6626,7 @@ TNode<BoolT> CodeStubAssembler::IsInternalizedStringInstanceType(
 }
 
 TNode<BoolT> CodeStubAssembler::IsUniqueName(TNode<HeapObject> object) {
-  TNode<Int32T> instance_type = LoadInstanceType(object);
+  TNode<Uint16T> instance_type = LoadInstanceType(object);
   return Select<BoolT>(
       IsInternalizedStringInstanceType(instance_type),
       [=] { return Int32TrueConstant(); },
@@ -6563,7 +6634,7 @@ TNode<BoolT> CodeStubAssembler::IsUniqueName(TNode<HeapObject> object) {
 }
 
 TNode<BoolT> CodeStubAssembler::IsUniqueNameNoIndex(TNode<HeapObject> object) {
-  TNode<Int32T> instance_type = LoadInstanceType(object);
+  TNode<Uint16T> instance_type = LoadInstanceType(object);
   return Select<BoolT>(
       IsInternalizedStringInstanceType(instance_type),
       [=] {
@@ -6608,16 +6679,16 @@ TNode<BoolT> CodeStubAssembler::IsPrivateName(SloppyTNode<Symbol> symbol) {
 
 TNode<BoolT> CodeStubAssembler::IsNativeContext(
     SloppyTNode<HeapObject> object) {
-  return WordEqual(LoadMap(object), LoadRoot(RootIndex::kNativeContextMap));
+  return TaggedEqual(LoadMap(object), NativeContextMapConstant());
 }
 
 TNode<BoolT> CodeStubAssembler::IsFixedDoubleArray(
     SloppyTNode<HeapObject> object) {
-  return WordEqual(LoadMap(object), FixedDoubleArrayMapConstant());
+  return TaggedEqual(LoadMap(object), FixedDoubleArrayMapConstant());
 }
 
 TNode<BoolT> CodeStubAssembler::IsHashTable(SloppyTNode<HeapObject> object) {
-  Node* instance_type = LoadInstanceType(object);
+  TNode<Uint16T> instance_type = LoadInstanceType(object);
   return UncheckedCast<BoolT>(
       Word32And(Int32GreaterThanOrEqual(instance_type,
                                         Int32Constant(FIRST_HASH_TABLE_TYPE)),
@@ -6848,10 +6919,9 @@ TNode<Int32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
 
   ToDirectStringAssembler to_direct(state(), string);
   to_direct.TryToDirect(&if_runtime);
-  Node* const offset = IntPtrAdd(index, to_direct.offset());
-  Node* const instance_type = to_direct.instance_type();
-
-  Node* const string_data = to_direct.PointerToData(&if_runtime);
+  TNode<IntPtrT> const offset = IntPtrAdd(index, to_direct.offset());
+  TNode<Int32T> const instance_type = to_direct.instance_type();
+  TNode<RawPtrT> const string_data = to_direct.PointerToData(&if_runtime);
 
   // Check if the {string} is a TwoByteSeqString or a OneByteSeqString.
   Branch(IsOneByteStringInstanceType(instance_type), &if_stringisonebyte,
@@ -6874,9 +6944,9 @@ TNode<Int32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
 
   BIND(&if_runtime);
   {
-    Node* result = CallRuntime(Runtime::kStringCharCodeAt, NoContextConstant(),
-                               string, SmiTag(index));
-    var_result = SmiToInt32(result);
+    TNode<Object> result = CallRuntime(
+        Runtime::kStringCharCodeAt, NoContextConstant(), string, SmiTag(index));
+    var_result = SmiToInt32(CAST(result));
     Goto(&return_result);
   }
 
@@ -6895,15 +6965,14 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
   BIND(&if_codeisonebyte);
   {
     // Load the isolate-wide single-character string cache.
-    TNode<FixedArray> cache =
-        CAST(LoadRoot(RootIndex::kSingleCharacterStringCache));
+    TNode<FixedArray> cache = SingleCharacterStringCacheConstant();
     TNode<IntPtrT> code_index = Signed(ChangeUint32ToWord(code));
 
     // Check if we have an entry for the {code} in the single character string
     // cache already.
     Label if_entryisundefined(this, Label::kDeferred),
         if_entryisnotundefined(this);
-    Node* entry = UnsafeLoadFixedArrayElement(cache, code_index);
+    TNode<Object> entry = UnsafeLoadFixedArrayElement(cache, code_index);
     Branch(IsUndefined(entry), &if_entryisundefined, &if_entryisnotundefined);
 
     BIND(&if_entryisundefined);
@@ -6929,7 +6998,7 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
   BIND(&if_codeistwobyte);
   {
     // Allocate a new SeqTwoByteString for {code}.
-    Node* result = AllocateSeqTwoByteString(1);
+    TNode<String> result = AllocateSeqTwoByteString(1);
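     // The -kHeapObjectTag offset converts the tagged {result} pointer into
     // the raw address of the first (and only) code unit.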
     StoreNoWriteBarrier(
         MachineRepresentation::kWord16, result,
         IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag), code);
@@ -6960,7 +7029,7 @@ TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters(
   BIND(&one_byte_sequential);
   {
     TNode<String> result = AllocateSeqOneByteString(
-        NoContextConstant(), Unsigned(TruncateIntPtrToInt32(character_count)));
+        Unsigned(TruncateIntPtrToInt32(character_count)));
     CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
                          character_count, String::ONE_BYTE_ENCODING,
                          String::ONE_BYTE_ENCODING);
@@ -6972,7 +7041,7 @@ TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters(
   BIND(&two_byte_sequential);
   {
     TNode<String> result = AllocateSeqTwoByteString(
-        NoContextConstant(), Unsigned(TruncateIntPtrToInt32(character_count)));
+        Unsigned(TruncateIntPtrToInt32(character_count)));
     CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
                          character_count, String::TWO_BYTE_ENCODING,
                          String::TWO_BYTE_ENCODING);
@@ -7012,7 +7081,7 @@ TNode<String> CodeStubAssembler::SubString(TNode<String> string,
 
   TNode<String> direct_string = to_direct.TryToDirect(&runtime);
   TNode<IntPtrT> offset = IntPtrAdd(from, to_direct.offset());
-  Node* const instance_type = to_direct.instance_type();
+  TNode<Int32T> const instance_type = to_direct.instance_type();
 
   // The subject string can only be an external or a sequential string of
   // either encoding at this point.
@@ -7070,7 +7139,8 @@ TNode<String> CodeStubAssembler::SubString(TNode<String> string,
   // Handle external string.
   BIND(&external_string);
   {
-    Node* const fake_sequential_string = to_direct.PointerToString(&runtime);
+    TNode<RawPtrT> const fake_sequential_string =
+        to_direct.PointerToString(&runtime);
 
     var_result = AllocAndCopyStringCharacters(
         fake_sequential_string, instance_type, offset, substr_length);
@@ -7125,21 +7195,13 @@ TNode<String> CodeStubAssembler::SubString(TNode<String> string,
 }
 
 ToDirectStringAssembler::ToDirectStringAssembler(
-    compiler::CodeAssemblerState* state, Node* string, Flags flags)
+    compiler::CodeAssemblerState* state, TNode<String> string, Flags flags)
     : CodeStubAssembler(state),
-      var_string_(this, MachineRepresentation::kTagged, string),
-      var_instance_type_(this, MachineRepresentation::kWord32),
-      var_offset_(this, MachineType::PointerRepresentation()),
-      var_is_external_(this, MachineRepresentation::kWord32),
-      flags_(flags) {
-  CSA_ASSERT(this, TaggedIsNotSmi(string));
-  CSA_ASSERT(this, IsString(string));
-
-  var_string_.Bind(string);
-  var_offset_.Bind(IntPtrConstant(0));
-  var_instance_type_.Bind(LoadInstanceType(string));
-  var_is_external_.Bind(Int32Constant(0));
-}
+      var_string_(string, this),
+      var_instance_type_(LoadInstanceType(string), this),
+      var_offset_(IntPtrConstant(0), this),
+      var_is_external_(Int32Constant(0), this),
+      flags_(flags) {}
 
 TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
   VariableList vars({&var_string_, &var_offset_, &var_instance_type_}, zone());
@@ -7165,7 +7227,7 @@ TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
     };
     STATIC_ASSERT(arraysize(values) == arraysize(labels));
 
-    Node* const representation = Word32And(
+    TNode<Int32T> const representation = Word32And(
         var_instance_type_.value(), Int32Constant(kStringRepresentationMask));
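     // Each pass strips one level of indirection: a flat cons string is
     // replaced by its first part, a thin string by its actual string, and
     // a sliced string adds its offset and continues with the parent, until
     // the result is sequential or external.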
     Switch(representation, if_bailout, values, labels, arraysize(values));
   }
@@ -7174,13 +7236,15 @@ TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
   // Flat cons strings have an empty second part.
   BIND(&if_iscons);
   {
-    Node* const string = var_string_.value();
-    GotoIfNot(IsEmptyString(LoadObjectField(string, ConsString::kSecondOffset)),
+    TNode<String> const string = var_string_.value();
+    GotoIfNot(IsEmptyString(
+                  LoadObjectField<String>(string, ConsString::kSecondOffset)),
               if_bailout);
 
-    Node* const lhs = LoadObjectField(string, ConsString::kFirstOffset);
-    var_string_.Bind(lhs);
-    var_instance_type_.Bind(LoadInstanceType(lhs));
+    TNode<String> const lhs =
+        LoadObjectField<String>(string, ConsString::kFirstOffset);
+    var_string_ = lhs;
+    var_instance_type_ = LoadInstanceType(lhs);
 
     Goto(&dispatch);
   }
@@ -7191,14 +7255,15 @@ TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
     if (!FLAG_string_slices || (flags_ & kDontUnpackSlicedStrings)) {
       Goto(if_bailout);
     } else {
-      Node* const string = var_string_.value();
-      Node* const sliced_offset =
+      TNode<String> const string = var_string_.value();
+      TNode<IntPtrT> const sliced_offset =
           LoadAndUntagObjectField(string, SlicedString::kOffsetOffset);
-      var_offset_.Bind(IntPtrAdd(var_offset_.value(), sliced_offset));
+      var_offset_ = IntPtrAdd(var_offset_.value(), sliced_offset);
 
-      Node* const parent = LoadObjectField(string, SlicedString::kParentOffset);
-      var_string_.Bind(parent);
-      var_instance_type_.Bind(LoadInstanceType(parent));
+      TNode<String> const parent =
+          LoadObjectField<String>(string, SlicedString::kParentOffset);
+      var_string_ = parent;
+      var_instance_type_ = LoadInstanceType(parent);
 
       Goto(&dispatch);
     }
@@ -7207,24 +7272,24 @@ TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
   // Thin string. Fetch the actual string.
   BIND(&if_isthin);
   {
-    Node* const string = var_string_.value();
-    Node* const actual_string =
-        LoadObjectField(string, ThinString::kActualOffset);
-    Node* const actual_instance_type = LoadInstanceType(actual_string);
+    TNode<String> const string = var_string_.value();
+    TNode<String> const actual_string =
+        LoadObjectField<String>(string, ThinString::kActualOffset);
+    TNode<Uint16T> const actual_instance_type = LoadInstanceType(actual_string);
 
-    var_string_.Bind(actual_string);
-    var_instance_type_.Bind(actual_instance_type);
+    var_string_ = actual_string;
+    var_instance_type_ = actual_instance_type;
 
     Goto(&dispatch);
   }
 
   // External string.
   BIND(&if_isexternal);
-  var_is_external_.Bind(Int32Constant(1));
+  var_is_external_ = Int32Constant(1);
   Goto(&out);
 
   BIND(&out);
-  return CAST(var_string_.value());
+  return var_string_.value();
 }
 
 TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
@@ -7253,7 +7318,7 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
     GotoIf(IsUncachedExternalStringInstanceType(var_instance_type_.value()),
            if_bailout);
 
-    TNode<String> string = CAST(var_string_.value());
+    TNode<String> string = var_string_.value();
     TNode<IntPtrT> result =
         LoadObjectField<IntPtrT>(string, ExternalString::kResourceDataOffset);
     if (ptr_kind == PTR_TO_STRING) {
@@ -7268,35 +7333,33 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
   return var_result.value();
 }
 
-void CodeStubAssembler::BranchIfCanDerefIndirectString(Node* string,
-                                                       Node* instance_type,
-                                                       Label* can_deref,
-                                                       Label* cannot_deref) {
-  CSA_ASSERT(this, IsString(string));
-  Node* representation =
+void CodeStubAssembler::BranchIfCanDerefIndirectString(
+    TNode<String> string, TNode<Int32T> instance_type, Label* can_deref,
+    Label* cannot_deref) {
+  TNode<Int32T> representation =
       Word32And(instance_type, Int32Constant(kStringRepresentationMask));
   GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), can_deref);
   GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)),
          cannot_deref);
   // Cons string.
-  Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
+  TNode<String> rhs =
+      LoadObjectField<String>(string, ConsString::kSecondOffset);
   GotoIf(IsEmptyString(rhs), can_deref);
   Goto(cannot_deref);
 }
 
-Node* CodeStubAssembler::DerefIndirectString(TNode<String> string,
-                                             TNode<Int32T> instance_type,
-                                             Label* cannot_deref) {
+TNode<String> CodeStubAssembler::DerefIndirectString(
+    TNode<String> string, TNode<Int32T> instance_type, Label* cannot_deref) {
   Label deref(this);
   BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref);
   BIND(&deref);
   STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
                 static_cast<int>(ConsString::kFirstOffset));
-  return LoadObjectField(string, ThinString::kActualOffset);
+  return LoadObjectField<String>(string, ThinString::kActualOffset);
 }
 
-void CodeStubAssembler::DerefIndirectString(Variable* var_string,
-                                            Node* instance_type) {
+void CodeStubAssembler::DerefIndirectString(TVariable<String>* var_string,
+                                            TNode<Int32T> instance_type) {
 #ifdef DEBUG
   Label can_deref(this), cannot_deref(this);
   BranchIfCanDerefIndirectString(var_string->value(), instance_type, &can_deref,
@@ -7309,12 +7372,12 @@ void CodeStubAssembler::DerefIndirectString(Variable* var_string,
 
   STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
                 static_cast<int>(ConsString::kFirstOffset));
-  var_string->Bind(
-      LoadObjectField(var_string->value(), ThinString::kActualOffset));
+  *var_string =
+      LoadObjectField<String>(var_string->value(), ThinString::kActualOffset);
 }
 
-void CodeStubAssembler::MaybeDerefIndirectString(Variable* var_string,
-                                                 Node* instance_type,
+void CodeStubAssembler::MaybeDerefIndirectString(TVariable<String>* var_string,
+                                                 TNode<Int32T> instance_type,
                                                  Label* did_deref,
                                                  Label* cannot_deref) {
   Label deref(this);
@@ -7328,11 +7391,10 @@ void CodeStubAssembler::MaybeDerefIndirectString(Variable* var_string,
   }
 }
 
-void CodeStubAssembler::MaybeDerefIndirectStrings(Variable* var_left,
-                                                  Node* left_instance_type,
-                                                  Variable* var_right,
-                                                  Node* right_instance_type,
-                                                  Label* did_something) {
+void CodeStubAssembler::MaybeDerefIndirectStrings(
+    TVariable<String>* var_left, TNode<Int32T> left_instance_type,
+    TVariable<String>* var_right, TNode<Int32T> right_instance_type,
+    Label* did_something) {
   Label did_nothing_left(this), did_something_left(this),
       didnt_do_anything(this);
   MaybeDerefIndirectString(var_left, left_instance_type, &did_something_left,
@@ -7397,13 +7459,13 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
     BIND(&non_cons);
 
     Comment("Full string concatenate");
-    Node* left_instance_type = LoadInstanceType(var_left.value());
-    Node* right_instance_type = LoadInstanceType(var_right.value());
+    TNode<Int32T> left_instance_type = LoadInstanceType(var_left.value());
+    TNode<Int32T> right_instance_type = LoadInstanceType(var_right.value());
     // Compute intersection and difference of instance types.
 
-    Node* ored_instance_types =
+    TNode<Int32T> ored_instance_types =
         Word32Or(left_instance_type, right_instance_type);
-    Node* xored_instance_types =
+    TNode<Word32T> xored_instance_types =
         Word32Xor(left_instance_type, right_instance_type);
 
     // Check if both strings have the same encoding and both are sequential.
@@ -7419,7 +7481,7 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
                        Int32Constant(kTwoByteStringTag)),
            &two_byte);
     // One-byte sequential string case
-    result = AllocateSeqOneByteString(context, new_length);
+    result = AllocateSeqOneByteString(new_length);
     CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
                          IntPtrConstant(0), word_left_length,
                          String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
@@ -7431,7 +7493,7 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
     BIND(&two_byte);
     {
       // Two-byte sequential string case
-      result = AllocateSeqTwoByteString(context, new_length);
+      result = AllocateSeqTwoByteString(new_length);
       CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
                            IntPtrConstant(0), word_left_length,
                            String::TWO_BYTE_ENCODING,
@@ -7484,7 +7546,7 @@ TNode<String> CodeStubAssembler::StringFromSingleUTF16EncodedCodePoint(
 
   BIND(&if_isword32);
   {
-    Node* value = AllocateSeqTwoByteString(2);
+    TNode<String> value = AllocateSeqTwoByteString(2);
     StoreNoWriteBarrier(
         MachineRepresentation::kWord32, value,
         IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
@@ -7530,12 +7592,12 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
       done(this, &result);
 
   // Load the number string cache.
-  Node* number_string_cache = LoadRoot(RootIndex::kNumberStringCache);
+  TNode<FixedArray> number_string_cache = NumberStringCacheConstant();
 
   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
   // TODO(ishell): cleanup mask handling.
-  Node* mask =
+  TNode<IntPtrT> mask =
       BitcastTaggedSignedToWord(LoadFixedArrayBaseLength(number_string_cache));
   TNode<IntPtrT> one = IntPtrConstant(1);
   mask = IntPtrSub(mask, one);
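   // Note that {mask} is still in Smi-tagged scale here; the heap number
   // path shifts the tag out with WordSar, while the Smi path keeps it.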
@@ -7546,6 +7608,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
 
   BIND(&if_heap_number);
   {
+    Comment("NumberToString - HeapNumber");
     TNode<HeapNumber> heap_number_input = CAST(input);
     // Try normalizing the HeapNumber.
     TryHeapNumberToSmi(heap_number_input, smi_input, &if_smi);
@@ -7556,42 +7619,44 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
     TNode<Int32T> high = LoadObjectField<Int32T>(
         heap_number_input, HeapNumber::kValueOffset + kIntSize);
     TNode<Word32T> hash = Word32Xor(low, high);
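     // Cache entries occupy two slots (key, value), hence the shift left by
     // one when turning the hash into an element index.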
-    TNode<WordT> word_hash = WordShl(ChangeInt32ToIntPtr(hash), one);
+    TNode<IntPtrT> word_hash = WordShl(ChangeInt32ToIntPtr(hash), one);
     TNode<WordT> index =
         WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant()));
 
     // Cache entry's key must be a heap number.
-    Node* number_key =
-        UnsafeLoadFixedArrayElement(CAST(number_string_cache), index);
+    TNode<Object> number_key =
+        UnsafeLoadFixedArrayElement(number_string_cache, index);
     GotoIf(TaggedIsSmi(number_key), &runtime);
-    GotoIfNot(IsHeapNumber(number_key), &runtime);
+    TNode<HeapObject> number_key_heap_object = CAST(number_key);
+    GotoIfNot(IsHeapNumber(number_key_heap_object), &runtime);
 
     // Cache entry's key must match the heap number value we're looking for.
-    Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
-                                        MachineType::Int32());
-    Node* high_compare = LoadObjectField(
-        number_key, HeapNumber::kValueOffset + kIntSize, MachineType::Int32());
+    TNode<Int32T> low_compare = LoadObjectField<Int32T>(
+        number_key_heap_object, HeapNumber::kValueOffset);
+    TNode<Int32T> high_compare = LoadObjectField<Int32T>(
+        number_key_heap_object, HeapNumber::kValueOffset + kIntSize);
     GotoIfNot(Word32Equal(low, low_compare), &runtime);
     GotoIfNot(Word32Equal(high, high_compare), &runtime);
 
     // Heap number match, return value from cache entry.
-    result = CAST(UnsafeLoadFixedArrayElement(CAST(number_string_cache), index,
-                                              kTaggedSize));
+    result = CAST(
+        UnsafeLoadFixedArrayElement(number_string_cache, index, kTaggedSize));
     Goto(&done);
   }
 
   BIND(&if_smi);
   {
+    Comment("NumberToString - Smi");
     // Load the smi key, make sure it matches the smi we're looking for.
-    Node* smi_index = BitcastWordToTagged(WordAnd(
+    TNode<Object> smi_index = BitcastWordToTagged(WordAnd(
         WordShl(BitcastTaggedSignedToWord(smi_input.value()), one), mask));
-    Node* smi_key = UnsafeLoadFixedArrayElement(CAST(number_string_cache),
-                                                smi_index, 0, SMI_PARAMETERS);
-    GotoIf(WordNotEqual(smi_key, smi_input.value()), &runtime);
+    TNode<Object> smi_key = UnsafeLoadFixedArrayElement(
+        number_string_cache, smi_index, 0, SMI_PARAMETERS);
+    GotoIf(TaggedNotEqual(smi_key, smi_input.value()), &runtime);
 
     // Smi match, return value from cache entry.
-    result = CAST(UnsafeLoadFixedArrayElement(
-        CAST(number_string_cache), smi_index, kTaggedSize, SMI_PARAMETERS));
+    result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, smi_index,
+                                              kTaggedSize, SMI_PARAMETERS));
     Goto(&done);
   }
 
@@ -7624,7 +7689,7 @@ Node* CodeStubAssembler::NonNumberToNumberOrNumeric(
     Node* input = var_input.value();
 
     // Dispatch on the {input} instance type.
-    Node* input_instance_type = LoadInstanceType(input);
+    TNode<Uint16T> input_instance_type = LoadInstanceType(input);
     Label if_inputisstring(this), if_inputisoddball(this),
         if_inputisbigint(this), if_inputisreceiver(this, Label::kDeferred),
         if_inputisother(this, Label::kDeferred);
@@ -7671,7 +7736,7 @@ Node* CodeStubAssembler::NonNumberToNumberOrNumeric(
       // using the ToPrimitive type conversion, preferably yielding a Number.
       Callable callable = CodeFactory::NonPrimitiveToPrimitive(
           isolate(), ToPrimitiveHint::kNumber);
-      Node* result = CallStub(callable, context, input);
+      TNode<Object> result = CallStub(callable, context, input);
 
       // Check if the {result} is already a Number/Numeric.
       Label if_done(this), if_notdone(this);
@@ -7833,9 +7898,9 @@ void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
   var_numeric->Bind(value);
   Label if_smi(this), if_heapnumber(this), if_bigint(this), if_oddball(this);
   GotoIf(TaggedIsSmi(value), &if_smi);
-  Node* map = LoadMap(value);
+  TNode<Map> map = LoadMap(value);
   GotoIf(IsHeapNumberMap(map), &if_heapnumber);
-  Node* instance_type = LoadMapInstanceType(map);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(map);
   GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
 
   // {value} is not a Numeric yet.
@@ -7865,8 +7930,9 @@ void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
 // ES#sec-touint32
 TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
                                           SloppyTNode<Object> input) {
-  Node* const float_zero = Float64Constant(0.0);
-  Node* const float_two_32 = Float64Constant(static_cast<double>(1ULL << 32));
+  TNode<Float64T> const float_zero = Float64Constant(0.0);
+  TNode<Float64T> const float_two_32 =
+      Float64Constant(static_cast<double>(1ULL << 32));
 
   Label out(this);
 
@@ -7881,7 +7947,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
     BIND(&next);
   }
 
-  Node* const number = ToNumber(context, input);
+  TNode<Number> const number = ToNumber(context, input);
   var_result.Bind(number);
 
   // Perhaps we have a positive smi now.
@@ -7896,8 +7962,8 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
 
   BIND(&if_isnegativesmi);
   {
-    Node* const uint32_value = SmiToInt32(number);
-    Node* float64_value = ChangeUint32ToFloat64(uint32_value);
+    TNode<Int32T> const uint32_value = SmiToInt32(CAST(number));
+    TNode<Float64T> float64_value = ChangeUint32ToFloat64(uint32_value);
     var_result.Bind(AllocateHeapNumberWithValue(float64_value));
     Goto(&out);
   }
@@ -7905,7 +7971,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
   BIND(&if_isheapnumber);
   {
     Label return_zero(this);
-    Node* const value = LoadHeapNumberValue(number);
+    TNode<Float64T> const value = LoadHeapNumberValue(CAST(number));
 
     {
       // +-0.
@@ -7924,7 +7990,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
     {
       // +Infinity.
       Label next(this);
-      Node* const positive_infinity =
+      TNode<Float64T> const positive_infinity =
           Float64Constant(std::numeric_limits<double>::infinity());
       Branch(Float64Equal(value, positive_infinity), &return_zero, &next);
       BIND(&next);
@@ -7933,7 +7999,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
     {
       // -Infinity.
       Label next(this);
-      Node* const negative_infinity =
+      TNode<Float64T> const negative_infinity =
           Float64Constant(-1.0 * std::numeric_limits<double>::infinity());
       Branch(Float64Equal(value, negative_infinity), &return_zero, &next);
       BIND(&next);
@@ -7944,12 +8010,12 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
     // * Let int32bit be int modulo 2^32.
     // * Return int32bit.
     {
-      Node* x = Float64Trunc(value);
+      TNode<Float64T> x = Float64Trunc(value);
       x = Float64Mod(x, float_two_32);
       x = Float64Add(x, float_two_32);
       x = Float64Mod(x, float_two_32);
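       // The Float64Add between the two Mods maps a negative remainder into
       // [0, 2^32), yielding the spec's non-negative modulo result.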
 
-      Node* const result = ChangeFloat64ToTagged(x);
+      TNode<Number> const result = ChangeFloat64ToTagged(x);
       var_result.Bind(result);
       Goto(&out);
     }
@@ -7981,31 +8047,6 @@ TNode<String> CodeStubAssembler::ToString_Inline(SloppyTNode<Context> context,
   return CAST(var_result.value());
 }
 
-Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
-  Label if_isreceiver(this, Label::kDeferred), if_isnotreceiver(this);
-  VARIABLE(result, MachineRepresentation::kTagged);
-  Label done(this, &result);
-
-  BranchIfJSReceiver(input, &if_isreceiver, &if_isnotreceiver);
-
-  BIND(&if_isreceiver);
-  {
-    // Convert {input} to a primitive first passing Number hint.
-    Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
-    result.Bind(CallStub(callable, context, input));
-    Goto(&done);
-  }
-
-  BIND(&if_isnotreceiver);
-  {
-    result.Bind(input);
-    Goto(&done);
-  }
-
-  BIND(&done);
-  return result.value();
-}
-
 TNode<JSReceiver> CodeStubAssembler::ToObject(SloppyTNode<Context> context,
                                               SloppyTNode<Object> input) {
   return CAST(CallBuiltin(Builtins::kToObject, context, input));
@@ -8152,7 +8193,7 @@ TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
     {
       TNode<HeapNumber> arg_hn = CAST(arg);
       // Load the floating-point value of {arg}.
-      Node* arg_value = LoadHeapNumberValue(arg_hn);
+      TNode<Float64T> arg_value = LoadHeapNumberValue(arg_hn);
 
       // Check if {arg} is NaN.
       GotoIfNot(Float64Equal(arg_value, arg_value), &return_zero);
@@ -8214,7 +8255,7 @@ TNode<WordT> CodeStubAssembler::UpdateWord(TNode<WordT> word,
 
 void CodeStubAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
-    Node* counter_address =
+    TNode<ExternalReference> counter_address =
         ExternalConstant(ExternalReference::Create(counter));
     StoreNoWriteBarrier(MachineRepresentation::kWord32, counter_address,
                         Int32Constant(value));
@@ -8224,7 +8265,7 @@ void CodeStubAssembler::SetCounter(StatsCounter* counter, int value) {
 void CodeStubAssembler::IncrementCounter(StatsCounter* counter, int delta) {
   DCHECK_GT(delta, 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
-    Node* counter_address =
+    TNode<ExternalReference> counter_address =
         ExternalConstant(ExternalReference::Create(counter));
     // This operation has to be exactly 32-bit wide in case the external
     // reference table redirects the counter to a uint32_t dummy_stats_counter_
@@ -8238,7 +8279,7 @@ void CodeStubAssembler::IncrementCounter(StatsCounter* counter, int delta) {
 void CodeStubAssembler::DecrementCounter(StatsCounter* counter, int delta) {
   DCHECK_GT(delta, 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
-    Node* counter_address =
+    TNode<ExternalReference> counter_address =
         ExternalConstant(ExternalReference::Create(counter));
     // This operation has to be exactly 32-bit wide in case the external
     // reference table redirects the counter to a uint32_t dummy_stats_counter_
@@ -8277,17 +8318,17 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
   Goto(if_keyisindex);
 
   BIND(&if_keyisnotindex);
-  Node* key_map = LoadMap(key);
+  TNode<Map> key_map = LoadMap(key);
   var_unique->Bind(key);
   // Symbols are unique.
   GotoIf(IsSymbolMap(key_map), if_keyisunique);
-  Node* key_instance_type = LoadMapInstanceType(key_map);
+  TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
   // Miss if |key| is not a String.
   STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
   GotoIfNot(IsStringInstanceType(key_instance_type), &if_keyisother);
 
   // |key| is a String. Check if it has a cached array index.
-  Node* hash = LoadNameHashField(key);
+  TNode<Uint32T> hash = LoadNameHashField(key);
   GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
          &if_hascachedindex);
   // No cached array index. If the string knows that it contains an index,
@@ -8305,7 +8346,8 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
   Goto(if_keyisunique);
 
   BIND(&if_thinstring);
-  var_unique->Bind(LoadObjectField(key, ThinString::kActualOffset));
+  var_unique->Bind(
+      LoadObjectField<String>(CAST(key), ThinString::kActualOffset));
   Goto(if_keyisunique);
 
   BIND(&if_hascachedindex);
@@ -8324,9 +8366,9 @@ void CodeStubAssembler::TryInternalizeString(
   DCHECK(var_index->rep() == MachineType::PointerRepresentation());
   DCHECK_EQ(var_internalized->rep(), MachineRepresentation::kTagged);
   CSA_SLOW_ASSERT(this, IsString(string));
-  Node* function =
+  TNode<ExternalReference> function =
       ExternalConstant(ExternalReference::try_internalize_string_function());
-  Node* const isolate_ptr =
+  TNode<ExternalReference> const isolate_ptr =
       ExternalConstant(ExternalReference::isolate_address(isolate()));
   Node* result =
       CallCFunction(function, MachineType::AnyTagged(),
@@ -8334,10 +8376,10 @@ void CodeStubAssembler::TryInternalizeString(
                     std::make_pair(MachineType::AnyTagged(), string));
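   // {result} is either the internalized string itself, or a Smi holding an
   // array index or one of the ResultSentinel values handled below.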
   Label internalized(this);
   GotoIf(TaggedIsNotSmi(result), &internalized);
-  Node* word_result = SmiUntag(result);
-  GotoIf(WordEqual(word_result, IntPtrConstant(ResultSentinel::kNotFound)),
+  TNode<IntPtrT> word_result = SmiUntag(result);
+  GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kNotFound)),
          if_not_internalized);
-  GotoIf(WordEqual(word_result, IntPtrConstant(ResultSentinel::kUnsupported)),
+  GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kUnsupported)),
          if_bailout);
   var_index->Bind(word_result);
   Goto(if_index);
@@ -8461,8 +8503,8 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrMax(SloppyTNode<IntPtrT> left,
                                             SloppyTNode<IntPtrT> right) {
   intptr_t left_constant;
   intptr_t right_constant;
-  if (ToIntPtrConstant(left, left_constant) &&
-      ToIntPtrConstant(right, right_constant)) {
+  if (ToIntPtrConstant(left, &left_constant) &&
+      ToIntPtrConstant(right, &right_constant)) {
     return IntPtrConstant(std::max(left_constant, right_constant));
   }
   return SelectConstant<IntPtrT>(IntPtrGreaterThanOrEqual(left, right), left,
@@ -8473,8 +8515,8 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrMin(SloppyTNode<IntPtrT> left,
                                             SloppyTNode<IntPtrT> right) {
   intptr_t left_constant;
   intptr_t right_constant;
-  if (ToIntPtrConstant(left, left_constant) &&
-      ToIntPtrConstant(right, right_constant)) {
+  if (ToIntPtrConstant(left, &left_constant) &&
+      ToIntPtrConstant(right, &right_constant)) {
     return IntPtrConstant(std::min(left_constant, right_constant));
   }
   return SelectConstant<IntPtrT>(IntPtrLessThanOrEqual(left, right), left,
@@ -8508,13 +8550,13 @@ void CodeStubAssembler::NameDictionaryLookup(
   CSA_ASSERT(this, IsUniqueName(unique_name));
 
   TNode<IntPtrT> capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
-  TNode<WordT> mask = IntPtrSub(capacity, IntPtrConstant(1));
-  TNode<WordT> hash = ChangeUint32ToWord(LoadNameHash(unique_name));
+  TNode<IntPtrT> mask = IntPtrSub(capacity, IntPtrConstant(1));
+  TNode<UintPtrT> hash = ChangeUint32ToWord(LoadNameHash(unique_name));
 
   // See Dictionary::FirstProbe().
   TNode<IntPtrT> count = IntPtrConstant(0);
   TNode<IntPtrT> entry = Signed(WordAnd(hash, mask));
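   // The capacity is a power of two, so masking with {mask} is equivalent to
   // taking the hash modulo the capacity.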
-  Node* undefined = UndefinedConstant();
+  TNode<Oddball> undefined = UndefinedConstant();
 
   // Appease the variable merging algorithm for "Goto(&loop)" below.
   *var_name_index = IntPtrConstant(0);
@@ -8533,13 +8575,13 @@ void CodeStubAssembler::NameDictionaryLookup(
 
     TNode<HeapObject> current =
         CAST(UnsafeLoadFixedArrayElement(dictionary, index));
-    GotoIf(WordEqual(current, undefined), if_not_found);
+    GotoIf(TaggedEqual(current, undefined), if_not_found);
     if (mode == kFindExisting) {
       current = LoadName<Dictionary>(current);
-      GotoIf(WordEqual(current, unique_name), if_found);
+      GotoIf(TaggedEqual(current, unique_name), if_found);
     } else {
       DCHECK_EQ(kFindInsertionIndex, mode);
-      GotoIf(WordEqual(current, TheHoleConstant()), if_not_found);
+      GotoIf(TaggedEqual(current, TheHoleConstant()), if_not_found);
     }
 
     // See Dictionary::NextProbe().
@@ -8563,7 +8605,7 @@ template V8_EXPORT_PRIVATE void CodeStubAssembler::NameDictionaryLookup<
 
 Node* CodeStubAssembler::ComputeUnseededHash(Node* key) {
   // See v8::internal::ComputeUnseededHash()
-  Node* hash = TruncateIntPtrToInt32(key);
+  TNode<Word32T> hash = TruncateIntPtrToInt32(key);
   hash = Int32Add(Word32Xor(hash, Int32Constant(0xFFFFFFFF)),
                   Word32Shl(hash, Int32Constant(15)));
   hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(12)));
@@ -8575,9 +8617,9 @@ Node* CodeStubAssembler::ComputeUnseededHash(Node* key) {
 }
 
 Node* CodeStubAssembler::ComputeSeededHash(Node* key) {
-  Node* const function_addr =
+  TNode<ExternalReference> const function_addr =
       ExternalConstant(ExternalReference::compute_integer_hash());
-  Node* const isolate_ptr =
+  TNode<ExternalReference> const isolate_ptr =
       ExternalConstant(ExternalReference::isolate_address(isolate()));
 
   MachineType type_ptr = MachineType::Pointer();
@@ -8597,17 +8639,17 @@ void CodeStubAssembler::NumberDictionaryLookup(
   Comment("NumberDictionaryLookup");
 
   TNode<IntPtrT> capacity = SmiUntag(GetCapacity<NumberDictionary>(dictionary));
-  TNode<WordT> mask = IntPtrSub(capacity, IntPtrConstant(1));
+  TNode<IntPtrT> mask = IntPtrSub(capacity, IntPtrConstant(1));
 
-  TNode<WordT> hash = ChangeUint32ToWord(ComputeSeededHash(intptr_index));
+  TNode<UintPtrT> hash = ChangeUint32ToWord(ComputeSeededHash(intptr_index));
   Node* key_as_float64 = RoundIntPtrToFloat64(intptr_index);
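   // Dictionary keys may be stored as Smis or as HeapNumbers; precompute the
   // double form of {intptr_index} for the HeapNumber comparison below.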
 
   // See Dictionary::FirstProbe().
   TNode<IntPtrT> count = IntPtrConstant(0);
   TNode<IntPtrT> entry = Signed(WordAnd(hash, mask));
 
-  Node* undefined = UndefinedConstant();
-  Node* the_hole = TheHoleConstant();
+  TNode<Oddball> undefined = UndefinedConstant();
+  TNode<Oddball> the_hole = TheHoleConstant();
 
   TVARIABLE(IntPtrT, var_count, count);
   Variable* loop_vars[] = {&var_count, var_entry};
@@ -8619,22 +8661,22 @@ void CodeStubAssembler::NumberDictionaryLookup(
     TNode<IntPtrT> entry = var_entry->value();
 
     TNode<IntPtrT> index = EntryToIndex<NumberDictionary>(entry);
-    Node* current = UnsafeLoadFixedArrayElement(dictionary, index);
-    GotoIf(WordEqual(current, undefined), if_not_found);
+    TNode<Object> current = UnsafeLoadFixedArrayElement(dictionary, index);
+    GotoIf(TaggedEqual(current, undefined), if_not_found);
     Label next_probe(this);
     {
       Label if_currentissmi(this), if_currentisnotsmi(this);
       Branch(TaggedIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
       BIND(&if_currentissmi);
       {
-        Node* current_value = SmiUntag(current);
+        TNode<IntPtrT> current_value = SmiUntag(CAST(current));
         Branch(WordEqual(current_value, intptr_index), if_found, &next_probe);
       }
       BIND(&if_currentisnotsmi);
       {
-        GotoIf(WordEqual(current, the_hole), &next_probe);
+        GotoIf(TaggedEqual(current, the_hole), &next_probe);
        // Otherwise {current} must be a HeapNumber key.
-        Node* current_value = LoadHeapNumberValue(current);
+        TNode<Float64T> current_value = LoadHeapNumberValue(CAST(current));
         Branch(Float64Equal(current_value, key_as_float64), if_found,
                &next_probe);
       }
@@ -8823,7 +8865,7 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
             LoadArrayElement(array, Array::kHeaderSize, name_index);
         TNode<Name> candidate_name = CAST(element);
         *var_name_index = name_index;
-        GotoIf(WordEqual(candidate_name, unique_name), if_found);
+        GotoIf(TaggedEqual(candidate_name, unique_name), if_found);
       },
       -Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre);
   Goto(if_not_found);
@@ -8968,7 +9010,7 @@ void CodeStubAssembler::LookupBinary(TNode<Name> unique_name,
     TNode<Uint32T> current_hash = LoadNameHashField(current_name);
     GotoIf(Word32NotEqual(current_hash, hash), if_not_found);
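     // The binary search found the first entry with a matching hash; step
     // through any hash collisions until the name itself matches.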
     Label next(this);
-    GotoIf(WordNotEqual(current_name, unique_name), &next);
+    GotoIf(TaggedNotEqual(current_name, unique_name), &next);
     GotoIf(Uint32GreaterThanOrEqual(sort_index, number_of_valid_entries),
            if_not_found);
     *var_name_index = ToKeyIndex<Array>(sort_index);
@@ -8984,7 +9026,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
     TNode<Context> context, TNode<Map> map, TNode<JSObject> object,
     ForEachEnumerationMode mode, const ForEachKeyValueFunction& body,
     Label* bailout) {
-  TNode<Int32T> type = LoadMapInstanceType(map);
+  TNode<Uint16T> type = LoadMapInstanceType(map);
   TNode<Uint32T> bit_field3 = EnsureOnlyHasSimpleProperties(map, type, bailout);
 
   TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
@@ -9145,7 +9187,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
             // property details from preloaded |descriptors|.
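              // {var_stable} remains true only while {object} keeps its
              // original map, i.e. the callback has not modified it.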
             var_stable = Select<BoolT>(
                 var_stable.value(),
-                [=] { return WordEqual(LoadMap(object), map); },
+                [=] { return TaggedEqual(LoadMap(object), map); },
                 [=] { return Int32FalseConstant(); });
 
             Goto(&next_iteration);
@@ -9322,7 +9364,7 @@ void CodeStubAssembler::TryHasOwnProperty(Node* object, Node* map,
 Node* CodeStubAssembler::GetMethod(Node* context, Node* object,
                                    Handle<Name> name,
                                    Label* if_null_or_undefined) {
-  Node* method = GetProperty(context, object, name);
+  TNode<Object> method = GetProperty(context, object, name);
 
   GotoIf(IsUndefined(method), if_null_or_undefined);
   GotoIf(IsNull(method), if_null_or_undefined);
@@ -9344,7 +9386,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
   DCHECK_EQ(MachineRepresentation::kWord32, var_details->rep());
   DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
 
-  Node* details =
+  TNode<Uint32T> details =
       LoadDetailsByKeyIndex(descriptors, UncheckedCast<IntPtrT>(name_index));
   var_details->Bind(details);
 
@@ -9357,21 +9399,22 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
     Node* name_index, Node* details, Variable* var_value) {
   Comment("[ LoadPropertyFromFastObject");
 
-  Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
+  TNode<Uint32T> location =
+      DecodeWord32<PropertyDetails::LocationField>(details);
 
   Label if_in_field(this), if_in_descriptor(this), done(this);
   Branch(Word32Equal(location, Int32Constant(kField)), &if_in_field,
          &if_in_descriptor);
   BIND(&if_in_field);
   {
-    Node* field_index =
-        DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
-    Node* representation =
+    TNode<IntPtrT> field_index =
+        Signed(DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details));
+    TNode<Uint32T> representation =
         DecodeWord32<PropertyDetails::RepresentationField>(details);
 
     field_index =
         IntPtrAdd(field_index, LoadMapInobjectPropertiesStartInWords(map));
-    Node* instance_size_in_words = LoadMapInstanceSizeInWords(map);
+    TNode<IntPtrT> instance_size_in_words = LoadMapInstanceSizeInWords(map);
 
     Label if_inobject(this), if_backing_store(this);
     VARIABLE(var_double_value, MachineRepresentation::kFloat64);
@@ -9381,7 +9424,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
     BIND(&if_inobject);
     {
       Comment("if_inobject");
-      Node* field_offset = TimesTaggedSize(field_index);
+      TNode<IntPtrT> field_offset = TimesTaggedSize(field_index);
 
       Label if_double(this), if_tagged(this);
       Branch(Word32NotEqual(representation,
@@ -9398,8 +9441,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
           var_double_value.Bind(
               LoadObjectField(object, field_offset, MachineType::Float64()));
         } else {
-          Node* mutable_heap_number = LoadObjectField(object, field_offset);
-          var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+          TNode<HeapNumber> heap_number =
+              CAST(LoadObjectField(object, field_offset));
+          var_double_value.Bind(LoadHeapNumberValue(heap_number));
         }
         Goto(&rebox_double);
       }
@@ -9408,8 +9452,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
     {
       Comment("if_backing_store");
       TNode<HeapObject> properties = LoadFastProperties(object);
-      field_index = IntPtrSub(field_index, instance_size_in_words);
-      Node* value = LoadPropertyArrayElement(CAST(properties), field_index);
+      field_index = Signed(IntPtrSub(field_index, instance_size_in_words));
+      TNode<Object> value =
+          LoadPropertyArrayElement(CAST(properties), field_index);
 
       Label if_double(this), if_tagged(this);
       Branch(Word32NotEqual(representation,
@@ -9422,14 +9467,15 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
       }
       BIND(&if_double);
       {
-        var_double_value.Bind(LoadHeapNumberValue(value));
+        var_double_value.Bind(LoadHeapNumberValue(CAST(value)));
         Goto(&rebox_double);
       }
     }
     BIND(&rebox_double);
     {
       Comment("rebox_double");
-      Node* heap_number = AllocateHeapNumberWithValue(var_double_value.value());
+      TNode<HeapNumber> heap_number =
+          AllocateHeapNumberWithValue(var_double_value.value());
       var_value->Bind(heap_number);
       Goto(&done);
     }
@@ -9467,15 +9513,16 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
   Comment("[ LoadPropertyFromGlobalDictionary");
   CSA_ASSERT(this, IsGlobalDictionary(dictionary));
 
-  Node* property_cell = LoadFixedArrayElement(CAST(dictionary), name_index);
-  CSA_ASSERT(this, IsPropertyCell(property_cell));
+  TNode<PropertyCell> property_cell =
+      CAST(LoadFixedArrayElement(CAST(dictionary), name_index));
 
-  Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
-  GotoIf(WordEqual(value, TheHoleConstant()), if_deleted);
+  TNode<Object> value =
+      LoadObjectField(property_cell, PropertyCell::kValueOffset);
+  GotoIf(TaggedEqual(value, TheHoleConstant()), if_deleted);
 
   var_value->Bind(value);
 
-  Node* details = LoadAndUntagToWord32ObjectField(
+  TNode<Int32T> details = LoadAndUntagToWord32ObjectField(
       property_cell, PropertyCell::kPropertyDetailsRawOffset);
   var_details->Bind(details);
 
@@ -9491,7 +9538,7 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
   VARIABLE(var_value, MachineRepresentation::kTagged, value);
   Label done(this), if_accessor_info(this, Label::kDeferred);
 
-  Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
+  TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
   GotoIf(Word32Equal(kind, Int32Constant(kData)), &done);
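   // Data properties are returned unchanged; otherwise {value} holds an
   // AccessorPair or AccessorInfo that is unpacked below.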
 
   // Accessor case.
@@ -9501,10 +9548,10 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
   {
     if (mode == kCallJSGetter) {
       Node* accessor_pair = value;
-      Node* getter =
-          LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
-      Node* getter_map = LoadMap(getter);
-      Node* instance_type = LoadMapInstanceType(getter_map);
+      TNode<HeapObject> getter =
+          CAST(LoadObjectField(accessor_pair, AccessorPair::kGetterOffset));
+      TNode<Map> getter_map = LoadMap(getter);
+      TNode<Uint16T> instance_type = LoadMapInstanceType(getter_map);
       // FunctionTemplateInfo getters are not supported yet.
       GotoIf(InstanceTypeEqual(instance_type, FUNCTION_TEMPLATE_INFO_TYPE),
              if_bailout);
@@ -9530,8 +9577,8 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
     Label if_array(this), if_function(this), if_wrapper(this);
 
     // Dispatch based on {receiver} instance type.
-    Node* receiver_map = LoadMap(receiver);
-    Node* receiver_instance_type = LoadMapInstanceType(receiver_map);
+    TNode<Map> receiver_map = LoadMap(receiver);
+    TNode<Uint16T> receiver_instance_type = LoadMapInstanceType(receiver_map);
     GotoIf(IsJSArrayInstanceType(receiver_instance_type), &if_array);
     GotoIf(IsJSFunctionInstanceType(receiver_instance_type), &if_function);
     Branch(IsJSPrimitiveWrapperInstanceType(receiver_instance_type),
@@ -9556,9 +9603,9 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
                     LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
                 if_bailout);
 
-      GotoIfPrototypeRequiresRuntimeLookup(CAST(receiver), CAST(receiver_map),
+      GotoIfPrototypeRequiresRuntimeLookup(CAST(receiver), receiver_map,
                                            if_bailout);
-      var_value.Bind(LoadJSFunctionPrototype(receiver, if_bailout));
+      var_value.Bind(LoadJSFunctionPrototype(CAST(receiver), if_bailout));
       Goto(&done);
     }
 
@@ -9617,7 +9664,7 @@ void CodeStubAssembler::TryGetOwnProperty(
   BIND(&if_found_fast);
   {
     TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
-    Node* name_index = var_entry.value();
+    TNode<IntPtrT> name_index = var_entry.value();
 
     LoadPropertyFromFastObject(object, map, descriptors, name_index,
                                var_details, var_value);
@@ -9625,15 +9672,15 @@ void CodeStubAssembler::TryGetOwnProperty(
   }
   BIND(&if_found_dict);
   {
-    Node* dictionary = var_meta_storage.value();
-    Node* entry = var_entry.value();
+    TNode<HeapObject> dictionary = var_meta_storage.value();
+    TNode<IntPtrT> entry = var_entry.value();
     LoadPropertyFromNameDictionary(dictionary, entry, var_details, var_value);
     Goto(&if_found);
   }
   BIND(&if_found_global);
   {
-    Node* dictionary = var_meta_storage.value();
-    Node* entry = var_entry.value();
+    TNode<HeapObject> dictionary = var_meta_storage.value();
+    TNode<IntPtrT> entry = var_entry.value();
 
     LoadPropertyFromGlobalDictionary(dictionary, entry, var_details, var_value,
                                      if_not_found);
@@ -9646,8 +9693,9 @@ void CodeStubAssembler::TryGetOwnProperty(
     if (var_raw_value) {
       var_raw_value->Bind(var_value->value());
     }
-    Node* value = CallGetterIfAccessor(var_value->value(), var_details->value(),
-                                       context, receiver, if_bailout, mode);
+    TNode<Object> value =
+        CallGetterIfAccessor(var_value->value(), var_details->value(), context,
+                             receiver, if_bailout, mode);
     var_value->Bind(value);
     Goto(if_found_value);
   }
@@ -9662,7 +9710,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
   // Handle special objects in runtime.
   GotoIf(IsSpecialReceiverInstanceType(instance_type), if_bailout);
 
-  Node* elements_kind = LoadMapElementsKind(map);
+  TNode<Int32T> elements_kind = LoadMapElementsKind(map);
 
   // TODO(verwaest): Support other elements kinds as well.
   Label if_isobjectorsmi(this), if_isdouble(this), if_isdictionary(this),
@@ -9672,8 +9720,9 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
   int32_t values[] = {
       // Handled by {if_isobjectorsmi}.
       PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, HOLEY_ELEMENTS,
-      PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
-      HOLEY_FROZEN_ELEMENTS,
+      PACKED_NONEXTENSIBLE_ELEMENTS, PACKED_SEALED_ELEMENTS,
+      HOLEY_NONEXTENSIBLE_ELEMENTS, HOLEY_SEALED_ELEMENTS,
+      PACKED_FROZEN_ELEMENTS, HOLEY_FROZEN_ELEMENTS,
       // Handled by {if_isdouble}.
       PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS,
       // Handled by {if_isdictionary}.
@@ -9700,7 +9749,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
   Label* labels[] = {
       &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
       &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
-      &if_isobjectorsmi, &if_isobjectorsmi,
+      &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
+      &if_isobjectorsmi,
       &if_isdouble, &if_isdouble,
       &if_isdictionary,
       &if_isfaststringwrapper,
@@ -9731,7 +9781,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
 
     TNode<Object> element = UnsafeLoadFixedArrayElement(elements, intptr_index);
     TNode<Oddball> the_hole = TheHoleConstant();
-    Branch(WordEqual(element, the_hole), if_not_found, if_found);
+    Branch(TaggedEqual(element, the_hole), if_not_found, if_found);
   }
   BIND(&if_isdouble);
   {
@@ -9761,7 +9811,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
     CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
     Node* string = LoadJSPrimitiveWrapperValue(object);
     CSA_ASSERT(this, IsString(string));
-    Node* length = LoadStringLengthAsWord(string);
+    TNode<IntPtrT> length = LoadStringLengthAsWord(string);
     GotoIf(UintPtrLessThan(intptr_index, length), if_found);
     Goto(&if_isobjectorsmi);
   }
@@ -9770,7 +9820,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
     CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
     Node* string = LoadJSPrimitiveWrapperValue(object);
     CSA_ASSERT(this, IsString(string));
-    Node* length = LoadStringLengthAsWord(string);
+    TNode<IntPtrT> length = LoadStringLengthAsWord(string);
     GotoIf(UintPtrLessThan(intptr_index, length), if_found);
     Goto(&if_isdictionary);
   }
@@ -9829,8 +9879,8 @@ void CodeStubAssembler::TryPrototypeChainLookup(
   GotoIf(TaggedIsSmi(receiver), if_bailout);
   CSA_ASSERT(this, TaggedIsNotSmi(object));
 
-  Node* map = LoadMap(object);
-  Node* instance_type = LoadMapInstanceType(map);
+  TNode<Map> map = LoadMap(object);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(map);
   {
     Label if_objectisreceiver(this);
     STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -9851,19 +9901,18 @@ void CodeStubAssembler::TryPrototypeChainLookup(
 
   BIND(&if_iskeyunique);
   {
-    VARIABLE(var_holder, MachineRepresentation::kTagged, object);
-    VARIABLE(var_holder_map, MachineRepresentation::kTagged, map);
-    VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32,
-             instance_type);
+    TVARIABLE(HeapObject, var_holder, CAST(object));
+    TVARIABLE(Map, var_holder_map, map);
+    TVARIABLE(Int32T, var_holder_instance_type, instance_type);
 
-    Variable* merged_variables[] = {&var_holder, &var_holder_map,
-                                    &var_holder_instance_type};
-    Label loop(this, arraysize(merged_variables), merged_variables);
+    VariableList merged_variables(
+        {&var_holder, &var_holder_map, &var_holder_instance_type}, zone());
+    Label loop(this, merged_variables);
     Goto(&loop);
     BIND(&loop);
     {
-      Node* holder_map = var_holder_map.value();
-      Node* holder_instance_type = var_holder_instance_type.value();
+      TNode<Map> holder_map = var_holder_map.value();
+      TNode<Int32T> holder_instance_type = var_holder_instance_type.value();
 
       Label next_proto(this), check_integer_indexed_exotic(this);
       lookup_property_in_holder(receiver, var_holder.value(), holder_map,
@@ -9882,29 +9931,28 @@ void CodeStubAssembler::TryPrototypeChainLookup(
 
       BIND(&next_proto);
 
-      Node* proto = LoadMapPrototype(holder_map);
+      TNode<HeapObject> proto = LoadMapPrototype(holder_map);
 
       GotoIf(IsNull(proto), if_end);
 
-      Node* map = LoadMap(proto);
-      Node* instance_type = LoadMapInstanceType(map);
+      TNode<Map> map = LoadMap(proto);
+      TNode<Uint16T> instance_type = LoadMapInstanceType(map);
 
-      var_holder.Bind(proto);
-      var_holder_map.Bind(map);
-      var_holder_instance_type.Bind(instance_type);
+      var_holder = proto;
+      var_holder_map = map;
+      var_holder_instance_type = instance_type;
       Goto(&loop);
     }
   }
   BIND(&if_keyisindex);
   {
-    VARIABLE(var_holder, MachineRepresentation::kTagged, object);
-    VARIABLE(var_holder_map, MachineRepresentation::kTagged, map);
-    VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32,
-             instance_type);
+    TVARIABLE(HeapObject, var_holder, CAST(object));
+    TVARIABLE(Map, var_holder_map, map);
+    TVARIABLE(Int32T, var_holder_instance_type, instance_type);
 
-    Variable* merged_variables[] = {&var_holder, &var_holder_map,
-                                    &var_holder_instance_type};
-    Label loop(this, arraysize(merged_variables), merged_variables);
+    VariableList merged_variables(
+        {&var_holder, &var_holder_map, &var_holder_instance_type}, zone());
+    Label loop(this, merged_variables);
     Goto(&loop);
     BIND(&loop);
     {
@@ -9915,23 +9963,23 @@ void CodeStubAssembler::TryPrototypeChainLookup(
                                var_index.value(), &next_proto, if_bailout);
       BIND(&next_proto);
 
-      Node* proto = LoadMapPrototype(var_holder_map.value());
+      TNode<HeapObject> proto = LoadMapPrototype(var_holder_map.value());
 
       GotoIf(IsNull(proto), if_end);
 
-      Node* map = LoadMap(proto);
-      Node* instance_type = LoadMapInstanceType(map);
+      TNode<Map> map = LoadMap(proto);
+      TNode<Uint16T> instance_type = LoadMapInstanceType(map);
 
-      var_holder.Bind(proto);
-      var_holder_map.Bind(map);
-      var_holder_instance_type.Bind(instance_type);
+      var_holder = proto;
+      var_holder_map = map;
+      var_holder_instance_type = instance_type;
       Goto(&loop);
     }
   }
 }
 
 Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
-                                             Node* prototype) {
+                                             SloppyTNode<Object> prototype) {
   CSA_ASSERT(this, TaggedIsNotSmi(object));
   VARIABLE(var_result, MachineRepresentation::kTagged);
   Label return_false(this), return_true(this),
@@ -9946,7 +9994,7 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
     // Check if we can determine the prototype directly from the {object_map}.
     Label if_objectisdirect(this), if_objectisspecial(this, Label::kDeferred);
     Node* object_map = var_object_map.value();
-    TNode<Int32T> object_instance_type = LoadMapInstanceType(object_map);
+    TNode<Uint16T> object_instance_type = LoadMapInstanceType(object_map);
     Branch(IsSpecialReceiverInstanceType(object_instance_type),
            &if_objectisspecial, &if_objectisdirect);
     BIND(&if_objectisspecial);
@@ -9955,7 +10003,7 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
       // if we need to use the if_objectisspecial path in the runtime.
       GotoIf(InstanceTypeEqual(object_instance_type, JS_PROXY_TYPE),
              &return_runtime);
-      Node* object_bitfield = LoadMapBitField(object_map);
+      TNode<Int32T> object_bitfield = LoadMapBitField(object_map);
       int mask = Map::HasNamedInterceptorBit::kMask |
                  Map::IsAccessCheckNeededBit::kMask;
       Branch(IsSetWord32(object_bitfield, mask), &return_runtime,
@@ -9964,9 +10012,9 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
     BIND(&if_objectisdirect);
 
     // Check the current {object} prototype.
-    Node* object_prototype = LoadMapPrototype(object_map);
+    TNode<HeapObject> object_prototype = LoadMapPrototype(object_map);
     GotoIf(IsNull(object_prototype), &return_false);
-    GotoIf(WordEqual(object_prototype, prototype), &return_true);
+    GotoIf(TaggedEqual(object_prototype, prototype), &return_true);
 
     // Continue with the prototype.
     CSA_ASSERT(this, TaggedIsNotSmi(object_prototype));
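
// Throughout this patch, WordEqual/WordNotEqual on tagged values become
// TaggedEqual/TaggedNotEqual. A plausible motivation (an assumption, not
// stated in the diff) is pointer compression: once a tagged slot holds a
// 32-bit compressed value, comparing full machine words is no longer the
// right operation. Scalar model of the distinction:
//
//   #include <cstdint>
//   using Tagged_t = uint32_t;  // assumed compressed representation
//   inline bool TaggedEqualScalar(uint64_t raw_a, uint64_t raw_b) {
//     // Compare only the tagged-width bits, not the full word.
//     return static_cast<Tagged_t>(raw_a) == static_cast<Tagged_t>(raw_b);
//   }
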
@@ -10008,34 +10056,33 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
   GotoIf(TaggedIsSmi(callable), &return_runtime);
 
   // Load map of {callable}.
-  Node* callable_map = LoadMap(callable);
+  TNode<Map> callable_map = LoadMap(callable);
 
   // Goto runtime if {callable} is not a JSFunction.
-  Node* callable_instance_type = LoadMapInstanceType(callable_map);
+  TNode<Uint16T> callable_instance_type = LoadMapInstanceType(callable_map);
   GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE),
             &return_runtime);
 
-  GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), CAST(callable_map),
+  GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map,
                                        &return_runtime);
 
   // Get the "prototype" (or initial map) of the {callable}.
-  Node* callable_prototype =
-      LoadObjectField(callable, JSFunction::kPrototypeOrInitialMapOffset);
+  TNode<HeapObject> callable_prototype = LoadObjectField<HeapObject>(
+      CAST(callable), JSFunction::kPrototypeOrInitialMapOffset);
   {
     Label no_initial_map(this), walk_prototype_chain(this);
-    VARIABLE(var_callable_prototype, MachineRepresentation::kTagged,
-             callable_prototype);
+    TVARIABLE(HeapObject, var_callable_prototype, callable_prototype);
 
     // Resolve the "prototype" if the {callable} has an initial map.
     GotoIfNot(IsMap(callable_prototype), &no_initial_map);
-    var_callable_prototype.Bind(
-        LoadObjectField(callable_prototype, Map::kPrototypeOffset));
+    var_callable_prototype =
+        LoadObjectField<HeapObject>(callable_prototype, Map::kPrototypeOffset);
     Goto(&walk_prototype_chain);
 
     BIND(&no_initial_map);
     // {callable_prototype} is the hole if the "prototype" property hasn't been
     // requested so far.
-    Branch(WordEqual(callable_prototype, TheHoleConstant()), &return_runtime,
+    Branch(TaggedEqual(callable_prototype, TheHoleConstant()), &return_runtime,
            &walk_prototype_chain);
 
     BIND(&walk_prototype_chain);
@@ -10077,7 +10124,7 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
     index_node = BitcastTaggedSignedToWord(index_node);
   } else {
     DCHECK(mode == INTPTR_PARAMETERS);
-    constant_index = ToIntPtrConstant(index_node, index);
+    constant_index = ToIntPtrConstant(index_node, &index);
   }
   if (constant_index) {
     return IntPtrConstant(base_size + element_size * index);
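
// ToIntPtrConstant/ToInt32Constant now take their output parameter by
// pointer instead of by non-const reference, making the mutation visible at
// the call site. Paraphrased shape of the migration (declarations are a
// sketch, not verbatim):
//
//   bool ToIntPtrConstant(Node* node, intptr_t& out);  // before
//   bool ToIntPtrConstant(Node* node, intptr_t* out);  // after
//
// Call sites change accordingly: ToIntPtrConstant(index_node, index)
// becomes ToIntPtrConstant(index_node, &index), as in the hunk above.
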
@@ -10107,8 +10154,8 @@ TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
 TNode<HeapObject> CodeStubAssembler::LoadFeedbackCellValue(
     SloppyTNode<JSFunction> closure) {
   TNode<FeedbackCell> feedback_cell =
-      CAST(LoadObjectField(closure, JSFunction::kFeedbackCellOffset));
-  return CAST(LoadObjectField(feedback_cell, FeedbackCell::kValueOffset));
+      LoadObjectField<FeedbackCell>(closure, JSFunction::kFeedbackCellOffset);
+  return LoadObjectField<HeapObject>(feedback_cell, FeedbackCell::kValueOffset);
 }
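
// The templated LoadObjectField<T> used above folds the cast into the load;
// the two spellings below are equivalent in effect (illustrative):
//
//   TNode<FeedbackCell> cell =
//       LoadObjectField<FeedbackCell>(closure, JSFunction::kFeedbackCellOffset);
//   TNode<FeedbackCell> same_cell =
//       CAST(LoadObjectField(closure, JSFunction::kFeedbackCellOffset));
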
 
 TNode<HeapObject> CodeStubAssembler::LoadFeedbackVector(
@@ -10218,26 +10265,23 @@ void CodeStubAssembler::CombineFeedback(Variable* existing_feedback,
       SmiOr(CAST(existing_feedback->value()), CAST(feedback)));
 }
 
-void CodeStubAssembler::CheckForAssociatedProtector(Node* name,
+void CodeStubAssembler::CheckForAssociatedProtector(SloppyTNode<Name> name,
                                                     Label* if_protector) {
   // This list must be kept in sync with LookupIterator::UpdateProtector!
   // TODO(jkummerow): Would it be faster to have a bit in Symbol::flags()?
-  GotoIf(WordEqual(name, LoadRoot(RootIndex::kconstructor_string)),
-         if_protector);
-  GotoIf(WordEqual(name, LoadRoot(RootIndex::kiterator_symbol)), if_protector);
-  GotoIf(WordEqual(name, LoadRoot(RootIndex::knext_string)), if_protector);
-  GotoIf(WordEqual(name, LoadRoot(RootIndex::kspecies_symbol)), if_protector);
-  GotoIf(WordEqual(name, LoadRoot(RootIndex::kis_concat_spreadable_symbol)),
-         if_protector);
-  GotoIf(WordEqual(name, LoadRoot(RootIndex::kresolve_string)), if_protector);
-  GotoIf(WordEqual(name, LoadRoot(RootIndex::kthen_string)), if_protector);
+  GotoIf(TaggedEqual(name, ConstructorStringConstant()), if_protector);
+  GotoIf(TaggedEqual(name, IteratorSymbolConstant()), if_protector);
+  GotoIf(TaggedEqual(name, NextStringConstant()), if_protector);
+  GotoIf(TaggedEqual(name, SpeciesSymbolConstant()), if_protector);
+  GotoIf(TaggedEqual(name, IsConcatSpreadableSymbolConstant()), if_protector);
+  GotoIf(TaggedEqual(name, ResolveStringConstant()), if_protector);
+  GotoIf(TaggedEqual(name, ThenStringConstant()), if_protector);
   // Fall through if no case matched.
 }
 
 TNode<Map> CodeStubAssembler::LoadReceiverMap(SloppyTNode<Object> receiver) {
   return Select<Map>(
-      TaggedIsSmi(receiver),
-      [=] { return CAST(LoadRoot(RootIndex::kHeapNumberMap)); },
+      TaggedIsSmi(receiver), [=] { return HeapNumberMapConstant(); },
       [=] { return LoadMap(UncheckedCast<HeapObject>(receiver)); });
 }
 
@@ -10309,22 +10353,24 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(
            access_mode == ArgumentsAccessMode::kHas);
   }
   Label if_mapped(this), if_unmapped(this), end(this, &var_result);
-  Node* intptr_two = IntPtrConstant(2);
-  Node* adjusted_length = IntPtrSub(elements_length, intptr_two);
+  TNode<IntPtrT> intptr_two = IntPtrConstant(2);
+  TNode<WordT> adjusted_length = IntPtrSub(elements_length, intptr_two);
 
   GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped);
 
   TNode<Object> mapped_index =
       LoadFixedArrayElement(elements, IntPtrAdd(key, intptr_two));
-  Branch(WordEqual(mapped_index, TheHoleConstant()), &if_unmapped, &if_mapped);
+  Branch(TaggedEqual(mapped_index, TheHoleConstant()), &if_unmapped,
+         &if_mapped);
 
   BIND(&if_mapped);
   {
     TNode<IntPtrT> mapped_index_intptr = SmiUntag(CAST(mapped_index));
     TNode<Context> the_context = CAST(LoadFixedArrayElement(elements, 0));
     if (access_mode == ArgumentsAccessMode::kLoad) {
-      Node* result = LoadContextElement(the_context, mapped_index_intptr);
-      CSA_ASSERT(this, WordNotEqual(result, TheHoleConstant()));
+      TNode<Object> result =
+          LoadContextElement(the_context, mapped_index_intptr);
+      CSA_ASSERT(this, TaggedNotEqual(result, TheHoleConstant()));
       var_result.Bind(result);
     } else if (access_mode == ArgumentsAccessMode::kHas) {
       CSA_ASSERT(this, Word32BinaryNot(IsTheHole(LoadContextElement(
@@ -10340,7 +10386,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(
   {
     TNode<HeapObject> backing_store_ho =
         CAST(LoadFixedArrayElement(elements, 1));
-    GotoIf(WordNotEqual(LoadMap(backing_store_ho), FixedArrayMapConstant()),
+    GotoIf(TaggedNotEqual(LoadMap(backing_store_ho), FixedArrayMapConstant()),
            bailout);
     TNode<FixedArray> backing_store = CAST(backing_store_ho);
 
@@ -10350,9 +10396,9 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(
       Label out_of_bounds(this);
       GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length),
              &out_of_bounds);
-      Node* result = LoadFixedArrayElement(backing_store, key);
+      TNode<Object> result = LoadFixedArrayElement(backing_store, key);
       var_result.Bind(
-          SelectBooleanConstant(WordNotEqual(result, TheHoleConstant())));
+          SelectBooleanConstant(TaggedNotEqual(result, TheHoleConstant())));
       Goto(&end);
 
       BIND(&out_of_bounds);
@@ -10363,8 +10409,8 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(
 
       // The key falls into unmapped range.
       if (access_mode == ArgumentsAccessMode::kLoad) {
-        Node* result = LoadFixedArrayElement(backing_store, key);
-        GotoIf(WordEqual(result, TheHoleConstant()), bailout);
+        TNode<Object> result = LoadFixedArrayElement(backing_store, key);
+        GotoIf(TaggedEqual(result, TheHoleConstant()), bailout);
         var_result.Bind(result);
       } else {
         StoreFixedArrayElement(backing_store, key, value);
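
// The index arithmetic in EmitKeyedSloppyArguments follows the layout this
// function reads: elements[0], elements[1], and key + 2.
//
//   slot 0   -> Context holding the mapped argument values
//   slot 1   -> FixedArray backing store for unmapped arguments
//   slot 2+i -> the_hole if argument i is unmapped, otherwise a Smi index
//               into that context
//
// Hypothetical helper naming the "+2" offset (not in the patch):
//
//   TNode<WordT> MappedEntryIndex(TNode<IntPtrT> key) {
//     return IntPtrAdd(key, IntPtrConstant(2));
//   }
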
@@ -10379,7 +10425,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(
 
 TNode<Context> CodeStubAssembler::LoadScriptContext(
     TNode<Context> context, TNode<IntPtrT> context_index) {
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<ScriptContextTable> script_context_table = CAST(
       LoadContextElement(native_context, Context::SCRIPT_CONTEXT_TABLE_INDEX));
 
@@ -10445,10 +10491,10 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
 #endif
   } else if (IsTypedArrayElementsKind(kind)) {
     if (kind == UINT8_CLAMPED_ELEMENTS) {
-      CSA_ASSERT(this,
-                 Word32Equal(value, Word32And(Int32Constant(0xFF), value)));
+      CSA_ASSERT(this, Word32Equal(UncheckedCast<Word32T>(value),
+                                   Word32And(Int32Constant(0xFF), value)));
     }
-    Node* offset = ElementOffsetFromIndex(index, kind, mode, 0);
+    TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, mode, 0);
     // TODO(cbruni): Add OOB check once typed.
     MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
     StoreNoWriteBarrier(rep, elements, offset, value);
@@ -10466,8 +10512,8 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
 
 Node* CodeStubAssembler::Int32ToUint8Clamped(Node* int32_value) {
   Label done(this);
-  Node* int32_zero = Int32Constant(0);
-  Node* int32_255 = Int32Constant(255);
+  TNode<Int32T> int32_zero = Int32Constant(0);
+  TNode<Int32T> int32_255 = Int32Constant(255);
   VARIABLE(var_value, MachineRepresentation::kWord32, int32_value);
   GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done);
   var_value.Bind(int32_zero);
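
// Scalar equivalent of Int32ToUint8Clamped (illustrative). Note the single
// unsigned compare in the CSA version: a negative int32 reinterpreted as
// uint32 is greater than 255, so Uint32LessThanOrEqual(value, 255) tests
// membership in [0, 255] in one go.
//
//   #include <algorithm>
//   #include <cstdint>
//   uint8_t Uint8Clamp(int32_t value) {
//     return static_cast<uint8_t>(std::clamp(value, 0, 255));
//   }
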
@@ -10485,7 +10531,7 @@ Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
   var_value.Bind(Int32Constant(255));
   GotoIf(Float64LessThanOrEqual(Float64Constant(255.0), float64_value), &done);
   {
-    Node* rounded_value = Float64RoundToEven(float64_value);
+    TNode<Float64T> rounded_value = Float64RoundToEven(float64_value);
     var_value.Bind(TruncateFloat64ToWord32(rounded_value));
     Goto(&done);
   }
@@ -10539,37 +10585,38 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
 
   BIND(&if_heapnumber_or_oddball);
   {
-    Node* value = UncheckedCast<Float64T>(LoadObjectField(
+    TNode<Float64T> value = UncheckedCast<Float64T>(LoadObjectField(
         var_input.value(), HeapNumber::kValueOffset, MachineType::Float64()));
     if (rep == MachineRepresentation::kWord32) {
       if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
-        value = Float64ToUint8Clamped(value);
+        var_result.Bind(Float64ToUint8Clamped(value));
       } else {
-        value = TruncateFloat64ToWord32(value);
+        var_result.Bind(TruncateFloat64ToWord32(value));
       }
     } else if (rep == MachineRepresentation::kFloat32) {
-      value = TruncateFloat64ToFloat32(value);
+      var_result.Bind(TruncateFloat64ToFloat32(value));
     } else {
       DCHECK_EQ(MachineRepresentation::kFloat64, rep);
+      var_result.Bind(value);
     }
-    var_result.Bind(value);
     Goto(&done);
   }
 
   BIND(&if_smi);
   {
-    Node* value = SmiToInt32(var_input.value());
+    TNode<Int32T> value = SmiToInt32(var_input.value());
     if (rep == MachineRepresentation::kFloat32) {
-      value = RoundInt32ToFloat32(value);
+      var_result.Bind(RoundInt32ToFloat32(value));
     } else if (rep == MachineRepresentation::kFloat64) {
-      value = ChangeInt32ToFloat64(value);
+      var_result.Bind(ChangeInt32ToFloat64(value));
     } else {
       DCHECK_EQ(MachineRepresentation::kWord32, rep);
       if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
-        value = Int32ToUint8Clamped(value);
+        var_result.Bind(Int32ToUint8Clamped(value));
+      } else {
+        var_result.Bind(value);
       }
     }
-    var_result.Bind(value);
     Goto(&done);
   }
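
// Since value is now a typed TNode (Float64T in one branch, Int32T in the
// other), it can no longer be re-bound across representations; each branch
// therefore writes its converted result straight into var_result. Minimal
// shape of the pattern (illustrative):
//
//   TNode<Float64T> value = LoadHeapNumberValue(CAST(var_input.value()));
//   if (rep == MachineRepresentation::kWord32) {
//     var_result.Bind(TruncateFloat64ToWord32(value));  // convert per branch
//   } else {
//     var_result.Bind(value);  // the C++ local keeps one static type
//   }
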
 
@@ -10606,7 +10653,7 @@ void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
   if (!Is64()) {
     *var_high = Unsigned(IntPtrSub(IntPtrConstant(0), var_high->value()));
     Label no_carry(this);
-    GotoIf(WordEqual(var_low->value(), IntPtrConstant(0)), &no_carry);
+    GotoIf(IntPtrEqual(var_low->value(), IntPtrConstant(0)), &no_carry);
     *var_high = Unsigned(IntPtrSub(var_high->value(), IntPtrConstant(1)));
     Goto(&no_carry);
     BIND(&no_carry);
@@ -10623,9 +10670,10 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
                                          Variable* maybe_converted_value) {
   CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
 
-  Node* elements = LoadElements(object);
+  TNode<FixedArrayBase> elements = LoadElements(object);
   if (!(IsSmiOrObjectElementsKind(elements_kind) ||
-        IsSealedElementsKind(elements_kind))) {
+        IsSealedElementsKind(elements_kind) ||
+        IsNonextensibleElementsKind(elements_kind))) {
     CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
   } else if (!IsCOWHandlingStoreMode(store_mode)) {
     GotoIf(IsFixedCOWArrayMap(LoadMap(elements)), bailout);
@@ -10744,7 +10792,8 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
     return;
   }
   DCHECK(IsFastElementsKind(elements_kind) ||
-         IsSealedElementsKind(elements_kind));
+         IsSealedElementsKind(elements_kind) ||
+         IsNonextensibleElementsKind(elements_kind));
 
   Node* length = SelectImpl(
       IsJSArray(object), [=]() { return LoadJSArrayLength(object); },
@@ -10761,15 +10810,19 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
     value = TryTaggedToFloat64(value, bailout);
   }
 
-  if (IsGrowStoreMode(store_mode) && !IsSealedElementsKind(elements_kind)) {
-    elements = CheckForCapacityGrow(object, elements, elements_kind, length,
-                                    intptr_key, parameter_mode, bailout);
+  if (IsGrowStoreMode(store_mode) &&
+      !(IsSealedElementsKind(elements_kind) ||
+        IsNonextensibleElementsKind(elements_kind))) {
+    elements =
+        CAST(CheckForCapacityGrow(object, elements, elements_kind, length,
+                                  intptr_key, parameter_mode, bailout));
   } else {
     GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
   }
 
   // Cannot store to a hole in holey sealed elements so bailout.
-  if (elements_kind == HOLEY_SEALED_ELEMENTS) {
+  if (elements_kind == HOLEY_SEALED_ELEMENTS ||
+      elements_kind == HOLEY_NONEXTENSIBLE_ELEMENTS) {
     TNode<Object> target_value =
         LoadFixedArrayElement(CAST(elements), intptr_key);
     GotoIf(IsTheHole(target_value), bailout);
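
// Hypothetical helper (not in the patch) naming the grouping these
// EmitElementStore hunks repeat: nonextensible elements now take the same
// paths as sealed elements, including the hole-store bailout just above.
//
//   bool IsSealedOrNonextensibleElementsKind(ElementsKind kind) {
//     return IsSealedElementsKind(kind) || IsNonextensibleElementsKind(kind);
//   }
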
@@ -10778,11 +10831,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
   // If we didn't grow {elements}, it might still be COW, in which case we
   // copy it now.
   if (!(IsSmiOrObjectElementsKind(elements_kind) ||
-        IsSealedElementsKind(elements_kind))) {
+        IsSealedElementsKind(elements_kind) ||
+        IsNonextensibleElementsKind(elements_kind))) {
     CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
   } else if (IsCOWHandlingStoreMode(store_mode)) {
-    elements = CopyElementsOnWrite(object, elements, elements_kind, length,
-                                   parameter_mode, bailout);
+    elements = CAST(CopyElementsOnWrite(object, elements, elements_kind, length,
+                                        parameter_mode, bailout));
   }
 
   CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
@@ -10790,8 +10844,10 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
 }
 
 Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
-                                              ElementsKind kind, Node* length,
-                                              Node* key, ParameterMode mode,
+                                              ElementsKind kind,
+                                              SloppyTNode<UintPtrT> length,
+                                              SloppyTNode<WordT> key,
+                                              ParameterMode mode,
                                               Label* bailout) {
   DCHECK(IsFastElementsKind(kind));
   VARIABLE(checked_elements, MachineRepresentation::kTagged);
@@ -10826,12 +10882,12 @@ Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
     BIND(&grow_bailout);
     {
       Node* tagged_key = mode == SMI_PARAMETERS
-                             ? key
-                             : ChangeInt32ToTagged(TruncateIntPtrToInt32(key));
-      Node* maybe_elements = CallRuntime(
+                             ? static_cast<Node*>(key)
+                             : ChangeInt32ToTagged(TruncateWordToInt32(key));
+      TNode<Object> maybe_elements = CallRuntime(
           Runtime::kGrowArrayElements, NoContextConstant(), object, tagged_key);
       GotoIf(TaggedIsSmi(maybe_elements), bailout);
-      CSA_ASSERT(this, IsFixedArrayWithKind(maybe_elements, kind));
+      CSA_ASSERT(this, IsFixedArrayWithKind(CAST(maybe_elements), kind));
       checked_elements.Bind(maybe_elements);
       Goto(&fits_capacity);
     }
@@ -10839,7 +10895,7 @@ Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
     BIND(&fits_capacity);
     GotoIfNot(IsJSArray(object), &done);
 
-    Node* new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
+    TNode<WordT> new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
     StoreObjectFieldNoWriteBarrier(object, JSArray::kLengthOffset,
                                    ParameterToTagged(new_length, mode));
     Goto(&done);
@@ -10888,14 +10944,15 @@ void CodeStubAssembler::TransitionElementsKind(Node* object, Node* map,
 
   if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
     Comment("Non-simple map transition");
-    Node* elements = LoadElements(object);
+    TNode<FixedArrayBase> elements = LoadElements(object);
 
     Label done(this);
-    GotoIf(WordEqual(elements, EmptyFixedArrayConstant()), &done);
+    GotoIf(TaggedEqual(elements, EmptyFixedArrayConstant()), &done);
 
     // TODO(ishell): Use OptimalParameterMode().
     ParameterMode mode = INTPTR_PARAMETERS;
-    Node* elements_length = SmiUntag(LoadFixedArrayBaseLength(elements));
+    TNode<IntPtrT> elements_length =
+        SmiUntag(LoadFixedArrayBaseLength(elements));
     Node* array_length = SelectImpl(
         IsJSArray(object),
         [=]() {
@@ -10978,7 +11035,7 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
   BIND(&map_check);
   {
     TNode<Object> memento_map = LoadObjectField(object, kMementoMapOffset);
-    Branch(WordEqual(memento_map, LoadRoot(RootIndex::kAllocationMementoMap)),
+    Branch(TaggedEqual(memento_map, AllocationMementoMapConstant()),
            memento_found, &no_memento_found);
   }
   BIND(&no_memento_found);
@@ -10992,7 +11049,7 @@ TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
 TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
     SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot) {
   TNode<IntPtrT> size = IntPtrConstant(AllocationSite::kSizeWithWeakNext);
-  Node* site = Allocate(size, CodeStubAssembler::kPretenured);
+  TNode<HeapObject> site = Allocate(size, CodeStubAssembler::kPretenured);
   StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap);
   // Should match AllocationSite::Initialize.
   TNode<WordT> field = UpdateWord<AllocationSite::ElementsKindBits>(
@@ -11097,9 +11154,10 @@ Node* CodeStubAssembler::BuildFastLoop(
   // to force the loop header check at the end of the loop and branch forward to
   // it from the pre-header). The extra branch is slower in the case that the
   // loop actually iterates.
-  Node* first_check = WordEqual(var.value(), end_index);
+  TNode<BoolT> first_check =
+      IntPtrOrSmiEqual(var.value(), end_index, parameter_mode);
   int32_t first_check_val;
-  if (ToInt32Constant(first_check, first_check_val)) {
+  if (ToInt32Constant(first_check, &first_check_val)) {
     if (first_check_val) return var.value();
     Goto(&loop);
   } else {
@@ -11115,7 +11173,8 @@ Node* CodeStubAssembler::BuildFastLoop(
     if (advance_mode == IndexAdvanceMode::kPost) {
       Increment(&var, increment, parameter_mode);
     }
-    Branch(WordNotEqual(var.value(), end_index), &loop, &after_loop);
+    Branch(IntPtrOrSmiNotEqual(var.value(), end_index, parameter_mode), &loop,
+           &after_loop);
   }
   BIND(&after_loop);
   return var.value();
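
// The loop's entry and back-edge compares now go through
// IntPtrOrSmiEqual/IntPtrOrSmiNotEqual so that SMI_PARAMETERS indices are
// compared as Smis rather than raw words. Typical call in INTPTR mode,
// mirroring the InitializeFieldsWithRoot use later in this file (captured
// locals are hypothetical):
//
//   BuildFastLoop(
//       end_offset, start_offset,
//       [=](Node* current) {
//         StoreNoWriteBarrier(MachineRepresentation::kTagged, object,
//                             current, root_value);
//       },
//       -kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre);
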
@@ -11132,25 +11191,25 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
   CSA_SLOW_ASSERT(this, Word32Or(IsFixedArrayWithKind(fixed_array, kind),
                                  IsPropertyArray(fixed_array)));
   int32_t first_val;
-  bool constant_first = ToInt32Constant(first_element_inclusive, first_val);
+  bool constant_first = ToInt32Constant(first_element_inclusive, &first_val);
   int32_t last_val;
-  bool constant_last = ToInt32Constant(last_element_exclusive, last_val);
+  bool constant_last = ToInt32Constant(last_element_exclusive, &last_val);
   if (constant_first && constant_last) {
     int delta = last_val - first_val;
     DCHECK_GE(delta, 0);
     if (delta <= kElementLoopUnrollThreshold) {
       if (direction == ForEachDirection::kForward) {
         for (int i = first_val; i < last_val; ++i) {
-          Node* index = IntPtrConstant(i);
-          Node* offset =
+          TNode<IntPtrT> index = IntPtrConstant(i);
+          TNode<IntPtrT> offset =
               ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
                                      FixedArray::kHeaderSize - kHeapObjectTag);
           body(fixed_array, offset);
         }
       } else {
         for (int i = last_val - 1; i >= first_val; --i) {
-          Node* index = IntPtrConstant(i);
-          Node* offset =
+          TNode<IntPtrT> index = IntPtrConstant(i);
+          TNode<IntPtrT> offset =
               ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
                                      FixedArray::kHeaderSize - kHeapObjectTag);
           body(fixed_array, offset);
@@ -11160,10 +11219,10 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
     }
   }
 
-  Node* start =
+  TNode<IntPtrT> start =
       ElementOffsetFromIndex(first_element_inclusive, kind, mode,
                              FixedArray::kHeaderSize - kHeapObjectTag);
-  Node* limit =
+  TNode<IntPtrT> limit =
       ElementOffsetFromIndex(last_element_exclusive, kind, mode,
                              FixedArray::kHeaderSize - kHeapObjectTag);
   if (direction == ForEachDirection::kReverse) std::swap(start, limit);
@@ -11191,7 +11250,7 @@ void CodeStubAssembler::InitializeFieldsWithRoot(Node* object,
   CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
   start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
   end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
-  Node* root_value = LoadRoot(root_index);
+  TNode<Object> root_value = LoadRoot(root_index);
   BuildFastLoop(
       end_offset, start_offset,
       [this, object, root_value](Node* current) {
@@ -11203,7 +11262,8 @@ void CodeStubAssembler::InitializeFieldsWithRoot(Node* object,
 }
 
 void CodeStubAssembler::BranchIfNumberRelationalComparison(
-    Operation op, Node* left, Node* right, Label* if_true, Label* if_false) {
+    Operation op, SloppyTNode<Number> left, SloppyTNode<Number> right,
+    Label* if_true, Label* if_false) {
   CSA_SLOW_ASSERT(this, IsNumber(left));
   CSA_SLOW_ASSERT(this, IsNumber(right));
 
@@ -11246,25 +11306,22 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison(
               }
             },
             [&] {
-              CSA_ASSERT(this, IsHeapNumber(right));
               var_left_float = SmiToFloat64(smi_left);
-              var_right_float = LoadHeapNumberValue(right);
+              var_right_float = LoadHeapNumberValue(CAST(right));
               Goto(&do_float_comparison);
             });
       },
       [&] {
-        CSA_ASSERT(this, IsHeapNumber(left));
-        var_left_float = LoadHeapNumberValue(left);
+        var_left_float = LoadHeapNumberValue(CAST(left));
 
         Branch(
             TaggedIsSmi(right),
             [&] {
-              var_right_float = SmiToFloat64(right);
+              var_right_float = SmiToFloat64(CAST(right));
               Goto(&do_float_comparison);
             },
             [&] {
-              CSA_ASSERT(this, IsHeapNumber(right));
-              var_right_float = LoadHeapNumberValue(right);
+              var_right_float = LoadHeapNumberValue(CAST(right));
               Goto(&do_float_comparison);
             });
       });
@@ -11327,8 +11384,10 @@ Operation Reverse(Operation op) {
 }
 }  // anonymous namespace
 
-Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
-                                              Node* right, Node* context,
+Node* CodeStubAssembler::RelationalComparison(Operation op,
+                                              SloppyTNode<Object> left,
+                                              SloppyTNode<Object> right,
+                                              SloppyTNode<Context> context,
                                               Variable* var_type_feedback) {
   Label return_true(this), return_false(this), do_float_comparison(this),
       end(this);
@@ -11338,8 +11397,8 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
 
   // We might need to loop several times due to ToPrimitive and/or ToNumeric
   // conversions.
-  VARIABLE(var_left, MachineRepresentation::kTagged, left);
-  VARIABLE(var_right, MachineRepresentation::kTagged, right);
+  TVARIABLE(Object, var_left, left);
+  TVARIABLE(Object, var_right, right);
   VariableList loop_variable_list({&var_left, &var_right}, zone());
   if (var_type_feedback != nullptr) {
     // Initialize the type feedback to None. The current feedback is combined
@@ -11364,9 +11423,9 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
           if_right_bigint(this, Label::kDeferred),
           if_right_not_numeric(this, Label::kDeferred);
       GotoIf(TaggedIsSmi(right), &if_right_smi);
-      Node* right_map = LoadMap(right);
+      TNode<Map> right_map = LoadMap(CAST(right));
       GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
-      Node* right_instance_type = LoadMapInstanceType(right_map);
+      TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
       Branch(IsBigIntInstanceType(right_instance_type), &if_right_bigint,
              &if_right_not_numeric);
 
@@ -11401,7 +11460,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
       {
         CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
         var_left_float = SmiToFloat64(smi_left);
-        var_right_float = LoadHeapNumberValue(right);
+        var_right_float = LoadHeapNumberValue(CAST(right));
         Goto(&do_float_comparison);
       }
 
@@ -11421,15 +11480,14 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
         // dedicated ToPrimitive(right, hint Number) operation, as the
         // ToNumeric(right) will by itself already invoke ToPrimitive with
         // a Number hint.
-        var_right.Bind(
-            CallBuiltin(Builtins::kNonNumberToNumeric, context, right));
+        var_right = CallBuiltin(Builtins::kNonNumberToNumeric, context, right);
         Goto(&loop);
       }
     }
 
     BIND(&if_left_not_smi);
     {
-      Node* left_map = LoadMap(left);
+      TNode<Map> left_map = LoadMap(CAST(left));
 
       Label if_right_smi(this), if_right_not_smi(this);
       Branch(TaggedIsSmi(right), &if_right_smi, &if_right_not_smi);
@@ -11439,15 +11497,15 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
         Label if_left_heapnumber(this), if_left_bigint(this, Label::kDeferred),
             if_left_not_numeric(this, Label::kDeferred);
         GotoIf(IsHeapNumberMap(left_map), &if_left_heapnumber);
-        Node* left_instance_type = LoadMapInstanceType(left_map);
+        TNode<Uint16T> left_instance_type = LoadMapInstanceType(left_map);
         Branch(IsBigIntInstanceType(left_instance_type), &if_left_bigint,
                &if_left_not_numeric);
 
         BIND(&if_left_heapnumber);
         {
           CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
-          var_left_float = LoadHeapNumberValue(left);
-          var_right_float = SmiToFloat64(right);
+          var_left_float = LoadHeapNumberValue(CAST(left));
+          var_right_float = SmiToFloat64(CAST(right));
           Goto(&do_float_comparison);
         }
 
@@ -11467,21 +11525,20 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
           // dedicated ToPrimitive(left, hint Number) operation, as the
           // ToNumeric(left) will by itself already invoke ToPrimitive with
           // a Number hint.
-          var_left.Bind(
-              CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
+          var_left = CallBuiltin(Builtins::kNonNumberToNumeric, context, left);
           Goto(&loop);
         }
       }
 
       BIND(&if_right_not_smi);
       {
-        Node* right_map = LoadMap(right);
+        TNode<Map> right_map = LoadMap(CAST(right));
 
         Label if_left_heapnumber(this), if_left_bigint(this, Label::kDeferred),
             if_left_string(this, Label::kDeferred),
             if_left_other(this, Label::kDeferred);
         GotoIf(IsHeapNumberMap(left_map), &if_left_heapnumber);
-        Node* left_instance_type = LoadMapInstanceType(left_map);
+        TNode<Uint16T> left_instance_type = LoadMapInstanceType(left_map);
         GotoIf(IsBigIntInstanceType(left_instance_type), &if_left_bigint);
         Branch(IsStringInstanceType(left_instance_type), &if_left_string,
                &if_left_other);
@@ -11491,8 +11548,8 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
           Label if_right_heapnumber(this),
               if_right_bigint(this, Label::kDeferred),
               if_right_not_numeric(this, Label::kDeferred);
-          GotoIf(WordEqual(right_map, left_map), &if_right_heapnumber);
-          Node* right_instance_type = LoadMapInstanceType(right_map);
+          GotoIf(TaggedEqual(right_map, left_map), &if_right_heapnumber);
+          TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
           Branch(IsBigIntInstanceType(right_instance_type), &if_right_bigint,
                  &if_right_not_numeric);
 
@@ -11500,8 +11557,8 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
           {
             CombineFeedback(var_type_feedback,
                             CompareOperationFeedback::kNumber);
-            var_left_float = LoadHeapNumberValue(left);
-            var_right_float = LoadHeapNumberValue(right);
+            var_left_float = LoadHeapNumberValue(CAST(left));
+            var_right_float = LoadHeapNumberValue(CAST(right));
             Goto(&do_float_comparison);
           }
 
@@ -11523,8 +11580,8 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
             // dedicated ToPrimitive(right, hint Number) operation, as the
             // ToNumeric(right) will by itself already invoke ToPrimitive with
             // a Number hint.
-            var_right.Bind(
-                CallBuiltin(Builtins::kNonNumberToNumeric, context, right));
+            var_right =
+                CallBuiltin(Builtins::kNonNumberToNumeric, context, right);
             Goto(&loop);
           }
         }
@@ -11534,7 +11591,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
           Label if_right_heapnumber(this), if_right_bigint(this),
               if_right_string(this), if_right_other(this);
           GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
-          Node* right_instance_type = LoadMapInstanceType(right_map);
+          TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
           GotoIf(IsBigIntInstanceType(right_instance_type), &if_right_bigint);
           Branch(IsStringInstanceType(right_instance_type), &if_right_string,
                  &if_right_other);
@@ -11578,15 +11635,15 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
             // dedicated ToPrimitive(right, hint Number) operation, as the
             // ToNumeric(right) will by itself already invoke ToPrimitive with
             // a Number hint.
-            var_right.Bind(
-                CallBuiltin(Builtins::kNonNumberToNumeric, context, right));
+            var_right =
+                CallBuiltin(Builtins::kNonNumberToNumeric, context, right);
             Goto(&loop);
           }
         }
 
         BIND(&if_left_string);
         {
-          Node* right_instance_type = LoadMapInstanceType(right_map);
+          TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
 
           Label if_right_not_string(this, Label::kDeferred);
           GotoIfNot(IsStringInstanceType(right_instance_type),
@@ -11629,9 +11686,9 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
             GotoIf(IsJSReceiverInstanceType(right_instance_type),
                    &if_right_receiver);
 
-            var_left.Bind(
-                CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
-            var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right));
+            var_left =
+                CallBuiltin(Builtins::kNonNumberToNumeric, context, left);
+            var_right = CallBuiltin(Builtins::kToNumeric, context, right);
             Goto(&loop);
 
             BIND(&if_right_bigint);
@@ -11646,7 +11703,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
             {
               Callable callable = CodeFactory::NonPrimitiveToPrimitive(
                   isolate(), ToPrimitiveHint::kNumber);
-              var_right.Bind(CallStub(callable, context, right));
+              var_right = CallStub(callable, context, right);
               Goto(&loop);
             }
           }
@@ -11665,7 +11722,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
                       &collect_any_feedback);
 
             GotoIf(IsHeapNumberMap(right_map), &collect_oddball_feedback);
-            Node* right_instance_type = LoadMapInstanceType(right_map);
+            TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
             Branch(InstanceTypeEqual(right_instance_type, ODDBALL_TYPE),
                    &collect_oddball_feedback, &collect_any_feedback);
 
@@ -11694,16 +11751,15 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
           GotoIf(IsJSReceiverInstanceType(left_instance_type),
                  &if_left_receiver);
 
-          var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right));
-          var_left.Bind(
-              CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
+          var_right = CallBuiltin(Builtins::kToNumeric, context, right);
+          var_left = CallBuiltin(Builtins::kNonNumberToNumeric, context, left);
           Goto(&loop);
 
           BIND(&if_left_receiver);
           {
             Callable callable = CodeFactory::NonPrimitiveToPrimitive(
                 isolate(), ToPrimitiveHint::kNumber);
-            var_left.Bind(CallStub(callable, context, left));
+            var_left = CallStub(callable, context, left);
             Goto(&loop);
           }
         }
@@ -11765,8 +11821,8 @@ TNode<Smi> CodeStubAssembler::CollectFeedbackForString(
   return feedback;
 }
 
-void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
-                                           Label* if_notequal,
+void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
+                                           Label* if_equal, Label* if_notequal,
                                            Variable* var_type_feedback) {
   // In case of abstract or strict equality checks, we need additional checks
   // for NaN values because they are not considered equal, even if both the
@@ -11775,12 +11831,13 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
   Label if_smi(this), if_heapnumber(this);
   GotoIf(TaggedIsSmi(value), &if_smi);
 
-  Node* value_map = LoadMap(value);
+  TNode<HeapObject> value_heapobject = CAST(value);
+  TNode<Map> value_map = LoadMap(value_heapobject);
   GotoIf(IsHeapNumberMap(value_map), &if_heapnumber);
 
   // For non-HeapNumbers, all we do is collect type feedback.
   if (var_type_feedback != nullptr) {
-    Node* instance_type = LoadMapInstanceType(value_map);
+    TNode<Uint16T> instance_type = LoadMapInstanceType(value_map);
 
     Label if_string(this), if_receiver(this), if_oddball(this), if_symbol(this),
         if_bigint(this);
@@ -11791,7 +11848,7 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
 
     BIND(&if_string);
     {
-      CSA_ASSERT(this, IsString(value));
+      CSA_ASSERT(this, IsString(value_heapobject));
       CombineFeedback(var_type_feedback,
                       CollectFeedbackForString(instance_type));
       Goto(if_equal);
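
// Pattern used above: CAST once after the Smi check, then reuse the typed
// handle everywhere. CAST is a checked downcast (verified in debug
// configurations), so hoisting it avoids re-checking at every use. Sketch:
//
//   GotoIf(TaggedIsSmi(value), &if_smi);
//   TNode<HeapObject> value_heapobject = CAST(value);  // one checked downcast
//   TNode<Map> value_map = LoadMap(value_heapobject);  // typed uses follow
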
@@ -11799,28 +11856,28 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
 
     BIND(&if_symbol);
     {
-      CSA_ASSERT(this, IsSymbol(value));
+      CSA_ASSERT(this, IsSymbol(value_heapobject));
       CombineFeedback(var_type_feedback, CompareOperationFeedback::kSymbol);
       Goto(if_equal);
     }
 
     BIND(&if_receiver);
     {
-      CSA_ASSERT(this, IsJSReceiver(value));
+      CSA_ASSERT(this, IsJSReceiver(value_heapobject));
       CombineFeedback(var_type_feedback, CompareOperationFeedback::kReceiver);
       Goto(if_equal);
     }
 
     BIND(&if_bigint);
     {
-      CSA_ASSERT(this, IsBigInt(value));
+      CSA_ASSERT(this, IsBigInt(value_heapobject));
       CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
       Goto(if_equal);
     }
 
     BIND(&if_oddball);
     {
-      CSA_ASSERT(this, IsOddball(value));
+      CSA_ASSERT(this, IsOddball(value_heapobject));
       Label if_boolean(this), if_not_boolean(this);
       Branch(IsBooleanMap(value_map), &if_boolean, &if_not_boolean);
 
@@ -11832,7 +11889,7 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
 
       BIND(&if_not_boolean);
       {
-        CSA_ASSERT(this, IsNullOrUndefined(value));
+        CSA_ASSERT(this, IsNullOrUndefined(value_heapobject));
         CombineFeedback(var_type_feedback,
                         CompareOperationFeedback::kReceiverOrNullOrUndefined);
         Goto(if_equal);
@@ -11845,7 +11902,7 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
   BIND(&if_heapnumber);
   {
     CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
-    Node* number_value = LoadHeapNumberValue(value);
+    TNode<Float64T> number_value = LoadHeapNumberValue(value_heapobject);
     BranchIfFloat64IsNaN(number_value, if_notequal, if_equal);
   }
 
@@ -11857,7 +11914,9 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
 }
 
 // ES6 section 7.2.12 Abstract Equality Comparison
-Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
+Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
+                               SloppyTNode<Object> right,
+                               SloppyTNode<Context> context,
                                Variable* var_type_feedback) {
   // This is a slightly optimized version of Object::Equals. Whenever you
   // change something functionality wise in here, remember to update the
@@ -11875,8 +11934,8 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
 
   // We might need to loop several times due to ToPrimitive and/or ToNumber
   // conversions.
-  VARIABLE(var_left, MachineRepresentation::kTagged, left);
-  VARIABLE(var_right, MachineRepresentation::kTagged, right);
+  TVARIABLE(Object, var_left, left);
+  TVARIABLE(Object, var_right, right);
   VariableList loop_variable_list({&var_left, &var_right}, zone());
   if (var_type_feedback != nullptr) {
     // Initialize the type feedback to None. The current feedback will be
@@ -11892,7 +11951,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
     right = var_right.value();
 
     Label if_notsame(this);
-    GotoIf(WordNotEqual(left, right), &if_notsame);
+    GotoIf(TaggedNotEqual(left, right), &if_notsame);
     {
       // {left} and {right} reference the exact same value, yet we need special
       // treatment for HeapNumber, as NaN is not equal to NaN.
@@ -11918,7 +11977,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
       }
 
       BIND(&if_right_not_smi);
-      Node* right_map = LoadMap(right);
+      TNode<Map> right_map = LoadMap(CAST(right));
       Label if_right_heapnumber(this), if_right_boolean(this),
           if_right_bigint(this, Label::kDeferred),
           if_right_receiver(this, Label::kDeferred);
@@ -11928,7 +11987,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
         var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
       }
       GotoIf(IsBooleanMap(right_map), &if_right_boolean);
-      Node* right_type = LoadMapInstanceType(right_map);
+      TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
       GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
       GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
       Branch(IsJSReceiverInstanceType(right_type), &if_right_receiver,
@@ -11936,15 +11995,15 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
 
       BIND(&if_right_heapnumber);
       {
-        var_left_float = SmiToFloat64(left);
-        var_right_float = LoadHeapNumberValue(right);
+        var_left_float = SmiToFloat64(CAST(left));
+        var_right_float = LoadHeapNumberValue(CAST(right));
         CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
         Goto(&do_float_comparison);
       }
 
       BIND(&if_right_boolean);
       {
-        var_right.Bind(LoadObjectField(right, Oddball::kToNumberOffset));
+        var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
         Goto(&loop);
       }
 
@@ -11958,7 +12017,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
       BIND(&if_right_receiver);
       {
         Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
-        var_right.Bind(CallStub(callable, context, right));
+        var_right = CallStub(callable, context, right);
         Goto(&loop);
       }
     }
@@ -11972,10 +12031,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
           if_left_bigint(this, Label::kDeferred), if_left_oddball(this),
           if_left_receiver(this);
 
-      Node* left_map = LoadMap(left);
-      Node* right_map = LoadMap(right);
-      Node* left_type = LoadMapInstanceType(left_map);
-      Node* right_type = LoadMapInstanceType(right_map);
+      TNode<Map> left_map = LoadMap(CAST(left));
+      TNode<Map> right_map = LoadMap(CAST(right));
+      TNode<Uint16T> left_type = LoadMapInstanceType(left_map);
+      TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
 
       GotoIf(IsStringInstanceType(left_type), &if_left_string);
       GotoIf(IsSymbolInstanceType(left_type), &if_left_symbol);
@@ -11999,8 +12058,8 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
         Label if_right_not_number(this);
         GotoIf(Word32NotEqual(left_type, right_type), &if_right_not_number);
 
-        var_left_float = LoadHeapNumberValue(left);
-        var_right_float = LoadHeapNumberValue(right);
+        var_left_float = LoadHeapNumberValue(CAST(left));
+        var_right_float = LoadHeapNumberValue(CAST(right));
         CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
         Goto(&do_float_comparison);
 
@@ -12019,7 +12078,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
 
           BIND(&if_right_boolean);
           {
-            var_right.Bind(LoadObjectField(right, Oddball::kToNumberOffset));
+            var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
             Goto(&loop);
           }
         }
@@ -12072,7 +12131,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
             var_type_feedback->Bind(
                 SmiConstant(CompareOperationFeedback::kAny));
           }
-          var_right.Bind(LoadObjectField(right, Oddball::kToNumberOffset));
+          var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
           Goto(&loop);
         }
       }
@@ -12124,10 +12183,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
           }
 
           // If {right} is a Boolean too, it must be a different Boolean.
-          GotoIf(WordEqual(right_map, left_map), &if_notequal);
+          GotoIf(TaggedEqual(right_map, left_map), &if_notequal);
 
           // Otherwise, convert {left} to number and try again.
-          var_left.Bind(LoadObjectField(left, Oddball::kToNumberOffset));
+          var_left = LoadObjectField(CAST(left), Oddball::kToNumberOffset);
           Goto(&loop);
         }
       }
@@ -12210,7 +12269,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
                   SmiConstant(CompareOperationFeedback::kAny));
             }
             Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
-            var_left.Bind(CallStub(callable, context, left));
+            var_left = CallStub(callable, context, left);
             Goto(&loop);
           }
         }
@@ -12219,14 +12278,14 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
 
     BIND(&do_right_stringtonumber);
     {
-      var_right.Bind(CallBuiltin(Builtins::kStringToNumber, context, right));
+      var_right = CallBuiltin(Builtins::kStringToNumber, context, right);
       Goto(&loop);
     }
 
     BIND(&use_symmetry);
     {
-      var_left.Bind(right);
-      var_right.Bind(left);
+      var_left = right;
+      var_right = left;
       Goto(&loop);
     }
   }
@@ -12313,7 +12372,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
 
   // Check if {lhs} and {rhs} refer to the same object.
   Label if_same(this), if_notsame(this);
-  Branch(WordEqual(lhs, rhs), &if_same, &if_notsame);
+  Branch(TaggedEqual(lhs, rhs), &if_same, &if_notsame);
 
   BIND(&if_same);
   {
@@ -12349,8 +12408,8 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
         BIND(&if_rhsissmi);
         {
           // Convert {lhs} and {rhs} to floating point values.
-          Node* lhs_value = LoadHeapNumberValue(CAST(lhs));
-          Node* rhs_value = SmiToFloat64(CAST(rhs));
+          TNode<Float64T> lhs_value = LoadHeapNumberValue(CAST(lhs));
+          TNode<Float64T> rhs_value = SmiToFloat64(CAST(rhs));
 
           CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
 
@@ -12371,8 +12430,8 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
           BIND(&if_rhsisnumber);
           {
             // Convert {lhs} and {rhs} to floating point values.
-            Node* lhs_value = LoadHeapNumberValue(CAST(lhs));
-            Node* rhs_value = LoadHeapNumberValue(CAST(rhs));
+            TNode<Float64T> lhs_value = LoadHeapNumberValue(CAST(lhs));
+            TNode<Float64T> rhs_value = LoadHeapNumberValue(CAST(rhs));
 
             CombineFeedback(var_type_feedback,
                             CompareOperationFeedback::kNumber);
@@ -12398,7 +12457,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
         BIND(&if_rhsisnotsmi);
         {
           // Load the instance type of {lhs}.
-          Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+          TNode<Uint16T> lhs_instance_type = LoadMapInstanceType(lhs_map);
 
           // Check if {lhs} is a String.
           Label if_lhsisstring(this, Label::kDeferred), if_lhsisnotstring(this);
@@ -12408,7 +12467,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
           BIND(&if_lhsisstring);
           {
             // Load the instance type of {rhs}.
-            Node* rhs_instance_type = LoadInstanceType(CAST(rhs));
+            TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs));
 
             // Check if {rhs} is also a String.
             Label if_rhsisstring(this, Label::kDeferred),
@@ -12591,15 +12650,17 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
 // ECMA#sec-samevalue
 // This algorithm differs from the Strict Equality Comparison Algorithm in its
 // treatment of signed zeroes and NaNs.
-void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
-                                          Label* if_false, SameValueMode mode) {
+void CodeStubAssembler::BranchIfSameValue(SloppyTNode<Object> lhs,
+                                          SloppyTNode<Object> rhs,
+                                          Label* if_true, Label* if_false,
+                                          SameValueMode mode) {
   VARIABLE(var_lhs_value, MachineRepresentation::kFloat64);
   VARIABLE(var_rhs_value, MachineRepresentation::kFloat64);
   Label do_fcmp(this);
 
   // Immediately jump to {if_true} if {lhs} == {rhs}, because - unlike
   // StrictEqual - SameValue considers two NaNs to be equal.
-  GotoIf(WordEqual(lhs, rhs), if_true);
+  GotoIf(TaggedEqual(lhs, rhs), if_true);
 
   // Check if the {lhs} is a Smi.
   Label if_lhsissmi(this), if_lhsisheapobject(this);
@@ -12610,9 +12671,9 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
     // Since {lhs} is a Smi, the comparison can only yield true
     // iff the {rhs} is a HeapNumber with the same float64 value.
     Branch(TaggedIsSmi(rhs), if_false, [&] {
-      GotoIfNot(IsHeapNumber(rhs), if_false);
-      var_lhs_value.Bind(SmiToFloat64(lhs));
-      var_rhs_value.Bind(LoadHeapNumberValue(rhs));
+      GotoIfNot(IsHeapNumber(CAST(rhs)), if_false);
+      var_lhs_value.Bind(SmiToFloat64(CAST(lhs)));
+      var_rhs_value.Bind(LoadHeapNumberValue(CAST(rhs)));
       Goto(&do_fcmp);
     });
   }
@@ -12625,9 +12686,9 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
         [&] {
           // Since {rhs} is a Smi, the comparison can only yield true
           // iff the {lhs} is a HeapNumber with the same float64 value.
-          GotoIfNot(IsHeapNumber(lhs), if_false);
-          var_lhs_value.Bind(LoadHeapNumberValue(lhs));
-          var_rhs_value.Bind(SmiToFloat64(rhs));
+          GotoIfNot(IsHeapNumber(CAST(lhs)), if_false);
+          var_lhs_value.Bind(LoadHeapNumberValue(CAST(lhs)));
+          var_rhs_value.Bind(SmiToFloat64(CAST(rhs)));
           Goto(&do_fcmp);
         },
         [&] {
@@ -12637,10 +12698,11 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
           // value.
           Label if_lhsisheapnumber(this), if_lhsisstring(this),
               if_lhsisbigint(this);
-          Node* const lhs_map = LoadMap(lhs);
+          TNode<Map> const lhs_map = LoadMap(CAST(lhs));
           GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
           if (mode != SameValueMode::kNumbersOnly) {
-            Node* const lhs_instance_type = LoadMapInstanceType(lhs_map);
+            TNode<Uint16T> const lhs_instance_type =
+                LoadMapInstanceType(lhs_map);
             GotoIf(IsStringInstanceType(lhs_instance_type), &if_lhsisstring);
             GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint);
           }
@@ -12648,9 +12710,9 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
 
           BIND(&if_lhsisheapnumber);
           {
-            GotoIfNot(IsHeapNumber(rhs), if_false);
-            var_lhs_value.Bind(LoadHeapNumberValue(lhs));
-            var_rhs_value.Bind(LoadHeapNumberValue(rhs));
+            GotoIfNot(IsHeapNumber(CAST(rhs)), if_false);
+            var_lhs_value.Bind(LoadHeapNumberValue(CAST(lhs)));
+            var_rhs_value.Bind(LoadHeapNumberValue(CAST(rhs)));
             Goto(&do_fcmp);
           }
 
@@ -12659,17 +12721,17 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
             {
               // Now we can only yield true if {rhs} is also a String
               // with the same sequence of characters.
-              GotoIfNot(IsString(rhs), if_false);
-              Node* const result = CallBuiltin(Builtins::kStringEqual,
-                                               NoContextConstant(), lhs, rhs);
+              GotoIfNot(IsString(CAST(rhs)), if_false);
+              TNode<Object> const result = CallBuiltin(
+                  Builtins::kStringEqual, NoContextConstant(), lhs, rhs);
               Branch(IsTrue(result), if_true, if_false);
             }
 
             BIND(&if_lhsisbigint);
             {
-              GotoIfNot(IsBigInt(rhs), if_false);
-              Node* const result = CallRuntime(Runtime::kBigIntEqualToBigInt,
-                                               NoContextConstant(), lhs, rhs);
+              GotoIfNot(IsBigInt(CAST(rhs)), if_false);
+              TNode<Object> const result = CallRuntime(
+                  Runtime::kBigIntEqualToBigInt, NoContextConstant(), lhs, rhs);
               Branch(IsTrue(result), if_true, if_false);
             }
           }
@@ -12696,8 +12758,8 @@ void CodeStubAssembler::BranchIfSameNumberValue(TNode<Float64T> lhs_value,
     // We still need to handle the case when {lhs} and {rhs} are -0.0 and
     // 0.0 (or vice versa). Compare the high word to
     // distinguish between the two.
-    Node* const lhs_hi_word = Float64ExtractHighWord32(lhs_value);
-    Node* const rhs_hi_word = Float64ExtractHighWord32(rhs_value);
+    TNode<Uint32T> const lhs_hi_word = Float64ExtractHighWord32(lhs_value);
+    TNode<Uint32T> const rhs_hi_word = Float64ExtractHighWord32(rhs_value);
 
     // If x is +0 and y is -0, return false.
     // If x is -0 and y is +0, return false.
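+    // Illustrative IEEE-754 bit patterns: +0.0 is 0x0000000000000000 and
+    // -0.0 is 0x8000000000000000, so Float64Equal treats them as equal and
+    // only the extracted high words (0x00000000 vs 0x80000000) differ, in
+    // exactly the sign bit.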
@@ -12802,15 +12864,15 @@ Node* CodeStubAssembler::Typeof(Node* value) {
 
   GotoIf(TaggedIsSmi(value), &return_number);
 
-  Node* map = LoadMap(value);
+  TNode<Map> map = LoadMap(value);
 
   GotoIf(IsHeapNumberMap(map), &return_number);
 
-  Node* instance_type = LoadMapInstanceType(map);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(map);
 
   GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &if_oddball);
 
-  Node* callable_or_undetectable_mask = Word32And(
+  TNode<Int32T> callable_or_undetectable_mask = Word32And(
       LoadMapBitField(map),
       Int32Constant(Map::IsCallableBit::kMask | Map::IsUndetectableBit::kMask));
 
@@ -12839,7 +12901,7 @@ Node* CodeStubAssembler::Typeof(Node* value) {
 
   BIND(&if_oddball);
   {
-    Node* type = LoadObjectField(value, Oddball::kTypeOfOffset);
+    TNode<Object> type = LoadObjectField(value, Oddball::kTypeOfOffset);
     result_var.Bind(type);
     Goto(&return_result);
   }
@@ -12884,8 +12946,8 @@ TNode<Object> CodeStubAssembler::GetSuperConstructor(
   TVARIABLE(Object, result);
 
   TNode<Map> map = LoadMap(active_function);
-  TNode<Object> prototype = LoadMapPrototype(map);
-  TNode<Map> prototype_map = LoadMap(CAST(prototype));
+  TNode<HeapObject> prototype = LoadMapPrototype(map);
+  TNode<Map> prototype_map = LoadMap(prototype);
   GotoIfNot(IsConstructorMap(prototype_map), &is_not_constructor);
 
   result = prototype;
@@ -12918,7 +12980,7 @@ TNode<JSReceiver> CodeStubAssembler::SpeciesConstructor(
 
   // 4. If Type(C) is not Object, throw a TypeError exception.
   ThrowIfNotJSReceiver(context, constructor,
-                       MessageTemplate::kConstructorNotReceiver);
+                       MessageTemplate::kConstructorNotReceiver, "");
 
   // 5. Let S be ? Get(C, @@species).
   TNode<Object> species =
@@ -12955,16 +13017,16 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
   GotoIfNot(IsJSReceiver(callable), &if_notreceiver);
 
   // Load the @@hasInstance property from {callable}.
-  Node* inst_of_handler =
+  TNode<Object> inst_of_handler =
       GetProperty(context, callable, HasInstanceSymbolConstant());
 
   // Optimize for the likely case where {inst_of_handler} is the builtin
   // Function.prototype[@@hasInstance] method, and emit a direct call in
   // that case without any additional checking.
-  Node* native_context = LoadNativeContext(context);
-  Node* function_has_instance =
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<Object> function_has_instance =
       LoadContextElement(native_context, Context::FUNCTION_HAS_INSTANCE_INDEX);
-  GotoIfNot(WordEqual(inst_of_handler, function_has_instance),
+  GotoIfNot(TaggedEqual(inst_of_handler, function_has_instance),
             &if_otherhandler);
   {
     // Call to Function.prototype[@@hasInstance] directly.
@@ -12996,7 +13058,7 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
     GotoIfNot(IsCallable(callable), &if_notcallable);
 
     // Use the OrdinaryHasInstance algorithm.
-    Node* result =
+    TNode<Object> result =
         CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object);
     var_result.Bind(result);
     Goto(&return_result);
@@ -13195,10 +13257,10 @@ TNode<Number> CodeStubAssembler::BitwiseOp(Node* left32, Node* right32,
 // ES #sec-createarrayiterator
 TNode<JSArrayIterator> CodeStubAssembler::CreateArrayIterator(
     TNode<Context> context, TNode<Object> object, IterationKind kind) {
-  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<Map> iterator_map = CAST(LoadContextElement(
       native_context, Context::INITIAL_ARRAY_ITERATOR_MAP_INDEX));
-  Node* iterator = Allocate(JSArrayIterator::kSize);
+  TNode<HeapObject> iterator = Allocate(JSArrayIterator::kSize);
   StoreMapNoWriteBarrier(iterator, iterator_map);
   StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOrHashOffset,
                        RootIndex::kEmptyFixedArray);
@@ -13218,10 +13280,10 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
     SloppyTNode<Context> context, SloppyTNode<Object> value,
     SloppyTNode<Oddball> done) {
   CSA_ASSERT(this, IsBoolean(done));
-  Node* native_context = LoadNativeContext(context);
-  Node* map =
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<Object> map =
       LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
-  Node* result = Allocate(JSIteratorResult::kSize);
+  TNode<HeapObject> result = Allocate(JSIteratorResult::kSize);
   StoreMapNoWriteBarrier(result, map);
   StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
                        RootIndex::kEmptyFixedArray);
@@ -13235,8 +13297,8 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
 Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
                                                           Node* key,
                                                           Node* value) {
-  Node* native_context = LoadNativeContext(context);
-  Node* length = SmiConstant(2);
+  TNode<NativeContext> native_context = LoadNativeContext(context);
+  TNode<Smi> length = SmiConstant(2);
   int const elements_size = FixedArray::SizeFor(2);
   TNode<FixedArray> elements = UncheckedCast<FixedArray>(
       Allocate(elements_size + JSArray::kSize + JSIteratorResult::kSize));
@@ -13245,7 +13307,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
   StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
   StoreFixedArrayElement(elements, 0, key);
   StoreFixedArrayElement(elements, 1, value);
-  Node* array_map = LoadContextElement(
+  TNode<Object> array_map = LoadContextElement(
       native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
   TNode<HeapObject> array = InnerAllocate(elements, elements_size);
   StoreMapNoWriteBarrier(array, array_map);
@@ -13253,7 +13315,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
                        RootIndex::kEmptyFixedArray);
   StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
   StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
-  Node* iterator_map =
+  TNode<Object> iterator_map =
       LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
   TNode<HeapObject> result = InnerAllocate(array, JSArray::kSize);
   StoreMapNoWriteBarrier(result, iterator_map);
@@ -13340,7 +13402,7 @@ CodeStubArguments::CodeStubArguments(
       argc_(argc),
       base_(),
       fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
-  Node* offset = assembler_->ElementOffsetFromIndex(
+  TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
       argc_, SYSTEM_POINTER_ELEMENTS, param_mode,
       (StandardFrameConstants::kFixedSlotCountAboveFp - 1) *
           kSystemPointerSize);
@@ -13365,7 +13427,7 @@ TNode<WordT> CodeStubArguments::AtIndexPtr(
   using Node = compiler::Node;
   Node* negated_index = assembler_->IntPtrOrSmiSub(
       assembler_->IntPtrOrSmiConstant(0, mode), index, mode);
-  Node* offset = assembler_->ElementOffsetFromIndex(
+  TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
       negated_index, SYSTEM_POINTER_ELEMENTS, mode, 0);
   return assembler_->IntPtrAdd(assembler_->UncheckedCast<IntPtrT>(base_),
                                offset);
@@ -13438,10 +13500,10 @@ void CodeStubArguments::ForEach(
     DCHECK_EQ(mode, argc_mode_);
     last = argc_;
   }
-  Node* start = assembler_->IntPtrSub(
+  TNode<IntPtrT> start = assembler_->IntPtrSub(
       assembler_->UncheckedCast<IntPtrT>(base_),
       assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS, mode));
-  Node* end = assembler_->IntPtrSub(
+  TNode<IntPtrT> end = assembler_->IntPtrSub(
       assembler_->UncheckedCast<IntPtrT>(base_),
       assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS, mode));
   assembler_->BuildFastLoop(
@@ -13510,13 +13572,15 @@ TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKind(
 
 TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKindForRead(
     TNode<Int32T> elements_kind) {
-  CSA_ASSERT(this,
-             Uint32LessThanOrEqual(elements_kind,
-                                   Int32Constant(LAST_FROZEN_ELEMENTS_KIND)));
+  CSA_ASSERT(this, Uint32LessThanOrEqual(
+                       elements_kind,
+                       Int32Constant(LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)));
 
   STATIC_ASSERT(HOLEY_SMI_ELEMENTS == (PACKED_SMI_ELEMENTS | 1));
   STATIC_ASSERT(HOLEY_ELEMENTS == (PACKED_ELEMENTS | 1));
   STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == (PACKED_DOUBLE_ELEMENTS | 1));
+  STATIC_ASSERT(HOLEY_NONEXTENSIBLE_ELEMENTS ==
+                (PACKED_NONEXTENSIBLE_ELEMENTS | 1));
   STATIC_ASSERT(HOLEY_SEALED_ELEMENTS == (PACKED_SEALED_ELEMENTS | 1));
   STATIC_ASSERT(HOLEY_FROZEN_ELEMENTS == (PACKED_FROZEN_ELEMENTS | 1));
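+  // Given the asserts above, every holey kind is its packed counterpart with
+  // bit 0 set (e.g. PACKED_ELEMENTS | 1 == HOLEY_ELEMENTS), so testing bit 0
+  // is sufficient here.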
   return IsSetWord32(elements_kind, 1);
@@ -13541,41 +13605,35 @@ TNode<BoolT> CodeStubAssembler::IsElementsKindInRange(
 }
 
 Node* CodeStubAssembler::IsDebugActive() {
-  Node* is_debug_active = Load(
-      MachineType::Uint8(),
+  TNode<Uint8T> is_debug_active = Load<Uint8T>(
       ExternalConstant(ExternalReference::debug_is_active_address(isolate())));
   return Word32NotEqual(is_debug_active, Int32Constant(0));
 }
 
 Node* CodeStubAssembler::IsPromiseHookEnabled() {
-  Node* const promise_hook = Load(
-      MachineType::Pointer(),
+  TNode<RawPtrT> const promise_hook = Load<RawPtrT>(
       ExternalConstant(ExternalReference::promise_hook_address(isolate())));
   return WordNotEqual(promise_hook, IntPtrConstant(0));
 }
 
 Node* CodeStubAssembler::HasAsyncEventDelegate() {
-  Node* const async_event_delegate =
-      Load(MachineType::Pointer(),
-           ExternalConstant(
-               ExternalReference::async_event_delegate_address(isolate())));
+  TNode<RawPtrT> const async_event_delegate = Load<RawPtrT>(ExternalConstant(
+      ExternalReference::async_event_delegate_address(isolate())));
   return WordNotEqual(async_event_delegate, IntPtrConstant(0));
 }
 
 Node* CodeStubAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate() {
-  Node* const promise_hook_or_async_event_delegate =
-      Load(MachineType::Uint8(),
-           ExternalConstant(
-               ExternalReference::promise_hook_or_async_event_delegate_address(
-                   isolate())));
+  TNode<Uint8T> const promise_hook_or_async_event_delegate =
+      Load<Uint8T>(ExternalConstant(
+          ExternalReference::promise_hook_or_async_event_delegate_address(
+              isolate())));
   return Word32NotEqual(promise_hook_or_async_event_delegate, Int32Constant(0));
 }
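+// The combined "or" flags read here and below are assumed to be single bytes
+// that the isolate rewrites whenever promise-hook, debug, or
+// async-event-delegate state changes, keeping each predicate a one-byte load
+// plus compare.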
 
 Node* CodeStubAssembler::
     IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
-  Node* const promise_hook_or_debug_is_active_or_async_event_delegate = Load(
-      MachineType::Uint8(),
-      ExternalConstant(
+  TNode<Uint8T> const promise_hook_or_debug_is_active_or_async_event_delegate =
+      Load<Uint8T>(ExternalConstant(
           ExternalReference::
               promise_hook_or_debug_is_active_or_async_event_delegate_address(
                   isolate())));
@@ -13622,7 +13680,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
 
   // Switch on data's instance type.
   BIND(&check_instance_type);
-  TNode<Int32T> data_type = LoadInstanceType(CAST(sfi_data));
+  TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
 
   int32_t case_values[] = {BYTECODE_ARRAY_TYPE,
                            WASM_EXPORTED_FUNCTION_DATA_TYPE,
@@ -13712,14 +13770,14 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
                                                            Node* context) {
   CSA_SLOW_ASSERT(this, IsMap(map));
 
-  Node* const code = GetSharedFunctionInfoCode(shared_info);
+  TNode<Code> const code = GetSharedFunctionInfoCode(shared_info);
 
   // TODO(ishell): All the callers of this function pass map loaded from
   // Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
   // map parameter.
   CSA_ASSERT(this, Word32BinaryNot(IsConstructorMap(map)));
   CSA_ASSERT(this, Word32BinaryNot(IsFunctionWithPrototypeSlotMap(map)));
-  Node* const fun = Allocate(JSFunction::kSizeWithoutPrototype);
+  TNode<HeapObject> const fun = Allocate(JSFunction::kSizeWithoutPrototype);
   STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
   StoreMapNoWriteBarrier(fun, map);
   StoreObjectFieldRoot(fun, JSObject::kPropertiesOrHashOffset,
@@ -13756,14 +13814,16 @@ void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
     // backing store.
     STATIC_ASSERT(static_cast<int>(JSObject::kElementsOffset) ==
                   static_cast<int>(JSProxy::kTargetOffset));
-    Node* object_elements = LoadObjectField(object, JSObject::kElementsOffset);
+    TNode<Object> object_elements =
+        LoadObjectField(object, JSObject::kElementsOffset);
     GotoIf(IsEmptyFixedArray(object_elements), &if_no_elements);
     GotoIf(IsEmptySlowElementDictionary(object_elements), &if_no_elements);
 
     // It might still be an empty JSArray.
     GotoIfNot(IsJSArrayMap(object_map), if_slow);
-    Node* object_length = LoadJSArrayLength(object);
-    Branch(WordEqual(object_length, SmiConstant(0)), &if_no_elements, if_slow);
+    TNode<Number> object_length = LoadJSArrayLength(object);
+    Branch(TaggedEqual(object_length, SmiConstant(0)), &if_no_elements,
+           if_slow);
 
     // Continue with the {object}s prototype.
     BIND(&if_no_elements);
@@ -13774,7 +13834,7 @@ void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
     var_object.Bind(object);
     object_map = LoadMap(object);
     var_object_map.Bind(object_map);
-    Node* object_enum_length = LoadMapEnumLength(object_map);
+    TNode<WordT> object_enum_length = LoadMapEnumLength(object_map);
     Branch(WordEqual(object_enum_length, IntPtrConstant(0)), &loop, if_slow);
   }
 }
@@ -13782,11 +13842,11 @@ void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
 Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
                                         Label* if_runtime) {
   Label if_fast(this), if_cache(this), if_no_cache(this, Label::kDeferred);
-  Node* receiver_map = LoadMap(receiver);
+  TNode<Map> receiver_map = LoadMap(receiver);
 
   // Check if the enum length field of the {receiver} is properly initialized,
   // indicating that there is an enum cache.
-  Node* receiver_enum_length = LoadMapEnumLength(receiver_map);
+  TNode<WordT> receiver_enum_length = LoadMapEnumLength(receiver_map);
   Branch(WordEqual(receiver_enum_length,
                    IntPtrConstant(kInvalidEnumCacheSentinel)),
          &if_no_cache, &if_cache);
@@ -13797,7 +13857,7 @@ Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
     GotoIfNot(IsDictionaryMap(receiver_map), if_runtime);
     TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
     TNode<Smi> length = GetNumberOfElements(properties);
-    GotoIfNot(WordEqual(length, SmiConstant(0)), if_runtime);
+    GotoIfNot(TaggedEqual(length, SmiConstant(0)), if_runtime);
     // Check that there are no elements on the {receiver} and its prototype
     // chain. Given that we do not create an EnumCache for dict-mode objects,
     // directly jump to {if_empty} if there are no elements and no properties
@@ -13847,15 +13907,10 @@ void CodeStubAssembler::Print(const char* prefix, Node* tagged_value) {
 void CodeStubAssembler::PerformStackCheck(TNode<Context> context) {
   Label ok(this), stack_check_interrupt(this, Label::kDeferred);
 
-  // The instruction sequence below is carefully crafted to hit our pattern
-  // matcher for stack checks within instruction selection.
-  // See StackCheckMatcher::Matched and JSGenericLowering::LowerJSStackCheck.
-
-  TNode<UintPtrT> sp = UncheckedCast<UintPtrT>(LoadStackPointer());
-  TNode<UintPtrT> stack_limit = UncheckedCast<UintPtrT>(Load(
-      MachineType::Pointer(),
-      ExternalConstant(ExternalReference::address_of_stack_limit(isolate()))));
-  TNode<BoolT> sp_within_limit = UintPtrLessThan(stack_limit, sp);
+  TNode<UintPtrT> stack_limit = UncheckedCast<UintPtrT>(
+      Load(MachineType::Pointer(),
+           ExternalConstant(ExternalReference::address_of_jslimit(isolate()))));
+  TNode<BoolT> sp_within_limit = StackPointerGreaterThan(stack_limit);
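+  // StackPointerGreaterThan is a dedicated machine operator, so the stack
+  // check no longer depends on instruction selection pattern-matching a
+  // hand-crafted sp/limit load-and-compare sequence.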
 
   Branch(sp_within_limit, &ok, &stack_check_interrupt);
 
@@ -13873,7 +13928,7 @@ void CodeStubAssembler::InitializeFunctionContext(Node* native_context,
   StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
                                  SmiConstant(slots));
 
-  Node* const empty_scope_info =
+  TNode<Object> const empty_scope_info =
       LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
   StoreContextElementNoWriteBarrier(context, Context::SCOPE_INFO_INDEX,
                                     empty_scope_info);
@@ -13904,7 +13959,7 @@ TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
 
   BIND(&runtime);
   {
-    TNode<Context> native_context = LoadNativeContext(context);
+    TNode<NativeContext> native_context = LoadNativeContext(context);
     TNode<JSFunction> array_function =
         CAST(LoadContextElement(native_context, Context::ARRAY_FUNCTION_INDEX));
     array = CAST(CallRuntime(Runtime::kNewArray, context, array_function,
@@ -13971,63 +14026,139 @@ void CodeStubAssembler::SetPropertyLength(TNode<Context> context,
   BIND(&done);
 }
 
-void CodeStubAssembler::GotoIfInitialPrototypePropertyModified(
-    TNode<Map> object_map, TNode<Map> initial_prototype_map, int descriptor,
-    RootIndex field_name_root_index, Label* if_modified) {
-  DescriptorIndexAndName index_name{descriptor, field_name_root_index};
-  GotoIfInitialPrototypePropertiesModified(
-      object_map, initial_prototype_map,
-      Vector<DescriptorIndexAndName>(&index_name, 1), if_modified);
+TNode<String> CodeStubAssembler::TaggedToDirectString(TNode<Object> value,
+                                                      Label* fail) {
+  ToDirectStringAssembler to_direct(state(), CAST(value));
+  to_direct.TryToDirect(fail);
+  to_direct.PointerToData(fail);
+  return CAST(value);
 }
 
-void CodeStubAssembler::GotoIfInitialPrototypePropertiesModified(
-    TNode<Map> object_map, TNode<Map> initial_prototype_map,
-    Vector<DescriptorIndexAndName> properties, Label* if_modified) {
-  TNode<Map> prototype_map = LoadMap(LoadMapPrototype(object_map));
-  GotoIfNot(WordEqual(prototype_map, initial_prototype_map), if_modified);
-
-  // We need to make sure that relevant properties in the prototype have
-  // not been tampered with. We do this by checking that their slots
-  // in the prototype's descriptor array are still marked as const.
+PrototypeCheckAssembler::PrototypeCheckAssembler(
+    compiler::CodeAssemblerState* state, Flags flags,
+    TNode<NativeContext> native_context, TNode<Map> initial_prototype_map,
+    Vector<DescriptorIndexNameValue> properties)
+    : CodeStubAssembler(state),
+      flags_(flags),
+      native_context_(native_context),
+      initial_prototype_map_(initial_prototype_map),
+      properties_(properties) {}
+
+void PrototypeCheckAssembler::CheckAndBranch(TNode<HeapObject> prototype,
+                                             Label* if_unmodified,
+                                             Label* if_modified) {
+  TNode<Map> prototype_map = LoadMap(prototype);
   TNode<DescriptorArray> descriptors = LoadMapDescriptors(prototype_map);
 
-  TNode<Uint32T> combined_details;
-  for (int i = 0; i < properties.length(); i++) {
-    // Assert the descriptor index is in-bounds.
-    int descriptor = properties[i].descriptor_index;
-    CSA_ASSERT(this, Int32LessThan(Int32Constant(descriptor),
-                                   LoadNumberOfDescriptors(descriptors)));
-    // Assert that the name is correct. This essentially checks that
-    // the descriptor index corresponds to the insertion order in
-    // the bootstrapper.
-    CSA_ASSERT(this,
-               WordEqual(LoadKeyByDescriptorEntry(descriptors, descriptor),
-                         LoadRoot(properties[i].name_root_index)));
-
-    TNode<Uint32T> details =
-        DescriptorArrayGetDetails(descriptors, Uint32Constant(descriptor));
-    if (i == 0) {
-      combined_details = details;
-    } else {
-      combined_details = Word32And(combined_details, details);
+  // The continuation of a failed fast check: if property identity checks are
+  // enabled, we continue there (since they may still classify the prototype as
+  // fast), otherwise we bail out.
+  Label property_identity_check(this, Label::kDeferred);
+  Label* if_fast_check_failed =
+      ((flags_ & kCheckPrototypePropertyIdentity) == 0)
+          ? if_modified
+          : &property_identity_check;
+
+  if ((flags_ & kCheckPrototypePropertyConstness) != 0) {
+    // A simple prototype map identity check. Note that map identity does not
+    // guarantee unmodified properties; it only guarantees that no properties
+    // have been added or deleted.
+
+    GotoIfNot(TaggedEqual(prototype_map, initial_prototype_map_),
+              if_fast_check_failed);
+
+    // We need to make sure that relevant properties in the prototype have
+    // not been tampered with. We do this by checking that their slots
+    // in the prototype's descriptor array are still marked as const.
+
+    TNode<Uint32T> combined_details;
+    for (int i = 0; i < properties_.length(); i++) {
+      // Assert the descriptor index is in-bounds.
+      int descriptor = properties_[i].descriptor_index;
+      CSA_ASSERT(this, Int32LessThan(Int32Constant(descriptor),
+                                     LoadNumberOfDescriptors(descriptors)));
+
+      // Assert that the name is correct. This essentially checks that
+      // the descriptor index corresponds to the insertion order in
+      // the bootstrapper.
+      CSA_ASSERT(
+          this,
+          TaggedEqual(LoadKeyByDescriptorEntry(descriptors, descriptor),
+                      CodeAssembler::LoadRoot(properties_[i].name_root_index)));
+
+      TNode<Uint32T> details =
+          DescriptorArrayGetDetails(descriptors, Uint32Constant(descriptor));
+
+      if (i == 0) {
+        combined_details = details;
+      } else {
+        combined_details = Word32And(combined_details, details);
+      }
     }
+
+    TNode<Uint32T> constness =
+        DecodeWord32<PropertyDetails::ConstnessField>(combined_details);
+
+    Branch(
+        Word32Equal(constness,
+                    Int32Constant(static_cast<int>(PropertyConstness::kConst))),
+        if_unmodified, if_fast_check_failed);
   }
 
-  TNode<Uint32T> constness =
-      DecodeWord32<PropertyDetails::ConstnessField>(combined_details);
+  if ((flags_ & kCheckPrototypePropertyIdentity) != 0) {
+    // The above checks have failed, for whatever reason (maybe the prototype
+    // map has changed, or a property is no longer const). This block
+    // implements a more thorough check that can also accept prototypes which
+    // 1. no longer have the initial map, 2. have mutable relevant properties,
+    // but 3. still hold the expected value for each relevant property.
 
-  GotoIfNot(
-      Word32Equal(constness,
-                  Int32Constant(static_cast<int>(PropertyConstness::kConst))),
-      if_modified);
-}
+    BIND(&property_identity_check);
 
-TNode<String> CodeStubAssembler::TaggedToDirectString(TNode<Object> value,
-                                                      Label* fail) {
-  ToDirectStringAssembler to_direct(state(), value);
-  to_direct.TryToDirect(fail);
-  to_direct.PointerToData(fail);
-  return CAST(value);
+    int max_descriptor_index = -1;
+    for (int i = 0; i < properties_.length(); i++) {
+      max_descriptor_index =
+          std::max(max_descriptor_index, properties_[i].descriptor_index);
+    }
+
+    // If the greatest relevant descriptor index is out of bounds, the
+    // expected properties cannot all exist, so the prototype counts as
+    // modified.
+    GotoIfNot(Int32LessThan(Int32Constant(max_descriptor_index),
+                            LoadNumberOfDescriptors(descriptors)),
+              if_modified);
+
+    // Logic below only handles maps with fast properties.
+    GotoIfMapHasSlowProperties(prototype_map, if_modified);
+
+    for (int i = 0; i < properties_.length(); i++) {
+      const DescriptorIndexNameValue& p = properties_[i];
+      const int descriptor = p.descriptor_index;
+
+      // Check if the name is correct. This essentially checks that
+      // the descriptor index corresponds to the insertion order in
+      // the bootstrapper.
+      GotoIfNot(TaggedEqual(LoadKeyByDescriptorEntry(descriptors, descriptor),
+                            CodeAssembler::LoadRoot(p.name_root_index)),
+                if_modified);
+
+      // Finally, check whether the actual value equals the expected value.
+      TNode<Uint32T> details =
+          DescriptorArrayGetDetails(descriptors, Uint32Constant(descriptor));
+      TVARIABLE(Uint32T, var_details, details);
+      TVARIABLE(Object, var_value);
+
+      const int key_index = DescriptorArray::ToKeyIndex(descriptor);
+      LoadPropertyFromFastObject(prototype, prototype_map, descriptors,
+                                 IntPtrConstant(key_index), &var_details,
+                                 &var_value);
+
+      TNode<Object> actual_value = var_value.value();
+      TNode<Object> expected_value =
+          LoadContextElement(native_context_, p.expected_value_context_index);
+      GotoIfNot(TaggedEqual(actual_value, expected_value), if_modified);
+    }
+
+    Goto(if_unmodified);
+  }
 }
 
 }  // namespace internal
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 47abd027490679..9884d04e66e1da 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -32,65 +32,124 @@ class StubCache;
 
 enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
 
-#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V)                              \
-  V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
-  V(PromiseSpeciesProtector, promise_species_protector,                    \
-    PromiseSpeciesProtector)                                               \
-  V(TypedArraySpeciesProtector, typed_array_species_protector,             \
-    TypedArraySpeciesProtector)                                            \
+#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V)                                 \
+  V(ArrayIteratorProtector, array_iterator_protector, ArrayIteratorProtector) \
+  V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector)    \
+  V(MapIteratorProtector, map_iterator_protector, MapIteratorProtector)       \
+  V(NoElementsProtector, no_elements_protector, NoElementsProtector)          \
+  V(NumberStringCache, number_string_cache, NumberStringCache)                \
+  V(PromiseResolveProtector, promise_resolve_protector,                       \
+    PromiseResolveProtector)                                                  \
+  V(PromiseSpeciesProtector, promise_species_protector,                       \
+    PromiseSpeciesProtector)                                                  \
+  V(PromiseThenProtector, promise_then_protector, PromiseThenProtector)       \
+  V(SetIteratorProtector, set_iterator_protector, SetIteratorProtector)       \
+  V(SingleCharacterStringCache, single_character_string_cache,                \
+    SingleCharacterStringCache)                                               \
+  V(StringIteratorProtector, string_iterator_protector,                       \
+    StringIteratorProtector)                                                  \
+  V(TypedArraySpeciesProtector, typed_array_species_protector,                \
+    TypedArraySpeciesProtector)
 
 #define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)                                \
   V(AccessorInfoMap, accessor_info_map, AccessorInfoMap)                       \
   V(AccessorPairMap, accessor_pair_map, AccessorPairMap)                       \
-  V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap)     \
+  V(AllocationMementoMap, allocation_memento_map, AllocationMementoMap)        \
   V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map,    \
     AllocationSiteWithoutWeakNextMap)                                          \
+  V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap)     \
+  V(arguments_to_string, arguments_to_string, ArgumentsToString)               \
+  V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map,         \
+    ArrayBoilerplateDescriptionMap)                                            \
+  V(Array_string, Array_string, ArrayString)                                   \
+  V(array_to_string, array_to_string, ArrayToString)                           \
   V(BooleanMap, boolean_map, BooleanMap)                                       \
+  V(boolean_to_string, boolean_to_string, BooleanToString)                     \
+  V(CellMap, cell_map, CellMap)                                                \
   V(CodeMap, code_map, CodeMap)                                                \
+  V(ConsOneByteStringMap, cons_one_byte_string_map, ConsOneByteStringMap)      \
+  V(ConsStringMap, cons_string_map, ConsStringMap)                             \
+  V(constructor_string, constructor_string, ConstructorString)                 \
+  V(date_to_string, date_to_string, DateToString)                              \
+  V(default_string, default_string, DefaultString)                             \
+  V(EmptyByteArray, empty_byte_array, EmptyByteArray)                          \
   V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray)                       \
   V(EmptyPropertyDictionary, empty_property_dictionary,                        \
     EmptyPropertyDictionary)                                                   \
   V(EmptySlowElementDictionary, empty_slow_element_dictionary,                 \
     EmptySlowElementDictionary)                                                \
   V(empty_string, empty_string, EmptyString)                                   \
+  V(error_to_string, error_to_string, ErrorToString)                           \
   V(FalseValue, false_value, False)                                            \
   V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap)                 \
   V(FixedArrayMap, fixed_array_map, FixedArrayMap)                             \
   V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap)                   \
   V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap)          \
+  V(Function_string, function_string, FunctionString)                          \
   V(FunctionTemplateInfoMap, function_template_info_map,                       \
     FunctionTemplateInfoMap)                                                   \
+  V(function_to_string, function_to_string, FunctionToString)                  \
   V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap)          \
   V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol)               \
   V(HeapNumberMap, heap_number_map, HeapNumberMap)                             \
+  V(is_concat_spreadable_symbol, is_concat_spreadable_symbol,                  \
+    IsConcatSpreadableSymbol)                                                  \
   V(iterator_symbol, iterator_symbol, IteratorSymbol)                          \
   V(length_string, length_string, LengthString)                                \
   V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap)          \
+  V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol)                 \
   V(MetaMap, meta_map, MetaMap)                                                \
   V(MinusZeroValue, minus_zero_value, MinusZero)                               \
-  V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap)       \
+  V(ModuleContextMap, module_context_map, ModuleContextMap)                    \
+  V(name_string, name_string, NameString)                                      \
   V(NanValue, nan_value, Nan)                                                  \
+  V(NativeContextMap, native_context_map, NativeContextMap)                    \
+  V(next_string, next_string, NextString)                                      \
   V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap)                \
+  V(null_to_string, null_to_string, NullToString)                              \
   V(NullValue, null_value, Null)                                               \
+  V(number_string, number_string, numberString)                                \
+  V(number_to_string, number_to_string, NumberToString)                        \
+  V(Object_string, Object_string, ObjectString)                                \
+  V(object_to_string, object_to_string, ObjectToString)                        \
   V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap)                \
+  V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap)          \
+  V(premonomorphic_symbol, premonomorphic_symbol, PremonomorphicSymbol)        \
   V(PreparseDataMap, preparse_data_map, PreparseDataMap)                       \
+  V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap)        \
+  V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map,   \
+    PromiseFulfillReactionJobTaskMap)                                          \
+  V(PromiseReactionMap, promise_reaction_map, PromiseReactionMap)              \
+  V(PromiseRejectReactionJobTaskMap, promise_reject_reaction_job_task_map,     \
+    PromiseRejectReactionJobTaskMap)                                           \
   V(prototype_string, prototype_string, PrototypeString)                       \
+  V(PrototypeInfoMap, prototype_info_map, PrototypeInfoMap)                    \
+  V(regexp_to_string, regexp_to_string, RegexpToString)                        \
+  V(resolve_string, resolve_string, ResolveString)                             \
   V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap)    \
+  V(SloppyArgumentsElementsMap, sloppy_arguments_elements_map,                 \
+    SloppyArgumentsElementsMap)                                                \
+  V(species_symbol, species_symbol, SpeciesSymbol)                             \
+  V(StaleRegister, stale_register, StaleRegister)                              \
   V(StoreHandler0Map, store_handler0_map, StoreHandler0Map)                    \
+  V(string_string, string_string, StringString)                                \
+  V(string_to_string, string_to_string, StringToString)                        \
   V(SymbolMap, symbol_map, SymbolMap)                                          \
   V(TheHoleValue, the_hole_value, TheHole)                                     \
+  V(then_string, then_string, ThenString)                                      \
+  V(to_string_tag_symbol, to_string_tag_symbol, ToStringTagSymbol)             \
   V(TransitionArrayMap, transition_array_map, TransitionArrayMap)              \
   V(TrueValue, true_value, True)                                               \
   V(Tuple2Map, tuple2_map, Tuple2Map)                                          \
   V(Tuple3Map, tuple3_map, Tuple3Map)                                          \
-  V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map,         \
-    ArrayBoilerplateDescriptionMap)                                            \
   V(UncompiledDataWithoutPreparseDataMap,                                      \
     uncompiled_data_without_preparse_data_map,                                 \
     UncompiledDataWithoutPreparseDataMap)                                      \
   V(UncompiledDataWithPreparseDataMap, uncompiled_data_with_preparse_data_map, \
     UncompiledDataWithPreparseDataMap)                                         \
+  V(undefined_to_string, undefined_to_string, UndefinedToString)               \
   V(UndefinedValue, undefined_value, Undefined)                                \
+  V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol)           \
   V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap)
 
 #define HEAP_IMMOVABLE_OBJECT_LIST(V)   \
@@ -119,18 +178,17 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
 #define CSA_ASSERT_2_ARGS(a, b, ...) {{a, #a}, {b, #b}}
 // clang-format on
 #define SWITCH_CSA_ASSERT_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
-#define CSA_ASSERT_ARGS(...)                                      \
-  SWITCH_CSA_ASSERT_ARGS(dummy, ##__VA_ARGS__, CSA_ASSERT_2_ARGS, \
-                         CSA_ASSERT_1_ARG, CSA_ASSERT_0_ARGS)
+#define CSA_ASSERT_ARGS(...)                                        \
+  CALL(SWITCH_CSA_ASSERT_ARGS, (, ##__VA_ARGS__, CSA_ASSERT_2_ARGS, \
+                                CSA_ASSERT_1_ARG, CSA_ASSERT_0_ARGS))
+// Workaround for MSVC to skip comma in empty __VA_ARGS__.
+#define CALL(x, y) x y
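+// Expansion sketch: with no extra arguments, ##__VA_ARGS__ swallows the
+// leading comma, the argument list shifts left, and SWITCH_CSA_ASSERT_ARGS
+// lands on CSA_ASSERT_0_ARGS; one extra value selects CSA_ASSERT_1_ARG and
+// two select CSA_ASSERT_2_ARGS. The CALL indirection forces MSVC to rescan
+// the expanded list before that selection happens.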
 
 // CSA_ASSERT(csa, <condition>, <extra values to print...>)
 
-#define CSA_ASSERT(csa, condition_node, ...)                                  \
-  (csa)->Assert(                                                              \
-      [&]() -> compiler::Node* {                                              \
-        return implicit_cast<compiler::SloppyTNode<Word32T>>(condition_node); \
-      },                                                                      \
-      #condition_node, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
+#define CSA_ASSERT(csa, condition_node, ...)                         \
+  (csa)->Assert(condition_node, #condition_node, __FILE__, __LINE__, \
+                CSA_ASSERT_ARGS(__VA_ARGS__))
 
 // CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
 //     <extra values to print...>)
@@ -141,8 +199,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
 #define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected)                         \
   (csa)->Assert(                                                             \
       [&]() -> compiler::Node* {                                             \
-        compiler::Node* const argc =                                         \
-            (csa)->Parameter(Descriptor::kJSActualArgumentsCount);           \
+        TNode<Word32T> const argc = UncheckedCast<Word32T>(                  \
+            (csa)->Parameter(Descriptor::kJSActualArgumentsCount));          \
         return (csa)->Op(argc, (csa)->Int32Constant(expected));              \
       },                                                                     \
       "argc " #op " " #expected, __FILE__, __LINE__,                         \
@@ -161,6 +219,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
   name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
 #define TYPED_VARIABLE_DEF(type, name, ...) \
   TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
+#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) \
+  name(CSA_DEBUG_INFO(name), __VA_ARGS__)
 #else  // DEBUG
 #define CSA_ASSERT(csa, ...) ((void)0)
 #define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
@@ -169,9 +229,12 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
 #define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
 #define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__)
 #define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
+#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) name(__VA_ARGS__)
 #endif  // DEBUG
 
 #define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))
+#define TVARIABLE_CONSTRUCTOR(...) \
+  EXPAND(TYPED_VARIABLE_CONSTRUCTOR(__VA_ARGS__, this))
 
 #ifdef ENABLE_SLOW_DCHECKS
 #define CSA_SLOW_ASSERT(csa, ...) \
@@ -222,7 +285,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // fewer live ranges. Thus only convert indices to untagged value on 64-bit
   // platforms.
   ParameterMode OptimalParameterMode() const {
-    return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS;
+#if defined(BINT_IS_SMI)
+    return SMI_PARAMETERS;
+#elif defined(BINT_IS_INTPTR)
+    return INTPTR_PARAMETERS;
+#else
+#error Unknown BInt type.
+#endif
   }
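+  // BInt ("best int") is assumed to alias Smi when BINT_IS_SMI is defined
+  // and IntPtrT when BINT_IS_INTPTR is defined, so the optimal mode simply
+  // follows the alias: e.g. x64 without pointer compression gets
+  // INTPTR_PARAMETERS, while pointer-compressed builds keep indices as Smis.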
 
   MachineRepresentation ParameterRepresentation(ParameterMode mode) const {
@@ -268,7 +337,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
     } else {
       DCHECK_EQ(mode, ParameterMode::INTPTR_PARAMETERS);
       intptr_t constant;
-      if (ToIntPtrConstant(node, constant)) {
+      if (ToIntPtrConstant(node, &constant)) {
         *out = constant;
         return true;
       }
@@ -277,7 +346,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
     return false;
   }
 
-#if defined(V8_HOST_ARCH_32_BIT)
+#if defined(BINT_IS_SMI)
   TNode<Smi> BIntToSmi(TNode<BInt> source) { return source; }
   TNode<IntPtrT> BIntToIntPtr(TNode<BInt> source) {
     return SmiToIntPtr(source);
@@ -286,7 +355,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<BInt> IntPtrToBInt(TNode<IntPtrT> source) {
     return SmiFromIntPtr(source);
   }
-#elif defined(V8_HOST_ARCH_64_BIT)
+#elif defined(BINT_IS_INTPTR)
   TNode<Smi> BIntToSmi(TNode<BInt> source) { return SmiFromIntPtr(source); }
   TNode<IntPtrT> BIntToIntPtr(TNode<BInt> source) { return source; }
   TNode<BInt> SmiToBInt(TNode<Smi> source) { return SmiToIntPtr(source); }
@@ -404,6 +473,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   intptr_t ConstexprWordNot(intptr_t a) { return ~a; }
   uintptr_t ConstexprWordNot(uintptr_t a) { return ~a; }
 
+  TNode<BoolT> TaggedEqual(TNode<UnionT<Object, MaybeObject>> a,
+                           TNode<UnionT<Object, MaybeObject>> b) {
+    // In pointer-compressed architectures, the instruction selector will narrow
+    // this comparison to a 32-bit one.
+    return WordEqual(ReinterpretCast<WordT>(a), ReinterpretCast<WordT>(b));
+  }
+
+  TNode<BoolT> TaggedNotEqual(TNode<UnionT<Object, MaybeObject>> a,
+                              TNode<UnionT<Object, MaybeObject>> b) {
+    // In pointer-compressed architectures, the instruction selector will narrow
+    // this comparison to a 32-bit one.
+    return WordNotEqual(ReinterpretCast<WordT>(a), ReinterpretCast<WordT>(b));
+  }
+
   TNode<Object> NoContextConstant();
 
 #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name)  \
@@ -426,7 +509,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
 #undef HEAP_CONSTANT_TEST
 
+  TNode<BInt> BIntConstant(int value);
+
   Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
+  TNode<BoolT> IntPtrOrSmiEqual(Node* left, Node* right, ParameterMode mode);
+  TNode<BoolT> IntPtrOrSmiNotEqual(Node* left, Node* right, ParameterMode mode);
 
   bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
   bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
@@ -512,15 +599,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   }
 
   TNode<Smi> SmiShr(TNode<Smi> a, int shift) {
-    return BitcastWordToTaggedSigned(
-        WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift),
-                BitcastTaggedSignedToWord(SmiConstant(-1))));
+    if (kTaggedSize == kInt64Size) {
+      return BitcastWordToTaggedSigned(
+          WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift),
+                  BitcastTaggedSignedToWord(SmiConstant(-1))));
+    } else {
+      // For pointer-compressed Smis, make sure to truncate to int32 before
+      // shifting, to keep the values of the top 32 bits from leaking into
+      // the sign bit of the Smi.
+      return BitcastWordToTaggedSigned(WordAnd(
+          ChangeInt32ToIntPtr(Word32Shr(
+              TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)),
+          BitcastTaggedSignedToWord(SmiConstant(-1))));
+    }
   }
 
   TNode<Smi> SmiSar(TNode<Smi> a, int shift) {
-    return BitcastWordToTaggedSigned(
-        WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift),
-                BitcastTaggedSignedToWord(SmiConstant(-1))));
+    if (kTaggedSize == kInt64Size) {
+      return BitcastWordToTaggedSigned(
+          WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift),
+                  BitcastTaggedSignedToWord(SmiConstant(-1))));
+    } else {
+      // For pointer-compressed Smis, make sure to truncate to int32 before
+      // shifting, to keep the values of the top 32 bits from changing the
+      // sign bit of the Smi.
+      return BitcastWordToTaggedSigned(WordAnd(
+          ChangeInt32ToIntPtr(Word32Sar(
+              TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)),
+          BitcastTaggedSignedToWord(SmiConstant(-1))));
+    }
   }
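+  // Worked example for the compressed case (assuming 31-bit Smis stored as
+  // value << 1): Smi(6) occupies the low 32 bits as 0x0000000C while the
+  // upper half of the register may hold garbage. A full-width WordSar would
+  // smear that garbage into the low bits, whereas
+  // Word32Sar(0x0000000C, 1) == 0x00000006, which sign-extends and, with
+  // bit 0 masked off, re-tags to Smi(3) == SmiSar(Smi(6), 1) as expected.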
 
   Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
@@ -543,10 +650,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
 #define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName)                \
   TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) {                         \
-    if (SmiValuesAre32Bits()) {                                                \
+    if (kTaggedSize == kInt64Size) {                                           \
       return IntPtrOpName(BitcastTaggedSignedToWord(a),                        \
                           BitcastTaggedSignedToWord(b));                       \
     } else {                                                                   \
+      DCHECK_EQ(kTaggedSize, kInt32Size);                                      \
       DCHECK(SmiValuesAre31Bits());                                            \
       if (kSystemPointerSize == kInt64Size) {                                  \
         CSA_ASSERT(this, IsValidSmi(a));                                       \
@@ -586,6 +694,31 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   //  1 iff x > y.
   TNode<Smi> SmiLexicographicCompare(TNode<Smi> x, TNode<Smi> y);
 
+#ifdef BINT_IS_SMI
+#define BINT_COMPARISON_OP(BIntOpName, SmiOpName, IntPtrOpName) \
+  TNode<BoolT> BIntOpName(TNode<BInt> a, TNode<BInt> b) {       \
+    return SmiOpName(a, b);                                     \
+  }
+#else
+#define BINT_COMPARISON_OP(BIntOpName, SmiOpName, IntPtrOpName) \
+  TNode<BoolT> BIntOpName(TNode<BInt> a, TNode<BInt> b) {       \
+    return IntPtrOpName(a, b);                                  \
+  }
+#endif
+  BINT_COMPARISON_OP(BIntEqual, SmiEqual, WordEqual)
+  BINT_COMPARISON_OP(BIntNotEqual, SmiNotEqual, WordNotEqual)
+  BINT_COMPARISON_OP(BIntAbove, SmiAbove, UintPtrGreaterThan)
+  BINT_COMPARISON_OP(BIntAboveOrEqual, SmiAboveOrEqual,
+                     UintPtrGreaterThanOrEqual)
+  BINT_COMPARISON_OP(BIntBelow, SmiBelow, UintPtrLessThan)
+  BINT_COMPARISON_OP(BIntLessThan, SmiLessThan, IntPtrLessThan)
+  BINT_COMPARISON_OP(BIntLessThanOrEqual, SmiLessThanOrEqual,
+                     IntPtrLessThanOrEqual)
+  BINT_COMPARISON_OP(BIntGreaterThan, SmiGreaterThan, IntPtrGreaterThan)
+  BINT_COMPARISON_OP(BIntGreaterThanOrEqual, SmiGreaterThanOrEqual,
+                     IntPtrGreaterThanOrEqual)
+#undef BINT_COMPARISON_OP
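+  // For example, BIntLessThan(a, b) resolves to SmiLessThan under BINT_IS_SMI
+  // and to IntPtrLessThan under BINT_IS_INTPTR, so callers can compare
+  // lengths and indices without spelling out the representation.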
+
   // Smi | HeapNumber operations.
   TNode<Number> NumberInc(SloppyTNode<Number> value);
   TNode<Number> NumberDec(SloppyTNode<Number> value);
@@ -620,12 +753,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   void Assert(const NodeGenerator& condition_body, const char* message,
               const char* file, int line,
               std::initializer_list<ExtraNode> extra_nodes = {});
+  void Assert(SloppyTNode<Word32T> condition_node, const char* message,
+              const char* file, int line,
+              std::initializer_list<ExtraNode> extra_nodes = {});
   void Check(const BranchGenerator& branch, const char* message,
              const char* file, int line,
              std::initializer_list<ExtraNode> extra_nodes = {});
   void Check(const NodeGenerator& condition_body, const char* message,
              const char* file, int line,
              std::initializer_list<ExtraNode> extra_nodes = {});
+  void Check(SloppyTNode<Word32T> condition_node, const char* message,
+             const char* file, int line,
+             std::initializer_list<ExtraNode> extra_nodes = {});
   void FailAssert(const char* message, const char* file, int line,
                   std::initializer_list<ExtraNode> extra_nodes = {});
 
@@ -713,6 +852,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
             single_char[0]));
   }
 
+  TNode<Int32T> TruncateWordToInt32(SloppyTNode<WordT> value);
   TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
 
   // Check a value for smi-ness
@@ -751,21 +891,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
     Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
   }
 
-  void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
+  void BranchIfFloat64IsNaN(TNode<Float64T> value, Label* if_true,
+                            Label* if_false) {
     Branch(Float64Equal(value, value), if_false, if_true);
   }
 
   // Branches to {if_true} if ToBoolean applied to {value} yields true,
   // otherwise goes to {if_false}.
-  void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false);
+  void BranchIfToBooleanIsTrue(SloppyTNode<Object> value, Label* if_true,
+                               Label* if_false);
 
   // Branches to {if_false} if ToBoolean applied to {value} yields false,
   // otherwise goes to {if_true}.
-  void BranchIfToBooleanIsFalse(Node* value, Label* if_false, Label* if_true) {
+  void BranchIfToBooleanIsFalse(SloppyTNode<Object> value, Label* if_false,
+                                Label* if_true) {
     BranchIfToBooleanIsTrue(value, if_true, if_false);
   }
 
-  void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
+  void BranchIfJSReceiver(SloppyTNode<Object> object, Label* if_true,
+                          Label* if_false);
 
   // Branches to {if_true} when --force-slow-path flag has been passed.
   // It's used for testing to ensure that slow path implementation behave
@@ -831,9 +975,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
                                          int offset);
   // Load a SMI field, untag it, and convert to Word32.
-  TNode<Int32T> LoadAndUntagToWord32ObjectField(Node* object, int offset);
-  // Load a SMI and untag it.
-  TNode<IntPtrT> LoadAndUntagSmi(Node* base, int index);
+  TNode<Int32T> LoadAndUntagToWord32ObjectField(SloppyTNode<HeapObject> object,
+                                                int offset);
 
   TNode<MaybeObject> LoadMaybeWeakObjectField(SloppyTNode<HeapObject> object,
                                               int offset) {
@@ -847,6 +990,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   // Reference is the CSA-equivalent of a Torque reference value,
   // representing an inner pointer into a HeapObject.
+  // TODO(gsps): Remove in favor of flattened {Load,Store}Reference interface
   struct Reference {
     TNode<HeapObject> object;
     TNode<IntPtrT> offset;
@@ -899,11 +1043,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                   value, StoreToObjectWriteBarrier::kNone);
   }
 
-  // Tag a smi and store it.
-  void StoreAndTagSmi(Node* base, int offset, Node* value);
-
   // Load the floating point value of a HeapNumber.
-  TNode<Float64T> LoadHeapNumberValue(SloppyTNode<HeapNumber> object);
+  TNode<Float64T> LoadHeapNumberValue(SloppyTNode<HeapObject> object);
   // Load the Map of an HeapObject.
   TNode<Map> LoadMap(SloppyTNode<HeapObject> object);
   // Load the instance type of an HeapObject.
@@ -915,6 +1056,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                       InstanceType type);
   TNode<BoolT> TaggedDoesntHaveInstanceType(SloppyTNode<HeapObject> any_tagged,
                                             InstanceType type);
+
+  TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
+  void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
+
   // Load the properties backing store of a JSObject.
   TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSObject> object);
   TNode<HeapObject> LoadFastProperties(SloppyTNode<JSObject> object);
@@ -940,6 +1085,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
       SloppyTNode<WeakFixedArray> array);
   // Load the number of descriptors in DescriptorArray.
   TNode<Int32T> LoadNumberOfDescriptors(TNode<DescriptorArray> array);
+  // Load the number of own descriptors of a map.
+  TNode<Int32T> LoadNumberOfOwnDescriptors(TNode<Map> map);
   // Load the bit field of a Map.
   TNode<Int32T> LoadMapBitField(SloppyTNode<Map> map);
   // Load bit field 2 of a map.
@@ -968,7 +1115,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Load the constructor of a Map (equivalent to Map::GetConstructor()).
   TNode<Object> LoadMapConstructor(SloppyTNode<Map> map);
   // Load the EnumLength of a Map.
-  Node* LoadMapEnumLength(SloppyTNode<Map> map);
+  TNode<WordT> LoadMapEnumLength(SloppyTNode<Map> map);
   // Load the back-pointer of a Map.
   TNode<Object> LoadMapBackPointer(SloppyTNode<Map> map);
   // Checks that |map| has only simple properties, returns bitfield3.
@@ -1176,9 +1323,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                        SMI_PARAMETERS, if_hole);
   }
 
-  Node* LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
-                                    TNode<IntPtrT> index,
-                                    Label* if_hole = nullptr) {
+  TNode<Float64T> LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
+                                              TNode<IntPtrT> index,
+                                              Label* if_hole = nullptr) {
     return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
                                        INTPTR_PARAMETERS, if_hole);
   }
@@ -1257,20 +1404,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   void StoreContextElementNoWriteBarrier(SloppyTNode<Context> context,
                                          int slot_index,
                                          SloppyTNode<Object> value);
-  TNode<Context> LoadNativeContext(SloppyTNode<Context> context);
+  TNode<NativeContext> LoadNativeContext(SloppyTNode<Context> context);
   // Calling this is only valid if there's a module context in the chain.
   TNode<Context> LoadModuleContext(SloppyTNode<Context> context);
 
-  void GotoIfContextElementEqual(Node* value, Node* native_context,
-                                 int slot_index, Label* if_equal) {
-    GotoIf(WordEqual(value, LoadContextElement(native_context, slot_index)),
+  void GotoIfContextElementEqual(SloppyTNode<Object> value,
+                                 Node* native_context, int slot_index,
+                                 Label* if_equal) {
+    GotoIf(TaggedEqual(value, LoadContextElement(native_context, slot_index)),
            if_equal);
   }
 
   TNode<Map> LoadJSArrayElementsMap(ElementsKind kind,
-                                    SloppyTNode<Context> native_context);
+                                    SloppyTNode<NativeContext> native_context);
   TNode<Map> LoadJSArrayElementsMap(SloppyTNode<Int32T> kind,
-                                    SloppyTNode<Context> native_context);
+                                    SloppyTNode<NativeContext> native_context);
 
   TNode<BoolT> HasPrototypeSlot(TNode<JSFunction> function);
   TNode<BoolT> IsGeneratorFunction(TNode<JSFunction> function);
@@ -1278,7 +1426,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   void GotoIfPrototypeRequiresRuntimeLookup(TNode<JSFunction> function,
                                             TNode<Map> map, Label* runtime);
   // Load the "prototype" property of a JSFunction.
-  Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout);
+  Node* LoadJSFunctionPrototype(TNode<JSFunction> function, Label* if_bailout);
 
   TNode<BytecodeArray> LoadSharedFunctionInfoBytecodeArray(
       SloppyTNode<SharedFunctionInfo> shared);
@@ -1289,8 +1437,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Store the floating point value of a HeapNumber.
   void StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
                             SloppyTNode<Float64T> value);
-  void StoreMutableHeapNumberValue(SloppyTNode<MutableHeapNumber> object,
-                                   SloppyTNode<Float64T> value);
   // Store a field to an object on the heap.
   void StoreObjectField(Node* object, int offset, Node* value);
   void StoreObjectField(Node* object, Node* offset, Node* value);
@@ -1361,9 +1507,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                   CheckBounds::kDebugOnly);
   }
 
-  void StoreJSArrayLength(TNode<JSArray> array, TNode<Smi> length);
-  void StoreElements(TNode<Object> object, TNode<FixedArrayBase> elements);
-
   void StoreFixedArrayOrPropertyArrayElement(
       Node* array, Node* index, Node* value,
       WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
@@ -1512,10 +1655,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
     return AllocateHeapNumberWithValue(Float64Constant(value));
   }
 
-  // Allocate a MutableHeapNumber with a specific value.
-  TNode<MutableHeapNumber> AllocateMutableHeapNumberWithValue(
-      SloppyTNode<Float64T> value);
-
   // Allocate a BigInt with {length} digits. Sets the sign bit to {false}.
   // Does not initialize the digits.
   TNode<BigInt> AllocateBigInt(TNode<IntPtrT> length);
@@ -1539,12 +1678,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Allocate a SeqOneByteString with the given length.
   TNode<String> AllocateSeqOneByteString(uint32_t length,
                                          AllocationFlags flags = kNone);
-  TNode<String> AllocateSeqOneByteString(Node* context, TNode<Uint32T> length,
+  TNode<String> AllocateSeqOneByteString(TNode<Uint32T> length,
                                          AllocationFlags flags = kNone);
   // Allocate a SeqTwoByteString with the given length.
   TNode<String> AllocateSeqTwoByteString(uint32_t length,
                                          AllocationFlags flags = kNone);
-  TNode<String> AllocateSeqTwoByteString(Node* context, TNode<Uint32T> length,
+  TNode<String> AllocateSeqTwoByteString(TNode<Uint32T> length,
                                          AllocationFlags flags = kNone);
 
   // Allocate a SlicedOneByteString with the given length, parent and offset.
@@ -1587,7 +1726,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   template <typename CollectionType>
   void FindOrderedHashTableEntry(
       Node* table, Node* hash,
-      const std::function<void(Node*, Label*, Label*)>& key_compare,
+      const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
       Variable* entry_start_position, Label* entry_found, Label* not_found);
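
// A hedged usage sketch for the now-typed key_compare callback. This assumes
// V8-internal CSA headers, a surrounding CodeStubAssembler method, and an
// in-scope TNode<Object> key; the names are illustrative:
//
//   TVARIABLE(IntPtrT, entry_start);
//   Label found(this), not_found(this);
//   FindOrderedHashTableEntry<OrderedHashMap>(
//       table, hash,
//       [&](TNode<Object> candidate_key, Label* if_same, Label* if_not_same) {
//         Branch(TaggedEqual(candidate_key, key), if_same, if_not_same);
//       },
//       &entry_start, &found, &not_found);
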
 
   template <typename CollectionType>
@@ -1770,7 +1909,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // array word by word. The source may be destroyed at the end of this macro.
   //
   // Otherwise, specify DestroySource::kNo for operations where an Object is
-  // being cloned, to ensure that MutableHeapNumbers are unique between the
+  // being cloned, to ensure that mutable HeapNumbers are unique between the
   // source and cloned object.
   void CopyPropertyArrayValues(Node* from_array, Node* to_array, Node* length,
                                WriteBarrierMode barrier_mode,
@@ -1856,16 +1995,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   TNode<FixedDoubleArray> HeapObjectToFixedDoubleArray(TNode<HeapObject> base,
                                                        Label* cast_fail) {
-    GotoIf(
-        WordNotEqual(LoadMap(base), LoadRoot(RootIndex::kFixedDoubleArrayMap)),
-        cast_fail);
+    GotoIf(TaggedNotEqual(LoadMap(base), FixedDoubleArrayMapConstant()),
+           cast_fail);
     return UncheckedCast<FixedDoubleArray>(base);
   }
 
   TNode<SloppyArgumentsElements> HeapObjectToSloppyArgumentsElements(
       TNode<HeapObject> base, Label* cast_fail) {
-    GotoIf(WordNotEqual(LoadMap(base),
-                        LoadRoot(RootIndex::kSloppyArgumentsElementsMap)),
+    GotoIf(TaggedNotEqual(LoadMap(base), SloppyArgumentsElementsMapConstant()),
            cast_fail);
     return UncheckedCast<SloppyArgumentsElements>(base);
   }
@@ -1968,7 +2105,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // HOLEY_SMI_ELEMENTS kind, and a conversion took place, the result will be
   // compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS.
   TNode<FixedArray> ExtractToFixedArray(
-      Node* source, Node* first, Node* count, Node* capacity, Node* source_map,
+      SloppyTNode<FixedArrayBase> source, Node* first, Node* count,
+      Node* capacity, SloppyTNode<Map> source_map,
       ElementsKind from_kind = PACKED_ELEMENTS,
       AllocationFlags allocation_flags = AllocationFlag::kNone,
       ExtractFixedArrayFlags extract_flags =
@@ -2169,10 +2307,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                InstanceType instance_type,
                                char const* method_name);
   // Throws a TypeError for {method_name} if {value} is not a JSReceiver.
-  // Returns the {value}'s map.
-  Node* ThrowIfNotJSReceiver(Node* context, Node* value,
-                             MessageTemplate msg_template,
-                             const char* method_name = nullptr);
+  void ThrowIfNotJSReceiver(TNode<Context> context, TNode<Object> value,
+                            MessageTemplate msg_template,
+                            const char* method_name);
   void ThrowIfNotCallable(TNode<Context> context, TNode<Object> value,
                           const char* method_name);
 
@@ -2191,7 +2328,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<BoolT> IsAccessorInfo(SloppyTNode<HeapObject> object);
   TNode<BoolT> IsAccessorPair(SloppyTNode<HeapObject> object);
   TNode<BoolT> IsAllocationSite(SloppyTNode<HeapObject> object);
-  TNode<BoolT> IsAnyHeapNumber(SloppyTNode<HeapObject> object);
   TNode<BoolT> IsNoElementsProtectorCellInvalid();
   TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
   TNode<BoolT> IsBigIntInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2210,7 +2346,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<BoolT> IsNameDictionary(SloppyTNode<HeapObject> object);
   TNode<BoolT> IsGlobalDictionary(SloppyTNode<HeapObject> object);
   TNode<BoolT> IsExtensibleMap(SloppyTNode<Map> map);
-  TNode<BoolT> IsFrozenOrSealedElementsKindMap(SloppyTNode<Map> map);
   TNode<BoolT> IsExtensibleNonPrototypeMap(TNode<Map> map);
   TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
   TNode<BoolT> IsFeedbackCell(SloppyTNode<HeapObject> object);
@@ -2265,7 +2400,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<BoolT> IsJSPrimitiveWrapperMap(SloppyTNode<Map> map);
   TNode<BoolT> IsJSPrimitiveWrapper(SloppyTNode<HeapObject> object);
   TNode<BoolT> IsMap(SloppyTNode<HeapObject> object);
-  TNode<BoolT> IsMutableHeapNumber(SloppyTNode<HeapObject> object);
   TNode<BoolT> IsName(SloppyTNode<HeapObject> object);
   TNode<BoolT> IsNameInstanceType(SloppyTNode<Int32T> instance_type);
   TNode<BoolT> IsNativeContext(SloppyTNode<HeapObject> object);
@@ -2322,7 +2456,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<BoolT> IsArraySpeciesProtectorCellInvalid();
   TNode<BoolT> IsTypedArraySpeciesProtectorCellInvalid();
   TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid(
-      TNode<Context> native_context);
+      TNode<NativeContext> native_context);
   TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
 
   TNode<BoolT> IsMockArrayBufferAllocatorFlag() {
@@ -2414,21 +2548,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   // Check if |string| is an indirect (thin or flat cons) string type that can
   // be dereferenced by DerefIndirectString.
-  void BranchIfCanDerefIndirectString(Node* string, Node* instance_type,
+  void BranchIfCanDerefIndirectString(TNode<String> string,
+                                      TNode<Int32T> instance_type,
                                       Label* can_deref, Label* cannot_deref);
   // Unpack an indirect (thin or flat cons) string type.
-  void DerefIndirectString(Variable* var_string, Node* instance_type);
+  void DerefIndirectString(TVariable<String>* var_string,
+                           TNode<Int32T> instance_type);
   // Check if |var_string| has an indirect (thin or flat cons) string type,
   // and unpack it if so.
-  void MaybeDerefIndirectString(Variable* var_string, Node* instance_type,
-                                Label* did_deref, Label* cannot_deref);
+  void MaybeDerefIndirectString(TVariable<String>* var_string,
+                                TNode<Int32T> instance_type, Label* did_deref,
+                                Label* cannot_deref);
   // Check if |var_left| or |var_right| has an indirect (thin or flat cons)
   // string type, and unpack it/them if so. Fall through if nothing was done.
-  void MaybeDerefIndirectStrings(Variable* var_left, Node* left_instance_type,
-                                 Variable* var_right, Node* right_instance_type,
+  void MaybeDerefIndirectStrings(TVariable<String>* var_left,
+                                 TNode<Int32T> left_instance_type,
+                                 TVariable<String>* var_right,
+                                 TNode<Int32T> right_instance_type,
                                  Label* did_something);
-  Node* DerefIndirectString(TNode<String> string, TNode<Int32T> instance_type,
-                            Label* cannot_deref);
+  TNode<String> DerefIndirectString(TNode<String> string,
+                                    TNode<Int32T> instance_type,
+                                    Label* cannot_deref);
 
   TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint);
 
@@ -2470,9 +2610,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<String> ToString_Inline(SloppyTNode<Context> context,
                                 SloppyTNode<Object> input);
 
-  // Convert any object to a Primitive.
-  Node* JSReceiverToPrimitive(Node* context, Node* input);
-
   TNode<JSReceiver> ToObject(SloppyTNode<Context> context,
                              SloppyTNode<Object> input);
 
@@ -2618,7 +2755,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   // Returns true if all of the mask's bits in given |word| are clear.
   TNode<BoolT> IsClearWord(SloppyTNode<WordT> word, uint32_t mask) {
-    return WordEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0));
+    return IntPtrEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0));
   }
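
// The IntPtrEqual change above is type hygiene only; the underlying check
// ("all mask bits in |word| are clear") is unchanged. A standalone
// equivalent with illustrative types:
#include <cassert>
#include <cstdint>

bool IsClearWord(uintptr_t word, uint32_t mask) {
  return (word & mask) == 0;
}

int main() {
  assert(IsClearWord(0x8, 0x7));   // low three bits are clear
  assert(!IsClearWord(0x9, 0x7));  // bit 0 is set
}
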
 
   void SetCounter(StatsCounter* counter, int value);
@@ -2976,7 +3113,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Returns true if {object} has {prototype} somewhere in its prototype
   // chain, otherwise false is returned. Might cause arbitrary side effects
   // due to [[GetPrototypeOf]] invocations.
-  Node* HasInPrototypeChain(Node* context, Node* object, Node* prototype);
+  Node* HasInPrototypeChain(Node* context, Node* object,
+                            SloppyTNode<Object> prototype);
   // ES6 section 7.3.19 OrdinaryHasInstance (C, O)
   Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object);
 
@@ -3017,7 +3155,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   // Check if a property name might require protector invalidation when it is
   // used for a property store or deletion.
-  void CheckForAssociatedProtector(Node* name, Label* if_protector);
+  void CheckForAssociatedProtector(SloppyTNode<Name> name, Label* if_protector);
 
   TNode<Map> LoadReceiverMap(SloppyTNode<Object> receiver);
 
@@ -3075,7 +3213,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                         Variable* maybe_converted_value = nullptr);
 
   Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind,
-                             Node* length, Node* key, ParameterMode mode,
+                             SloppyTNode<UintPtrT> length,
+                             SloppyTNode<WordT> key, ParameterMode mode,
                              Label* bailout);
 
   Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind,
@@ -3168,11 +3307,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   void InitializeFieldsWithRoot(Node* object, Node* start_offset,
                                 Node* end_offset, RootIndex root);
 
-  Node* RelationalComparison(Operation op, Node* left, Node* right,
-                             Node* context,
+  Node* RelationalComparison(Operation op, SloppyTNode<Object> left,
+                             SloppyTNode<Object> right,
+                             SloppyTNode<Context> context,
                              Variable* var_type_feedback = nullptr);
 
-  void BranchIfNumberRelationalComparison(Operation op, Node* left, Node* right,
+  void BranchIfNumberRelationalComparison(Operation op,
+                                          SloppyTNode<Number> left,
+                                          SloppyTNode<Number> right,
                                           Label* if_true, Label* if_false);
 
   void BranchIfNumberEqual(TNode<Number> left, TNode<Number> right,
@@ -3218,7 +3360,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false);
 
-  Node* Equal(Node* lhs, Node* rhs, Node* context,
+  Node* Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+              SloppyTNode<Context> context,
               Variable* var_type_feedback = nullptr);
 
   TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
@@ -3228,7 +3371,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Similar to StrictEqual except that NaNs are treated as equal and minus zero
   // differs from positive zero.
   enum class SameValueMode { kNumbersOnly, kFull };
-  void BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true, Label* if_false,
+  void BranchIfSameValue(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+                         Label* if_true, Label* if_false,
                          SameValueMode mode = SameValueMode::kFull);
   // A part of BranchIfSameValue() that handles two double values.
   // Treats NaN == NaN and +0 != -0.
@@ -3340,7 +3484,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   template <class... TArgs>
   Node* MakeTypeError(MessageTemplate message, Node* context, TArgs... args) {
     STATIC_ASSERT(sizeof...(TArgs) <= 3);
-    Node* const make_type_error = LoadContextElement(
+    TNode<Object> const make_type_error = LoadContextElement(
         LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX);
     return CallJS(CodeFactory::Call(isolate()), context, make_type_error,
                   UndefinedConstant(), SmiConstant(message), args...);
@@ -3354,6 +3498,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   bool ConstexprBoolNot(bool value) { return !value; }
 
   bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; }
+  bool ConstexprInt31NotEqual(int31_t a, int31_t b) { return a != b; }
   bool ConstexprInt31GreaterThanEqual(int31_t a, int31_t b) { return a >= b; }
   uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; }
   int31_t ConstexprInt31Add(int31_t a, int31_t b) {
@@ -3372,34 +3517,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   void SetPropertyLength(TNode<Context> context, TNode<Object> array,
                          TNode<Number> length);
 
-  // Checks that {object_map}'s prototype map is the {initial_prototype_map} and
-  // makes sure that the field with name at index {descriptor} is still
-  // constant. If it is not, go to label {if_modified}.
-  //
-  // To make the checks robust, the method also asserts that the descriptor has
-  // the right key, the caller must pass the root index of the key
-  // in {field_name_root_index}.
-  //
-  // This is useful for checking that given function has not been patched
-  // on the prototype.
-  void GotoIfInitialPrototypePropertyModified(TNode<Map> object_map,
-                                              TNode<Map> initial_prototype_map,
-                                              int descfriptor,
-                                              RootIndex field_name_root_index,
-                                              Label* if_modified);
-  struct DescriptorIndexAndName {
-    DescriptorIndexAndName() {}
-    DescriptorIndexAndName(int descriptor_index, RootIndex name_root_index)
-        : descriptor_index(descriptor_index),
-          name_root_index(name_root_index) {}
-
-    int descriptor_index;
-    RootIndex name_root_index;
-  };
-  void GotoIfInitialPrototypePropertiesModified(
-      TNode<Map> object_map, TNode<Map> initial_prototype_map,
-      Vector<DescriptorIndexAndName> properties, Label* if_modified);
-
   // Implements DescriptorArray::Search().
   void DescriptorLookup(SloppyTNode<Name> unique_name,
                         SloppyTNode<DescriptorArray> descriptors,
@@ -3514,8 +3631,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   TNode<JSArray> ArrayCreate(TNode<Context> context, TNode<Number> length);
 
-  // Allocate a clone of a mutable primitive, if {object} is a
-  // MutableHeapNumber.
+  // Allocate a clone of a mutable primitive, if {object} is a mutable
+  // HeapNumber.
   TNode<Object> CloneIfMutablePrimitive(TNode<Object> object);
 
  private:
@@ -3556,9 +3673,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                      TNode<Uint32T> length,
                                      TNode<String> parent, TNode<Smi> offset);
 
-  // Allocate a MutableHeapNumber without initializing its value.
-  TNode<MutableHeapNumber> AllocateMutableHeapNumber();
-
   Node* SelectImpl(TNode<BoolT> condition, const NodeGenerator& true_body,
                    const NodeGenerator& false_body, MachineRepresentation rep);
 
@@ -3572,7 +3686,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                    TNode<Uint32T> entry_index);
 
   TNode<Smi> CollectFeedbackForString(SloppyTNode<Int32T> instance_type);
-  void GenerateEqual_Same(Node* value, Label* if_equal, Label* if_notequal,
+  void GenerateEqual_Same(SloppyTNode<Object> value, Label* if_equal,
+                          Label* if_notequal,
                           Variable* var_type_feedback = nullptr);
   TNode<String> AllocAndCopyStringCharacters(Node* from,
                                              Node* from_instance_type,
@@ -3602,6 +3717,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<T> LoadDescriptorArrayElement(TNode<DescriptorArray> object,
                                       TNode<IntPtrT> index,
                                       int additional_offset);
+
+  // Hide LoadRoot for subclasses of CodeStubAssembler. If you get an error
+  // complaining about this method, don't make it public; add your root to
+  // HEAP_(IM)MUTABLE_IMMOVABLE_OBJECT_LIST instead. If you *really* need
+  // LoadRoot, use CodeAssembler::LoadRoot.
+  TNode<Object> LoadRoot(RootIndex root_index) {
+    return CodeAssembler::LoadRoot(root_index);
+  }
 };
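
// A standalone sketch of the hiding pattern used for LoadRoot above:
// re-declaring an inherited member privately (and forwarding to the base)
// turns accidental use in subclasses into a compile error. The class names
// below are illustrative.
#include <iostream>

class CodeAssemblerLike {
 public:
  int LoadRoot(int root_index) { return root_index * 2; }
};

class CodeStubAssemblerLike : public CodeAssemblerLike {
 public:
  int LoadRootViaConstant(int root_index) { return LoadRoot(root_index); }

 private:
  // Hides the inherited name for subclasses of CodeStubAssemblerLike.
  int LoadRoot(int root_index) {
    return CodeAssemblerLike::LoadRoot(root_index);
  }
};

class SomeBuiltinAssembler : public CodeStubAssemblerLike {
 public:
  int Use() {
    // return LoadRoot(1);  // error: 'LoadRoot' is a private member
    return LoadRootViaConstant(1);
  }
};

int main() { std::cout << SomeBuiltinAssembler().Use() << "\n"; }
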
 
 class V8_EXPORT_PRIVATE CodeStubArguments {
@@ -3725,8 +3848,8 @@ class ToDirectStringAssembler : public CodeStubAssembler {
   };
   using Flags = base::Flags<Flag>;
 
-  ToDirectStringAssembler(compiler::CodeAssemblerState* state, Node* string,
-                          Flags flags = Flags());
+  ToDirectStringAssembler(compiler::CodeAssemblerState* state,
+                          TNode<String> string, Flags flags = Flags());
 
   // Converts flat cons, thin, and sliced strings and returns the direct
   // string. The result can be either a sequential or external string.
@@ -3746,22 +3869,57 @@ class ToDirectStringAssembler : public CodeStubAssembler {
     return TryToSequential(PTR_TO_STRING, if_bailout);
   }
 
-  Node* string() { return var_string_.value(); }
-  Node* instance_type() { return var_instance_type_.value(); }
-  TNode<IntPtrT> offset() {
-    return UncheckedCast<IntPtrT>(var_offset_.value());
-  }
-  Node* is_external() { return var_is_external_.value(); }
+  TNode<String> string() { return var_string_.value(); }
+  TNode<Int32T> instance_type() { return var_instance_type_.value(); }
+  TNode<IntPtrT> offset() { return var_offset_.value(); }
+  TNode<Word32T> is_external() { return var_is_external_.value(); }
 
  private:
   TNode<RawPtrT> TryToSequential(StringPointerKind ptr_kind, Label* if_bailout);
 
-  Variable var_string_;
-  Variable var_instance_type_;
-  Variable var_offset_;
-  Variable var_is_external_;
+  TVariable<String> var_string_;
+  TVariable<Int32T> var_instance_type_;
+  TVariable<IntPtrT> var_offset_;
+  TVariable<Word32T> var_is_external_;
+
+  const Flags flags_;
+};
+
+// Performs checks on a given prototype (e.g. map identity, property
+// verification), intended for use in fast-path checks.
+class PrototypeCheckAssembler : public CodeStubAssembler {
+ public:
+  enum Flag {
+    kCheckPrototypePropertyConstness = 1 << 0,
+    kCheckPrototypePropertyIdentity = 1 << 1,
+    kCheckFull =
+        kCheckPrototypePropertyConstness | kCheckPrototypePropertyIdentity,
+  };
+  using Flags = base::Flags<Flag>;
+
+  // A tuple describing a relevant property. It contains the descriptor index of
+  // the property (within the descriptor array), the property's expected name
+  // (stored as a root), and the property's expected value (stored on the native
+  // context).
+  struct DescriptorIndexNameValue {
+    int descriptor_index;
+    RootIndex name_root_index;
+    int expected_value_context_index;
+  };
+
+  PrototypeCheckAssembler(compiler::CodeAssemblerState* state, Flags flags,
+                          TNode<NativeContext> native_context,
+                          TNode<Map> initial_prototype_map,
+                          Vector<DescriptorIndexNameValue> properties);
 
+  void CheckAndBranch(TNode<HeapObject> prototype, Label* if_unmodified,
+                      Label* if_modified);
+
+ private:
   const Flags flags_;
+  const TNode<NativeContext> native_context_;
+  const TNode<Map> initial_prototype_map_;
+  const Vector<DescriptorIndexNameValue> properties_;
 };
 
 DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags)
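
// A hedged usage sketch for the new PrototypeCheckAssembler. This assumes
// V8-internal headers and a surrounding assembler with |native_context|,
// |initial_prototype_map|, and |prototype| in scope; the descriptor index,
// root name, and context slot are illustrative:
//
//   PrototypeCheckAssembler::DescriptorIndexNameValue property{
//       0, RootIndex::kexec_string, Context::REGEXP_EXEC_FUNCTION_INDEX};
//   PrototypeCheckAssembler prototype_check_assembler(
//       state(), PrototypeCheckAssembler::kCheckFull, native_context,
//       initial_prototype_map,
//       Vector<PrototypeCheckAssembler::DescriptorIndexNameValue>(&property,
//                                                                 1));
//   Label if_unmodified(this), if_modified(this);
//   prototype_check_assembler.CheckAndBranch(prototype, &if_unmodified,
//                                            &if_modified);
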
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 906eb0f0ca2d5e..3a8ab3398aabf3 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -410,6 +410,12 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
     DCHECK(!compilation_info->has_asm_wasm_data());
     DCHECK(!shared_info->HasFeedbackMetadata());
 
+    // If the function failed asm-wasm compilation, mark asm_wasm as broken
+    // to ensure we don't try to compile as asm-wasm.
+    if (compilation_info->literal()->scope()->IsAsmModule()) {
+      shared_info->set_is_asm_wasm_broken(true);
+    }
+
     InstallBytecodeArray(compilation_info->bytecode_array(), shared_info,
                          parse_info, isolate);
 
@@ -529,20 +535,16 @@ std::unique_ptr<UnoptimizedCompilationJob> GenerateUnoptimizedCode(
   DisallowHeapAccess no_heap_access;
   DCHECK(inner_function_jobs->empty());
 
-  if (!Compiler::Analyze(parse_info)) {
-    return std::unique_ptr<UnoptimizedCompilationJob>();
+  std::unique_ptr<UnoptimizedCompilationJob> job;
+  if (Compiler::Analyze(parse_info)) {
+    job = ExecuteUnoptimizedCompileJobs(parse_info, parse_info->literal(),
+                                        allocator, inner_function_jobs);
   }
 
-  // Prepare and execute compilation of the outer-most function.
-  std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
-      ExecuteUnoptimizedCompileJobs(parse_info, parse_info->literal(),
-                                    allocator, inner_function_jobs));
-  if (!outer_function_job) return std::unique_ptr<UnoptimizedCompilationJob>();
-
   // Character stream shouldn't be used again.
   parse_info->ResetCharacterStream();
 
-  return outer_function_job;
+  return job;
 }
 
 MaybeHandle<SharedFunctionInfo> GenerateUnoptimizedCodeForToplevel(
@@ -1181,6 +1183,9 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
   DCHECK(shared_info->HasBytecodeArray());
   DCHECK(!shared_info->GetBytecodeArray().HasSourcePositionTable());
 
+  // Source position collection should be context independent.
+  NullContextScope null_context_scope(isolate);
+
   // Collecting source positions requires allocating a new source position
   // table.
   DCHECK(AllowHeapAllocation::IsAllowed());
@@ -1215,59 +1220,51 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
   parse_info.set_collect_source_positions();
   if (FLAG_allow_natives_syntax) parse_info.set_allow_natives_syntax();
 
-  // Parse and update ParseInfo with the results.
-  if (!parsing::ParseAny(&parse_info, shared_info, isolate)) {
+  // Parse and update ParseInfo with the results. Don't update parsing
+  // statistics since we've already parsed the code before.
+  if (!parsing::ParseAny(&parse_info, shared_info, isolate,
+                         parsing::ReportErrorsAndStatisticsMode::kNo)) {
     // Parsing failed probably as a result of stack exhaustion.
     bytecode->SetSourcePositionsFailedToCollect();
     return FailWithPendingException(
         isolate, &parse_info, Compiler::ClearExceptionFlag::CLEAR_EXCEPTION);
   }
 
+  // Character stream shouldn't be used again.
+  parse_info.ResetCharacterStream();
+
   // Generate the unoptimized bytecode.
   // TODO(v8:8510): Consider forcing preparsing of inner functions to avoid
   // wasting time fully parsing them when they won't ever be used.
-  UnoptimizedCompilationJobList inner_function_jobs;
-  std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
-      GenerateUnoptimizedCode(&parse_info, isolate->allocator(),
-                              &inner_function_jobs));
-  if (!outer_function_job) {
-    // Recompiling failed probably as a result of stack exhaustion.
-    bytecode->SetSourcePositionsFailedToCollect();
-    return FailWithPendingException(
-        isolate, &parse_info, Compiler::ClearExceptionFlag::CLEAR_EXCEPTION);
-  }
+  std::unique_ptr<UnoptimizedCompilationJob> job;
+  {
+    if (!Compiler::Analyze(&parse_info)) {
+      // Recompiling failed probably as a result of stack exhaustion.
+      bytecode->SetSourcePositionsFailedToCollect();
+      return FailWithPendingException(
+          isolate, &parse_info, Compiler::ClearExceptionFlag::CLEAR_EXCEPTION);
+    }
 
-  DCHECK(outer_function_job->compilation_info()->collect_source_positions());
+    job = interpreter::Interpreter::NewSourcePositionCollectionJob(
+        &parse_info, parse_info.literal(), bytecode, isolate->allocator());
 
-  // TODO(v8:8510) Avoid re-allocating bytecode array/constant pool and
-  // re-internalizeing the ast values. Maybe we could use the
-  // unoptimized_compilation_flag to signal that all we need is the source
-  // position table (and we could do the DCHECK that the bytecode array is the
-  // same in the bytecode-generator, by comparing the real bytecode array on the
-  // SFI with the off-heap bytecode array).
+    if (!job || job->ExecuteJob() != CompilationJob::SUCCEEDED ||
+        job->FinalizeJob(shared_info, isolate) != CompilationJob::SUCCEEDED) {
+      // Recompiling failed probably as a result of stack exhaustion.
+      bytecode->SetSourcePositionsFailedToCollect();
+      return FailWithPendingException(
+          isolate, &parse_info, Compiler::ClearExceptionFlag::CLEAR_EXCEPTION);
+    }
+  }
 
-  // Internalize ast values onto the heap.
-  parse_info.ast_value_factory()->Internalize(isolate);
+  DCHECK(job->compilation_info()->collect_source_positions());
 
-  {
-    // Allocate scope infos for the literal.
-    DeclarationScope::AllocateScopeInfos(&parse_info, isolate);
-    CHECK_EQ(outer_function_job->FinalizeJob(shared_info, isolate),
-             CompilationJob::SUCCEEDED);
-  }
-
-  // Update the source position table on the original bytecode.
-  DCHECK(bytecode->IsBytecodeEqual(
-      *outer_function_job->compilation_info()->bytecode_array()));
-  DCHECK(outer_function_job->compilation_info()->has_bytecode_array());
-  ByteArray source_position_table = outer_function_job->compilation_info()
-                                        ->bytecode_array()
-                                        ->SourcePositionTable();
-  bytecode->set_source_position_table(source_position_table);
   // If debugging, make sure that instrumented bytecode has the source position
   // table set on it as well.
   if (shared_info->HasDebugInfo() &&
       shared_info->GetDebugInfo().HasInstrumentedBytecodeArray()) {
+    ByteArray source_position_table =
+        job->compilation_info()->bytecode_array()->SourcePositionTable();
     shared_info->GetDebugBytecodeArray().set_source_position_table(
         source_position_table);
   }
@@ -1352,6 +1349,16 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
     // Collect source positions immediately to try and flush out bytecode
     // mismatches.
     SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
+
+    // Do the same for eagerly compiled inner functions.
+    for (auto&& inner_job : inner_function_jobs) {
+      Handle<SharedFunctionInfo> inner_shared_info =
+          Compiler::GetSharedFunctionInfo(
+              inner_job->compilation_info()->literal(), parse_info.script(),
+              isolate);
+      SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate,
+                                                         inner_shared_info);
+    }
   }
 
   return true;
@@ -2110,7 +2117,11 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
     script->set_wrapped_arguments(*arguments);
 
     parse_info.set_eval();  // Use an eval scope as declaration scope.
-    parse_info.set_wrapped_as_function();
+    parse_info.set_function_syntax_kind(FunctionSyntaxKind::kWrapped);
+    // TODO(delphick): Remove this and instead make the wrapped and wrapper
+    // functions fully non-lazy, thus preventing source positions from being
+    // omitted.
+    parse_info.set_collect_source_positions(true);
     // parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
     if (!context->IsNativeContext()) {
       parse_info.set_outer_scope_info(handle(context->scope_info(), isolate));
@@ -2217,7 +2228,28 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
 
   // If we found an existing shared function info, return it.
   Handle<SharedFunctionInfo> existing;
-  if (maybe_existing.ToHandle(&existing)) return existing;
+  if (maybe_existing.ToHandle(&existing)) {
+    // If the function has been uncompiled (bytecode flushed) it will have lost
+    // any preparsed data. If we produced preparsed data during this compile for
+    // this function, replace the uncompiled data with one that includes it.
+    if (literal->produced_preparse_data() != nullptr &&
+        existing->HasUncompiledDataWithoutPreparseData()) {
+      DCHECK(literal->inferred_name()->Equals(
+          existing->uncompiled_data().inferred_name()));
+      DCHECK_EQ(literal->start_position(),
+                existing->uncompiled_data().start_position());
+      DCHECK_EQ(literal->end_position(),
+                existing->uncompiled_data().end_position());
+      Handle<PreparseData> preparse_data =
+          literal->produced_preparse_data()->Serialize(isolate);
+      Handle<UncompiledData> new_uncompiled_data =
+          isolate->factory()->NewUncompiledDataWithPreparseData(
+              literal->inferred_name(), literal->start_position(),
+              literal->end_position(), preparse_data);
+      existing->set_uncompiled_data(*new_uncompiled_data);
+    }
+    return existing;
+  }
 
   // Allocate a shared function info object which will be compiled lazily.
   Handle<SharedFunctionInfo> result =
@@ -2294,8 +2326,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
   return CompilationJob::FAILED;
 }
 
-void Compiler::PostInstantiation(Handle<JSFunction> function,
-                                 AllocationType allocation) {
+void Compiler::PostInstantiation(Handle<JSFunction> function) {
   Isolate* isolate = function->GetIsolate();
   Handle<SharedFunctionInfo> shared(function->shared(), isolate);
   IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 836f7381233b3a..83d44dea29cf78 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -86,7 +86,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
   // Give the compiler a chance to perform low-latency initialization tasks of
   // the given {function} on its instantiation. Note that only the runtime will
   // offer this chance, optimized closure instantiation will not call this.
-  static void PostInstantiation(Handle<JSFunction> function, AllocationType);
+  static void PostInstantiation(Handle<JSFunction> function);
 
   // Parser::Parse, then Compiler::Analyze.
   static bool ParseAndAnalyze(ParseInfo* parse_info,
@@ -201,15 +201,11 @@ class V8_EXPORT_PRIVATE CompilationJob {
     kFailed,
   };
 
-  CompilationJob(uintptr_t stack_limit, State initial_state)
-      : state_(initial_state), stack_limit_(stack_limit) {
+  explicit CompilationJob(State initial_state) : state_(initial_state) {
     timer_.Start();
   }
   virtual ~CompilationJob() = default;
 
-  void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
-  uintptr_t stack_limit() const { return stack_limit_; }
-
   State state() const { return state_; }
 
  protected:
@@ -228,7 +224,6 @@ class V8_EXPORT_PRIVATE CompilationJob {
 
  private:
   State state_;
-  uintptr_t stack_limit_;
   base::ElapsedTimer timer_;
 };
 
@@ -242,9 +237,10 @@ class V8_EXPORT_PRIVATE CompilationJob {
 // Either of phases can either fail or succeed.
 class UnoptimizedCompilationJob : public CompilationJob {
  public:
-  UnoptimizedCompilationJob(intptr_t stack_limit, ParseInfo* parse_info,
+  UnoptimizedCompilationJob(uintptr_t stack_limit, ParseInfo* parse_info,
                             UnoptimizedCompilationInfo* compilation_info)
-      : CompilationJob(stack_limit, State::kReadyToExecute),
+      : CompilationJob(State::kReadyToExecute),
+        stack_limit_(stack_limit),
         parse_info_(parse_info),
         compilation_info_(compilation_info) {}
 
@@ -265,6 +261,8 @@ class UnoptimizedCompilationJob : public CompilationJob {
     return compilation_info_;
   }
 
+  uintptr_t stack_limit() const { return stack_limit_; }
+
  protected:
   // Overridden by the actual implementation.
   virtual Status ExecuteJobImpl() = 0;
@@ -272,6 +270,7 @@ class UnoptimizedCompilationJob : public CompilationJob {
                                  Isolate* isolate) = 0;
 
  private:
+  uintptr_t stack_limit_;
   ParseInfo* parse_info_;
   UnoptimizedCompilationInfo* compilation_info_;
   base::TimeDelta time_taken_to_execute_;
@@ -289,11 +288,10 @@ class UnoptimizedCompilationJob : public CompilationJob {
 // Each of the three phases can either fail or succeed.
 class OptimizedCompilationJob : public CompilationJob {
  public:
-  OptimizedCompilationJob(uintptr_t stack_limit,
-                          OptimizedCompilationInfo* compilation_info,
+  OptimizedCompilationJob(OptimizedCompilationInfo* compilation_info,
                           const char* compiler_name,
                           State initial_state = State::kReadyToPrepare)
-      : CompilationJob(stack_limit, initial_state),
+      : CompilationJob(initial_state),
         compilation_info_(compilation_info),
         compiler_name_(compiler_name) {}
 
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index c0774079311122..44503e532d1ed0 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -26,6 +26,7 @@
 #include "src/logging/log.h"
 #include "src/numbers/math-random.h"
 #include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-interpreter.h"
 #include "src/regexp/regexp-macro-assembler-arch.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/strings/string-search.h"
@@ -327,13 +328,18 @@ ExternalReference ExternalReference::allocation_sites_list_address(
   return ExternalReference(isolate->heap()->allocation_sites_list_address());
 }
 
-ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
-  return ExternalReference(isolate->stack_guard()->address_of_jslimit());
+ExternalReference ExternalReference::address_of_jslimit(Isolate* isolate) {
+  Address address = isolate->stack_guard()->address_of_jslimit();
+  // For efficient generated code, this should be root-register-addressable.
+  DCHECK(isolate->root_register_addressable_region().contains(address));
+  return ExternalReference(address);
 }
 
-ExternalReference ExternalReference::address_of_real_stack_limit(
-    Isolate* isolate) {
-  return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
+ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) {
+  Address address = isolate->stack_guard()->address_of_real_jslimit();
+  // For efficient generated code, this should be root-register-addressable.
+  DCHECK(isolate->root_register_addressable_region().contains(address));
+  return ExternalReference(address);
 }
 
 ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
@@ -481,6 +487,9 @@ FUNCTION_REFERENCE_WITH_ISOLATE(re_check_stack_guard_state, re_stack_check_func)
 FUNCTION_REFERENCE_WITH_ISOLATE(re_grow_stack,
                                 NativeRegExpMacroAssembler::GrowStack)
 
+FUNCTION_REFERENCE_WITH_ISOLATE(re_match_for_call_from_js,
+                                IrregexpInterpreter::MatchForCallFromJs)
+
 FUNCTION_REFERENCE_WITH_ISOLATE(
     re_case_insensitive_compare_uc16,
     NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)
@@ -496,14 +505,14 @@ ExternalReference ExternalReference::address_of_static_offsets_vector(
       reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
 }
 
-ExternalReference ExternalReference::address_of_regexp_stack_limit(
+ExternalReference ExternalReference::address_of_regexp_stack_limit_address(
     Isolate* isolate) {
-  return ExternalReference(isolate->regexp_stack()->limit_address());
+  return ExternalReference(isolate->regexp_stack()->limit_address_address());
 }
 
 ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
     Isolate* isolate) {
-  return ExternalReference(isolate->regexp_stack()->memory_address());
+  return ExternalReference(isolate->regexp_stack()->memory_address_address());
 }
 
 ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
@@ -511,6 +520,12 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
   return ExternalReference(isolate->regexp_stack()->memory_size_address());
 }
 
+ExternalReference ExternalReference::address_of_regexp_stack_memory_top_address(
+    Isolate* isolate) {
+  return ExternalReference(
+      isolate->regexp_stack()->memory_top_address_address());
+}
+
 FUNCTION_REFERENCE_WITH_TYPE(ieee754_acos_function, base::ieee754::acos,
                              BUILTIN_FP_CALL)
 FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function, base::ieee754::acosh,
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index b663ae1621e953..45c26bdfb091d5 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -36,8 +36,8 @@ class StatsCounter;
   V(force_slow_path, "Isolate::force_slow_path_address()")                     \
   V(isolate_root, "Isolate::isolate_root()")                                   \
   V(allocation_sites_list_address, "Heap::allocation_sites_list_address()")    \
-  V(address_of_stack_limit, "StackGuard::address_of_jslimit()")                \
-  V(address_of_real_stack_limit, "StackGuard::address_of_real_jslimit()")      \
+  V(address_of_jslimit, "StackGuard::address_of_jslimit()")                    \
+  V(address_of_real_jslimit, "StackGuard::address_of_real_jslimit()")          \
   V(store_buffer_top, "store_buffer_top")                                      \
   V(heap_is_marking_flag_address, "heap_is_marking_flag_address")              \
   V(new_space_allocation_top_address, "Heap::NewSpaceAllocationTopAddress()")  \
@@ -73,15 +73,20 @@ class StatsCounter;
   V(fast_c_call_caller_pc_address,                                             \
     "IsolateData::fast_c_call_caller_pc_address")                              \
   V(stack_is_iterable_address, "IsolateData::stack_is_iterable_address")       \
-  V(address_of_regexp_stack_limit, "RegExpStack::limit_address()")             \
-  V(address_of_regexp_stack_memory_address, "RegExpStack::memory_address()")   \
-  V(address_of_regexp_stack_memory_size, "RegExpStack::memory_size()")         \
+  V(address_of_regexp_stack_limit_address,                                     \
+    "RegExpStack::limit_address_address()")                                    \
+  V(address_of_regexp_stack_memory_address,                                    \
+    "RegExpStack::memory_address_address()")                                   \
+  V(address_of_regexp_stack_memory_size, "RegExpStack::memory_size_address()") \
+  V(address_of_regexp_stack_memory_top_address,                                \
+    "RegExpStack::memory_top_address_address()")                               \
   V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector")  \
   V(re_case_insensitive_compare_uc16,                                          \
     "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()")                \
   V(re_check_stack_guard_state,                                                \
     "RegExpMacroAssembler*::CheckStackGuardState()")                           \
   V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()")                  \
+  V(re_match_for_call_from_js, "IrregexpInterpreter::MatchForCallFromJs")      \
   V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map")
 
 #define EXTERNAL_REFERENCE_LIST(V)                                            \
diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h
index 362412525d8a24..1aa6b8120315de 100644
--- a/deps/v8/src/codegen/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -129,8 +129,8 @@ class V8_EXPORT_PRIVATE HandlerTable {
   static const int kReturnEntrySize = 2;
 
   // Encoding of the {handler} field.
-  class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
-  class HandlerOffsetField : public BitField<int, 3, 29> {};
+  using HandlerPredictionField = BitField<CatchPrediction, 0, 3>;
+  using HandlerOffsetField = BitField<int, 3, 29>;
 };
 
 }  // namespace internal
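
// The class-to-using change above is purely syntactic. As a standalone
// sketch of what BitField<T, shift, size> encodes for the handler field
// (3 prediction bits, then a 29-bit offset; the arithmetic below is
// illustrative):
#include <cassert>
#include <cstdint>

enum CatchPrediction { UNCAUGHT = 0, CAUGHT = 1, PROMISE = 2 };

uint32_t Encode(CatchPrediction p, int offset) {
  return static_cast<uint32_t>(p) | (static_cast<uint32_t>(offset) << 3);
}
CatchPrediction DecodePrediction(uint32_t packed) {
  return static_cast<CatchPrediction>(packed & 0x7);
}
int DecodeOffset(uint32_t packed) { return static_cast<int>(packed >> 3); }

int main() {
  uint32_t packed = Encode(PROMISE, 1234);
  assert(DecodePrediction(packed) == PROMISE);
  assert(DecodeOffset(packed) == 1234);
}
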
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 2423f73bdbe9b8..52256212763e44 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -342,8 +342,8 @@ class Displacement {
  private:
   int data_;
 
-  class TypeField : public BitField<Type, 0, 2> {};
-  class NextField : public BitField<int, 2, 32 - 2> {};
+  using TypeField = BitField<Type, 0, 2>;
+  using NextField = BitField<int, 2, 32 - 2>;
 
   void init(Label* L, Type type);
 };
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index f6f0153e54c02c..070f3159776a76 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -91,26 +91,16 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) {
   }
 }
 
-void TurboAssembler::CompareStackLimit(Register with) {
-  if (root_array_available()) {
-    CompareRoot(with, RootIndex::kStackLimit);
-  } else {
-    DCHECK(!options().isolate_independent_code);
-    ExternalReference ref =
-        ExternalReference::address_of_stack_limit(isolate());
-    cmp(with, Operand(ref.address(), RelocInfo::EXTERNAL_REFERENCE));
-  }
-}
-
 void TurboAssembler::CompareRealStackLimit(Register with) {
-  if (root_array_available()) {
-    CompareRoot(with, RootIndex::kRealStackLimit);
-  } else {
-    DCHECK(!options().isolate_independent_code);
-    ExternalReference ref =
-        ExternalReference::address_of_real_stack_limit(isolate());
-    cmp(with, Operand(ref.address(), RelocInfo::EXTERNAL_REFERENCE));
-  }
+  CHECK(root_array_available());  // Only used by builtins.
+
+  // Address through the root register. No load is needed.
+  ExternalReference limit =
+      ExternalReference::address_of_real_jslimit(isolate());
+  DCHECK(IsAddressableThroughRootRegister(isolate(), limit));
+
+  intptr_t offset = RootRegisterOffsetForExternalReference(isolate(), limit);
+  cmp(with, Operand(kRootRegister, offset));
 }
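
// A standalone sketch of the root-register-relative addressing the new
// CompareRealStackLimit relies on: when a field sits at a fixed offset from
// the isolate root, generated code can address it as
// [kRootRegister + offset] without first loading the external address.
// The addresses below are illustrative.
#include <cassert>
#include <cstdint>

int main() {
  uintptr_t isolate_root = 0x5000;
  uintptr_t real_jslimit_address = 0x5038;  // must be root-addressable
  intptr_t offset = static_cast<intptr_t>(real_jslimit_address) -
                    static_cast<intptr_t>(isolate_root);
  // Conceptually, the emitted compare is: cmp(with, [kRootRegister + offset]).
  assert(isolate_root + offset == real_jslimit_address);
}
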
 
 void MacroAssembler::PushRoot(RootIndex index) {
@@ -465,8 +455,9 @@ void MacroAssembler::RecordWrite(Register object, Register address,
   DCHECK(value != address);
   AssertNotSmi(object);
 
-  if (remembered_set_action == OMIT_REMEMBERED_SET &&
-      !FLAG_incremental_marking) {
+  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+       !FLAG_incremental_marking) ||
+      FLAG_disable_write_barriers) {
     return;
   }
 
@@ -1875,11 +1866,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
     if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
         Builtins::IsIsolateIndependent(builtin_index)) {
       // Inline the trampoline.
-      RecordCommentForOffHeapTrampoline(builtin_index);
-      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-      EmbeddedData d = EmbeddedData::FromBlob();
-      Address entry = d.InstructionStartOfBuiltin(builtin_index);
-      call(entry, RelocInfo::OFF_HEAP_TARGET);
+      CallBuiltin(builtin_index);
       return;
     }
   }
@@ -1907,6 +1894,16 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
   call(builtin_index);
 }
 
+void TurboAssembler::CallBuiltin(int builtin_index) {
+  DCHECK(Builtins::IsBuiltinId(builtin_index));
+  DCHECK(FLAG_embedded_builtins);
+  RecordCommentForOffHeapTrampoline(builtin_index);
+  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+  EmbeddedData d = EmbeddedData::FromBlob();
+  Address entry = d.InstructionStartOfBuiltin(builtin_index);
+  call(entry, RelocInfo::OFF_HEAP_TARGET);
+}
+
 void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                          Register code_object) {
   // Code objects are called differently depending on whether we are generating
@@ -1960,6 +1957,12 @@ void TurboAssembler::JumpCodeObject(Register code_object) {
   jmp(code_object);
 }
 
+void TurboAssembler::Jump(const ExternalReference& reference) {
+  DCHECK(root_array_available());
+  jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
+                                 isolate(), reference)));
+}
+
 void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
   DCHECK_IMPLIES(options().isolate_independent_code,
                  Builtins::IsIsolateIndependentBuiltin(*code_object));
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 9b13e87447920f..c65871cfad34a2 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -91,10 +91,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // register.
   void LoadEntryFromBuiltinIndex(Register builtin_index);
   void CallBuiltinByIndex(Register builtin_index) override;
+  void CallBuiltin(int builtin_index);
 
   void LoadCodeObjectEntry(Register destination, Register code_object) override;
   void CallCodeObject(Register code_object) override;
   void JumpCodeObject(Register code_object) override;
+  void Jump(const ExternalReference& reference) override;
 
   void RetpolineCall(Register reg);
   void RetpolineCall(Address destination, RelocInfo::Mode rmode);
@@ -213,7 +215,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 
   void LoadAddress(Register destination, ExternalReference source);
 
-  void CompareStackLimit(Register with);
   void CompareRealStackLimit(Register with);
   void CompareRoot(Register with, RootIndex index);
   void CompareRoot(Register with, Register scratch, RootIndex index);
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 5934c80a7d218b..f537ebc899428c 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -416,10 +416,20 @@ void I64ToBigIntDescriptor::InitializePlatformSpecific(
   DefaultInitializePlatformSpecific(data, kParameterCount);
 }
 
+void I32PairToBigIntDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
 void BigIntToI64Descriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   DefaultInitializePlatformSpecific(data, kParameterCount);
 }
 
+void BigIntToI32PairDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index f6c1adfe47fe5e..544d62fd9f01d7 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -28,7 +28,9 @@ namespace internal {
   V(ArraySingleArgumentConstructor)   \
   V(AsyncFunctionStackParameter)      \
   V(BigIntToI64)                      \
+  V(BigIntToI32Pair)                  \
   V(I64ToBigInt)                      \
+  V(I32PairToBigInt)                  \
   V(BinaryOp)                         \
   V(CallForwardVarargs)               \
   V(CallFunctionTemplate)             \
@@ -660,11 +662,13 @@ class StoreGlobalWithVectorDescriptor : public StoreGlobalDescriptor {
 
 class LoadWithVectorDescriptor : public LoadDescriptor {
  public:
+  // TODO(v8:9497): Revert the machine type for kSlot to TaggedSigned once
+  // Torque can emit better call descriptors.
   DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector)
-  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),     // kReceiver
-                         MachineType::AnyTagged(),     // kName
-                         MachineType::TaggedSigned(),  // kSlot
-                         MachineType::AnyTagged())     // kVector
+  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kReceiver
+                         MachineType::AnyTagged(),  // kName
+                         MachineType::AnyTagged(),  // kSlot
+                         MachineType::AnyTagged())  // kVector
   DECLARE_DESCRIPTOR(LoadWithVectorDescriptor, LoadDescriptor)
 
   static const Register VectorRegister();
@@ -1205,14 +1209,26 @@ class WasmThrowDescriptor final : public CallInterfaceDescriptor {
   DECLARE_DESCRIPTOR(WasmThrowDescriptor, CallInterfaceDescriptor)
 };
 
-class I64ToBigIntDescriptor final : public CallInterfaceDescriptor {
+class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final
+    : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS_NO_CONTEXT(kArgument)
   DEFINE_PARAMETER_TYPES(MachineType::Int64())  // kArgument
   DECLARE_DESCRIPTOR(I64ToBigIntDescriptor, CallInterfaceDescriptor)
 };
 
-class BigIntToI64Descriptor final : public CallInterfaceDescriptor {
+// 32-bit version of the I64ToBigIntDescriptor call interface descriptor.
+class V8_EXPORT_PRIVATE I32PairToBigIntDescriptor final
+    : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS_NO_CONTEXT(kLow, kHigh)
+  DEFINE_PARAMETER_TYPES(MachineType::Uint32(),  // kLow
+                         MachineType::Uint32())  // kHigh
+  DECLARE_DESCRIPTOR(I32PairToBigIntDescriptor, CallInterfaceDescriptor)
+};
+
+class V8_EXPORT_PRIVATE BigIntToI64Descriptor final
+    : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS(kArgument)
   DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int64(),      // result 1
@@ -1220,6 +1236,16 @@ class BigIntToI64Descriptor final : public CallInterfaceDescriptor {
   DECLARE_DESCRIPTOR(BigIntToI64Descriptor, CallInterfaceDescriptor)
 };
 
+class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final
+    : public CallInterfaceDescriptor {
+ public:
+  DEFINE_RESULT_AND_PARAMETERS(2, kArgument)
+  DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(),     // result 1
+                                    MachineType::Uint32(),     // result 2
+                                    MachineType::AnyTagged())  // kArgument
+  DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor, CallInterfaceDescriptor)
+};
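
// A standalone sketch of the low/high pair convention behind the new
// I32PairToBigInt / BigIntToI32Pair descriptors: on 32-bit targets a 64-bit
// value is passed and returned as two uint32 halves.
#include <cassert>
#include <cstdint>

uint64_t JoinPair(uint32_t low, uint32_t high) {
  return (static_cast<uint64_t>(high) << 32) | low;
}

void SplitPair(uint64_t value, uint32_t* low, uint32_t* high) {
  *low = static_cast<uint32_t>(value);
  *high = static_cast<uint32_t>(value >> 32);
}

int main() {
  uint32_t low, high;
  SplitPair(0x0123456789ABCDEFull, &low, &high);
  assert(low == 0x89ABCDEFu && high == 0x01234567u);
  assert(JoinPair(low, high) == 0x0123456789ABCDEFull);
}
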
+
 class WasmAtomicNotifyDescriptor final : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kCount)
diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index 29da269e8c76e6..0e588c08059933 100644
--- a/deps/v8/src/codegen/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -61,7 +61,7 @@ namespace v8 {
 namespace internal {
 
 // Simulators only support C calls with up to kMaxCParameters parameters.
-static constexpr int kMaxCParameters = 9;
+static constexpr int kMaxCParameters = 10;
 
 class FrameScope {
  public:
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 86a07ab06e9ac2..0359be2c94aef8 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -122,7 +122,7 @@ class Operand {
 
 // On MIPS we have only one addressing mode with base_reg + offset.
 // Class MemOperand represents a memory operand in load and store instructions.
-class MemOperand : public Operand {
+class V8_EXPORT_PRIVATE MemOperand : public Operand {
  public:
   // Immediate value attached to offset.
   enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };
@@ -1872,7 +1872,7 @@ class EnsureSpace {
   explicit inline EnsureSpace(Assembler* assembler);
 };
 
-class UseScratchRegisterScope {
+class V8_EXPORT_PRIVATE UseScratchRegisterScope {
  public:
   explicit UseScratchRegisterScope(Assembler* assembler);
   ~UseScratchRegisterScope();
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 79373c1b5be197..2e4698a9e71c78 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -330,8 +330,9 @@ void MacroAssembler::RecordWrite(Register object, Register address,
            Operand(value));
   }
 
-  if (remembered_set_action == OMIT_REMEMBERED_SET &&
-      !FLAG_incremental_marking) {
+  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+       !FLAG_incremental_marking) ||
+      FLAG_disable_write_barriers) {
     return;
   }
 
@@ -1302,6 +1303,18 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
   CheckTrampolinePoolQuick(1);
 }
 
+void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
+  MemOperand source = rs;
+  AdjustBaseAndOffset(source);
+  lw(rd, source);
+}
+
+void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
+  MemOperand dest = rs;
+  AdjustBaseAndOffset(dest);
+  sw(rd, dest);
+}
+
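
// A standalone sketch of why the new Lw/Sw wrappers adjust the operand
// first: MIPS lw/sw immediates are signed 16-bit, so an out-of-range offset
// must be folded into the base before the access. The fold below is
// illustrative (a real implementation materializes the sum in a scratch
// register).
#include <cassert>
#include <cstdint>

bool FitsInt16(int32_t offset) { return offset >= -32768 && offset <= 32767; }

void AdjustBaseAndOffset(int32_t* base, int32_t* offset) {
  if (FitsInt16(*offset)) return;
  *base += *offset;
  *offset = 0;
}

int main() {
  int32_t base = 0x1000, offset = 0x20000;  // does not fit in int16
  AdjustBaseAndOffset(&base, &offset);
  assert(FitsInt16(offset) && base + offset == 0x1000 + 0x20000);
}
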
 void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
   bool is_one_instruction = IsMipsArchVariant(kMips32r6)
                                 ? is_int9(rs.offset())
@@ -3839,6 +3852,13 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
 }
 
+void TurboAssembler::Jump(const ExternalReference& reference) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  li(scratch, reference);
+  Jump(scratch);
+}
+
 void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
                                      unsigned higher_limit,
                                      Label* on_in_range) {
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index 3dfc7bfbad1987..d9c372f8687155 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -206,6 +206,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(const ExternalReference& reference) override;
   void Call(Register target, int16_t offset = 0, COND_ARGS);
   void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
@@ -258,6 +259,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 
   void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
 
+  void Lw(Register rd, const MemOperand& rs);
+  void Sw(Register rd, const MemOperand& rs);
+
   void push(Register src) {
     Addu(sp, sp, Operand(-kPointerSize));
     sw(src, MemOperand(sp, 0));
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index a22ddf0e7d2cc1..9695aa652486ff 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -121,7 +121,7 @@ class Operand {
 
 // On MIPS we have only one addressing mode with base_reg + offset.
 // Class MemOperand represents a memory operand in load and store instructions.
-class MemOperand : public Operand {
+class V8_EXPORT_PRIVATE MemOperand : public Operand {
  public:
   // Immediate value attached to offset.
   enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };
@@ -1899,7 +1899,7 @@ class EnsureSpace {
   explicit inline EnsureSpace(Assembler* assembler);
 };
 
-class UseScratchRegisterScope {
+class V8_EXPORT_PRIVATE UseScratchRegisterScope {
  public:
   explicit UseScratchRegisterScope(Assembler* assembler);
   ~UseScratchRegisterScope();
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 97e5af1fa8e5bf..b3537860643784 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -328,8 +328,9 @@ void MacroAssembler::RecordWrite(Register object, Register address,
            Operand(value));
   }
 
-  if (remembered_set_action == OMIT_REMEMBERED_SET &&
-      !FLAG_incremental_marking) {
+  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+       !FLAG_incremental_marking) ||
+      FLAG_disable_write_barriers) {
     return;
   }
 
@@ -4200,6 +4201,13 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
 }
 
+void TurboAssembler::Jump(const ExternalReference& reference) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  li(scratch, reference);
+  Jump(scratch);
+}
+
 // Note: To call gcc-compiled C code on mips, you must call through t9.
 void TurboAssembler::Call(Register target, Condition cond, Register rs,
                           const Operand& rt, BranchDelaySlot bd) {
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index eb62bec0e82395..c2b701a5affcaa 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -229,6 +229,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(const ExternalReference& reference) override;
   void Call(Register target, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index f3582d868af0e1..7dc94f39cd6a3a 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -21,6 +21,7 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
     Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
     Handle<JSFunction> closure)
     : OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) {
+  DCHECK_EQ(*shared, closure->shared());
   DCHECK(shared->is_compiled());
   bytecode_array_ = handle(shared->GetBytecodeArray(), isolate);
   shared_info_ = shared;
diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc
index 9e33de7918cda2..b7be9c77757eee 100644
--- a/deps/v8/src/codegen/pending-optimization-table.cc
+++ b/deps/v8/src/codegen/pending-optimization-table.cc
@@ -4,6 +4,7 @@
 
 #include "src/codegen/pending-optimization-table.h"
 
+#include "src/base/flags.h"
 #include "src/execution/isolate-inl.h"
 #include "src/heap/heap-inl.h"
 #include "src/objects/hash-table.h"
@@ -12,12 +13,24 @@
 namespace v8 {
 namespace internal {
 
-enum class FunctionStatus { kPrepareForOptimize, kMarkForOptimize };
+enum class FunctionStatus : int {
+  kPrepareForOptimize = 1 << 0,
+  kMarkForOptimize = 1 << 1,
+  kAllowHeuristicOptimization = 1 << 2,
+};
+
+using FunctionStatusFlags = base::Flags<FunctionStatus>;
 
 void PendingOptimizationTable::PreparedForOptimization(
-    Isolate* isolate, Handle<JSFunction> function) {
+    Isolate* isolate, Handle<JSFunction> function,
+    bool allow_heuristic_optimization) {
   DCHECK(FLAG_testing_d8_test_runner);
 
+  FunctionStatusFlags status = FunctionStatus::kPrepareForOptimize;
+  if (allow_heuristic_optimization) {
+    status |= FunctionStatus::kAllowHeuristicOptimization;
+  }
+
   Handle<ObjectHashTable> table =
       isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()
           ? ObjectHashTable::New(isolate, 1)
@@ -26,15 +39,33 @@ void PendingOptimizationTable::PreparedForOptimization(
                    isolate);
   Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
       handle(function->shared().GetBytecodeArray(), isolate),
-      handle(
-          Smi::FromInt(static_cast<int>(FunctionStatus::kPrepareForOptimize)),
-          isolate),
-      AllocationType::kYoung);
+      handle(Smi::FromInt(status), isolate), AllocationType::kYoung);
   table =
       ObjectHashTable::Put(table, handle(function->shared(), isolate), tuple);
   isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
 }
 
+bool PendingOptimizationTable::IsHeuristicOptimizationAllowed(
+    Isolate* isolate, JSFunction function) {
+  DCHECK(FLAG_testing_d8_test_runner);
+
+  Handle<Object> table =
+      handle(isolate->heap()->pending_optimize_for_test_bytecode(), isolate);
+  Handle<Object> entry =
+      table->IsUndefined()
+          ? handle(ReadOnlyRoots(isolate).the_hole_value(), isolate)
+          : handle(Handle<ObjectHashTable>::cast(table)->Lookup(
+                       handle(function.shared(), isolate)),
+                   isolate);
+  if (entry->IsTheHole()) {
+    return true;
+  }
+  DCHECK(entry->IsTuple2());
+  DCHECK(Handle<Tuple2>::cast(entry)->value2().IsSmi());
+  FunctionStatusFlags status(Smi::ToInt(Handle<Tuple2>::cast(entry)->value2()));
+  return status & FunctionStatus::kAllowHeuristicOptimization;
+}
+
 void PendingOptimizationTable::MarkedForOptimization(
     Isolate* isolate, Handle<JSFunction> function) {
   DCHECK(FLAG_testing_d8_test_runner);
@@ -58,8 +89,11 @@ void PendingOptimizationTable::MarkedForOptimization(
   }
 
   DCHECK(entry->IsTuple2());
-  Handle<Tuple2>::cast(entry)->set_value2(
-      Smi::FromInt(static_cast<int>(FunctionStatus::kMarkForOptimize)));
+  DCHECK(Handle<Tuple2>::cast(entry)->value2().IsSmi());
+  FunctionStatusFlags status(Smi::ToInt(Handle<Tuple2>::cast(entry)->value2()));
+  status = status.without(FunctionStatus::kPrepareForOptimize) |
+           FunctionStatus::kMarkForOptimize;
+  Handle<Tuple2>::cast(entry)->set_value2(Smi::FromInt(status));
   table = ObjectHashTable::Put(Handle<ObjectHashTable>::cast(table),
                                handle(function->shared(), isolate), entry);
   isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
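FunctionStatus changes from a two-state enum to a bit set stored in the Smi, so the new kAllowHeuristicOptimization bit survives the transition from prepared to marked. A standalone mirror of that update logic, with plain ints standing in for base::Flags and the Smi round-trip:

```cpp
enum FunctionStatus : int {
  kPrepareForOptimize = 1 << 0,
  kMarkForOptimize = 1 << 1,
  kAllowHeuristicOptimization = 1 << 2,
};

int MarkForOptimization(int status) {
  // Drop the "prepared" bit, set "marked", keep the permission bit intact.
  return (status & ~kPrepareForOptimize) | kMarkForOptimize;
}
```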
diff --git a/deps/v8/src/codegen/pending-optimization-table.h b/deps/v8/src/codegen/pending-optimization-table.h
index 2a2782d17a67cd..43b939726dd89a 100644
--- a/deps/v8/src/codegen/pending-optimization-table.h
+++ b/deps/v8/src/codegen/pending-optimization-table.h
@@ -21,7 +21,8 @@ class PendingOptimizationTable {
   // strongly in the pending optimization table, preventing the bytecode
   // from being flushed.
   static void PreparedForOptimization(Isolate* isolate,
-                                      Handle<JSFunction> function);
+                                      Handle<JSFunction> function,
+                                      bool allow_heuristic_optimization);
 
   // This function should be called when the function is marked for optimization
   // via the intrinsics. This will update the state of the bytecode array in the
@@ -36,6 +37,12 @@ class PendingOptimizationTable {
   // then this function removes the entry from the pending optimization table.
   static void FunctionWasOptimized(Isolate* isolate,
                                    Handle<JSFunction> function);
+
+  // This function returns whether a heuristic is allowed to trigger
+  // optimization of the function. This mechanism is used in tests to prevent
+  // heuristics from interfering with manually triggered optimization.
+  static bool IsHeuristicOptimizationAllowed(Isolate* isolate,
+                                             JSFunction function);
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 8ab3e5b83b1866..41162063331b2e 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -205,6 +205,13 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
 }
 
+void TurboAssembler::Jump(const ExternalReference& reference) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Move(scratch, reference);
+  Jump(scratch);
+}
+
 void TurboAssembler::Call(Register target) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   // branch via link register and set LK bit for return point
@@ -558,8 +565,9 @@ void MacroAssembler::RecordWrite(Register object, Register address,
     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
   }
 
-  if (remembered_set_action == OMIT_REMEMBERED_SET &&
-      !FLAG_incremental_marking) {
+  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+       !FLAG_incremental_marking) ||
+      FLAG_disable_write_barriers) {
     return;
   }
 
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 6249c405e3aa11..fd4cb6014bb322 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -400,6 +400,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
             CRegister cr = cr7);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al,
             CRegister cr = cr7);
+  void Jump(const ExternalReference& reference) override;
   void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
             CRegister cr = cr7);
   void Call(Register target);
diff --git a/deps/v8/src/codegen/register.cc b/deps/v8/src/codegen/register.cc
new file mode 100644
index 00000000000000..4ad76c6caafe5b
--- /dev/null
+++ b/deps/v8/src/codegen/register.cc
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/register.h"
+#include "src/codegen/register-arch.h"
+
+namespace v8 {
+namespace internal {
+
+bool ShouldPadArguments(int argument_count) {
+  return kPadArguments && (argument_count % 2 != 0);
+}
+
+}  // namespace internal
+}  // namespace v8
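ShouldPadArguments centralizes a rule previously duplicated in the backends: on targets where kPadArguments is true (arm64, whose ABI keeps sp 16-byte aligned while argument slots are 8 bytes), an odd stack-argument count needs one padding slot. Standalone illustration, assuming an arm64-like target:

```cpp
constexpr bool kPadArguments = true;  // assumed arm64-like target

bool ShouldPadArguments(int argument_count) {
  return kPadArguments && (argument_count % 2 != 0);
}
// e.g. three arguments occupy 3 slots + 1 pad slot = 32 bytes, so the stack
// pointer stays 16-byte aligned.
```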
diff --git a/deps/v8/src/codegen/register.h b/deps/v8/src/codegen/register.h
index 619f4f2890a0a8..406a423892cced 100644
--- a/deps/v8/src/codegen/register.h
+++ b/deps/v8/src/codegen/register.h
@@ -105,6 +105,9 @@ class RegisterBase {
   int reg_code_;
 };
 
+// Whether padding is needed for the given stack argument count.
+bool ShouldPadArguments(int argument_count);
+
 template <typename RegType,
           typename = decltype(RegisterName(std::declval<RegType>()))>
 inline std::ostream& operator<<(std::ostream& os, RegType reg) {
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 6776626a23a3d4..873c0a2ad060c8 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -35,7 +35,6 @@
 // Copyright 2014 the V8 project authors. All rights reserved.
 
 #include "src/codegen/s390/assembler-s390.h"
-#include <sys/auxv.h>
 #include <set>
 #include <string>
 
@@ -43,6 +42,7 @@
 
 #if V8_HOST_ARCH_S390
 #include <elf.h>  // Required for auxv checks for STFLE support
+#include <sys/auxv.h>
 #endif
 
 #include "src/base/bits.h"
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index f6c2314a84b8b8..355d536379a1b6 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -193,6 +193,13 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
 }
 
+void TurboAssembler::Jump(const ExternalReference& reference) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Move(scratch, reference);
+  Jump(scratch);
+}
+
 void TurboAssembler::Call(Register target) {
   // Branch to target via indirect branch
   basr(r14, target);
@@ -576,8 +583,9 @@ void MacroAssembler::RecordWrite(Register object, Register address,
     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
   }
 
-  if (remembered_set_action == OMIT_REMEMBERED_SET &&
-      !FLAG_incremental_marking) {
+  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+       !FLAG_incremental_marking) ||
+      FLAG_disable_write_barriers) {
     return;
   }
   // First, check if a write barrier is even needed. The tests below
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index 52f668d1755a2f..856e4b592ecef0 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -137,6 +137,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Jump(Register target, Condition cond = al);
   void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Jump(const ExternalReference& reference) override;
   // Jump if the register contains a smi.
   inline void JumpIfSmi(Register value, Label* smi_label) {
     TestIfSmi(value);
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 2afdb5f90cb939..962b1ea17f8dec 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -91,32 +91,24 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
     Assembler* assembler, Safepoint::DeoptMode deopt_mode) {
   deoptimization_info_.push_back(
       DeoptimizationInfo(zone_, assembler->pc_offset()));
-  if (deopt_mode == Safepoint::kNoLazyDeopt) {
-    last_lazy_safepoint_ = deoptimization_info_.size();
-  }
   DeoptimizationInfo& new_info = deoptimization_info_.back();
   return Safepoint(new_info.indexes);
 }
 
-void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
-  for (auto it = deoptimization_info_.Find(last_lazy_safepoint_);
-       it != deoptimization_info_.end(); it++, last_lazy_safepoint_++) {
-    it->deopt_index = index;
-  }
-}
-
 unsigned SafepointTableBuilder::GetCodeOffset() const {
   DCHECK(emitted_);
   return offset_;
 }
 
 int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
-                                                    int start) {
+                                                    int start,
+                                                    unsigned deopt_index) {
   int index = start;
   for (auto it = deoptimization_info_.Find(start);
        it != deoptimization_info_.end(); it++, index++) {
     if (static_cast<int>(it->pc) == pc) {
       it->trampoline = trampoline;
+      it->deopt_index = deopt_index;
       return index;
     }
   }
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index fccce1a7a69122..1df4311036d5d7 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -164,7 +164,6 @@ class SafepointTableBuilder {
   explicit SafepointTableBuilder(Zone* zone)
       : deoptimization_info_(zone),
         emitted_(false),
-        last_lazy_safepoint_(0),
         zone_(zone) {}
 
   // Get the offset of the emitted safepoint table in the code.
@@ -173,13 +172,6 @@ class SafepointTableBuilder {
   // Define a new safepoint for the current position in the body.
   Safepoint DefineSafepoint(Assembler* assembler, Safepoint::DeoptMode mode);
 
-  // Record deoptimization index for lazy deoptimization for the last
-  // outstanding safepoints.
-  void RecordLazyDeoptimizationIndex(int index);
-  void BumpLastLazySafepointIndex() {
-    last_lazy_safepoint_ = deoptimization_info_.size();
-  }
-
   // Emit the safepoint table after the body. The number of bits per
   // entry must be enough to hold all the pointer indexes.
   V8_EXPORT_PRIVATE void Emit(Assembler* assembler, int bits_per_entry);
@@ -188,7 +180,8 @@ class SafepointTableBuilder {
   // trampoline field. Calling this function ensures that the safepoint
   // table contains the trampoline PC {trampoline} that replaced the
   // return PC {pc} on the stack.
-  int UpdateDeoptimizationInfo(int pc, int trampoline, int start);
+  int UpdateDeoptimizationInfo(int pc, int trampoline, int start,
+                               unsigned deopt_index);
 
  private:
   struct DeoptimizationInfo {
@@ -215,7 +208,6 @@ class SafepointTableBuilder {
 
   unsigned offset_;
   bool emitted_;
-  size_t last_lazy_safepoint_;
 
   Zone* zone_;
 
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index e10cc075714e62..870241eac69b92 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -27,8 +27,8 @@ namespace internal {
 namespace {
 
 // Each byte is encoded as MoreBit | ValueBits.
-class MoreBit : public BitField8<bool, 7, 1> {};
-class ValueBits : public BitField8<unsigned, 0, 7> {};
+using MoreBit = BitField8<bool, 7, 1>;
+using ValueBits = BitField8<unsigned, 0, 7>;
 
 // Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
 void AddAndSetEntry(PositionTableEntry& value,  // NOLINT(runtime/references)
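The BitField8 aliases describe the table's variable-length byte encoding: seven payload bits per byte, with bit 7 marking that another byte follows. A hedged sketch of an encoder built on that layout:

```cpp
#include <cstdint>
#include <vector>

void EncodeVLQ(std::vector<uint8_t>* out, uint32_t value) {
  while (value >= 0x80) {
    out->push_back(static_cast<uint8_t>(0x80 | (value & 0x7F)));  // MoreBit set
    value >>= 7;  // remaining ValueBits go in later bytes
  }
  out->push_back(static_cast<uint8_t>(value));  // final byte: MoreBit clear
}
```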
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index 2f058eda19aa7a..3a3e65a41e753d 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -50,6 +50,8 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
   void set_has_frame(bool v) { has_frame_ = v; }
   bool has_frame() const { return has_frame_; }
 
+  virtual void Jump(const ExternalReference& reference) = 0;
+
   // Calls the builtin given by the Smi in |builtin|. If builtins are embedded,
   // the trampoline Code object on the heap is not used.
   virtual void CallBuiltinByIndex(Register builtin_index) = 0;
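Declaring Jump(const ExternalReference&) pure virtual on TurboAssemblerBase lets architecture-independent code emit a tail call to a C function without per-backend dispatch; each port supplies its own sequence (scratch-register jumps on MIPS/PPC/S390, a root-register-relative jump on x64). A hypothetical caller sketch (the function name is illustrative, not part of the patch):

```cpp
// Illustrative only: shared code relying on the new pure-virtual Jump.
void GenerateTailCallToCFunction(TurboAssemblerBase* tasm,
                                 const ExternalReference& target) {
  tasm->Jump(target);  // virtual dispatch picks the backend's sequence
}
```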
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 1d28f1d45dd304..1783da700ba53d 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -109,15 +109,16 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
 void CpuFeatures::PrintTarget() {}
 void CpuFeatures::PrintFeatures() {
   printf(
-      "SSE3=%d SSSE3=%d SSE4_1=%d SAHF=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d "
+      "SSE3=%d SSSE3=%d SSE4_1=%d SSE4_2=%d SAHF=%d AVX=%d FMA3=%d BMI1=%d "
+      "BMI2=%d "
       "LZCNT=%d "
       "POPCNT=%d ATOM=%d\n",
       CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSSE3),
-      CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(SAHF),
-      CpuFeatures::IsSupported(AVX), CpuFeatures::IsSupported(FMA3),
-      CpuFeatures::IsSupported(BMI1), CpuFeatures::IsSupported(BMI2),
-      CpuFeatures::IsSupported(LZCNT), CpuFeatures::IsSupported(POPCNT),
-      CpuFeatures::IsSupported(ATOM));
+      CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(SSE4_2),
+      CpuFeatures::IsSupported(SAHF), CpuFeatures::IsSupported(AVX),
+      CpuFeatures::IsSupported(FMA3), CpuFeatures::IsSupported(BMI1),
+      CpuFeatures::IsSupported(BMI2), CpuFeatures::IsSupported(LZCNT),
+      CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM));
 }
 
 // -----------------------------------------------------------------------------
@@ -428,6 +429,9 @@ Assembler::Assembler(const AssemblerOptions& options,
                      std::unique_ptr<AssemblerBuffer> buffer)
     : AssemblerBase(options, std::move(buffer)), constpool_(this) {
   reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
+  if (CpuFeatures::IsSupported(SSE4_2)) {
+    EnableCpuFeature(SSE4_1);
+  }
   if (CpuFeatures::IsSupported(SSE4_1)) {
     EnableCpuFeature(SSSE3);
   }
@@ -3524,8 +3528,8 @@ void Assembler::cmpps(XMMRegister dst, Operand src, int8_t cmp) {
 
 void Assembler::cmppd(XMMRegister dst, XMMRegister src, int8_t cmp) {
   EnsureSpace ensure_space(this);
-  emit_optional_rex_32(dst, src);
   emit(0x66);
+  emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xC2);
   emit_sse_operand(dst, src);
@@ -3534,8 +3538,8 @@ void Assembler::cmppd(XMMRegister dst, XMMRegister src, int8_t cmp) {
 
 void Assembler::cmppd(XMMRegister dst, Operand src, int8_t cmp) {
   EnsureSpace ensure_space(this);
-  emit_optional_rex_32(dst, src);
   emit(0x66);
+  emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xC2);
   emit_sse_operand(dst, src);
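The cmppd change is a genuine encoding fix: the instruction is 66 0F C2 /r ib, where 0x66 is a mandatory legacy prefix, and a REX byte is only honored when it immediately precedes the opcode, after all legacy prefixes. Assumed example operands, cmppd xmm8, xmm1, 0, where REX.R is needed to reach xmm8:

```cpp
#include <cstdint>

const uint8_t kFixedOrder[]  = {0x66, 0x44, 0x0F, 0xC2, 0xC1, 0x00};  // correct
// With REX emitted first it no longer abuts the opcode escape, so the CPU
// ignores it and the comparison silently targets xmm0 instead of xmm8:
const uint8_t kBrokenOrder[] = {0x44, 0x66, 0x0F, 0xC2, 0xC1, 0x00};
```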
@@ -4716,6 +4720,26 @@ void Assembler::lddqu(XMMRegister dst, Operand src) {
   emit_sse_operand(dst, src);
 }
 
+void Assembler::movddup(XMMRegister dst, XMMRegister src) {
+  DCHECK(IsEnabled(SSE3));
+  EnsureSpace ensure_space(this);
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x12);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::movddup(XMMRegister dst, Operand src) {
+  DCHECK(IsEnabled(SSE3));
+  EnsureSpace ensure_space(this);
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x12);
+  emit_sse_operand(dst, src);
+}
+
 void Assembler::psrldq(XMMRegister dst, uint8_t shift) {
   EnsureSpace ensure_space(this);
   emit(0x66);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index acb4fce82c1ab3..7c69b4c4736dff 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -916,6 +916,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 
   // SSE3
   void lddqu(XMMRegister dst, Operand src);
+  void movddup(XMMRegister dst, Operand src);
+  void movddup(XMMRegister dst, XMMRegister src);
 
   // SSSE3
   void ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
@@ -1329,14 +1331,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   }
 
   AVX_SP_3(vsqrt, 0x51)
-  AVX_SP_3(vadd, 0x58)
-  AVX_SP_3(vsub, 0x5c)
-  AVX_SP_3(vmul, 0x59)
-  AVX_SP_3(vdiv, 0x5e)
-  AVX_SP_3(vmin, 0x5d)
-  AVX_SP_3(vmax, 0x5f)
+  AVX_S_3(vadd, 0x58)
+  AVX_S_3(vsub, 0x5c)
+  AVX_S_3(vmul, 0x59)
+  AVX_S_3(vdiv, 0x5e)
+  AVX_S_3(vmin, 0x5d)
+  AVX_S_3(vmax, 0x5f)
   AVX_P_3(vand, 0x54)
-  AVX_P_3(vandn, 0x55)
+  AVX_3(vandnps, 0x55, vps)
   AVX_P_3(vor, 0x56)
   AVX_P_3(vxor, 0x57)
   AVX_3(vcvtsd2ss, 0x5a, vsd)
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index f13811b1aec361..4deeb1bc02df08 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -505,8 +505,9 @@ void MacroAssembler::RecordWrite(Register object, Register address,
   DCHECK(value != address);
   AssertNotSmi(object);
 
-  if (remembered_set_action == OMIT_REMEMBERED_SET &&
-      !FLAG_incremental_marking) {
+  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+       !FLAG_incremental_marking) ||
+      FLAG_disable_write_barriers) {
     return;
   }
 
@@ -1523,9 +1524,10 @@ void MacroAssembler::Pop(Operand dst) { popq(dst); }
 
 void MacroAssembler::PopQuad(Operand dst) { popq(dst); }
 
-void TurboAssembler::Jump(ExternalReference ext) {
-  LoadAddress(kScratchRegister, ext);
-  jmp(kScratchRegister);
+void TurboAssembler::Jump(const ExternalReference& reference) {
+  DCHECK(root_array_available());
+  jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
+                                 isolate(), reference)));
 }
 
 void TurboAssembler::Jump(Operand op) { jmp(op); }
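On x64, Jump(ExternalReference) no longer clobbers kScratchRegister: every external reference has a fixed slot in a table reachable from kRootRegister, so the jump becomes a single memory-indirect jmp. A standalone analogy (all names here are illustrative):

```cpp
#include <cstdio>

using Target = void (*)();
void Hello() { std::puts("hello"); }

Target external_reference_table[] = {Hello};  // base plays kRootRegister's role

int main() {
  int index = 0;  // what RootRegisterOffsetForExternalReferenceTableEntry computes
  external_reference_table[index]();  // ~ jmp [kRootRegister + offset]
}
```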
@@ -1594,12 +1596,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
     if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
         Builtins::IsIsolateIndependent(builtin_index)) {
       // Inline the trampoline.
-      RecordCommentForOffHeapTrampoline(builtin_index);
-      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-      EmbeddedData d = EmbeddedData::FromBlob();
-      Address entry = d.InstructionStartOfBuiltin(builtin_index);
-      Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
-      call(kScratchRegister);
+      CallBuiltin(builtin_index);
       return;
     }
   }
@@ -1634,6 +1631,17 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
   Call(EntryFromBuiltinIndexAsOperand(builtin_index));
 }
 
+void TurboAssembler::CallBuiltin(int builtin_index) {
+  DCHECK(Builtins::IsBuiltinId(builtin_index));
+  DCHECK(FLAG_embedded_builtins);
+  RecordCommentForOffHeapTrampoline(builtin_index);
+  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+  EmbeddedData d = EmbeddedData::FromBlob();
+  Address entry = d.InstructionStartOfBuiltin(builtin_index);
+  Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
+  call(kScratchRegister);
+}
+
 void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                          Register code_object) {
   // Code objects are called differently depending on whether we are generating
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 139690bb8df9b0..8e7766c7e1946c 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -344,6 +344,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 
   Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
   void CallBuiltinByIndex(Register builtin_index) override;
+  void CallBuiltin(int builtin_index);
 
   void LoadCodeObjectEntry(Register destination, Register code_object) override;
   void CallCodeObject(Register code_object) override;
@@ -353,7 +354,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void RetpolineCall(Address destination, RelocInfo::Mode rmode);
 
   void Jump(Address destination, RelocInfo::Mode rmode);
-  void Jump(ExternalReference ext);
+  void Jump(const ExternalReference& reference) override;
   void Jump(Operand op);
   void Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
             Condition cc = always);
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index 56618d20e0efbf..8ba54e85b42ec1 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -6,7 +6,14 @@
 #define V8_CODEGEN_X64_SSE_INSTR_H_
 
 #define SSE2_INSTRUCTION_LIST(V) \
+  V(andnpd, 66, 0F, 55)          \
+  V(addpd, 66, 0F, 58)           \
+  V(mulpd, 66, 0F, 59)           \
   V(cvtps2dq, 66, 0F, 5B)        \
+  V(subpd, 66, 0F, 5C)           \
+  V(minpd, 66, 0F, 5D)           \
+  V(maxpd, 66, 0F, 5F)           \
+  V(divpd, 66, 0F, 5E)           \
   V(punpcklbw, 66, 0F, 60)       \
   V(punpcklwd, 66, 0F, 61)       \
   V(punpckldq, 66, 0F, 62)       \
@@ -40,10 +47,12 @@
   V(pmuludq, 66, 0F, F4)         \
   V(psllw, 66, 0F, F1)           \
   V(pslld, 66, 0F, F2)           \
+  V(psllq, 66, 0F, F3)           \
   V(psraw, 66, 0F, E1)           \
   V(psrad, 66, 0F, E2)           \
   V(psrlw, 66, 0F, D1)           \
   V(psrld, 66, 0F, D2)           \
+  V(psrlq, 66, 0F, D3)           \
   V(psubb, 66, 0F, F8)           \
   V(psubw, 66, 0F, F9)           \
   V(psubd, 66, 0F, FA)           \
@@ -68,6 +77,7 @@
   V(psignd, 66, 0F, 38, 0A)
 
 #define SSE4_INSTRUCTION_LIST(V) \
+  V(blendvpd, 66, 0F, 38, 15)    \
   V(pcmpeqq, 66, 0F, 38, 29)     \
   V(ptest, 66, 0F, 38, 17)       \
   V(pmovsxbw, 66, 0F, 38, 20)    \
diff --git a/deps/v8/src/common/OWNERS b/deps/v8/src/common/OWNERS
index 3f9de7e204c675..47506200728b99 100644
--- a/deps/v8/src/common/OWNERS
+++ b/deps/v8/src/common/OWNERS
@@ -1,3 +1,3 @@
-file://COMMON_OWNERS
+file:../../COMMON_OWNERS
 
 # COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/common/assert-scope.cc b/deps/v8/src/common/assert-scope.cc
index 5a299fa1eed0cd..f1fe717cc0983c 100644
--- a/deps/v8/src/common/assert-scope.cc
+++ b/deps/v8/src/common/assert-scope.cc
@@ -92,16 +92,18 @@ bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
   return current_data == nullptr || current_data->Get(kType);
 }
 
-template <PerIsolateAssertType kType, bool kAllow>
-class PerIsolateAssertScope<kType, kAllow>::DataBit
-    : public BitField<bool, kType, 1> {};
+namespace {
+template <PerIsolateAssertType kType>
+using DataBit = BitField<bool, kType, 1>;
+}
 
 template <PerIsolateAssertType kType, bool kAllow>
 PerIsolateAssertScope<kType, kAllow>::PerIsolateAssertScope(Isolate* isolate)
     : isolate_(isolate), old_data_(isolate->per_isolate_assert_data()) {
   DCHECK_NOT_NULL(isolate);
   STATIC_ASSERT(kType < 32);
-  isolate_->set_per_isolate_assert_data(DataBit::update(old_data_, kAllow));
+  isolate_->set_per_isolate_assert_data(
+      DataBit<kType>::update(old_data_, kAllow));
 }
 
 template <PerIsolateAssertType kType, bool kAllow>
@@ -112,7 +114,7 @@ PerIsolateAssertScope<kType, kAllow>::~PerIsolateAssertScope() {
 // static
 template <PerIsolateAssertType kType, bool kAllow>
 bool PerIsolateAssertScope<kType, kAllow>::IsAllowed(Isolate* isolate) {
-  return DataBit::decode(isolate->per_isolate_assert_data());
+  return DataBit<kType>::decode(isolate->per_isolate_assert_data());
 }
 
 // -----------------------------------------------------------------------------
diff --git a/deps/v8/src/common/assert-scope.h b/deps/v8/src/common/assert-scope.h
index 606439d42bcf31..73729400ac6c95 100644
--- a/deps/v8/src/common/assert-scope.h
+++ b/deps/v8/src/common/assert-scope.h
@@ -81,8 +81,6 @@ class PerIsolateAssertScope {
   static bool IsAllowed(Isolate* isolate);
 
  private:
-  class DataBit;
-
   Isolate* isolate_;
   uint32_t old_data_;
 
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 8d1bf5dfcc1e72..a0584b95c40475 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -101,6 +101,14 @@ constexpr int kStackSpaceRequiredForCompilation = 40;
 #define V8_OS_WIN_X64 true
 #endif
 
+#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64)
+#define V8_OS_WIN_ARM64 true
+#endif
+
+#if defined(V8_OS_WIN_X64) || defined(V8_OS_WIN_ARM64)
+#define V8_OS_WIN64 true
+#endif
+
 // Superclass for classes only using static method functions.
 // The subclass of AllStatic cannot be instantiated at all.
 class AllStatic {
@@ -882,14 +890,14 @@ constexpr int kIeeeDoubleExponentWordOffset = 0;
 // Testers for test.
 
 #define HAS_SMI_TAG(value) \
-  ((static_cast<intptr_t>(value) & ::i::kSmiTagMask) == ::i::kSmiTag)
+  ((static_cast<i::Tagged_t>(value) & ::i::kSmiTagMask) == ::i::kSmiTag)
 
-#define HAS_STRONG_HEAP_OBJECT_TAG(value)                       \
-  (((static_cast<intptr_t>(value) & ::i::kHeapObjectTagMask) == \
+#define HAS_STRONG_HEAP_OBJECT_TAG(value)                          \
+  (((static_cast<i::Tagged_t>(value) & ::i::kHeapObjectTagMask) == \
     ::i::kHeapObjectTag))
 
-#define HAS_WEAK_HEAP_OBJECT_TAG(value)                         \
-  (((static_cast<intptr_t>(value) & ::i::kHeapObjectTagMask) == \
+#define HAS_WEAK_HEAP_OBJECT_TAG(value)                            \
+  (((static_cast<i::Tagged_t>(value) & ::i::kHeapObjectTagMask) == \
     ::i::kWeakHeapObjectTag))
 
 // OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
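Switching the tag-check macros from intptr_t to Tagged_t matters under pointer compression, where Tagged_t is a 32-bit type: the check must look at the truncated compressed value rather than a sign-extended word. Standalone mirror using V8's actual Smi tag constants:

```cpp
#include <cstdint>

using Tagged_t = uint32_t;           // compressed-pointer configuration
constexpr Tagged_t kSmiTagMask = 1;  // low bit separates Smis from HeapObjects
constexpr Tagged_t kSmiTag = 0;

bool HasSmiTag(uint64_t raw_value) {
  return (static_cast<Tagged_t>(raw_value) & kSmiTagMask) == kSmiTag;
}
```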
@@ -1060,6 +1068,25 @@ enum class VariableMode : uint8_t {
                   // has been shadowed by an eval-introduced
                   // variable
 
+  // Variables for private methods or accessors whose access requires a
+  // brand check. Declared only in class scopes by the compiler
+  // and allocated only in class contexts:
+  kPrivateMethod,  // Does not coexist with any other variable with the same
+                   // name in the same scope.
+
+  kPrivateSetterOnly,  // Incompatible with variables with the same name but
+                       // any mode other than kPrivateGetterOnly. Transition to
+                       // kPrivateGetterAndSetter if a later declaration for the
+                       // same name with kPrivateGetterOnly is made.
+
+  kPrivateGetterOnly,  // Incompatible with variables with the same name but
+                       // any mode other than kPrivateSetterOnly. Transition to
+                       // kPrivateGetterAndSetter if a later declaration for the
+                       // same name with kPrivateSetterOnly is made.
+
+  kPrivateGetterAndSetter,  // Does not coexist with any other variable with the
+                            // same name in the same scope.
+
   kLastLexicalVariableMode = kConst,
 };
 
@@ -1071,6 +1098,14 @@ inline const char* VariableMode2String(VariableMode mode) {
       return "VAR";
     case VariableMode::kLet:
       return "LET";
+    case VariableMode::kPrivateGetterOnly:
+      return "PRIVATE_GETTER_ONLY";
+    case VariableMode::kPrivateSetterOnly:
+      return "PRIVATE_SETTER_ONLY";
+    case VariableMode::kPrivateMethod:
+      return "PRIVATE_METHOD";
+    case VariableMode::kPrivateGetterAndSetter:
+      return "PRIVATE_GETTER_AND_SETTER";
     case VariableMode::kConst:
       return "CONST";
     case VariableMode::kDynamic:
@@ -1104,6 +1139,21 @@ inline bool IsDeclaredVariableMode(VariableMode mode) {
   return mode <= VariableMode::kVar;
 }
 
+inline bool IsPrivateMethodOrAccessorVariableMode(VariableMode mode) {
+  return mode >= VariableMode::kPrivateMethod &&
+         mode <= VariableMode::kPrivateGetterAndSetter;
+}
+
+inline bool IsSerializableVariableMode(VariableMode mode) {
+  return IsDeclaredVariableMode(mode) ||
+         IsPrivateMethodOrAccessorVariableMode(mode);
+}
+
+inline bool IsConstVariableMode(VariableMode mode) {
+  return mode == VariableMode::kConst ||
+         IsPrivateMethodOrAccessorVariableMode(mode);
+}
+
 inline bool IsLexicalVariableMode(VariableMode mode) {
   STATIC_ASSERT(static_cast<uint8_t>(VariableMode::kLet) ==
                 0);  // Implies that mode >= VariableMode::kLet.
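The private-mode predicates depend on the four new modes being declared contiguously, so each test compiles to a two-comparison range check. A minimal mirror of that ordering (unrelated modes elided):

```cpp
#include <cstdint>

enum class VariableMode : uint8_t {
  kLet, kConst, kVar, kDynamic,  // elided: the remaining dynamic modes
  kPrivateMethod, kPrivateSetterOnly, kPrivateGetterOnly,
  kPrivateGetterAndSetter,
};

bool IsPrivateMethodOrAccessorVariableMode(VariableMode mode) {
  return mode >= VariableMode::kPrivateMethod &&
         mode <= VariableMode::kPrivateGetterAndSetter;
}
```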
@@ -1168,8 +1218,6 @@ enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
 
 enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
 
-enum RequiresBrandCheckFlag : uint8_t { kNoBrandCheck, kRequiresBrandCheck };
-
 enum class InterpreterPushArgsMode : unsigned {
   kArrayFunction,
   kWithFinalSpread,
@@ -1498,12 +1546,12 @@ enum KeyedAccessStoreMode {
 
 enum MutableMode { MUTABLE, IMMUTABLE };
 
-static inline bool IsCOWHandlingStoreMode(KeyedAccessStoreMode store_mode) {
+inline bool IsCOWHandlingStoreMode(KeyedAccessStoreMode store_mode) {
   return store_mode == STORE_HANDLE_COW ||
          store_mode == STORE_AND_GROW_HANDLE_COW;
 }
 
-static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
+inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
   return store_mode == STORE_AND_GROW_HANDLE_COW;
 }
 
@@ -1535,6 +1583,11 @@ constexpr int kSmallOrderedHashMapMinCapacity = 4;
 // has correct value range (see Issue 830 for more details).
 enum StackFrameId { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 };
 
+enum class ExceptionStatus : bool { kException = false, kSuccess = true };
+V8_INLINE bool operator!(ExceptionStatus status) {
+  return !static_cast<bool>(status);
+}
+
 }  // namespace internal
 }  // namespace v8
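ExceptionStatus is a bool-like enum whose overloaded operator! preserves the usual early-return idiom while making the success/exception contract explicit in signatures. Self-contained sketch (TryStep is a stand-in, not a V8 API):

```cpp
enum class ExceptionStatus : bool { kException = false, kSuccess = true };
inline bool operator!(ExceptionStatus status) {
  return !static_cast<bool>(status);
}

ExceptionStatus TryStep() { return ExceptionStatus::kSuccess; }  // stand-in

bool RunSteps() {
  if (!TryStep()) return false;  // reads naturally despite the enum type
  return true;
}
```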
 
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index fedbfa5a100497..e3307a525c81fa 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -118,9 +118,9 @@ namespace internal {
   T(NoAccess, "no access")                                                     \
   T(NonCallableInInstanceOfCheck,                                              \
     "Right-hand side of 'instanceof' is not callable")                         \
-  T(NonCoercible, "Cannot destructure 'undefined' or 'null'.")                 \
+  T(NonCoercible, "Cannot destructure '%' as it is %.")                        \
   T(NonCoercibleWithProperty,                                                  \
-    "Cannot destructure property `%` of 'undefined' or 'null'.")               \
+    "Cannot destructure property '%' of '%' as it is %.")                      \
   T(NonExtensibleProto, "% is not extensible")                                 \
   T(NonObjectInInstanceOfCheck,                                                \
     "Right-hand side of 'instanceof' is not an object")                        \
@@ -146,7 +146,8 @@ namespace internal {
   T(NotSuperConstructorAnonymousClass,                                         \
     "Super constructor % of anonymous class is not a constructor")             \
   T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.")     \
-  T(NotInt32SharedTypedArray, "% is not an int32 shared typed array.")         \
+  T(NotInt32OrBigInt64SharedTypedArray,                                        \
+    "% is not an int32 or BigInt64 shared typed array.")                       \
   T(ObjectGetterExpectingFunction,                                             \
     "Object.prototype.__defineGetter__: Expecting function")                   \
   T(ObjectGetterCallable, "Getter must be a function: %")                      \
@@ -412,11 +413,15 @@ namespace internal {
   T(InvalidOrUnexpectedToken, "Invalid or unexpected token")                   \
   T(InvalidPrivateFieldResolution,                                             \
     "Private field '%' must be declared in an enclosing class")                \
-  T(InvalidPrivateFieldRead,                                                   \
-    "Read of private field % from an object which did not contain the field")  \
-  T(InvalidPrivateFieldWrite,                                                  \
-    "Write of private field % to an object which did not contain the field")   \
+  T(InvalidPrivateMemberRead,                                                  \
+    "Cannot read private member % from an object whose class did not declare " \
+    "it")                                                                      \
+  T(InvalidPrivateMemberWrite,                                                 \
+    "Cannot write private member % to an object whose class did not declare "  \
+    "it")                                                                      \
   T(InvalidPrivateMethodWrite, "Private method '%' is not writable")           \
+  T(InvalidPrivateGetterAccess, "'%' was defined without a getter")            \
+  T(InvalidPrivateSetterAccess, "'%' was defined without a setter")            \
   T(JsonParseUnexpectedEOS, "Unexpected end of JSON input")                    \
   T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %")      \
   T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
@@ -484,6 +489,7 @@ namespace internal {
     "Too many arguments in function call (only 65535 allowed)")                \
   T(TooManyParameters,                                                         \
     "Too many parameters in function definition (only 65534 allowed)")         \
+  T(TooManyProperties, "Too many properties to enumerate")                     \
   T(TooManySpreads,                                                            \
     "Literal containing too many nested spreads (up to 65534 allowed)")        \
   T(TooManyVariables, "Too many variables declared (only 4194303 allowed)")    \
@@ -574,7 +580,10 @@ namespace internal {
     "FinalizationGroup.prototype.register: target and holdings must not be "   \
     "same")                                                                    \
   T(WeakRefsWeakRefConstructorTargetMustBeObject,                              \
-    "WeakRef: target must be an object")
+    "WeakRef: target must be an object")                                       \
+  T(OptionalChainingNoNew, "Invalid optional chain from new expression")       \
+  T(OptionalChainingNoSuper, "Invalid optional chain from super property")     \
+  T(OptionalChainingNoTemplate, "Invalid tagged template on optional chain")
 
 enum class MessageTemplate {
 #define TEMPLATE(NAME, STRING) k##NAME,
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index 00a79bb29107ff..a8fd7f245cb90c 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -35,7 +35,12 @@ V8_INLINE Address GetIsolateRoot<Address>(Address on_heap_addr) {
 
 template <>
 V8_INLINE Address GetIsolateRoot<Isolate*>(Isolate* isolate) {
-  return isolate->isolate_root();
+  Address isolate_root = isolate->isolate_root();
+#ifdef V8_COMPRESS_POINTERS
+  isolate_root = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
+      reinterpret_cast<void*>(isolate_root), kPtrComprIsolateRootAlignment));
+#endif
+  return isolate_root;
 }
 
 // Decompresses smi value.
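V8_ASSUME_ALIGNED lowers to __builtin_assume_aligned on GCC/Clang: promising the optimizer that the isolate root sits on a pointer-compression cage boundary lets it fold decompression arithmetic into addressing modes. A sketch with an assumed 4 KiB alignment for illustration:

```cpp
inline void* AssumeAligned4K(void* p) {
#if defined(__GNUC__)
  return __builtin_assume_aligned(p, 4096);  // alignment must be a constant
#else
  return p;  // no assumption on other compilers
#endif
}
```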
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index a369de48859ef9..7a72be80284c05 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -31,18 +31,6 @@ FieldAccess AccessBuilder::ForExternalTaggedValue() {
   return access;
 }
 
-// static
-FieldAccess AccessBuilder::ForExternalUint8Value() {
-  FieldAccess access = {kUntaggedBase,
-                        0,
-                        MaybeHandle<Name>(),
-                        MaybeHandle<Map>(),
-                        TypeCache::Get()->kUint8,
-                        MachineType::Uint8(),
-                        kNoWriteBarrier};
-  return access;
-}
-
 // static
 FieldAccess AccessBuilder::ForMap() {
   FieldAccess access = {
@@ -93,10 +81,20 @@ FieldAccess AccessBuilder::ForBigIntLeastSignificantDigit64() {
 
 // static
 FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
+  FieldAccess access = {
+      kTaggedBase,         JSObject::kPropertiesOrHashOffset,
+      MaybeHandle<Name>(), MaybeHandle<Map>(),
+      Type::Any(),         MachineType::TypeCompressedTagged(),
+      kFullWriteBarrier,   LoadSensitivity::kCritical};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer() {
   FieldAccess access = {
       kTaggedBase,          JSObject::kPropertiesOrHashOffset,
       MaybeHandle<Name>(),  MaybeHandle<Map>(),
-      Type::Any(),          MachineType::TypeCompressedTagged(),
+      Type::Any(),          MachineType::TypeCompressedTaggedPointer(),
       kPointerWriteBarrier, LoadSensitivity::kCritical};
   return access;
 }
@@ -172,8 +170,8 @@ FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
   FieldAccess access = {
       kTaggedBase,         JSFunction::kPrototypeOrInitialMapOffset,
       MaybeHandle<Name>(), MaybeHandle<Map>(),
-      Type::Any(),         MachineType::TypeCompressedTagged(),
-      kFullWriteBarrier};
+      Type::Any(),         MachineType::TypeCompressedTaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
@@ -182,7 +180,7 @@ FieldAccess AccessBuilder::ForJSFunctionContext() {
   FieldAccess access = {
       kTaggedBase,         JSFunction::kContextOffset,
       MaybeHandle<Name>(), MaybeHandle<Map>(),
-      Type::Internal(),    MachineType::TypeCompressedTagged(),
+      Type::Internal(),    MachineType::TypeCompressedTaggedPointer(),
       kPointerWriteBarrier};
   return access;
 }
@@ -304,7 +302,7 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectParametersAndRegisters() {
   FieldAccess access = {
       kTaggedBase,         JSGeneratorObject::kParametersAndRegistersOffset,
       Handle<Name>(),      MaybeHandle<Map>(),
-      Type::Internal(),    MachineType::TypeCompressedTagged(),
+      Type::Internal(),    MachineType::TypeCompressedTaggedPointer(),
       kPointerWriteBarrier};
   return access;
 }
@@ -325,7 +323,7 @@ FieldAccess AccessBuilder::ForJSAsyncFunctionObjectPromise() {
       kTaggedBase,         JSAsyncFunctionObject::kPromiseOffset,
       Handle<Name>(),      MaybeHandle<Map>(),
       Type::OtherObject(), MachineType::TypeCompressedTaggedPointer(),
-      kFullWriteBarrier};
+      kPointerWriteBarrier};
   return access;
 }
 
@@ -357,29 +355,20 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
                         Handle<Name>(),
                         MaybeHandle<Map>(),
                         type_cache->kJSArrayLengthType,
-                        MachineType::TypeCompressedTaggedSigned(),
+                        MachineType::TypeCompressedTagged(),
                         kFullWriteBarrier};
   if (IsDoubleElementsKind(elements_kind)) {
     access.type = type_cache->kFixedDoubleArrayLengthType;
+    access.machine_type = MachineType::TypeCompressedTaggedSigned();
     access.write_barrier_kind = kNoWriteBarrier;
   } else if (IsFastElementsKind(elements_kind)) {
     access.type = type_cache->kFixedArrayLengthType;
+    access.machine_type = MachineType::TypeCompressedTaggedSigned();
     access.write_barrier_kind = kNoWriteBarrier;
   }
   return access;
 }
 
-
-// static
-FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
-  FieldAccess access = {
-      kTaggedBase,           JSArrayBuffer::kBackingStoreOffset,
-      MaybeHandle<Name>(),   MaybeHandle<Map>(),
-      Type::OtherInternal(), MachineType::Pointer(),
-      kNoWriteBarrier};
-  return access;
-}
-
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
   FieldAccess access = {
@@ -441,7 +430,7 @@ FieldAccess AccessBuilder::ForJSTypedArrayBasePointer() {
       kTaggedBase,           JSTypedArray::kBasePointerOffset,
       MaybeHandle<Name>(),   MaybeHandle<Map>(),
       Type::OtherInternal(), MachineType::TypeCompressedTagged(),
-      kPointerWriteBarrier,  LoadSensitivity::kCritical};
+      kFullWriteBarrier,     LoadSensitivity::kCritical};
   return access;
 }
 
@@ -746,20 +735,6 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
   return access;
 }
 
-// static
-ElementAccess AccessBuilder::ForExternalOneByteStringCharacter() {
-  ElementAccess access = {kUntaggedBase, 0, TypeCache::Get()->kUint8,
-                          MachineType::Uint8(), kNoWriteBarrier};
-  return access;
-}
-
-// static
-ElementAccess AccessBuilder::ForExternalTwoByteStringCharacter() {
-  ElementAccess access = {kUntaggedBase, 0, TypeCache::Get()->kUint16,
-                          MachineType::Uint16(), kNoWriteBarrier};
-  return access;
-}
-
 // static
 ElementAccess AccessBuilder::ForSeqOneByteStringCharacter() {
   ElementAccess access = {kTaggedBase, SeqOneByteString::kHeaderSize,
@@ -776,26 +751,6 @@ ElementAccess AccessBuilder::ForSeqTwoByteStringCharacter() {
   return access;
 }
 
-// static
-FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
-  FieldAccess access = {
-      kTaggedBase,         JSGlobalObject::kGlobalProxyOffset,
-      Handle<Name>(),      MaybeHandle<Map>(),
-      Type::Receiver(),    MachineType::TypeCompressedTaggedPointer(),
-      kPointerWriteBarrier};
-  return access;
-}
-
-// static
-FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
-  FieldAccess access = {
-      kTaggedBase,         JSGlobalObject::kNativeContextOffset,
-      Handle<Name>(),      MaybeHandle<Map>(),
-      Type::Internal(),    MachineType::TypeCompressedTaggedPointer(),
-      kPointerWriteBarrier};
-  return access;
-}
-
 // static
 FieldAccess AccessBuilder::ForJSGlobalProxyNativeContext() {
   FieldAccess access = {
@@ -864,17 +819,6 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
   return access;
 }
 
-// static
-FieldAccess AccessBuilder::ForValue() {
-  FieldAccess access = {
-      kTaggedBase,         JSPrimitiveWrapper::kValueOffset,
-      Handle<Name>(),      MaybeHandle<Map>(),
-      Type::NonInternal(), MachineType::TypeCompressedTagged(),
-      kFullWriteBarrier};
-  return access;
-}
-
-
 // static
 FieldAccess AccessBuilder::ForArgumentsLength() {
   FieldAccess access = {
@@ -892,7 +836,7 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
       kTaggedBase,         JSSloppyArgumentsObject::kCalleeOffset,
       Handle<Name>(),      MaybeHandle<Map>(),
       Type::NonInternal(), MachineType::TypeCompressedTagged(),
-      kPointerWriteBarrier};
+      kFullWriteBarrier};
   return access;
 }
 
@@ -930,6 +874,19 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
   return access;
 }
 
+// static
+FieldAccess AccessBuilder::ForContextSlotKnownPointer(size_t index) {
+  int offset = Context::OffsetOfElementAt(static_cast<int>(index));
+  DCHECK_EQ(offset,
+            Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
+  FieldAccess access = {
+      kTaggedBase,         offset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::Any(),         MachineType::TypeCompressedTaggedPointer(),
+      kPointerWriteBarrier};
+  return access;
+}
+
 // static
 ElementAccess AccessBuilder::ForFixedArrayElement() {
   ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
@@ -1188,19 +1145,6 @@ ElementAccess AccessBuilder::ForOrderedHashMapEntryValue() {
   return access;
 }
 
-// static
-FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
-  FieldAccess access = {
-      kTaggedBase,
-      FixedArray::OffsetOfElementAt(NumberDictionary::kMaxNumberKeyIndex),
-      MaybeHandle<Name>(),
-      MaybeHandle<Map>(),
-      Type::Any(),
-      MachineType::TypeCompressedTagged(),
-      kNoWriteBarrier};
-  return access;
-}
-
 // static
 FieldAccess AccessBuilder::ForDictionaryNextEnumerationIndex() {
   FieldAccess access = {
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index e3a17fe257d405..231e75f819587b 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -54,6 +54,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to JSObject::properties() field.
   static FieldAccess ForJSObjectPropertiesOrHash();
 
+  // Provides access to JSObject::properties() field for known pointers.
+  static FieldAccess ForJSObjectPropertiesOrHashKnownPointer();
+
   // Provides access to JSObject::elements() field.
   static FieldAccess ForJSObjectElements();
 
@@ -128,9 +131,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to JSArray::length() field.
   static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
 
-  // Provides access to JSArrayBuffer::backing_store() field.
-  static FieldAccess ForJSArrayBufferBackingStore();
-
   // Provides access to JSArrayBuffer::bit_field() field.
   static FieldAccess ForJSArrayBufferBitField();
 
@@ -236,24 +236,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to ExternalString::resource_data() field.
   static FieldAccess ForExternalStringResourceData();
 
-  // Provides access to ExternalOneByteString characters.
-  static ElementAccess ForExternalOneByteStringCharacter();
-
-  // Provides access to ExternalTwoByteString characters.
-  static ElementAccess ForExternalTwoByteStringCharacter();
-
   // Provides access to SeqOneByteString characters.
   static ElementAccess ForSeqOneByteStringCharacter();
 
   // Provides access to SeqTwoByteString characters.
   static ElementAccess ForSeqTwoByteStringCharacter();
 
-  // Provides access to JSGlobalObject::global_proxy() field.
-  static FieldAccess ForJSGlobalObjectGlobalProxy();
-
-  // Provides access to JSGlobalObject::native_context() field.
-  static FieldAccess ForJSGlobalObjectNativeContext();
-
   // Provides access to JSGlobalProxy::native_context() field.
   static FieldAccess ForJSGlobalProxyNativeContext();
 
@@ -272,9 +260,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to JSStringIterator::index() field.
   static FieldAccess ForJSStringIteratorIndex();
 
-  // Provides access to JSPrimitiveWrapper::value() field.
-  static FieldAccess ForValue();
-
   // Provides access to Cell::value() field.
   static FieldAccess ForCellValue();
 
@@ -289,6 +274,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to Context slots.
   static FieldAccess ForContextSlot(size_t index);
 
+  // Provides access to Context slots that are known to be pointers.
+  static FieldAccess ForContextSlotKnownPointer(size_t index);
+
   // Provides access to FixedArray elements.
   static ElementAccess ForFixedArrayElement();
   static ElementAccess ForFixedArrayElement(
@@ -327,7 +315,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   static ElementAccess ForOrderedHashMapEntryValue();
 
   // Provides access to Dictionary fields.
-  static FieldAccess ForDictionaryMaxNumberKey();
   static FieldAccess ForDictionaryNextEnumerationIndex();
   static FieldAccess ForDictionaryObjectHashIndex();
 
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 6fc9e8214e5b49..269ef903751ee7 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -9,6 +9,7 @@
 #include "src/builtins/accessors.h"
 #include "src/compiler/compilation-dependencies.h"
 #include "src/compiler/compilation-dependency.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/compiler/type-cache.h"
 #include "src/ic/call-optimization.h"
 #include "src/logging/counters.h"
@@ -81,11 +82,12 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
     Zone* zone, Handle<Map> receiver_map,
     ZoneVector<CompilationDependency const*>&& dependencies,
     FieldIndex field_index, Representation field_representation,
-    Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
-    MaybeHandle<Map> transition_map) {
+    Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
+    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
   return PropertyAccessInfo(kDataField, holder, transition_map, field_index,
-                            field_representation, field_type, field_map,
-                            {{receiver_map}, zone}, std::move(dependencies));
+                            field_representation, field_type, field_owner_map,
+                            field_map, {{receiver_map}, zone},
+                            std::move(dependencies));
 }
 
 // static
@@ -93,11 +95,12 @@ PropertyAccessInfo PropertyAccessInfo::DataConstant(
     Zone* zone, Handle<Map> receiver_map,
     ZoneVector<CompilationDependency const*>&& dependencies,
     FieldIndex field_index, Representation field_representation,
-    Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
-    MaybeHandle<Map> transition_map) {
+    Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
+    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
   return PropertyAccessInfo(kDataConstant, holder, transition_map, field_index,
-                            field_representation, field_type, field_map,
-                            {{receiver_map}, zone}, std::move(dependencies));
+                            field_representation, field_type, field_owner_map,
+                            field_map, {{receiver_map}, zone},
+                            std::move(dependencies));
 }
 
 // static
@@ -155,7 +158,7 @@ PropertyAccessInfo::PropertyAccessInfo(Zone* zone, Kind kind,
 PropertyAccessInfo::PropertyAccessInfo(
     Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
     FieldIndex field_index, Representation field_representation,
-    Type field_type, MaybeHandle<Map> field_map,
+    Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
     ZoneVector<Handle<Map>>&& receiver_maps,
     ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
     : kind_(kind),
@@ -166,7 +169,11 @@ PropertyAccessInfo::PropertyAccessInfo(
       field_index_(field_index),
       field_representation_(field_representation),
       field_type_(field_type),
-      field_map_(field_map) {}
+      field_owner_map_(field_owner_map),
+      field_map_(field_map) {
+  DCHECK_IMPLIES(!transition_map.is_null(),
+                 field_owner_map.address() == transition_map.address());
+}
 
 bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
                                AccessMode access_mode, Zone* zone) {
@@ -258,6 +265,13 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
   }
 }
 
+ConstFieldInfo PropertyAccessInfo::GetConstFieldInfo() const {
+  if (IsDataConstant()) {
+    return ConstFieldInfo(field_owner_map_.ToHandleChecked());
+  }
+  return ConstFieldInfo::None();
+}
+
 AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
                                      CompilationDependencies* dependencies,
                                      Zone* zone)
@@ -276,35 +290,32 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
 }
 
 bool AccessInfoFactory::ComputeElementAccessInfos(
-    ElementAccessFeedback const& processed, AccessMode access_mode,
+    ElementAccessFeedback const& feedback,
     ZoneVector<ElementAccessInfo>* access_infos) const {
+  AccessMode access_mode = feedback.keyed_mode().access_mode();
   if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
     // For polymorphic loads of similar elements kinds (i.e. all tagged or all
     // double), always use the "worst case" code without a transition.  This is
     // much faster than transitioning the elements to the worst case, trading a
     // TransitionElementsKind for a CheckMaps, avoiding mutation of the array.
     base::Optional<ElementAccessInfo> access_info =
-        ConsolidateElementLoad(processed);
+        ConsolidateElementLoad(feedback);
     if (access_info.has_value()) {
       access_infos->push_back(*access_info);
       return true;
     }
   }
 
-  for (Handle<Map> receiver_map : processed.receiver_maps) {
-    // Compute the element access information.
+  for (auto const& group : feedback.transition_groups()) {
+    DCHECK(!group.empty());
+    Handle<Map> target = group.front();
     base::Optional<ElementAccessInfo> access_info =
-        ComputeElementAccessInfo(receiver_map, access_mode);
+        ComputeElementAccessInfo(target, access_mode);
     if (!access_info.has_value()) return false;
 
-    // Collect the possible transitions for the {receiver_map}.
-    for (auto transition : processed.transitions) {
-      if (transition.second.equals(receiver_map)) {
-        access_info->AddTransitionSource(transition.first);
-      }
+    for (size_t i = 1; i < group.size(); ++i) {
+      access_info->AddTransitionSource(group[i]);
     }
-
-    // Schedule the access information.
     access_infos->push_back(*access_info);
   }
   return true;
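
The rewritten loop depends on a convention in the reworked ElementAccessFeedback: maps arrive in transition groups, where the first map of each group is the transition target and any later maps are sources that transition into it. A short sketch of that convention, using plain ints in place of the broker's map handles:

    #include <cstddef>
    #include <vector>

    // Sketch: a transition group is {target, source0, source1, ...}.
    struct ElementAccessInfoSketch {
      int target_map;
      std::vector<int> transition_sources;
    };

    ElementAccessInfoSketch FromGroup(const std::vector<int>& group) {
      ElementAccessInfoSketch info{group.front(), {}};
      for (std::size_t i = 1; i < group.size(); ++i) {
        info.transition_sources.push_back(group[i]);  // AddTransitionSource
      }
      return info;
    }
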
@@ -378,15 +389,19 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
     map_ref.SerializeOwnDescriptor(descriptor);
     constness = dependencies()->DependOnFieldConstness(map_ref, descriptor);
   }
+  Handle<Map> field_owner_map(map->FindFieldOwner(isolate(), descriptor),
+                              isolate());
   switch (constness) {
     case PropertyConstness::kMutable:
       return PropertyAccessInfo::DataField(
           zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
-          details_representation, field_type, field_map, holder);
+          details_representation, field_type, field_owner_map, field_map,
+          holder);
     case PropertyConstness::kConst:
       return PropertyAccessInfo::DataConstant(
           zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
-          details_representation, field_type, field_map, holder);
+          details_representation, field_type, field_owner_map, field_map,
+          holder);
   }
   UNREACHABLE();
 }
@@ -431,7 +446,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
     CallOptimization optimization(isolate(), accessor);
     if (!optimization.is_simple_api_call() ||
         optimization.IsCrossContextLazyAccessorPair(
-            *broker()->native_context().object(), *map)) {
+            *broker()->target_native_context().object(), *map)) {
       return PropertyAccessInfo::Invalid(zone());
     }
 
@@ -537,11 +552,13 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
     }
 
     // Walk up the prototype chain.
+    MapRef(broker(), map).SerializePrototype();
     if (!map->prototype().IsJSObject()) {
       // Perform the implicit ToObject for primitives here.
       // Implemented according to ES6 section 7.3.2 GetV (V, P).
       Handle<JSFunction> constructor;
-      if (Map::GetConstructorFunction(map, broker()->native_context().object())
+      if (Map::GetConstructorFunction(
+              map, broker()->target_native_context().object())
               .ToHandle(&constructor)) {
         map = handle(constructor->initial_map(), isolate());
         DCHECK(map->prototype().IsJSObject());
@@ -615,6 +632,7 @@ void PropertyAccessInfo::RecordDependencies(
 bool AccessInfoFactory::FinalizePropertyAccessInfos(
     ZoneVector<PropertyAccessInfo> access_infos, AccessMode access_mode,
     ZoneVector<PropertyAccessInfo>* result) const {
+  if (access_infos.empty()) return false;
   MergePropertyAccessInfos(access_infos, access_mode, result);
   for (PropertyAccessInfo const& info : *result) {
     if (info.IsInvalid()) return false;
@@ -668,22 +686,28 @@ Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
 }  // namespace
 
 base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
-    ElementAccessFeedback const& processed) const {
-  ElementAccessFeedback::MapIterator it = processed.all_maps(broker());
-  MapRef first_map = it.current();
+    ElementAccessFeedback const& feedback) const {
+  if (feedback.transition_groups().empty()) return base::nullopt;
+
+  DCHECK(!feedback.transition_groups().front().empty());
+  MapRef first_map(broker(), feedback.transition_groups().front().front());
   InstanceType instance_type = first_map.instance_type();
   ElementsKind elements_kind = first_map.elements_kind();
+
   ZoneVector<Handle<Map>> maps(zone());
-  for (; !it.done(); it.advance()) {
-    MapRef map = it.current();
-    if (map.instance_type() != instance_type || !CanInlineElementAccess(map)) {
-      return base::nullopt;
-    }
-    if (!GeneralizeElementsKind(elements_kind, map.elements_kind())
-             .To(&elements_kind)) {
-      return base::nullopt;
+  for (auto const& group : feedback.transition_groups()) {
+    for (Handle<Map> map_handle : group) {
+      MapRef map(broker(), map_handle);
+      if (map.instance_type() != instance_type ||
+          !CanInlineElementAccess(map)) {
+        return base::nullopt;
+      }
+      if (!GeneralizeElementsKind(elements_kind, map.elements_kind())
+               .To(&elements_kind)) {
+        return base::nullopt;
+      }
+      maps.push_back(map.object());
     }
-    maps.push_back(map.object());
   }
 
   return ElementAccessInfo(std::move(maps), elements_kind, zone());
@@ -723,7 +747,7 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
     }
     // Special fields are always mutable.
     return PropertyAccessInfo::DataField(zone(), map, {{}, zone()}, field_index,
-                                         field_representation, field_type);
+                                         field_representation, field_type, map);
   }
   return PropertyAccessInfo::Invalid(zone());
 }
@@ -799,12 +823,12 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
     case PropertyConstness::kMutable:
       return PropertyAccessInfo::DataField(
           zone(), map, std::move(unrecorded_dependencies), field_index,
-          details_representation, field_type, field_map, holder,
+          details_representation, field_type, transition_map, field_map, holder,
           transition_map);
     case PropertyConstness::kConst:
       return PropertyAccessInfo::DataConstant(
           zone(), map, std::move(unrecorded_dependencies), field_index,
-          details_representation, field_type, field_map, holder,
+          details_representation, field_type, transition_map, field_map, holder,
           transition_map);
   }
   UNREACHABLE();
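
The thread running through this file is the new field_owner_map: every data-field access now records the map that owns the field, and GetConstFieldInfo exposes it for kDataConstant accesses. A simplified stand-in for ConstFieldInfo (the real type lives in simplified-operator.h), under the assumption that the owner map serves as the identity of a constant field, so that later passes only treat two const loads as the same value when their owner maps agree:

    // Sketch, not the real compiler::ConstFieldInfo.
    struct ConstFieldInfoSketch {
      const void* owner_map;  // nullptr means "not a const field"
      static ConstFieldInfoSketch None() { return {nullptr}; }
      bool IsConst() const { return owner_map != nullptr; }
      bool operator==(const ConstFieldInfoSketch& other) const {
        return owner_map == other.owner_map;
      }
    };
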
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 4c7c3611df685c..e2f6e6d453da7f 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -29,6 +29,7 @@ class CompilationDependency;
 class ElementAccessFeedback;
 class JSHeapBroker;
 class TypeCache;
+struct ConstFieldInfo;
 
 std::ostream& operator<<(std::ostream&, AccessMode);
 
@@ -77,14 +78,16 @@ class PropertyAccessInfo final {
       Zone* zone, Handle<Map> receiver_map,
       ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
       FieldIndex field_index, Representation field_representation,
-      Type field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
+      Type field_type, Handle<Map> field_owner_map,
+      MaybeHandle<Map> field_map = MaybeHandle<Map>(),
       MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
   static PropertyAccessInfo DataConstant(
       Zone* zone, Handle<Map> receiver_map,
       ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
       FieldIndex field_index, Representation field_representation,
-      Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
+      Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
+      MaybeHandle<JSObject> holder,
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
   static PropertyAccessInfo AccessorConstant(Zone* zone,
                                              Handle<Map> receiver_map,
@@ -109,6 +112,7 @@ class PropertyAccessInfo final {
   bool IsStringLength() const { return kind() == kStringLength; }
 
   bool HasTransitionMap() const { return !transition_map().is_null(); }
+  ConstFieldInfo GetConstFieldInfo() const;
 
   Kind kind() const { return kind_; }
   MaybeHandle<JSObject> holder() const {
@@ -137,7 +141,7 @@ class PropertyAccessInfo final {
   PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
                      MaybeHandle<Map> transition_map, FieldIndex field_index,
                      Representation field_representation, Type field_type,
-                     MaybeHandle<Map> field_map,
+                     Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
                      ZoneVector<Handle<Map>>&& receiver_maps,
                      ZoneVector<CompilationDependency const*>&& dependencies);
 
@@ -150,6 +154,7 @@ class PropertyAccessInfo final {
   FieldIndex field_index_;
   Representation field_representation_;
   Type field_type_;
+  MaybeHandle<Map> field_owner_map_;
   MaybeHandle<Map> field_map_;
 };
 
@@ -163,7 +168,7 @@ class AccessInfoFactory final {
   base::Optional<ElementAccessInfo> ComputeElementAccessInfo(
       Handle<Map> map, AccessMode access_mode) const;
   bool ComputeElementAccessInfos(
-      ElementAccessFeedback const& processed, AccessMode access_mode,
+      ElementAccessFeedback const& feedback,
       ZoneVector<ElementAccessInfo>* access_infos) const;
 
   PropertyAccessInfo ComputePropertyAccessInfo(Handle<Map> map,
@@ -191,7 +196,7 @@ class AccessInfoFactory final {
 
  private:
   base::Optional<ElementAccessInfo> ConsolidateElementLoad(
-      ElementAccessFeedback const& processed) const;
+      ElementAccessFeedback const& feedback) const;
   PropertyAccessInfo LookupSpecialFieldAccessor(Handle<Map> map,
                                                 Handle<Name> name) const;
   PropertyAccessInfo LookupTransition(Handle<Map> map, Handle<Name> name,
diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h
index 8da7c685a112d8..4cab0a7e6e2d15 100644
--- a/deps/v8/src/compiler/allocation-builder-inl.h
+++ b/deps/v8/src/compiler/allocation-builder-inl.h
@@ -14,11 +14,9 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-void AllocationBuilder::AllocateContext(int variadic_part_length,
-                                        Handle<Map> map) {
-  DCHECK(
-      IsInRange(map->instance_type(), FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE));
-  DCHECK_NE(NATIVE_CONTEXT_TYPE, map->instance_type());
+void AllocationBuilder::AllocateContext(int variadic_part_length, MapRef map) {
+  DCHECK(IsInRange(map.instance_type(), FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE));
+  DCHECK_NE(NATIVE_CONTEXT_TYPE, map.instance_type());
   int size = Context::SizeFor(variadic_part_length);
   Allocate(size, AllocationType::kYoung, Type::OtherInternal());
   Store(AccessBuilder::ForMap(), map);
@@ -29,11 +27,11 @@ void AllocationBuilder::AllocateContext(int variadic_part_length,
 }
 
 // Compound allocation of a FixedArray.
-void AllocationBuilder::AllocateArray(int length, Handle<Map> map,
+void AllocationBuilder::AllocateArray(int length, MapRef map,
                                       AllocationType allocation) {
-  DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
-         map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
-  int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+  DCHECK(map.instance_type() == FIXED_ARRAY_TYPE ||
+         map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+  int size = (map.instance_type() == FIXED_ARRAY_TYPE)
                  ? FixedArray::SizeFor(length)
                  : FixedDoubleArray::SizeFor(length);
   Allocate(size, allocation, Type::OtherInternal());
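
AllocateArray derives the allocation size purely from the instance type, as the DCHECK above enforces. A back-of-the-envelope model of the two SizeFor computations, with illustrative constants (the real layouts are defined by FixedArray and FixedDoubleArray; kTaggedSize is 8 here on the assumption of a 64-bit build without pointer compression):

    constexpr int kTaggedSize = 8;
    constexpr int kDoubleSize = 8;
    constexpr int kArrayHeaderSize = 2 * kTaggedSize;  // map + length

    int FixedArraySizeForSketch(int length) {
      return kArrayHeaderSize + length * kTaggedSize;
    }

    int FixedDoubleArraySizeForSketch(int length) {
      return kArrayHeaderSize + length * kDoubleSize;
    }
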
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index d92e0f769b357e..040dd014051270 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -49,16 +49,12 @@ class AllocationBuilder final {
   }
 
   // Compound allocation of a context.
-  inline void AllocateContext(int variadic_part_length, Handle<Map> map);
+  inline void AllocateContext(int variadic_part_length, MapRef map);
 
   // Compound allocation of a FixedArray.
-  inline void AllocateArray(int length, Handle<Map> map,
+  inline void AllocateArray(int length, MapRef map,
                             AllocationType allocation = AllocationType::kYoung);
 
-  // Compound store of a constant into a field.
-  void Store(const FieldAccess& access, Handle<Object> value) {
-    Store(access, jsgraph()->Constant(value));
-  }
   // Compound store of a constant into a field.
   void Store(const FieldAccess& access, const ObjectRef& value) {
     Store(access, jsgraph()->Constant(value));
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 88a9c52a3339f5..65a569d755b1fd 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -909,10 +909,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
+      CodeGenResult result = AssembleDeoptimizerCall(exit);
       if (result != kSuccess) return result;
       unwinding_info_writer_.MarkBlockWillExit();
       break;
@@ -921,10 +920,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       AssembleReturn(instr->InputAt(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    case kArchStackPointer:
-      __ mov(i.OutputRegister(), sp);
-      DCHECK_EQ(LeaveCC, i.OutputSBit());
-      break;
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -936,6 +931,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ mov(i.OutputRegister(), fp);
       }
       break;
+    case kArchStackPointerGreaterThan: {
+      constexpr size_t kValueIndex = 0;
+      DCHECK(instr->InputAt(kValueIndex)->IsRegister());
+      __ cmp(sp, i.InputRegister(kValueIndex));
+      break;
+    }
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                            i.InputDoubleRegister(0), DetermineStubCallMode());
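
The kArchStackPointerGreaterThan case replaces the removed kArchStackPointer opcode for stack checks: instead of materializing sp into a register and feeding it to a generic comparison, the code generator emits a single cmp against sp and lets a flags continuation consume kStackPointerGreaterThanCondition. In scalar terms the check is simply (a sketch, not V8 code):

    #include <cstdint>

    // Stack-overflow check: the value compared against sp is the stack limit,
    // and execution may proceed while sp is still above it.
    bool StackPointerGreaterThan(uintptr_t sp, uintptr_t limit) {
      return sp > limit;
    }
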
@@ -1838,6 +1839,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
               i.InputSimd128Register(1));
       break;
     }
+    case kArmF32x4Div: {
+      QwNeonRegister dst = i.OutputSimd128Register();
+      QwNeonRegister src1 = i.InputSimd128Register(0);
+      QwNeonRegister src2 = i.InputSimd128Register(1);
+      DCHECK_EQ(dst, q0);
+      DCHECK_EQ(src1, q0);
+      DCHECK_EQ(src2, q1);
+#define S_FROM_Q(reg, lane) SwVfpRegister::from_code(reg.code() * 4 + lane)
+      __ vdiv(S_FROM_Q(dst, 0), S_FROM_Q(src1, 0), S_FROM_Q(src2, 0));
+      __ vdiv(S_FROM_Q(dst, 1), S_FROM_Q(src1, 1), S_FROM_Q(src2, 1));
+      __ vdiv(S_FROM_Q(dst, 2), S_FROM_Q(src1, 2), S_FROM_Q(src2, 2));
+      __ vdiv(S_FROM_Q(dst, 3), S_FROM_Q(src1, 3), S_FROM_Q(src2, 3));
+#undef S_FROM_Q
+      break;
+    }
     case kArmF32x4Min: {
       __ vmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));
@@ -1902,13 +1918,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI32x4Shl: {
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon32, tmp, i.InputRegister(1));
       __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt5(1));
+              tmp);
       break;
     }
     case kArmI32x4ShrS: {
-      __ vshr(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt5(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon32, tmp, i.InputRegister(1));
+      __ vneg(Neon32, tmp, tmp);
+      __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI32x4Add: {
@@ -1976,8 +1997,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI32x4ShrU: {
-      __ vshr(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt5(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon32, tmp, i.InputRegister(1));
+      __ vneg(Neon32, tmp, tmp);
+      __ vshl(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI32x4MinU: {
@@ -2029,13 +2053,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI16x8Shl: {
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon16, tmp, i.InputRegister(1));
       __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt4(1));
+              tmp);
       break;
     }
     case kArmI16x8ShrS: {
-      __ vshr(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt4(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon16, tmp, i.InputRegister(1));
+      __ vneg(Neon16, tmp, tmp);
+      __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI16x8SConvertI32x4:
@@ -2112,8 +2141,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI16x8ShrU: {
-      __ vshr(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt4(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon16, tmp, i.InputRegister(1));
+      __ vneg(Neon16, tmp, tmp);
+      __ vshl(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI16x8UConvertI32x4:
@@ -2168,13 +2200,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI8x16Shl: {
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon8, tmp, i.InputRegister(1));
       __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt3(1));
+              tmp);
       break;
     }
     case kArmI8x16ShrS: {
-      __ vshr(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt3(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon8, tmp, i.InputRegister(1));
+      __ vneg(Neon8, tmp, tmp);
+      __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI8x16SConvertI16x8:
@@ -2237,8 +2274,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI8x16ShrU: {
-      __ vshr(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt3(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon8, tmp, i.InputRegister(1));
+      __ vneg(Neon8, tmp, tmp);
+      __ vshl(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI8x16UConvertI16x8:
@@ -3192,6 +3232,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   ArmOperandConverter g(this, nullptr);
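
The kArmF32x4Div case above works around the lack of a vector float divide in NEON by issuing four scalar VDIVs, one per lane. That is also why the instruction selector pins the operands to Q0 and Q1: only Q0 through Q7 alias the directly addressable single-precision registers S0 through S31. The lane arithmetic of the S_FROM_Q macro as a standalone sketch:

    #include <cstdio>

    // On ARM, Qn (n < 8) aliases the four single-precision registers
    // S(4n) .. S(4n+3); the macro computes exactly this mapping.
    int SFromQ(int q_code, int lane) { return q_code * 4 + lane; }

    int main() {
      for (int lane = 0; lane < 4; ++lane) {
        std::printf("Q1 lane %d -> S%d\n", lane, SFromQ(1, lane));  // S4..S7
      }
    }
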
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 165ca39f9d3620..3551e26aea8832 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -141,6 +141,7 @@ namespace compiler {
   V(ArmF32x4AddHoriz)              \
   V(ArmF32x4Sub)                   \
   V(ArmF32x4Mul)                   \
+  V(ArmF32x4Div)                   \
   V(ArmF32x4Min)                   \
   V(ArmF32x4Max)                   \
   V(ArmF32x4Eq)                    \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 41d7b4055fce5a..1d7cf61dfe7374 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -121,6 +121,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArmF32x4AddHoriz:
     case kArmF32x4Sub:
     case kArmF32x4Mul:
+    case kArmF32x4Div:
     case kArmF32x4Min:
     case kArmF32x4Max:
     case kArmF32x4Eq:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 06aba4491ac737..ce74faa4a62422 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -74,17 +74,6 @@ class ArmOperandGenerator : public OperandGenerator {
     }
     return false;
   }
-
-  // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
-  // register.
-  InstructionOperand UseRegisterOrStackPointer(Node* node) {
-    if (node->opcode() == IrOpcode::kLoadStackPointer) {
-      return LocationOperand(LocationOperand::EXPLICIT,
-                             LocationOperand::REGISTER,
-                             MachineRepresentation::kWord32, sp.code());
-    }
-    return UseRegister(node);
-  }
 };
 
 namespace {
@@ -102,6 +91,15 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
                  g.UseRegister(node->InputAt(1)));
 }
 
+void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
+                       Node* node) {
+  ArmOperandGenerator g(selector);
+  InstructionOperand temps[] = {g.TempSimd128Register()};
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
 void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
                      Node* node) {
   ArmOperandGenerator g(selector);
@@ -509,7 +507,8 @@ void InstructionSelector::VisitStore(Node* node) {
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
   MachineRepresentation rep = store_rep.representation();
 
-  if (write_barrier_kind != kNoWriteBarrier) {
+  if (write_barrier_kind != kNoWriteBarrier &&
+      V8_LIKELY(!FLAG_disable_write_barriers)) {
     DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
@@ -887,6 +886,15 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
   VisitBinop(this, node, kArmEor, kArmEor);
 }
 
+void InstructionSelector::VisitStackPointerGreaterThan(
+    Node* node, FlagsContinuation* cont) {
+  Node* const value = node->InputAt(0);
+  InstructionCode opcode = kArchStackPointerGreaterThan;
+
+  ArmOperandGenerator g(this);
+  EmitWithContinuation(opcode, g.UseRegister(value), cont);
+}
+
 namespace {
 
 template <typename TryMatchShift>
@@ -1686,17 +1694,17 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
 
   if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                                &input_count, &inputs[1])) {
-    inputs[0] = g.UseRegisterOrStackPointer(m.left().node());
+    inputs[0] = g.UseRegister(m.left().node());
     input_count++;
   } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
                                       &input_count, &inputs[1])) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    inputs[0] = g.UseRegisterOrStackPointer(m.right().node());
+    inputs[0] = g.UseRegister(m.right().node());
     input_count++;
   } else {
     opcode |= AddressingModeField::encode(kMode_Operand2_R);
-    inputs[input_count++] = g.UseRegisterOrStackPointer(m.left().node());
-    inputs[input_count++] = g.UseRegisterOrStackPointer(m.right().node());
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
   }
 
   if (has_result) {
@@ -1848,6 +1856,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
         return VisitShift(this, value, TryMatchLSR, cont);
       case IrOpcode::kWord32Ror:
         return VisitShift(this, value, TryMatchROR, cont);
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
       default:
         break;
     }
@@ -2488,7 +2499,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
 
 #define SIMD_VISIT_SHIFT_OP(Name)                     \
   void InstructionSelector::Visit##Name(Node* node) { \
-    VisitRRI(this, kArm##Name, node);                 \
+    VisitSimdShiftRRR(this, kArm##Name, node);        \
   }
 SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
 #undef SIMD_VISIT_SHIFT_OP
@@ -2502,6 +2513,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
 #undef SIMD_VISIT_BINOP
 #undef SIMD_BINOP_LIST
 
+void InstructionSelector::VisitF32x4Div(Node* node) {
+  ArmOperandGenerator g(this);
+  // Use fixed registers in the lower 8 Q-registers so we can directly access
+  // mapped registers S0-S31.
+  Emit(kArmF32x4Div, g.DefineAsFixed(node, q0),
+       g.UseFixed(node->InputAt(0), q0), g.UseFixed(node->InputAt(1), q1));
+}
+
 void InstructionSelector::VisitS128Select(Node* node) {
   ArmOperandGenerator g(this);
   Emit(kArmS128Select, g.DefineSameAsFirst(node),
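
All the SIMD shift cases in this file now follow one recipe, because NEON's only shift-by-register instruction is VSHL, which shifts left for positive lane values and right for negative ones: the scalar shift amount is duplicated into every lane of the temporary that VisitSimdShiftRRR allocates, and right shifts negate it first. A per-lane scalar model (a sketch assuming in-range shift amounts):

    #include <cstdint>

    // vshl semantics per lane: positive shifts left, negative shifts right.
    int32_t SshlLane(int32_t value, int32_t amount) {
      return amount >= 0 ? value << amount : value >> (-amount);
    }

    // A signed right shift by n is therefore emitted as vdup(n); vneg; vshl.
    int32_t ShrSLane(int32_t value, int32_t n) { return SshlLane(value, -n); }
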
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index c71a63cc3d96e5..66ca7f6cf0cf35 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -820,20 +820,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
-      if (result != kSuccess) return result;
-      unwinding_info_writer_.MarkBlockWillExit();
+      __ B(exit->label());
       break;
     }
     case kArchRet:
       AssembleReturn(instr->InputAt(0));
       break;
-    case kArchStackPointer:
-      __ mov(i.OutputRegister(), sp);
-      break;
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       break;
@@ -844,6 +838,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ mov(i.OutputRegister(), fp);
       }
       break;
+    case kArchStackPointerGreaterThan: {
+      constexpr size_t kValueIndex = 0;
+      DCHECK(instr->InputAt(kValueIndex)->IsRegister());
+      __ Cmp(sp, i.InputRegister(kValueIndex));
+      break;
+    }
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                            i.InputDoubleRegister(0), DetermineStubCallMode());
@@ -1598,12 +1598,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ DecompressAnyTagged(i.OutputRegister(), i.InputRegister(0));
       break;
     }
-    case kArm64CompressSigned:   // Fall through.
-    case kArm64CompressPointer:  // Fall through.
-    case kArm64CompressAny: {
-      __ Uxtw(i.OutputRegister(), i.InputRegister(0));
-      break;
-    }
     case kArm64LdrS:
       __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
       break;
@@ -1780,6 +1774,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
              i.InputSimd128Register(1).V##FORMAT()); \
     break;
 
+    case kArm64F64x2Splat: {
+      __ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
+      break;
+    }
+    case kArm64F64x2ExtractLane: {
+      __ Mov(i.OutputSimd128Register().D(), i.InputSimd128Register(0).V2D(),
+             i.InputInt8(1));
+      break;
+    }
+    case kArm64F64x2ReplaceLane: {
+      VRegister dst = i.OutputSimd128Register().V2D(),
+                src1 = i.InputSimd128Register(0).V2D();
+      if (!dst.is(src1)) {
+        __ Mov(dst, src1);
+      }
+      __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0);
+      break;
+    }
+      SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
+      SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Div, Fdiv, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Min, Fmin, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Max, Fmax, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Eq, Fcmeq, 2D);
+    case kArm64F64x2Ne: {
+      VRegister dst = i.OutputSimd128Register().V2D();
+      __ Fcmeq(dst, i.InputSimd128Register(0).V2D(),
+               i.InputSimd128Register(1).V2D());
+      __ Mvn(dst, dst);
+      break;
+    }
+    case kArm64F64x2Lt: {
+      __ Fcmgt(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+               i.InputSimd128Register(0).V2D());
+      break;
+    }
+    case kArm64F64x2Le: {
+      __ Fcmge(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+               i.InputSimd128Register(0).V2D());
+      break;
+    }
     case kArm64F32x4Splat: {
       __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
       break;
@@ -1808,6 +1846,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_BINOP_CASE(kArm64F32x4AddHoriz, Faddp, 4S);
       SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
       SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
+      SIMD_BINOP_CASE(kArm64F32x4Div, Fdiv, 4S);
       SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
       SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
       SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
@@ -1828,6 +1867,62 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                i.InputSimd128Register(0).V4S());
       break;
     }
+    case kArm64I64x2Splat: {
+      __ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
+      break;
+    }
+    case kArm64I64x2ExtractLane: {
+      __ Mov(i.OutputRegister64(), i.InputSimd128Register(0).V2D(),
+             i.InputInt8(1));
+      break;
+    }
+    case kArm64I64x2ReplaceLane: {
+      VRegister dst = i.OutputSimd128Register().V2D(),
+                src1 = i.InputSimd128Register(0).V2D();
+      if (!dst.is(src1)) {
+        __ Mov(dst, src1);
+      }
+      __ Mov(dst, i.InputInt8(1), i.InputRegister64(2));
+      break;
+    }
+      SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
+    case kArm64I64x2Shl: {
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V2D(), i.InputRegister64(1));
+      __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
+              tmp.V2D());
+      break;
+    }
+    case kArm64I64x2ShrS: {
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V2D(), i.InputRegister64(1));
+      __ Neg(tmp.V2D(), tmp.V2D());
+      __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
+              tmp.V2D());
+      break;
+    }
+      SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D);
+      SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D);
+      SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
+    case kArm64I64x2Ne: {
+      VRegister dst = i.OutputSimd128Register().V2D();
+      __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
+              i.InputSimd128Register(1).V2D());
+      __ Mvn(dst, dst);
+      break;
+    }
+      SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
+      SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
+    case kArm64I64x2ShrU: {
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V2D(), i.InputRegister64(1));
+      __ Neg(tmp.V2D(), tmp.V2D());
+      __ Ushl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
+              tmp.V2D());
+      break;
+    }
+      SIMD_BINOP_CASE(kArm64I64x2GtU, Cmhi, 2D);
+      SIMD_BINOP_CASE(kArm64I64x2GeU, Cmhs, 2D);
     case kArm64I32x4Splat: {
       __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
       break;
@@ -1851,13 +1946,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8High, Sxtl2, 4S, 8H);
       SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
     case kArm64I32x4Shl: {
-      __ Shl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
-             i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V4S(), i.InputRegister32(1));
+      __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+              tmp.V4S());
       break;
     }
     case kArm64I32x4ShrS: {
-      __ Sshr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
-              i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V4S(), i.InputRegister32(1));
+      __ Neg(tmp.V4S(), tmp.V4S());
+      __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+              tmp.V4S());
       break;
     }
       SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
@@ -1880,8 +1980,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8Low, Uxtl, 4S, 4H);
       SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
     case kArm64I32x4ShrU: {
-      __ Ushr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
-              i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V4S(), i.InputRegister32(1));
+      __ Neg(tmp.V4S(), tmp.V4S());
+      __ Ushl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+              tmp.V4S());
       break;
     }
       SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
@@ -1910,13 +2013,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16High, Sxtl2, 8H, 16B);
       SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
     case kArm64I16x8Shl: {
-      __ Shl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
-             i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V8H(), i.InputRegister32(1));
+      __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+              tmp.V8H());
       break;
     }
     case kArm64I16x8ShrS: {
-      __ Sshr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
-              i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V8H(), i.InputRegister32(1));
+      __ Neg(tmp.V8H(), tmp.V8H());
+      __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+              tmp.V8H());
       break;
     }
     case kArm64I16x8SConvertI32x4: {
@@ -1961,8 +2069,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArm64I16x8ShrU: {
-      __ Ushr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
-              i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V8H(), i.InputRegister32(1));
+      __ Neg(tmp.V8H(), tmp.V8H());
+      __ Ushl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+              tmp.V8H());
       break;
     }
     case kArm64I16x8UConvertI32x4: {
@@ -2005,13 +2116,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
       SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
     case kArm64I8x16Shl: {
-      __ Shl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
-             i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V16B(), i.InputRegister32(1));
+      __ Sshl(i.OutputSimd128Register().V16B(),
+              i.InputSimd128Register(0).V16B(), tmp.V16B());
       break;
     }
     case kArm64I8x16ShrS: {
-      __ Sshr(i.OutputSimd128Register().V16B(),
-              i.InputSimd128Register(0).V16B(), i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V16B(), i.InputRegister32(1));
+      __ Neg(tmp.V16B(), tmp.V16B());
+      __ Sshl(i.OutputSimd128Register().V16B(),
+              i.InputSimd128Register(0).V16B(), tmp.V16B());
       break;
     }
     case kArm64I8x16SConvertI16x8: {
@@ -2046,8 +2162,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
       SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
     case kArm64I8x16ShrU: {
-      __ Ushr(i.OutputSimd128Register().V16B(),
-              i.InputSimd128Register(0).V16B(), i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V16B(), i.InputRegister32(1));
+      __ Neg(tmp.V16B(), tmp.V16B());
+      __ Ushl(i.OutputSimd128Register().V16B(),
+              i.InputSimd128Register(0).V16B(), tmp.V16B());
       break;
     }
     case kArm64I8x16UConvertI16x8: {
@@ -2192,7 +2311,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
       SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
       SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
+    case kArm64S1x2AllTrue: {
+      UseScratchRegisterScope scope(tasm());
+      VRegister temp1 = scope.AcquireV(kFormat2D);
+      VRegister temp2 = scope.AcquireV(kFormatS);
 
+      __ Cmeq(temp1, i.InputSimd128Register(0).V2D(), 0);
+      __ Umaxv(temp2, temp1.V4S());
+      __ Umov(i.OutputRegister32(), temp2, 0);
+      __ Add(i.OutputRegister32(), i.OutputRegister32(), 1);
+      break;
+    }
 #define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT)     \
   case Op: {                                               \
     UseScratchRegisterScope scope(tasm());                 \
@@ -2203,6 +2332,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     __ Cset(i.OutputRegister32(), ne);                     \
     break;                                                 \
   }
+      // For AnyTrue the format does not matter; umaxv does not support 2D.
+      SIMD_REDUCE_OP_CASE(kArm64S1x2AnyTrue, Umaxv, kFormatS, 4S);
       SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
       SIMD_REDUCE_OP_CASE(kArm64S1x4AllTrue, Uminv, kFormatS, 4S);
       SIMD_REDUCE_OP_CASE(kArm64S1x8AnyTrue, Umaxv, kFormatH, 8H);
@@ -2669,6 +2800,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {
+  __ ForceConstantPoolEmissionWithoutJump();
+  __ CheckVeneerPool(false, false, deopt_count * Deoptimizer::kDeoptExitSize);
+}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   Arm64OperandConverter g(this, nullptr);
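
The kArm64S1x2AllTrue sequence is the least obvious of the new SIMD cases: Cmeq against zero produces an all-ones lane wherever an input lane is zero, Umaxv over the 4S view (Umaxv does not support 2D) collapses that to 0xFFFFFFFF if any input lane was zero, and the final Add of 1 wraps that to 0, leaving 1 exactly when every lane was non-zero. The same computation in scalar form, as a sketch:

    #include <cstdint>

    uint32_t AllTrue2x64(uint64_t lane0, uint64_t lane1) {
      // Cmeq(x, 0) then Umaxv: 0xFFFFFFFF if any lane is zero, else 0.
      uint32_t any_lane_zero = (lane0 == 0 || lane1 == 0) ? 0xFFFFFFFFu : 0u;
      // Add(1): unsigned wraparound turns 0xFFFFFFFF into 0.
      return any_lane_zero + 1;
    }
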
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 1c4c0e333542c5..4b56e402c15efe 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -168,11 +168,23 @@ namespace compiler {
   V(Arm64DecompressSigned)                  \
   V(Arm64DecompressPointer)                 \
   V(Arm64DecompressAny)                     \
-  V(Arm64CompressSigned)                    \
-  V(Arm64CompressPointer)                   \
-  V(Arm64CompressAny)                       \
   V(Arm64DmbIsh)                            \
   V(Arm64DsbIsb)                            \
+  V(Arm64F64x2Splat)                        \
+  V(Arm64F64x2ExtractLane)                  \
+  V(Arm64F64x2ReplaceLane)                  \
+  V(Arm64F64x2Abs)                          \
+  V(Arm64F64x2Neg)                          \
+  V(Arm64F64x2Add)                          \
+  V(Arm64F64x2Sub)                          \
+  V(Arm64F64x2Mul)                          \
+  V(Arm64F64x2Div)                          \
+  V(Arm64F64x2Min)                          \
+  V(Arm64F64x2Max)                          \
+  V(Arm64F64x2Eq)                           \
+  V(Arm64F64x2Ne)                           \
+  V(Arm64F64x2Lt)                           \
+  V(Arm64F64x2Le)                           \
   V(Arm64F32x4Splat)                        \
   V(Arm64F32x4ExtractLane)                  \
   V(Arm64F32x4ReplaceLane)                  \
@@ -186,12 +198,28 @@ namespace compiler {
   V(Arm64F32x4AddHoriz)                     \
   V(Arm64F32x4Sub)                          \
   V(Arm64F32x4Mul)                          \
+  V(Arm64F32x4Div)                          \
   V(Arm64F32x4Min)                          \
   V(Arm64F32x4Max)                          \
   V(Arm64F32x4Eq)                           \
   V(Arm64F32x4Ne)                           \
   V(Arm64F32x4Lt)                           \
   V(Arm64F32x4Le)                           \
+  V(Arm64I64x2Splat)                        \
+  V(Arm64I64x2ExtractLane)                  \
+  V(Arm64I64x2ReplaceLane)                  \
+  V(Arm64I64x2Neg)                          \
+  V(Arm64I64x2Shl)                          \
+  V(Arm64I64x2ShrS)                         \
+  V(Arm64I64x2Add)                          \
+  V(Arm64I64x2Sub)                          \
+  V(Arm64I64x2Eq)                           \
+  V(Arm64I64x2Ne)                           \
+  V(Arm64I64x2GtS)                          \
+  V(Arm64I64x2GeS)                          \
+  V(Arm64I64x2ShrU)                         \
+  V(Arm64I64x2GtU)                          \
+  V(Arm64I64x2GeU)                          \
   V(Arm64I32x4Splat)                        \
   V(Arm64I32x4ExtractLane)                  \
   V(Arm64I32x4ReplaceLane)                  \
@@ -310,6 +338,8 @@ namespace compiler {
   V(Arm64S8x8Reverse)                       \
   V(Arm64S8x4Reverse)                       \
   V(Arm64S8x2Reverse)                       \
+  V(Arm64S1x2AnyTrue)                       \
+  V(Arm64S1x2AllTrue)                       \
   V(Arm64S1x4AnyTrue)                       \
   V(Arm64S1x4AllTrue)                       \
   V(Arm64S1x8AnyTrue)                       \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 8344887ec2feda..7cba2d50ea0059 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -137,6 +137,21 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64Float64MoveU64:
     case kArm64U64MoveFloat64:
     case kArm64Float64SilenceNaN:
+    case kArm64F64x2Splat:
+    case kArm64F64x2ExtractLane:
+    case kArm64F64x2ReplaceLane:
+    case kArm64F64x2Abs:
+    case kArm64F64x2Neg:
+    case kArm64F64x2Add:
+    case kArm64F64x2Sub:
+    case kArm64F64x2Mul:
+    case kArm64F64x2Div:
+    case kArm64F64x2Min:
+    case kArm64F64x2Max:
+    case kArm64F64x2Eq:
+    case kArm64F64x2Ne:
+    case kArm64F64x2Lt:
+    case kArm64F64x2Le:
     case kArm64F32x4Splat:
     case kArm64F32x4ExtractLane:
     case kArm64F32x4ReplaceLane:
@@ -150,12 +165,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64F32x4AddHoriz:
     case kArm64F32x4Sub:
     case kArm64F32x4Mul:
+    case kArm64F32x4Div:
     case kArm64F32x4Min:
     case kArm64F32x4Max:
     case kArm64F32x4Eq:
     case kArm64F32x4Ne:
     case kArm64F32x4Lt:
     case kArm64F32x4Le:
+    case kArm64I64x2Splat:
+    case kArm64I64x2ExtractLane:
+    case kArm64I64x2ReplaceLane:
+    case kArm64I64x2Neg:
+    case kArm64I64x2Shl:
+    case kArm64I64x2ShrS:
+    case kArm64I64x2Add:
+    case kArm64I64x2Sub:
+    case kArm64I64x2Eq:
+    case kArm64I64x2Ne:
+    case kArm64I64x2GtS:
+    case kArm64I64x2GeS:
+    case kArm64I64x2ShrU:
+    case kArm64I64x2GtU:
+    case kArm64I64x2GeU:
     case kArm64I32x4Splat:
     case kArm64I32x4ExtractLane:
     case kArm64I32x4ReplaceLane:
@@ -274,6 +305,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64S8x8Reverse:
     case kArm64S8x4Reverse:
     case kArm64S8x2Reverse:
+    case kArm64S1x2AnyTrue:
+    case kArm64S1x2AllTrue:
     case kArm64S1x4AnyTrue:
     case kArm64S1x4AllTrue:
     case kArm64S1x8AnyTrue:
@@ -287,9 +320,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64DecompressSigned:
     case kArm64DecompressPointer:
     case kArm64DecompressAny:
-    case kArm64CompressSigned:
-    case kArm64CompressPointer:
-    case kArm64CompressAny:
       return kNoOpcodeFlags;
 
     case kArm64LdrS:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index a953e35a669ffb..4abbd68c49a4a5 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -48,16 +48,6 @@ class Arm64OperandGenerator final : public OperandGenerator {
     return UseRegister(node);
   }
 
-  // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
-  // register.
-  InstructionOperand UseRegisterOrStackPointer(Node* node, bool sp_allowed) {
-    if (sp_allowed && node->opcode() == IrOpcode::kLoadStackPointer)
-      return LocationOperand(LocationOperand::EXPLICIT,
-                             LocationOperand::REGISTER,
-                             MachineRepresentation::kWord64, sp.code());
-    return UseRegister(node);
-  }
-
   // Use the provided node if it has the required value, or create a
   // TempImmediate otherwise.
   InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
@@ -160,6 +150,15 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
                  g.UseRegister(node->InputAt(1)));
 }
 
+void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
+                       Node* node) {
+  Arm64OperandGenerator g(selector);
+  InstructionOperand temps[] = {g.TempSimd128Register()};
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
   Arm64OperandGenerator g(selector);
   int32_t imm = OpParameter<int32_t>(node->op());
@@ -554,23 +553,21 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
   // is used when we merge a conversion into the load.
   outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
 
-  if (selector->CanAddressRelativeToRootsRegister()) {
-    ExternalReferenceMatcher m(base);
-    if (m.HasValue() && g.IsIntegerConstant(index)) {
-      ptrdiff_t const delta =
-          g.GetIntegerConstantValue(index) +
-          TurboAssemblerBase::RootRegisterOffsetForExternalReference(
-              selector->isolate(), m.Value());
-      input_count = 1;
-      // Check that the delta is a 32-bit integer due to the limitations of
-      // immediate operands.
-      if (is_int32(delta)) {
-        inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
-        opcode |= AddressingModeField::encode(kMode_Root);
-        selector->Emit(opcode, arraysize(outputs), outputs, input_count,
-                       inputs);
-        return;
-      }
+  ExternalReferenceMatcher m(base);
+  if (m.HasValue() && g.IsIntegerConstant(index) &&
+      selector->CanAddressRelativeToRootsRegister(m.Value())) {
+    ptrdiff_t const delta =
+        g.GetIntegerConstantValue(index) +
+        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+            selector->isolate(), m.Value());
+    input_count = 1;
+    // Check that the delta is a 32-bit integer due to the limitations of
+    // immediate operands.
+    if (is_int32(delta)) {
+      inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
+      opcode |= AddressingModeField::encode(kMode_Root);
+      selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
+      return;
     }
   }
 
@@ -670,7 +667,8 @@ void InstructionSelector::VisitStore(Node* node) {
   MachineRepresentation rep = store_rep.representation();
 
   // TODO(arm64): I guess this could be done in a better way.
-  if (write_barrier_kind != kNoWriteBarrier) {
+  if (write_barrier_kind != kNoWriteBarrier &&
+      V8_LIKELY(!FLAG_disable_write_barriers)) {
     DCHECK(CanBeTaggedOrCompressedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
@@ -1004,6 +1002,15 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
   VisitRRO(this, kArm64Lsl, node, kShift64Imm);
 }
 
+void InstructionSelector::VisitStackPointerGreaterThan(
+    Node* node, FlagsContinuation* cont) {
+  Node* const value = node->InputAt(0);
+  InstructionCode opcode = kArchStackPointerGreaterThan;
+
+  Arm64OperandGenerator g(this);
+  EmitWithContinuation(opcode, g.UseRegister(value), cont);
+}
+
 namespace {
 
 bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
@@ -1625,23 +1632,23 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
 }
 
 void InstructionSelector::VisitChangeTaggedToCompressed(Node* node) {
-  Arm64OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kArm64CompressAny, g.DefineAsRegister(node), g.UseRegister(value));
+  // The top 32 bits in the 64-bit register will be undefined, and
+  // must not be used by a dependent node.
+  EmitIdentity(node);
 }
 
 void InstructionSelector::VisitChangeTaggedPointerToCompressedPointer(
     Node* node) {
-  Arm64OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kArm64CompressPointer, g.DefineAsRegister(node), g.UseRegister(value));
+  // The top 32 bits in the 64-bit register will be undefined, and
+  // must not be used by a dependent node.
+  EmitIdentity(node);
 }
 
 void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
     Node* node) {
-  Arm64OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kArm64CompressSigned, g.DefineAsRegister(node), g.UseRegister(value));
+  // The top 32 bits in the 64-bit register will be undefined, and
+  // must not be used by a dependent node.
+  EmitIdentity(node);
 }
 
 void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
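
With the dedicated kArm64Compress* opcodes gone, all three tagged-to-compressed conversions collapse to EmitIdentity: compression is effectively a truncation, the upper 32 bits of the register become undefined, and the comments make explicit that no consumer may read them. In scalar terms (a sketch):

    #include <cstdint>

    uint32_t CompressTagged(uint64_t tagged_word) {
      return static_cast<uint32_t>(tagged_word);  // truncation; no uxtw needed
    }
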
@@ -1826,26 +1833,25 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont,
-                      bool commutative, ImmediateMode immediate_mode) {
+                      ImmediateMode immediate_mode) {
   Arm64OperandGenerator g(selector);
+
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  if (right->opcode() == IrOpcode::kLoadStackPointer ||
+  // If one of the two inputs is an immediate, make sure it's on the right.
+  if (!g.CanBeImmediate(right, immediate_mode) &&
       g.CanBeImmediate(left, immediate_mode)) {
-    if (!commutative) cont->Commute();
+    cont->Commute();
     std::swap(left, right);
   }
 
-  // Match immediates on left or right side of comparison.
   if (g.CanBeImmediate(right, immediate_mode)) {
-    VisitCompare(selector, opcode,
-                 g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp),
-                 g.UseImmediate(right), cont);
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
   } else {
-    VisitCompare(selector, opcode,
-                 g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp),
-                 g.UseRegister(right), cont);
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
   }
 }
 
@@ -2370,8 +2376,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
         if (m.right().Is(0)) {
           Node* const left = m.left().node();
           if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) {
-            return VisitWordCompare(this, left, kArm64Tst, cont, true,
-                                    kLogical64Imm);
+            return VisitWordCompare(this, left, kArm64Tst, cont, kLogical64Imm);
           }
           // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
           if ((cont->IsBranch() || cont->IsDeoptimize()) &&
@@ -2381,25 +2386,20 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
             return;
           }
         }
-        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
-                                kArithmeticImm);
+        return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm);
       }
       case IrOpcode::kInt64LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
-                                kArithmeticImm);
+        return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm);
       case IrOpcode::kInt64LessThanOrEqual:
         cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
-                                kArithmeticImm);
+        return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm);
       case IrOpcode::kUint64LessThan:
         cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
-                                kArithmeticImm);
+        return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm);
       case IrOpcode::kUint64LessThanOrEqual:
         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
-                                kArithmeticImm);
+        return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm);
       case IrOpcode::kFloat32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitFloat32Compare(this, value, cont);
@@ -2461,16 +2461,16 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
         }
         break;
       case IrOpcode::kInt32Add:
-        return VisitWordCompare(this, value, kArm64Cmn32, cont, true,
-                                kArithmeticImm);
+        return VisitWordCompare(this, value, kArm64Cmn32, cont, kArithmeticImm);
       case IrOpcode::kInt32Sub:
         return VisitWord32Compare(this, value, cont);
       case IrOpcode::kWord32And:
-        return VisitWordCompare(this, value, kArm64Tst32, cont, true,
-                                kLogical32Imm);
+        return VisitWordCompare(this, value, kArm64Tst32, cont, kLogical32Imm);
       case IrOpcode::kWord64And:
-        return VisitWordCompare(this, value, kArm64Tst, cont, true,
-                                kLogical64Imm);
+        return VisitWordCompare(this, value, kArm64Tst, cont, kLogical64Imm);
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
       default:
         break;
     }
@@ -2530,7 +2530,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
         case IrOpcode::kWord32And:
           return VisitWord32Compare(this, node, &cont);
         case IrOpcode::kInt32Sub:
-          return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
+          return VisitWordCompare(this, value, kArm64Cmp32, &cont,
                                   kArithmeticImm);
         case IrOpcode::kWord32Equal: {
           // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
@@ -2587,15 +2587,14 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
     if (CanCover(user, value)) {
       switch (value->opcode()) {
         case IrOpcode::kWord64And:
-          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
-                                  kLogical64Imm);
+          return VisitWordCompare(this, value, kArm64Tst, &cont, kLogical64Imm);
         default:
           break;
       }
       return VisitWord64Test(this, value, &cont);
     }
   }
-  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+  VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm);
 }
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
@@ -2653,24 +2652,24 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
 
 void InstructionSelector::VisitInt64LessThan(Node* node) {
   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
-  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+  VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm);
 }
 
 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
   FlagsContinuation cont =
       FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
-  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+  VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm);
 }
 
 void InstructionSelector::VisitUint64LessThan(Node* node) {
   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
-  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+  VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm);
 }
 
 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
   FlagsContinuation cont =
       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
-  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+  VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm);
 }
 
 void InstructionSelector::VisitFloat32Neg(Node* node) {
@@ -3045,18 +3044,23 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
 }
 
 #define SIMD_TYPE_LIST(V) \
+  V(F64x2)                \
   V(F32x4)                \
+  V(I64x2)                \
   V(I32x4)                \
   V(I16x8)                \
   V(I8x16)
 
 #define SIMD_UNOP_LIST(V)                                 \
+  V(F64x2Abs, kArm64F64x2Abs)                             \
+  V(F64x2Neg, kArm64F64x2Neg)                             \
   V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4)         \
   V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4)         \
   V(F32x4Abs, kArm64F32x4Abs)                             \
   V(F32x4Neg, kArm64F32x4Neg)                             \
   V(F32x4RecipApprox, kArm64F32x4RecipApprox)             \
   V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox)     \
+  V(I64x2Neg, kArm64I64x2Neg)                             \
   V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4)         \
   V(I32x4SConvertI16x8Low, kArm64I32x4SConvertI16x8Low)   \
   V(I32x4SConvertI16x8High, kArm64I32x4SConvertI16x8High) \
@@ -3071,6 +3075,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
   V(I16x8UConvertI8x16High, kArm64I16x8UConvertI8x16High) \
   V(I8x16Neg, kArm64I8x16Neg)                             \
   V(S128Not, kArm64S128Not)                               \
+  V(S1x2AnyTrue, kArm64S1x2AnyTrue)                       \
+  V(S1x2AllTrue, kArm64S1x2AllTrue)                       \
   V(S1x4AnyTrue, kArm64S1x4AnyTrue)                       \
   V(S1x4AllTrue, kArm64S1x4AllTrue)                       \
   V(S1x8AnyTrue, kArm64S1x8AnyTrue)                       \
@@ -3079,6 +3085,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
   V(S1x16AllTrue, kArm64S1x16AllTrue)
 
 #define SIMD_SHIFT_OP_LIST(V) \
+  V(I64x2Shl)                 \
+  V(I64x2ShrS)                \
+  V(I64x2ShrU)                \
   V(I32x4Shl)                 \
   V(I32x4ShrS)                \
   V(I32x4ShrU)                \
@@ -3090,16 +3099,35 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
   V(I8x16ShrU)
 
 #define SIMD_BINOP_LIST(V)                        \
+  V(F64x2Add, kArm64F64x2Add)                     \
+  V(F64x2Sub, kArm64F64x2Sub)                     \
+  V(F64x2Mul, kArm64F64x2Mul)                     \
+  V(F64x2Div, kArm64F64x2Div)                     \
+  V(F64x2Min, kArm64F64x2Min)                     \
+  V(F64x2Max, kArm64F64x2Max)                     \
+  V(F64x2Eq, kArm64F64x2Eq)                       \
+  V(F64x2Ne, kArm64F64x2Ne)                       \
+  V(F64x2Lt, kArm64F64x2Lt)                       \
+  V(F64x2Le, kArm64F64x2Le)                       \
   V(F32x4Add, kArm64F32x4Add)                     \
   V(F32x4AddHoriz, kArm64F32x4AddHoriz)           \
   V(F32x4Sub, kArm64F32x4Sub)                     \
   V(F32x4Mul, kArm64F32x4Mul)                     \
+  V(F32x4Div, kArm64F32x4Div)                     \
   V(F32x4Min, kArm64F32x4Min)                     \
   V(F32x4Max, kArm64F32x4Max)                     \
   V(F32x4Eq, kArm64F32x4Eq)                       \
   V(F32x4Ne, kArm64F32x4Ne)                       \
   V(F32x4Lt, kArm64F32x4Lt)                       \
   V(F32x4Le, kArm64F32x4Le)                       \
+  V(I64x2Add, kArm64I64x2Add)                     \
+  V(I64x2Sub, kArm64I64x2Sub)                     \
+  V(I64x2Eq, kArm64I64x2Eq)                       \
+  V(I64x2Ne, kArm64I64x2Ne)                       \
+  V(I64x2GtS, kArm64I64x2GtS)                     \
+  V(I64x2GeS, kArm64I64x2GeS)                     \
+  V(I64x2GtU, kArm64I64x2GtU)                     \
+  V(I64x2GeU, kArm64I64x2GeU)                     \
   V(I32x4Add, kArm64I32x4Add)                     \
   V(I32x4AddHoriz, kArm64I32x4AddHoriz)           \
   V(I32x4Sub, kArm64I32x4Sub)                     \
@@ -3194,7 +3222,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
 
 #define SIMD_VISIT_SHIFT_OP(Name)                     \
   void InstructionSelector::Visit##Name(Node* node) { \
-    VisitRRI(this, kArm64##Name, node);               \
+    VisitSimdShiftRRR(this, kArm64##Name, node);      \
   }
 SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
 #undef SIMD_VISIT_SHIFT_OP
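The SIMD shift opcodes listed above now take their shift amount from a register, so the visitor switches from the immediate form (VisitRRI) to VisitSimdShiftRRR. A hedged lane-wise model, assuming the wasm SIMD semantics these opcodes implement, where the count is taken modulo the lane width:

  #include <cstdint>

  // Sketch: I32x4Shl with a runtime shift count. Every lane shifts by
  // the same amount; the count wraps at the lane width (assumption based
  // on the wasm SIMD proposal).
  void I32x4Shl(uint32_t lanes[4], uint32_t count) {
    count &= 31;
    for (int i = 0; i < 4; ++i) lanes[i] <<= count;
  }
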
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index 75f8e702038ae6..2bfb009980dcf8 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -116,6 +116,10 @@ class InstructionOperandConverter {
     return ToSimd128Register(instr_->Output());
   }
 
+  Simd128Register TempSimd128Register(size_t index) {
+    return ToSimd128Register(instr_->TempAt(index));
+  }
+
   // -- Conversions for operands -----------------------------------------------
 
   Label* ToLabel(InstructionOperand* op) {
@@ -176,20 +180,55 @@ class InstructionOperandConverter {
   Instruction* instr_;
 };
 
-// Eager deoptimization exit.
+// Deoptimization exit.
 class DeoptimizationExit : public ZoneObject {
  public:
-  explicit DeoptimizationExit(int deoptimization_id, SourcePosition pos)
-      : deoptimization_id_(deoptimization_id), pos_(pos) {}
-
-  int deoptimization_id() const { return deoptimization_id_; }
-  Label* label() { return &label_; }
+  explicit DeoptimizationExit(SourcePosition pos, BailoutId bailout_id,
+                              int translation_id, int pc_offset,
+                              DeoptimizeKind kind, DeoptimizeReason reason)
+      : deoptimization_id_(kNoDeoptIndex),
+        pos_(pos),
+        bailout_id_(bailout_id),
+        translation_id_(translation_id),
+        pc_offset_(pc_offset),
+        kind_(kind),
+        reason_(reason),
+        emitted_(false) {}
+
+  bool has_deoptimization_id() const {
+    return deoptimization_id_ != kNoDeoptIndex;
+  }
+  int deoptimization_id() const {
+    DCHECK(has_deoptimization_id());
+    return deoptimization_id_;
+  }
+  void set_deoptimization_id(int deoptimization_id) {
+    deoptimization_id_ = deoptimization_id;
+  }
   SourcePosition pos() const { return pos_; }
+  Label* label() { return &label_; }
+  BailoutId bailout_id() const { return bailout_id_; }
+  int translation_id() const { return translation_id_; }
+  int pc_offset() const { return pc_offset_; }
+  DeoptimizeKind kind() const { return kind_; }
+  DeoptimizeReason reason() const { return reason_; }
+  // Returns whether the deopt exit has already been emitted. Most deopt exits
+  // are emitted contiguously at the end of the code, but unconditional deopt
+  // exits (kArchDeoptimize) may be inlined where they are encountered.
+  bool emitted() const { return emitted_; }
+  void set_emitted() { emitted_ = true; }
 
  private:
-  int const deoptimization_id_;
+  static const int kNoDeoptIndex = kMaxInt16 + 1;
+  int deoptimization_id_;
+  const SourcePosition pos_;
   Label label_;
-  SourcePosition const pos_;
+  const BailoutId bailout_id_;
+  const int translation_id_;
+  const int pc_offset_;
+  const DeoptimizeKind kind_;
+  const DeoptimizeReason reason_;
+  bool emitted_;
 };
 
 // Generator for out-of-line code that is emitted after the main code is done.
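DeoptimizationExit now carries all the metadata the deleted DeoptimizationState used to hold, plus two pieces of bookkeeping: an emitted_ flag so inlined kArchDeoptimize exits are not emitted a second time, and a deoptimization id that may be assigned late. A condensed sketch of the id protocol implied by the accessors above (the sentinel is simplified here):

  // Sketch: ids start out unassigned; on targets with fixed-size deopt
  // exits they are handed out in emission order, otherwise at creation.
  struct ExitIdSketch {
    static const int kNoDeoptIndex = -1;  // V8 uses kMaxInt16 + 1
    int id = kNoDeoptIndex;
    bool has_id() const { return id != kNoDeoptIndex; }
    void assign(int next_id) { id = next_id; }
  };
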
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 9ce92dadaa9469..e7702bcdf625d2 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -47,7 +47,8 @@ CodeGenerator::CodeGenerator(
     Isolate* isolate, base::Optional<OsrHelper> osr_helper,
     int start_source_position, JumpOptimizationInfo* jump_opt,
     PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
-    int32_t builtin_index, std::unique_ptr<AssemblerBuffer> buffer)
+    int32_t builtin_index, size_t max_unoptimized_frame_height,
+    std::unique_ptr<AssemblerBuffer> buffer)
     : zone_(codegen_zone),
       isolate_(isolate),
       frame_access_state_(nullptr),
@@ -64,9 +65,9 @@ CodeGenerator::CodeGenerator(
       safepoints_(zone()),
       handlers_(zone()),
       deoptimization_exits_(zone()),
-      deoptimization_states_(zone()),
       deoptimization_literals_(zone()),
       translations_(zone()),
+      max_unoptimized_frame_height_(max_unoptimized_frame_height),
       caller_registers_saved_(false),
       jump_tables_(nullptr),
       ools_(nullptr),
@@ -91,6 +92,7 @@ CodeGenerator::CodeGenerator(
       code_kind == Code::WASM_TO_CAPI_FUNCTION ||
       code_kind == Code::WASM_TO_JS_FUNCTION ||
       code_kind == Code::WASM_INTERPRETER_ENTRY ||
+      code_kind == Code::JS_TO_WASM_FUNCTION ||
       (Builtins::IsBuiltinId(builtin_index) &&
        Builtins::IsWasmRuntimeStub(builtin_index))) {
     tasm_.set_abort_hard(true);
@@ -114,20 +116,22 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, SourcePosition pos) {
+    DeoptimizationExit* exit) {
+  int deoptimization_id = exit->deoptimization_id();
   if (deoptimization_id > Deoptimizer::kMaxNumberOfEntries) {
     return kTooManyDeoptimizationBailouts;
   }
 
-  DeoptimizeKind deopt_kind = GetDeoptimizationKind(deoptimization_id);
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
+  DeoptimizeKind deopt_kind = exit->kind();
+  DeoptimizeReason deoptimization_reason = exit->reason();
   Address deopt_entry =
       Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
   if (info()->is_source_positions_enabled()) {
-    tasm()->RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+    tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(),
+                              deoptimization_id);
   }
   tasm()->CallForDeoptimization(deopt_entry, deoptimization_id);
+  exit->set_emitted();
   return kSuccess;
 }
 
@@ -146,7 +150,7 @@ void CodeGenerator::AssembleCode() {
   if (info->is_source_positions_enabled()) {
     AssembleSourcePosition(start_source_position());
   }
-
+  offsets_info_.code_start_register_check = tasm()->pc_offset();
   // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
   if (FLAG_debug_code && (info->code_kind() == Code::OPTIMIZED_FUNCTION ||
                           info->code_kind() == Code::BYTECODE_HANDLER)) {
@@ -154,6 +158,7 @@ void CodeGenerator::AssembleCode() {
     AssembleCodeStartRegisterCheck();
   }
 
+  offsets_info_.deopt_check = tasm()->pc_offset();
   // We want to bailout only from JS functions, which are the only ones
   // that are optimized.
   if (info->IsOptimizing()) {
@@ -162,6 +167,7 @@ void CodeGenerator::AssembleCode() {
     BailoutIfDeoptimized();
   }
 
+  offsets_info_.init_poison = tasm()->pc_offset();
   InitializeSpeculationPoison();
 
   // Define deoptimization literals for all inlined functions.
@@ -191,10 +197,10 @@ void CodeGenerator::AssembleCode() {
 
   if (info->trace_turbo_json_enabled()) {
     block_starts_.assign(instructions()->instruction_blocks().size(), -1);
-    instr_starts_.assign(instructions()->instructions().size(), -1);
+    instr_starts_.assign(instructions()->instructions().size(), {});
   }
-
   // Assemble instructions in assembly order.
+  offsets_info_.blocks_start = tasm()->pc_offset();
   for (const InstructionBlock* block : instructions()->ao_blocks()) {
     // Align loop headers on vendor recommended boundaries.
     if (block->ShouldAlign() && !tasm()->jump_optimization_info()) {
@@ -252,6 +258,7 @@ void CodeGenerator::AssembleCode() {
   }
 
   // Assemble all out-of-line code.
+  offsets_info_.out_of_line_code = tasm()->pc_offset();
   if (ools_) {
     tasm()->RecordComment("-- Out of line code --");
     for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
@@ -266,28 +273,45 @@ void CodeGenerator::AssembleCode() {
   // The test regress/regress-259 is an example of where we need it.
   tasm()->nop();
 
+  // For some targets, we must make sure that constant and veneer pools are
+  // emitted before emitting the deoptimization exits.
+  PrepareForDeoptimizationExits(static_cast<int>(deoptimization_exits_.size()));
+
+  if (Deoptimizer::kSupportsFixedDeoptExitSize) {
+    deopt_exit_start_offset_ = tasm()->pc_offset();
+  }
+
   // Assemble deoptimization exits.
+  offsets_info_.deoptimization_exits = tasm()->pc_offset();
   int last_updated = 0;
   for (DeoptimizationExit* exit : deoptimization_exits_) {
+    if (exit->emitted()) continue;
+    if (Deoptimizer::kSupportsFixedDeoptExitSize) {
+      exit->set_deoptimization_id(next_deoptimization_id_++);
+    }
     tasm()->bind(exit->label());
-    int trampoline_pc = tasm()->pc_offset();
-    int deoptimization_id = exit->deoptimization_id();
-    DeoptimizationState* ds = deoptimization_states_[deoptimization_id];
 
-    if (ds->kind() == DeoptimizeKind::kLazy) {
+    // UpdateDeoptimizationInfo expects lazy deopts to be visited in pc_offset
+    // order, which is always the case since they are added to
+    // deoptimization_exits_ in that order.
+    if (exit->kind() == DeoptimizeKind::kLazy) {
+      int trampoline_pc = tasm()->pc_offset();
       last_updated = safepoints()->UpdateDeoptimizationInfo(
-          ds->pc_offset(), trampoline_pc, last_updated);
+          exit->pc_offset(), trampoline_pc, last_updated,
+          exit->deoptimization_id());
     }
-    result_ = AssembleDeoptimizerCall(deoptimization_id, exit->pos());
+    result_ = AssembleDeoptimizerCall(exit);
     if (result_ != kSuccess) return;
   }
 
+  offsets_info_.pools = tasm()->pc_offset();
   // TODO(jgruber): Move all inlined metadata generation into a new,
   // architecture-independent version of FinishCode. Currently, this includes
   // the safepoint table, handler table, constant pool, and code comments, in
   // that order.
   FinishCode();
 
+  offsets_info_.jump_tables = tasm()->pc_offset();
   // Emit the jump tables.
   if (jump_tables_) {
     tasm()->Align(kSystemPointerSize);
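The rewritten exit-emission loop above skips exits already inlined into the instruction stream, numbers the remaining ones in emission order when fixed-size exits are supported, and records deopt_exit_start_offset_ so a return address can later be mapped back to an exit arithmetically. A hedged sketch of that mapping, with kDeoptExitSize standing in for the target's per-exit size constant:

  #include <cstddef>
  #include <cstdint>

  // Sketch: with fixed-size, contiguously emitted exits, the deoptimizer
  // can recover the deoptimization id from a pc without a side table.
  // kDeoptExitSize is an assumed constant, not V8's actual value.
  int DeoptIdFromPc(uintptr_t pc, uintptr_t deopt_exit_start,
                    size_t kDeoptExitSize) {
    return static_cast<int>((pc - deopt_exit_start) / kDeoptExitSize);
  }
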
@@ -396,12 +420,12 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
   CodeDesc desc;
   tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   if (Builtins::IsBuiltinId(info_->builtin_index())) {
     isolate_->SetBuiltinUnwindData(info_->builtin_index(),
                                    tasm()->GetUnwindInfo());
   }
-#endif
+#endif  // V8_OS_WIN64
 
   if (unwinding_info_writer_.eh_frame_writer()) {
     unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
@@ -473,11 +497,7 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
 CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
     const InstructionBlock* block) {
   for (int i = block->code_start(); i < block->code_end(); ++i) {
-    if (info()->trace_turbo_json_enabled()) {
-      instr_starts_[i] = tasm()->pc_offset();
-    }
-    Instruction* instr = instructions()->InstructionAt(i);
-    CodeGenResult result = AssembleInstruction(instr, block);
+    CodeGenResult result = AssembleInstruction(i, block);
     if (result != kSuccess) return result;
   }
   return kSuccess;
@@ -631,7 +651,11 @@ RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
-    Instruction* instr, const InstructionBlock* block) {
+    int instruction_index, const InstructionBlock* block) {
+  Instruction* instr = instructions()->InstructionAt(instruction_index);
+  if (info()->trace_turbo_json_enabled()) {
+    instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset();
+  }
   int first_unused_stack_slot;
   FlagsMode mode = FlagsModeField::decode(instr->opcode());
   if (mode != kFlags_trap) {
@@ -649,10 +673,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
   if (instr->IsJump() && block->must_deconstruct_frame()) {
     AssembleDeconstructFrame();
   }
+  if (info()->trace_turbo_json_enabled()) {
+    instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset();
+  }
   // Assemble architecture-specific code for the instruction.
   CodeGenResult result = AssembleArchInstruction(instr);
   if (result != kSuccess) return result;
 
+  if (info()->trace_turbo_json_enabled()) {
+    instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset();
+  }
+
   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
   switch (mode) {
     case kFlags_branch:
@@ -801,7 +832,7 @@ Handle<PodArray<InliningPosition>> CreateInliningPositions(
 
 Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
   OptimizedCompilationInfo* info = this->info();
-  int deopt_count = static_cast<int>(deoptimization_states_.size());
+  int deopt_count = static_cast<int>(deoptimization_exits_.size());
   if (deopt_count == 0 && !info->is_osr()) {
     return DeoptimizationData::Empty(isolate());
   }
@@ -816,6 +847,8 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
       Smi::FromInt(static_cast<int>(inlined_function_count_)));
   data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
 
+  data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
+
   if (info->has_shared_info()) {
     data->SetSharedFunctionInfo(*info->shared_info());
   } else {
@@ -846,12 +879,13 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
 
   // Populate deoptimization entries.
   for (int i = 0; i < deopt_count; i++) {
-    DeoptimizationState* deoptimization_state = deoptimization_states_[i];
-    data->SetBytecodeOffset(i, deoptimization_state->bailout_id());
-    CHECK(deoptimization_state);
+    DeoptimizationExit* deoptimization_exit = deoptimization_exits_[i];
+    CHECK_NOT_NULL(deoptimization_exit);
+    DCHECK_EQ(i, deoptimization_exit->deoptimization_id());
+    data->SetBytecodeOffset(i, deoptimization_exit->bailout_id());
     data->SetTranslationIndex(
-        i, Smi::FromInt(deoptimization_state->translation_id()));
-    data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
+        i, Smi::FromInt(deoptimization_exit->translation_id()));
+    data->SetPc(i, Smi::FromInt(deoptimization_exit->pc_offset()));
   }
 
   return data;
@@ -885,13 +919,8 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
     FrameStateDescriptor* descriptor =
         GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
     int pc_offset = tasm()->pc_offset();
-    int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
-                                          descriptor->state_combine());
-
-    DeoptimizationExit* const exit = new (zone())
-        DeoptimizationExit(deopt_state_id, current_source_position_);
-    deoptimization_exits_.push_back(exit);
-    safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
+    BuildTranslation(instr, pc_offset, frame_state_offset,
+                     descriptor->state_combine());
   }
 }
 
@@ -911,20 +940,6 @@ DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
   return instructions()->GetDeoptimizationEntry(state_id);
 }
 
-DeoptimizeKind CodeGenerator::GetDeoptimizationKind(
-    int deoptimization_id) const {
-  size_t const index = static_cast<size_t>(deoptimization_id);
-  DCHECK_LT(index, deoptimization_states_.size());
-  return deoptimization_states_[index]->kind();
-}
-
-DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
-    int deoptimization_id) const {
-  size_t const index = static_cast<size_t>(deoptimization_id);
-  DCHECK_LT(index, deoptimization_states_.size());
-  return deoptimization_states_[index]->reason();
-}
-
 void CodeGenerator::TranslateStateValueDescriptor(
     StateValueDescriptor* desc, StateValueList* nested,
     Translation* translation, InstructionOperandIterator* iter) {
@@ -996,8 +1011,12 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
     }
     shared_info = info()->shared_info();
   }
-  int shared_info_id =
+
+  const BailoutId bailout_id = descriptor->bailout_id();
+  const int shared_info_id =
       DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
+  const unsigned int height =
+      static_cast<unsigned int>(descriptor->GetHeight());
 
   switch (descriptor->type()) {
     case FrameStateType::kInterpretedFunction: {
@@ -1007,45 +1026,30 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
         return_offset = static_cast<int>(state_combine.GetOffsetToPokeAt());
         return_count = static_cast<int>(iter->instruction()->OutputCount());
       }
-      translation->BeginInterpretedFrame(
-          descriptor->bailout_id(), shared_info_id,
-          static_cast<unsigned int>(descriptor->locals_count() + 1),
-          return_offset, return_count);
+      translation->BeginInterpretedFrame(bailout_id, shared_info_id, height,
+                                         return_offset, return_count);
       break;
     }
     case FrameStateType::kArgumentsAdaptor:
-      translation->BeginArgumentsAdaptorFrame(
-          shared_info_id,
-          static_cast<unsigned int>(descriptor->parameters_count()));
+      translation->BeginArgumentsAdaptorFrame(shared_info_id, height);
       break;
     case FrameStateType::kConstructStub:
-      DCHECK(descriptor->bailout_id().IsValidForConstructStub());
-      translation->BeginConstructStubFrame(
-          descriptor->bailout_id(), shared_info_id,
-          static_cast<unsigned int>(descriptor->parameters_count() + 1));
+      DCHECK(bailout_id.IsValidForConstructStub());
+      translation->BeginConstructStubFrame(bailout_id, shared_info_id, height);
       break;
     case FrameStateType::kBuiltinContinuation: {
-      BailoutId bailout_id = descriptor->bailout_id();
-      int parameter_count =
-          static_cast<unsigned int>(descriptor->parameters_count());
       translation->BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
-                                                 parameter_count);
+                                                 height);
       break;
     }
     case FrameStateType::kJavaScriptBuiltinContinuation: {
-      BailoutId bailout_id = descriptor->bailout_id();
-      int parameter_count =
-          static_cast<unsigned int>(descriptor->parameters_count());
       translation->BeginJavaScriptBuiltinContinuationFrame(
-          bailout_id, shared_info_id, parameter_count);
+          bailout_id, shared_info_id, height);
       break;
     }
     case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
-      BailoutId bailout_id = descriptor->bailout_id();
-      int parameter_count =
-          static_cast<unsigned int>(descriptor->parameters_count());
       translation->BeginJavaScriptBuiltinContinuationWithCatchFrame(
-          bailout_id, shared_info_id, parameter_count);
+          bailout_id, shared_info_id, height);
       break;
     }
   }
@@ -1053,9 +1057,9 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
   TranslateFrameStateDescriptorOperands(descriptor, iter, translation);
 }
 
-int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
-                                    size_t frame_state_offset,
-                                    OutputFrameStateCombine state_combine) {
+DeoptimizationExit* CodeGenerator::BuildTranslation(
+    Instruction* instr, int pc_offset, size_t frame_state_offset,
+    OutputFrameStateCombine state_combine) {
   DeoptimizationEntry const& entry =
       GetDeoptimizationEntry(instr, frame_state_offset);
   FrameStateDescriptor* const descriptor = entry.descriptor();
@@ -1068,21 +1072,24 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
                           update_feedback_count, zone());
   if (entry.feedback().IsValid()) {
     DeoptimizationLiteral literal =
-        DeoptimizationLiteral(entry.feedback().vector());
+        DeoptimizationLiteral(entry.feedback().vector);
     int literal_id = DefineDeoptimizationLiteral(literal);
-    translation.AddUpdateFeedback(literal_id, entry.feedback().slot().ToInt());
+    translation.AddUpdateFeedback(literal_id, entry.feedback().slot.ToInt());
   }
   InstructionOperandIterator iter(instr, frame_state_offset);
   BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
                                           state_combine);
 
-  int deoptimization_id = static_cast<int>(deoptimization_states_.size());
+  DeoptimizationExit* const exit = new (zone()) DeoptimizationExit(
+      current_source_position_, descriptor->bailout_id(), translation.index(),
+      pc_offset, entry.kind(), entry.reason());
 
-  deoptimization_states_.push_back(new (zone()) DeoptimizationState(
-      descriptor->bailout_id(), translation.index(), pc_offset, entry.kind(),
-      entry.reason()));
+  if (!Deoptimizer::kSupportsFixedDeoptExitSize) {
+    exit->set_deoptimization_id(next_deoptimization_id_++);
+  }
 
-  return deoptimization_id;
+  deoptimization_exits_.push_back(exit);
+  return exit;
 }
 
 void CodeGenerator::AddTranslationForOperand(Translation* translation,
@@ -1236,13 +1243,8 @@ void CodeGenerator::MarkLazyDeoptSite() {
 
 DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
     Instruction* instr, size_t frame_state_offset) {
-  int const deoptimization_id = BuildTranslation(
-      instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
-
-  DeoptimizationExit* const exit = new (zone())
-      DeoptimizationExit(deoptimization_id, current_source_position_);
-  deoptimization_exits_.push_back(exit);
-  return exit;
+  return BuildTranslation(instr, -1, frame_state_offset,
+                          OutputFrameStateCombine::Ignore());
 }
 
 void CodeGenerator::InitializeSpeculationPoison() {
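This file also threads the new max_unoptimized_frame_height constructor argument through to a field; per the header comment later in this patch, it is the maximal combined height of all frames a deoptimization could build, applied as an offset to the optimized function's first stack check. A hedged sketch of the adjusted check (layout and units simplified):

  #include <cstddef>
  #include <cstdint>

  // Sketch: the entry stack check must leave headroom for the
  // unoptimized frames a deopt may materialize, so it tests against a
  // raised limit.
  bool EntryStackCheck(uintptr_t sp, uintptr_t stack_limit,
                       size_t max_unoptimized_frame_height) {
    return sp > stack_limit + max_unoptimized_frame_height;
  }
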
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 74dd90c5dea70b..e9ebf675905dbd 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -85,6 +85,25 @@ class DeoptimizationLiteral {
   const StringConstantBase* string_ = nullptr;
 };
 
+// These structs hold pc offsets for generated instructions and are only
+// used when tracing for turbolizer is enabled.
+struct TurbolizerCodeOffsetsInfo {
+  int code_start_register_check = -1;
+  int deopt_check = -1;
+  int init_poison = -1;
+  int blocks_start = -1;
+  int out_of_line_code = -1;
+  int deoptimization_exits = -1;
+  int pools = -1;
+  int jump_tables = -1;
+};
+
+struct TurbolizerInstructionStartInfo {
+  int gap_pc_offset = -1;
+  int arch_instr_pc_offset = -1;
+  int condition_pc_offset = -1;
+};
+
 // Generates native code for a sequence of instructions.
 class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
  public:
@@ -96,6 +115,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
                          JumpOptimizationInfo* jump_opt,
                          PoisoningMitigationLevel poisoning_level,
                          const AssemblerOptions& options, int32_t builtin_index,
+                         size_t max_unoptimized_frame_height,
                          std::unique_ptr<AssemblerBuffer> = {});
 
   // Generate native code. After calling AssembleCode, call FinalizeCode to
@@ -139,7 +159,13 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   size_t GetHandlerTableOffset() const { return handler_table_offset_; }
 
   const ZoneVector<int>& block_starts() const { return block_starts_; }
-  const ZoneVector<int>& instr_starts() const { return instr_starts_; }
+  const ZoneVector<TurbolizerInstructionStartInfo>& instr_starts() const {
+    return instr_starts_;
+  }
+
+  const TurbolizerCodeOffsetsInfo& offsets_info() const {
+    return offsets_info_;
+  }
 
   static constexpr int kBinarySearchSwitchMinimalCases = 4;
 
@@ -182,7 +208,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   void GenerateSpeculationPoisonFromCodeStartRegister();
 
   // Assemble code for the specified instruction.
-  CodeGenResult AssembleInstruction(Instruction* instr,
+  CodeGenResult AssembleInstruction(int instruction_index,
                                     const InstructionBlock* block);
   void AssembleGaps(Instruction* instr);
 
@@ -199,8 +225,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   // Determines how to call helper stubs depending on the code kind.
   StubCallMode DetermineStubCallMode() const;
 
-  CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
-                                        SourcePosition pos);
+  CodeGenResult AssembleDeoptimizerCall(DeoptimizationExit* exit);
 
   // ===========================================================================
   // ============= Architecture-specific code generation methods. ==============
@@ -342,11 +367,9 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   int DefineDeoptimizationLiteral(DeoptimizationLiteral literal);
   DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
                                                     size_t frame_state_offset);
-  DeoptimizeKind GetDeoptimizationKind(int deoptimization_id) const;
-  DeoptimizeReason GetDeoptimizationReason(int deoptimization_id) const;
-  int BuildTranslation(Instruction* instr, int pc_offset,
-                       size_t frame_state_offset,
-                       OutputFrameStateCombine state_combine);
+  DeoptimizationExit* BuildTranslation(Instruction* instr, int pc_offset,
+                                       size_t frame_state_offset,
+                                       OutputFrameStateCombine state_combine);
   void BuildTranslationForFrameStateDescriptor(
       FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
       Translation* translation, OutputFrameStateCombine state_combine);
@@ -361,35 +384,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
                                 InstructionOperand* op, MachineType type);
   void MarkLazyDeoptSite();
 
+  void PrepareForDeoptimizationExits(int deopt_count);
   DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
                                             size_t frame_state_offset);
 
   // ===========================================================================
 
-  class DeoptimizationState final : public ZoneObject {
-   public:
-    DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset,
-                        DeoptimizeKind kind, DeoptimizeReason reason)
-        : bailout_id_(bailout_id),
-          translation_id_(translation_id),
-          pc_offset_(pc_offset),
-          kind_(kind),
-          reason_(reason) {}
-
-    BailoutId bailout_id() const { return bailout_id_; }
-    int translation_id() const { return translation_id_; }
-    int pc_offset() const { return pc_offset_; }
-    DeoptimizeKind kind() const { return kind_; }
-    DeoptimizeReason reason() const { return reason_; }
-
-   private:
-    BailoutId bailout_id_;
-    int translation_id_;
-    int pc_offset_;
-    DeoptimizeKind kind_;
-    DeoptimizeReason reason_;
-  };
-
   struct HandlerInfo {
     Label* handler;
     int pc_offset;
@@ -414,14 +414,19 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   GapResolver resolver_;
   SafepointTableBuilder safepoints_;
   ZoneVector<HandlerInfo> handlers_;
+  int next_deoptimization_id_ = 0;
+  int deopt_exit_start_offset_ = 0;
   ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
-  ZoneDeque<DeoptimizationState*> deoptimization_states_;
   ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
   size_t inlined_function_count_ = 0;
   TranslationBuffer translations_;
   int handler_table_offset_ = 0;
   int last_lazy_deopt_pc_ = 0;
 
+  // The maximal combined height of all frames produced upon deoptimization.
+  // Applied as an offset to the first stack check of an optimized function.
+  const size_t max_unoptimized_frame_height_;
+
   // kArchCallCFunction could be reached either:
   //   kArchCallCFunction;
   // or:
@@ -444,7 +449,8 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   CodeGenResult result_;
   PoisoningMitigationLevel poisoning_level_;
   ZoneVector<int> block_starts_;
-  ZoneVector<int> instr_starts_;
+  TurbolizerCodeOffsetsInfo offsets_info_;
+  ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;
 };
 
 }  // namespace compiler
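The per-instruction tracing record above stores three pc offsets instead of one, sampled before the gap moves, before AssembleArchInstruction, and before the flags handling. Together with the next instruction's first offset they partition the emitted bytes; a hedged helper (not part of V8) makes that explicit:

  // Sketch: byte counts of the three phases of instruction i, given the
  // first offset recorded for instruction i + 1.
  struct StartInfo { int gap, arch, cond; };
  void PhaseSizes(const StartInfo& cur, int next_gap_offset,
                  int* gap_bytes, int* arch_bytes, int* cond_bytes) {
    *gap_bytes = cur.arch - cur.gap;           // gap-resolver moves
    *arch_bytes = cur.cond - cur.arch;         // architecture-specific body
    *cond_bytes = next_gap_offset - cur.cond;  // branch/boolean epilogue
  }
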
diff --git a/deps/v8/src/compiler/backend/frame-elider.cc b/deps/v8/src/compiler/backend/frame-elider.cc
index 2167d0abaa44a2..064501b0971b06 100644
--- a/deps/v8/src/compiler/backend/frame-elider.cc
+++ b/deps/v8/src/compiler/backend/frame-elider.cc
@@ -24,7 +24,7 @@ void FrameElider::MarkBlocks() {
     for (int i = block->code_start(); i < block->code_end(); ++i) {
       const Instruction* instr = InstructionAt(i);
       if (instr->IsCall() || instr->IsDeoptimizeCall() ||
-          instr->arch_opcode() == ArchOpcode::kArchStackPointer ||
+          instr->arch_opcode() == ArchOpcode::kArchStackPointerGreaterThan ||
           instr->arch_opcode() == ArchOpcode::kArchFramePointer) {
         block->mark_needs_frame();
         break;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index ed4be7a47cb296..4542da643b4b87 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -165,6 +165,11 @@ class IA32OperandConverter : public InstructionOperandConverter {
         Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
         return Operand(ctant.ToInt32(), ctant.rmode());
       }
+      case kMode_Root: {
+        Register base = kRootRegister;
+        int32_t disp = InputInt32(NextOffset(offset));
+        return Operand(base, disp);
+      }
       case kMode_None:
         UNREACHABLE();
     }
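kMode_Root gives the ia32 operand converter a root-relative addressing mode: the operand is kRootRegister plus a constant displacement, so isolate-relative data can be reached in a single memory operand. A minimal sketch of the effective address, assuming the usual root-register setup:

  #include <cstdint>

  // Sketch: effective address of a root-relative operand.
  uintptr_t RootRelative(uintptr_t root_register_value, int32_t disp) {
    return root_register_value + disp;
  }
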
@@ -205,10 +210,18 @@ class IA32OperandConverter : public InstructionOperandConverter {
 
 namespace {
 
+bool HasAddressingMode(Instruction* instr) {
+  return instr->addressing_mode() != kMode_None;
+}
+
 bool HasImmediateInput(Instruction* instr, size_t index) {
   return instr->InputAt(index)->IsImmediate();
 }
 
+bool HasRegisterInput(Instruction* instr, size_t index) {
+  return instr->InputAt(index)->IsRegister();
+}
+
 class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
  public:
   OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
@@ -256,6 +269,8 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
       // Just encode the stub index. This will be patched when the code
       // is added to the native module and copied into wasm code space.
       __ wasm_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+    } else if (tasm()->options().inline_offheap_trampolines) {
+      __ CallBuiltin(Builtins::kDoubleToI);
     } else {
       __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
     }
@@ -326,31 +341,31 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
 
 }  // namespace
 
-#define ASSEMBLE_COMPARE(asm_instr)                                   \
-  do {                                                                \
-    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
-      size_t index = 0;                                               \
-      Operand left = i.MemoryOperand(&index);                         \
-      if (HasImmediateInput(instr, index)) {                          \
-        __ asm_instr(left, i.InputImmediate(index));                  \
-      } else {                                                        \
-        __ asm_instr(left, i.InputRegister(index));                   \
-      }                                                               \
-    } else {                                                          \
-      if (HasImmediateInput(instr, 1)) {                              \
-        if (instr->InputAt(0)->IsRegister()) {                        \
-          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
-        } else {                                                      \
-          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
-        }                                                             \
-      } else {                                                        \
-        if (instr->InputAt(1)->IsRegister()) {                        \
-          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
-        } else {                                                      \
-          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
-        }                                                             \
-      }                                                               \
-    }                                                                 \
+#define ASSEMBLE_COMPARE(asm_instr)                              \
+  do {                                                           \
+    if (HasAddressingMode(instr)) {                              \
+      size_t index = 0;                                          \
+      Operand left = i.MemoryOperand(&index);                    \
+      if (HasImmediateInput(instr, index)) {                     \
+        __ asm_instr(left, i.InputImmediate(index));             \
+      } else {                                                   \
+        __ asm_instr(left, i.InputRegister(index));              \
+      }                                                          \
+    } else {                                                     \
+      if (HasImmediateInput(instr, 1)) {                         \
+        if (HasRegisterInput(instr, 0)) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
+        } else {                                                 \
+          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
+        }                                                        \
+      } else {                                                   \
+        if (HasRegisterInput(instr, 1)) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
+        } else {                                                 \
+          __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
+        }                                                        \
+      }                                                          \
+    }                                                            \
   } while (0)
 
 #define ASSEMBLE_IEEE754_BINOP(name)                                     \
@@ -382,19 +397,19 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     __ add(esp, Immediate(kDoubleSize));                                 \
   } while (false)
 
-#define ASSEMBLE_BINOP(asm_instr)                                     \
-  do {                                                                \
-    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
-      size_t index = 1;                                               \
-      Operand right = i.MemoryOperand(&index);                        \
-      __ asm_instr(i.InputRegister(0), right);                        \
-    } else {                                                          \
-      if (HasImmediateInput(instr, 1)) {                              \
-        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));         \
-      } else {                                                        \
-        __ asm_instr(i.InputRegister(0), i.InputOperand(1));          \
-      }                                                               \
-    }                                                                 \
+#define ASSEMBLE_BINOP(asm_instr)                             \
+  do {                                                        \
+    if (HasAddressingMode(instr)) {                           \
+      size_t index = 1;                                       \
+      Operand right = i.MemoryOperand(&index);                \
+      __ asm_instr(i.InputRegister(0), right);                \
+    } else {                                                  \
+      if (HasImmediateInput(instr, 1)) {                      \
+        __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
+      } else {                                                \
+        __ asm_instr(i.InputRegister(0), i.InputOperand(1));  \
+      }                                                       \
+    }                                                         \
   } while (0)
 
 #define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
@@ -431,9 +446,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
 
 #define ASSEMBLE_MOVX(mov_instr)                            \
   do {                                                      \
-    if (instr->addressing_mode() != kMode_None) {           \
+    if (HasAddressingMode(instr)) {                         \
       __ mov_instr(i.OutputRegister(), i.MemoryOperand());  \
-    } else if (instr->InputAt(0)->IsRegister()) {           \
+    } else if (HasRegisterInput(instr, 0)) {                \
       __ mov_instr(i.OutputRegister(), i.InputRegister(0)); \
     } else {                                                \
       __ mov_instr(i.OutputRegister(), i.InputOperand(0));  \
@@ -905,19 +920,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
+      CodeGenResult result = AssembleDeoptimizerCall(exit);
       if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
       AssembleReturn(instr->InputAt(0));
       break;
-    case kArchStackPointer:
-      __ mov(i.OutputRegister(), esp);
-      break;
     case kArchFramePointer:
       __ mov(i.OutputRegister(), ebp);
       break;
@@ -928,6 +939,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ mov(i.OutputRegister(), ebp);
       }
       break;
+    case kArchStackPointerGreaterThan: {
+      constexpr size_t kValueIndex = 0;
+      if (HasAddressingMode(instr)) {
+        __ cmp(esp, i.MemoryOperand(kValueIndex));
+      } else {
+        __ cmp(esp, i.InputRegister(kValueIndex));
+      }
+      break;
+    }
     case kArchTruncateDoubleToI: {
       auto result = i.OutputRegister();
       auto input = i.InputDoubleRegister(0);
@@ -1115,7 +1135,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // i.InputRegister(2) ... right low word.
       // i.InputRegister(3) ... right high word.
       bool use_temp = false;
-      if ((instr->InputAt(1)->IsRegister() &&
+      if ((HasRegisterInput(instr, 1) &&
            i.OutputRegister(0).code() == i.InputRegister(1).code()) ||
           i.OutputRegister(0).code() == i.InputRegister(3).code()) {
         // We cannot write to the output register directly, because it would
@@ -1140,7 +1160,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // i.InputRegister(2) ... right low word.
       // i.InputRegister(3) ... right high word.
       bool use_temp = false;
-      if ((instr->InputAt(1)->IsRegister() &&
+      if ((HasRegisterInput(instr, 1) &&
            i.OutputRegister(0).code() == i.InputRegister(1).code()) ||
           i.OutputRegister(0).code() == i.InputRegister(3).code()) {
         // We cannot write to the output register directly, because it would
@@ -1671,7 +1691,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     case kIA32BitcastIF:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
@@ -1762,7 +1782,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       frame_access_state()->IncreaseSPDelta(kSimd128Size / kSystemPointerSize);
       break;
     case kIA32Push:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+      if (HasAddressingMode(instr)) {
         size_t index = 0;
         Operand operand = i.MemoryOperand(&index);
         __ push(operand);
@@ -1984,6 +2004,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                 i.InputOperand(1));
       break;
     }
+    case kSSEF32x4Div: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      __ divps(i.OutputSimd128Register(), i.InputOperand(1));
+      break;
+    }
+    case kAVXF32x4Div: {
+      CpuFeatureScope avx_scope(tasm(), AVX);
+      __ vdivps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                i.InputOperand(1));
+      break;
+    }
     case kSSEF32x4Min: {
       XMMRegister src1 = i.InputSimd128Register(1),
                   dst = i.OutputSimd128Register();
@@ -2180,24 +2211,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI32x4Shl: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      __ pslld(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ pslld(i.OutputSimd128Register(), tmp);
       break;
     }
     case kAVXI32x4Shl: {
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0),
-                i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
       break;
     }
     case kSSEI32x4ShrS: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      __ psrad(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ psrad(i.OutputSimd128Register(), tmp);
       break;
     }
     case kAVXI32x4ShrS: {
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0),
-                i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
       break;
     }
     case kSSEI32x4Add: {
@@ -2329,7 +2366,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
-      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp = i.TempSimd128Register(0);
       // NAN->0, negative->0
       __ pxor(kScratchDoubleReg, kScratchDoubleReg);
       __ maxps(dst, kScratchDoubleReg);
@@ -2357,7 +2394,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
       CpuFeatureScope avx_scope(tasm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
-      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp = i.TempSimd128Register(0);
       // NAN->0, negative->0
       __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
       __ vmaxps(dst, dst, kScratchDoubleReg);
@@ -2392,13 +2429,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI32x4ShrU: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      __ psrld(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ psrld(i.OutputSimd128Register(), tmp);
       break;
     }
     case kAVXI32x4ShrU: {
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0),
-                i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
       break;
     }
     case kSSEI32x4MinU: {
@@ -2512,24 +2552,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI16x8Shl: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      __ psllw(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ psllw(i.OutputSimd128Register(), tmp);
       break;
     }
     case kAVXI16x8Shl: {
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0),
-                i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
       break;
     }
     case kSSEI16x8ShrS: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      __ psraw(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ psraw(i.OutputSimd128Register(), tmp);
       break;
     }
     case kAVXI16x8ShrS: {
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0),
-                i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
       break;
     }
     case kSSEI16x8SConvertI32x4: {
@@ -2698,13 +2744,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI16x8ShrU: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      __ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ psrlw(i.OutputSimd128Register(), tmp);
       break;
     }
     case kAVXI16x8ShrU: {
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0),
-                i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movd(tmp, i.InputRegister(1));
+      __ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
       break;
     }
     case kSSEI16x8UConvertI32x4: {
@@ -2867,53 +2916,54 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kSSEI8x16Shl: {
       XMMRegister dst = i.OutputSimd128Register();
       DCHECK_EQ(dst, i.InputSimd128Register(0));
-      int8_t shift = i.InputInt8(1) & 0x7;
-      if (shift < 4) {
-        // For small shifts, doubling is faster.
-        for (int i = 0; i < shift; ++i) {
-          __ paddb(dst, dst);
-        }
-      } else {
-        // Mask off the unwanted bits before word-shifting.
-        __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
-        __ psrlw(kScratchDoubleReg, 8 + shift);
-        __ packuswb(kScratchDoubleReg, kScratchDoubleReg);
-        __ pand(dst, kScratchDoubleReg);
-        __ psllw(dst, shift);
-      }
+      Register shift = i.InputRegister(1);
+      Register tmp = i.ToRegister(instr->TempAt(0));
+      XMMRegister tmp_simd = i.TempSimd128Register(1);
+      // Mask off the unwanted bits before word-shifting.
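+      // There is no 8-bit SIMD shift: shifting all-ones words right by
+      // (shift + 8) and packing yields a per-byte mask of (0xFF >> shift);
+      // with the high bits cleared, the 16-bit psllw below cannot carry
+      // bits across byte boundaries.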
+      __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+      __ mov(tmp, shift);
+      __ add(tmp, Immediate(8));
+      __ movd(tmp_simd, tmp);
+      __ psrlw(kScratchDoubleReg, tmp_simd);
+      __ packuswb(kScratchDoubleReg, kScratchDoubleReg);
+      __ pand(dst, kScratchDoubleReg);
+      __ movd(tmp_simd, shift);
+      __ psllw(dst, tmp_simd);
       break;
     }
     case kAVXI8x16Shl: {
       CpuFeatureScope avx_scope(tasm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src = i.InputSimd128Register(0);
-      int8_t shift = i.InputInt8(1) & 0x7;
-      if (shift < 4) {
-        // For small shifts, doubling is faster.
-        for (int i = 0; i < shift; ++i) {
-          __ vpaddb(dst, src, src);
-          src = dst;
-        }
-      } else {
-        // Mask off the unwanted bits before word-shifting.
-        __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
-        __ vpsrlw(kScratchDoubleReg, kScratchDoubleReg, 8 + shift);
-        __ vpackuswb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
-        __ vpand(dst, src, kScratchDoubleReg);
-        __ vpsllw(dst, dst, shift);
-      }
+      Register shift = i.InputRegister(1);
+      Register tmp = i.ToRegister(instr->TempAt(0));
+      XMMRegister tmp_simd = i.TempSimd128Register(1);
+      // Mask off the unwanted bits before word-shifting.
+      __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+      __ mov(tmp, shift);
+      __ add(tmp, Immediate(8));
+      __ movd(tmp_simd, tmp);
+      __ vpsrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
+      __ vpackuswb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+      __ vpand(dst, src, kScratchDoubleReg);
+      __ movd(tmp_simd, shift);
+      __ vpsllw(dst, dst, tmp_simd);
       break;
     }
     case kIA32I8x16ShrS: {
       XMMRegister dst = i.OutputSimd128Register();
-      XMMRegister src = i.InputSimd128Register(0);
-      int8_t shift = i.InputInt8(1) & 0x7;
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      Register tmp = i.ToRegister(instr->TempAt(0));
+      XMMRegister tmp_simd = i.TempSimd128Register(1);
       // Unpack the bytes into words, do arithmetic shifts, and repack.
-      __ Punpckhbw(kScratchDoubleReg, src);
-      __ Punpcklbw(dst, src);
-      __ Psraw(kScratchDoubleReg, 8 + shift);
-      __ Psraw(dst, 8 + shift);
-      __ Packsswb(dst, kScratchDoubleReg);
+      __ punpckhbw(kScratchDoubleReg, dst);
+      __ punpcklbw(dst, dst);
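+      // Each input byte now sits in the high half of a 16-bit lane, so an
+      // arithmetic shift by (shift + 8) discards the low half and leaves
+      // the sign-extended 8-bit result.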
+      __ mov(tmp, i.InputRegister(1));
+      __ add(tmp, Immediate(8));
+      __ movd(tmp_simd, tmp);
+      __ psraw(kScratchDoubleReg, tmp_simd);
+      __ psraw(dst, tmp_simd);
+      __ packsswb(dst, kScratchDoubleReg);
       break;
     }
     case kSSEI8x16Add: {
@@ -2964,7 +3014,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       XMMRegister dst = i.OutputSimd128Register();
       DCHECK_EQ(dst, i.InputSimd128Register(0));
       XMMRegister right = i.InputSimd128Register(1);
-      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp = i.TempSimd128Register(0);
 
       // I16x8 view of I8x16
       // left = AAaa AAaa ... AAaa AAaa
@@ -3004,7 +3054,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister left = i.InputSimd128Register(0);
       XMMRegister right = i.InputSimd128Register(1);
-      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp = i.TempSimd128Register(0);
 
       // I16x8 view of I8x16
       // left = AAaa AAaa ... AAaa AAaa
@@ -3165,15 +3215,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kIA32I8x16ShrU: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
       XMMRegister dst = i.OutputSimd128Register();
-      XMMRegister src = i.InputSimd128Register(0);
-      int8_t shift = i.InputInt8(1) & 0x7;
+      Register tmp = i.ToRegister(instr->TempAt(0));
+      XMMRegister tmp_simd = i.TempSimd128Register(1);
       // Unpack the bytes into words, do logical shifts, and repack.
-      __ Punpckhbw(kScratchDoubleReg, src);
-      __ Punpcklbw(dst, src);
-      __ Psrlw(kScratchDoubleReg, 8 + shift);
-      __ Psrlw(dst, 8 + shift);
-      __ Packuswb(dst, kScratchDoubleReg);
+      __ punpckhbw(kScratchDoubleReg, dst);
+      __ punpcklbw(dst, dst);
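+      // As in I8x16ShrS, each byte sits in the high half of a 16-bit lane;
+      // a logical shift by (shift + 8) discards the low half.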
+      __ mov(tmp, i.InputRegister(1));
+      __ add(tmp, Immediate(8));
+      __ movd(tmp_simd, tmp);
+      __ psrlw(kScratchDoubleReg, tmp_simd);
+      __ psrlw(dst, tmp_simd);
+      __ packuswb(dst, kScratchDoubleReg);
       break;
     }
     case kSSEI8x16MinU: {
@@ -3693,10 +3747,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
       break;
     }
-    case kIA32StackCheck: {
-      __ CompareStackLimit(esp);
-      break;
-    }
     case kIA32Word32AtomicPairLoad: {
       XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
       __ movq(tmp, i.MemoryOperand());
@@ -4402,6 +4452,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() {}
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   IA32OperandConverter g(this, nullptr);
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 56dea82fe2c29a..7530c716b85c0c 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -116,7 +116,6 @@ namespace compiler {
   V(IA32PushSimd128)               \
   V(IA32Poke)                      \
   V(IA32Peek)                      \
-  V(IA32StackCheck)                \
   V(SSEF32x4Splat)                 \
   V(AVXF32x4Splat)                 \
   V(SSEF32x4ExtractLane)           \
@@ -140,6 +139,8 @@ namespace compiler {
   V(AVXF32x4Sub)                   \
   V(SSEF32x4Mul)                   \
   V(AVXF32x4Mul)                   \
+  V(SSEF32x4Div)                   \
+  V(AVXF32x4Div)                   \
   V(SSEF32x4Min)                   \
   V(AVXF32x4Min)                   \
   V(SSEF32x4Max)                   \
@@ -394,7 +395,8 @@ namespace compiler {
   V(M2I)  /* [      %r2*2 + K] */      \
   V(M4I)  /* [      %r2*4 + K] */      \
   V(M8I)  /* [      %r2*8 + K] */      \
-  V(MI)   /* [              K] */
+  V(MI)   /* [              K] */      \
+  V(Root) /* [%root       + K] */
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 15f69b991c7288..c2097a6691fd1b 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -120,6 +120,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kAVXF32x4Sub:
     case kSSEF32x4Mul:
     case kAVXF32x4Mul:
+    case kSSEF32x4Div:
+    case kAVXF32x4Div:
     case kSSEF32x4Min:
     case kAVXF32x4Min:
     case kSSEF32x4Max:
@@ -356,7 +358,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
       // Moves are used for memory load/store operations.
       return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
 
-    case kIA32StackCheck:
     case kIA32Peek:
       return kIsLoadOperation;
 
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index e1fc66b4ba4843..ebef39a93a65ec 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -152,6 +152,21 @@ class IA32OperandGenerator final : public OperandGenerator {
   AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
+    {
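+      // Fold accesses to external references into a root-register-relative
+      // operand ([kRootRegister + delta], kMode_Root) when the offset is a
+      // constant that fits in an int32.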
+      LoadMatcher<ExternalReferenceMatcher> m(node);
+      if (m.index().HasValue() && m.object().HasValue() &&
+          selector()->CanAddressRelativeToRootsRegister(m.object().Value())) {
+        ptrdiff_t const delta =
+            m.index().Value() +
+            TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+                selector()->isolate(), m.object().Value());
+        if (is_int32(delta)) {
+          inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
+          return kMode_Root;
+        }
+      }
+    }
+
     BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
     DCHECK(m.matches());
     if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
@@ -261,6 +276,31 @@ void VisitRRISimd(InstructionSelector* selector, Node* node,
   }
 }
 
+void VisitRROSimdShift(InstructionSelector* selector, Node* node,
+                       ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+  InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+  InstructionOperand temps[] = {g.TempSimd128Register()};
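+  // AVX shifts have a non-destructive three-operand form, while the SSE
+  // forms overwrite their first input, so the output must alias it.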
+  if (selector->IsSupported(AVX)) {
+    selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1,
+                   arraysize(temps), temps);
+  } else {
+    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1,
+                   arraysize(temps), temps);
+  }
+}
+
+void VisitRROI8x16SimdRightShift(InstructionSelector* selector, Node* node,
+                                 ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+  InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+  InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
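+  // The code generator needs a general-purpose temp to adjust the shift
+  // count and a SIMD temp to transfer it into an XMM register.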
+  selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1,
+                 arraysize(temps), temps);
+}
+
 }  // namespace
 
 void InstructionSelector::VisitStackSlot(Node* node) {
@@ -344,7 +384,8 @@ void InstructionSelector::VisitStore(Node* node) {
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
   MachineRepresentation rep = store_rep.representation();
 
-  if (write_barrier_kind != kNoWriteBarrier) {
+  if (write_barrier_kind != kNoWriteBarrier &&
+      V8_LIKELY(!FLAG_disable_write_barriers)) {
     DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[] = {
@@ -516,6 +557,35 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
   }
 }
 
+void InstructionSelector::VisitStackPointerGreaterThan(
+    Node* node, FlagsContinuation* cont) {
+  Node* const value = node->InputAt(0);
+  InstructionCode opcode = kArchStackPointerGreaterThan;
+
+  DCHECK(cont->IsBranch());
+  const int effect_level =
+      GetEffectLevel(cont->true_block()->PredecessorAt(0)->control_input());
+
+  IA32OperandGenerator g(this);
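+  // The limit load may only be folded into the compare as a memory operand
+  // if no effectful operations can intervene between the load and the
+  // branch that consumes the comparison.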
+  if (g.CanBeMemoryOperand(kIA32Cmp, node, value, effect_level)) {
+    DCHECK_EQ(IrOpcode::kLoad, value->opcode());
+
+    // GetEffectiveAddressMemoryOperand can create at most 3 inputs.
+    static constexpr int kMaxInputCount = 3;
+
+    size_t input_count = 0;
+    InstructionOperand inputs[kMaxInputCount];
+    AddressingMode addressing_mode =
+        g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
+    opcode |= AddressingModeField::encode(addressing_mode);
+    DCHECK_LE(input_count, kMaxInputCount);
+
+    EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
+  } else {
+    EmitWithContinuation(opcode, g.UseRegister(value), cont);
+  }
+}
+
 // Shared routine for multiple shift operations.
 static inline void VisitShift(InstructionSelector* selector, Node* node,
                               ArchOpcode opcode) {
@@ -1243,30 +1313,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
 
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       FlagsContinuation* cont) {
-  if (selector->isolate() != nullptr) {
-    StackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> m(
-        selector->isolate(), node);
-    if (m.Matched()) {
-      // Compare(Load(js_stack_limit), LoadStackPointer)
-      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-      InstructionCode opcode = cont->Encode(kIA32StackCheck);
-      CHECK(cont->IsBranch());
-      selector->EmitWithContinuation(opcode, cont);
-      return;
-    }
-  }
-  WasmStackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> wasm_m(
-      node);
-  if (wasm_m.Matched()) {
-    // This is a wasm stack check. By structure, we know that we can use the
-    // stack pointer directly, as wasm code does not modify the stack at points
-    // where stack checks are performed.
-    Node* left = node->InputAt(0);
-    LocationOperand esp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER,
-                        InstructionSequence::DefaultRepresentation(),
-                        RegisterCode::kRegCode_esp);
-    return VisitCompareWithMemoryOperand(selector, kIA32Cmp, left, esp, cont);
-  }
   VisitWordCompare(selector, node, kIA32Cmp, cont);
 }
 
@@ -1433,6 +1479,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
         return VisitWordCompare(this, value, cont);
       case IrOpcode::kWord32And:
         return VisitWordCompare(this, value, kIA32Test, cont);
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
       default:
         break;
     }
@@ -1842,6 +1891,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
   V(F32x4AddHoriz)         \
   V(F32x4Sub)              \
   V(F32x4Mul)              \
+  V(F32x4Div)              \
   V(F32x4Min)              \
   V(F32x4Max)              \
   V(F32x4Eq)               \
@@ -1939,8 +1989,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
   V(I32x4ShrU)                \
   V(I16x8Shl)                 \
   V(I16x8ShrS)                \
-  V(I16x8ShrU)                \
-  V(I8x16Shl)
+  V(I16x8ShrU)
 
 #define SIMD_I8X16_RIGHT_SHIFT_OPCODES(V) \
   V(I8x16ShrS)                            \
@@ -2037,22 +2086,21 @@ VISIT_SIMD_REPLACE_LANE(F32x4)
 #undef VISIT_SIMD_REPLACE_LANE
 #undef SIMD_INT_TYPES
 
-#define VISIT_SIMD_SHIFT(Opcode)                          \
-  void InstructionSelector::Visit##Opcode(Node* node) {   \
-    VisitRRISimd(this, node, kAVX##Opcode, kSSE##Opcode); \
+#define VISIT_SIMD_SHIFT(Opcode)                               \
+  void InstructionSelector::Visit##Opcode(Node* node) {        \
+    VisitRROSimdShift(this, node, kAVX##Opcode, kSSE##Opcode); \
   }
 SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
 #undef VISIT_SIMD_SHIFT
 #undef SIMD_SHIFT_OPCODES
 
-#define VISIT_SIMD_I8X16_RIGHT_SHIFT(Op)            \
-  void InstructionSelector::Visit##Op(Node* node) { \
-    VisitRRISimd(this, node, kIA32##Op);            \
+#define VISIT_SIMD_I8x16_RIGHT_SHIFT(Opcode)                \
+  void InstructionSelector::Visit##Opcode(Node* node) {     \
+    VisitRROI8x16SimdRightShift(this, node, kIA32##Opcode); \
   }
-
-SIMD_I8X16_RIGHT_SHIFT_OPCODES(VISIT_SIMD_I8X16_RIGHT_SHIFT)
+SIMD_I8X16_RIGHT_SHIFT_OPCODES(VISIT_SIMD_I8x16_RIGHT_SHIFT)
 #undef SIMD_I8X16_RIGHT_SHIFT_OPCODES
-#undef VISIT_SIMD_I8X16_RIGHT_SHIFT
+#undef VISIT_SIMD_I8x16_RIGHT_SHIFT
 
 #define VISIT_SIMD_UNOP(Opcode)                                             \
   void InstructionSelector::Visit##Opcode(Node* node) {                     \
@@ -2123,6 +2171,20 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
   VisitPack(this, node, kAVXI8x16UConvertI16x8, kSSEI8x16UConvertI16x8);
 }
 
+void InstructionSelector::VisitI8x16Shl(Node* node) {
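+  // I8x16Shl needs both a general-purpose and a SIMD temp and also selects
+  // between AVX and SSE encodings, so it has a dedicated visitor rather
+  // than using one of the generic shift helpers.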
+  IA32OperandGenerator g(this);
+  InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+  InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+  InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
+  if (IsSupported(AVX)) {
+    Emit(kAVXI8x16Shl, g.DefineAsRegister(node), operand0, operand1,
+         arraysize(temps), temps);
+  } else {
+    Emit(kSSEI8x16Shl, g.DefineSameAsFirst(node), operand0, operand1,
+         arraysize(temps), temps);
+  }
+}
+
 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
   UNREACHABLE();
 }
@@ -2259,13 +2321,13 @@ static const ShuffleEntry arch_shuffles[] = {
     {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
      kSSES8x8Reverse,
      kAVXS8x8Reverse,
-     false,
-     false},
+     true,
+     true},
     {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
      kSSES8x4Reverse,
      kAVXS8x4Reverse,
-     false,
-     false},
+     true,
+     true},
     {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
      kSSES8x2Reverse,
      kAVXS8x2Reverse,
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 1085de2196f8cf..589c1bda3b1509 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -88,13 +88,13 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
   V(ArchThrowTerminator)                  \
   V(ArchDeoptimize)                       \
   V(ArchRet)                              \
-  V(ArchStackPointer)                     \
   V(ArchFramePointer)                     \
   V(ArchParentFramePointer)               \
   V(ArchTruncateDoubleToI)                \
   V(ArchStoreWithWriteBarrier)            \
   V(ArchStackSlot)                        \
   V(ArchWordPoisonOnSpeculation)          \
+  V(ArchStackPointerGreaterThan)          \
   V(Word32AtomicLoadInt8)                 \
   V(Word32AtomicLoadUint8)                \
   V(Word32AtomicLoadInt16)                \
@@ -238,6 +238,9 @@ enum FlagsCondition {
   kNegative
 };
 
+static constexpr FlagsCondition kStackPointerGreaterThanCondition =
+    kUnsignedGreaterThan;
+
 inline FlagsCondition NegateFlagsCondition(FlagsCondition condition) {
   return static_cast<FlagsCondition>(condition ^ 1);
 }
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index 538af71bb469f3..dc66813740b3ee 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -275,9 +275,10 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
     case kIeee754Float64Tanh:
       return kNoOpcodeFlags;
 
-    case kArchStackPointer:
-      // ArchStackPointer instruction loads the current stack pointer value and
-      // must not be reordered with instruction with side effects.
+    case kArchStackPointerGreaterThan:
+      // The ArchStackPointerGreaterThan instruction loads the current stack
+      // pointer value and must not be reordered with instructions that have
+      // side effects.
       return kIsLoadOperation;
 
     case kArchWordPoisonOnSpeculation:
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 11ba9104059453..43193ec2b110e9 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -26,6 +26,7 @@ InstructionSelector::InstructionSelector(
     InstructionSequence* sequence, Schedule* schedule,
     SourcePositionTable* source_positions, Frame* frame,
     EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
+    size_t* max_unoptimized_frame_height,
     SourcePositionMode source_position_mode, Features features,
     EnableScheduling enable_scheduling,
     EnableRootsRelativeAddressing enable_roots_relative_addressing,
@@ -56,7 +57,10 @@ InstructionSelector::InstructionSelector(
       instruction_selection_failed_(false),
       instr_origins_(sequence->zone()),
       trace_turbo_(trace_turbo),
-      tick_counter_(tick_counter) {
+      tick_counter_(tick_counter),
+      max_unoptimized_frame_height_(max_unoptimized_frame_height) {
+  DCHECK_EQ(*max_unoptimized_frame_height, 0);  // Caller-initialized.
+
   instructions_.reserve(node_count);
   continuation_inputs_.reserve(5);
   continuation_outputs_.reserve(2);
@@ -421,9 +425,27 @@ void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
   effect_level_[id] = effect_level;
 }
 
-bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
-  return enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing &&
-         CanUseRootsRegister();
+bool InstructionSelector::CanAddressRelativeToRootsRegister(
+    const ExternalReference& reference) const {
+  // There are three things to consider here:
+  // 1. CanUseRootsRegister: Is kRootRegister initialized?
+  const bool root_register_is_available_and_initialized = CanUseRootsRegister();
+  if (!root_register_is_available_and_initialized) return false;
+
+  // 2. enable_roots_relative_addressing_: Can we address everything on the
+  //    heap through the root register, i.e. are root-relative offsets to
+  //    arbitrary heap addresses guaranteed not to change between code
+  //    generation and execution?
+  const bool all_root_relative_offsets_are_constant =
+      (enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing);
+  if (all_root_relative_offsets_are_constant) return true;
+
+  // 3. IsAddressableThroughRootRegister: Is the target address guaranteed to
+  //    have a fixed root-relative offset? If so, we can ignore 2.
+  const bool this_root_relative_offset_is_constant =
+      TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(),
+                                                           reference);
+  return this_root_relative_offset_is_constant;
 }
 
 bool InstructionSelector::CanUseRootsRegister() const {
@@ -744,7 +766,7 @@ Instruction* InstructionSelector::EmitWithContinuation(
 
 void InstructionSelector::AppendDeoptimizeArguments(
     InstructionOperandVector* args, DeoptimizeKind kind,
-    DeoptimizeReason reason, VectorSlotPair const& feedback,
+    DeoptimizeReason reason, FeedbackSource const& feedback,
     Node* frame_state) {
   OperandGenerator g(this);
   FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
@@ -761,7 +783,7 @@ void InstructionSelector::AppendDeoptimizeArguments(
 Instruction* InstructionSelector::EmitDeoptimize(
     InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
     size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
-    DeoptimizeReason reason, VectorSlotPair const& feedback,
+    DeoptimizeReason reason, FeedbackSource const& feedback,
     Node* frame_state) {
   InstructionOperandVector args(instruction_zone());
   for (size_t i = 0; i < input_count; ++i) {
@@ -972,7 +994,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
 
     int const state_id = sequence()->AddDeoptimizationEntry(
         buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
-        DeoptimizeReason::kUnknown, VectorSlotPair());
+        DeoptimizeReason::kUnknown, FeedbackSource());
     buffer->instruction_args.push_back(g.TempImmediate(state_id));
 
     StateObjectDeduplicator deduplicator(instruction_zone());
@@ -1056,7 +1078,6 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
 bool InstructionSelector::IsSourcePositionUsed(Node* node) {
   return (source_position_mode_ == kAllSourcePositions ||
           node->opcode() == IrOpcode::kCall ||
-          node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
           node->opcode() == IrOpcode::kTrapIf ||
           node->opcode() == IrOpcode::kTrapUnless ||
           node->opcode() == IrOpcode::kProtectedLoad ||
@@ -1078,10 +1099,13 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
     if (node->opcode() == IrOpcode::kStore ||
         node->opcode() == IrOpcode::kUnalignedStore ||
         node->opcode() == IrOpcode::kCall ||
-        node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
         node->opcode() == IrOpcode::kProtectedLoad ||
         node->opcode() == IrOpcode::kProtectedStore ||
-        node->opcode() == IrOpcode::kMemoryBarrier) {
+#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
+  node->opcode() == IrOpcode::k##Opcode ||
+        MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
+#undef ADD_EFFECT_FOR_ATOMIC_OP
+                node->opcode() == IrOpcode::kMemoryBarrier) {
       ++effect_level;
     }
   }
@@ -1274,9 +1298,9 @@ void InstructionSelector::VisitNode(Node* node) {
       // No code needed for these graph artifacts.
       return;
     case IrOpcode::kIfException:
-      return MarkAsReference(node), VisitIfException(node);
+      return MarkAsTagged(node), VisitIfException(node);
     case IrOpcode::kFinishRegion:
-      return MarkAsReference(node), VisitFinishRegion(node);
+      return MarkAsTagged(node), VisitFinishRegion(node);
     case IrOpcode::kParameter: {
       MachineType type =
           linkage()->GetParameterType(ParameterIndexOf(node->op()));
@@ -1284,7 +1308,7 @@ void InstructionSelector::VisitNode(Node* node) {
       return VisitParameter(node);
     }
     case IrOpcode::kOsrValue:
-      return MarkAsReference(node), VisitOsrValue(node);
+      return MarkAsTagged(node), VisitOsrValue(node);
     case IrOpcode::kPhi: {
       MachineRepresentation rep = PhiRepresentationOf(node->op());
       if (rep == MachineRepresentation::kNone) return;
@@ -1304,20 +1328,18 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kFloat64Constant:
       return MarkAsFloat64(node), VisitConstant(node);
     case IrOpcode::kHeapConstant:
-      return MarkAsReference(node), VisitConstant(node);
+      return MarkAsTagged(node), VisitConstant(node);
     case IrOpcode::kCompressedHeapConstant:
       return MarkAsCompressed(node), VisitConstant(node);
     case IrOpcode::kNumberConstant: {
       double value = OpParameter<double>(node->op());
-      if (!IsSmiDouble(value)) MarkAsReference(node);
+      if (!IsSmiDouble(value)) MarkAsTagged(node);
       return VisitConstant(node);
     }
     case IrOpcode::kDelayedStringConstant:
-      return MarkAsReference(node), VisitConstant(node);
+      return MarkAsTagged(node), VisitConstant(node);
     case IrOpcode::kCall:
       return VisitCall(node);
-    case IrOpcode::kCallWithCallerSavedRegisters:
-      return VisitCallWithCallerSavedRegisters(node);
     case IrOpcode::kDeoptimizeIf:
       return VisitDeoptimizeIf(node);
     case IrOpcode::kDeoptimizeUnless:
@@ -1484,10 +1506,16 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
              VisitBitcastTaggedToWord(node);
     case IrOpcode::kBitcastWordToTagged:
-      return MarkAsReference(node), VisitBitcastWordToTagged(node);
+      return MarkAsTagged(node), VisitBitcastWordToTagged(node);
     case IrOpcode::kBitcastWordToTaggedSigned:
       return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
              EmitIdentity(node);
+    case IrOpcode::kBitcastWord32ToCompressedSigned:
+      return MarkAsRepresentation(MachineRepresentation::kCompressedSigned,
+                                  node),
+             EmitIdentity(node);
+    case IrOpcode::kBitcastCompressedSignedToWord32:
+      return MarkAsWord32(node), EmitIdentity(node);
     case IrOpcode::kChangeFloat32ToFloat64:
       return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
     case IrOpcode::kChangeInt32ToFloat64:
@@ -1536,18 +1564,20 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kChangeTaggedToCompressed:
       return MarkAsCompressed(node), VisitChangeTaggedToCompressed(node);
     case IrOpcode::kChangeTaggedPointerToCompressedPointer:
-      return MarkAsCompressed(node),
+      return MarkAsRepresentation(MachineRepresentation::kCompressedPointer,
+                                  node),
              VisitChangeTaggedPointerToCompressedPointer(node);
     case IrOpcode::kChangeTaggedSignedToCompressedSigned:
-      return MarkAsWord32(node),
+      return MarkAsRepresentation(MachineRepresentation::kCompressedSigned,
+                                  node),
              VisitChangeTaggedSignedToCompressedSigned(node);
     case IrOpcode::kChangeCompressedToTagged:
-      return MarkAsReference(node), VisitChangeCompressedToTagged(node);
+      return MarkAsTagged(node), VisitChangeCompressedToTagged(node);
     case IrOpcode::kChangeCompressedPointerToTaggedPointer:
-      return MarkAsReference(node),
+      return MarkAsRepresentation(MachineRepresentation::kTaggedPointer, node),
              VisitChangeCompressedPointerToTaggedPointer(node);
     case IrOpcode::kChangeCompressedSignedToTaggedSigned:
-      return MarkAsWord64(node),
+      return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
              VisitChangeCompressedSignedToTaggedSigned(node);
 #endif
     case IrOpcode::kTruncateFloat64ToFloat32:
@@ -1697,15 +1727,15 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kFloat64InsertHighWord32:
       return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
     case IrOpcode::kTaggedPoisonOnSpeculation:
-      return MarkAsReference(node), VisitTaggedPoisonOnSpeculation(node);
+      return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node);
     case IrOpcode::kWord32PoisonOnSpeculation:
       return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
     case IrOpcode::kWord64PoisonOnSpeculation:
       return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
     case IrOpcode::kStackSlot:
       return VisitStackSlot(node);
-    case IrOpcode::kLoadStackPointer:
-      return VisitLoadStackPointer(node);
+    case IrOpcode::kStackPointerGreaterThan:
+      return VisitStackPointerGreaterThan(node);
     case IrOpcode::kLoadFramePointer:
       return VisitLoadFramePointer(node);
     case IrOpcode::kLoadParentFramePointer:
@@ -1827,6 +1857,18 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsSimd128(node), VisitF64x2Abs(node);
     case IrOpcode::kF64x2Neg:
       return MarkAsSimd128(node), VisitF64x2Neg(node);
+    case IrOpcode::kF64x2Add:
+      return MarkAsSimd128(node), VisitF64x2Add(node);
+    case IrOpcode::kF64x2Sub:
+      return MarkAsSimd128(node), VisitF64x2Sub(node);
+    case IrOpcode::kF64x2Mul:
+      return MarkAsSimd128(node), VisitF64x2Mul(node);
+    case IrOpcode::kF64x2Div:
+      return MarkAsSimd128(node), VisitF64x2Div(node);
+    case IrOpcode::kF64x2Min:
+      return MarkAsSimd128(node), VisitF64x2Min(node);
+    case IrOpcode::kF64x2Max:
+      return MarkAsSimd128(node), VisitF64x2Max(node);
     case IrOpcode::kF64x2Eq:
       return MarkAsSimd128(node), VisitF64x2Eq(node);
     case IrOpcode::kF64x2Ne:
@@ -1861,6 +1903,8 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsSimd128(node), VisitF32x4Sub(node);
     case IrOpcode::kF32x4Mul:
       return MarkAsSimd128(node), VisitF32x4Mul(node);
+    case IrOpcode::kF32x4Div:
+      return MarkAsSimd128(node), VisitF32x4Div(node);
     case IrOpcode::kF32x4Min:
       return MarkAsSimd128(node), VisitF32x4Min(node);
     case IrOpcode::kF32x4Max:
@@ -1891,6 +1935,10 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsSimd128(node), VisitI64x2Sub(node);
     case IrOpcode::kI64x2Mul:
       return MarkAsSimd128(node), VisitI64x2Mul(node);
+    case IrOpcode::kI64x2MinS:
+      return MarkAsSimd128(node), VisitI64x2MinS(node);
+    case IrOpcode::kI64x2MaxS:
+      return MarkAsSimd128(node), VisitI64x2MaxS(node);
     case IrOpcode::kI64x2Eq:
       return MarkAsSimd128(node), VisitI64x2Eq(node);
     case IrOpcode::kI64x2Ne:
@@ -1901,6 +1949,10 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsSimd128(node), VisitI64x2GeS(node);
     case IrOpcode::kI64x2ShrU:
       return MarkAsSimd128(node), VisitI64x2ShrU(node);
+    case IrOpcode::kI64x2MinU:
+      return MarkAsSimd128(node), VisitI64x2MinU(node);
+    case IrOpcode::kI64x2MaxU:
+      return MarkAsSimd128(node), VisitI64x2MaxU(node);
     case IrOpcode::kI64x2GtU:
       return MarkAsSimd128(node), VisitI64x2GtU(node);
     case IrOpcode::kI64x2GeU:
@@ -2134,9 +2186,10 @@ void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
   EmitWordPoisonOnSpeculation(node);
 }
 
-void InstructionSelector::VisitLoadStackPointer(Node* node) {
-  OperandGenerator g(this);
-  Emit(kArchStackPointer, g.DefineAsRegister(node));
+void InstructionSelector::VisitStackPointerGreaterThan(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node);
+  VisitStackPointerGreaterThan(node, &cont);
 }
 
 void InstructionSelector::VisitLoadFramePointer(Node* node) {
@@ -2553,11 +2606,18 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
         // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
 
 #if !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_ARM64
 void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
@@ -2566,20 +2626,25 @@ void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
+#endif  // !V8_TARGET_ARCH_ARM64
+void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2MinS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2MaxS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2MaxU(Node* node) { UNIMPLEMENTED(); }
 #endif  // !V8_TARGET_ARCH_X64
 
 void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -2677,6 +2742,12 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
   OperandGenerator g(this);
   auto call_descriptor = CallDescriptorOf(node->op());
 
+  if (call_descriptor->NeedsCallerSavedRegisters()) {
+    Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(
+                                        call_descriptor->get_save_fp_mode())),
+         g.NoOutput());
+  }
+
   FrameStateDescriptor* frame_state_descriptor = nullptr;
   if (call_descriptor->NeedsFrameState()) {
     frame_state_descriptor = GetFrameStateDescriptor(
@@ -2745,18 +2816,13 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
   call_instr->MarkAsCall();
 
   EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node);
-}
 
-void InstructionSelector::VisitCallWithCallerSavedRegisters(
-    Node* node, BasicBlock* handler) {
-  OperandGenerator g(this);
-  const auto fp_mode = CallDescriptorOf(node->op())->get_save_fp_mode();
-  Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(fp_mode)),
-       g.NoOutput());
-  VisitCall(node, handler);
-  Emit(kArchRestoreCallerRegisters |
-           MiscField::encode(static_cast<int>(fp_mode)),
-       g.NoOutput());
+  if (call_descriptor->NeedsCallerSavedRegisters()) {
+    Emit(kArchRestoreCallerRegisters |
+             MiscField::encode(
+                 static_cast<int>(call_descriptor->get_save_fp_mode())),
+         g.NoOutput());
+  }
 }
 
 void InstructionSelector::VisitTailCall(Node* node) {
@@ -2764,7 +2830,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
   auto call_descriptor = CallDescriptorOf(node->op());
 
   CallDescriptor* caller = linkage()->GetIncomingDescriptor();
-  DCHECK(caller->CanTailCall(node));
+  DCHECK(caller->CanTailCall(CallDescriptorOf(node->op())));
   const CallDescriptor* callee = CallDescriptorOf(node->op());
   int stack_param_delta = callee->GetStackParameterDelta(caller);
   CallBuffer buffer(zone(), call_descriptor, nullptr);
@@ -2912,14 +2978,13 @@ void InstructionSelector::VisitTrapUnless(Node* node, TrapId trap_id) {
 }
 
 void InstructionSelector::EmitIdentity(Node* node) {
-  OperandGenerator g(this);
   MarkAsUsed(node->InputAt(0));
   SetRename(node, node->InputAt(0));
 }
 
 void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
                                           DeoptimizeReason reason,
-                                          VectorSlotPair const& feedback,
+                                          FeedbackSource const& feedback,
                                           Node* value) {
   EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason,
                  feedback, value);
@@ -2980,8 +3045,9 @@ bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
   return true;
 }
 
-FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
-    Node* state) {
+namespace {
+
+FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone, Node* state) {
   DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
   DCHECK_EQ(kFrameStateInputCount, state->InputCount());
   FrameStateInfo state_info = FrameStateInfoOf(state->op());
@@ -2999,13 +3065,24 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
   FrameStateDescriptor* outer_state = nullptr;
   Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
   if (outer_node->opcode() == IrOpcode::kFrameState) {
-    outer_state = GetFrameStateDescriptor(outer_node);
+    outer_state = GetFrameStateDescriptorInternal(zone, outer_node);
   }
 
-  return new (instruction_zone()) FrameStateDescriptor(
-      instruction_zone(), state_info.type(), state_info.bailout_id(),
-      state_info.state_combine(), parameters, locals, stack,
-      state_info.shared_info(), outer_state);
+  return new (zone)
+      FrameStateDescriptor(zone, state_info.type(), state_info.bailout_id(),
+                           state_info.state_combine(), parameters, locals,
+                           stack, state_info.shared_info(), outer_state);
+}
+
+}  // namespace
+
+FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
+    Node* state) {
+  auto* desc = GetFrameStateDescriptorInternal(instruction_zone(), state);
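+  // Record the largest unoptimized frame the deoptimizer could build for
+  // this code; it is later used to offset stack checks.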
+  *max_unoptimized_frame_height_ =
+      std::max(*max_unoptimized_frame_height_,
+               desc->total_conservative_frame_size_in_bytes());
+  return desc;
 }
 
 // static
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 16f88bb5167462..eb3e0984272a30 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -12,6 +12,7 @@
 #include "src/compiler/backend/instruction-scheduler.h"
 #include "src/compiler/backend/instruction.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
@@ -60,7 +61,7 @@ class FlagsContinuation final {
   static FlagsContinuation ForDeoptimize(FlagsCondition condition,
                                          DeoptimizeKind kind,
                                          DeoptimizeReason reason,
-                                         VectorSlotPair const& feedback,
+                                         FeedbackSource const& feedback,
                                          Node* frame_state) {
     return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
                              feedback, frame_state);
@@ -69,7 +70,7 @@ class FlagsContinuation final {
   // Creates a new flags continuation for an eager deoptimization exit.
   static FlagsContinuation ForDeoptimizeAndPoison(
       FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
-      VectorSlotPair const& feedback, Node* frame_state) {
+      FeedbackSource const& feedback, Node* frame_state) {
     return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
                              reason, feedback, frame_state);
   }
@@ -110,7 +111,7 @@ class FlagsContinuation final {
     DCHECK(IsDeoptimize());
     return reason_;
   }
-  VectorSlotPair const& feedback() const {
+  FeedbackSource const& feedback() const {
     DCHECK(IsDeoptimize());
     return feedback_;
   }
@@ -196,7 +197,7 @@ class FlagsContinuation final {
 
   FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                     DeoptimizeKind kind, DeoptimizeReason reason,
-                    VectorSlotPair const& feedback, Node* frame_state)
+                    FeedbackSource const& feedback, Node* frame_state)
       : mode_(mode),
         condition_(condition),
         kind_(kind),
@@ -226,7 +227,7 @@ class FlagsContinuation final {
   FlagsCondition condition_;
   DeoptimizeKind kind_;          // Only valid if mode_ == kFlags_deoptimize*
   DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize*
-  VectorSlotPair feedback_;      // Only valid if mode_ == kFlags_deoptimize*
+  FeedbackSource feedback_;      // Only valid if mode_ == kFlags_deoptimize*
   Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize*
                                  // or mode_ == kFlags_set.
   BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch*.
@@ -270,6 +271,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
       InstructionSequence* sequence, Schedule* schedule,
       SourcePositionTable* source_positions, Frame* frame,
       EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
+      size_t* max_unoptimized_frame_height,
       SourcePositionMode source_position_mode = kCallSourcePositions,
       Features features = SupportedFeatures(),
       EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
@@ -352,7 +354,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
                               InstructionOperand* outputs, size_t input_count,
                               InstructionOperand* inputs, DeoptimizeKind kind,
                               DeoptimizeReason reason,
-                              VectorSlotPair const& feedback,
+                              FeedbackSource const& feedback,
                               Node* frame_state);
 
   // ===========================================================================
@@ -446,7 +448,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
 
   // Check if we can generate loads and stores of ExternalConstants relative
   // to the roots register.
-  bool CanAddressRelativeToRootsRegister() const;
+  bool CanAddressRelativeToRootsRegister(
+      const ExternalReference& reference) const;
   // Check if we can use the roots register to access GC roots.
   bool CanUseRootsRegister() const;
 
@@ -496,7 +499,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
 
   void AppendDeoptimizeArguments(InstructionOperandVector* args,
                                  DeoptimizeKind kind, DeoptimizeReason reason,
-                                 VectorSlotPair const& feedback,
+                                 FeedbackSource const& feedback,
                                  Node* frame_state);
 
   void EmitTableSwitch(
@@ -543,7 +546,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   void MarkAsSimd128(Node* node) {
     MarkAsRepresentation(MachineRepresentation::kSimd128, node);
   }
-  void MarkAsReference(Node* node) {
+  void MarkAsTagged(Node* node) {
     MarkAsRepresentation(MachineRepresentation::kTagged, node);
   }
   void MarkAsCompressed(Node* node) {
@@ -621,8 +624,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   void VisitProjection(Node* node);
   void VisitConstant(Node* node);
   void VisitCall(Node* call, BasicBlock* handler = nullptr);
-  void VisitCallWithCallerSavedRegisters(Node* call,
-                                         BasicBlock* handler = nullptr);
   void VisitDeoptimizeIf(Node* node);
   void VisitDeoptimizeUnless(Node* node);
   void VisitTrapIf(Node* node, TrapId trap_id);
@@ -632,7 +633,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
   void VisitSwitch(Node* node, const SwitchInfo& sw);
   void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
-                       VectorSlotPair const& feedback, Node* value);
+                       FeedbackSource const& feedback, Node* value);
   void VisitReturn(Node* ret);
   void VisitThrow(Node* node);
   void VisitRetain(Node* node);
@@ -640,6 +641,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   void VisitStaticAssert(Node* node);
   void VisitDeadValue(Node* node);
 
+  void VisitStackPointerGreaterThan(Node* node, FlagsContinuation* cont);
+
   void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
 
   void EmitWordPoisonOnSpeculation(Node* node);
@@ -782,6 +785,10 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   ZoneVector<std::pair<int, int>> instr_origins_;
   EnableTraceTurboJson trace_turbo_;
   TickCounter* const tick_counter_;
+
+  // Stores the maximum unoptimized frame height, which is later used to
+  // apply an offset to stack checks.
+  size_t* max_unoptimized_frame_height_;
 };
 
 }  // namespace compiler
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 09c7fe22c5f03e..06158b0c72e851 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -6,12 +6,14 @@
 
 #include <iomanip>
 
+#include "src/codegen/interface-descriptors.h"
 #include "src/codegen/register-configuration.h"
 #include "src/codegen/source-position.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/state-values-utils.h"
+#include "src/execution/frames.h"
 
 namespace v8 {
 namespace internal {
@@ -942,7 +944,7 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
 
 int InstructionSequence::AddDeoptimizationEntry(
     FrameStateDescriptor* descriptor, DeoptimizeKind kind,
-    DeoptimizeReason reason, VectorSlotPair const& feedback) {
+    DeoptimizeReason reason, FeedbackSource const& feedback) {
   int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
   deoptimization_entries_.push_back(
       DeoptimizationEntry(descriptor, kind, reason, feedback));
@@ -1002,6 +1004,59 @@ void InstructionSequence::SetRegisterConfigurationForTesting(
   GetRegConfig = InstructionSequence::RegisterConfigurationForTesting;
 }
 
+namespace {
+
+size_t GetConservativeFrameSizeInBytes(FrameStateType type,
+                                       size_t parameters_count,
+                                       size_t locals_count,
+                                       BailoutId bailout_id) {
+  switch (type) {
+    case FrameStateType::kInterpretedFunction: {
+      auto info = InterpretedFrameInfo::Conservative(
+          static_cast<int>(parameters_count), static_cast<int>(locals_count));
+      return info.frame_size_in_bytes();
+    }
+    case FrameStateType::kArgumentsAdaptor: {
+      auto info = ArgumentsAdaptorFrameInfo::Conservative(
+          static_cast<int>(parameters_count));
+      return info.frame_size_in_bytes();
+    }
+    case FrameStateType::kConstructStub: {
+      auto info = ConstructStubFrameInfo::Conservative(
+          static_cast<int>(parameters_count));
+      return info.frame_size_in_bytes();
+    }
+    case FrameStateType::kBuiltinContinuation:
+    case FrameStateType::kJavaScriptBuiltinContinuation:
+    case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
+      const RegisterConfiguration* config = RegisterConfiguration::Default();
+      auto info = BuiltinContinuationFrameInfo::Conservative(
+          static_cast<int>(parameters_count),
+          Builtins::CallInterfaceDescriptorFor(
+              Builtins::GetBuiltinFromBailoutId(bailout_id)),
+          config);
+      return info.frame_size_in_bytes();
+    }
+  }
+  UNREACHABLE();
+}
+
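+// Deoptimization materializes the whole chain of frames at once, so the
+// total conservatively sums this frame's size with those of all outer
+// (caller) frames.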
+size_t GetTotalConservativeFrameSizeInBytes(FrameStateType type,
+                                            size_t parameters_count,
+                                            size_t locals_count,
+                                            BailoutId bailout_id,
+                                            FrameStateDescriptor* outer_state) {
+  size_t outer_total_conservative_frame_size_in_bytes =
+      (outer_state == nullptr)
+          ? 0
+          : outer_state->total_conservative_frame_size_in_bytes();
+  return GetConservativeFrameSizeInBytes(type, parameters_count, locals_count,
+                                         bailout_id) +
+         outer_total_conservative_frame_size_in_bytes;
+}
+
+}  // namespace
+
 FrameStateDescriptor::FrameStateDescriptor(
     Zone* zone, FrameStateType type, BailoutId bailout_id,
     OutputFrameStateCombine state_combine, size_t parameters_count,
@@ -1014,10 +1069,35 @@ FrameStateDescriptor::FrameStateDescriptor(
       parameters_count_(parameters_count),
       locals_count_(locals_count),
       stack_count_(stack_count),
+      total_conservative_frame_size_in_bytes_(
+          GetTotalConservativeFrameSizeInBytes(
+              type, parameters_count, locals_count, bailout_id, outer_state)),
       values_(zone),
       shared_info_(shared_info),
       outer_state_(outer_state) {}
 
+size_t FrameStateDescriptor::GetHeight() const {
+  switch (type()) {
+    case FrameStateType::kInterpretedFunction:
+      return locals_count();  // The accumulator is *not* included.
+    case FrameStateType::kBuiltinContinuation:
+      // Custom, non-JS calling convention (that does not have a notion of
+      // a receiver or context).
+      return parameters_count();
+    case FrameStateType::kArgumentsAdaptor:
+    case FrameStateType::kConstructStub:
+    case FrameStateType::kJavaScriptBuiltinContinuation:
+    case FrameStateType::kJavaScriptBuiltinContinuationWithCatch:
+      // JS linkage. The parameters count
+      // - includes the receiver (input 1 in CreateArtificialFrameState, and
+      //   passed as part of stack parameters to
+      //   CreateJavaScriptBuiltinContinuationFrameState), and
+      // - does *not* include the context.
+      return parameters_count();
+  }
+  UNREACHABLE();
+}
+
 size_t FrameStateDescriptor::GetSize() const {
   return 1 + parameters_count() + locals_count() + stack_count() +
          (HasContext() ? 1 : 0);
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 9b322040551df4..f5f7f64c51e50d 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -17,6 +17,7 @@
 #include "src/common/globals.h"
 #include "src/compiler/backend/instruction-codes.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/frame.h"
 #include "src/compiler/opcodes.h"
 #include "src/numbers/double.h"
@@ -130,7 +131,7 @@ class V8_EXPORT_PRIVATE InstructionOperand {
 
   inline uint64_t GetCanonicalizedValue() const;
 
-  class KindField : public BitField64<Kind, 0, 3> {};
+  using KindField = BitField64<Kind, 0, 3>;
 
   uint64_t value_;
 };
@@ -331,20 +332,20 @@ class UnallocatedOperand final : public InstructionOperand {
 
   STATIC_ASSERT(KindField::kSize == 3);
 
-  class VirtualRegisterField : public BitField64<uint32_t, 3, 32> {};
+  using VirtualRegisterField = BitField64<uint32_t, 3, 32>;
 
   // BitFields for all unallocated operands.
-  class BasicPolicyField : public BitField64<BasicPolicy, 35, 1> {};
+  using BasicPolicyField = BitField64<BasicPolicy, 35, 1>;
 
   // BitFields specific to BasicPolicy::FIXED_SLOT.
-  class FixedSlotIndexField : public BitField64<int, 36, 28> {};
+  using FixedSlotIndexField = BitField64<int, 36, 28>;
 
   // BitFields specific to BasicPolicy::EXTENDED_POLICY.
-  class ExtendedPolicyField : public BitField64<ExtendedPolicy, 36, 3> {};
-  class LifetimeField : public BitField64<Lifetime, 39, 1> {};
-  class HasSecondaryStorageField : public BitField64<bool, 40, 1> {};
-  class FixedRegisterField : public BitField64<int, 41, 6> {};
-  class SecondaryStorageField : public BitField64<int, 47, 3> {};
+  using ExtendedPolicyField = BitField64<ExtendedPolicy, 36, 3>;
+  using LifetimeField = BitField64<Lifetime, 39, 1>;
+  using HasSecondaryStorageField = BitField64<bool, 40, 1>;
+  using FixedRegisterField = BitField64<int, 41, 6>;
+  using SecondaryStorageField = BitField64<int, 47, 3>;
 
  private:
   explicit UnallocatedOperand(int virtual_register)
@@ -373,7 +374,7 @@ class ConstantOperand : public InstructionOperand {
   INSTRUCTION_OPERAND_CASTS(ConstantOperand, CONSTANT)
 
   STATIC_ASSERT(KindField::kSize == 3);
-  class VirtualRegisterField : public BitField64<uint32_t, 3, 32> {};
+  using VirtualRegisterField = BitField64<uint32_t, 3, 32>;
 };
 
 class ImmediateOperand : public InstructionOperand {
@@ -406,8 +407,8 @@ class ImmediateOperand : public InstructionOperand {
   INSTRUCTION_OPERAND_CASTS(ImmediateOperand, IMMEDIATE)
 
   STATIC_ASSERT(KindField::kSize == 3);
-  class TypeField : public BitField64<ImmediateType, 3, 1> {};
-  class ValueField : public BitField64<int32_t, 32, 32> {};
+  using TypeField = BitField64<ImmediateType, 3, 1>;
+  using ValueField = BitField64<int32_t, 32, 32>;
 };
 
 class LocationOperand : public InstructionOperand {
@@ -509,9 +510,9 @@ class LocationOperand : public InstructionOperand {
   }
 
   STATIC_ASSERT(KindField::kSize == 3);
-  class LocationKindField : public BitField64<LocationKind, 3, 2> {};
-  class RepresentationField : public BitField64<MachineRepresentation, 5, 8> {};
-  class IndexField : public BitField64<int32_t, 35, 29> {};
+  using LocationKindField = BitField64<LocationKind, 3, 2>;
+  using RepresentationField = BitField64<MachineRepresentation, 5, 8>;
+  using IndexField = BitField64<int32_t, 35, 29>;
 };
 
 class V8_EXPORT_PRIVATE ExplicitOperand
@@ -1270,6 +1271,20 @@ class FrameStateDescriptor : public ZoneObject {
            type_ == FrameStateType::kConstructStub;
   }
 
+  // The frame height on the stack, in number of slots, as serialized into a
+  // Translation and later used by the deoptimizer. Does *not* include
+  // information from the chain of outer states. Unlike |GetSize| this does not
+  // always include parameters, locals, and stack slots; instead, the returned
+  // slot kinds depend on the frame type.
+  size_t GetHeight() const;
+
+  // Returns an overapproximation of the unoptimized stack frame size in bytes,
+  // as later produced by the deoptimizer. Considers both this and the chain of
+  // outer states.
+  size_t total_conservative_frame_size_in_bytes() const {
+    return total_conservative_frame_size_in_bytes_;
+  }
+
   size_t GetSize() const;
   size_t GetTotalSize() const;
   size_t GetFrameCount() const;
@@ -1283,12 +1298,13 @@ class FrameStateDescriptor : public ZoneObject {
   FrameStateType type_;
   BailoutId bailout_id_;
   OutputFrameStateCombine frame_state_combine_;
-  size_t parameters_count_;
-  size_t locals_count_;
-  size_t stack_count_;
+  const size_t parameters_count_;
+  const size_t locals_count_;
+  const size_t stack_count_;
+  const size_t total_conservative_frame_size_in_bytes_;
   StateValueList values_;
   MaybeHandle<SharedFunctionInfo> const shared_info_;
-  FrameStateDescriptor* outer_state_;
+  FrameStateDescriptor* const outer_state_;
 };
 
 // A deoptimization entry is a pair of the reason why we deoptimize and the
@@ -1297,7 +1313,7 @@ class DeoptimizationEntry final {
  public:
   DeoptimizationEntry() = default;
   DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
-                      DeoptimizeReason reason, VectorSlotPair const& feedback)
+                      DeoptimizeReason reason, FeedbackSource const& feedback)
       : descriptor_(descriptor),
         kind_(kind),
         reason_(reason),
@@ -1306,13 +1322,13 @@ class DeoptimizationEntry final {
   FrameStateDescriptor* descriptor() const { return descriptor_; }
   DeoptimizeKind kind() const { return kind_; }
   DeoptimizeReason reason() const { return reason_; }
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
  private:
   FrameStateDescriptor* descriptor_ = nullptr;
   DeoptimizeKind kind_ = DeoptimizeKind::kEager;
   DeoptimizeReason reason_ = DeoptimizeReason::kUnknown;
-  VectorSlotPair feedback_ = VectorSlotPair();
+  FeedbackSource feedback_ = FeedbackSource();
 };
 
 using DeoptimizationVector = ZoneVector<DeoptimizationEntry>;
@@ -1577,7 +1593,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final
 
   int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
                              DeoptimizeKind kind, DeoptimizeReason reason,
-                             VectorSlotPair const& feedback);
+                             FeedbackSource const& feedback);
   DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
   int GetDeoptimizationEntryCount() const {
     return static_cast<int>(deoptimization_entries_.size());
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 5cec4a8a16beff..239075392afb81 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -850,18 +850,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
+      CodeGenResult result = AssembleDeoptimizerCall(exit);
       if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
       AssembleReturn(instr->InputAt(0));
       break;
-    case kArchStackPointer:
-      __ mov(i.OutputRegister(), sp);
+    case kArchStackPointerGreaterThan:
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
       break;
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
@@ -2067,6 +2066,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                 i.InputSimd128Register(1));
       break;
     }
+    case kMipsF32x4Div: {
+      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+      __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                i.InputSimd128Register(1));
+      break;
+    }
     case kMipsF32x4Max: {
       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
       __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3015,6 +3020,9 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
   } else if (instr->arch_opcode() == kMipsCmp) {
     cc = FlagsConditionToConditionCmp(condition);
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+  } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+    cc = FlagsConditionToConditionCmp(condition);
+    __ Branch(tlabel, cc, sp, Operand(i.InputRegister(0)));
   } else if (instr->arch_opcode() == kMipsCmpS ||
              instr->arch_opcode() == kMipsCmpD) {
     bool predicate;
@@ -3444,6 +3452,42 @@ void CodeGenerator::AssembleConstructFrame() {
 
   const RegList saves = call_descriptor->CalleeSavedRegisters();
   const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+
+  if (required_slots > 0) {
+    DCHECK(frame_access_state()->has_frame());
+    if (info()->IsWasm() && required_slots > 128) {
+      // For WebAssembly functions with big frames we have to do the stack
+      // overflow check before we construct the frame. Otherwise we may not
+      // have enough space on the stack to call the runtime for the stack
+      // overflow.
+      Label done;
+
+      // If the frame is bigger than the stack, we throw the stack overflow
+      // exception unconditionally. Thereby we can avoid the integer overflow
+      // check in the condition code.
+      if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
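+        // The instance field holds the address of the real stack limit; load
+        // that address first, then the limit value it points to.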
+        __ Lw(
+             kScratchReg,
+             FieldMemOperand(kWasmInstanceRegister,
+                             WasmInstanceObject::kRealStackLimitAddressOffset));
+        __ Lw(kScratchReg, MemOperand(kScratchReg));
+        __ Addu(kScratchReg, kScratchReg,
+                Operand(required_slots * kSystemPointerSize));
+        __ Branch(&done, uge, sp, Operand(kScratchReg));
+      }
+
+      __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+      // We come from WebAssembly, there are no references for the GC.
+      ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+      RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+      if (FLAG_debug_code) {
+        __ stop();
+      }
+
+      __ bind(&done);
+    }
+  }
+
   const int returns = frame()->GetReturnSlotCount();
 
   // Skip callee-saved and return slots, which are pushed below.
@@ -3527,6 +3571,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() {}
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   MipsOperandConverter g(this, nullptr);
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 44e53ac044e13d..e8020d9e895661 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -165,6 +165,7 @@ namespace compiler {
   V(MipsF32x4AddHoriz)             \
   V(MipsF32x4Sub)                  \
   V(MipsF32x4Mul)                  \
+  V(MipsF32x4Div)                  \
   V(MipsF32x4Max)                  \
   V(MipsF32x4Min)                  \
   V(MipsF32x4Eq)                   \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 92ab3f93443c65..4e6aef52f49f70 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -51,6 +51,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kMipsF32x4Max:
     case kMipsF32x4Min:
     case kMipsF32x4Mul:
+    case kMipsF32x4Div:
     case kMipsF32x4Ne:
     case kMipsF32x4Neg:
     case kMipsF32x4RecipApprox:
@@ -1673,7 +1674,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
     case kMipsCmp:
       return 0;
     case kArchDebugBreak:
-    case kArchStackPointer:
     case kArchFramePointer:
     case kArchParentFramePointer:
     case kMipsShl:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 452e92a174989e..bb47262c6c32db 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -352,7 +352,8 @@ void InstructionSelector::VisitStore(Node* node) {
   MachineRepresentation rep = store_rep.representation();
 
   // TODO(mips): I guess this could be done in a better way.
-  if (write_barrier_kind != kNoWriteBarrier) {
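+  // The barrier is also elided wholesale when write barriers are statically
+  // disabled via FLAG_disable_write_barriers.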
+  if (write_barrier_kind != kNoWriteBarrier &&
+      V8_LIKELY(!FLAG_disable_write_barriers)) {
     DCHECK(CanBeTaggedPointer(rep));
     InstructionOperand inputs[3];
     size_t input_count = 0;
@@ -1529,6 +1530,15 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
 
 }  // namespace
 
+void InstructionSelector::VisitStackPointerGreaterThan(
+    Node* node, FlagsContinuation* cont) {
+  Node* const value = node->InputAt(0);
+  InstructionCode opcode = kArchStackPointerGreaterThan;
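+  // The sole input is the stack limit; the flags continuation decides whether
+  // the comparison feeds a branch or materializes a boolean result.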
+
+  MipsOperandGenerator g(this);
+  EmitWithContinuation(opcode, g.UseRegister(value), cont);
+}
+
 // Shared routine for word comparisons against zero.
 void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                                FlagsContinuation* cont) {
@@ -1607,6 +1617,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
         break;
       case IrOpcode::kWord32And:
         return VisitWordCompare(this, value, kMipsTst, cont, true);
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
       default:
         break;
     }
@@ -2041,6 +2054,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
   V(F32x4AddHoriz, kMipsF32x4AddHoriz)           \
   V(F32x4Sub, kMipsF32x4Sub)                     \
   V(F32x4Mul, kMipsF32x4Mul)                     \
+  V(F32x4Div, kMipsF32x4Div)                     \
   V(F32x4Max, kMipsF32x4Max)                     \
   V(F32x4Min, kMipsF32x4Min)                     \
   V(F32x4Eq, kMipsF32x4Eq)                       \
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index f746b52df67bf6..5682bed71a42cf 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -828,18 +828,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
+      CodeGenResult result = AssembleDeoptimizerCall(exit);
       if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
       AssembleReturn(instr->InputAt(0));
       break;
-    case kArchStackPointer:
-      __ mov(i.OutputRegister(), sp);
+    case kArchStackPointerGreaterThan:
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
       break;
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
@@ -2182,6 +2181,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                 i.InputSimd128Register(1));
       break;
     }
+    case kMips64F32x4Div: {
+      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+      __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                i.InputSimd128Register(1));
+      break;
+    }
     case kMips64F32x4Max: {
       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
       __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3140,6 +3145,9 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
   } else if (instr->arch_opcode() == kMips64Cmp) {
     cc = FlagsConditionToConditionCmp(condition);
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+  } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+    cc = FlagsConditionToConditionCmp(condition);
+    __ Branch(tlabel, cc, sp, Operand(i.InputRegister(0)));
   } else if (instr->arch_opcode() == kMips64CmpS ||
              instr->arch_opcode() == kMips64CmpD) {
     bool predicate;
@@ -3603,6 +3611,42 @@ void CodeGenerator::AssembleConstructFrame() {
 
   const RegList saves = call_descriptor->CalleeSavedRegisters();
   const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+
+  if (required_slots > 0) {
+    DCHECK(frame_access_state()->has_frame());
+    if (info()->IsWasm() && required_slots > 128) {
+      // For WebAssembly functions with big frames we have to do the stack
+      // overflow check before we construct the frame. Otherwise we may not
+      // have enough space on the stack to call the runtime for the stack
+      // overflow.
+      Label done;
+
+      // If the frame is bigger than the stack, we throw the stack overflow
+      // exception unconditionally. Thereby we can avoid the integer overflow
+      // check in the condition code.
+      if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+        __ Ld(
+             kScratchReg,
+             FieldMemOperand(kWasmInstanceRegister,
+                             WasmInstanceObject::kRealStackLimitAddressOffset));
+        __ Ld(kScratchReg, MemOperand(kScratchReg));
+        __ Daddu(kScratchReg, kScratchReg,
+                 Operand(required_slots * kSystemPointerSize));
+        __ Branch(&done, uge, sp, Operand(kScratchReg));
+      }
+
+      __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+      // We come from WebAssembly, there are no references for the GC.
+      ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+      RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+      if (FLAG_debug_code) {
+        __ stop();
+      }
+
+      __ bind(&done);
+    }
+  }
+
   const int returns = frame()->GetReturnSlotCount();
 
   // Skip callee-saved and return slots, which are pushed below.
@@ -3686,6 +3730,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() {}
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   MipsOperandConverter g(this, nullptr);
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index e375ee8d07dfd5..edc8924757d11d 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -195,6 +195,7 @@ namespace compiler {
   V(Mips64F32x4AddHoriz)                     \
   V(Mips64F32x4Sub)                          \
   V(Mips64F32x4Mul)                          \
+  V(Mips64F32x4Div)                          \
   V(Mips64F32x4Max)                          \
   V(Mips64F32x4Min)                          \
   V(Mips64F32x4Eq)                           \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 4dcafe41977a15..880b424c416e8b 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -79,6 +79,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kMips64F32x4Max:
     case kMips64F32x4Min:
     case kMips64F32x4Mul:
+    case kMips64F32x4Div:
     case kMips64F32x4Ne:
     case kMips64F32x4Neg:
     case kMips64F32x4RecipApprox:
@@ -1275,7 +1276,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
       return 0;
     case kArchRet:
       return AssemblerReturnLatency();
-    case kArchStackPointer:
     case kArchFramePointer:
       return 1;
     case kArchParentFramePointer:
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 95f11ebed1cd00..9c717ab1e91aa9 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -422,7 +422,8 @@ void InstructionSelector::VisitStore(Node* node) {
   MachineRepresentation rep = store_rep.representation();
 
   // TODO(mips): I guess this could be done in a better way.
-  if (write_barrier_kind != kNoWriteBarrier) {
+  if (write_barrier_kind != kNoWriteBarrier &&
+      V8_LIKELY(!FLAG_disable_write_barriers)) {
     DCHECK(CanBeTaggedPointer(rep));
     InstructionOperand inputs[3];
     size_t input_count = 0;
@@ -2090,6 +2091,15 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
 
 }  // namespace
 
+void InstructionSelector::VisitStackPointerGreaterThan(
+    Node* node, FlagsContinuation* cont) {
+  Node* const value = node->InputAt(0);
+  InstructionCode opcode = kArchStackPointerGreaterThan;
+
+  Mips64OperandGenerator g(this);
+  EmitWithContinuation(opcode, g.UseRegister(value), cont);
+}
+
 // Shared routine for word comparisons against zero.
 void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                                FlagsContinuation* cont) {
@@ -2199,6 +2209,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
       case IrOpcode::kWord32And:
       case IrOpcode::kWord64And:
         return VisitWordCompare(this, value, kMips64Tst, cont, true);
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
       default:
         break;
     }
@@ -2704,6 +2717,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
   V(F32x4AddHoriz, kMips64F32x4AddHoriz)           \
   V(F32x4Sub, kMips64F32x4Sub)                     \
   V(F32x4Mul, kMips64F32x4Mul)                     \
+  V(F32x4Div, kMips64F32x4Div)                     \
   V(F32x4Max, kMips64F32x4Max)                     \
   V(F32x4Min, kMips64F32x4Min)                     \
   V(F32x4Eq, kMips64F32x4Eq)                       \
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 5289812cb5f280..5c69bc34a12ee0 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -1024,13 +1024,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Label start_call;
       bool isWasmCapiFunction =
           linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
-      constexpr int offset = 12;
+      constexpr int offset = 9 * kInstrSize;
       if (isWasmCapiFunction) {
-        __ mflr(kScratchReg);
+        __ mflr(r0);
         __ bind(&start_call);
-        __ LoadPC(r0);
-        __ addi(r0, r0, Operand(offset));
-        __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+        __ LoadPC(kScratchReg);
+        __ addi(kScratchReg, kScratchReg, Operand(offset));
+        __ StoreP(kScratchReg,
+                  MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
         __ mtlr(r0);
       }
       if (instr->InputAt(0)->IsImmediate()) {
@@ -1040,11 +1041,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         Register func = i.InputRegister(0);
         __ CallCFunction(func, num_parameters);
       }
-      // TODO(miladfar): In the above block, r0 must be populated with the
-      // strictly-correct PC, which is the return address at this spot. The
-      // offset is set to 12 right now, which is counted from where we are
-      // binding to the label and ends at this spot. If failed, replace it it
-      // with the correct offset suggested. More info on f5ab7d3.
+      // TODO(miladfar): In the above block, kScratchReg must be populated with
+      // the strictly-correct PC, which is the return address at this spot. The
+      // offset is currently 36 (9 * kInstrSize), counted from where the label
+      // is bound up to this spot. If the CHECK below fails, replace the offset
+      // with the value the failure suggests. More info on f5ab7d3.
       if (isWasmCapiFunction)
         CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
 
@@ -1104,10 +1105,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
+      CodeGenResult result = AssembleDeoptimizerCall(exit);
       if (result != kSuccess) return result;
       break;
     }
@@ -1115,10 +1115,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       AssembleReturn(instr->InputAt(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kArchStackPointer:
-      __ mr(i.OutputRegister(), sp);
-      DCHECK_EQ(LeaveRC, i.OutputRCBit());
-      break;
     case kArchFramePointer:
       __ mr(i.OutputRegister(), fp);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1130,6 +1126,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ mr(i.OutputRegister(), fp);
       }
       break;
+    case kArchStackPointerGreaterThan: {
+      constexpr size_t kValueIndex = 0;
+      DCHECK(instr->InputAt(kValueIndex)->IsRegister());
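+      // Unsigned comparison of sp against the limit; the result is placed in
+      // cr0 for the code emitted by the flags continuation.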
+      __ cmpl(sp, i.InputRegister(kValueIndex), cr0);
+      break;
+    }
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                            i.InputDoubleRegister(0), DetermineStubCallMode());
@@ -2516,6 +2518,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() {}
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   PPCOperandConverter g(this, nullptr);
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index bfc77b9412a890..ef8490a7265398 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -65,17 +65,6 @@ class PPCOperandGenerator final : public OperandGenerator {
     }
     return false;
   }
-
-  // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
-  // register.
-  InstructionOperand UseRegisterOrStackPointer(Node* node) {
-    if (node->opcode() == IrOpcode::kLoadStackPointer) {
-      return LocationOperand(LocationOperand::EXPLICIT,
-                             LocationOperand::REGISTER,
-                             MachineRepresentation::kWord32, sp.code());
-    }
-    return UseRegister(node);
-  }
 };
 
 namespace {
@@ -267,7 +256,8 @@ void InstructionSelector::VisitStore(Node* node) {
     rep = store_rep.representation();
   }
 
-  if (write_barrier_kind != kNoWriteBarrier) {
+  if (write_barrier_kind != kNoWriteBarrier &&
+      V8_LIKELY(!FLAG_disable_write_barriers)) {
     DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
@@ -558,6 +548,15 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
   }
 }
 
+void InstructionSelector::VisitStackPointerGreaterThan(
+    Node* node, FlagsContinuation* cont) {
+  Node* const value = node->InputAt(0);
+  InstructionCode opcode = kArchStackPointerGreaterThan;
+
+  PPCOperandGenerator g(this);
+  EmitWithContinuation(opcode, g.UseRegister(value), cont);
+}
+
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitWord64Xor(Node* node) {
   PPCOperandGenerator g(this);
@@ -1456,15 +1455,15 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
 
   // Match immediates on left or right side of comparison.
   if (g.CanBeImmediate(right, immediate_mode)) {
-    VisitCompare(selector, opcode, g.UseRegisterOrStackPointer(left),
-                 g.UseImmediate(right), cont);
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
   } else if (g.CanBeImmediate(left, immediate_mode)) {
     if (!commutative) cont->Commute();
-    VisitCompare(selector, opcode, g.UseRegisterOrStackPointer(right),
-                 g.UseImmediate(left), cont);
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
   } else {
-    VisitCompare(selector, opcode, g.UseRegisterOrStackPointer(left),
-                 g.UseRegisterOrStackPointer(right), cont);
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
   }
 }
 
@@ -1639,6 +1638,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
 // case IrOpcode::kWord64Shr:
 // case IrOpcode::kWord64Ror:
 #endif
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
       default:
         break;
     }
@@ -2281,6 +2283,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
 
+void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
+
 void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 44701f8159385c..21eef0485c5952 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -2989,34 +2989,72 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
 }
 
 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
-    LiveRange* range, LifetimePosition pos) {
+    LiveRange* range, LifetimePosition pos, SpillMode spill_mode,
+    LiveRange** begin_spill_out) {
+  *begin_spill_out = range;
+  // TODO(herhut): Be more clever here as long as we do not move pos out of
+  // deferred code.
+  if (spill_mode == SpillMode::kSpillDeferred) return pos;
   const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
   const InstructionBlock* loop_header =
       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
-
   if (loop_header == nullptr) return pos;
 
-  const UsePosition* prev_use =
-      range->PreviousUsePositionRegisterIsBeneficial(pos);
-
-  while (loop_header != nullptr) {
-    // We are going to spill live range inside the loop.
-    // If possible try to move spilling position backwards to loop header.
-    // This will reduce number of memory moves on the back edge.
-    LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
-        loop_header->first_instruction_index());
-
-    if (range->Covers(loop_start)) {
-      if (prev_use == nullptr || prev_use->pos() < loop_start) {
+  if (data()->is_turbo_control_flow_aware_allocation()) {
+    while (loop_header != nullptr) {
+      // We are going to spill the live range inside the loop.
+      // If possible, try to move the spilling position backwards to the loop
+      // header; this reduces the number of memory moves on the back edge.
+      LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
+          loop_header->first_instruction_index());
+      auto& loop_header_state =
+          data()->GetSpillState(loop_header->rpo_number());
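+      // Walk the ranges recorded as live at the loop header, looking for a
+      // piece of this virtual register that still covers the loop start.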
+      for (LiveRange* live_at_header : loop_header_state) {
+        if (live_at_header->TopLevel() != range->TopLevel() ||
+            !live_at_header->Covers(loop_start) || live_at_header->spilled()) {
+          continue;
+        }
+        LiveRange* check_use = live_at_header;
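+        // Scan the split pieces between the header and pos; a register-
+        // beneficial use in that window makes hoisting the spill unprofitable.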
+        for (; check_use != nullptr && check_use->Start() < pos;
+             check_use = check_use->next()) {
+          UsePosition* next_use =
+              check_use->NextUsePositionRegisterIsBeneficial(loop_start);
+          if (next_use != nullptr && next_use->pos() < pos) {
+            return pos;
+          }
+        }
         // No register beneficial use inside the loop before the pos.
+        *begin_spill_out = live_at_header;
         pos = loop_start;
+        break;
       }
+
+      // Try hoisting out to an outer loop.
+      loop_header = GetContainingLoop(code(), loop_header);
     }
+  } else {
+    const UsePosition* prev_use =
+        range->PreviousUsePositionRegisterIsBeneficial(pos);
+
+    while (loop_header != nullptr) {
+      // We are going to spill the live range inside the loop.
+      // If possible, try to move the spilling position backwards to the loop
+      // header while staying inside the current range; this reduces the
+      // number of memory moves on the back edge.
+      LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
+          loop_header->first_instruction_index());
+
+      if (range->Covers(loop_start)) {
+        if (prev_use == nullptr || prev_use->pos() < loop_start) {
+          // No register beneficial use inside the loop before the pos.
+          pos = loop_start;
+        }
+      }
 
-    // Try hoisting out to an outer loop.
-    loop_header = GetContainingLoop(code(), loop_header);
+      // Try hoisting out to an outer loop.
+      loop_header = GetContainingLoop(code(), loop_header);
+    }
   }
-
   return pos;
 }
 
@@ -3064,6 +3102,28 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
   inactive_live_ranges().reserve(8);
 }
 
+void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range,
+                                                   LifetimePosition begin_pos,
+                                                   LiveRange* end_range) {
+  // Spill begin_range after begin_pos, then spill every live range of this
+  // virtual register up to but excluding end_range.
+  DCHECK(begin_range->Covers(begin_pos));
+  DCHECK_EQ(begin_range->TopLevel(), end_range->TopLevel());
+
+  if (begin_range != end_range) {
+    DCHECK_LE(begin_range->End(), end_range->Start());
+    if (!begin_range->spilled()) {
+      SpillAfter(begin_range, begin_pos, SpillMode::kSpillAtDefinition);
+    }
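+    // The pieces that follow lie entirely within the spilled region, so they
+    // are spilled in full rather than after a position.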
+    for (LiveRange* range = begin_range->next(); range != end_range;
+         range = range->next()) {
+      if (!range->spilled()) {
+        range->Spill();
+      }
+    }
+  }
+}
+
 void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) {
   if (range->next() != nullptr && range->next()->ShouldRecombine()) {
     LiveRange* to_remove = range->next();
@@ -4407,11 +4467,10 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
     }
 
     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
-    // TODO(herhut): Be more clever here as long as we do not move split_pos
-    // out of deferred code.
-    LifetimePosition spill_pos = spill_mode == SpillMode::kSpillDeferred
-                                     ? split_pos
-                                     : FindOptimalSpillingPos(range, split_pos);
+    LiveRange* begin_spill = nullptr;
+    LifetimePosition spill_pos =
+        FindOptimalSpillingPos(range, split_pos, spill_mode, &begin_spill);
+    MaybeSpillPreviousRanges(begin_spill, spill_pos, range);
     if (next_pos == nullptr) {
       SpillAfter(range, spill_pos, spill_mode);
     } else {
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 55f8a8dd1f608a..bc7b09d147dd06 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -1238,7 +1238,9 @@ class RegisterAllocator : public ZoneObject {
   // If we are trying to spill a range inside the loop try to
   // hoist spill position out to the point just before the loop.
   LifetimePosition FindOptimalSpillingPos(LiveRange* range,
-                                          LifetimePosition pos);
+                                          LifetimePosition pos,
+                                          SpillMode spill_mode,
+                                          LiveRange** begin_spill_out);
 
   const ZoneVector<TopLevelLiveRange*>& GetFixedRegisters() const;
   const char* RegisterName(int allocation_index) const;
@@ -1292,6 +1294,9 @@ class LinearScanAllocator final : public RegisterAllocator {
       ZoneUnorderedSet<RangeWithRegister, RangeWithRegister::Hash,
                        RangeWithRegister::Equals>;
 
+  void MaybeSpillPreviousRanges(LiveRange* begin_range,
+                                LifetimePosition begin_pos,
+                                LiveRange* end_range);
   void MaybeUndoPreviousSplit(LiveRange* range);
   void SpillNotLiveRanges(
       RangeWithRegisterSet& to_be_live,  // NOLINT(runtime/references)
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 6457b7c8b44493..4c2d862fc44a1b 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1578,19 +1578,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
+      CodeGenResult result = AssembleDeoptimizerCall(exit);
       if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
       AssembleReturn(instr->InputAt(0));
       break;
-    case kArchStackPointer:
-      __ LoadRR(i.OutputRegister(), sp);
-      break;
     case kArchFramePointer:
       __ LoadRR(i.OutputRegister(), fp);
       break;
@@ -1601,6 +1597,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ LoadRR(i.OutputRegister(), fp);
       }
       break;
+    case kArchStackPointerGreaterThan: {
+      constexpr size_t kValueIndex = 0;
+      DCHECK(instr->InputAt(kValueIndex)->IsRegister());
+      __ CmpLogicalP(sp, i.InputRegister(kValueIndex));
+      break;
+    }
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                            i.InputDoubleRegister(0), DetermineStubCallMode());
@@ -3193,6 +3195,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() {}
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   S390OperandConverter g(this, nullptr);
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 99d3b0fa0f0acf..7f3277fc68d831 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -243,17 +243,6 @@ class S390OperandGenerator final : public OperandGenerator {
   bool Is64BitOperand(Node* node) {
     return MachineRepresentation::kWord64 == GetRepresentation(node);
   }
-
-  // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
-  // register.
-  InstructionOperand UseRegisterOrStackPointer(Node* node) {
-    if (node->opcode() == IrOpcode::kLoadStackPointer) {
-      return LocationOperand(LocationOperand::EXPLICIT,
-                             LocationOperand::REGISTER,
-                             MachineRepresentation::kWord32, sp.code());
-    }
-    return UseRegister(node);
-  }
 };
 
 namespace {
@@ -727,7 +716,8 @@ static void VisitGeneralStore(
   Node* base = node->InputAt(0);
   Node* offset = node->InputAt(1);
   Node* value = node->InputAt(2);
-  if (write_barrier_kind != kNoWriteBarrier) {
+  if (write_barrier_kind != kNoWriteBarrier &&
+      V8_LIKELY(!FLAG_disable_write_barriers)) {
     DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
@@ -837,6 +827,15 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 // Architecture supports unaligned access, therefore VisitStore is used instead
 void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
 
+void InstructionSelector::VisitStackPointerGreaterThan(
+    Node* node, FlagsContinuation* cont) {
+  Node* const value = node->InputAt(0);
+  InstructionCode opcode = kArchStackPointerGreaterThan;
+
+  S390OperandGenerator g(this);
+  EmitWithContinuation(opcode, g.UseRegister(value), cont);
+}
+
 #if 0
 static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
   int mask_width = base::bits::CountPopulation(value);
@@ -1681,7 +1680,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
     return VisitLoadAndTest(selector, load_and_test, node, left, cont, true);
   }
 
-  inputs[input_count++] = g.UseRegisterOrStackPointer(left);
+  inputs[input_count++] = g.UseRegister(left);
   if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
     // generate memory operand
     AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
@@ -2008,6 +2007,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
         // doesn't generate cc, so ignore
         break;
 #endif
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
       default:
         break;
     }
@@ -2689,6 +2691,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
 
+void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
+
 void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index a108edeff0e592..a4f82b153b6387 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -155,10 +155,18 @@ class X64OperandConverter : public InstructionOperandConverter {
 
 namespace {
 
+bool HasAddressingMode(Instruction* instr) {
+  return instr->addressing_mode() != kMode_None;
+}
+
 bool HasImmediateInput(Instruction* instr, size_t index) {
   return instr->InputAt(index)->IsImmediate();
 }
 
+bool HasRegisterInput(Instruction* instr, size_t index) {
+  return instr->InputAt(index)->IsRegister();
+}
+
 class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
  public:
   OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
@@ -210,6 +218,10 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
       // Just encode the stub index. This will be patched when the code
       // is added to the native module and copied into wasm code space.
       __ near_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+    } else if (tasm()->options().inline_offheap_trampolines) {
+      // With embedded builtins we do not need the isolate here. This allows
+      // the call to be generated asynchronously.
+      __ CallBuiltin(Builtins::kDoubleToI);
     } else {
       __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
     }
@@ -380,60 +392,60 @@ void EmitWordLoadPoisoningIfNeeded(
     }                                    \
   } while (false)
 
-#define ASSEMBLE_BINOP(asm_instr)                                     \
-  do {                                                                \
-    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
-      size_t index = 1;                                               \
-      Operand right = i.MemoryOperand(&index);                        \
-      __ asm_instr(i.InputRegister(0), right);                        \
-    } else {                                                          \
-      if (HasImmediateInput(instr, 1)) {                              \
-        if (instr->InputAt(0)->IsRegister()) {                        \
-          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
-        } else {                                                      \
-          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
-        }                                                             \
-      } else {                                                        \
-        if (instr->InputAt(1)->IsRegister()) {                        \
-          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
-        } else {                                                      \
-          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
-        }                                                             \
-      }                                                               \
-    }                                                                 \
+#define ASSEMBLE_BINOP(asm_instr)                                \
+  do {                                                           \
+    if (HasAddressingMode(instr)) {                              \
+      size_t index = 1;                                          \
+      Operand right = i.MemoryOperand(&index);                   \
+      __ asm_instr(i.InputRegister(0), right);                   \
+    } else {                                                     \
+      if (HasImmediateInput(instr, 1)) {                         \
+        if (HasRegisterInput(instr, 0)) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
+        } else {                                                 \
+          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
+        }                                                        \
+      } else {                                                   \
+        if (HasRegisterInput(instr, 1)) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
+        } else {                                                 \
+          __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
+        }                                                        \
+      }                                                          \
+    }                                                            \
   } while (false)
 
-#define ASSEMBLE_COMPARE(asm_instr)                                   \
-  do {                                                                \
-    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
-      size_t index = 0;                                               \
-      Operand left = i.MemoryOperand(&index);                         \
-      if (HasImmediateInput(instr, index)) {                          \
-        __ asm_instr(left, i.InputImmediate(index));                  \
-      } else {                                                        \
-        __ asm_instr(left, i.InputRegister(index));                   \
-      }                                                               \
-    } else {                                                          \
-      if (HasImmediateInput(instr, 1)) {                              \
-        if (instr->InputAt(0)->IsRegister()) {                        \
-          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
-        } else {                                                      \
-          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
-        }                                                             \
-      } else {                                                        \
-        if (instr->InputAt(1)->IsRegister()) {                        \
-          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
-        } else {                                                      \
-          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
-        }                                                             \
-      }                                                               \
-    }                                                                 \
+#define ASSEMBLE_COMPARE(asm_instr)                              \
+  do {                                                           \
+    if (HasAddressingMode(instr)) {                              \
+      size_t index = 0;                                          \
+      Operand left = i.MemoryOperand(&index);                    \
+      if (HasImmediateInput(instr, index)) {                     \
+        __ asm_instr(left, i.InputImmediate(index));             \
+      } else {                                                   \
+        __ asm_instr(left, i.InputRegister(index));              \
+      }                                                          \
+    } else {                                                     \
+      if (HasImmediateInput(instr, 1)) {                         \
+        if (HasRegisterInput(instr, 0)) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
+        } else {                                                 \
+          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
+        }                                                        \
+      } else {                                                   \
+        if (HasRegisterInput(instr, 1)) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
+        } else {                                                 \
+          __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
+        }                                                        \
+      }                                                          \
+    }                                                            \
   } while (false)
 
 #define ASSEMBLE_MULT(asm_instr)                              \
   do {                                                        \
     if (HasImmediateInput(instr, 1)) {                        \
-      if (instr->InputAt(0)->IsRegister()) {                  \
+      if (HasRegisterInput(instr, 0)) {                       \
         __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                      i.InputImmediate(1));                    \
       } else {                                                \
@@ -441,7 +453,7 @@ void EmitWordLoadPoisoningIfNeeded(
                      i.InputImmediate(1));                    \
       }                                                       \
     } else {                                                  \
-      if (instr->InputAt(1)->IsRegister()) {                  \
+      if (HasRegisterInput(instr, 1)) {                       \
         __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
       } else {                                                \
         __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
@@ -468,9 +480,9 @@ void EmitWordLoadPoisoningIfNeeded(
 
 #define ASSEMBLE_MOVX(asm_instr)                            \
   do {                                                      \
-    if (instr->addressing_mode() != kMode_None) {           \
+    if (HasAddressingMode(instr)) {                         \
       __ asm_instr(i.OutputRegister(), i.MemoryOperand());  \
-    } else if (instr->InputAt(0)->IsRegister()) {           \
+    } else if (HasRegisterInput(instr, 0)) {                \
       __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
     } else {                                                \
       __ asm_instr(i.OutputRegister(), i.InputOperand(0));  \
@@ -576,17 +588,18 @@ void EmitWordLoadPoisoningIfNeeded(
     __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \
   } while (false)
 
-#define ASSEMBLE_SIMD_ALL_TRUE(opcode)                       \
-  do {                                                       \
-    CpuFeatureScope sse_scope(tasm(), SSE4_1);               \
-    Register dst = i.OutputRegister();                       \
-    Register tmp = i.TempRegister(0);                        \
-    __ movq(tmp, Immediate(1));                              \
-    __ xorq(dst, dst);                                       \
-    __ pxor(kScratchDoubleReg, kScratchDoubleReg);           \
-    __ opcode(kScratchDoubleReg, i.InputSimd128Register(0)); \
-    __ ptest(kScratchDoubleReg, kScratchDoubleReg);          \
-    __ cmovq(zero, dst, tmp);                                \
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode)           \
+  do {                                           \
+    CpuFeatureScope sse_scope(tasm(), SSE4_1);   \
+    Register dst = i.OutputRegister();           \
+    Register tmp1 = i.TempRegister(0);           \
+    XMMRegister tmp2 = i.TempSimd128Register(1); \
+    __ movq(tmp1, Immediate(1));                 \
+    __ xorq(dst, dst);                           \
+    __ pxor(tmp2, tmp2);                         \
+    __ opcode(tmp2, i.InputSimd128Register(0));  \
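+    /* Lanes equal to zero become all-ones; ZF is set iff no lane is zero. */ \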
+    __ ptest(tmp2, tmp2);                        \
+    __ cmovq(zero, dst, tmp1);                   \
   } while (false)
 
 void CodeGenerator::AssembleDeconstructFrame() {
@@ -989,10 +1002,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
+      CodeGenResult result = AssembleDeoptimizerCall(exit);
       if (result != kSuccess) return result;
       unwinding_info_writer_.MarkBlockWillExit();
       break;
@@ -1000,9 +1012,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArchRet:
       AssembleReturn(instr->InputAt(0));
       break;
-    case kArchStackPointer:
-      __ movq(i.OutputRegister(), rsp);
-      break;
     case kArchFramePointer:
       __ movq(i.OutputRegister(), rbp);
       break;
@@ -1013,6 +1022,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ movq(i.OutputRegister(), rbp);
       }
       break;
+    case kArchStackPointerGreaterThan: {
+      constexpr size_t kValueIndex = 0;
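+      // The limit may arrive either as a register or as a memory operand
+      // folded in by the instruction selector, so handle both forms.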
+      if (HasAddressingMode(instr)) {
+        __ cmpq(rsp, i.MemoryOperand(kValueIndex));
+      } else {
+        __ cmpq(rsp, i.InputRegister(kValueIndex));
+      }
+      break;
+    }
     case kArchTruncateDoubleToI: {
       auto result = i.OutputRegister();
       auto input = i.InputDoubleRegister(0);
@@ -1176,14 +1194,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_MULT(imulq);
       break;
     case kX64ImulHigh32:
-      if (instr->InputAt(1)->IsRegister()) {
+      if (HasRegisterInput(instr, 1)) {
         __ imull(i.InputRegister(1));
       } else {
         __ imull(i.InputOperand(1));
       }
       break;
     case kX64UmulHigh32:
-      if (instr->InputAt(1)->IsRegister()) {
+      if (HasRegisterInput(instr, 1)) {
         __ mull(i.InputRegister(1));
       } else {
         __ mull(i.InputOperand(1));
@@ -1254,42 +1272,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_SHIFT(rorq, 6);
       break;
     case kX64Lzcnt:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Lzcntq(i.OutputRegister(), i.InputRegister(0));
       } else {
         __ Lzcntq(i.OutputRegister(), i.InputOperand(0));
       }
       break;
     case kX64Lzcnt32:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
       } else {
         __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
       }
       break;
     case kX64Tzcnt:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Tzcntq(i.OutputRegister(), i.InputRegister(0));
       } else {
         __ Tzcntq(i.OutputRegister(), i.InputOperand(0));
       }
       break;
     case kX64Tzcnt32:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Tzcntl(i.OutputRegister(), i.InputRegister(0));
       } else {
         __ Tzcntl(i.OutputRegister(), i.InputOperand(0));
       }
       break;
     case kX64Popcnt:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Popcntq(i.OutputRegister(), i.InputRegister(0));
       } else {
         __ Popcntq(i.OutputRegister(), i.InputOperand(0));
       }
       break;
     case kX64Popcnt32:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Popcntl(i.OutputRegister(), i.InputRegister(0));
       } else {
         __ Popcntl(i.OutputRegister(), i.InputOperand(0));
@@ -1321,16 +1339,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     case kSSEFloat32Abs: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
-      __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ Psrlq(kScratchDoubleReg, 33);
-      __ Andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+      XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
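+      // All-ones shifted right by 33 leaves 0x7fffffff in the low word of
+      // each 64-bit lane; ANDing it in clears the scalar's sign bit.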
+      __ Pcmpeqd(tmp, tmp);
+      __ Psrlq(tmp, 33);
+      __ Andps(i.OutputDoubleRegister(), tmp);
       break;
     }
     case kSSEFloat32Neg: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
-      __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ Psllq(kScratchDoubleReg, 31);
-      __ Xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+      XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
+      __ Pcmpeqd(tmp, tmp);
+      __ Psllq(tmp, 31);
+      __ Xorps(i.OutputDoubleRegister(), tmp);
       break;
     }
     case kSSEFloat32Sqrt:
@@ -1532,17 +1552,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kX64F64x2Abs:
     case kSSEFloat64Abs: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
-      __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ Psrlq(kScratchDoubleReg, 1);
-      __ Andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+      XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
+      __ Pcmpeqd(tmp, tmp);
+      __ Psrlq(tmp, 1);
+      __ Andpd(i.OutputDoubleRegister(), tmp);
       break;
     }
     case kX64F64x2Neg:
     case kSSEFloat64Neg: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
-      __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ Psllq(kScratchDoubleReg, 63);
-      __ Xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+      XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
+      __ Pcmpeqd(tmp, tmp);
+      __ Psllq(tmp, 63);
+      __ Xorpd(i.OutputDoubleRegister(), tmp);
       break;
     }
     case kSSEFloat64Sqrt:
@@ -1659,56 +1681,56 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kSSEInt32ToFloat64:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
     case kSSEInt32ToFloat32:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
     case kSSEInt64ToFloat32:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
     case kSSEInt64ToFloat64:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
     case kSSEUint64ToFloat32:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Cvtqui2ss(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Cvtqui2ss(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
     case kSSEUint64ToFloat64:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Cvtqui2sd(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Cvtqui2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
     case kSSEUint32ToFloat64:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Cvtlui2sd(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Cvtlui2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
     case kSSEUint32ToFloat32:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Cvtlui2ss(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Cvtlui2ss(i.OutputDoubleRegister(), i.InputOperand(0));
@@ -1729,21 +1751,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     case kSSEFloat64InsertLowWord32:
-      if (instr->InputAt(1)->IsRegister()) {
+      if (HasRegisterInput(instr, 1)) {
         __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
       } else {
         __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
       }
       break;
     case kSSEFloat64InsertHighWord32:
-      if (instr->InputAt(1)->IsRegister()) {
+      if (HasRegisterInput(instr, 1)) {
         __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
       } else {
         __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
       }
       break;
     case kSSEFloat64LoadLowWord32:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
@@ -1800,56 +1822,52 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kAVXFloat32Abs: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
-      __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
+      XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
+      __ vpcmpeqd(tmp, tmp, tmp);
+      __ vpsrlq(tmp, tmp, 33);
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
-                  i.InputDoubleRegister(0));
+        __ vandps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
       } else {
-        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
-                  i.InputOperand(0));
+        __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
       }
       break;
     }
     case kAVXFloat32Neg: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
-      __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
+      XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
+      __ vpcmpeqd(tmp, tmp, tmp);
+      __ vpsllq(tmp, tmp, 31);
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
-                  i.InputDoubleRegister(0));
+        __ vxorps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
       } else {
-        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
-                  i.InputOperand(0));
+        __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
       }
       break;
     }
     case kAVXFloat64Abs: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
-      __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
+      XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
+      __ vpcmpeqd(tmp, tmp, tmp);
+      __ vpsrlq(tmp, tmp, 1);
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
-                  i.InputDoubleRegister(0));
+        __ vandpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
       } else {
-        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
-                  i.InputOperand(0));
+        __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
       }
       break;
     }
     case kAVXFloat64Neg: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
       CpuFeatureScope avx_scope(tasm(), AVX);
-      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
-      __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
+      XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
+      __ vpcmpeqd(tmp, tmp, tmp);
+      __ vpsllq(tmp, tmp, 63);
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
-                  i.InputDoubleRegister(0));
+        __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
       } else {
-        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
-                  i.InputOperand(0));
+        __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
       }
       break;
     }
@@ -1929,14 +1947,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kX64Movl:
       EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
       if (instr->HasOutput()) {
-        if (instr->addressing_mode() == kMode_None) {
-          if (instr->InputAt(0)->IsRegister()) {
+        if (HasAddressingMode(instr)) {
+          __ movl(i.OutputRegister(), i.MemoryOperand());
+        } else {
+          if (HasRegisterInput(instr, 0)) {
             __ movl(i.OutputRegister(), i.InputRegister(0));
           } else {
             __ movl(i.OutputRegister(), i.InputOperand(0));
           }
-        } else {
-          __ movl(i.OutputRegister(), i.MemoryOperand());
         }
         __ AssertZeroExtended(i.OutputRegister());
       } else {
@@ -2002,12 +2020,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
-    case kX64CompressSigned:   // Fall through.
-    case kX64CompressPointer:  // Fall through.
-    case kX64CompressAny: {
-      ASSEMBLE_MOVX(movl);
-      break;
-    }
     case kX64Movq:
       EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
       if (instr->HasOutput()) {
@@ -2082,14 +2094,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     case kX64BitcastIF:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Movss(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
     case kX64BitcastLD:
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ Movq(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
         __ Movsd(i.OutputDoubleRegister(), i.InputOperand(0));
@@ -2177,7 +2189,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ incl(i.OutputRegister());
       break;
     case kX64Push:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+      if (HasAddressingMode(instr)) {
         size_t index = 0;
         Operand operand = i.MemoryOperand(&index);
         __ pushq(operand);
@@ -2189,7 +2201,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         frame_access_state()->IncreaseSPDelta(1);
         unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                          kSystemPointerSize);
-      } else if (instr->InputAt(0)->IsRegister()) {
+      } else if (HasRegisterInput(instr, 0)) {
         __ pushq(i.InputRegister(0));
         frame_access_state()->IncreaseSPDelta(1);
         unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
@@ -2256,11 +2268,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64F64x2Splat: {
+      CpuFeatureScope sse_scope(tasm(), SSE3);
       XMMRegister dst = i.OutputSimd128Register();
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ pshufd(dst, i.InputDoubleRegister(0), 0x44);
+        __ movddup(dst, i.InputDoubleRegister(0));
       } else {
-        __ pshufd(dst, i.InputOperand(0), 0x44);
+        __ movddup(dst, i.InputOperand(0));
       }
       break;
     }
@@ -2280,6 +2293,61 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ movq(i.OutputDoubleRegister(), kScratchRegister);
       break;
     }
+    case kX64F64x2Add: {
+      ASSEMBLE_SSE_BINOP(addpd);
+      break;
+    }
+    case kX64F64x2Sub: {
+      ASSEMBLE_SSE_BINOP(subpd);
+      break;
+    }
+    case kX64F64x2Mul: {
+      ASSEMBLE_SSE_BINOP(mulpd);
+      break;
+    }
+    case kX64F64x2Div: {
+      ASSEMBLE_SSE_BINOP(divpd);
+      break;
+    }
+    case kX64F64x2Min: {
+      XMMRegister src1 = i.InputSimd128Register(1),
+                  dst = i.OutputSimd128Register();
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      // The minpd instruction doesn't propagate NaNs and +0's in its first
+      // operand. Perform minpd in both orders, merge the results, and adjust.
+      __ movapd(kScratchDoubleReg, src1);
+      __ minpd(kScratchDoubleReg, dst);
+      __ minpd(dst, src1);
+      // Propagate -0's and NaNs, which may be non-canonical.
+      __ orpd(kScratchDoubleReg, dst);
+      // Canonicalize NaNs by quieting and clearing the payload.
+      __ cmppd(dst, kScratchDoubleReg, 3);
+      __ orpd(kScratchDoubleReg, dst);
+      __ psrlq(dst, 13);
+      __ andnpd(dst, kScratchDoubleReg);
+      break;
+    }
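+    // A scalar sketch of the Min sequence above (assuming IEEE-754
+    // binary64 lanes; minpd keeps its second operand on ties and NaNs):
+    //   tmp = min(b, a);  dst = min(a, b);
+    //   tmp |= dst;                  // merge -0.0 produced by either order
+    //   mask = unordered(dst, tmp);  // all-ones where a NaN was involved
+    //   tmp |= mask;
+    //   dst = ~(mask >> 13) & tmp;   // quiet NaN with a cleared payload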
+    case kX64F64x2Max: {
+      XMMRegister src1 = i.InputSimd128Register(1),
+                  dst = i.OutputSimd128Register();
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      // The maxpd instruction doesn't propagate NaNs and +0's in its first
+      // operand. Perform maxpd in both orders, merge the results, and adjust.
+      __ movapd(kScratchDoubleReg, src1);
+      __ maxpd(kScratchDoubleReg, dst);
+      __ maxpd(dst, src1);
+      // Find discrepancies.
+      __ xorpd(dst, kScratchDoubleReg);
+      // Propagate NaNs, which may be non-canonical.
+      __ orpd(kScratchDoubleReg, dst);
+      // Propagate sign discrepancy and (subtle) quiet NaNs.
+      __ subpd(kScratchDoubleReg, dst);
+      // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+      __ cmppd(dst, kScratchDoubleReg, 3);
+      __ psrlq(dst, 13);
+      __ andnpd(dst, kScratchDoubleReg);
+      break;
+    }
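+    // Max mirrors the Min sequence, but merges the two maxpd results
+    // with xorpd/subpd so that a +0/-0 tie resolves to +0 before the
+    // same unordered-compare/andnpd NaN canonicalization runs.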
     case kX64F64x2Eq: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
       __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
@@ -2406,6 +2474,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ mulps(i.OutputSimd128Register(), i.InputSimd128Register(1));
       break;
     }
+    case kX64F32x4Div: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      __ divps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
     case kX64F32x4Min: {
       XMMRegister src1 = i.InputSimd128Register(1),
                   dst = i.OutputSimd128Register();
@@ -2466,13 +2539,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I64x2Splat: {
+      CpuFeatureScope sse_scope(tasm(), SSE3);
       XMMRegister dst = i.OutputSimd128Register();
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ movq(dst, i.InputRegister(0));
       } else {
         __ movq(dst, i.InputOperand(0));
       }
-      __ pshufd(dst, dst, 0x44);
+      __ movddup(dst, dst);
       break;
     }
     case kX64I64x2ExtractLane: {
@@ -2482,7 +2556,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kX64I64x2ReplaceLane: {
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
-      if (instr->InputAt(2)->IsRegister()) {
+      if (HasRegisterInput(instr, 2)) {
         __ pinsrq(i.OutputSimd128Register(), i.InputRegister(2),
                   i.InputInt8(1));
       } else {
@@ -2502,7 +2576,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I64x2Shl: {
-      __ psllq(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movq(tmp, i.InputRegister(1));
+      __ psllq(i.OutputSimd128Register(), tmp);
       break;
     }
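+    // Pattern used by the variable shifts below as well: the SSE
+    // shift-by-register forms take the count from the low 64 bits of an
+    // XMM register, hence the movq from the general register into the
+    // SIMD temp first.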
     case kX64I64x2ShrS: {
@@ -2511,16 +2587,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // ShrS on each quadword one at a time
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src = i.InputSimd128Register(0);
+      Register tmp = i.ToRegister(instr->TempAt(0));
 
       // lower quadword
-      __ pextrq(kScratchRegister, src, 0x0);
-      __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
-      __ pinsrq(dst, kScratchRegister, 0x0);
+      __ pextrq(tmp, src, 0x0);
+      __ sarq_cl(tmp);
+      __ pinsrq(dst, tmp, 0x0);
 
       // upper quadword
-      __ pextrq(kScratchRegister, src, 0x1);
-      __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
-      __ pinsrq(dst, kScratchRegister, 0x1);
+      __ pextrq(tmp, src, 0x1);
+      __ sarq_cl(tmp);
+      __ pinsrq(dst, tmp, 0x1);
       break;
     }
     case kX64I64x2Add: {
@@ -2538,8 +2615,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
       XMMRegister left = i.InputSimd128Register(0);
       XMMRegister right = i.InputSimd128Register(1);
-      XMMRegister tmp1 = i.ToSimd128Register(instr->TempAt(0));
-      XMMRegister tmp2 = i.ToSimd128Register(instr->TempAt(1));
+      XMMRegister tmp1 = i.TempSimd128Register(0);
+      XMMRegister tmp2 = i.TempSimd128Register(1);
 
       __ movaps(tmp1, left);
       __ movaps(tmp2, right);
@@ -2559,6 +2636,66 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ paddq(left, tmp2);  // left == dst
       break;
     }
+    case kX64I64x2MinS: {
+      if (CpuFeatures::IsSupported(SSE4_2)) {
+        CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
+        XMMRegister dst = i.OutputSimd128Register();
+        XMMRegister src = i.InputSimd128Register(1);
+        XMMRegister tmp = i.TempSimd128Register(0);
+        DCHECK_EQ(dst, i.InputSimd128Register(0));
+        DCHECK_EQ(src, xmm0);
+
+        __ movaps(tmp, src);
+        __ pcmpgtq(src, dst);
+        __ blendvpd(tmp, dst);  // implicit use of xmm0 as mask
+        __ movaps(dst, tmp);
+      } else {
+        CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
+        XMMRegister dst = i.OutputSimd128Register();
+        XMMRegister src = i.InputSimd128Register(1);
+        XMMRegister tmp = i.TempSimd128Register(0);
+        Register tmp1 = i.TempRegister(1);
+        Register tmp2 = i.TempRegister(2);
+        DCHECK_EQ(dst, i.InputSimd128Register(0));
+        // back up src since we cannot change it
+        __ movaps(tmp, src);
+
+        // compare the lower quadwords
+        __ movq(tmp1, dst);
+        __ movq(tmp2, tmp);
+        __ cmpq(tmp1, tmp2);
+        // tmp2 now has the min of lower quadwords
+        __ cmovq(less_equal, tmp2, tmp1);
+        // tmp1 now has the higher quadword
+        // must do this before movq, as movq clears the top quadword
+        __ pextrq(tmp1, dst, 1);
+        // save tmp2 into dst
+        __ movq(dst, tmp2);
+        // tmp2 now has the higher quadword
+        __ pextrq(tmp2, tmp, 1);
+        // compare the higher quadwords
+        __ cmpq(tmp1, tmp2);
+        // tmp2 now has the min of higher quadwords
+        __ cmovq(less_equal, tmp2, tmp1);
+        __ movq(tmp, tmp2);
+        // dst = [tmp[0], dst[0]]
+        __ punpcklqdq(dst, tmp);
+      }
+      break;
+    }
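+    // A scalar sketch of the non-SSE4.2 path above (assuming
+    // two's-complement int64 lanes):
+    //   dst[0] = (int64_t)dst[0] <= (int64_t)src[0] ? dst[0] : src[0];
+    //   dst[1] = (int64_t)dst[1] <= (int64_t)src[1] ? dst[1] : src[1];
+    // realized with pextrq/cmpq/cmovq because pcmpgtq needs SSE4.2.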
+    case kX64I64x2MaxS: {
+      CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
+      XMMRegister dst = i.OutputSimd128Register();
+      XMMRegister src = i.InputSimd128Register(1);
+      XMMRegister tmp = i.TempSimd128Register(0);
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      DCHECK_EQ(src, xmm0);
+
+      __ movaps(tmp, src);
+      __ pcmpgtq(src, dst);
+      __ blendvpd(dst, tmp);  // implicit use of xmm0 as mask
+      break;
+    }
     case kX64I64x2Eq: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
@@ -2568,9 +2705,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kX64I64x2Ne: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      XMMRegister tmp = i.TempSimd128Register(0);
       __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
-      __ pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
-      __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+      __ pcmpeqq(tmp, tmp);
+      __ pxor(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I64x2GtS: {
@@ -2584,7 +2722,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CpuFeatureScope sse_scope(tasm(), SSE4_2);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src = i.InputSimd128Register(1);
-      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp = i.TempSimd128Register(0);
 
       __ movaps(tmp, src);
       __ pcmpgtq(tmp, dst);
@@ -2593,7 +2731,56 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I64x2ShrU: {
-      __ psrlq(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movq(tmp, i.InputRegister(1));
+      __ psrlq(i.OutputSimd128Register(), tmp);
+      break;
+    }
+    case kX64I64x2MinU: {
+      CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
+      CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
+      XMMRegister dst = i.OutputSimd128Register();
+      XMMRegister src = i.InputSimd128Register(1);
+      XMMRegister src_tmp = i.TempSimd128Register(0);
+      XMMRegister dst_tmp = i.TempSimd128Register(1);
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      DCHECK_EQ(src, xmm0);
+
+      __ movaps(src_tmp, src);
+      __ movaps(dst_tmp, dst);
+
+      __ pcmpeqd(src, src);
+      __ psllq(src, 63);
+
+      __ pxor(dst_tmp, src);
+      __ pxor(src, src_tmp);
+
+      __ pcmpgtq(src, dst_tmp);
+      __ blendvpd(src_tmp, dst);  // implicit use of xmm0 as mask
+      __ movaps(dst, src_tmp);
+      break;
+    }
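+    // The sign-bit biasing above works because, lane-wise,
+    //   (a ^ 0x8000000000000000) <s (b ^ 0x8000000000000000) <=> a <u b,
+    // so the signed pcmpgtq yields an unsigned-compare mask for blendvpd;
+    // kX64I64x2MaxU below applies the same trick with the blend reversed.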
+    case kX64I64x2MaxU: {
+      CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
+      CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
+      XMMRegister dst = i.OutputSimd128Register();
+      XMMRegister src = i.InputSimd128Register(1);
+      XMMRegister src_tmp = i.TempSimd128Register(0);
+      XMMRegister dst_tmp = i.TempSimd128Register(1);
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      DCHECK_EQ(src, xmm0);
+
+      __ movaps(src_tmp, src);
+      __ movaps(dst_tmp, dst);
+
+      __ pcmpeqd(src, src);
+      __ psllq(src, 63);
+
+      __ pxor(dst_tmp, src);
+      __ pxor(src, src_tmp);
+
+      __ pcmpgtq(src, dst_tmp);
+      __ blendvpd(dst, src_tmp);  // implicit use of xmm0 as mask
       break;
     }
     case kX64I64x2GtU: {
@@ -2601,7 +2788,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CpuFeatureScope sse_scope(tasm(), SSE4_2);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src = i.InputSimd128Register(1);
-      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp = i.TempSimd128Register(0);
 
       __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
       __ psllq(kScratchDoubleReg, 63);
@@ -2617,7 +2804,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CpuFeatureScope sse_scope(tasm(), SSE4_2);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src = i.InputSimd128Register(1);
-      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp = i.TempSimd128Register(0);
 
       __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
       __ psllq(kScratchDoubleReg, 63);
@@ -2632,7 +2819,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kX64I32x4Splat: {
       XMMRegister dst = i.OutputSimd128Register();
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ movd(dst, i.InputRegister(0));
       } else {
         __ movd(dst, i.InputOperand(0));
@@ -2647,7 +2834,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kX64I32x4ReplaceLane: {
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
-      if (instr->InputAt(2)->IsRegister()) {
+      if (HasRegisterInput(instr, 2)) {
         __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
                   i.InputInt8(1));
       } else {
@@ -2658,19 +2845,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kX64I32x4SConvertF32x4: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
       XMMRegister dst = i.OutputSimd128Register();
+      XMMRegister tmp = i.TempSimd128Register(0);
       // NAN->0
-      __ movaps(kScratchDoubleReg, dst);
-      __ cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
-      __ pand(dst, kScratchDoubleReg);
+      __ movaps(tmp, dst);
+      __ cmpeqps(tmp, tmp);
+      __ pand(dst, tmp);
       // Set top bit if >= 0 (but not -0.0!)
-      __ pxor(kScratchDoubleReg, dst);
+      __ pxor(tmp, dst);
       // Convert
       __ cvttps2dq(dst, dst);
       // Set top bit if >=0 is now < 0
-      __ pand(kScratchDoubleReg, dst);
-      __ psrad(kScratchDoubleReg, 31);
+      __ pand(tmp, dst);
+      __ psrad(tmp, 31);
       // Set positive overflow lanes to 0x7FFFFFFF
-      __ pxor(dst, kScratchDoubleReg);
+      __ pxor(dst, tmp);
       break;
     }
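+    // Sketch of the saturation logic above: cvttps2dq yields 0x80000000
+    // for NaN and out-of-range lanes, so NaN lanes are zeroed first and
+    // tmp tracks which lanes were non-negative before conversion; the
+    // final pxor flips positive-overflow lanes from 0x80000000 to
+    // 0x7FFFFFFF.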
     case kX64I32x4SConvertI16x8Low: {
@@ -2699,11 +2887,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I32x4Shl: {
-      __ pslld(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movq(tmp, i.InputRegister(1));
+      __ pslld(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I32x4ShrS: {
-      __ psrad(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movq(tmp, i.InputRegister(1));
+      __ psrad(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I32x4Add: {
@@ -2739,9 +2931,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I32x4Ne: {
+      XMMRegister tmp = i.TempSimd128Register(0);
       __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
-      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+      __ pcmpeqd(tmp, tmp);
+      __ pxor(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I32x4GtS: {
@@ -2760,24 +2953,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
-      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      XMMRegister tmp2 = i.TempSimd128Register(1);
       // NAN->0, negative->0
-      __ pxor(kScratchDoubleReg, kScratchDoubleReg);
-      __ maxps(dst, kScratchDoubleReg);
+      __ pxor(tmp2, tmp2);
+      __ maxps(dst, tmp2);
       // scratch: float representation of max_signed
-      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ psrld(kScratchDoubleReg, 1);                     // 0x7fffffff
-      __ cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg);  // 0x4f000000
+      __ pcmpeqd(tmp2, tmp2);
+      __ psrld(tmp2, 1);        // 0x7fffffff
+      __ cvtdq2ps(tmp2, tmp2);  // 0x4f000000
       // tmp: convert (src-max_signed).
       // Positive overflow lanes -> 0x7FFFFFFF
       // Negative lanes -> 0
       __ movaps(tmp, dst);
-      __ subps(tmp, kScratchDoubleReg);
-      __ cmpleps(kScratchDoubleReg, tmp);
+      __ subps(tmp, tmp2);
+      __ cmpleps(tmp2, tmp);
       __ cvttps2dq(tmp, tmp);
-      __ pxor(tmp, kScratchDoubleReg);
-      __ pxor(kScratchDoubleReg, kScratchDoubleReg);
-      __ pmaxsd(tmp, kScratchDoubleReg);
+      __ pxor(tmp, tmp2);
+      __ pxor(tmp2, tmp2);
+      __ pmaxsd(tmp, tmp2);
       // convert. Overflow lanes above max_signed will be 0x80000000
       __ cvttps2dq(dst, dst);
       // Add (src-max_signed) for overflow lanes.
@@ -2797,7 +2991,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I32x4ShrU: {
-      __ psrld(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movq(tmp, i.InputRegister(1));
+      __ psrld(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I32x4MinU: {
@@ -2814,10 +3010,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src = i.InputSimd128Register(1);
+      XMMRegister tmp = i.TempSimd128Register(0);
       __ pmaxud(dst, src);
       __ pcmpeqd(dst, src);
-      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ pxor(dst, kScratchDoubleReg);
+      __ pcmpeqd(tmp, tmp);
+      __ pxor(dst, tmp);
       break;
     }
     case kX64I32x4GeU: {
@@ -2835,7 +3032,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kX64I16x8Splat: {
       XMMRegister dst = i.OutputSimd128Register();
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ movd(dst, i.InputRegister(0));
       } else {
         __ movd(dst, i.InputOperand(0));
@@ -2853,7 +3050,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kX64I16x8ReplaceLane: {
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
-      if (instr->InputAt(2)->IsRegister()) {
+      if (HasRegisterInput(instr, 2)) {
         __ pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
                   i.InputInt8(1));
       } else {
@@ -2887,11 +3084,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I16x8Shl: {
-      __ psllw(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movq(tmp, i.InputRegister(1));
+      __ psllw(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I16x8ShrS: {
-      __ psraw(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movq(tmp, i.InputRegister(1));
+      __ psraw(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I16x8SConvertI32x4: {
@@ -2940,9 +3141,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I16x8Ne: {
+      XMMRegister tmp = i.TempSimd128Register(0);
       __ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
-      __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
-      __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+      __ pcmpeqw(tmp, tmp);
+      __ pxor(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I16x8GtS: {
@@ -2970,7 +3172,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I16x8ShrU: {
-      __ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
+      XMMRegister tmp = i.TempSimd128Register(0);
+      __ movq(tmp, i.InputRegister(1));
+      __ psrlw(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I16x8UConvertI32x4: {
@@ -3007,10 +3211,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src = i.InputSimd128Register(1);
+      XMMRegister tmp = i.TempSimd128Register(0);
       __ pmaxuw(dst, src);
       __ pcmpeqw(dst, src);
-      __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
-      __ pxor(dst, kScratchDoubleReg);
+      __ pcmpeqw(tmp, tmp);
+      __ pxor(dst, tmp);
       break;
     }
     case kX64I16x8GeU: {
@@ -3024,7 +3229,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kX64I8x16Splat: {
       CpuFeatureScope sse_scope(tasm(), SSSE3);
       XMMRegister dst = i.OutputSimd128Register();
-      if (instr->InputAt(0)->IsRegister()) {
+      if (HasRegisterInput(instr, 0)) {
         __ movd(dst, i.InputRegister(0));
       } else {
         __ movd(dst, i.InputOperand(0));
@@ -3042,7 +3247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kX64I8x16ReplaceLane: {
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
-      if (instr->InputAt(2)->IsRegister()) {
+      if (HasRegisterInput(instr, 2)) {
         __ pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
                   i.InputInt8(1));
       } else {
@@ -3071,31 +3276,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kX64I8x16Shl: {
       XMMRegister dst = i.OutputSimd128Register();
       DCHECK_EQ(dst, i.InputSimd128Register(0));
-      int8_t shift = i.InputInt8(1) & 0x7;
-      if (shift < 4) {
-        // For small shifts, doubling is faster.
-        for (int i = 0; i < shift; ++i) {
-          __ paddb(dst, dst);
-        }
-      } else {
-        // Mask off the unwanted bits before word-shifting.
-        __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
-        __ psrlw(kScratchDoubleReg, 8 + shift);
-        __ packuswb(kScratchDoubleReg, kScratchDoubleReg);
-        __ pand(dst, kScratchDoubleReg);
-        __ psllw(dst, shift);
-      }
+      // Temp registers for shift mask and additional moves to XMM registers.
+      Register tmp = i.ToRegister(instr->TempAt(0));
+      XMMRegister tmp_simd = i.TempSimd128Register(1);
+      // Mask off the unwanted bits before word-shifting.
+      __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+      __ movq(tmp, i.InputRegister(1));
+      __ addq(tmp, Immediate(8));
+      __ movq(tmp_simd, tmp);
+      __ psrlw(kScratchDoubleReg, tmp_simd);
+      __ packuswb(kScratchDoubleReg, kScratchDoubleReg);
+      __ pand(dst, kScratchDoubleReg);
+      __ movq(tmp_simd, i.InputRegister(1));
+      __ psllw(dst, tmp_simd);
       break;
     }
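+    // Mask construction sketch: psrlw of the all-ones register by
+    // (shift + 8) leaves (0x00FF >> shift) in each 16-bit lane, and
+    // packuswb replicates that byte, so the pand clears exactly the
+    // bits psllw would spill into the neighbouring byte.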
     case kX64I8x16ShrS: {
       XMMRegister dst = i.OutputSimd128Register();
-      XMMRegister src = i.InputSimd128Register(0);
-      int8_t shift = i.InputInt8(1) & 0x7;
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      // Temp registers for shift mask and additional moves to XMM registers.
+      Register tmp = i.ToRegister(instr->TempAt(0));
+      XMMRegister tmp_simd = i.TempSimd128Register(1);
       // Unpack the bytes into words, do arithmetic shifts, and repack.
-      __ punpckhbw(kScratchDoubleReg, src);
-      __ punpcklbw(dst, src);
-      __ psraw(kScratchDoubleReg, 8 + shift);
-      __ psraw(dst, 8 + shift);
+      __ punpckhbw(kScratchDoubleReg, dst);
+      __ punpcklbw(dst, dst);
+      // Prepare shift value
+      __ movq(tmp, i.InputRegister(1));
+      __ addq(tmp, Immediate(8));
+      __ movq(tmp_simd, tmp);
+      __ psraw(kScratchDoubleReg, tmp_simd);
+      __ psraw(dst, tmp_simd);
       __ packsswb(dst, kScratchDoubleReg);
       break;
     }
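+    // After punpcklbw(dst, dst) every 16-bit lane holds the byte in both
+    // halves, so shifting arithmetically by (shift + 8) leaves the
+    // sign-extended (byte >> shift) ready for packsswb to repack;
+    // kX64I8x16ShrU below uses the same layout with logical shifts.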
@@ -3119,7 +3329,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       XMMRegister dst = i.OutputSimd128Register();
       DCHECK_EQ(dst, i.InputSimd128Register(0));
       XMMRegister right = i.InputSimd128Register(1);
-      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp = i.TempSimd128Register(0);
       // I16x8 view of I8x16
       // left = AAaa AAaa ... AAaa AAaa
       // right= BBbb BBbb ... BBbb BBbb
@@ -3163,9 +3373,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I8x16Ne: {
+      XMMRegister tmp = i.TempSimd128Register(0);
       __ pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
-      __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
-      __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+      __ pcmpeqb(tmp, tmp);
+      __ pxor(i.OutputSimd128Register(), tmp);
       break;
     }
     case kX64I8x16GtS: {
@@ -3194,13 +3405,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kX64I8x16ShrU: {
       XMMRegister dst = i.OutputSimd128Register();
-      XMMRegister src = i.InputSimd128Register(0);
-      int8_t shift = i.InputInt8(1) & 0x7;
       // Unpack the bytes into words, do logical shifts, and repack.
-      __ punpckhbw(kScratchDoubleReg, src);
-      __ punpcklbw(dst, src);
-      __ psrlw(kScratchDoubleReg, 8 + shift);
-      __ psrlw(dst, 8 + shift);
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      // Temp registers for shift mask and additional moves to XMM registers.
+      Register tmp = i.ToRegister(instr->TempAt(0));
+      XMMRegister tmp_simd = i.TempSimd128Register(1);
+      __ punpckhbw(kScratchDoubleReg, dst);
+      __ punpcklbw(dst, dst);
+      // Prepare shift value
+      __ movq(tmp, i.InputRegister(1));
+      __ addq(tmp, Immediate(8));
+      __ movq(tmp_simd, tmp);
+      __ psrlw(kScratchDoubleReg, tmp_simd);
+      __ psrlw(dst, tmp_simd);
       __ packuswb(dst, kScratchDoubleReg);
       break;
     }
@@ -3226,10 +3443,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CpuFeatureScope sse_scope(tasm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src = i.InputSimd128Register(1);
+      XMMRegister tmp = i.TempSimd128Register(0);
       __ pmaxub(dst, src);
       __ pcmpeqb(dst, src);
-      __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
-      __ pxor(dst, kScratchDoubleReg);
+      __ pcmpeqb(tmp, tmp);
+      __ pxor(dst, tmp);
       break;
     }
     case kX64I8x16GeU: {
@@ -3561,9 +3779,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
       break;
     }
-    case kX64StackCheck:
-      __ CompareRoot(rsp, RootIndex::kStackLimit);
-      break;
     case kWord32AtomicExchangeInt8: {
       __ xchgb(i.InputRegister(0), i.MemoryOperand(1));
       __ movsxbl(i.InputRegister(0), i.InputRegister(0));
@@ -4167,6 +4382,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); }
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   X64OperandConverter g(this, nullptr);
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index d6ac3f43dfaa88..8a0a45a916afc6 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -140,9 +140,6 @@ namespace compiler {
   V(X64DecompressSigned)                  \
   V(X64DecompressPointer)                 \
   V(X64DecompressAny)                     \
-  V(X64CompressSigned)                    \
-  V(X64CompressPointer)                   \
-  V(X64CompressAny)                       \
   V(X64Movq)                              \
   V(X64Movsd)                             \
   V(X64Movss)                             \
@@ -158,12 +155,17 @@ namespace compiler {
   V(X64Push)                              \
   V(X64Poke)                              \
   V(X64Peek)                              \
-  V(X64StackCheck)                        \
   V(X64F64x2Splat)                        \
   V(X64F64x2ExtractLane)                  \
   V(X64F64x2ReplaceLane)                  \
   V(X64F64x2Abs)                          \
   V(X64F64x2Neg)                          \
+  V(X64F64x2Add)                          \
+  V(X64F64x2Sub)                          \
+  V(X64F64x2Mul)                          \
+  V(X64F64x2Div)                          \
+  V(X64F64x2Min)                          \
+  V(X64F64x2Max)                          \
   V(X64F64x2Eq)                           \
   V(X64F64x2Ne)                           \
   V(X64F64x2Lt)                           \
@@ -181,6 +183,7 @@ namespace compiler {
   V(X64F32x4AddHoriz)                     \
   V(X64F32x4Sub)                          \
   V(X64F32x4Mul)                          \
+  V(X64F32x4Div)                          \
   V(X64F32x4Min)                          \
   V(X64F32x4Max)                          \
   V(X64F32x4Eq)                           \
@@ -196,11 +199,15 @@ namespace compiler {
   V(X64I64x2Add)                          \
   V(X64I64x2Sub)                          \
   V(X64I64x2Mul)                          \
+  V(X64I64x2MinS)                         \
+  V(X64I64x2MaxS)                         \
   V(X64I64x2Eq)                           \
   V(X64I64x2Ne)                           \
   V(X64I64x2GtS)                          \
   V(X64I64x2GeS)                          \
   V(X64I64x2ShrU)                         \
+  V(X64I64x2MinU)                         \
+  V(X64I64x2MaxU)                         \
   V(X64I64x2GtU)                          \
   V(X64I64x2GeU)                          \
   V(X64I32x4Splat)                        \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 6389ef2e503f73..e9fa450c3820e7 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -129,6 +129,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64F64x2ReplaceLane:
     case kX64F64x2Abs:
     case kX64F64x2Neg:
+    case kX64F64x2Add:
+    case kX64F64x2Sub:
+    case kX64F64x2Mul:
+    case kX64F64x2Div:
+    case kX64F64x2Min:
+    case kX64F64x2Max:
     case kX64F64x2Eq:
     case kX64F64x2Ne:
     case kX64F64x2Lt:
@@ -146,6 +152,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64F32x4AddHoriz:
     case kX64F32x4Sub:
     case kX64F32x4Mul:
+    case kX64F32x4Div:
     case kX64F32x4Min:
     case kX64F32x4Max:
     case kX64F32x4Eq:
@@ -161,11 +168,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64I64x2Add:
     case kX64I64x2Sub:
     case kX64I64x2Mul:
+    case kX64I64x2MinS:
+    case kX64I64x2MaxS:
     case kX64I64x2Eq:
     case kX64I64x2Ne:
     case kX64I64x2GtS:
     case kX64I64x2GeS:
     case kX64I64x2ShrU:
+    case kX64I64x2MinU:
+    case kX64I64x2MaxU:
     case kX64I64x2GtU:
     case kX64I64x2GeU:
     case kX64I32x4Splat:
@@ -295,9 +306,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64DecompressSigned:
     case kX64DecompressPointer:
     case kX64DecompressAny:
-    case kX64CompressSigned:
-    case kX64CompressPointer:
-    case kX64CompressAny:
       return (instr->addressing_mode() == kMode_None)
                  ? kNoOpcodeFlags
                  : kIsLoadOperation | kHasSideEffect;
@@ -346,7 +354,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64Movdqu:
       return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
 
-    case kX64StackCheck:
     case kX64Peek:
       return kIsLoadOperation;
 
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index a4908fb846167b..5379074bac8666 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -170,9 +170,10 @@ class X64OperandGenerator final : public OperandGenerator {
   AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
-    if (selector()->CanAddressRelativeToRootsRegister()) {
+    {
       LoadMatcher<ExternalReferenceMatcher> m(operand);
-      if (m.index().HasValue() && m.object().HasValue()) {
+      if (m.index().HasValue() && m.object().HasValue() &&
+          selector()->CanAddressRelativeToRootsRegister(m.object().Value())) {
         ptrdiff_t const delta =
             m.index().Value() +
             TurboAssemblerBase::RootRegisterOffsetForExternalReference(
@@ -350,7 +351,8 @@ void InstructionSelector::VisitStore(Node* node) {
   StoreRepresentation store_rep = StoreRepresentationOf(node->op());
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
 
-  if (write_barrier_kind != kNoWriteBarrier) {
+  if (write_barrier_kind != kNoWriteBarrier &&
+      V8_LIKELY(!FLAG_disable_write_barriers)) {
     DCHECK(CanBeTaggedOrCompressedPointer(store_rep.representation()));
     AddressingMode addressing_mode;
     InstructionOperand inputs[] = {
@@ -528,6 +530,35 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
   }
 }
 
+void InstructionSelector::VisitStackPointerGreaterThan(
+    Node* node, FlagsContinuation* cont) {
+  Node* const value = node->InputAt(0);
+  InstructionCode opcode = kArchStackPointerGreaterThan;
+
+  DCHECK(cont->IsBranch());
+  const int effect_level =
+      GetEffectLevel(cont->true_block()->PredecessorAt(0)->control_input());
+
+  X64OperandGenerator g(this);
+  if (g.CanBeMemoryOperand(kX64Cmp, node, value, effect_level)) {
+    DCHECK_EQ(IrOpcode::kLoad, value->opcode());
+
+    // GetEffectiveAddressMemoryOperand can create at most 3 inputs.
+    static constexpr int kMaxInputCount = 3;
+
+    size_t input_count = 0;
+    InstructionOperand inputs[kMaxInputCount];
+    AddressingMode addressing_mode =
+        g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
+    opcode |= AddressingModeField::encode(addressing_mode);
+    DCHECK_LE(input_count, kMaxInputCount);
+
+    EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
+  } else {
+    EmitWithContinuation(opcode, g.UseRegister(value), cont);
+  }
+}
+
 namespace {
 
 bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector,
@@ -1238,23 +1269,23 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
 }
 
 void InstructionSelector::VisitChangeTaggedToCompressed(Node* node) {
-  X64OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kX64CompressAny, g.DefineAsRegister(node), g.Use(value));
+  // The top 32 bits in the 64-bit register will be undefined, and
+  // must not be used by a dependent node.
+  return EmitIdentity(node);
 }
 
 void InstructionSelector::VisitChangeTaggedPointerToCompressedPointer(
     Node* node) {
-  X64OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kX64CompressPointer, g.DefineAsRegister(node), g.Use(value));
+  // The top 32 bits in the 64-bit register will be undefined, and
+  // must not be used by a dependent node.
+  return EmitIdentity(node);
 }
 
 void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
     Node* node) {
-  X64OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kX64CompressSigned, g.DefineAsRegister(node), g.Use(value));
+  // The top 32 bits in the 64-bit register will be undefined, and
+  // must not be used by a dependent node.
+  return EmitIdentity(node);
 }
 
 void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
@@ -1338,10 +1369,13 @@ void VisitFloatBinop(InstructionSelector* selector, Node* node,
 void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
                     ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
   X64OperandGenerator g(selector);
+  InstructionOperand temps[] = {g.TempDoubleRegister()};
   if (selector->IsSupported(AVX)) {
-    selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
+    selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
+                   arraysize(temps), temps);
   } else {
-    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
+    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input),
+                   arraysize(temps), temps);
   }
 }
 
@@ -1838,30 +1872,6 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
           g.UseRegister(m.right().node()), cont);
     }
   }
-  if (selector->isolate() != nullptr) {
-    StackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> m(
-        selector->isolate(), node);
-    if (m.Matched()) {
-      // Compare(Load(js_stack_limit), LoadStackPointer)
-      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-      InstructionCode opcode = cont->Encode(kX64StackCheck);
-      CHECK(cont->IsBranch());
-      selector->EmitWithContinuation(opcode, cont);
-      return;
-    }
-  }
-  WasmStackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> wasm_m(
-      node);
-  if (wasm_m.Matched()) {
-    // This is a wasm stack check. By structure, we know that we can use the
-    // stack pointer directly, as wasm code does not modify the stack at points
-    // where stack checks are performed.
-    Node* left = node->InputAt(0);
-    LocationOperand rsp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER,
-                        InstructionSequence::DefaultRepresentation(),
-                        RegisterCode::kRegCode_rsp);
-    return VisitCompareWithMemoryOperand(selector, kX64Cmp, left, rsp, cont);
-  }
   VisitWordCompare(selector, node, kX64Cmp, cont);
 }
 
@@ -2157,6 +2167,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
         return VisitWordCompare(this, value, kX64Cmp32, cont);
       case IrOpcode::kWord32And:
         return VisitWordCompare(this, value, kX64Test32, cont);
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
       default:
         break;
     }
@@ -2586,6 +2599,12 @@ VISIT_ATOMIC_BINOP(Xor)
   V(I8x16)
 
 #define SIMD_BINOP_LIST(V) \
+  V(F64x2Add)              \
+  V(F64x2Sub)              \
+  V(F64x2Mul)              \
+  V(F64x2Div)              \
+  V(F64x2Min)              \
+  V(F64x2Max)              \
   V(F64x2Eq)               \
   V(F64x2Ne)               \
   V(F64x2Lt)               \
@@ -2594,6 +2613,7 @@ VISIT_ATOMIC_BINOP(Xor)
   V(F32x4AddHoriz)         \
   V(F32x4Sub)              \
   V(F32x4Mul)              \
+  V(F32x4Div)              \
   V(F32x4Min)              \
   V(F32x4Max)              \
   V(F32x4Eq)               \
@@ -2603,7 +2623,6 @@ VISIT_ATOMIC_BINOP(Xor)
   V(I64x2Add)              \
   V(I64x2Sub)              \
   V(I64x2Eq)               \
-  V(I64x2Ne)               \
   V(I64x2GtS)              \
   V(I32x4Add)              \
   V(I32x4AddHoriz)         \
@@ -2612,12 +2631,10 @@ VISIT_ATOMIC_BINOP(Xor)
   V(I32x4MinS)             \
   V(I32x4MaxS)             \
   V(I32x4Eq)               \
-  V(I32x4Ne)               \
   V(I32x4GtS)              \
   V(I32x4GeS)              \
   V(I32x4MinU)             \
   V(I32x4MaxU)             \
-  V(I32x4GtU)              \
   V(I32x4GeU)              \
   V(I16x8SConvertI32x4)    \
   V(I16x8Add)              \
@@ -2629,14 +2646,12 @@ VISIT_ATOMIC_BINOP(Xor)
   V(I16x8MinS)             \
   V(I16x8MaxS)             \
   V(I16x8Eq)               \
-  V(I16x8Ne)               \
   V(I16x8GtS)              \
   V(I16x8GeS)              \
   V(I16x8AddSaturateU)     \
   V(I16x8SubSaturateU)     \
   V(I16x8MinU)             \
   V(I16x8MaxU)             \
-  V(I16x8GtU)              \
   V(I16x8GeU)              \
   V(I8x16SConvertI16x8)    \
   V(I8x16Add)              \
@@ -2646,23 +2661,28 @@ VISIT_ATOMIC_BINOP(Xor)
   V(I8x16MinS)             \
   V(I8x16MaxS)             \
   V(I8x16Eq)               \
-  V(I8x16Ne)               \
   V(I8x16GtS)              \
   V(I8x16GeS)              \
   V(I8x16AddSaturateU)     \
   V(I8x16SubSaturateU)     \
   V(I8x16MinU)             \
   V(I8x16MaxU)             \
-  V(I8x16GtU)              \
   V(I8x16GeU)              \
   V(S128And)               \
   V(S128Or)                \
   V(S128Xor)
 
 #define SIMD_BINOP_ONE_TEMP_LIST(V) \
+  V(I64x2Ne)                        \
   V(I64x2GeS)                       \
   V(I64x2GtU)                       \
-  V(I64x2GeU)
+  V(I64x2GeU)                       \
+  V(I32x4Ne)                        \
+  V(I32x4GtU)                       \
+  V(I16x8Ne)                        \
+  V(I16x8GtU)                       \
+  V(I8x16Ne)                        \
+  V(I8x16GtU)
 
 #define SIMD_UNOP_LIST(V)   \
   V(F32x4SConvertI32x4)     \
@@ -2686,16 +2706,17 @@ VISIT_ATOMIC_BINOP(Xor)
 
 #define SIMD_SHIFT_OPCODES(V) \
   V(I64x2Shl)                 \
-  V(I64x2ShrS)                \
   V(I64x2ShrU)                \
   V(I32x4Shl)                 \
   V(I32x4ShrS)                \
   V(I32x4ShrU)                \
   V(I16x8Shl)                 \
   V(I16x8ShrS)                \
-  V(I16x8ShrU)                \
-  V(I8x16Shl)                 \
-  V(I8x16ShrS)                \
+  V(I16x8ShrU)
+
+#define SIMD_NARROW_SHIFT_OPCODES(V) \
+  V(I8x16Shl)                        \
+  V(I8x16ShrS)                       \
   V(I8x16ShrU)
 
 #define SIMD_ANYTRUE_LIST(V) \
@@ -2745,17 +2766,30 @@ SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
 SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
 #undef VISIT_SIMD_REPLACE_LANE
 
-#define VISIT_SIMD_SHIFT(Opcode)                                  \
-  void InstructionSelector::Visit##Opcode(Node* node) {           \
-    X64OperandGenerator g(this);                                  \
-    int32_t value = OpParameter<int32_t>(node->op());             \
-    Emit(kX64##Opcode, g.DefineSameAsFirst(node),                 \
-         g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \
+#define VISIT_SIMD_SHIFT(Opcode)                                          \
+  void InstructionSelector::Visit##Opcode(Node* node) {                   \
+    X64OperandGenerator g(this);                                          \
+    InstructionOperand temps[] = {g.TempSimd128Register()};               \
+    Emit(kX64##Opcode, g.DefineSameAsFirst(node),                         \
+         g.UseUniqueRegister(node->InputAt(0)),                           \
+         g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
   }
 SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
 #undef VISIT_SIMD_SHIFT
 #undef SIMD_SHIFT_OPCODES
 
+#define VISIT_SIMD_NARROW_SHIFT(Opcode)                                       \
+  void InstructionSelector::Visit##Opcode(Node* node) {                       \
+    X64OperandGenerator g(this);                                              \
+    InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
+    Emit(kX64##Opcode, g.DefineSameAsFirst(node),                             \
+         g.UseUniqueRegister(node->InputAt(0)),                               \
+         g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);     \
+  }
+SIMD_NARROW_SHIFT_OPCODES(VISIT_SIMD_NARROW_SHIFT)
+#undef VISIT_SIMD_NARROW_SHIFT
+#undef SIMD_NARROW_SHIFT_OPCODES
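+// The narrow (byte-lane) shifts take a general-purpose temp in addition
+// to the SIMD temp because the code generator biases the shift amount by
+// 8 while building the byte mask (see kX64I8x16Shl in the code
+// generator).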
+
 #define VISIT_SIMD_UNOP(Opcode)                         \
   void InstructionSelector::Visit##Opcode(Node* node) { \
     X64OperandGenerator g(this);                        \
@@ -2799,12 +2833,12 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
 #undef VISIT_SIMD_ANYTRUE
 #undef SIMD_ANYTRUE_LIST
 
-#define VISIT_SIMD_ALLTRUE(Opcode)                                        \
-  void InstructionSelector::Visit##Opcode(Node* node) {                   \
-    X64OperandGenerator g(this);                                          \
-    InstructionOperand temps[] = {g.TempRegister()};                      \
-    Emit(kX64##Opcode, g.DefineAsRegister(node),                          \
-         g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
+#define VISIT_SIMD_ALLTRUE(Opcode)                                            \
+  void InstructionSelector::Visit##Opcode(Node* node) {                       \
+    X64OperandGenerator g(this);                                              \
+    InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
+    Emit(kX64##Opcode, g.DefineAsRegister(node),                              \
+         g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);     \
   }
 SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
 #undef VISIT_SIMD_ALLTRUE
@@ -2820,14 +2854,16 @@ void InstructionSelector::VisitS128Select(Node* node) {
 
 void InstructionSelector::VisitF64x2Abs(Node* node) {
   X64OperandGenerator g(this);
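+  // Abs and Neg are implemented as bitwise AND/XOR against a sign-bit mask;
+  // the double temporary holds the materialized mask constant.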
-  Emit(kX64F64x2Abs, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)));
+  InstructionOperand temps[] = {g.TempDoubleRegister()};
+  Emit(kX64F64x2Abs, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+       arraysize(temps), temps);
 }
 
 void InstructionSelector::VisitF64x2Neg(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64F64x2Neg, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)));
+  InstructionOperand temps[] = {g.TempDoubleRegister()};
+  Emit(kX64F64x2Neg, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+       arraysize(temps), temps);
 }
 
 void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
@@ -2836,6 +2872,15 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
        g.UseRegister(node->InputAt(0)));
 }
 
+void InstructionSelector::VisitI64x2ShrS(Node* node) {
+  X64OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempRegister()};
+  // Fix input 1 to rcx so that code generation can use sarq_cl.
+  Emit(kX64I64x2ShrS, g.DefineSameAsFirst(node),
+       g.UseUniqueRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), rcx),
+       arraysize(temps), temps);
+}
+
 void InstructionSelector::VisitI64x2Mul(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand temps[] = {g.TempSimd128Register(),
@@ -2845,15 +2890,59 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
        g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
 }
 
+void InstructionSelector::VisitI64x2MinS(Node* node) {
+  X64OperandGenerator g(this);
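+  // On the SSE4.2 path the compare result is blended with blendvpd, whose
+  // mask operand is implicitly xmm0, hence input 1 is fixed to xmm0. Without
+  // SSE4.2 the 64-bit signed compare is emulated via the general-purpose
+  // temporaries.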
+  if (this->IsSupported(SSE4_2)) {
+    InstructionOperand temps[] = {g.TempSimd128Register()};
+    Emit(kX64I64x2MinS, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
+         arraysize(temps), temps);
+  } else {
+    InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister(),
+                                  g.TempRegister()};
+    Emit(kX64I64x2MinS, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+         arraysize(temps), temps);
+  }
+}
+
+void InstructionSelector::VisitI64x2MaxS(Node* node) {
+  X64OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempSimd128Register()};
+  Emit(kX64I64x2MaxS, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
+       arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitI64x2MinU(Node* node) {
+  X64OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempSimd128Register(),
+                                g.TempSimd128Register()};
+  Emit(kX64I64x2MinU, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
+       arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitI64x2MaxU(Node* node) {
+  X64OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempSimd128Register(),
+                                g.TempSimd128Register()};
+  Emit(kX64I64x2MaxU, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
+       arraysize(temps), temps);
+}
+
 void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
   X64OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempSimd128Register()};
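+  // The temporary is used to fix up lanes where cvttps2dq saw NaN or an
+  // out-of-range input (both produce 0x80000000).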
   Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)));
+       g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
 }
 
 void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
   X64OperandGenerator g(this);
-  InstructionOperand temps[] = {g.TempSimd128Register()};
+  InstructionOperand temps[] = {g.TempSimd128Register(),
+                                g.TempSimd128Register()};
   Emit(kX64I32x4UConvertF32x4, g.DefineSameAsFirst(node),
        g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
 }
@@ -2997,12 +3086,12 @@ static const ShuffleEntry arch_shuffles[] = {
      true},
     {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
      kX64S8x8Reverse,
-     false,
-     false},
+     true,
+     true},
     {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
      kX64S8x4Reverse,
-     false,
-     false},
+     true,
+     true},
     {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
      kX64S8x2Reverse,
      true,
@@ -3060,6 +3149,8 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
     SwapShuffleInputs(node);
     is_swizzle = false;        // It's simpler to just handle the general case.
     no_same_as_first = false;  // SSE requires same-as-first.
+    // TODO(v8:9608): also see v8:9083
+    src1_needs_reg = true;
     opcode = kX64S8x16Alignr;
     // palignr takes a single imm8 offset.
     imms[imm_count++] = offset;
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 2583262c0744d2..ffc149ea5d9cc1 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/branch-elimination.h"
 
+#include "src/base/small-vector.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
@@ -13,17 +14,17 @@ namespace internal {
 namespace compiler {
 
 BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
-                                     Zone* zone)
+                                     Zone* zone, Phase phase)
     : AdvancedReducer(editor),
       jsgraph_(js_graph),
       node_conditions_(js_graph->graph()->NodeCount(), zone),
       reduced_(js_graph->graph()->NodeCount(), zone),
       zone_(zone),
-      dead_(js_graph->Dead()) {}
+      dead_(js_graph->Dead()),
+      phase_(phase) {}
 
 BranchElimination::~BranchElimination() = default;
 
-
 Reduction BranchElimination::Reduce(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kDead:
@@ -52,6 +53,74 @@ Reduction BranchElimination::Reduce(Node* node) {
   return NoChange();
 }
 
+void BranchElimination::SimplifyBranchCondition(Node* branch) {
+  // Try to use a phi as a branch condition if the control flow from the branch
+  // is known from previous branches. For example, in the graph below, the
+  // control flow of the second_branch is predictable because the first_branch
+  // uses the same branch condition. In such a case, create a new phi with
+  // constant inputs and let the second branch use the phi as its branch
+  // condition. This transformation exposes more branch-folding opportunities
+  // to later passes through branch cloning in the effect-control linearizer.
+  //
+  // condition                             condition
+  //    |   \                                   |
+  //    |  first_branch                        first_branch
+  //    |   /          \                       /          \
+  //    |  /            \                     /            \
+  //    |first_true  first_false           first_true  first_false
+  //    |  \           /                      \           /
+  //    |   \         /                        \         /
+  //    |  first_merge           ==>          first_merge
+  //    |       |                                   |
+  //   second_branch                    1    0      |
+  //    /          \                     \  /       |
+  //   /            \                     phi       |
+  // second_true  second_false              \       |
+  //                                      second_branch
+  //                                      /          \
+  //                                     /            \
+  //                                   second_true  second_false
+  //
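+  // For example, for source such as
+  //   if (c) { foo(); } else { bar(); }
+  //   if (c) { baz(); }
+  // the second test of c becomes a phi of constants over first_merge, and the
+  // branch-cloning pass can then fold the second branch along each
+  // predecessor.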
+
+  DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+  Node* merge = NodeProperties::GetControlInput(branch);
+  if (merge->opcode() != IrOpcode::kMerge) return;
+
+  Node* branch_condition = branch->InputAt(0);
+  Node* previous_branch;
+  bool condition_value;
+  Graph* graph = jsgraph()->graph();
+  base::SmallVector<Node*, 2> phi_inputs;
+
+  Node::Inputs inputs = merge->inputs();
+  int input_count = inputs.count();
+  for (int i = 0; i != input_count; ++i) {
+    Node* input = inputs[i];
+    ControlPathConditions from_input = node_conditions_.Get(input);
+    if (!from_input.LookupCondition(branch_condition, &previous_branch,
+                                    &condition_value))
+      return;
+
+    if (phase_ == kEARLY) {
+      phi_inputs.emplace_back(condition_value ? jsgraph()->TrueConstant()
+                                              : jsgraph()->FalseConstant());
+    } else {
+      phi_inputs.emplace_back(
+          condition_value
+              ? graph->NewNode(jsgraph()->common()->Int32Constant(1))
+              : graph->NewNode(jsgraph()->common()->Int32Constant(0)));
+    }
+  }
+  phi_inputs.emplace_back(merge);
+  Node* new_phi = graph->NewNode(
+      common()->Phi(phase_ == kEARLY ? MachineRepresentation::kTagged
+                                     : MachineRepresentation::kWord32,
+                    input_count),
+      input_count + 1, &phi_inputs.at(0));
+
+  // Replace the branch condition with the new phi.
+  NodeProperties::ReplaceValueInput(branch, new_phi, 0);
+}
 
 Reduction BranchElimination::ReduceBranch(Node* node) {
   Node* condition = node->InputAt(0);
@@ -87,6 +156,7 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
     }
     return Replace(dead());
   }
+  SimplifyBranchCondition(node);
   return TakeConditionsFromFirstControl(node);
 }
 
@@ -151,7 +221,6 @@ Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
   return UpdateConditions(node, from_branch, condition, branch, is_true_branch);
 }
 
-
 Reduction BranchElimination::ReduceLoop(Node* node) {
   // Here we rely on having only reducible loops:
   // The loop entry edge always dominates the header, so we can just use
@@ -159,7 +228,6 @@ Reduction BranchElimination::ReduceLoop(Node* node) {
   return TakeConditionsFromFirstControl(node);
 }
 
-
 Reduction BranchElimination::ReduceMerge(Node* node) {
   // Shortcut for the case when we do not know anything about some
   // input.
@@ -188,18 +256,15 @@ Reduction BranchElimination::ReduceMerge(Node* node) {
   return UpdateConditions(node, conditions);
 }
 
-
 Reduction BranchElimination::ReduceStart(Node* node) {
   return UpdateConditions(node, {});
 }
 
-
 Reduction BranchElimination::ReduceOtherControl(Node* node) {
   DCHECK_EQ(1, node->op()->ControlInputCount());
   return TakeConditionsFromFirstControl(node);
 }
 
-
 Reduction BranchElimination::TakeConditionsFromFirstControl(Node* node) {
   // We just propagate the information from the control input (ideally,
   // we would only revisit control uses if there is change).
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 2730da9c75e781..b3d9ef77523b6c 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -22,7 +22,12 @@ class JSGraph;
 class V8_EXPORT_PRIVATE BranchElimination final
     : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
-  BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone);
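+  // kEARLY runs before effect-control linearization, when branch conditions
+  // are still tagged booleans; kLATE runs afterwards, when conditions are
+  // machine-level Word32 values. SimplifyBranchCondition chooses the phi
+  // representation accordingly.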
+  enum Phase {
+    kEARLY,
+    kLATE,
+  };
+  BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone,
+                    Phase phase = kLATE);
   ~BranchElimination() final;
 
   const char* reducer_name() const override { return "BranchElimination"; }
@@ -62,6 +67,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
   Reduction ReduceMerge(Node* node);
   Reduction ReduceStart(Node* node);
   Reduction ReduceOtherControl(Node* node);
+  void SimplifyBranchCondition(Node* branch);
 
   Reduction TakeConditionsFromFirstControl(Node* node);
   Reduction UpdateConditions(Node* node, ControlPathConditions conditions);
@@ -84,6 +90,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
   NodeAuxData<bool> reduced_;
   Zone* zone_;
   Node* dead_;
+  Phase phase_;
 };
 
 }  // namespace compiler
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index b44bec5fc88737..f1d43fc1a6982a 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -97,37 +97,35 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
 
 namespace {
 
-void UpdateInLiveness(
-    Bytecode bytecode,
-    BytecodeLivenessState& in_liveness,  // NOLINT(runtime/references)
-    const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
+                      const interpreter::BytecodeArrayAccessor& accessor) {
   int num_operands = Bytecodes::NumberOfOperands(bytecode);
   const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
 
   // Special case Suspend and Resume to just pass through liveness.
   if (bytecode == Bytecode::kSuspendGenerator) {
     // The generator object has to be live.
-    in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index());
+    in_liveness->MarkRegisterLive(accessor.GetRegisterOperand(0).index());
    // Suspend additionally reads and returns the accumulator.
     DCHECK(Bytecodes::ReadsAccumulator(bytecode));
-    in_liveness.MarkAccumulatorLive();
+    in_liveness->MarkAccumulatorLive();
     return;
   }
   if (bytecode == Bytecode::kResumeGenerator) {
     // The generator object has to be live.
-    in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index());
+    in_liveness->MarkRegisterLive(accessor.GetRegisterOperand(0).index());
     return;
   }
 
   if (Bytecodes::WritesAccumulator(bytecode)) {
-    in_liveness.MarkAccumulatorDead();
+    in_liveness->MarkAccumulatorDead();
   }
   for (int i = 0; i < num_operands; ++i) {
     switch (operand_types[i]) {
       case OperandType::kRegOut: {
         interpreter::Register r = accessor.GetRegisterOperand(i);
         if (!r.is_parameter()) {
-          in_liveness.MarkRegisterDead(r.index());
+          in_liveness->MarkRegisterDead(r.index());
         }
         break;
       }
@@ -137,7 +135,7 @@ void UpdateInLiveness(
         if (!r.is_parameter()) {
           for (uint32_t j = 0; j < reg_count; ++j) {
             DCHECK(!interpreter::Register(r.index() + j).is_parameter());
-            in_liveness.MarkRegisterDead(r.index() + j);
+            in_liveness->MarkRegisterDead(r.index() + j);
           }
         }
         break;
@@ -146,8 +144,8 @@ void UpdateInLiveness(
         interpreter::Register r = accessor.GetRegisterOperand(i);
         if (!r.is_parameter()) {
           DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
-          in_liveness.MarkRegisterDead(r.index());
-          in_liveness.MarkRegisterDead(r.index() + 1);
+          in_liveness->MarkRegisterDead(r.index());
+          in_liveness->MarkRegisterDead(r.index() + 1);
         }
         break;
       }
@@ -156,9 +154,9 @@ void UpdateInLiveness(
         if (!r.is_parameter()) {
           DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
           DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
-          in_liveness.MarkRegisterDead(r.index());
-          in_liveness.MarkRegisterDead(r.index() + 1);
-          in_liveness.MarkRegisterDead(r.index() + 2);
+          in_liveness->MarkRegisterDead(r.index());
+          in_liveness->MarkRegisterDead(r.index() + 1);
+          in_liveness->MarkRegisterDead(r.index() + 2);
         }
         break;
       }
@@ -169,14 +167,14 @@ void UpdateInLiveness(
   }
 
   if (Bytecodes::ReadsAccumulator(bytecode)) {
-    in_liveness.MarkAccumulatorLive();
+    in_liveness->MarkAccumulatorLive();
   }
   for (int i = 0; i < num_operands; ++i) {
     switch (operand_types[i]) {
       case OperandType::kReg: {
         interpreter::Register r = accessor.GetRegisterOperand(i);
         if (!r.is_parameter()) {
-          in_liveness.MarkRegisterLive(r.index());
+          in_liveness->MarkRegisterLive(r.index());
         }
         break;
       }
@@ -184,8 +182,8 @@ void UpdateInLiveness(
         interpreter::Register r = accessor.GetRegisterOperand(i);
         if (!r.is_parameter()) {
           DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
-          in_liveness.MarkRegisterLive(r.index());
-          in_liveness.MarkRegisterLive(r.index() + 1);
+          in_liveness->MarkRegisterLive(r.index());
+          in_liveness->MarkRegisterLive(r.index() + 1);
         }
         break;
       }
@@ -195,7 +193,7 @@ void UpdateInLiveness(
         if (!r.is_parameter()) {
           for (uint32_t j = 0; j < reg_count; ++j) {
             DCHECK(!interpreter::Register(r.index() + j).is_parameter());
-            in_liveness.MarkRegisterLive(r.index() + j);
+            in_liveness->MarkRegisterLive(r.index() + j);
           }
         }
         break;
@@ -207,19 +205,17 @@ void UpdateInLiveness(
   }
 }
 
-void UpdateOutLiveness(
-    Bytecode bytecode,
-    BytecodeLivenessState& out_liveness,  // NOLINT(runtime/references)
-    BytecodeLivenessState* next_bytecode_in_liveness,
-    const interpreter::BytecodeArrayAccessor& accessor,
-    Handle<BytecodeArray> bytecode_array,
-    const BytecodeLivenessMap& liveness_map) {
+void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState* out_liveness,
+                       BytecodeLivenessState* next_bytecode_in_liveness,
+                       const interpreter::BytecodeArrayAccessor& accessor,
+                       Handle<BytecodeArray> bytecode_array,
+                       const BytecodeLivenessMap& liveness_map) {
   int current_offset = accessor.current_offset();
 
   // Special case Suspend and Resume to just pass through liveness.
   if (bytecode == Bytecode::kSuspendGenerator ||
       bytecode == Bytecode::kResumeGenerator) {
-    out_liveness.Union(*next_bytecode_in_liveness);
+    out_liveness->Union(*next_bytecode_in_liveness);
     return;
   }
 
@@ -227,10 +223,10 @@ void UpdateOutLiveness(
   // the liveness iterations.
   if (Bytecodes::IsForwardJump(bytecode)) {
     int target_offset = accessor.GetJumpTargetOffset();
-    out_liveness.Union(*liveness_map.GetInLiveness(target_offset));
+    out_liveness->Union(*liveness_map.GetInLiveness(target_offset));
   } else if (Bytecodes::IsSwitch(bytecode)) {
     for (const auto& entry : accessor.GetJumpTableTargetOffsets()) {
-      out_liveness.Union(*liveness_map.GetInLiveness(entry.target_offset));
+      out_liveness->Union(*liveness_map.GetInLiveness(entry.target_offset));
     }
   }
 
@@ -238,7 +234,7 @@ void UpdateOutLiveness(
   // unconditional jump).
   if (next_bytecode_in_liveness != nullptr &&
       !Bytecodes::IsUnconditionalJump(bytecode)) {
-    out_liveness.Union(*next_bytecode_in_liveness);
+    out_liveness->Union(*next_bytecode_in_liveness);
   }
 
   // Update from exception handler (if any).
@@ -250,15 +246,15 @@ void UpdateOutLiveness(
         table.LookupRange(current_offset, &handler_context, nullptr);
 
     if (handler_offset != -1) {
-      bool was_accumulator_live = out_liveness.AccumulatorIsLive();
-      out_liveness.Union(*liveness_map.GetInLiveness(handler_offset));
-      out_liveness.MarkRegisterLive(handler_context);
+      bool was_accumulator_live = out_liveness->AccumulatorIsLive();
+      out_liveness->Union(*liveness_map.GetInLiveness(handler_offset));
+      out_liveness->MarkRegisterLive(handler_context);
       if (!was_accumulator_live) {
         // The accumulator is reset to the exception on entry into a handler,
         // and so shouldn't be considered live coming out of this bytecode just
         // because it's live coming into the handler. So, kill the accumulator
         // if the handler is the only thing that made it live.
-        out_liveness.MarkAccumulatorDead();
+        out_liveness->MarkAccumulatorDead();
 
         // TODO(leszeks): Ideally the accumulator wouldn't be considered live at
         // the start of the handler, but looking up if the current bytecode is
@@ -269,45 +265,42 @@ void UpdateOutLiveness(
   }
 }
 
-void UpdateLiveness(Bytecode bytecode,
-                    BytecodeLiveness& liveness,  // NOLINT(runtime/references)
+void UpdateLiveness(Bytecode bytecode, BytecodeLiveness const& liveness,
                     BytecodeLivenessState** next_bytecode_in_liveness,
                     const interpreter::BytecodeArrayAccessor& accessor,
                     Handle<BytecodeArray> bytecode_array,
                     const BytecodeLivenessMap& liveness_map) {
-  UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness,
+  UpdateOutLiveness(bytecode, liveness.out, *next_bytecode_in_liveness,
                     accessor, bytecode_array, liveness_map);
   liveness.in->CopyFrom(*liveness.out);
-  UpdateInLiveness(bytecode, *liveness.in, accessor);
+  UpdateInLiveness(bytecode, liveness.in, accessor);
 
   *next_bytecode_in_liveness = liveness.in;
 }
 
-void UpdateAssignments(
-    Bytecode bytecode,
-    BytecodeLoopAssignments& assignments,  // NOLINT(runtime/references)
-    const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments* assignments,
+                       const interpreter::BytecodeArrayAccessor& accessor) {
   int num_operands = Bytecodes::NumberOfOperands(bytecode);
   const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
 
   for (int i = 0; i < num_operands; ++i) {
     switch (operand_types[i]) {
       case OperandType::kRegOut: {
-        assignments.Add(accessor.GetRegisterOperand(i));
+        assignments->Add(accessor.GetRegisterOperand(i));
         break;
       }
       case OperandType::kRegOutList: {
         interpreter::Register r = accessor.GetRegisterOperand(i++);
         uint32_t reg_count = accessor.GetRegisterCountOperand(i);
-        assignments.AddList(r, reg_count);
+        assignments->AddList(r, reg_count);
         break;
       }
       case OperandType::kRegOutPair: {
-        assignments.AddList(accessor.GetRegisterOperand(i), 2);
+        assignments->AddList(accessor.GetRegisterOperand(i), 2);
         break;
       }
       case OperandType::kRegOutTriple: {
-        assignments.AddList(accessor.GetRegisterOperand(i), 3);
+        assignments->AddList(accessor.GetRegisterOperand(i), 3);
         break;
       }
       default:
@@ -365,7 +358,7 @@ void BytecodeAnalysis::Analyze() {
       // the loop *and* are live when the loop exits. However, this requires
       // tracking the out-liveness of *all* loop exits, which is not
       // information we currently have.
-      UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
+      UpdateAssignments(bytecode, &current_loop_info->assignments(), iterator);
 
       // Update suspend counts for this loop.
       if (bytecode == Bytecode::kSuspendGenerator) {
@@ -433,7 +426,7 @@ void BytecodeAnalysis::Analyze() {
     }
 
     if (analyze_liveness_) {
-      BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
+      BytecodeLiveness const& liveness = liveness_map_.InitializeLiveness(
           current_offset, bytecode_array()->register_count(), zone());
       UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
                      bytecode_array(), liveness_map_);
@@ -496,14 +489,14 @@ void BytecodeAnalysis::Analyze() {
     for (; iterator.current_offset() > header_offset; --iterator) {
       Bytecode bytecode = iterator.current_bytecode();
       int current_offset = iterator.current_offset();
-      BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
-
+      BytecodeLiveness const& liveness =
+          liveness_map_.GetLiveness(current_offset);
       UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
                      bytecode_array(), liveness_map_);
     }
     // Now we are at the loop header. Since the in-liveness of the header
    // can't change, we only need to update the out-liveness.
-    UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out,
+    UpdateOutLiveness(iterator.current_bytecode(), header_liveness.out,
                       next_bytecode_in_liveness, iterator, bytecode_array(),
                       liveness_map_);
   }
@@ -532,13 +525,14 @@ void BytecodeAnalysis::Analyze() {
     // bytecodes before it.
     if (any_changed) {
       switch_liveness.in->CopyFrom(*switch_liveness.out);
-      UpdateInLiveness(Bytecode::kSwitchOnGeneratorState, *switch_liveness.in,
+      UpdateInLiveness(Bytecode::kSwitchOnGeneratorState, switch_liveness.in,
                        iterator);
       next_bytecode_in_liveness = switch_liveness.in;
       for (--iterator; iterator.IsValid(); --iterator) {
         Bytecode bytecode = iterator.current_bytecode();
         int current_offset = iterator.current_offset();
-        BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+        BytecodeLiveness const& liveness =
+            liveness_map_.GetLiveness(current_offset);
 
         // There shouldn't be any more loops.
         DCHECK_NE(bytecode, Bytecode::kJumpLoop);
@@ -829,7 +823,7 @@ bool BytecodeAnalysis::LivenessIsValid() {
 
     previous_liveness.CopyFrom(*liveness.out);
 
-    UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+    UpdateOutLiveness(bytecode, liveness.out, next_bytecode_in_liveness,
                       iterator, bytecode_array(), liveness_map_);
     // UpdateOutLiveness skips kJumpLoop, so we update it manually.
     if (bytecode == Bytecode::kJumpLoop) {
@@ -848,7 +842,7 @@ bool BytecodeAnalysis::LivenessIsValid() {
     previous_liveness.CopyFrom(*liveness.in);
 
     liveness.in->CopyFrom(*liveness.out);
-    UpdateInLiveness(bytecode, *liveness.in, iterator);
+    UpdateInLiveness(bytecode, liveness.in, iterator);
 
     if (!liveness.in->Equals(previous_liveness)) {
       // Reset the invalid liveness.
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 7c7144632074dd..b1051be5719e8b 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -16,7 +16,6 @@
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/compiler/state-values-utils.h"
-#include "src/compiler/vector-slot-pair.h"
 #include "src/interpreter/bytecode-array-iterator.h"
 #include "src/interpreter/bytecode-flags.h"
 #include "src/interpreter/bytecodes.h"
@@ -34,13 +33,12 @@ namespace compiler {
 class BytecodeGraphBuilder {
  public:
   BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone,
-                       BytecodeArrayRef bytecode_array,
-                       SharedFunctionInfoRef shared,
-                       FeedbackVectorRef feedback_vector, BailoutId osr_offset,
-                       JSGraph* jsgraph,
+                       NativeContextRef const& native_context,
+                       SharedFunctionInfoRef const& shared_info,
+                       FeedbackVectorRef const& feedback_vector,
+                       BailoutId osr_offset, JSGraph* jsgraph,
                        CallFrequency const& invocation_frequency,
-                       SourcePositionTable* source_positions,
-                       NativeContextRef native_context, int inlining_id,
+                       SourcePositionTable* source_positions, int inlining_id,
                        BytecodeGraphBuilderFlags flags,
                        TickCounter* tick_counter);
 
@@ -68,9 +66,9 @@ class BytecodeGraphBuilder {
  // Builder for loading a native context field.
   Node* BuildLoadNativeContextField(int index);
 
-  // Helper function for creating a pair containing type feedback vector and
-  // a feedback slot.
-  VectorSlotPair CreateVectorSlotPair(int slot_id);
+  // Helper function for creating a feedback source containing the type
+  // feedback vector and a feedback slot.
+  FeedbackSource CreateFeedbackSource(int slot_id);
 
   void set_environment(Environment* env) { environment_ = env; }
   const Environment* environment() const { return environment_; }
@@ -168,7 +166,7 @@ class BytecodeGraphBuilder {
   void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
 
   void BuildCreateArguments(CreateArgumentsType type);
-  Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
+  Node* BuildLoadGlobal(NameRef name, uint32_t feedback_slot_index,
                         TypeofMode typeof_mode);
 
   enum class StoreMode {
@@ -245,11 +243,12 @@ class BytecodeGraphBuilder {
   ForInMode GetForInMode(int operand_index);
 
   // Helper function to compute call frequency from the recorded type
-  // feedback.
+  // feedback. Returns an unknown frequency if the invocation count is
+  // unknown, and 0 if the feedback is insufficient.
   CallFrequency ComputeCallFrequency(int slot_id) const;
 
   // Helper function to extract the speculation mode from the recorded type
-  // feedback.
+  // feedback. Returns kDisallowSpeculation if the feedback is insufficient.
   SpeculationMode GetSpeculationMode(int slot_id) const;
 
   // Control flow plumbing.
@@ -310,7 +309,6 @@ class BytecodeGraphBuilder {
     int context_register_;  // Index of register holding handler context.
   };
 
-  // Field accessors
   Graph* graph() const { return jsgraph_->graph(); }
   CommonOperatorBuilder* common() const { return jsgraph_->common(); }
   Zone* graph_zone() const { return graph()->zone(); }
@@ -321,55 +319,44 @@ class BytecodeGraphBuilder {
     return jsgraph_->simplified();
   }
   Zone* local_zone() const { return local_zone_; }
-  const BytecodeArrayRef bytecode_array() const { return bytecode_array_; }
-  FeedbackVectorRef feedback_vector() const { return feedback_vector_; }
+  BytecodeArrayRef bytecode_array() const {
+    return shared_info().GetBytecodeArray();
+  }
+  FeedbackVectorRef const& feedback_vector() const { return feedback_vector_; }
   const JSTypeHintLowering& type_hint_lowering() const {
     return type_hint_lowering_;
   }
   const FrameStateFunctionInfo* frame_state_function_info() const {
     return frame_state_function_info_;
   }
-
   SourcePositionTableIterator& source_position_iterator() {
     return *source_position_iterator_.get();
   }
-
   interpreter::BytecodeArrayIterator& bytecode_iterator() {
     return bytecode_iterator_;
   }
-
   BytecodeAnalysis const& bytecode_analysis() const {
     return bytecode_analysis_;
   }
-
   int currently_peeled_loop_offset() const {
     return currently_peeled_loop_offset_;
   }
-
   void set_currently_peeled_loop_offset(int offset) {
     currently_peeled_loop_offset_ = offset;
   }
-
   bool skip_next_stack_check() const { return skip_next_stack_check_; }
-
   void unset_skip_next_stack_check() { skip_next_stack_check_ = false; }
-
-  int current_exception_handler() { return current_exception_handler_; }
-
+  int current_exception_handler() const { return current_exception_handler_; }
   void set_current_exception_handler(int index) {
     current_exception_handler_ = index;
   }
-
   bool needs_eager_checkpoint() const { return needs_eager_checkpoint_; }
   void mark_as_needing_eager_checkpoint(bool value) {
     needs_eager_checkpoint_ = value;
   }
-
-  SharedFunctionInfoRef shared_info() const { return shared_info_; }
-
-  NativeContextRef native_context() const { return native_context_; }
-
   JSHeapBroker* broker() const { return broker_; }
+  NativeContextRef native_context() const { return native_context_; }
+  SharedFunctionInfoRef shared_info() const { return shared_info_; }
 
 #define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
   BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
@@ -378,9 +365,11 @@ class BytecodeGraphBuilder {
   JSHeapBroker* const broker_;
   Zone* const local_zone_;
   JSGraph* const jsgraph_;
+  // The native context for which we optimize.
+  NativeContextRef const native_context_;
+  SharedFunctionInfoRef const shared_info_;
+  FeedbackVectorRef const feedback_vector_;
   CallFrequency const invocation_frequency_;
-  BytecodeArrayRef const bytecode_array_;
-  FeedbackVectorRef feedback_vector_;
   JSTypeHintLowering const type_hint_lowering_;
   const FrameStateFunctionInfo* const frame_state_function_info_;
   std::unique_ptr<SourcePositionTableIterator> source_position_iterator_;
@@ -431,11 +420,6 @@ class BytecodeGraphBuilder {
 
   SourcePosition const start_position_;
 
-  SharedFunctionInfoRef const shared_info_;
-
-  // The native context for which we optimize.
-  NativeContextRef const native_context_;
-
   TickCounter* const tick_counter_;
 
   static int const kBinaryOperationHintIndex = 1;
@@ -937,33 +921,36 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
 }
 
 BytecodeGraphBuilder::BytecodeGraphBuilder(
-    JSHeapBroker* broker, Zone* local_zone, BytecodeArrayRef bytecode_array,
-    SharedFunctionInfoRef shared_info, FeedbackVectorRef feedback_vector,
-    BailoutId osr_offset, JSGraph* jsgraph,
-    CallFrequency const& invocation_frequency,
-    SourcePositionTable* source_positions, NativeContextRef native_context,
-    int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
+    JSHeapBroker* broker, Zone* local_zone,
+    NativeContextRef const& native_context,
+    SharedFunctionInfoRef const& shared_info,
+    FeedbackVectorRef const& feedback_vector, BailoutId osr_offset,
+    JSGraph* jsgraph, CallFrequency const& invocation_frequency,
+    SourcePositionTable* source_positions, int inlining_id,
+    BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
     : broker_(broker),
       local_zone_(local_zone),
       jsgraph_(jsgraph),
-      invocation_frequency_(invocation_frequency),
-      bytecode_array_(bytecode_array),
+      native_context_(native_context),
+      shared_info_(shared_info),
       feedback_vector_(feedback_vector),
+      invocation_frequency_(invocation_frequency),
       type_hint_lowering_(
-          jsgraph, feedback_vector.object(),
+          broker, jsgraph, feedback_vector,
           (flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
               ? JSTypeHintLowering::kBailoutOnUninitialized
               : JSTypeHintLowering::kNoFlags),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
-          bytecode_array.parameter_count(), bytecode_array.register_count(),
+          bytecode_array().parameter_count(), bytecode_array().register_count(),
           shared_info.object())),
       bytecode_iterator_(
-          base::make_unique<OffHeapBytecodeArray>(bytecode_array)),
+          base::make_unique<OffHeapBytecodeArray>(bytecode_array())),
       bytecode_analysis_(broker_->GetBytecodeAnalysis(
-          bytecode_array.object(), osr_offset,
+          bytecode_array().object(), osr_offset,
           flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
-          !FLAG_concurrent_inlining)),
+          FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized
+                                   : SerializationPolicy::kSerializeIfNeeded)),
       environment_(nullptr),
       osr_(!osr_offset.IsNone()),
       currently_peeled_loop_offset_(-1),
@@ -980,19 +967,17 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
       state_values_cache_(jsgraph),
       source_positions_(source_positions),
       start_position_(shared_info.StartPosition(), inlining_id),
-      shared_info_(shared_info),
-      native_context_(native_context),
       tick_counter_(tick_counter) {
   if (FLAG_concurrent_inlining) {
     // With concurrent inlining on, the source position address doesn't change
     // because it's been copied from the heap.
     source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
-        Vector<const byte>(bytecode_array.source_positions_address(),
-                           bytecode_array.source_positions_size()));
+        Vector<const byte>(bytecode_array().source_positions_address(),
+                           bytecode_array().source_positions_size()));
   } else {
     // Otherwise, we need to access the table through a handle.
     source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
-        handle(bytecode_array.object()->SourcePositionTableIfCollected(),
+        handle(bytecode_array().object()->SourcePositionTableIfCollected(),
                isolate()));
   }
 }
@@ -1014,13 +999,13 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
   return result;
 }
 
-VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
+FeedbackSource BytecodeGraphBuilder::CreateFeedbackSource(int slot_id) {
   FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
-  FeedbackNexus nexus(feedback_vector().object(), slot);
-  return VectorSlotPair(feedback_vector().object(), slot, nexus.ic_state());
+  return FeedbackSource(feedback_vector(), slot);
 }
 
 void BytecodeGraphBuilder::CreateGraph() {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
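+  // With concurrent inlining, graph construction can run off the main thread,
+  // so all heap data must come through the broker instead of direct heap
+  // reads.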
   SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
 
   // Set up the basic structure of the graph. Outputs for {Start} are the formal
@@ -1321,7 +1306,8 @@ void BytecodeGraphBuilder::VisitBytecodes() {
     VisitSingleBytecode();
   }
 
-  if (has_one_shot_bytecode) {
+  if (!FLAG_concurrent_inlining && has_one_shot_bytecode) {
+    // (For concurrent inlining this is done in the serializer instead.)
     isolate()->CountUsage(
         v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
   }
@@ -1340,8 +1326,9 @@ void BytecodeGraphBuilder::VisitLdaSmi() {
 }
 
 void BytecodeGraphBuilder::VisitLdaConstant() {
-  Node* node = jsgraph()->Constant(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  ObjectRef object(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  Node* node = jsgraph()->Constant(object);
   environment()->BindAccumulator(node);
 }
 
@@ -1387,20 +1374,20 @@ void BytecodeGraphBuilder::VisitMov() {
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
 }
 
-Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
+Node* BytecodeGraphBuilder::BuildLoadGlobal(NameRef name,
                                             uint32_t feedback_slot_index,
                                             TypeofMode typeof_mode) {
-  VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
-  DCHECK(
-      IsLoadGlobalICKind(feedback_vector().object()->GetKind(feedback.slot())));
-  const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
+  FeedbackSource feedback = CreateFeedbackSource(feedback_slot_index);
+  DCHECK(IsLoadGlobalICKind(broker()->GetFeedbackSlotKind(feedback)));
+  const Operator* op =
+      javascript()->LoadGlobal(name.object(), feedback, typeof_mode);
   return NewNode(op);
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobal() {
   PrepareEagerCheckpoint();
-  Handle<Name> name = Handle<Name>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  NameRef name(broker(),
+               bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
   Node* node =
       BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
@@ -1409,8 +1396,8 @@ void BytecodeGraphBuilder::VisitLdaGlobal() {
 
 void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
   PrepareEagerCheckpoint();
-  Handle<Name> name = Handle<Name>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  NameRef name(broker(),
+               bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
   Node* node =
       BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
@@ -1419,15 +1406,16 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
 
 void BytecodeGraphBuilder::VisitStaGlobal() {
   PrepareEagerCheckpoint();
-  Handle<Name> name = Handle<Name>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
+  NameRef name(broker(),
+               bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  FeedbackSource feedback =
+      CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
   Node* value = environment()->LookupAccumulator();
 
   LanguageMode language_mode =
-      feedback.vector()->GetLanguageMode(feedback.slot());
-  const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
+      GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback));
+  const Operator* op =
+      javascript()->StoreGlobal(language_mode, name.object(), feedback);
   Node* node = NewNode(op, value);
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
@@ -1439,12 +1427,12 @@ void BytecodeGraphBuilder::VisitStaInArrayLiteral() {
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* index =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
+  FeedbackSource feedback =
+      CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
   const Operator* op = javascript()->StoreInArrayLiteral(feedback);
 
   JSTypeHintLowering::LoweringResult lowering =
-      TryBuildSimplifiedStoreKeyed(op, array, index, value, feedback.slot());
+      TryBuildSimplifiedStoreKeyed(op, array, index, value, feedback.slot);
   if (lowering.IsExit()) return;
 
   Node* node = nullptr;
@@ -1467,11 +1455,22 @@ void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
   Node* value = environment()->LookupAccumulator();
   int flags = bytecode_iterator().GetFlagOperand(2);
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
-
+  FeedbackSource feedback =
+      CreateFeedbackSource(bytecode_iterator().GetIndexOperand(3));
   const Operator* op = javascript()->StoreDataPropertyInLiteral(feedback);
-  Node* node = NewNode(op, object, name, value, jsgraph()->Constant(flags));
+
+  JSTypeHintLowering::LoweringResult lowering =
+      TryBuildSimplifiedStoreKeyed(op, object, name, value, feedback.slot);
+  if (lowering.IsExit()) return;
+
+  Node* node = nullptr;
+  if (lowering.IsSideEffectFree()) {
+    node = lowering.value();
+  } else {
+    DCHECK(!lowering.Changed());
+    node = NewNode(op, object, name, value, jsgraph()->Constant(flags));
+  }
+
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
@@ -1545,8 +1544,8 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
 
 void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
   PrepareEagerCheckpoint();
-  Node* name = jsgraph()->Constant(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  Node* name = jsgraph()->Constant(ObjectRef(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
   const Operator* op =
       javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
                                     ? Runtime::kLoadLookupSlot
@@ -1630,8 +1629,9 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
     // Slow path, do a runtime load lookup.
     set_environment(slow_environment);
     {
-      Node* name = jsgraph()->Constant(
-          bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+      Node* name = jsgraph()->Constant(ObjectRef(
+          broker(),
+          bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
 
       const Operator* op =
           javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1666,8 +1666,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
   // Fast path, do a global load.
   {
     PrepareEagerCheckpoint();
-    Handle<Name> name = Handle<Name>::cast(
-        bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+    NameRef name(broker(),
+                 bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
     uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
     Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
     environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1682,8 +1682,9 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
     // Slow path, do a runtime load lookup.
     set_environment(slow_environment);
     {
-      Node* name = jsgraph()->Constant(
-          bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+      Node* name = jsgraph()->Constant(NameRef(
+          broker(),
+          bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
 
       const Operator* op =
           javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1712,8 +1713,8 @@ void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
 void BytecodeGraphBuilder::VisitStaLookupSlot() {
   PrepareEagerCheckpoint();
   Node* value = environment()->LookupAccumulator();
-  Node* name = jsgraph()->Constant(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  Node* name = jsgraph()->Constant(ObjectRef(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
   int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
   LanguageMode language_mode = static_cast<LanguageMode>(
       interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
@@ -1737,14 +1738,14 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
   PrepareEagerCheckpoint();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Handle<Name> name = Handle<Name>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
-  const Operator* op = javascript()->LoadNamed(name, feedback);
+  NameRef name(broker(),
+               bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+  FeedbackSource feedback =
+      CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
+  const Operator* op = javascript()->LoadNamed(name.object(), feedback);
 
   JSTypeHintLowering::LoweringResult lowering =
-      TryBuildSimplifiedLoadNamed(op, object, feedback.slot());
+      TryBuildSimplifiedLoadNamed(op, object, feedback.slot);
   if (lowering.IsExit()) return;
 
   Node* node = nullptr;
@@ -1761,9 +1762,9 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() {
   PrepareEagerCheckpoint();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Handle<Name> name = Handle<Name>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
-  const Operator* op = javascript()->LoadNamed(name, VectorSlotPair());
+  NameRef name(broker(),
+               bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+  const Operator* op = javascript()->LoadNamed(name.object(), FeedbackSource());
   Node* node = NewNode(op, object);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
@@ -1773,12 +1774,12 @@ void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
   Node* key = environment()->LookupAccumulator();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
+  FeedbackSource feedback =
+      CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
   const Operator* op = javascript()->LoadProperty(feedback);
 
   JSTypeHintLowering::LoweringResult lowering =
-      TryBuildSimplifiedLoadKeyed(op, object, key, feedback.slot());
+      TryBuildSimplifiedLoadKeyed(op, object, key, feedback.slot);
   if (lowering.IsExit()) return;
 
   Node* node = nullptr;
@@ -1796,25 +1797,26 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
   Node* value = environment()->LookupAccumulator();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Handle<Name> name = Handle<Name>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
+  NameRef name(broker(),
+               bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+  FeedbackSource feedback =
+      CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
 
   const Operator* op;
   if (store_mode == StoreMode::kOwn) {
     DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
-              feedback.vector()->GetKind(feedback.slot()));
-    op = javascript()->StoreNamedOwn(name, feedback);
+              broker()->GetFeedbackSlotKind(feedback));
+
+    op = javascript()->StoreNamedOwn(name.object(), feedback);
   } else {
     DCHECK_EQ(StoreMode::kNormal, store_mode);
     LanguageMode language_mode =
-        feedback.vector()->GetLanguageMode(feedback.slot());
-    op = javascript()->StoreNamed(language_mode, name, feedback);
+        GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback));
+    op = javascript()->StoreNamed(language_mode, name.object(), feedback);
   }
 
   JSTypeHintLowering::LoweringResult lowering =
-      TryBuildSimplifiedStoreNamed(op, object, value, feedback.slot());
+      TryBuildSimplifiedStoreNamed(op, object, value, feedback.slot);
   if (lowering.IsExit()) return;
 
   Node* node = nullptr;
@@ -1836,12 +1838,12 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() {
   Node* value = environment()->LookupAccumulator();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Handle<Name> name = Handle<Name>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+  NameRef name(broker(),
+               bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
   LanguageMode language_mode =
       static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2));
   const Operator* op =
-      javascript()->StoreNamed(language_mode, name, VectorSlotPair());
+      javascript()->StoreNamed(language_mode, name.object(), FeedbackSource());
   Node* node = NewNode(op, object, value);
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
@@ -1857,14 +1859,14 @@ void BytecodeGraphBuilder::VisitStaKeyedProperty() {
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* key =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
+  FeedbackSource source =
+      CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
   LanguageMode language_mode =
-      feedback.vector()->GetLanguageMode(feedback.slot());
-  const Operator* op = javascript()->StoreProperty(language_mode, feedback);
+      GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(source));
+  const Operator* op = javascript()->StoreProperty(language_mode, source);
 
   JSTypeHintLowering::LoweringResult lowering =
-      TryBuildSimplifiedStoreKeyed(op, object, key, value, feedback.slot());
+      TryBuildSimplifiedStoreKeyed(op, object, key, value, source.slot);
   if (lowering.IsExit()) return;
 
   Node* node = nullptr;
@@ -1910,71 +1912,76 @@ void BytecodeGraphBuilder::VisitPopContext() {
 }
 
 void BytecodeGraphBuilder::VisitCreateClosure() {
-  Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  SharedFunctionInfoRef shared_info(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   AllocationType allocation =
       interpreter::CreateClosureFlags::PretenuredBit::decode(
           bytecode_iterator().GetFlagOperand(2))
           ? AllocationType::kOld
           : AllocationType::kYoung;
+
   const Operator* op = javascript()->CreateClosure(
-      shared_info,
-      feedback_vector().object()->GetClosureFeedbackCell(
-          bytecode_iterator().GetIndexOperand(1)),
-      handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy),
-             isolate()),
+      shared_info.object(),
+      feedback_vector()
+          .GetClosureFeedbackCell(bytecode_iterator().GetIndexOperand(1))
+          .object(),
+      jsgraph()->isolate()->builtins()->builtin_handle(Builtins::kCompileLazy),
       allocation);
   Node* closure = NewNode(op);
   environment()->BindAccumulator(closure);
 }
 
 void BytecodeGraphBuilder::VisitCreateBlockContext() {
-  Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
-
-  const Operator* op = javascript()->CreateBlockContext(scope_info);
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  ScopeInfoRef scope_info(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  const Operator* op = javascript()->CreateBlockContext(scope_info.object());
   Node* context = NewNode(op);
   environment()->BindAccumulator(context);
 }
 
 void BytecodeGraphBuilder::VisitCreateFunctionContext() {
-  Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  ScopeInfoRef scope_info(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
-  const Operator* op =
-      javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE);
+  const Operator* op = javascript()->CreateFunctionContext(
+      scope_info.object(), slots, FUNCTION_SCOPE);
   Node* context = NewNode(op);
   environment()->BindAccumulator(context);
 }
 
 void BytecodeGraphBuilder::VisitCreateEvalContext() {
-  Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  ScopeInfoRef scope_info(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
-  const Operator* op =
-      javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE);
+  const Operator* op = javascript()->CreateFunctionContext(scope_info.object(),
+                                                           slots, EVAL_SCOPE);
   Node* context = NewNode(op);
   environment()->BindAccumulator(context);
 }
 
 void BytecodeGraphBuilder::VisitCreateCatchContext() {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
   Node* exception = environment()->LookupRegister(reg);
-  Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+  ScopeInfoRef scope_info(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
 
-  const Operator* op = javascript()->CreateCatchContext(scope_info);
+  const Operator* op = javascript()->CreateCatchContext(scope_info.object());
   Node* context = NewNode(op, exception);
   environment()->BindAccumulator(context);
 }
 
 void BytecodeGraphBuilder::VisitCreateWithContext() {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+  ScopeInfoRef scope_info(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
 
-  const Operator* op = javascript()->CreateWithContext(scope_info);
+  const Operator* op = javascript()->CreateWithContext(scope_info.object());
   Node* context = NewNode(op, object);
   environment()->BindAccumulator(context);
 }
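
The pattern repeated across these Visit* bodies is the same: the direct Handle<ScopeInfo> read from the constant pool becomes a broker-mediated ScopeInfoRef, and the body runs under DisallowHeapAccessIf so that, when --concurrent-inlining is enabled, any stray heap read trips an assertion instead of racing the main thread. A minimal self-contained sketch of that guard idea, with illustrative names rather than V8's actual classes:

    #include <cassert>

    // Scoped, condition-guarded "no heap access" region: while an armed guard
    // is alive, heap readers assert. Illustrative stand-in for
    // DisallowHeapAccessIf.
    class DisallowHeapAccessIfSketch {
     public:
      explicit DisallowHeapAccessIfSketch(bool condition) : armed_(condition) {
        if (armed_) ++depth_;
      }
      ~DisallowHeapAccessIfSketch() {
        if (armed_) --depth_;
      }
      static bool HeapAccessAllowed() { return depth_ == 0; }

     private:
      bool armed_;
      static thread_local int depth_;
    };
    thread_local int DisallowHeapAccessIfSketch::depth_ = 0;

    int ReadHeapField() {
      assert(DisallowHeapAccessIfSketch::HeapAccessAllowed());
      return 42;  // placeholder for an actual heap read
    }
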
@@ -1998,22 +2005,21 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
 }
 
 void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
-  Handle<String> constant_pattern = Handle<String>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  StringRef constant_pattern(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   int const slot_id = bytecode_iterator().GetIndexOperand(1);
-  VectorSlotPair pair = CreateVectorSlotPair(slot_id);
+  FeedbackSource pair = CreateFeedbackSource(slot_id);
   int literal_flags = bytecode_iterator().GetFlagOperand(2);
-  Node* literal = NewNode(
-      javascript()->CreateLiteralRegExp(constant_pattern, pair, literal_flags));
+  Node* literal = NewNode(javascript()->CreateLiteralRegExp(
+      constant_pattern.object(), pair, literal_flags));
   environment()->BindAccumulator(literal, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
-  Handle<ArrayBoilerplateDescription> array_boilerplate_description =
-      Handle<ArrayBoilerplateDescription>::cast(
-          bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  ArrayBoilerplateDescriptionRef array_boilerplate_description(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   int const slot_id = bytecode_iterator().GetIndexOperand(1);
-  VectorSlotPair pair = CreateVectorSlotPair(slot_id);
+  FeedbackSource pair = CreateFeedbackSource(slot_id);
   int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
   int literal_flags =
       interpreter::CreateArrayLiteralFlags::FlagsBits::decode(bytecode_flags);
@@ -2025,15 +2031,16 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
   // TODO(mstarzinger): Thread through number of elements. The below number is
   // only an estimate and does not match {ArrayLiteral::values::length}.
   int number_of_elements =
-      array_boilerplate_description->constant_elements().length();
+      array_boilerplate_description.constants_elements_length();
   Node* literal = NewNode(javascript()->CreateLiteralArray(
-      array_boilerplate_description, pair, literal_flags, number_of_elements));
+      array_boilerplate_description.object(), pair, literal_flags,
+      number_of_elements));
   environment()->BindAccumulator(literal, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitCreateEmptyArrayLiteral() {
   int const slot_id = bytecode_iterator().GetIndexOperand(0);
-  VectorSlotPair pair = CreateVectorSlotPair(slot_id);
+  FeedbackSource pair = CreateFeedbackSource(slot_id);
   Node* literal = NewNode(javascript()->CreateEmptyLiteralArray(pair));
   environment()->BindAccumulator(literal);
 }
@@ -2045,19 +2052,18 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() {
 }
 
 void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
-  Handle<ObjectBoilerplateDescription> constant_properties =
-      Handle<ObjectBoilerplateDescription>::cast(
-          bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  ObjectBoilerplateDescriptionRef constant_properties(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   int const slot_id = bytecode_iterator().GetIndexOperand(1);
-  VectorSlotPair pair = CreateVectorSlotPair(slot_id);
+  FeedbackSource pair = CreateFeedbackSource(slot_id);
   int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
   int literal_flags =
       interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
   // TODO(mstarzinger): Thread through number of properties. The below number is
   // only an estimate and does not match {ObjectLiteral::properties_count}.
-  int number_of_properties = constant_properties->size();
+  int number_of_properties = constant_properties.size();
   Node* literal = NewNode(javascript()->CreateLiteralObject(
-      constant_properties, pair, literal_flags, number_of_properties));
+      constant_properties.object(), pair, literal_flags, number_of_properties));
   environment()->BindAccumulator(literal, Environment::kAttachFrameState);
 }
 
@@ -2074,7 +2080,7 @@ void BytecodeGraphBuilder::VisitCloneObject() {
   int flags = bytecode_iterator().GetFlagOperand(1);
   int slot = bytecode_iterator().GetIndexOperand(2);
   const Operator* op =
-      javascript()->CloneObject(CreateVectorSlotPair(slot), flags);
+      javascript()->CloneObject(CreateFeedbackSource(slot), flags);
   Node* value = NewNode(op, source);
   environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
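
From here on the diff renames VectorSlotPair to FeedbackSource (and CreateVectorSlotPair to CreateFeedbackSource); later hunks also change feedback.slot() to feedback.slot, so the slot is evidently a public member rather than an accessor. A self-contained sketch of the shape implied by these call sites, using stand-in types:

    struct FeedbackSlotSketch {
      int id = -1;
      bool IsValid() const { return id >= 0; }
    };

    // Shape implied by the call sites above: a (vector, slot) pair whose slot
    // is a public field, unlike VectorSlotPair's slot() accessor.
    struct FeedbackSourceSketch {
      const void* vector = nullptr;  // stand-in for FeedbackVectorRef
      FeedbackSlotSketch slot;
      bool IsValid() const { return slot.IsValid(); }
    };
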
@@ -2140,14 +2146,14 @@ void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
             receiver_mode);
   PrepareEagerCheckpoint();
 
-  VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
-
+  FeedbackSource feedback = CreateFeedbackSource(slot_id);
   CallFrequency frequency = ComputeCallFrequency(slot_id);
-  const Operator* op =
-      javascript()->Call(arg_count, frequency, feedback, receiver_mode,
-                         GetSpeculationMode(slot_id));
+  SpeculationMode speculation_mode = GetSpeculationMode(slot_id);
+  const Operator* op = javascript()->Call(arg_count, frequency, feedback,
+                                          receiver_mode, speculation_mode);
+
   JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
-      op, args, static_cast<int>(arg_count), feedback.slot());
+      op, args, static_cast<int>(arg_count), feedback.slot);
   if (lowering.IsExit()) return;
 
   Node* node = nullptr;
@@ -2325,14 +2331,13 @@ void BytecodeGraphBuilder::VisitCallWithSpread() {
   Node* const* args = GetCallArgumentsFromRegisters(callee, receiver_node,
                                                     first_arg, arg_count);
   int const slot_id = bytecode_iterator().GetIndexOperand(3);
-  VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
-
+  FeedbackSource feedback = CreateFeedbackSource(slot_id);
   CallFrequency frequency = ComputeCallFrequency(slot_id);
   const Operator* op = javascript()->CallWithSpread(
       static_cast<int>(reg_count + 1), frequency, feedback);
 
   JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
-      op, args, static_cast<int>(arg_count), feedback.slot());
+      op, args, static_cast<int>(arg_count), feedback.slot);
   if (lowering.IsExit()) return;
 
   Node* node = nullptr;
@@ -2438,7 +2443,7 @@ void BytecodeGraphBuilder::VisitConstruct() {
   interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
   size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
   int const slot_id = bytecode_iterator().GetIndexOperand(3);
-  VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
+  FeedbackSource feedback = CreateFeedbackSource(slot_id);
 
   Node* new_target = environment()->LookupAccumulator();
   Node* callee = environment()->LookupRegister(callee_reg);
@@ -2450,7 +2455,7 @@ void BytecodeGraphBuilder::VisitConstruct() {
   Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
                                                         first_reg, arg_count);
   JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
-      op, args, static_cast<int>(arg_count), feedback.slot());
+      op, args, static_cast<int>(arg_count), feedback.slot);
   if (lowering.IsExit()) return;
 
   Node* node = nullptr;
@@ -2469,7 +2474,7 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
   interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
   size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
   int const slot_id = bytecode_iterator().GetIndexOperand(3);
-  VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
+  FeedbackSource feedback = CreateFeedbackSource(slot_id);
 
   Node* new_target = environment()->LookupAccumulator();
   Node* callee = environment()->LookupRegister(callee_reg);
@@ -2481,7 +2486,7 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
   Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
                                                         first_reg, arg_count);
   JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
-      op, args, static_cast<int>(arg_count), feedback.slot());
+      op, args, static_cast<int>(arg_count), feedback.slot);
   if (lowering.IsExit()) return;
 
   Node* node = nullptr;
@@ -2568,8 +2573,8 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
   Node* accumulator = environment()->LookupAccumulator();
   Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
                                  jsgraph()->TheHoleConstant());
-  Node* name = jsgraph()->Constant(
-      bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+  Node* name = jsgraph()->Constant(ObjectRef(
+      broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
   BuildHoleCheckAndThrow(check_for_hole,
                          Runtime::kThrowAccessedUninitializedVariable, name);
 }
@@ -2640,23 +2645,23 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
 BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
     int operand_index) {
   FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
-  FeedbackNexus nexus(feedback_vector().object(), slot);
-  return nexus.GetBinaryOperationFeedback();
+  FeedbackSource source(feedback_vector(), slot);
+  return broker()->GetFeedbackForBinaryOperation(source);
 }
 
 // Helper function to create compare operation hint from the recorded type
 // feedback.
 CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
   FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
-  FeedbackNexus nexus(feedback_vector().object(), slot);
-  return nexus.GetCompareOperationFeedback();
+  FeedbackSource source(feedback_vector(), slot);
+  return broker()->GetFeedbackForCompareOperation(source);
 }
 
 // Helper function to create for-in mode from the recorded type feedback.
 ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
   FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
-  FeedbackNexus nexus(feedback_vector().object(), slot);
-  switch (nexus.GetForInFeedback()) {
+  FeedbackSource source(feedback_vector(), slot);
+  switch (broker()->GetFeedbackForForIn(source)) {
     case ForInHint::kNone:
     case ForInHint::kEnumCacheKeysAndIndices:
       return ForInMode::kUseEnumCacheKeysAndIndices;
@@ -2670,11 +2675,12 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
 
 CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
   if (invocation_frequency_.IsUnknown()) return CallFrequency();
-  FeedbackNexus nexus(feedback_vector().object(),
-                      FeedbackVector::ToSlot(slot_id));
-  float feedback_frequency = nexus.ComputeCallFrequency();
-  if (feedback_frequency == 0.0f) {
-    // This is to prevent multiplying zero and infinity.
+  FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
+  FeedbackSource source(feedback_vector(), slot);
+  ProcessedFeedback const& feedback = broker()->GetFeedbackForCall(source);
+  float feedback_frequency =
+      feedback.IsInsufficient() ? 0.0f : feedback.AsCall().frequency();
+  if (feedback_frequency == 0.0f) {  // Prevent multiplying zero and infinity.
     return CallFrequency(0.0f);
   } else {
     return CallFrequency(feedback_frequency * invocation_frequency_.value());
@@ -2682,9 +2688,11 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
 }
 
 SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
-  FeedbackNexus nexus(feedback_vector().object(),
-                      FeedbackVector::ToSlot(slot_id));
-  return nexus.GetSpeculationMode();
+  FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
+  FeedbackSource source(feedback_vector(), slot);
+  ProcessedFeedback const& feedback = broker()->GetFeedbackForCall(source);
+  return feedback.IsInsufficient() ? SpeculationMode::kDisallowSpeculation
+                                   : feedback.AsCall().speculation_mode();
 }
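
GetBinaryOperationHint, GetCompareOperationHint, GetForInMode, ComputeCallFrequency, and GetSpeculationMode now all route through the JSHeapBroker instead of building a FeedbackNexus on the raw vector, and insufficient feedback degrades to conservative defaults (zero frequency, kDisallowSpeculation). A standalone analogue of that fallback logic:

    #include <optional>

    struct CallFeedbackSketch {
      float frequency;
      bool allow_speculation;
    };

    // Insufficient (absent) feedback maps to a conservative default; a zero
    // frequency short-circuits so it is never multiplied with infinity.
    float ComputeFrequencySketch(const std::optional<CallFeedbackSketch>& fb,
                                 float invocation_frequency) {
      float f = fb ? fb->frequency : 0.0f;
      if (f == 0.0f) return 0.0f;
      return f * invocation_frequency;
    }
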
 
 void BytecodeGraphBuilder::VisitBitwiseNot() {
@@ -2922,15 +2930,15 @@ void BytecodeGraphBuilder::VisitTestIn() {
   Node* object = environment()->LookupAccumulator();
   Node* key =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
+  FeedbackSource feedback =
+      CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
   Node* node = NewNode(javascript()->HasProperty(feedback), object, key);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitTestInstanceOf() {
   int const slot_index = bytecode_iterator().GetIndexOperand(1);
-  BuildCompareOp(javascript()->InstanceOf(CreateVectorSlotPair(slot_index)));
+  BuildCompareOp(javascript()->InstanceOf(CreateFeedbackSource(slot_index)));
 }
 
 void BytecodeGraphBuilder::VisitTestUndetectable() {
@@ -3132,6 +3140,16 @@ void BytecodeGraphBuilder::VisitJumpIfNotUndefinedConstant() {
   BuildJumpIfNotEqual(jsgraph()->UndefinedConstant());
 }
 
+void BytecodeGraphBuilder::VisitJumpIfUndefinedOrNull() {
+  BuildJumpIfEqual(jsgraph()->UndefinedConstant());
+  BuildJumpIfEqual(jsgraph()->NullConstant());
+}
+
+void BytecodeGraphBuilder::VisitJumpIfUndefinedOrNullConstant() {
+  BuildJumpIfEqual(jsgraph()->UndefinedConstant());
+  BuildJumpIfEqual(jsgraph()->NullConstant());
+}
+
 void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); }
 
 void BytecodeGraphBuilder::BuildSwitchOnSmi(Node* condition) {
@@ -3151,7 +3169,7 @@ void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() {
   PrepareEagerCheckpoint();
 
   Node* acc = environment()->LookupAccumulator();
-  Node* acc_smi = NewNode(simplified()->CheckSmi(VectorSlotPair()), acc);
+  Node* acc_smi = NewNode(simplified()->CheckSmi(FeedbackSource()), acc);
   BuildSwitchOnSmi(acc_smi);
 }
 
@@ -3277,6 +3295,23 @@ void BytecodeGraphBuilder::VisitForInStep() {
   environment()->BindAccumulator(index, Environment::kAttachFrameState);
 }
 
+void BytecodeGraphBuilder::VisitGetIterator() {
+  PrepareEagerCheckpoint();
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  FeedbackSource feedback =
+      CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
+  const Operator* op = javascript()->GetIterator(feedback);
+
+  JSTypeHintLowering::LoweringResult lowering =
+      TryBuildSimplifiedLoadNamed(op, object, feedback.slot);
+  if (lowering.IsExit()) return;
+
+  DCHECK(!lowering.Changed());
+  Node* node = NewNode(op, object);
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
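
The new VisitGetIterator handles the GetIterator bytecode, which fetches the object's Symbol.iterator property, hence the reuse of TryBuildSimplifiedLoadNamed. The control flow is the usual try-lowering-then-generic shape; a standalone analogue (illustrative only):

    #include <optional>

    // Try a feedback-specialized lowering first; only emit the generic
    // operator when the lowering made no change.
    std::optional<int> TrySimplifiedSketch(bool has_useful_feedback) {
      if (has_useful_feedback) return 1;  // pretend: specialized node id
      return std::nullopt;
    }

    int BuildNodeSketch(bool has_useful_feedback) {
      if (auto node = TrySimplifiedSketch(has_useful_feedback)) return *node;
      return 0;  // pretend: generic JSGetIterator-style node
    }
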
 void BytecodeGraphBuilder::VisitSuspendGenerator() {
   Node* generator = environment()->LookupRegister(
       bytecode_iterator().GetRegisterOperand(0));
@@ -4016,25 +4051,18 @@ void BytecodeGraphBuilder::UpdateSourcePosition(int offset) {
 }
 
 void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
-                            Handle<BytecodeArray> bytecode_array,
-                            Handle<SharedFunctionInfo> shared,
-                            Handle<FeedbackVector> feedback_vector,
+                            SharedFunctionInfoRef const& shared_info,
+                            FeedbackVectorRef const& feedback_vector,
                             BailoutId osr_offset, JSGraph* jsgraph,
                             CallFrequency const& invocation_frequency,
                             SourcePositionTable* source_positions,
-                            Handle<NativeContext> native_context,
                             int inlining_id, BytecodeGraphBuilderFlags flags,
                             TickCounter* tick_counter) {
-  BytecodeArrayRef bytecode_array_ref(broker, bytecode_array);
-  DCHECK(bytecode_array_ref.IsSerializedForCompilation());
-  FeedbackVectorRef feedback_vector_ref(broker, feedback_vector);
-  SharedFunctionInfoRef shared_ref(broker, shared);
-  DCHECK(shared_ref.IsSerializedForCompilation(feedback_vector_ref));
-  NativeContextRef native_context_ref(broker, native_context);
+  DCHECK(shared_info.IsSerializedForCompilation(feedback_vector));
   BytecodeGraphBuilder builder(
-      broker, local_zone, bytecode_array_ref, shared_ref, feedback_vector_ref,
-      osr_offset, jsgraph, invocation_frequency, source_positions,
-      native_context_ref, inlining_id, flags, tick_counter);
+      broker, local_zone, broker->target_native_context(), shared_info,
+      feedback_vector, osr_offset, jsgraph, invocation_frequency,
+      source_positions, inlining_id, flags, tick_counter);
   builder.CreateGraph();
 }
 
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 682569778f6990..03e900c214e542 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -39,13 +39,11 @@ using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
 // Note: {invocation_frequency} is taken by reference to work around a GCC bug
 // on AIX (v8:8193).
 void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
-                            Handle<BytecodeArray> bytecode_array,
-                            Handle<SharedFunctionInfo> shared,
-                            Handle<FeedbackVector> feedback_vector,
+                            SharedFunctionInfoRef const& shared_info,
+                            FeedbackVectorRef const& feedback_vector,
                             BailoutId osr_offset, JSGraph* jsgraph,
                             CallFrequency const& invocation_frequency,
                             SourcePositionTable* source_positions,
-                            Handle<NativeContext> native_context,
                             int inlining_id, BytecodeGraphBuilderFlags flags,
                             TickCounter* tick_counter);
 
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index e472a6a72ca89d..428ba058a7f904 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -140,8 +140,9 @@ namespace {
 
 
 // General code uses the above configuration data.
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(
-    Zone* zone, const MachineSignature* msig, bool set_initialize_root_flag) {
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  const MachineSignature* msig,
+                                                  CallDescriptor::Flags flags) {
   DCHECK_LE(msig->parameter_count(), static_cast<size_t>(kMaxCParameters));
 
   LocationSignature::Builder locations(zone, msig->return_count(),
@@ -220,10 +221,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
   // The target for C calls is always an address (i.e. machine pointer).
   MachineType target_type = MachineType::Pointer();
   LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
-  CallDescriptor::Flags flags = CallDescriptor::kNoAllocate;
-  if (set_initialize_root_flag) {
-    flags |= CallDescriptor::kInitializeRootRegister;
-  }
+  flags |= CallDescriptor::kNoAllocate;
 
   return new (zone) CallDescriptor(  // --
       CallDescriptor::kCallAddress,  // kind
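
The boolean set_initialize_root_flag parameter becomes a full CallDescriptor::Flags argument: kNoAllocate is now OR'ed in unconditionally inside the function, and callers that previously passed true are expected to pass kInitializeRootRegister themselves. A hedged before/after at a call site:

    // before: Linkage::GetSimplifiedCDescriptor(zone, sig, true);
    // after:  Linkage::GetSimplifiedCDescriptor(
    //             zone, sig, CallDescriptor::kInitializeRootRegister);
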
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index af0ba98ffd159b..4f1801146315ec 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -34,9 +34,9 @@ namespace compiler {
 
 static_assert(std::is_convertible<TNode<Number>, TNode<Object>>::value,
               "test subtyping");
-static_assert(std::is_convertible<TNode<UnionT<Smi, HeapNumber>>,
-                                  TNode<UnionT<Smi, HeapObject>>>::value,
-              "test subtyping");
+static_assert(
+    std::is_convertible<TNode<Number>, TNode<UnionT<Smi, HeapObject>>>::value,
+    "test subtyping");
 static_assert(
     !std::is_convertible<TNode<UnionT<Smi, HeapObject>>, TNode<Number>>::value,
     "test subtyping");
@@ -188,6 +188,7 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state,
 }
 
 bool CodeAssembler::Is64() const { return raw_assembler()->machine()->Is64(); }
+bool CodeAssembler::Is32() const { return raw_assembler()->machine()->Is32(); }
 
 bool CodeAssembler::IsFloat64RoundUpSupported() const {
   return raw_assembler()->machine()->Float64RoundUp().IsSupported();
@@ -228,7 +229,7 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node,
          &ok);
   EmbeddedVector<char, 1024> message;
   SNPrintF(message, "no Object: %s", location);
-  Node* message_node = StringConstant(message.begin());
+  TNode<String> message_node = StringConstant(message.begin());
   // This somewhat misuses the AbortCSAAssert runtime function. This will print
   // "abort: CSA_ASSERT failed: <message>", which is good enough.
   AbortCSAAssert(message_node);
@@ -259,7 +260,7 @@ TNode<Number> CodeAssembler::NumberConstant(double value) {
     // (see AllocateAndInstallRequestedHeapObjects) since that makes it easier
     // to generate constant lookups for embedded builtins.
     return UncheckedCast<Number>(HeapConstant(
-        isolate()->factory()->NewHeapNumber(value, AllocationType::kOld)));
+        isolate()->factory()->NewHeapNumberForCodeAssembler(value)));
   }
 }
 
@@ -299,16 +300,12 @@ TNode<Float64T> CodeAssembler::Float64Constant(double value) {
   return UncheckedCast<Float64T>(raw_assembler()->Float64Constant(value));
 }
 
-TNode<HeapNumber> CodeAssembler::NaNConstant() {
-  return UncheckedCast<HeapNumber>(LoadRoot(RootIndex::kNanValue));
-}
-
-bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
+bool CodeAssembler::ToInt32Constant(Node* node, int32_t* out_value) {
   {
     Int64Matcher m(node);
     if (m.HasValue() && m.IsInRange(std::numeric_limits<int32_t>::min(),
                                     std::numeric_limits<int32_t>::max())) {
-      out_value = static_cast<int32_t>(m.Value());
+      *out_value = static_cast<int32_t>(m.Value());
       return true;
     }
   }
@@ -316,7 +313,7 @@ bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
   {
     Int32Matcher m(node);
     if (m.HasValue()) {
-      out_value = m.Value();
+      *out_value = m.Value();
       return true;
     }
   }
@@ -324,9 +321,9 @@ bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
   return false;
 }
 
-bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
+bool CodeAssembler::ToInt64Constant(Node* node, int64_t* out_value) {
   Int64Matcher m(node);
-  if (m.HasValue()) out_value = m.Value();
+  if (m.HasValue()) *out_value = m.Value();
   return m.HasValue();
 }
 
@@ -345,13 +342,13 @@ bool CodeAssembler::ToSmiConstant(Node* node, Smi* out_value) {
   return false;
 }
 
-bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
+bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t* out_value) {
   if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned ||
       node->opcode() == IrOpcode::kBitcastWordToTagged) {
     node = node->InputAt(0);
   }
   IntPtrMatcher m(node);
-  if (m.HasValue()) out_value = m.Value();
+  if (m.HasValue()) *out_value = m.Value();
   return m.HasValue();
 }
 
@@ -383,6 +380,9 @@ TNode<Context> CodeAssembler::GetJSContextParameter() {
 }
 
 void CodeAssembler::Return(SloppyTNode<Object> value) {
+  // TODO(leszeks): This could also return a non-object, depending on the call
+  // descriptor. We should probably have multiple return overloads with
+  // different TNode types which DCHECK the call descriptor.
   return raw_assembler()->Return(value);
 }
 
@@ -453,10 +453,6 @@ TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
   return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
 }
 
-TNode<RawPtrT> CodeAssembler::LoadStackPointer() {
-  return UncheckedCast<RawPtrT>(raw_assembler()->LoadStackPointer());
-}
-
 TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(
     SloppyTNode<Object> value) {
   return UncheckedCast<Object>(
@@ -478,9 +474,9 @@ CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
 TNode<WordT> CodeAssembler::IntPtrAdd(SloppyTNode<WordT> left,
                                       SloppyTNode<WordT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return IntPtrConstant(left_constant + right_constant);
@@ -499,9 +495,9 @@ TNode<WordT> CodeAssembler::IntPtrAdd(SloppyTNode<WordT> left,
 TNode<IntPtrT> CodeAssembler::IntPtrDiv(TNode<IntPtrT> left,
                                         TNode<IntPtrT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_right_constant) {
     if (is_left_constant) {
       return IntPtrConstant(left_constant / right_constant);
@@ -516,9 +512,9 @@ TNode<IntPtrT> CodeAssembler::IntPtrDiv(TNode<IntPtrT> left,
 TNode<WordT> CodeAssembler::IntPtrSub(SloppyTNode<WordT> left,
                                       SloppyTNode<WordT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return IntPtrConstant(left_constant - right_constant);
@@ -534,9 +530,9 @@ TNode<WordT> CodeAssembler::IntPtrSub(SloppyTNode<WordT> left,
 TNode<WordT> CodeAssembler::IntPtrMul(SloppyTNode<WordT> left,
                                       SloppyTNode<WordT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return IntPtrConstant(left_constant * right_constant);
@@ -568,12 +564,16 @@ TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> value, int shift) {
   return (shift != 0) ? Word32Shr(value, Int32Constant(shift)) : value;
 }
 
+TNode<Word32T> CodeAssembler::Word32Sar(SloppyTNode<Word32T> value, int shift) {
+  return (shift != 0) ? Word32Sar(value, Int32Constant(shift)) : value;
+}
+
 TNode<WordT> CodeAssembler::WordOr(SloppyTNode<WordT> left,
                                    SloppyTNode<WordT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return IntPtrConstant(left_constant | right_constant);
@@ -592,9 +592,9 @@ TNode<WordT> CodeAssembler::WordOr(SloppyTNode<WordT> left,
 TNode<WordT> CodeAssembler::WordAnd(SloppyTNode<WordT> left,
                                     SloppyTNode<WordT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return IntPtrConstant(left_constant & right_constant);
@@ -606,9 +606,9 @@ TNode<WordT> CodeAssembler::WordAnd(SloppyTNode<WordT> left,
 TNode<WordT> CodeAssembler::WordXor(SloppyTNode<WordT> left,
                                     SloppyTNode<WordT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return IntPtrConstant(left_constant ^ right_constant);
@@ -620,9 +620,9 @@ TNode<WordT> CodeAssembler::WordXor(SloppyTNode<WordT> left,
 TNode<WordT> CodeAssembler::WordShl(SloppyTNode<WordT> left,
                                     SloppyTNode<IntegralT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return IntPtrConstant(left_constant << right_constant);
@@ -638,9 +638,9 @@ TNode<WordT> CodeAssembler::WordShl(SloppyTNode<WordT> left,
 TNode<WordT> CodeAssembler::WordShr(SloppyTNode<WordT> left,
                                     SloppyTNode<IntegralT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return IntPtrConstant(static_cast<uintptr_t>(left_constant) >>
@@ -657,9 +657,9 @@ TNode<WordT> CodeAssembler::WordShr(SloppyTNode<WordT> left,
 TNode<WordT> CodeAssembler::WordSar(SloppyTNode<WordT> left,
                                     SloppyTNode<IntegralT> right) {
   intptr_t left_constant;
-  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  bool is_left_constant = ToIntPtrConstant(left, &left_constant);
   intptr_t right_constant;
-  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  bool is_right_constant = ToIntPtrConstant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return IntPtrConstant(left_constant >> right_constant);
@@ -675,9 +675,9 @@ TNode<WordT> CodeAssembler::WordSar(SloppyTNode<WordT> left,
 TNode<Word32T> CodeAssembler::Word32Or(SloppyTNode<Word32T> left,
                                        SloppyTNode<Word32T> right) {
   int32_t left_constant;
-  bool is_left_constant = ToInt32Constant(left, left_constant);
+  bool is_left_constant = ToInt32Constant(left, &left_constant);
   int32_t right_constant;
-  bool is_right_constant = ToInt32Constant(right, right_constant);
+  bool is_right_constant = ToInt32Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int32Constant(left_constant | right_constant);
@@ -696,9 +696,9 @@ TNode<Word32T> CodeAssembler::Word32Or(SloppyTNode<Word32T> left,
 TNode<Word32T> CodeAssembler::Word32And(SloppyTNode<Word32T> left,
                                         SloppyTNode<Word32T> right) {
   int32_t left_constant;
-  bool is_left_constant = ToInt32Constant(left, left_constant);
+  bool is_left_constant = ToInt32Constant(left, &left_constant);
   int32_t right_constant;
-  bool is_right_constant = ToInt32Constant(right, right_constant);
+  bool is_right_constant = ToInt32Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int32Constant(left_constant & right_constant);
@@ -710,9 +710,9 @@ TNode<Word32T> CodeAssembler::Word32And(SloppyTNode<Word32T> left,
 TNode<Word32T> CodeAssembler::Word32Xor(SloppyTNode<Word32T> left,
                                         SloppyTNode<Word32T> right) {
   int32_t left_constant;
-  bool is_left_constant = ToInt32Constant(left, left_constant);
+  bool is_left_constant = ToInt32Constant(left, &left_constant);
   int32_t right_constant;
-  bool is_right_constant = ToInt32Constant(right, right_constant);
+  bool is_right_constant = ToInt32Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int32Constant(left_constant ^ right_constant);
@@ -724,9 +724,9 @@ TNode<Word32T> CodeAssembler::Word32Xor(SloppyTNode<Word32T> left,
 TNode<Word32T> CodeAssembler::Word32Shl(SloppyTNode<Word32T> left,
                                         SloppyTNode<Word32T> right) {
   int32_t left_constant;
-  bool is_left_constant = ToInt32Constant(left, left_constant);
+  bool is_left_constant = ToInt32Constant(left, &left_constant);
   int32_t right_constant;
-  bool is_right_constant = ToInt32Constant(right, right_constant);
+  bool is_right_constant = ToInt32Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int32Constant(left_constant << right_constant);
@@ -742,9 +742,9 @@ TNode<Word32T> CodeAssembler::Word32Shl(SloppyTNode<Word32T> left,
 TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> left,
                                         SloppyTNode<Word32T> right) {
   int32_t left_constant;
-  bool is_left_constant = ToInt32Constant(left, left_constant);
+  bool is_left_constant = ToInt32Constant(left, &left_constant);
   int32_t right_constant;
-  bool is_right_constant = ToInt32Constant(right, right_constant);
+  bool is_right_constant = ToInt32Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int32Constant(static_cast<uint32_t>(left_constant) >>
@@ -761,9 +761,9 @@ TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> left,
 TNode<Word32T> CodeAssembler::Word32Sar(SloppyTNode<Word32T> left,
                                         SloppyTNode<Word32T> right) {
   int32_t left_constant;
-  bool is_left_constant = ToInt32Constant(left, left_constant);
+  bool is_left_constant = ToInt32Constant(left, &left_constant);
   int32_t right_constant;
-  bool is_right_constant = ToInt32Constant(right, right_constant);
+  bool is_right_constant = ToInt32Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int32Constant(left_constant >> right_constant);
@@ -779,9 +779,9 @@ TNode<Word32T> CodeAssembler::Word32Sar(SloppyTNode<Word32T> left,
 TNode<Word64T> CodeAssembler::Word64Or(SloppyTNode<Word64T> left,
                                        SloppyTNode<Word64T> right) {
   int64_t left_constant;
-  bool is_left_constant = ToInt64Constant(left, left_constant);
+  bool is_left_constant = ToInt64Constant(left, &left_constant);
   int64_t right_constant;
-  bool is_right_constant = ToInt64Constant(right, right_constant);
+  bool is_right_constant = ToInt64Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int64Constant(left_constant | right_constant);
@@ -800,9 +800,9 @@ TNode<Word64T> CodeAssembler::Word64Or(SloppyTNode<Word64T> left,
 TNode<Word64T> CodeAssembler::Word64And(SloppyTNode<Word64T> left,
                                         SloppyTNode<Word64T> right) {
   int64_t left_constant;
-  bool is_left_constant = ToInt64Constant(left, left_constant);
+  bool is_left_constant = ToInt64Constant(left, &left_constant);
   int64_t right_constant;
-  bool is_right_constant = ToInt64Constant(right, right_constant);
+  bool is_right_constant = ToInt64Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int64Constant(left_constant & right_constant);
@@ -814,9 +814,9 @@ TNode<Word64T> CodeAssembler::Word64And(SloppyTNode<Word64T> left,
 TNode<Word64T> CodeAssembler::Word64Xor(SloppyTNode<Word64T> left,
                                         SloppyTNode<Word64T> right) {
   int64_t left_constant;
-  bool is_left_constant = ToInt64Constant(left, left_constant);
+  bool is_left_constant = ToInt64Constant(left, &left_constant);
   int64_t right_constant;
-  bool is_right_constant = ToInt64Constant(right, right_constant);
+  bool is_right_constant = ToInt64Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int64Constant(left_constant ^ right_constant);
@@ -828,9 +828,9 @@ TNode<Word64T> CodeAssembler::Word64Xor(SloppyTNode<Word64T> left,
 TNode<Word64T> CodeAssembler::Word64Shl(SloppyTNode<Word64T> left,
                                         SloppyTNode<Word64T> right) {
   int64_t left_constant;
-  bool is_left_constant = ToInt64Constant(left, left_constant);
+  bool is_left_constant = ToInt64Constant(left, &left_constant);
   int64_t right_constant;
-  bool is_right_constant = ToInt64Constant(right, right_constant);
+  bool is_right_constant = ToInt64Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int64Constant(left_constant << right_constant);
@@ -846,9 +846,9 @@ TNode<Word64T> CodeAssembler::Word64Shl(SloppyTNode<Word64T> left,
 TNode<Word64T> CodeAssembler::Word64Shr(SloppyTNode<Word64T> left,
                                         SloppyTNode<Word64T> right) {
   int64_t left_constant;
-  bool is_left_constant = ToInt64Constant(left, left_constant);
+  bool is_left_constant = ToInt64Constant(left, &left_constant);
   int64_t right_constant;
-  bool is_right_constant = ToInt64Constant(right, right_constant);
+  bool is_right_constant = ToInt64Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int64Constant(static_cast<uint64_t>(left_constant) >>
@@ -865,9 +865,9 @@ TNode<Word64T> CodeAssembler::Word64Shr(SloppyTNode<Word64T> left,
 TNode<Word64T> CodeAssembler::Word64Sar(SloppyTNode<Word64T> left,
                                         SloppyTNode<Word64T> right) {
   int64_t left_constant;
-  bool is_left_constant = ToInt64Constant(left, left_constant);
+  bool is_left_constant = ToInt64Constant(left, &left_constant);
   int64_t right_constant;
-  bool is_right_constant = ToInt64Constant(right, right_constant);
+  bool is_right_constant = ToInt64Constant(right, &right_constant);
   if (is_left_constant) {
     if (is_right_constant) {
       return Int64Constant(left_constant >> right_constant);
@@ -880,14 +880,13 @@ TNode<Word64T> CodeAssembler::Word64Sar(SloppyTNode<Word64T> left,
   return UncheckedCast<Word64T>(raw_assembler()->Word64Sar(left, right));
 }
 
-#define CODE_ASSEMBLER_COMPARE(Name, ArgT, VarT, ToConstant, op)     \
-  TNode<BoolT> CodeAssembler::Name(SloppyTNode<ArgT> left,           \
-                                   SloppyTNode<ArgT> right) {        \
-    VarT lhs, rhs;                                                   \
-    if (ToConstant(left, lhs) && ToConstant(right, rhs)) {           \
-      return BoolConstant(lhs op rhs);                               \
-    }                                                                \
-    return UncheckedCast<BoolT>(raw_assembler()->Name(left, right)); \
+#define CODE_ASSEMBLER_COMPARE(Name, ArgT, VarT, ToConstant, op)          \
+  TNode<BoolT> CodeAssembler::Name(TNode<ArgT> left, TNode<ArgT> right) { \
+    VarT lhs, rhs;                                                        \
+    if (ToConstant(left, &lhs) && ToConstant(right, &rhs)) {              \
+      return BoolConstant(lhs op rhs);                                    \
+    }                                                                     \
+    return UncheckedCast<BoolT>(raw_assembler()->Name(left, right));      \
   }
 
 CODE_ASSEMBLER_COMPARE(IntPtrEqual, WordT, intptr_t, ToIntPtrConstant, ==)
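
Besides switching ToConstant to the pointer form, the macro tightens the SloppyTNode<ArgT> parameters to TNode<ArgT>; the folding itself is unchanged: when both operands are compile-time constants, the comparison collapses to a BoolConstant. A standalone analogue of that fold:

    #include <cstdint>
    #include <optional>

    // Fold a comparison when both inputs are known constants; otherwise
    // signal that a machine-level compare node must be emitted.
    std::optional<bool> FoldIntPtrEqualSketch(std::optional<intptr_t> lhs,
                                              std::optional<intptr_t> rhs) {
      if (lhs && rhs) return *lhs == *rhs;
      return std::nullopt;
    }
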
@@ -959,14 +958,14 @@ Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
   return raw_assembler()->Load(type, base, offset, needs_poisoning);
 }
 
-Node* CodeAssembler::LoadFullTagged(Node* base,
-                                    LoadSensitivity needs_poisoning) {
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
+                                            LoadSensitivity needs_poisoning) {
   return BitcastWordToTagged(
       Load(MachineType::Pointer(), base, needs_poisoning));
 }
 
-Node* CodeAssembler::LoadFullTagged(Node* base, Node* offset,
-                                    LoadSensitivity needs_poisoning) {
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base, Node* offset,
+                                            LoadSensitivity needs_poisoning) {
   return BitcastWordToTagged(
       Load(MachineType::Pointer(), base, offset, needs_poisoning));
 }
@@ -993,7 +992,7 @@ TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
   // TODO(jgruber): In theory we could generate better code for this by
   // letting the macro assembler decide how to load from the roots list. In most
   // cases, it would boil down to loading from a fixed kRootRegister offset.
-  Node* isolate_root =
+  TNode<ExternalReference> isolate_root =
       ExternalConstant(ExternalReference::isolate_root(isolate()));
   int offset = IsolateData::root_slot_offset(root_index);
   return UncheckedCast<Object>(
@@ -1133,7 +1132,7 @@ Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
 
 Node* CodeAssembler::StoreRoot(RootIndex root_index, Node* value) {
   DCHECK(!RootsTable::IsImmortalImmovable(root_index));
-  Node* isolate_root =
+  TNode<ExternalReference> isolate_root =
       ExternalConstant(ExternalReference::isolate_root(isolate()));
   int offset = IsolateData::root_slot_offset(root_index);
   return StoreFullTaggedNoWriteBarrier(isolate_root, IntPtrConstant(offset),
@@ -1248,8 +1247,9 @@ TNode<Object> CodeAssembler::CallRuntimeWithCEntryImpl(
       Runtime::MayAllocate(function) ? CallDescriptor::kNoFlags
                                      : CallDescriptor::kNoAllocate);
 
-  Node* ref = ExternalConstant(ExternalReference::Create(function));
-  Node* arity = Int32Constant(argc);
+  TNode<ExternalReference> ref =
+      ExternalConstant(ExternalReference::Create(function));
+  TNode<Int32T> arity = Int32Constant(argc);
 
   NodeArray<kMaxNumArgs + 4> inputs;
   inputs.Add(centry);
@@ -1285,7 +1285,8 @@ void CodeAssembler::TailCallRuntimeWithCEntryImpl(
       zone(), function, argc, Operator::kNoProperties,
       CallDescriptor::kNoFlags);
 
-  Node* ref = ExternalConstant(ExternalReference::Create(function));
+  TNode<ExternalReference> ref =
+      ExternalConstant(ExternalReference::Create(function));
 
   NodeArray<kMaxNumArgs + 4> inputs;
   inputs.Add(centry);
@@ -1468,7 +1469,7 @@ void CodeAssembler::GotoIfNot(SloppyTNode<IntegralT> condition,
 void CodeAssembler::Branch(SloppyTNode<IntegralT> condition, Label* true_label,
                            Label* false_label) {
   int32_t constant;
-  if (ToInt32Constant(condition, constant)) {
+  if (ToInt32Constant(condition, &constant)) {
     if ((true_label->is_used() || true_label->is_bound()) &&
         (false_label->is_used() || false_label->is_bound())) {
       return Goto(constant ? true_label : false_label);
@@ -1484,7 +1485,7 @@ void CodeAssembler::Branch(TNode<BoolT> condition,
                            const std::function<void()>& true_body,
                            const std::function<void()>& false_body) {
   int32_t constant;
-  if (ToInt32Constant(condition, constant)) {
+  if (ToInt32Constant(condition, &constant)) {
     return constant ? true_body() : false_body();
   }
 
@@ -1501,7 +1502,7 @@ void CodeAssembler::Branch(TNode<BoolT> condition,
 void CodeAssembler::Branch(TNode<BoolT> condition, Label* true_label,
                            const std::function<void()>& false_body) {
   int32_t constant;
-  if (ToInt32Constant(condition, constant)) {
+  if (ToInt32Constant(condition, &constant)) {
     return constant ? Goto(true_label) : false_body();
   }
 
@@ -1515,7 +1516,7 @@ void CodeAssembler::Branch(TNode<BoolT> condition,
                            const std::function<void()>& true_body,
                            Label* false_label) {
   int32_t constant;
-  if (ToInt32Constant(condition, constant)) {
+  if (ToInt32Constant(condition, &constant)) {
     return constant ? true_body() : Goto(false_label);
   }
 
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index cc432214aa1063..c9adb1601db1eb 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -5,9 +5,9 @@
 #ifndef V8_COMPILER_CODE_ASSEMBLER_H_
 #define V8_COMPILER_CODE_ASSEMBLER_H_
 
+#include <initializer_list>
 #include <map>
 #include <memory>
-#include <initializer_list>
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
@@ -43,7 +43,6 @@ class BigInt;
 class CallInterfaceDescriptor;
 class Callable;
 class Factory;
-class FinalizationGroupCleanupJobTask;
 class InterpreterData;
 class Isolate;
 class JSAsyncFunctionObject;
@@ -317,6 +316,7 @@ class CompilationCacheTable;
 class Constructor;
 class Filler;
 class FunctionTemplateRareData;
+class HeapNumber;
 class InternalizedString;
 class JSArgumentsObject;
 class JSArrayBufferView;
@@ -324,7 +324,6 @@ class JSContextExtensionObject;
 class JSError;
 class JSSloppyArgumentsObject;
 class MapCache;
-class MutableHeapNumber;
 class NativeContext;
 class NumberWrapper;
 class ScriptWrapper;
@@ -645,7 +644,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
   V(BitcastInt32ToFloat32, Float32T, Word32T)                  \
   V(BitcastFloat32ToInt32, Uint32T, Float32T)                  \
   V(RoundFloat64ToInt32, Int32T, Float64T)                     \
-  V(RoundInt32ToFloat32, Int32T, Float32T)                     \
+  V(RoundInt32ToFloat32, Float32T, Int32T)                     \
   V(Float64SilenceNaN, Float64T, Float64T)                     \
   V(Float64RoundDown, Float64T, Float64T)                      \
   V(Float64RoundUp, Float64T, Float64T)                        \
@@ -657,7 +656,8 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
   V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T)    \
   V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T)    \
   V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \
-  V(Word32BinaryNot, BoolT, Word32T)
+  V(Word32BinaryNot, BoolT, Word32T)                           \
+  V(StackPointerGreaterThan, BoolT, WordT)
 
 // A "public" interface used by components outside of compiler directory to
 // create code objects with TurboFan's backend. This class is mostly a thin
@@ -688,6 +688,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
                                    const AssemblerOptions& options);
 
   bool Is64() const;
+  bool Is32() const;
   bool IsFloat64RoundUpSupported() const;
   bool IsFloat64RoundDownSupported() const;
   bool IsFloat64RoundTiesEvenSupported() const;
@@ -738,7 +739,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
         if (std::is_same<PreviousType, MaybeObject>::value) {
           code_assembler_->GenerateCheckMaybeObjectIsObject(node_, location_);
         }
-        Node* function = code_assembler_->ExternalConstant(
+        TNode<ExternalReference> function = code_assembler_->ExternalConstant(
             ExternalReference::check_object_type());
         code_assembler_->CallCFunction(
             function, MachineType::AnyTagged(),
@@ -842,7 +843,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   TNode<Oddball> BooleanConstant(bool value);
   TNode<ExternalReference> ExternalConstant(ExternalReference address);
   TNode<Float64T> Float64Constant(double value);
-  TNode<HeapNumber> NaNConstant();
   TNode<BoolT> Int32TrueConstant() {
     return ReinterpretCast<BoolT>(Int32Constant(1));
   }
@@ -853,15 +853,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
     return value ? Int32TrueConstant() : Int32FalseConstant();
   }
 
-  // TODO(jkummerow): The style guide wants pointers for output parameters.
-  // https://google.github.io/styleguide/cppguide.html#Output_Parameters
-  bool ToInt32Constant(Node* node,
-                       int32_t& out_value);  // NOLINT(runtime/references)
-  bool ToInt64Constant(Node* node,
-                       int64_t& out_value);  // NOLINT(runtime/references)
+  bool ToInt32Constant(Node* node, int32_t* out_value);
+  bool ToInt64Constant(Node* node, int64_t* out_value);
+  bool ToIntPtrConstant(Node* node, intptr_t* out_value);
   bool ToSmiConstant(Node* node, Smi* out_value);
-  bool ToIntPtrConstant(Node* node,
-                        intptr_t& out_value);  // NOLINT(runtime/references)
 
   bool IsUndefinedConstant(TNode<Object> node);
   bool IsNullConstant(TNode<Object> node);
@@ -959,9 +954,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   TNode<RawPtrT> LoadFramePointer();
   TNode<RawPtrT> LoadParentFramePointer();
 
-  // Access to the stack pointer
-  TNode<RawPtrT> LoadStackPointer();
-
   // Poison |value| on speculative paths.
   TNode<Object> TaggedPoisonOnSpeculation(SloppyTNode<Object> value);
   TNode<WordT> WordPoisonOnSpeculation(SloppyTNode<WordT> value);
@@ -977,12 +969,24 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   }
   Node* Load(MachineType type, Node* base, Node* offset,
              LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+  template <class Type>
+  TNode<Type> Load(Node* base,
+                   LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+    return UncheckedCast<Type>(
+        Load(MachineTypeOf<Type>::value, base, needs_poisoning));
+  }
+  template <class Type>
+  TNode<Type> Load(Node* base, SloppyTNode<WordT> offset,
+                   LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+    return UncheckedCast<Type>(
+        Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
+  }
   Node* AtomicLoad(MachineType type, Node* base, Node* offset);
   // Load uncompressed tagged value from (most likely off JS heap) memory
   // location.
-  Node* LoadFullTagged(
+  TNode<Object> LoadFullTagged(
       Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
-  Node* LoadFullTagged(
+  TNode<Object> LoadFullTagged(
       Node* base, Node* offset,
       LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
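+  // The templated Load<Type> overloads above derive the machine
+  // representation from the C++ tag type via MachineTypeOf<Type> and wrap the
+  // untyped load in a TNode<Type>, so call sites keep static typing without
+  // naming the MachineType. Hedged usage sketch:
+  //   TNode<Int32T> x = Load<Int32T>(base, offset);
+  //   // instead of:
+  //   Node* x = Load(MachineType::Int32(), base, offset);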
 
@@ -1119,50 +1123,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
         Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
   }
 
-  template <class Left, class Right,
-            class = typename std::enable_if<
-                std::is_base_of<Object, Left>::value &&
-                std::is_base_of<Object, Right>::value>::type>
-  TNode<BoolT> WordEqual(TNode<Left> left, TNode<Right> right) {
-    return WordEqual(ReinterpretCast<WordT>(left),
-                     ReinterpretCast<WordT>(right));
-  }
-  TNode<BoolT> WordEqual(TNode<Object> left, Node* right) {
-    return WordEqual(ReinterpretCast<WordT>(left),
-                     ReinterpretCast<WordT>(right));
-  }
-  TNode<BoolT> WordEqual(Node* left, TNode<Object> right) {
-    return WordEqual(ReinterpretCast<WordT>(left),
-                     ReinterpretCast<WordT>(right));
-  }
-  template <class Left, class Right,
-            class = typename std::enable_if<
-                std::is_base_of<Object, Left>::value &&
-                std::is_base_of<Object, Right>::value>::type>
-  TNode<BoolT> WordNotEqual(TNode<Left> left, TNode<Right> right) {
-    return WordNotEqual(ReinterpretCast<WordT>(left),
-                        ReinterpretCast<WordT>(right));
-  }
-  TNode<BoolT> WordNotEqual(TNode<Object> left, Node* right) {
-    return WordNotEqual(ReinterpretCast<WordT>(left),
-                        ReinterpretCast<WordT>(right));
-  }
-  TNode<BoolT> WordNotEqual(Node* left, TNode<Object> right) {
-    return WordNotEqual(ReinterpretCast<WordT>(left),
-                        ReinterpretCast<WordT>(right));
-  }
-
-  TNode<BoolT> IntPtrEqual(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
-  TNode<BoolT> WordEqual(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
-  TNode<BoolT> WordNotEqual(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
-  TNode<BoolT> Word32Equal(SloppyTNode<Word32T> left,
-                           SloppyTNode<Word32T> right);
-  TNode<BoolT> Word32NotEqual(SloppyTNode<Word32T> left,
-                              SloppyTNode<Word32T> right);
-  TNode<BoolT> Word64Equal(SloppyTNode<Word64T> left,
-                           SloppyTNode<Word64T> right);
-  TNode<BoolT> Word64NotEqual(SloppyTNode<Word64T> left,
-                              SloppyTNode<Word64T> right);
+  TNode<BoolT> IntPtrEqual(TNode<WordT> left, TNode<WordT> right);
+  TNode<BoolT> WordEqual(TNode<WordT> left, TNode<WordT> right);
+  TNode<BoolT> WordNotEqual(TNode<WordT> left, TNode<WordT> right);
+  TNode<BoolT> Word32Equal(TNode<Word32T> left, TNode<Word32T> right);
+  TNode<BoolT> Word32NotEqual(TNode<Word32T> left, TNode<Word32T> right);
+  TNode<BoolT> Word64Equal(TNode<Word64T> left, TNode<Word64T> right);
+  TNode<BoolT> Word64NotEqual(TNode<Word64T> left, TNode<Word64T> right);
 
   TNode<BoolT> Word32Or(TNode<BoolT> left, TNode<BoolT> right) {
     return UncheckedCast<BoolT>(
@@ -1234,6 +1201,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
     return UncheckedCast<IntPtrT>(WordSar(static_cast<Node*>(value), shift));
   }
   TNode<Word32T> Word32Shr(SloppyTNode<Word32T> value, int shift);
+  TNode<Word32T> Word32Sar(SloppyTNode<Word32T> value, int shift);
 
   TNode<WordT> WordOr(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
   TNode<WordT> WordAnd(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
@@ -1433,7 +1401,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   Node* CallJS(Callable const& callable, Node* context, Node* function,
                Node* receiver, TArgs... args) {
     int argc = static_cast<int>(sizeof...(args));
-    Node* arity = Int32Constant(argc);
+    TNode<Int32T> arity = Int32Constant(argc);
     return CallStub(callable, context, function, arity, receiver, args...);
   }
 
@@ -1441,8 +1409,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   Node* ConstructJSWithTarget(Callable const& callable, Node* context,
                               Node* target, Node* new_target, TArgs... args) {
     int argc = static_cast<int>(sizeof...(args));
-    Node* arity = Int32Constant(argc);
-    Node* receiver = LoadRoot(RootIndex::kUndefinedValue);
+    TNode<Int32T> arity = Int32Constant(argc);
+    TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
 
     // Construct(target, new_target, arity, receiver, arguments...)
     return CallStub(callable, context, target, new_target, arity, receiver,
@@ -1842,9 +1810,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerScopedExceptionHandler {
 
 }  // namespace compiler
 
-#if defined(V8_HOST_ARCH_32_BIT)
+#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
+#define BINT_IS_SMI
 using BInt = Smi;
 #elif defined(V8_HOST_ARCH_64_BIT)
+#define BINT_IS_INTPTR
 using BInt = IntPtrT;
 #else
 #error Unknown architecture.
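
BInt now resolves to Smi not only on 32-bit hosts but also under pointer compression, and the new BINT_IS_SMI / BINT_IS_INTPTR macros let dependent code branch on that choice at preprocessing time. A hedged usage sketch:

    // #ifdef BINT_IS_SMI
    //   TNode<BInt> zero = SmiConstant(0);
    // #else  // BINT_IS_INTPTR
    //   TNode<BInt> zero = IntPtrConstant(0);
    // #endif
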
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 0ef6402264bfac..c2d8cf44693ca6 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -99,7 +99,8 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
 }
 
 size_t hash_value(DeoptimizeParameters p) {
-  return base::hash_combine(p.kind(), p.reason(), p.feedback(),
+  FeedbackSource::Hash feedback_hash;
+  return base::hash_combine(p.kind(), p.reason(), feedback_hash(p.feedback()),
                             p.is_safety_check());
 }
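
Since FeedbackSource is hashed through its nested Hash functor rather than an overloaded hash_value, the combiner has to apply the functor explicitly before handing the result to base::hash_combine. A standard-library analogue:

    #include <cstddef>
    #include <functional>

    struct SourceSketch {
      int vector_id;
      int slot_id;
    };

    // Explicit functor application, mirroring FeedbackSource::Hash above.
    struct SourceHashSketch {
      size_t operator()(const SourceSketch& s) const {
        size_t h = std::hash<int>()(s.vector_id);
        return h ^ (std::hash<int>()(s.slot_id) + 0x9e3779b9 + (h << 6));
      }
    };
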
 
@@ -179,7 +180,6 @@ SelectParameters const& SelectParametersOf(const Operator* const op) {
 
 CallDescriptor const* CallDescriptorOf(const Operator* const op) {
   DCHECK(op->opcode() == IrOpcode::kCall ||
-         op->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
          op->opcode() == IrOpcode::kTailCall);
   return OpParameter<CallDescriptor const*>(op);
 }
@@ -729,7 +729,7 @@ struct CommonOperatorGlobalCache final {
               Operator::kFoldable | Operator::kNoThrow,  // properties
               "Deoptimize",                              // name
               1, 1, 1, 0, 0, 1,                          // counts
-              DeoptimizeParameters(kKind, kReason, VectorSlotPair(),
+              DeoptimizeParameters(kKind, kReason, FeedbackSource(),
                                    IsSafetyCheck::kNoSafetyCheck)) {}
   };
 #define CACHED_DEOPTIMIZE(Kind, Reason)                                    \
@@ -747,7 +747,7 @@ struct CommonOperatorGlobalCache final {
               Operator::kFoldable | Operator::kNoThrow,  // properties
               "DeoptimizeIf",                            // name
               2, 1, 1, 0, 1, 1,                          // counts
-              DeoptimizeParameters(kKind, kReason, VectorSlotPair(),
+              DeoptimizeParameters(kKind, kReason, FeedbackSource(),
                                    is_safety_check)) {}
   };
 #define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck)                          \
@@ -767,7 +767,7 @@ struct CommonOperatorGlobalCache final {
               Operator::kFoldable | Operator::kNoThrow,  // properties
               "DeoptimizeUnless",                        // name
               2, 1, 1, 0, 1, 1,                          // counts
-              DeoptimizeParameters(kKind, kReason, VectorSlotPair(),
+              DeoptimizeParameters(kKind, kReason, FeedbackSource(),
                                    is_safety_check)) {}
   };
 #define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
@@ -948,7 +948,7 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint,
 
 const Operator* CommonOperatorBuilder::Deoptimize(
     DeoptimizeKind kind, DeoptimizeReason reason,
-    VectorSlotPair const& feedback) {
+    FeedbackSource const& feedback) {
 #define CACHED_DEOPTIMIZE(Kind, Reason)                               \
   if (kind == DeoptimizeKind::k##Kind &&                              \
       reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
@@ -969,7 +969,7 @@ const Operator* CommonOperatorBuilder::Deoptimize(
 
 const Operator* CommonOperatorBuilder::DeoptimizeIf(
     DeoptimizeKind kind, DeoptimizeReason reason,
-    VectorSlotPair const& feedback, IsSafetyCheck is_safety_check) {
+    FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
 #define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck)                          \
   if (kind == DeoptimizeKind::k##Kind &&                                     \
       reason == DeoptimizeReason::k##Reason &&                               \
@@ -990,7 +990,7 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(
 
 const Operator* CommonOperatorBuilder::DeoptimizeUnless(
     DeoptimizeKind kind, DeoptimizeReason reason,
-    VectorSlotPair const& feedback, IsSafetyCheck is_safety_check) {
+    FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
 #define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck)                      \
   if (kind == DeoptimizeKind::k##Kind &&                                     \
       reason == DeoptimizeReason::k##Reason &&                               \
@@ -1481,31 +1481,6 @@ const Operator* CommonOperatorBuilder::Call(
   return new (zone()) CallOperator(call_descriptor);
 }
 
-const Operator* CommonOperatorBuilder::CallWithCallerSavedRegisters(
-    const CallDescriptor* call_descriptor) {
-  class CallOperator final : public Operator1<const CallDescriptor*> {
-   public:
-    explicit CallOperator(const CallDescriptor* call_descriptor)
-        : Operator1<const CallDescriptor*>(
-              IrOpcode::kCallWithCallerSavedRegisters,
-              call_descriptor->properties(), "CallWithCallerSavedRegisters",
-              call_descriptor->InputCount() +
-                  call_descriptor->FrameStateCount(),
-              Operator::ZeroIfPure(call_descriptor->properties()),
-              Operator::ZeroIfEliminatable(call_descriptor->properties()),
-              call_descriptor->ReturnCount(),
-              Operator::ZeroIfPure(call_descriptor->properties()),
-              Operator::ZeroIfNoThrow(call_descriptor->properties()),
-              call_descriptor) {}
-
-    void PrintParameter(std::ostream& os,
-                        PrintVerbosity verbose) const override {
-      os << "[" << *parameter() << "]";
-    }
-  };
-  return new (zone()) CallOperator(call_descriptor);
-}
-
 const Operator* CommonOperatorBuilder::TailCall(
     const CallDescriptor* call_descriptor) {
   class TailCallOperator final : public Operator1<const CallDescriptor*> {
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 9f634e72ec27a9..2b0dcc7db9df9b 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -10,8 +10,8 @@
 #include "src/codegen/reloc-info.h"
 #include "src/codegen/string-constants.h"
 #include "src/common/globals.h"
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/frame-states.h"
-#include "src/compiler/vector-slot-pair.h"
 #include "src/deoptimizer/deoptimize-reason.h"
 #include "src/zone/zone-containers.h"
 #include "src/zone/zone-handle-set.h"
@@ -104,7 +104,7 @@ int ValueInputCountOfReturn(Operator const* const op);
 class DeoptimizeParameters final {
  public:
   DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
-                       VectorSlotPair const& feedback,
+                       FeedbackSource const& feedback,
                        IsSafetyCheck is_safety_check)
       : kind_(kind),
         reason_(reason),
@@ -113,13 +113,13 @@ class DeoptimizeParameters final {
 
   DeoptimizeKind kind() const { return kind_; }
   DeoptimizeReason reason() const { return reason_; }
-  const VectorSlotPair& feedback() const { return feedback_; }
+  const FeedbackSource& feedback() const { return feedback_; }
   IsSafetyCheck is_safety_check() const { return is_safety_check_; }
 
  private:
   DeoptimizeKind const kind_;
   DeoptimizeReason const reason_;
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
   IsSafetyCheck is_safety_check_;
 };
 
@@ -468,14 +468,14 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
   const Operator* IfDefault(BranchHint hint = BranchHint::kNone);
   const Operator* Throw();
   const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
-                             VectorSlotPair const& feedback);
+                             FeedbackSource const& feedback);
   const Operator* DeoptimizeIf(
       DeoptimizeKind kind, DeoptimizeReason reason,
-      VectorSlotPair const& feedback,
+      FeedbackSource const& feedback,
       IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
   const Operator* DeoptimizeUnless(
       DeoptimizeKind kind, DeoptimizeReason reason,
-      VectorSlotPair const& feedback,
+      FeedbackSource const& feedback,
       IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
   const Operator* TrapIf(TrapId trap_id);
   const Operator* TrapUnless(TrapId trap_id);
@@ -530,8 +530,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
                              OutputFrameStateCombine state_combine,
                              const FrameStateFunctionInfo* function_info);
   const Operator* Call(const CallDescriptor* call_descriptor);
-  const Operator* CallWithCallerSavedRegisters(
-      const CallDescriptor* call_descriptor);
   const Operator* TailCall(const CallDescriptor* call_descriptor);
   const Operator* Projection(size_t index);
   const Operator* Retain();
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 673f4a341be8ce..592d85440cc23b 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -550,13 +550,7 @@ namespace {
 // This function expects to never see a JSProxy.
 void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
                                   base::Optional<JSObjectRef> last_prototype) {
-  // TODO(neis): Remove heap access (SerializePrototype call).
-  AllowCodeDependencyChange dependency_change_;
-  AllowHandleAllocation handle_allocation_;
-  AllowHandleDereference handle_dereference_;
-  AllowHeapAllocation heap_allocation_;
   while (true) {
-    map.SerializePrototype();
     HeapObjectRef proto = map.prototype();
     if (!proto.IsJSObject()) {
       CHECK_EQ(proto.map().oddball_type(), OddballType::kNull);
@@ -580,7 +574,7 @@ void CompilationDependencies::DependOnStablePrototypeChains(
       // Perform the implicit ToObject for primitives here.
       // Implemented according to ES6 section 7.3.2 GetV (V, P).
       base::Optional<JSFunctionRef> constructor =
-          broker_->native_context().GetConstructorFunction(receiver_map);
+          broker_->target_native_context().GetConstructorFunction(receiver_map);
       if (constructor.has_value()) receiver_map = constructor->initial_map();
     }
     DependOnStablePrototypeChain(this, receiver_map, last_prototype);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 788638fe68b8f4..8dfe356c34d485 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -9,6 +9,7 @@
 #include "src/common/ptr-compr-inl.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/graph-assembler.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/linkage.h"
@@ -209,13 +210,13 @@ class EffectControlLinearizer {
 
   Node* AllocateHeapNumberWithValue(Node* node);
   Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
-                                   const VectorSlotPair& feedback, Node* value,
+                                   const FeedbackSource& feedback, Node* value,
                                    Node* frame_state);
   Node* BuildCheckedFloat64ToInt64(CheckForMinusZeroMode mode,
-                                   const VectorSlotPair& feedback, Node* value,
+                                   const FeedbackSource& feedback, Node* value,
                                    Node* frame_state);
   Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
-                                                 const VectorSlotPair& feedback,
+                                                 const FeedbackSource& feedback,
                                                  Node* value,
                                                  Node* frame_state);
   Node* BuildReverseBytes(ExternalArrayType type, Node* value);
@@ -239,6 +240,7 @@ class EffectControlLinearizer {
   Node* ChangeSmiToInt32(Node* value);
   Node* ChangeSmiToInt64(Node* value);
   Node* ObjectIsSmi(Node* value);
+  Node* CompressedObjectIsSmi(Node* value);
   Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
 
   Node* SmiMaxValueConstant();
@@ -1525,7 +1527,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) {
 
 Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
   Node* value = node->InputAt(0);
-  return __ WordEqual(value, __ TrueConstant());
+  return __ TaggedEqual(value, __ TrueConstant());
 }
 
 void EffectControlLinearizer::TruncateTaggedPointerToBit(
@@ -1539,10 +1541,10 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
   Node* fzero = __ Float64Constant(0.0);
 
   // Check if {value} is false.
-  __ GotoIf(__ WordEqual(value, __ FalseConstant()), done, zero);
+  __ GotoIf(__ TaggedEqual(value, __ FalseConstant()), done, zero);
 
   // Check if {value} is the empty string.
-  __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), done, zero);
+  __ GotoIf(__ TaggedEqual(value, __ EmptyStringConstant()), done, zero);
 
   // Load the map of {value}.
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
@@ -1559,11 +1561,11 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
       done, zero);
 
   // Check if {value} is a HeapNumber.
-  __ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
+  __ GotoIf(__ TaggedEqual(value_map, __ HeapNumberMapConstant()),
             &if_heapnumber);
 
   // Check if {value} is a BigInt.
-  __ GotoIf(__ WordEqual(value_map, __ BigIntMapConstant()), &if_bigint);
+  __ GotoIf(__ TaggedEqual(value_map, __ BigIntMapConstant()), &if_bigint);
 
   // All other values that reach here are true.
   __ Goto(done, __ Int32Constant(1));
@@ -1599,7 +1601,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
   __ Bind(&if_smi);
   {
     // If {value} is a Smi, then we only need to check that it's not zero.
-    __ Goto(&done, __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)),
+    __ Goto(&done, __ Word32Equal(__ IntPtrEqual(value, __ IntPtrConstant(0)),
                                   __ Int32Constant(0)));
   }
 
@@ -1711,7 +1713,7 @@ Node* EffectControlLinearizer::LowerChangeCompressedToTaggedSigned(Node* node) {
   auto if_not_smi = __ MakeDeferredLabel();
   auto done = __ MakeLabel(MachineRepresentation::kWord32);
 
-  Node* check = ObjectIsSmi(value);
+  Node* check = CompressedObjectIsSmi(value);
   __ GotoIfNot(check, &if_not_smi);
   __ Goto(&done, __ ChangeCompressedSignedToTaggedSigned(value));
 
@@ -1795,7 +1797,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
     // Perform the map checks.
     for (size_t i = 0; i < map_count; ++i) {
       Node* map = __ HeapConstant(maps[i]);
-      Node* check = __ WordEqual(value_map, map);
+      Node* check = __ TaggedEqual(value_map, map);
       if (i == map_count - 1) {
         __ Branch(check, &done, &migrate, IsSafetyCheck::kCriticalSafetyCheck);
       } else {
@@ -1811,7 +1813,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
       // If map is not deprecated the migration attempt does not make sense.
       Node* bitfield3 =
           __ LoadField(AccessBuilder::ForMapBitField3(), value_map);
-      Node* if_not_deprecated = __ WordEqual(
+      Node* if_not_deprecated = __ Word32Equal(
           __ Word32And(bitfield3,
                        __ Int32Constant(Map::IsDeprecatedBit::kMask)),
           __ Int32Constant(0));
@@ -1837,7 +1839,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
     // Perform the map checks again.
     for (size_t i = 0; i < map_count; ++i) {
       Node* map = __ HeapConstant(maps[i]);
-      Node* check = __ WordEqual(value_map, map);
+      Node* check = __ TaggedEqual(value_map, map);
       if (i == map_count - 1) {
         __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
                            frame_state, IsSafetyCheck::kCriticalSafetyCheck);
@@ -1858,7 +1860,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
 
     for (size_t i = 0; i < map_count; ++i) {
       Node* map = __ HeapConstant(maps[i]);
-      Node* check = __ WordEqual(value_map, map);
+      Node* check = __ TaggedEqual(value_map, map);
 
       if (i == map_count - 1) {
         __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
@@ -1886,7 +1888,7 @@ Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
 
   for (size_t i = 0; i < map_count; ++i) {
     Node* map = __ HeapConstant(maps[i]);
-    Node* check = __ WordEqual(value_map, map);
+    Node* check = __ TaggedEqual(value_map, map);
 
     auto next_map = __ MakeLabel();
     auto passed = __ MakeLabel();
@@ -1916,7 +1918,7 @@ Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
 
   __ Bind(&if_not_smi);
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-  Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant());
+  Node* check1 = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
   __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
                      check1, frame_state);
   __ Goto(&done);
@@ -1936,7 +1938,7 @@ Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   Node* check = __ Uint32LessThanOrEqual(
       __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
-  __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, VectorSlotPair(),
+  __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, FeedbackSource(),
                      check, frame_state);
   return value;
 }
@@ -1955,12 +1957,12 @@ Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined(
   Node* check0 = __ Uint32LessThanOrEqual(__ Uint32Constant(ODDBALL_TYPE),
                                           value_instance_type);
   __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
-                     VectorSlotPair(), check0, frame_state);
+                     FeedbackSource(), check0, frame_state);
 
   // Rule out booleans.
-  Node* check1 = __ WordEqual(value_map, __ BooleanMapConstant());
+  Node* check1 = __ TaggedEqual(value_map, __ BooleanMapConstant());
   __ DeoptimizeIf(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
-                  VectorSlotPair(), check1, frame_state);
+                  FeedbackSource(), check1, frame_state);
   return value;
 }
 
@@ -1970,8 +1972,8 @@ Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
 
   Node* check =
-      __ WordEqual(value_map, __ HeapConstant(factory()->symbol_map()));
-  __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, VectorSlotPair(), check,
+      __ TaggedEqual(value_map, __ HeapConstant(factory()->symbol_map()));
+  __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, FeedbackSource(), check,
                      frame_state);
   return value;
 }
@@ -2003,7 +2005,7 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
       __ Word32And(value_instance_type,
                    __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
       __ Int32Constant(kInternalizedTag));
-  __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, VectorSlotPair(),
+  __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, FeedbackSource(),
                      check, frame_state);
 
   return value;
@@ -2040,7 +2042,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
 
   Node* value = __ Int32AddWithOverflow(lhs, rhs);
   Node* check = __ Projection(1, value);
-  __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check,
                   frame_state);
   return __ Projection(0, value);
 }
@@ -2052,7 +2054,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
 
   Node* value = __ Int32SubWithOverflow(lhs, rhs);
   Node* check = __ Projection(1, value);
-  __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check,
                   frame_state);
   return __ Projection(0, value);
 }
@@ -2075,7 +2077,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
     Node* mask = __ Int32Constant(divisor - 1);
     Node* shift = __ Int32Constant(WhichPowerOf2(divisor));
     Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
-    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(),
+    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                        check, frame_state);
     return __ Word32Sar(lhs, shift);
   } else {
@@ -2100,12 +2102,12 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
 
       // Check if {rhs} is zero.
       Node* check_rhs_zero = __ Word32Equal(rhs, zero);
-      __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(),
+      __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(),
                       check_rhs_zero, frame_state);
 
       // Check if {lhs} is zero, as that would produce minus zero.
       Node* check_lhs_zero = __ Word32Equal(lhs, zero);
-      __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(),
+      __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(),
                       check_lhs_zero, frame_state);
 
       // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
@@ -2118,7 +2120,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
       {
         // Check that {rhs} is not -1, otherwise result would be -kMinInt.
         Node* check_rhs_minusone = __ Word32Equal(rhs, __ Int32Constant(-1));
-        __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(),
+        __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(),
                         check_rhs_minusone, frame_state);
 
         // Perform the actual integer division.
@@ -2137,7 +2139,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
 
     // Check if the remainder is non-zero.
     Node* check = __ Word32Equal(lhs, __ Int32Mul(value, rhs));
-    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(),
+    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                        check, frame_state);
 
     return value;
@@ -2219,7 +2221,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
     Node* vtrue0 = __ Int32Sub(zero, rhs);
 
     // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
-    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(),
+    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(),
                     __ Word32Equal(vtrue0, zero), frame_state);
     __ Goto(&rhs_checked, vtrue0);
   }
@@ -2242,7 +2244,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
     Node* res = __ Uint32Mod(__ Int32Sub(zero, lhs), rhs);
 
     // Check if we would have to return -0.
-    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(),
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(),
                     __ Word32Equal(res, zero), frame_state);
     __ Goto(&done, __ Int32Sub(zero, res));
   }
@@ -2269,13 +2271,13 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
     Node* mask = __ Uint32Constant(divisor - 1);
     Node* shift = __ Uint32Constant(WhichPowerOf2(divisor));
     Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
-    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(),
+    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                        check, frame_state);
     return __ Word32Shr(lhs, shift);
   } else {
     // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
     Node* check = __ Word32Equal(rhs, zero);
-    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), check,
                     frame_state);
 
     // Perform the actual unsigned integer division.
@@ -2283,7 +2285,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
 
     // Check if the remainder is non-zero.
     check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
-    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(),
+    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                        check, frame_state);
     return value;
   }
@@ -2298,7 +2300,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
 
   // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
   Node* check = __ Word32Equal(rhs, zero);
-  __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+  __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), check,
                   frame_state);
 
   // Perform the actual unsigned integer modulus.
@@ -2313,7 +2315,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
 
   Node* projection = __ Int32MulWithOverflow(lhs, rhs);
   Node* check = __ Projection(1, projection);
-  __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check,
                   frame_state);
 
   Node* value = __ Projection(0, projection);
@@ -2329,7 +2331,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
     __ Bind(&if_zero);
     // We may need to return negative zero.
     Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
-    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check_or,
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(), check_or,
                     frame_state);
     __ Goto(&check_done);
 
@@ -2489,7 +2491,7 @@ Node* EffectControlLinearizer::LowerCheckedUint64ToTaggedSigned(
 }
 
 Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
-    CheckForMinusZeroMode mode, const VectorSlotPair& feedback, Node* value,
+    CheckForMinusZeroMode mode, const FeedbackSource& feedback, Node* value,
     Node* frame_state) {
   Node* value32 = __ RoundFloat64ToInt32(value);
   Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
@@ -2528,7 +2530,7 @@ Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
 }
 
 Node* EffectControlLinearizer::BuildCheckedFloat64ToInt64(
-    CheckForMinusZeroMode mode, const VectorSlotPair& feedback, Node* value,
+    CheckForMinusZeroMode mode, const FeedbackSource& feedback, Node* value,
     Node* frame_state) {
   Node* value64 = __ TruncateFloat64ToInt64(value);
   Node* check_same = __ Float64Equal(value, __ ChangeInt64ToFloat64(value64));
@@ -2594,7 +2596,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
   // to int32.
   __ Bind(&if_not_smi);
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-  Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
+  Node* check_map = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
   __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
                      check_map, frame_state);
   Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
@@ -2624,7 +2626,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt64(Node* node,
   // to int64.
   __ Bind(&if_not_smi);
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-  Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
+  Node* check_map = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
   __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
                      check_map, frame_state);
   Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
@@ -2637,10 +2639,10 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt64(Node* node,
 }
 
 Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
-    CheckTaggedInputMode mode, const VectorSlotPair& feedback, Node* value,
+    CheckTaggedInputMode mode, const FeedbackSource& feedback, Node* value,
     Node* frame_state) {
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-  Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant());
+  Node* check_number = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
   switch (mode) {
     case CheckTaggedInputMode::kNumber: {
       __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, feedback,
@@ -2731,7 +2733,7 @@ Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) {
 
   // Check for BigInt.
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-  Node* bi_check = __ WordEqual(value_map, __ BigIntMapConstant());
+  Node* bi_check = __ TaggedEqual(value_map, __ BigIntMapConstant());
   __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
                      bi_check, frame_state);
 
@@ -2840,7 +2842,7 @@ Node* EffectControlLinearizer::LowerCheckedCompressedToTaggedSigned(
   Node* value = node->InputAt(0);
   const CheckParameters& params = CheckParametersOf(node->op());
 
-  Node* check = ObjectIsSmi(value);
+  Node* check = CompressedObjectIsSmi(value);
   __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
                      frame_state);
 
@@ -2852,7 +2854,7 @@ Node* EffectControlLinearizer::LowerCheckedCompressedToTaggedPointer(
   Node* value = node->InputAt(0);
   const CheckParameters& params = CheckParametersOf(node->op());
 
-  Node* check = ObjectIsSmi(value);
+  Node* check = CompressedObjectIsSmi(value);
   __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), check,
                   frame_state);
   return __ ChangeCompressedPointerToTaggedPointer(value);
@@ -2983,7 +2985,7 @@ Node* EffectControlLinearizer::LowerObjectIsBigInt(Node* node) {
   Node* check = ObjectIsSmi(value);
   __ GotoIf(check, &if_smi);
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-  Node* vfalse = __ WordEqual(value_map, __ BigIntMapConstant());
+  Node* vfalse = __ TaggedEqual(value_map, __ BigIntMapConstant());
   __ Goto(&done, vfalse);
 
   __ Bind(&if_smi);
@@ -3095,7 +3097,7 @@ Node* EffectControlLinearizer::LowerObjectIsFiniteNumber(Node* node) {
 
   // Check if {object} is a HeapNumber.
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
-  __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
                zero);
 
   // {object} is a HeapNumber.
@@ -3128,7 +3130,7 @@ Node* EffectControlLinearizer::LowerObjectIsInteger(Node* node) {
 
   // Check if {object} is a HeapNumber.
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
-  __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
                zero);
 
   // {object} is a HeapNumber.
@@ -3171,7 +3173,7 @@ Node* EffectControlLinearizer::LowerObjectIsSafeInteger(Node* node) {
 
   // Check if {object} is a HeapNumber.
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
-  __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
                zero);
 
   // {object} is a HeapNumber.
@@ -3190,9 +3192,14 @@ Node* EffectControlLinearizer::LowerObjectIsSafeInteger(Node* node) {
 
 namespace {
 
-const int64_t kMinusZeroBits = bit_cast<int64_t>(-0.0);
-const int32_t kMinusZeroLoBits = static_cast<int32_t>(kMinusZeroBits);
-const int32_t kMinusZeroHiBits = static_cast<int32_t>(kMinusZeroBits >> 32);
+// There is no (currently) available constexpr version of bit_cast, so we have
+// to make do with constructing the -0.0 bits manually (by setting the sign bit
+// to 1 and everything else to 0).
+// TODO(leszeks): Revisit when upgrading to C++20.
+constexpr int32_t kMinusZeroLoBits = static_cast<int32_t>(0);
+constexpr int32_t kMinusZeroHiBits = static_cast<int32_t>(1) << 31;
+constexpr int64_t kMinusZeroBits =
+    (static_cast<uint64_t>(kMinusZeroHiBits) << 32) | kMinusZeroLoBits;
 
 }  // namespace
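The manually assembled constant can be sanity-checked against the runtime bit_cast it replaces; a self-contained check in standard C++, independent of V8:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // Sign bit set, exponent and mantissa zero: the IEEE 754 pattern for -0.0.
  const uint64_t kExpected = uint64_t{1} << 63;
  const double minus_zero = -0.0;
  uint64_t bits;
  std::memcpy(&bits, &minus_zero, sizeof bits);  // portable stand-in for bit_cast
  assert(bits == kExpected);
  return 0;
}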
 
@@ -3207,7 +3214,7 @@ Node* EffectControlLinearizer::LowerObjectIsMinusZero(Node* node) {
 
   // Check if {value} is a HeapNumber.
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-  __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
                zero);
 
   // Check if {value} contains -0.
@@ -3260,7 +3267,7 @@ Node* EffectControlLinearizer::LowerObjectIsNaN(Node* node) {
 
   // Check if {value} is a HeapNumber.
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-  __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
                zero);
 
   // Check if {value} contains a NaN.
@@ -3319,7 +3326,7 @@ Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
 
   __ GotoIf(ObjectIsSmi(value), &if_smi);
   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-  __ Goto(&done, __ WordEqual(value_map, __ HeapNumberMapConstant()));
+  __ Goto(&done, __ TaggedEqual(value_map, __ HeapNumberMapConstant()));
 
   __ Bind(&if_smi);
   __ Goto(&done, __ Int32Constant(1));
@@ -3467,7 +3474,7 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
     auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
 
     Node* frame = __ LoadFramePointer();
-    __ GotoIf(__ WordEqual(arguments_frame, frame), &done, __ SmiConstant(0));
+    __ GotoIf(__ TaggedEqual(arguments_frame, frame), &done, __ SmiConstant(0));
     __ Goto(&if_adaptor_frame);
 
     __ Bind(&if_adaptor_frame);
@@ -3491,7 +3498,7 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
     auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
 
     Node* frame = __ LoadFramePointer();
-    __ GotoIf(__ WordEqual(arguments_frame, frame), &done,
+    __ GotoIf(__ TaggedEqual(arguments_frame, frame), &done,
               __ SmiConstant(formal_parameter_count));
     __ Goto(&if_adaptor_frame);
 
@@ -3517,9 +3524,9 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
       MachineType::TypeCompressedTagged(), parent_frame,
       __ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset));
 
-  __ GotoIf(__ WordEqual(parent_frame_type,
-                         __ IntPtrConstant(StackFrame::TypeToMarker(
-                             StackFrame::ARGUMENTS_ADAPTOR))),
+  __ GotoIf(__ IntPtrEqual(parent_frame_type,
+                           __ IntPtrConstant(StackFrame::TypeToMarker(
+                               StackFrame::ARGUMENTS_ADAPTOR))),
             &done, parent_frame);
   __ Goto(&done, frame);
 
@@ -3532,7 +3539,7 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
   Node* length = node->InputAt(0);
 
   auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
-  Node* zero_length = __ WordEqual(length, __ IntPtrConstant(0));
+  Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
   __ GotoIf(zero_length, &done,
             jsgraph()->HeapConstant(factory()->empty_fixed_array()));
 
@@ -3580,7 +3587,7 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
   Node* length = node->InputAt(0);
 
   auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
-  Node* zero_length = __ WordEqual(length, __ IntPtrConstant(0));
+  Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
   __ GotoIf(zero_length, &done,
             jsgraph()->HeapConstant(factory()->empty_fixed_array()));
 
@@ -3832,7 +3839,7 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
     {
       Node* receiver_second =
           __ LoadField(AccessBuilder::ForConsStringSecond(), receiver);
-      __ GotoIfNot(__ WordEqual(receiver_second, __ EmptyStringConstant()),
+      __ GotoIfNot(__ TaggedEqual(receiver_second, __ EmptyStringConstant()),
                    &if_runtime);
       Node* receiver_first =
           __ LoadField(AccessBuilder::ForConsStringFirst(), receiver);
@@ -3967,7 +3974,7 @@ Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) {
     Node* entry =
         __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
 
-    Node* check2 = __ WordEqual(entry, __ UndefinedConstant());
+    Node* check2 = __ TaggedEqual(entry, __ UndefinedConstant());
     __ GotoIf(check2, &cache_miss);
 
     // Use the {entry} from the {cache}.
@@ -4093,7 +4100,7 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
       Node* entry =
           __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
 
-      Node* check2 = __ WordEqual(entry, __ UndefinedConstant());
+      Node* check2 = __ TaggedEqual(entry, __ UndefinedConstant());
       __ GotoIf(check2, &cache_miss);
 
       // Use the {entry} from the {cache}.
@@ -4285,7 +4292,7 @@ Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
               rhs, __ NoContextConstant());
 
   // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
-  __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, VectorSlotPair{},
+  __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
                   ObjectIsSmi(value), frame_state);
 
   return value;
@@ -4338,8 +4345,8 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
 Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
                                                        Node* frame_state) {
   Node* value = node->InputAt(0);
-  Node* check = __ WordEqual(value, __ TheHoleConstant());
-  __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check,
+  Node* check = __ TaggedEqual(value, __ TheHoleConstant());
+  __ DeoptimizeIf(DeoptimizeReason::kHole, FeedbackSource(), check,
                   frame_state);
   return value;
 }
@@ -4350,7 +4357,7 @@ Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) {
   auto if_is_hole = __ MakeDeferredLabel();
   auto done = __ MakeLabel(MachineRepresentation::kTagged);
 
-  Node* check = __ WordEqual(value, __ TheHoleConstant());
+  Node* check = __ TaggedEqual(value, __ TheHoleConstant());
   __ GotoIf(check, &if_is_hole);
   __ Goto(&done, value);
 
@@ -4372,12 +4379,12 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
   auto if_notthinstring = __ MakeLabel();
 
   // Check if {exp} and {val} are the same, which is the likely case.
-  __ Branch(__ WordEqual(exp, val), &if_same, &if_notsame);
+  __ Branch(__ TaggedEqual(exp, val), &if_same, &if_notsame);
 
   __ Bind(&if_notsame);
   {
     // Now {val} could still be a non-internalized String that matches {exp}.
-    __ DeoptimizeIf(DeoptimizeReason::kWrongName, VectorSlotPair(),
+    __ DeoptimizeIf(DeoptimizeReason::kWrongName, FeedbackSource(),
                     ObjectIsSmi(val), frame_state);
     Node* val_map = __ LoadField(AccessBuilder::ForMap(), val);
     Node* val_instance_type =
@@ -4396,7 +4403,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
       // Check that the {val} is a non-internalized String, if it's anything
       // else it cannot match the recorded feedback {exp} anyways.
       __ DeoptimizeIfNot(
-          DeoptimizeReason::kWrongName, VectorSlotPair(),
+          DeoptimizeReason::kWrongName, FeedbackSource(),
           __ Word32Equal(__ Word32And(val_instance_type,
                                       __ Int32Constant(kIsNotStringMask |
                                                        kIsNotInternalizedMask)),
@@ -4419,8 +4426,8 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
                   try_internalize_string_function, isolate_ptr, val);
 
       // Now see if the results match.
-      __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
-                         __ WordEqual(exp, val_internalized), frame_state);
+      __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(),
+                         __ TaggedEqual(exp, val_internalized), frame_state);
       __ Goto(&if_same);
     }
 
@@ -4429,8 +4436,8 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
       // The {val} is a ThinString, let's check the actual value.
       Node* val_actual =
           __ LoadField(AccessBuilder::ForThinStringActual(), val);
-      __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
-                         __ WordEqual(exp, val_actual), frame_state);
+      __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(),
+                         __ TaggedEqual(exp, val_actual), frame_state);
       __ Goto(&if_same);
     }
   }
@@ -4442,8 +4449,8 @@ void EffectControlLinearizer::LowerCheckEqualsSymbol(Node* node,
                                                      Node* frame_state) {
   Node* exp = node->InputAt(0);
   Node* val = node->InputAt(1);
-  Node* check = __ WordEqual(exp, val);
-  __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(), check,
+  Node* check = __ TaggedEqual(exp, val);
+  __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(), check,
                      frame_state);
 }
 
@@ -4543,8 +4550,13 @@ Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
 }
 
 Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
-  return __ WordEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)),
-                      __ IntPtrConstant(kSmiTag));
+  return __ IntPtrEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)),
+                        __ IntPtrConstant(kSmiTag));
+}
+
+Node* EffectControlLinearizer::CompressedObjectIsSmi(Node* value) {
+  return __ Word32Equal(__ Word32And(value, __ Int32Constant(kSmiTagMask)),
+                        __ Int32Constant(kSmiTag));
 }
 
 Node* EffectControlLinearizer::SmiMaxValueConstant() {
@@ -4629,7 +4641,7 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
   Node* elements_map = __ LoadField(AccessBuilder::ForMap(), elements);
 
   // Check if {elements} is not a copy-on-write FixedArray.
-  Node* check = __ WordEqual(elements_map, __ FixedArrayMapConstant());
+  Node* check = __ TaggedEqual(elements_map, __ FixedArrayMapConstant());
   __ GotoIfNot(check, &if_not_fixed_array);
   // Nothing to do if the {elements} are not copy-on-write.
   __ Goto(&done, elements);
@@ -4707,7 +4719,7 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
   Node* object_map = __ LoadField(AccessBuilder::ForMap(), object);
 
   // Check if {object_map} is the same as {source_map}.
-  Node* check = __ WordEqual(object_map, source_map);
+  Node* check = __ TaggedEqual(object_map, source_map);
   __ GotoIf(check, &if_map_same);
   __ Goto(&done);
 
@@ -4749,7 +4761,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
   auto done = __ MakeLabel(MachineRepresentation::kTagged);
 
   // Check if field is a mutable double field.
-  __ GotoIfNot(__ WordEqual(__ WordAnd(index, one), zero), &if_double);
+  __ GotoIfNot(__ IntPtrEqual(__ WordAnd(index, one), zero), &if_double);
 
   // The field is a proper Tagged field on {object}. The {index} is shifted
   // to the left by one in the code below.
@@ -4772,8 +4784,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
     // The {index} is equal to the negated out of property index plus 1.
     __ Bind(&if_outofobject);
     {
-      Node* properties =
-          __ LoadField(AccessBuilder::ForJSObjectPropertiesOrHash(), object);
+      Node* properties = __ LoadField(
+          AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), object);
       Node* offset =
           __ IntAdd(__ WordShl(__ IntSub(zero, index),
                                __ IntPtrConstant(kTaggedSizeLog2 - 1)),
@@ -4786,7 +4798,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
   }
 
   // The field is a Double field, either unboxed in the object on 64-bit
-  // architectures, or as MutableHeapNumber.
+  // architectures, or a mutable HeapNumber.
   __ Bind(&if_double);
   {
     auto done_double = __ MakeLabel(MachineRepresentation::kFloat64);
@@ -4815,8 +4827,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
 
     __ Bind(&if_outofobject);
     {
-      Node* properties =
-          __ LoadField(AccessBuilder::ForJSObjectPropertiesOrHash(), object);
+      Node* properties = __ LoadField(
+          AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), object);
       Node* offset =
           __ IntAdd(__ WordShl(__ IntSub(zero, index),
                                __ IntPtrConstant(kTaggedSizeLog2)),
@@ -5123,7 +5135,7 @@ void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
     // without effecting a transition.
     Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
     Node* heap_number_map = __ HeapNumberMapConstant();
-    Node* check = __ WordEqual(value_map, heap_number_map);
+    Node* check = __ TaggedEqual(value_map, heap_number_map);
     __ GotoIfNot(check, &transition_double_to_fast);
     __ Goto(&do_store, kind);
   }
@@ -5135,7 +5147,7 @@ void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
     auto if_value_not_heap_number = __ MakeLabel();
     Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
     Node* heap_number_map = __ HeapNumberMapConstant();
-    Node* check = __ WordEqual(value_map, heap_number_map);
+    Node* check = __ TaggedEqual(value_map, heap_number_map);
     __ GotoIfNot(check, &if_value_not_heap_number);
     {
       // {value} is a HeapNumber.
@@ -5478,9 +5490,10 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
 
       // Wrap the primitive {value} into a JSPrimitiveWrapper.
       __ Bind(&convert_to_object);
-      __ GotoIf(__ WordEqual(value, __ UndefinedConstant()),
+      __ GotoIf(__ TaggedEqual(value, __ UndefinedConstant()),
+                &convert_global_proxy);
+      __ GotoIf(__ TaggedEqual(value, __ NullConstant()),
                 &convert_global_proxy);
-      __ GotoIf(__ WordEqual(value, __ NullConstant()), &convert_global_proxy);
       Operator::Properties properties = Operator::kEliminatable;
       Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
       CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
@@ -5891,7 +5904,7 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
   {
     Node* entry = loop.PhiAt(0);
     Node* check =
-        __ WordEqual(entry, __ IntPtrConstant(OrderedHashMap::kNotFound));
+        __ IntPtrEqual(entry, __ IntPtrConstant(OrderedHashMap::kNotFound));
     __ GotoIf(check, &done, entry);
     entry = __ IntAdd(
         __ IntMul(entry, __ IntPtrConstant(OrderedHashMap::kEntrySize)),
@@ -5906,14 +5919,20 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
     auto if_match = __ MakeLabel();
     auto if_notmatch = __ MakeLabel();
     auto if_notsmi = __ MakeDeferredLabel();
-    __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi);
-    __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match,
-              &if_notmatch);
+    if (COMPRESS_POINTERS_BOOL) {
+      __ GotoIfNot(CompressedObjectIsSmi(candidate_key), &if_notsmi);
+      __ Branch(__ Word32Equal(ChangeCompressedSmiToInt32(candidate_key), key),
+                &if_match, &if_notmatch);
+    } else {
+      __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi);
+      __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match,
+                &if_notmatch);
+    }
 
     __ Bind(&if_notsmi);
     __ GotoIfNot(
-        __ WordEqual(__ LoadField(AccessBuilder::ForMap(), candidate_key),
-                     __ HeapNumberMapConstant()),
+        __ TaggedEqual(__ LoadField(AccessBuilder::ForMap(), candidate_key),
+                       __ HeapNumberMapConstant()),
         &if_notmatch);
     __ Branch(__ Float64Equal(__ LoadField(AccessBuilder::ForHeapNumberValue(),
                                            candidate_key),
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index aee0121384ac61..b3f684ea61afe9 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -736,10 +736,9 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
           current->Get(map_field).To(&map)) {
         if (map) {
           Type const map_type = NodeProperties::GetType(map);
-          AllowHandleDereference handle_dereference;
           if (map_type.IsHeapConstant() &&
               params.maps().contains(
-                  Handle<Map>::cast(map_type.AsHeapConstant()->Value()))) {
+                  map_type.AsHeapConstant()->Ref().AsMap().object())) {
             current->MarkForDeletion();
             break;
           }
diff --git a/deps/v8/src/compiler/feedback-source.cc b/deps/v8/src/compiler/feedback-source.cc
new file mode 100644
index 00000000000000..8c3d175c28cb89
--- /dev/null
+++ b/deps/v8/src/compiler/feedback-source.cc
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/feedback-source.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+FeedbackSource::FeedbackSource(Handle<FeedbackVector> vector_,
+                               FeedbackSlot slot_)
+    : vector(vector_), slot(slot_) {
+  DCHECK(!slot.IsInvalid());
+}
+
+FeedbackSource::FeedbackSource(FeedbackVectorRef vector_, FeedbackSlot slot_)
+    : FeedbackSource(vector_.object(), slot_) {}
+
+FeedbackSource::FeedbackSource(FeedbackNexus const& nexus)
+    : FeedbackSource(nexus.vector_handle(), nexus.slot()) {}
+
+int FeedbackSource::index() const {
+  CHECK(IsValid());
+  return FeedbackVector::GetIndex(slot);
+}
+
+bool operator==(FeedbackSource const& lhs, FeedbackSource const& rhs) {
+  return FeedbackSource::Equal()(lhs, rhs);
+}
+
+bool operator!=(FeedbackSource const& lhs, FeedbackSource const& rhs) {
+  return !(lhs == rhs);
+}
+
+std::ostream& operator<<(std::ostream& os, const FeedbackSource& p) {
+  if (p.IsValid()) {
+    return os << "FeedbackSource(" << p.slot << ")";
+  }
+  return os << "FeedbackSource(INVALID)";
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/feedback-source.h b/deps/v8/src/compiler/feedback-source.h
new file mode 100644
index 00000000000000..8484acb4559a29
--- /dev/null
+++ b/deps/v8/src/compiler/feedback-source.h
@@ -0,0 +1,52 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FEEDBACK_SOURCE_H_
+#define V8_COMPILER_FEEDBACK_SOURCE_H_
+
+#include "src/compiler/heap-refs.h"
+#include "src/objects/feedback-vector.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct FeedbackSource {
+  FeedbackSource() { DCHECK(!IsValid()); }
+  V8_EXPORT_PRIVATE FeedbackSource(Handle<FeedbackVector> vector_,
+                                   FeedbackSlot slot_);
+  FeedbackSource(FeedbackVectorRef vector_, FeedbackSlot slot_);
+  explicit FeedbackSource(FeedbackNexus const& nexus);
+
+  bool IsValid() const { return !vector.is_null() && !slot.IsInvalid(); }
+  int index() const;
+
+  Handle<FeedbackVector> vector;
+  FeedbackSlot slot;
+
+  struct Hash {
+    size_t operator()(FeedbackSource const& source) const {
+      return base::hash_combine(source.vector.address(), source.slot);
+    }
+  };
+
+  struct Equal {
+    bool operator()(FeedbackSource const& lhs,
+                    FeedbackSource const& rhs) const {
+      return lhs.vector.equals(rhs.vector) && lhs.slot == rhs.slot;
+    }
+  };
+};
+
+bool operator==(FeedbackSource const&, FeedbackSource const&);
+bool operator!=(FeedbackSource const&, FeedbackSource const&);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           FeedbackSource const&);
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_FEEDBACK_SOURCE_H_
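Since FeedbackSource carries its own Hash and Equal functors rather than specializing std::hash, containers keyed on it name them explicitly. A hypothetical usage sketch (the counting map and Record helper are assumptions for illustration; FeedbackSource, Hash, and Equal come from the header above):

#include <unordered_map>

using FeedbackCounts =
    std::unordered_map<FeedbackSource, int,
                       FeedbackSource::Hash, FeedbackSource::Equal>;

void Record(FeedbackCounts& counts, const FeedbackSource& source) {
  if (source.IsValid()) ++counts[source];  // default slot stays uncounted
}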
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 5fbf11cdbce341..9478c08c6c13a3 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -106,28 +106,22 @@ Node* CreateBuiltinContinuationFrameStateCommon(
     Node* closure, Node* context, Node** parameters, int parameter_count,
     Node* outer_frame_state,
     Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>()) {
-  Isolate* const isolate = jsgraph->isolate();
   Graph* const graph = jsgraph->graph();
   CommonOperatorBuilder* const common = jsgraph->common();
 
-  BailoutId bailout_id = Builtins::GetContinuationBailoutId(name);
-  Callable callable = Builtins::CallableFor(isolate, name);
-
   const Operator* op_param =
       common->StateValues(parameter_count, SparseInputMask::Dense());
   Node* params_node = graph->NewNode(op_param, parameter_count, parameters);
 
+  BailoutId bailout_id = Builtins::GetContinuationBailoutId(name);
   const FrameStateFunctionInfo* state_info =
       common->CreateFrameStateFunctionInfo(frame_type, parameter_count, 0,
                                            shared);
   const Operator* op = common->FrameState(
       bailout_id, OutputFrameStateCombine::Ignore(), state_info);
-
-  Node* frame_state = graph->NewNode(
-      op, params_node, jsgraph->EmptyStateValues(), jsgraph->EmptyStateValues(),
-      context, closure, outer_frame_state);
-
-  return frame_state;
+  return graph->NewNode(op, params_node, jsgraph->EmptyStateValues(),
+                        jsgraph->EmptyStateValues(), context, closure,
+                        outer_frame_state);
 }
 
 }  // namespace
@@ -136,8 +130,7 @@ Node* CreateStubBuiltinContinuationFrameState(
     JSGraph* jsgraph, Builtins::Name name, Node* context,
     Node* const* parameters, int parameter_count, Node* outer_frame_state,
     ContinuationFrameStateMode mode) {
-  Isolate* isolate = jsgraph->isolate();
-  Callable callable = Builtins::CallableFor(isolate, name);
+  Callable callable = Builtins::CallableFor(jsgraph->isolate(), name);
   CallInterfaceDescriptor descriptor = callable.descriptor();
 
   std::vector<Node*> actual_parameters;
@@ -172,9 +165,6 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
     Node* target, Node* context, Node* const* stack_parameters,
     int stack_parameter_count, Node* outer_frame_state,
     ContinuationFrameStateMode mode) {
-  Isolate* const isolate = jsgraph->isolate();
-  Callable const callable = Builtins::CallableFor(isolate, name);
-
   // Depending on {mode}, final parameters are added by the deoptimizer
   // and aren't explicitly passed in the frame state.
   DCHECK_EQ(Builtins::GetStackParameterCount(name) + 1,  // add receiver
@@ -190,11 +180,13 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
     actual_parameters.push_back(stack_parameters[i]);
   }
 
-  // Register parameters follow stack paraemters. The context will be added by
+  Node* new_target = jsgraph->UndefinedConstant();
+
+  // Register parameters follow stack parameters. The context will be added by
   // instruction selector during FrameState translation.
-  actual_parameters.push_back(target);
-  actual_parameters.push_back(jsgraph->UndefinedConstant());
-  actual_parameters.push_back(argc);
+  actual_parameters.push_back(target);      // kJavaScriptCallTargetRegister
+  actual_parameters.push_back(new_target);  // kJavaScriptCallNewTargetRegister
+  actual_parameters.push_back(argc);        // kJavaScriptCallArgCountRegister
 
   return CreateBuiltinContinuationFrameStateCommon(
       jsgraph,
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 50f29d968bfdf6..b4ad81ecda0a1f 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -94,6 +94,14 @@ PURE_ASSEMBLER_MACH_BINOP_LIST(PURE_BINOP_DEF)
 CHECKED_ASSEMBLER_MACH_BINOP_LIST(CHECKED_BINOP_DEF)
 #undef CHECKED_BINOP_DEF
 
+Node* GraphAssembler::IntPtrEqual(Node* left, Node* right) {
+  return WordEqual(left, right);
+}
+
+Node* GraphAssembler::TaggedEqual(Node* left, Node* right) {
+  return WordEqual(left, right);
+}
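On builds without pointer compression both new helpers are plain aliases for WordEqual, but the split records intent: IntPtrEqual compares raw machine words, TaggedEqual compares tagged values, and only the latter may legally narrow to 32 bits once compressed pointers are in play. A hedged sketch of what a compressed variant could look like; ChangeTaggedToCompressed as a GraphAssembler method is an assumed helper name, not part of this patch:

Node* GraphAssembler::TaggedEqual(Node* left, Node* right) {
  if (COMPRESS_POINTERS_BOOL) {
    // Compare only the low 32 bits; compressed tagged values fit there.
    return Word32Equal(ChangeTaggedToCompressed(left),
                       ChangeTaggedToCompressed(right));
  }
  return WordEqual(left, right);
}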
+
 Node* GraphAssembler::Float64RoundDown(Node* value) {
   CHECK(machine()->Float64RoundDown().IsSupported());
   return graph()->NewNode(machine()->Float64RoundDown().op(), value);
@@ -237,7 +245,7 @@ Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
 }
 
 Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
-                                   VectorSlotPair const& feedback,
+                                   FeedbackSource const& feedback,
                                    Node* condition, Node* frame_state,
                                    IsSafetyCheck is_safety_check) {
   return current_control_ = current_effect_ = graph()->NewNode(
@@ -247,7 +255,7 @@ Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
 }
 
 Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
-                                      VectorSlotPair const& feedback,
+                                      FeedbackSource const& feedback,
                                       Node* condition, Node* frame_state,
                                       IsSafetyCheck is_safety_check) {
   return current_control_ = current_effect_ = graph()->NewNode(
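
The IntPtrEqual/TaggedEqual helpers added earlier in this file both delegate to WordEqual. A standalone sketch of why that is sound, assuming tagged values and intptrs are both plain machine words (i.e. no pointer compression):

#include <cstdint>

using Word = uintptr_t;  // one machine word

bool WordEqualSketch(Word a, Word b) { return a == b; }

// Both comparisons are word-width compares on this model, so they can share
// a single lowering, mirroring the two GraphAssembler helpers above.
bool IntPtrEqualSketch(intptr_t a, intptr_t b) {
  return WordEqualSketch(static_cast<Word>(a), static_cast<Word>(b));
}
bool TaggedEqualSketch(const void* a, const void* b) {
  return WordEqualSketch(reinterpret_cast<Word>(a),
                         reinterpret_cast<Word>(b));
}
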
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index e2c0005d15741f..0088f867c54f72 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -5,10 +5,10 @@
 #ifndef V8_COMPILER_GRAPH_ASSEMBLER_H_
 #define V8_COMPILER_GRAPH_ASSEMBLER_H_
 
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node.h"
 #include "src/compiler/simplified-operator.h"
-#include "src/compiler/vector-slot-pair.h"
 
 namespace v8 {
 namespace internal {
@@ -224,6 +224,9 @@ class GraphAssembler {
 
   Node* Unreachable();
 
+  Node* IntPtrEqual(Node* left, Node* right);
+  Node* TaggedEqual(Node* left, Node* right);
+
   Node* Float64RoundDown(Node* value);
   Node* Float64RoundTruncate(Node* value);
 
@@ -251,11 +254,11 @@ class GraphAssembler {
   Node* Word32PoisonOnSpeculation(Node* value);
 
   Node* DeoptimizeIf(
-      DeoptimizeReason reason, VectorSlotPair const& feedback, Node* condition,
+      DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
       Node* frame_state,
       IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
   Node* DeoptimizeIfNot(
-      DeoptimizeReason reason, VectorSlotPair const& feedback, Node* condition,
+      DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
       Node* frame_state,
       IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
   template <typename... Args>
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index fea76bff81506f..99e9d5ffdbe77f 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -68,9 +68,10 @@ Node* Graph::CloneNode(const Node* node) {
 
 
 NodeId Graph::NextNodeId() {
-  NodeId const id = next_node_id_;
-  CHECK(!base::bits::UnsignedAddOverflow32(id, 1, &next_node_id_));
-  return id;
+  // A node's id is internally stored in a bit field using fewer bits than
+  // NodeId (see Node::IdField). Hence the addition below won't ever overflow.
+  DCHECK_LT(next_node_id_, std::numeric_limits<NodeId>::max());
+  return next_node_id_++;
 }
 
 void Graph::Print() const { StdoutStream{} << AsRPO(*this); }
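
The CHECK-on-overflow becomes a DCHECK because node ids are stored in a bit field narrower than NodeId. A toy model of the invariant (the 24-bit width is an assumption for illustration; Node::IdField defines the real one):

#include <cassert>
#include <cstdint>
#include <limits>

using NodeId = uint32_t;
constexpr int kAssumedIdBits = 24;  // illustrative width only
constexpr NodeId kMaxStorableId = (NodeId{1} << kAssumedIdBits) - 1;

NodeId NextNodeIdSketch(NodeId& next) {
  // The bit-field limit is hit long before uint32 wrap-around, so a debug
  // check suffices; the increment itself cannot overflow in practice.
  assert(next <= kMaxStorableId);
  assert(next < std::numeric_limits<NodeId>::max());
  return next++;
}
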
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index 5547039fa63c5f..9b1aa53eb91116 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -27,7 +27,6 @@ class JSRegExp;
 class JSTypedArray;
 class NativeContext;
 class ScriptContextTable;
-class VectorSlotPair;
 
 namespace compiler {
 
@@ -35,6 +34,8 @@ namespace compiler {
 // For a store during literal creation, do not walk up the prototype chain.
 enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
 
+enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };
+
 enum class OddballType : uint8_t {
   kNone,     // Not an Oddball.
   kBoolean,  // True or False.
@@ -53,6 +54,7 @@ enum class OddballType : uint8_t {
   V(JSBoundFunction)               \
   V(JSDataView)                    \
   V(JSFunction)                    \
+  V(JSGlobalObject)                \
   V(JSGlobalProxy)                 \
   V(JSRegExp)                      \
   V(JSTypedArray)                  \
@@ -70,8 +72,12 @@ enum class OddballType : uint8_t {
   V(InternalizedString)            \
   V(String)                        \
   V(Symbol)                        \
+  /* Subtypes of JSReceiver */     \
+  V(JSObject)                      \
   /* Subtypes of HeapObject */     \
+  V(AccessorInfo)                  \
   V(AllocationSite)                \
+  V(ArrayBoilerplateDescription)   \
   V(BigInt)                        \
   V(CallHandlerInfo)               \
   V(Cell)                          \
@@ -82,10 +88,10 @@ enum class OddballType : uint8_t {
   V(FixedArrayBase)                \
   V(FunctionTemplateInfo)          \
   V(HeapNumber)                    \
-  V(JSObject)                      \
+  V(JSReceiver)                    \
   V(Map)                           \
-  V(MutableHeapNumber)             \
   V(Name)                          \
+  V(ObjectBoilerplateDescription)  \
   V(PropertyCell)                  \
   V(SharedFunctionInfo)            \
   V(SourceTextModule)              \
@@ -103,8 +109,9 @@ HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
 
 class V8_EXPORT_PRIVATE ObjectRef {
  public:
-  ObjectRef(JSHeapBroker* broker, Handle<Object> object);
-  ObjectRef(JSHeapBroker* broker, ObjectData* data)
+  ObjectRef(JSHeapBroker* broker, Handle<Object> object,
+            bool check_type = true);
+  ObjectRef(JSHeapBroker* broker, ObjectData* data, bool check_type = true)
       : data_(data), broker_(broker) {
     CHECK_NOT_NULL(data_);
   }
@@ -131,8 +138,9 @@ class V8_EXPORT_PRIVATE ObjectRef {
 
   // Return the element at key {index} if {index} is known to be an own data
   // property of the object that is non-writable and non-configurable.
-  base::Optional<ObjectRef> GetOwnConstantElement(uint32_t index,
-                                                  bool serialize = false) const;
+  base::Optional<ObjectRef> GetOwnConstantElement(
+      uint32_t index, SerializationPolicy policy =
+                          SerializationPolicy::kAssumeSerialized) const;
 
   Isolate* isolate() const;
 
@@ -157,6 +165,7 @@ class V8_EXPORT_PRIVATE ObjectRef {
   friend class JSArrayData;
   friend class JSGlobalProxyRef;
   friend class JSGlobalProxyData;
+  friend class JSHeapBroker;
   friend class JSObjectData;
   friend class StringData;
 
@@ -200,9 +209,27 @@ class HeapObjectType {
   Flags const flags_;
 };
 
+// Constructors are carefully defined such that we do a type check on
+// the outermost Ref class in the inheritance chain only.
+#define DEFINE_REF_CONSTRUCTOR(name, base)                                  \
+  name##Ref(JSHeapBroker* broker, Handle<Object> object,                    \
+            bool check_type = true)                                         \
+      : base(broker, object, false) {                                       \
+    if (check_type) {                                                       \
+      CHECK(Is##name());                                                    \
+    }                                                                       \
+  }                                                                         \
+  name##Ref(JSHeapBroker* broker, ObjectData* data, bool check_type = true) \
+      : base(broker, data, false) {                                         \
+    if (check_type) {                                                       \
+      CHECK(Is##name());                                                    \
+    }                                                                       \
+  }
+
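
A standalone model of what the macro buys (toy classes, not the V8 refs): every constructor forwards check_type = false to its base, so only the most-derived Ref's type check executes.

#include <cassert>

struct BaseRefSketch {
  explicit BaseRefSketch(int tag, bool check_type = true) : tag_(tag) {
    if (check_type) assert(IsBase());  // skipped when a subclass constructs us
  }
  bool IsBase() const { return tag_ >= 0; }
  int tag_;
};

struct DerivedRefSketch : BaseRefSketch {
  explicit DerivedRefSketch(int tag, bool check_type = true)
      : BaseRefSketch(tag, /*check_type=*/false) {
    if (check_type) assert(IsDerived());  // the only check that runs
  }
  bool IsDerived() const { return tag_ == 1; }
};

int main() { DerivedRefSketch ref(1); }  // exactly one type check executes
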
 class HeapObjectRef : public ObjectRef {
  public:
-  using ObjectRef::ObjectRef;
+  DEFINE_REF_CONSTRUCTOR(HeapObject, ObjectRef)
+
   Handle<HeapObject> object() const;
 
   MapRef map() const;
@@ -213,7 +240,8 @@ class HeapObjectRef : public ObjectRef {
 
 class PropertyCellRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(PropertyCell, HeapObjectRef)
+
   Handle<PropertyCell> object() const;
 
   PropertyDetails property_details() const;
@@ -222,9 +250,17 @@ class PropertyCellRef : public HeapObjectRef {
   ObjectRef value() const;
 };
 
-class JSObjectRef : public HeapObjectRef {
+class JSReceiverRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(JSReceiver, HeapObjectRef)
+
+  Handle<JSReceiver> object() const;
+};
+
+class JSObjectRef : public JSReceiverRef {
+ public:
+  DEFINE_REF_CONSTRUCTOR(JSObject, JSReceiverRef)
+
   Handle<JSObject> object() const;
 
   uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
@@ -233,10 +269,10 @@ class JSObjectRef : public HeapObjectRef {
 
   // Return the value of the property identified by the field {index}
   // if {index} is known to be an own data property of the object.
-  base::Optional<ObjectRef> GetOwnProperty(Representation field_representation,
-                                           FieldIndex index,
-                                           bool serialize = false) const;
-
+  base::Optional<ObjectRef> GetOwnDataProperty(
+      Representation field_representation, FieldIndex index,
+      SerializationPolicy policy =
+          SerializationPolicy::kAssumeSerialized) const;
   FixedArrayBaseRef elements() const;
   void SerializeElements();
   void EnsureElementsTenured();
@@ -248,7 +284,8 @@ class JSObjectRef : public HeapObjectRef {
 
 class JSDataViewRef : public JSObjectRef {
  public:
-  using JSObjectRef::JSObjectRef;
+  DEFINE_REF_CONSTRUCTOR(JSDataView, JSObjectRef)
+
   Handle<JSDataView> object() const;
 
   size_t byte_length() const;
@@ -257,20 +294,23 @@ class JSDataViewRef : public JSObjectRef {
 
 class JSBoundFunctionRef : public JSObjectRef {
  public:
-  using JSObjectRef::JSObjectRef;
+  DEFINE_REF_CONSTRUCTOR(JSBoundFunction, JSObjectRef)
+
   Handle<JSBoundFunction> object() const;
 
   void Serialize();
+  bool serialized() const;
 
   // The following are available only after calling Serialize().
-  ObjectRef bound_target_function() const;
+  JSReceiverRef bound_target_function() const;
   ObjectRef bound_this() const;
   FixedArrayRef bound_arguments() const;
 };
 
 class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
  public:
-  using JSObjectRef::JSObjectRef;
+  DEFINE_REF_CONSTRUCTOR(JSFunction, JSObjectRef)
+
   Handle<JSFunction> object() const;
 
   bool has_feedback_vector() const;
@@ -295,7 +335,8 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
 
 class JSRegExpRef : public JSObjectRef {
  public:
-  using JSObjectRef::JSObjectRef;
+  DEFINE_REF_CONSTRUCTOR(JSRegExp, JSObjectRef)
+
   Handle<JSRegExp> object() const;
 
   ObjectRef raw_properties_or_hash() const;
@@ -307,33 +348,31 @@ class JSRegExpRef : public JSObjectRef {
 
 class HeapNumberRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
-  Handle<HeapNumber> object() const;
+  DEFINE_REF_CONSTRUCTOR(HeapNumber, HeapObjectRef)
 
-  double value() const;
-};
-
-class MutableHeapNumberRef : public HeapObjectRef {
- public:
-  using HeapObjectRef::HeapObjectRef;
-  Handle<MutableHeapNumber> object() const;
+  Handle<HeapNumber> object() const;
 
   double value() const;
 };
 
 class ContextRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(Context, HeapObjectRef)
+
   Handle<Context> object() const;
 
   // {previous} decrements {depth} by 1 for each previous link successfully
   // followed. If {depth} != 0 on function return, then it only got
   // partway to the desired depth. If {policy} is kSerializeIfNeeded, then
   // {previous} will cache its findings.
-  ContextRef previous(size_t* depth, bool serialize = false) const;
+  ContextRef previous(size_t* depth,
+                      SerializationPolicy policy =
+                          SerializationPolicy::kAssumeSerialized) const;
 
   // Only returns a value if the index is valid for this ContextRef.
-  base::Optional<ObjectRef> get(int index, bool serialize = false) const;
+  base::Optional<ObjectRef> get(
+      int index, SerializationPolicy policy =
+                     SerializationPolicy::kAssumeSerialized) const;
 
   // We only serialize the ScopeInfo if certain Promise
   // builtins are called.
@@ -351,6 +390,7 @@ class ContextRef : public HeapObjectRef {
   V(JSFunction, promise_then)                                         \
   V(JSFunction, string_function)                                      \
   V(JSFunction, symbol_function)                                      \
+  V(JSGlobalObject, global_object)                                    \
   V(JSGlobalProxy, global_proxy_object)                               \
   V(JSObject, promise_prototype)                                      \
   V(Map, bound_function_with_constructor_map)                         \
@@ -391,7 +431,8 @@ class ContextRef : public HeapObjectRef {
 
 class NativeContextRef : public ContextRef {
  public:
-  using ContextRef::ContextRef;
+  DEFINE_REF_CONSTRUCTOR(NativeContext, ContextRef)
+
   Handle<NativeContext> object() const;
 
   void Serialize();
@@ -408,7 +449,8 @@ class NativeContextRef : public ContextRef {
 
 class NameRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(Name, HeapObjectRef)
+
   Handle<Name> object() const;
 
   bool IsUniqueName() const;
@@ -416,7 +458,8 @@ class NameRef : public HeapObjectRef {
 
 class ScriptContextTableRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(ScriptContextTable, HeapObjectRef)
+
   Handle<ScriptContextTable> object() const;
 
   struct LookupResult {
@@ -430,13 +473,15 @@ class ScriptContextTableRef : public HeapObjectRef {
 
 class DescriptorArrayRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(DescriptorArray, HeapObjectRef)
+
   Handle<DescriptorArray> object() const;
 };
 
 class FeedbackCellRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(FeedbackCell, HeapObjectRef)
+
   Handle<FeedbackCell> object() const;
 
   HeapObjectRef value() const;
@@ -444,17 +489,21 @@ class FeedbackCellRef : public HeapObjectRef {
 
 class FeedbackVectorRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(FeedbackVector, HeapObjectRef)
+
   Handle<FeedbackVector> object() const;
 
-  ObjectRef get(FeedbackSlot slot) const;
+  double invocation_count() const;
 
-  void SerializeSlots();
+  void Serialize();
+  ObjectRef get(FeedbackSlot slot) const;
+  FeedbackCellRef GetClosureFeedbackCell(int index) const;
 };
 
 class CallHandlerInfoRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(CallHandlerInfo, HeapObjectRef)
+
   Handle<CallHandlerInfo> object() const;
 
   Address callback() const;
@@ -463,9 +512,17 @@ class CallHandlerInfoRef : public HeapObjectRef {
   ObjectRef data() const;
 };
 
+class AccessorInfoRef : public HeapObjectRef {
+ public:
+  DEFINE_REF_CONSTRUCTOR(AccessorInfo, HeapObjectRef)
+
+  Handle<AccessorInfo> object() const;
+};
+
 class AllocationSiteRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(AllocationSite, HeapObjectRef)
+
   Handle<AllocationSite> object() const;
 
   bool PointsToLiteral() const;
@@ -487,7 +544,8 @@ class AllocationSiteRef : public HeapObjectRef {
 
 class BigIntRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(BigInt, HeapObjectRef)
+
   Handle<BigInt> object() const;
 
   uint64_t AsUint64() const;
@@ -495,7 +553,8 @@ class BigIntRef : public HeapObjectRef {
 
 class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(Map, HeapObjectRef)
+
   Handle<Map> object() const;
 
   int instance_size() const;
@@ -526,7 +585,8 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
   bool is_migration_target() const;
   bool supports_fast_array_iteration() const;
   bool supports_fast_array_resize() const;
-  bool IsMapOfCurrentGlobalProxy() const;
+  bool IsMapOfTargetGlobalProxy() const;
+  bool is_abandoned_prototype_map() const;
 
   OddballType oddball_type() const;
 
@@ -550,12 +610,17 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
   // Concerning the underlying instance_descriptors:
   void SerializeOwnDescriptors();
   void SerializeOwnDescriptor(int descriptor_index);
+  bool serialized_own_descriptor(int descriptor_index) const;
   MapRef FindFieldOwner(int descriptor_index) const;
   PropertyDetails GetPropertyDetails(int descriptor_index) const;
   NameRef GetPropertyKey(int descriptor_index) const;
   FieldIndex GetFieldIndexFor(int descriptor_index) const;
   ObjectRef GetFieldType(int descriptor_index) const;
   bool IsUnboxedDoubleField(int descriptor_index) const;
+  ObjectRef GetStrongValue(int descriptor_number) const;
+
+  void SerializeRootMap();
+  base::Optional<MapRef> FindRootMap() const;
 
   // Available after calling JSFunctionRef::Serialize on a function that has
   // this map as initial map.
@@ -574,7 +639,8 @@ struct HolderLookupResult {
 
 class FunctionTemplateInfoRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(FunctionTemplateInfo, HeapObjectRef)
+
   Handle<FunctionTemplateInfo> object() const;
 
   bool is_signature_undefined() const;
@@ -585,21 +651,40 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
   void SerializeCallCode();
   base::Optional<CallHandlerInfoRef> call_code() const;
 
-  HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map,
-                                                bool serialize);
+  HolderLookupResult LookupHolderOfExpectedType(
+      MapRef receiver_map,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 };
 
 class FixedArrayBaseRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(FixedArrayBase, HeapObjectRef)
+
   Handle<FixedArrayBase> object() const;
 
   int length() const;
 };
 
+class ArrayBoilerplateDescriptionRef : public HeapObjectRef {
+ public:
+  using HeapObjectRef::HeapObjectRef;
+  Handle<ArrayBoilerplateDescription> object() const;
+
+  int constants_elements_length() const;
+};
+
+class ObjectBoilerplateDescriptionRef : public HeapObjectRef {
+ public:
+  using HeapObjectRef::HeapObjectRef;
+  Handle<ObjectBoilerplateDescription> object() const;
+
+  int size() const;
+};
+
 class FixedArrayRef : public FixedArrayBaseRef {
  public:
-  using FixedArrayBaseRef::FixedArrayBaseRef;
+  DEFINE_REF_CONSTRUCTOR(FixedArray, FixedArrayBaseRef)
+
   Handle<FixedArray> object() const;
 
   ObjectRef get(int i) const;
@@ -607,7 +692,8 @@ class FixedArrayRef : public FixedArrayBaseRef {
 
 class FixedDoubleArrayRef : public FixedArrayBaseRef {
  public:
-  using FixedArrayBaseRef::FixedArrayBaseRef;
+  DEFINE_REF_CONSTRUCTOR(FixedDoubleArray, FixedArrayBaseRef)
+
   Handle<FixedDoubleArray> object() const;
 
   double get_scalar(int i) const;
@@ -616,7 +702,8 @@ class FixedDoubleArrayRef : public FixedArrayBaseRef {
 
 class BytecodeArrayRef : public FixedArrayBaseRef {
  public:
-  using FixedArrayBaseRef::FixedArrayBaseRef;
+  DEFINE_REF_CONSTRUCTOR(BytecodeArray, FixedArrayBaseRef)
+
   Handle<BytecodeArray> object() const;
 
   int register_count() const;
@@ -646,20 +733,23 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
 
 class JSArrayRef : public JSObjectRef {
  public:
-  using JSObjectRef::JSObjectRef;
+  DEFINE_REF_CONSTRUCTOR(JSArray, JSObjectRef)
+
   Handle<JSArray> object() const;
 
   ObjectRef length() const;
 
   // Return the element at key {index} if the array has a copy-on-write elements
   // storage and {index} is known to be an own data property.
-  base::Optional<ObjectRef> GetOwnCowElement(uint32_t index,
-                                             bool serialize = false) const;
+  base::Optional<ObjectRef> GetOwnCowElement(
+      uint32_t index, SerializationPolicy policy =
+                          SerializationPolicy::kAssumeSerialized) const;
 };
 
 class ScopeInfoRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(ScopeInfo, HeapObjectRef)
+
   Handle<ScopeInfo> object() const;
 
   int ContextLength() const;
@@ -683,7 +773,8 @@ class ScopeInfoRef : public HeapObjectRef {
 
 class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(SharedFunctionInfo, HeapObjectRef)
+
   Handle<SharedFunctionInfo> object() const;
 
   int builtin_id() const;
@@ -699,8 +790,9 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
   // Template objects may not be created at compilation time. This method
   // wraps the retrieval of the template object and creates it if
   // necessary.
-  JSArrayRef GetTemplateObject(ObjectRef description, FeedbackVectorRef vector,
-                               FeedbackSlot slot, bool serialize = false);
+  JSArrayRef GetTemplateObject(
+      ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 
   void SerializeFunctionTemplateInfo();
   base::Optional<FunctionTemplateInfoRef> function_template_info() const;
@@ -708,7 +800,8 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
 
 class StringRef : public NameRef {
  public:
-  using NameRef::NameRef;
+  DEFINE_REF_CONSTRUCTOR(String, NameRef)
+
   Handle<String> object() const;
 
   int length() const;
@@ -720,13 +813,15 @@ class StringRef : public NameRef {
 
 class SymbolRef : public NameRef {
  public:
-  using NameRef::NameRef;
+  DEFINE_REF_CONSTRUCTOR(Symbol, NameRef)
+
   Handle<Symbol> object() const;
 };
 
 class JSTypedArrayRef : public JSObjectRef {
  public:
-  using JSObjectRef::JSObjectRef;
+  DEFINE_REF_CONSTRUCTOR(JSTypedArray, JSObjectRef)
+
   Handle<JSTypedArray> object() const;
 
   bool is_on_heap() const;
@@ -741,25 +836,35 @@ class JSTypedArrayRef : public JSObjectRef {
 
 class SourceTextModuleRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(SourceTextModule, HeapObjectRef)
+
   Handle<SourceTextModule> object() const;
 
   void Serialize();
 
-  CellRef GetCell(int cell_index) const;
+  base::Optional<CellRef> GetCell(int cell_index) const;
 };
 
 class CellRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(Cell, HeapObjectRef)
+
   Handle<Cell> object() const;
 
   ObjectRef value() const;
 };
 
+class JSGlobalObjectRef : public JSObjectRef {
+ public:
+  DEFINE_REF_CONSTRUCTOR(JSGlobalObject, JSObjectRef)
+
+  Handle<JSGlobalObject> object() const;
+};
+
 class JSGlobalProxyRef : public JSObjectRef {
  public:
-  using JSObjectRef::JSObjectRef;
+  DEFINE_REF_CONSTRUCTOR(JSGlobalProxy, JSObjectRef)
+
   Handle<JSGlobalProxy> object() const;
 
   // If {policy} is kAssumeSerialized:
@@ -769,135 +874,26 @@ class JSGlobalProxyRef : public JSObjectRef {
   // If {policy} is kSerializeIfNeeded:
   //   Like above but potentially access the heap and serialize the necessary
   //   information.
-  base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name,
-                                                  bool serialize = false) const;
+  base::Optional<PropertyCellRef> GetPropertyCell(
+      NameRef const& name, SerializationPolicy policy =
+                               SerializationPolicy::kAssumeSerialized) const;
 };
 
 class CodeRef : public HeapObjectRef {
  public:
-  using HeapObjectRef::HeapObjectRef;
+  DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef)
+
   Handle<Code> object() const;
 };
 
 class InternalizedStringRef : public StringRef {
  public:
-  using StringRef::StringRef;
-  Handle<InternalizedString> object() const;
-};
-
-class ElementAccessFeedback;
-class NamedAccessFeedback;
-
-class ProcessedFeedback : public ZoneObject {
- public:
-  enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess };
-  Kind kind() const { return kind_; }
-
-  ElementAccessFeedback const* AsElementAccess() const;
-  NamedAccessFeedback const* AsNamedAccess() const;
-
- protected:
-  explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
-
- private:
-  Kind const kind_;
-};
+  DEFINE_REF_CONSTRUCTOR(InternalizedString, StringRef)
 
-class InsufficientFeedback final : public ProcessedFeedback {
- public:
-  InsufficientFeedback();
-};
-
-class GlobalAccessFeedback : public ProcessedFeedback {
- public:
-  explicit GlobalAccessFeedback(PropertyCellRef cell);
-  GlobalAccessFeedback(ContextRef script_context, int slot_index,
-                       bool immutable);
-
-  bool IsPropertyCell() const;
-  PropertyCellRef property_cell() const;
-
-  bool IsScriptContextSlot() const { return !IsPropertyCell(); }
-  ContextRef script_context() const;
-  int slot_index() const;
-  bool immutable() const;
-
-  base::Optional<ObjectRef> GetConstantHint() const;
-
- private:
-  ObjectRef const cell_or_context_;
-  int const index_and_immutable_;
-};
-
-class KeyedAccessMode {
- public:
-  static KeyedAccessMode FromNexus(FeedbackNexus const& nexus);
-
-  AccessMode access_mode() const;
-  bool IsLoad() const;
-  bool IsStore() const;
-  KeyedAccessLoadMode load_mode() const;
-  KeyedAccessStoreMode store_mode() const;
-
- private:
-  AccessMode const access_mode_;
-  union LoadStoreMode {
-    LoadStoreMode(KeyedAccessLoadMode load_mode);
-    LoadStoreMode(KeyedAccessStoreMode store_mode);
-    KeyedAccessLoadMode load_mode;
-    KeyedAccessStoreMode store_mode;
-  } const load_store_mode_;
-
-  KeyedAccessMode(AccessMode access_mode, KeyedAccessLoadMode load_mode);
-  KeyedAccessMode(AccessMode access_mode, KeyedAccessStoreMode store_mode);
-};
-
-class ElementAccessFeedback : public ProcessedFeedback {
- public:
-  ElementAccessFeedback(Zone* zone, KeyedAccessMode const& keyed_mode);
-
-  // No transition sources appear in {receiver_maps}.
-  // All transition targets appear in {receiver_maps}.
-  ZoneVector<Handle<Map>> receiver_maps;
-  ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions;
-
-  KeyedAccessMode const keyed_mode;
-
-  class MapIterator {
-   public:
-    bool done() const;
-    void advance();
-    MapRef current() const;
-
-   private:
-    friend class ElementAccessFeedback;
-
-    explicit MapIterator(ElementAccessFeedback const& processed,
-                         JSHeapBroker* broker);
-
-    ElementAccessFeedback const& processed_;
-    JSHeapBroker* const broker_;
-    size_t index_ = 0;
-  };
-
-  // Iterator over all maps: first {receiver_maps}, then transition sources.
-  MapIterator all_maps(JSHeapBroker* broker) const;
+  Handle<InternalizedString> object() const;
 };
 
-class NamedAccessFeedback : public ProcessedFeedback {
- public:
-  NamedAccessFeedback(NameRef const& name,
-                      ZoneVector<PropertyAccessInfo> const& access_infos);
-
-  NameRef const& name() const { return name_; }
-  ZoneVector<PropertyAccessInfo> const& access_infos() const {
-    return access_infos_;
-  }
-
- private:
-  NameRef const name_;
-  ZoneVector<PropertyAccessInfo> const access_infos_;
-};
+#undef DEFINE_REF_CONSTRUCTOR
 
 }  // namespace compiler
 }  // namespace internal
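
The SerializationPolicy enum introduced above replaces the ad-hoc bool serialize flags. A standalone model of the pattern (toy cache, not the real broker): kAssumeSerialized reads may miss and return nothing, while kSerializeIfNeeded is the only mode allowed to populate the cache.

#include <map>
#include <optional>

enum class PolicySketch { kAssumeSerialized, kSerializeIfNeeded };

struct BrokerSketch {
  std::map<int, int> cache;  // filled by an up-front serialization pass

  std::optional<int> Get(int index, PolicySketch policy) {
    auto it = cache.find(index);
    if (it != cache.end()) return it->second;
    if (policy == PolicySketch::kSerializeIfNeeded) {
      int value = index * 2;  // stand-in for reading the heap
      cache.emplace(index, value);
      return value;
    }
    return std::nullopt;  // assume-serialized callers bail out on a miss
  }
};
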
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index eda866e5f2ad04..45b49757fb107a 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -21,9 +21,11 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
-                             CommonOperatorBuilder* common, Zone* zone,
-                             Signature<MachineRepresentation>* signature)
+Int64Lowering::Int64Lowering(
+    Graph* graph, MachineOperatorBuilder* machine,
+    CommonOperatorBuilder* common, Zone* zone,
+    Signature<MachineRepresentation>* signature,
+    std::unique_ptr<Int64LoweringSpecialCase> special_case)
     : zone_(zone),
       graph_(graph),
       machine_(machine),
@@ -32,8 +34,9 @@ Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
       stack_(zone),
       replacements_(nullptr),
       signature_(signature),
-      placeholder_(graph->NewNode(common->Parameter(-2, "placeholder"),
-                                  graph->start())) {
+      placeholder_(
+          graph->NewNode(common->Parameter(-2, "placeholder"), graph->start())),
+      special_case_(std::move(special_case)) {
   DCHECK_NOT_NULL(graph);
   DCHECK_NOT_NULL(graph->end());
   replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
@@ -77,7 +80,7 @@ void Int64Lowering::LowerGraph() {
 
 namespace {
 
-int GetReturnIndexAfterLowering(CallDescriptor* call_descriptor,
+int GetReturnIndexAfterLowering(const CallDescriptor* call_descriptor,
                                 int old_index) {
   int result = old_index;
   for (int i = 0; i < old_index; i++) {
@@ -89,7 +92,7 @@ int GetReturnIndexAfterLowering(CallDescriptor* call_descriptor,
   return result;
 }
 
-int GetReturnCountAfterLowering(CallDescriptor* call_descriptor) {
+int GetReturnCountAfterLowering(const CallDescriptor* call_descriptor) {
   return GetReturnIndexAfterLowering(
       call_descriptor, static_cast<int>(call_descriptor->ReturnCount()));
 }
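
GetReturnIndexAfterLowering just shifts an index by the number of 64-bit returns preceding it, since each of those lowers to a (low, high) pair of i32 returns. A toy version with a worked example:

#include <vector>

enum class RepSketch { kWord32, kWord64 };

int ReturnIndexAfterLoweringSketch(const std::vector<RepSketch>& returns,
                                   int old_index) {
  int result = old_index;
  for (int i = 0; i < old_index; i++) {
    if (returns[i] == RepSketch::kWord64) result++;  // i64 adds one extra slot
  }
  return result;
}
// Example: {kWord32, kWord64, kWord64} with old_index 2 yields 3, because
// the i64 at index 1 occupies two lowered return slots.
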
@@ -336,21 +339,21 @@ void Int64Lowering::LowerNode(Node* node) {
       if (DefaultLowering(node) || returns_require_lowering) {
         // Tail calls do not have return values, so adjusting the call
         // descriptor is enough.
-        auto new_descriptor = GetI32WasmCallDescriptor(zone(), call_descriptor);
-        NodeProperties::ChangeOp(node, common()->TailCall(new_descriptor));
+        NodeProperties::ChangeOp(
+            node, common()->TailCall(LowerCallDescriptor(call_descriptor)));
       }
       break;
     }
     case IrOpcode::kCall: {
-      auto call_descriptor =
-          const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
+      auto call_descriptor = CallDescriptorOf(node->op());
+
       bool returns_require_lowering =
           GetReturnCountAfterLowering(call_descriptor) !=
           static_cast<int>(call_descriptor->ReturnCount());
       if (DefaultLowering(node) || returns_require_lowering) {
         // We have to adjust the call descriptor.
-        NodeProperties::ChangeOp(node, common()->Call(GetI32WasmCallDescriptor(
-                                           zone(), call_descriptor)));
+        NodeProperties::ChangeOp(
+            node, common()->Call(LowerCallDescriptor(call_descriptor)));
       }
       if (returns_require_lowering) {
         size_t return_arity = call_descriptor->ReturnCount();
@@ -994,6 +997,19 @@ bool Int64Lowering::DefaultLowering(Node* node, bool low_word_only) {
   return something_changed;
 }
 
+CallDescriptor* Int64Lowering::LowerCallDescriptor(
+    const CallDescriptor* call_descriptor) {
+  if (special_case_) {
+    if (call_descriptor == special_case_->bigint_to_i64_call_descriptor) {
+      return special_case_->bigint_to_i32_pair_call_descriptor;
+    }
+    if (call_descriptor == special_case_->i64_to_bigint_call_descriptor) {
+      return special_case_->i32_pair_to_bigint_call_descriptor;
+    }
+  }
+  return GetI32WasmCallDescriptor(zone(), call_descriptor);
+}
+
 void Int64Lowering::ReplaceNode(Node* old, Node* new_low, Node* new_high) {
   // if new_low == nullptr, then also new_high == nullptr.
   DCHECK(new_low != nullptr || new_high == nullptr);
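
LowerCallDescriptor matches the special cases by pointer identity rather than by comparing signatures. A standalone model of that dispatch (illustrative types):

struct CallDescriptorSketch {};  // stand-in for compiler::CallDescriptor

struct SpecialCaseSketch {
  const CallDescriptorSketch* bigint_to_i64 = nullptr;
  CallDescriptorSketch* bigint_to_i32_pair = nullptr;
  const CallDescriptorSketch* i64_to_bigint = nullptr;
  CallDescriptorSketch* i32_pair_to_bigint = nullptr;
};

CallDescriptorSketch* LowerSketch(const CallDescriptorSketch* descriptor,
                                  const SpecialCaseSketch* special,
                                  CallDescriptorSketch* generic_lowering) {
  if (special != nullptr) {
    // Pointer comparison: the caller hands in the very descriptor objects it
    // registered, so identity is a reliable key.
    if (descriptor == special->bigint_to_i64) return special->bigint_to_i32_pair;
    if (descriptor == special->i64_to_bigint) return special->i32_pair_to_bigint;
  }
  return generic_lowering;  // stand-in for GetI32WasmCallDescriptor
}
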
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 9c77cf41a33137..1e2a36089b107d 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -20,11 +20,30 @@ class Signature;
 
 namespace compiler {
 
+// Struct for CallDescriptors that need special lowering.
+struct V8_EXPORT_PRIVATE Int64LoweringSpecialCase {
+  Int64LoweringSpecialCase()
+      : bigint_to_i64_call_descriptor(nullptr),
+        i64_to_bigint_call_descriptor(nullptr),
+        bigint_to_i32_pair_call_descriptor(nullptr),
+        i32_pair_to_bigint_call_descriptor(nullptr) {}
+
+  // CallDescriptors that need special lowering.
+  CallDescriptor* bigint_to_i64_call_descriptor;
+  CallDescriptor* i64_to_bigint_call_descriptor;
+
+  // The replacement CallDescriptors.
+  CallDescriptor* bigint_to_i32_pair_call_descriptor;
+  CallDescriptor* i32_pair_to_bigint_call_descriptor;
+};
+
 class V8_EXPORT_PRIVATE Int64Lowering {
  public:
-  Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
-                CommonOperatorBuilder* common, Zone* zone,
-                Signature<MachineRepresentation>* signature);
+  Int64Lowering(
+      Graph* graph, MachineOperatorBuilder* machine,
+      CommonOperatorBuilder* common, Zone* zone,
+      Signature<MachineRepresentation>* signature,
+      std::unique_ptr<Int64LoweringSpecialCase> special_case = nullptr);
 
   void LowerGraph();
 
@@ -53,6 +72,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
   void LowerWord64AtomicBinop(Node* node, const Operator* op);
   void LowerWord64AtomicNarrowOp(Node* node, const Operator* op);
 
+  CallDescriptor* LowerCallDescriptor(const CallDescriptor* call_descriptor);
+
   void ReplaceNode(Node* old, Node* new_low, Node* new_high);
   bool HasReplacementLow(Node* node);
   Node* GetReplacementLow(Node* node);
@@ -77,6 +98,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
   Replacement* replacements_;
   Signature<MachineRepresentation>* signature_;
   Node* placeholder_;
+  std::unique_ptr<Int64LoweringSpecialCase> special_case_;
 };
 
 }  // namespace compiler
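
How a caller might wire this up, as a hedged sketch: the helper name and its parameters are assumptions for illustration; only the struct and the Int64Lowering constructor come from the header above, and the descriptor construction itself is elided.

#include <memory>

// Hypothetical helper (not a real call site): populate the four descriptors,
// then pass the struct as the optional last Int64Lowering argument.
std::unique_ptr<Int64LoweringSpecialCase> MakeBigIntSpecialCaseSketch(
    CallDescriptor* bigint_to_i64_desc, CallDescriptor* i64_to_bigint_desc,
    CallDescriptor* bigint_to_i32_pair_desc,
    CallDescriptor* i32_pair_to_bigint_desc) {
  auto special_case = std::make_unique<Int64LoweringSpecialCase>();
  special_case->bigint_to_i64_call_descriptor = bigint_to_i64_desc;
  special_case->i64_to_bigint_call_descriptor = i64_to_bigint_desc;
  special_case->bigint_to_i32_pair_call_descriptor = bigint_to_i32_pair_desc;
  special_case->i32_pair_to_bigint_call_descriptor = i32_pair_to_bigint_desc;
  return special_case;
}
// The result would then be consumed roughly as:
//   Int64Lowering(graph, machine, common, zone, signature,
//                 std::move(special_case)).LowerGraph();
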
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 8128f899497192..0b7b4a65f45825 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -14,6 +14,7 @@
 #include "src/compiler/access-info.h"
 #include "src/compiler/allocation-builder.h"
 #include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/map-inference.h"
@@ -21,7 +22,6 @@
 #include "src/compiler/property-access-builder.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/compiler/type-cache.h"
-#include "src/compiler/vector-slot-pair.h"
 #include "src/ic/call-optimization.h"
 #include "src/logging/counters.h"
 #include "src/objects/arguments-inl.h"
@@ -179,101 +179,9 @@ Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op,
   return Replace(value);
 }
 
-// ES section #sec-math.hypot Math.hypot ( value1, value2, ...values )
-Reduction JSCallReducer::ReduceMathHypot(Node* node) {
-  CallParameters const& p = CallParametersOf(node->op());
-  if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
-    return NoChange();
-  }
-  if (node->op()->ValueInputCount() < 3) {
-    Node* value = jsgraph()->ZeroConstant();
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  NodeVector values(graph()->zone());
-
-  Node* max = effect =
-      graph()->NewNode(simplified()->SpeculativeToNumber(
-                           NumberOperationHint::kNumberOrOddball, p.feedback()),
-                       NodeProperties::GetValueInput(node, 2), effect, control);
-  max = graph()->NewNode(simplified()->NumberAbs(), max);
-  values.push_back(max);
-  for (int i = 3; i < node->op()->ValueInputCount(); ++i) {
-    Node* input = effect = graph()->NewNode(
-        simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
-                                          p.feedback()),
-        NodeProperties::GetValueInput(node, i), effect, control);
-    input = graph()->NewNode(simplified()->NumberAbs(), input);
-    values.push_back(input);
-
-    // Make sure {max} is NaN in the end in case any argument was NaN.
-    max = graph()->NewNode(
-        common()->Select(MachineRepresentation::kTagged),
-        graph()->NewNode(simplified()->NumberLessThanOrEqual(), input, max),
-        max, input);
-  }
-
-  Node* check0 = graph()->NewNode(simplified()->NumberEqual(), max,
-                                  jsgraph()->ZeroConstant());
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0 = jsgraph()->ZeroConstant();
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
-  {
-    Node* check1 = graph()->NewNode(simplified()->NumberEqual(), max,
-                                    jsgraph()->Constant(V8_INFINITY));
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = jsgraph()->Constant(V8_INFINITY);
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      // Kahan summation to avoid rounding errors.
-      // Normalize the numbers to the largest one to avoid overflow.
-      Node* sum = jsgraph()->ZeroConstant();
-      Node* compensation = jsgraph()->ZeroConstant();
-      for (Node* value : values) {
-        Node* n = graph()->NewNode(simplified()->NumberDivide(), value, max);
-        Node* summand = graph()->NewNode(
-            simplified()->NumberSubtract(),
-            graph()->NewNode(simplified()->NumberMultiply(), n, n),
-            compensation);
-        Node* preliminary =
-            graph()->NewNode(simplified()->NumberAdd(), sum, summand);
-        compensation = graph()->NewNode(
-            simplified()->NumberSubtract(),
-            graph()->NewNode(simplified()->NumberSubtract(), preliminary, sum),
-            summand);
-        sum = preliminary;
-      }
-      vfalse1 = graph()->NewNode(
-          simplified()->NumberMultiply(),
-          graph()->NewNode(simplified()->NumberSqrt(), sum), max);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                               vtrue1, vfalse1, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
-                       vfalse0, control);
-  ReplaceWithValue(node, value, effect, control);
-  return Replace(value);
-}
-
 Reduction JSCallReducer::Reduce(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   switch (node->opcode()) {
     case IrOpcode::kJSConstruct:
       return ReduceJSConstruct(node);
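
The DisallowHeapAccessIf guard above is armed only when concurrent inlining is on. A standalone model of a flag-conditional RAII scope (toy, not the V8 assert-scope machinery):

#include <cassert>

struct DisallowIfSketch {
  static inline int depth = 0;  // >0 means the guarded operation is forbidden
  const bool active;
  explicit DisallowIfSketch(bool condition) : active(condition) {
    if (active) ++depth;
  }
  ~DisallowIfSketch() {
    if (active) --depth;
  }
};

void TouchHeapSketch() {
  assert(DisallowIfSketch::depth == 0);  // trips if a guarded scope is live
}
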
@@ -313,6 +221,8 @@ void JSCallReducer::Finalize() {
 
 // ES6 section 22.1.1 The Array Constructor
 Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* target = NodeProperties::GetValueInput(node, 0);
   CallParameters const& p = CallParametersOf(node->op());
@@ -480,14 +390,11 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
   // TODO(mslekova): Since this introduces a Call that will get optimized by
   // the JSCallReducer, we basically might have to do all the serialization
   // that we do for that here as well. The only difference is that here we
-  // disable speculation (cf. the empty VectorSlotPair above), causing the
+  // disable speculation (cf. the empty FeedbackSource above), causing the
   // JSCallReducer to do much less work. We should revisit this later.
   NodeProperties::ChangeOp(
       node,
-      javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
-  // TODO(mslekova): Remove once ReduceJSCall is brokerized.
-  AllowHandleDereference allow_handle_dereference;
-  AllowHandleAllocation allow_handle_allocation;
+      javascript()->Call(arity, p.frequency(), FeedbackSource(), convert_mode));
   // Try to further reduce the JSCall {node}.
   Reduction const reduction = ReduceJSCall(node);
   return reduction.Changed() ? reduction : Changed(node);
@@ -495,6 +402,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
 
 // ES section #sec-function.prototype.bind
 Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
   if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -506,7 +415,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
   //  - target, which is Function.prototype.bind JSFunction
   //  - receiver, which is the [[BoundTargetFunction]]
   //  - bound_this (optional), which is the [[BoundThis]]
-  //  - and all the remaining value inouts are [[BoundArguments]]
+  //  - and all the remaining value inputs are [[BoundArguments]]
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* bound_this = (node->op()->ValueInputCount() < 3)
                          ? jsgraph()->UndefinedConstant()
@@ -525,14 +434,24 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
 
   MapRef first_receiver_map(broker(), receiver_maps[0]);
   bool const is_constructor = first_receiver_map.is_constructor();
-  first_receiver_map.SerializePrototype();
+
+  if (FLAG_concurrent_inlining && !first_receiver_map.serialized_prototype()) {
+    TRACE_BROKER_MISSING(broker(),
+                         "serialized prototype on map " << first_receiver_map);
+    return inference.NoChange();
+  }
   ObjectRef const prototype = first_receiver_map.prototype();
   for (Handle<Map> const map : receiver_maps) {
     MapRef receiver_map(broker(), map);
 
+    if (FLAG_concurrent_inlining && !receiver_map.serialized_prototype()) {
+      TRACE_BROKER_MISSING(broker(),
+                           "serialized prototype on map " << receiver_map);
+      return inference.NoChange();
+    }
+
     // Check for consistency among the {receiver_maps}.
     STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
-    receiver_map.SerializePrototype();
     if (!receiver_map.prototype().equals(prototype) ||
         receiver_map.is_constructor() != is_constructor ||
         receiver_map.instance_type() < FIRST_FUNCTION_TYPE) {
@@ -548,22 +467,31 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
     // recomputed even if the actual value of the object changes.
     // This mirrors the checks done in builtins-function-gen.cc at
     // runtime otherwise.
-    Handle<DescriptorArray> descriptors(
-        receiver_map.object()->instance_descriptors(), isolate());
-    if (descriptors->number_of_descriptors() < 2) return inference.NoChange();
-    if (descriptors->GetKey(JSFunction::kLengthDescriptorIndex) !=
-        ReadOnlyRoots(isolate()).length_string()) {
-      return inference.NoChange();
-    }
-    if (!descriptors->GetStrongValue(JSFunction::kLengthDescriptorIndex)
-             .IsAccessorInfo()) {
+    int minimum_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex,
+                                         JSFunction::kNameDescriptorIndex) +
+                                  1;
+    if (receiver_map.NumberOfOwnDescriptors() < minimum_nof_descriptors) {
       return inference.NoChange();
     }
-    if (descriptors->GetKey(JSFunction::kNameDescriptorIndex) !=
-        ReadOnlyRoots(isolate()).name_string()) {
+    if (!receiver_map.serialized_own_descriptor(
+            JSFunction::kLengthDescriptorIndex) ||
+        !receiver_map.serialized_own_descriptor(
+            JSFunction::kNameDescriptorIndex)) {
+      TRACE_BROKER_MISSING(broker(),
+                           "serialized descriptors on map " << receiver_map);
       return inference.NoChange();
     }
-    if (!descriptors->GetStrongValue(JSFunction::kNameDescriptorIndex)
+    ReadOnlyRoots roots(isolate());
+    StringRef length_string(broker(), roots.length_string_handle());
+    StringRef name_string(broker(), roots.name_string_handle());
+
+    if (!receiver_map.GetPropertyKey(JSFunction::kLengthDescriptorIndex)
+             .equals(length_string) ||
+        !receiver_map.GetStrongValue(JSFunction::kLengthDescriptorIndex)
+             .IsAccessorInfo() ||
+        !receiver_map.GetPropertyKey(JSFunction::kNameDescriptorIndex)
+             .equals(name_string) ||
+        !receiver_map.GetStrongValue(JSFunction::kNameDescriptorIndex)
              .IsAccessorInfo()) {
       return inference.NoChange();
     }
@@ -646,10 +574,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
   }
   NodeProperties::ChangeOp(
       node,
-      javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
-  // TODO(mslekova): Remove once ReduceJSCall is brokerized.
-  AllowHandleDereference allow_handle_dereference;
-  AllowHandleAllocation allow_handle_allocation;
+      javascript()->Call(arity, p.frequency(), FeedbackSource(), convert_mode));
   // Try to further reduce the JSCall {node}.
   Reduction const reduction = ReduceJSCall(node);
   return reduction.Changed() ? reduction : Changed(node);
@@ -693,13 +618,19 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
   MapHandles const& object_maps = inference.GetMaps();
 
   MapRef candidate_map(broker(), object_maps[0]);
-  candidate_map.SerializePrototype();
+  if (FLAG_concurrent_inlining && !candidate_map.serialized_prototype()) {
+    TRACE_BROKER_MISSING(broker(), "prototype for map " << candidate_map);
+    return inference.NoChange();
+  }
   ObjectRef candidate_prototype = candidate_map.prototype();
 
   // Check if we can constant-fold the {candidate_prototype}.
   for (size_t i = 0; i < object_maps.size(); ++i) {
     MapRef object_map(broker(), object_maps[i]);
-    object_map.SerializePrototype();
+    if (FLAG_concurrent_inlining && !object_map.serialized_prototype()) {
+      TRACE_BROKER_MISSING(broker(), "prototype for map " << object_map);
+      return inference.NoChange();
+    }
     if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
         !object_map.prototype().equals(candidate_prototype)) {
       // We exclude special receivers, like JSProxy or API objects that
@@ -830,6 +761,8 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
 
 // ES #sec-object.prototype.isprototypeof
 Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* value = node->op()->ValueInputCount() > 2
@@ -1048,7 +981,7 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
   {
     // TODO(magardn): collect feedback so this can be optimized
     vtrue = etrue = if_true =
-        graph()->NewNode(javascript()->HasProperty(VectorSlotPair()), target,
+        graph()->NewNode(javascript()->HasProperty(FeedbackSource()), target,
                          key, context, frame_state, etrue, if_true);
   }
 
@@ -1114,10 +1047,10 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
   return true;
 }
 
-bool CanInlineArrayResizingBuiltin(
-    JSHeapBroker* broker, MapHandles const& receiver_maps,
-    std::vector<ElementsKind>& kinds,  // NOLINT(runtime/references)
-    bool builtin_is_push = false) {
+bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
+                                   MapHandles const& receiver_maps,
+                                   std::vector<ElementsKind>* kinds,
+                                   bool builtin_is_push = false) {
   DCHECK_NE(0, receiver_maps.size());
   for (auto receiver_map : receiver_maps) {
     MapRef map(broker, receiver_map);
@@ -1128,14 +1061,14 @@ bool CanInlineArrayResizingBuiltin(
       return false;
     }
     ElementsKind current_kind = map.elements_kind();
-    auto kind_ptr = kinds.data();
+    auto kind_ptr = kinds->data();
     size_t i;
-    for (i = 0; i < kinds.size(); i++, kind_ptr++) {
+    for (i = 0; i < kinds->size(); i++, kind_ptr++) {
       if (UnionElementsKindUptoPackedness(kind_ptr, current_kind)) {
         break;
       }
     }
-    if (i == kinds.size()) kinds.push_back(current_kind);
+    if (i == kinds->size()) kinds->push_back(current_kind);
   }
   return true;
 }
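
The signature change above swaps a mutable reference (which needed a NOLINT) for a pointer, so the mutation becomes visible at call sites via &kinds. A toy illustration of the idiom:

#include <vector>

// Out-parameters go by pointer; `&` at the call site flags the mutation.
bool CollectEvensSketch(const std::vector<int>& in, std::vector<int>* out) {
  for (int v : in) {
    if (v % 2 != 0) return false;
    out->push_back(v);
  }
  return true;
}

int main() {
  std::vector<int> evens;
  CollectEvensSketch({2, 4, 6}, &evens);  // mutation is explicit here
}
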
@@ -1143,6 +1076,8 @@ bool CanInlineArrayResizingBuiltin(
 
 Reduction JSCallReducer::ReduceArrayForEach(
     Node* node, const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   if (!FLAG_turbo_inline_array_builtins) return NoChange();
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -1309,6 +1244,8 @@ Reduction JSCallReducer::ReduceArrayForEach(
 Reduction JSCallReducer::ReduceArrayReduce(
     Node* node, ArrayReduceDirection direction,
     const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   if (!FLAG_turbo_inline_array_builtins) return NoChange();
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -1567,6 +1504,8 @@ Reduction JSCallReducer::ReduceArrayReduce(
 
 Reduction JSCallReducer::ReduceArrayMap(Node* node,
                                         const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   if (!FLAG_turbo_inline_array_builtins) return NoChange();
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -1759,6 +1698,8 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
 
 Reduction JSCallReducer::ReduceArrayFilter(
     Node* node, const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   if (!FLAG_turbo_inline_array_builtins) return NoChange();
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -1809,7 +1750,8 @@ Reduction JSCallReducer::ReduceArrayFilter(
                 Type::Array());
     ab.Store(AccessBuilder::ForMap(), initial_map);
     Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
-    ab.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array);
+    ab.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+             empty_fixed_array);
     ab.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
     ab.Store(AccessBuilder::ForJSArrayLength(packed_kind),
              jsgraph()->ZeroConstant());
@@ -1998,6 +1940,8 @@ Reduction JSCallReducer::ReduceArrayFilter(
 
 Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
                                          const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   if (!FLAG_turbo_inline_array_builtins) return NoChange();
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -2218,7 +2162,7 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
         IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
                                    : GrowFastElementsMode::kSmiOrObjectElements;
     elements = etrue = graph()->NewNode(
-        simplified()->MaybeGrowFastElements(mode, VectorSlotPair()), a,
+        simplified()->MaybeGrowFastElements(mode, FeedbackSource()), a,
         elements, checked_to, elements_length, etrue, if_true);
 
     // Update the length of {a}.
@@ -2288,7 +2232,7 @@ void JSCallReducer::RewirePostCallbackExceptionEdges(Node* check_throw,
 
 Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
                                      Node* control, Node** effect, Node** k,
-                                     const VectorSlotPair& feedback) {
+                                     const FeedbackSource& feedback) {
   // Make sure that the access is still in bounds, since the callback could
   // have changed the array's size.
   Node* length = *effect = graph()->NewNode(
@@ -2313,6 +2257,8 @@ Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
 
 Reduction JSCallReducer::ReduceArrayEvery(Node* node,
                                           const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   if (!FLAG_turbo_inline_array_builtins) return NoChange();
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -2567,6 +2513,8 @@ Callable GetCallableForArrayIncludes(ElementsKind elements_kind,
 // #sec-array.prototype.includes
 Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
     SearchVariant search_variant, Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   CallParameters const& p = CallParametersOf(node->op());
   if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
     return NoChange();
@@ -2638,6 +2586,8 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
 
 Reduction JSCallReducer::ReduceArraySome(Node* node,
                                          const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   if (!FLAG_turbo_inline_array_builtins) return NoChange();
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -2906,8 +2856,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
 
       // See if we can constant-fold the compatible receiver checks.
       HolderLookupResult api_holder =
-          function_template_info.LookupHolderOfExpectedType(first_receiver_map,
-                                                            false);
+          function_template_info.LookupHolderOfExpectedType(first_receiver_map);
       if (api_holder.lookup == CallOptimization::kHolderNotFound)
         return inference.NoChange();
 
@@ -2937,8 +2886,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
       for (size_t i = 1; i < receiver_maps.size(); ++i) {
         MapRef receiver_map(broker(), receiver_maps[i]);
         HolderLookupResult holder_i =
-            function_template_info.LookupHolderOfExpectedType(receiver_map,
-                                                              false);
+            function_template_info.LookupHolderOfExpectedType(receiver_map);
 
         if (api_holder.lookup != holder_i.lookup) return inference.NoChange();
         if (!(api_holder.holder.has_value() && holder_i.holder.has_value()))
@@ -3059,7 +3007,7 @@ bool IsSafeArgumentsElements(Node* node) {
 
 Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
     Node* node, int arity, CallFrequency const& frequency,
-    VectorSlotPair const& feedback) {
+    FeedbackSource const& feedback) {
   DCHECK(node->opcode() == IrOpcode::kJSCallWithArrayLike ||
          node->opcode() == IrOpcode::kJSCallWithSpread ||
          node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
@@ -3285,13 +3233,6 @@ bool ShouldUseCallICFeedback(Node* node) {
   return true;
 }
 
-base::Optional<HeapObjectRef> GetHeapObjectFeedback(
-    JSHeapBroker* broker, const FeedbackNexus& nexus) {
-  HeapObject object;
-  if (!nexus.GetFeedback()->GetHeapObject(&object)) return base::nullopt;
-  return HeapObjectRef(broker, handle(object, broker->isolate()));
-}
-
 }  // namespace
 
 Reduction JSCallReducer::ReduceJSCall(Node* node) {
@@ -3309,7 +3250,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
     ObjectRef target_ref = m.Ref(broker());
     if (target_ref.IsJSFunction()) {
       JSFunctionRef function = target_ref.AsJSFunction();
-      function.Serialize();
+      if (FLAG_concurrent_inlining && !function.serialized()) {
+        TRACE_BROKER_MISSING(broker(), "data for function " << function);
+        return NoChange();
+      }
 
       // Don't inline cross native context.
       if (!function.native_context().equals(native_context())) {
@@ -3319,7 +3263,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
       return ReduceJSCall(node, function.shared());
     } else if (target_ref.IsJSBoundFunction()) {
       JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
-      function.Serialize();
+      if (FLAG_concurrent_inlining && !function.serialized()) {
+        TRACE_BROKER_MISSING(broker(), "data for function " << function);
+        return NoChange();
+      }
 
       ObjectRef bound_this = function.bound_this();
       ConvertReceiverMode const convert_mode =
@@ -3342,7 +3289,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
       }
 
       NodeProperties::ChangeOp(
-          node, javascript()->Call(arity, p.frequency(), VectorSlotPair(),
+          node, javascript()->Call(arity, p.frequency(), FeedbackSource(),
                                    convert_mode));
 
       // Try to further reduce the JSCall {node}.
@@ -3390,7 +3337,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
             ? ConvertReceiverMode::kAny
             : ConvertReceiverMode::kNotNullOrUndefined;
     NodeProperties::ChangeOp(
-        node, javascript()->Call(arity, p.frequency(), VectorSlotPair(),
+        node, javascript()->Call(arity, p.frequency(), FeedbackSource(),
                                  convert_mode));
 
     // Try to further reduce the JSCall {node}.
@@ -3398,19 +3345,18 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
     return reduction.Changed() ? reduction : Changed(node);
   }
 
-  // Extract feedback from the {node} using the FeedbackNexus.
   if (!p.feedback().IsValid()) return NoChange();
-  FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-  if (nexus.IsUninitialized()) {
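+  // Read the call feedback through the broker, which hands back processed
+  // feedback that is safe to use without heap access.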
+  ProcessedFeedback const& feedback =
+      broker()->GetFeedbackForCall(FeedbackSource(p.feedback()));
+  if (feedback.IsInsufficient()) {
     return ReduceSoftDeoptimize(
         node, DeoptimizeReason::kInsufficientTypeFeedbackForCall);
   }
 
-  base::Optional<HeapObjectRef> feedback =
-      GetHeapObjectFeedback(broker(), nexus);
-  if (feedback.has_value() && ShouldUseCallICFeedback(target) &&
-      feedback->map().is_callable()) {
-    Node* target_function = jsgraph()->Constant(*feedback);
+  base::Optional<HeapObjectRef> feedback_target = feedback.AsCall().target();
+  if (feedback_target.has_value() && ShouldUseCallICFeedback(target) &&
+      feedback_target->map().is_callable()) {
+    Node* target_function = jsgraph()->Constant(*feedback_target);
 
     // Check that the {target} is still the {target_function}.
     Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
@@ -3630,8 +3576,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
       return ReduceMathUnary(node, simplified()->NumberFloor());
     case Builtins::kMathFround:
       return ReduceMathUnary(node, simplified()->NumberFround());
-    case Builtins::kMathHypot:
-      return ReduceMathHypot(node);
     case Builtins::kMathLog:
       return ReduceMathUnary(node, simplified()->NumberLog());
     case Builtins::kMathLog1p:
@@ -3785,22 +3729,17 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
       break;
   }
 
-  if (shared.object()->IsApiFunction()) {
+  if (shared.function_template_info().has_value()) {
     return ReduceCallApiFunction(node, shared);
   }
   return NoChange();
 }
 
 Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
-  // TODO(mslekova): Remove once ReduceJSCallWithArrayLike is brokerized.
-  AllowHandleDereference allow_handle_dereference;
-  AllowHandleAllocation allow_handle_allocation;
-
   DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
   CallFrequency frequency = CallFrequencyOf(node->op());
-  VectorSlotPair feedback;
   return ReduceCallOrConstructWithArrayLikeOrSpread(node, 2, frequency,
-                                                    feedback);
+                                                    FeedbackSource());
 }
 
 Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
@@ -3809,7 +3748,7 @@ Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
   DCHECK_LE(3u, p.arity());
   int arity = static_cast<int>(p.arity() - 1);
   CallFrequency frequency = p.frequency();
-  VectorSlotPair feedback = p.feedback();
+  FeedbackSource feedback = p.feedback();
   return ReduceCallOrConstructWithArrayLikeOrSpread(node, arity, frequency,
                                                     feedback);
 }
@@ -3824,17 +3763,16 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  // Extract feedback from the {node} using the FeedbackNexus.
   if (p.feedback().IsValid()) {
-    FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-    if (nexus.IsUninitialized()) {
+    ProcessedFeedback const& feedback =
+        broker()->GetFeedbackForCall(p.feedback());
+    if (feedback.IsInsufficient()) {
       return ReduceSoftDeoptimize(
           node, DeoptimizeReason::kInsufficientTypeFeedbackForConstruct);
     }
 
-    base::Optional<HeapObjectRef> feedback =
-        GetHeapObjectFeedback(broker(), nexus);
-    if (feedback.has_value() && feedback->IsAllocationSite()) {
+    base::Optional<HeapObjectRef> feedback_target = feedback.AsCall().target();
+    if (feedback_target.has_value() && feedback_target->IsAllocationSite()) {
       // The feedback is an AllocationSite, which means we have called the
       // Array function and collected transition (and pretenuring) feedback
       // for the resulting arrays.  This has to be kept in sync with the
@@ -3859,12 +3797,12 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
       NodeProperties::ReplaceValueInput(node, array_function, 1);
       NodeProperties::ChangeOp(
           node, javascript()->CreateArray(
-                    arity, feedback->AsAllocationSite().object()));
+                    arity, feedback_target->AsAllocationSite().object()));
       return Changed(node);
-    } else if (feedback.has_value() &&
+    } else if (feedback_target.has_value() &&
                !HeapObjectMatcher(new_target).HasValue() &&
-               feedback->map().is_constructor()) {
-      Node* new_target_feedback = jsgraph()->Constant(*feedback);
+               feedback_target->map().is_constructor()) {
+      Node* new_target_feedback = jsgraph()->Constant(*feedback_target);
 
       // Check that the {new_target} is still the {new_target_feedback}.
       Node* check = graph()->NewNode(simplified()->ReferenceEqual(), new_target,
@@ -3902,7 +3840,11 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
 
     if (target_ref.IsJSFunction()) {
       JSFunctionRef function = target_ref.AsJSFunction();
-      function.Serialize();
+      if (FLAG_concurrent_inlining && !function.serialized()) {
+        TRACE_BROKER_MISSING(broker(),
+                             "function, not serialized: " << function);
+        return NoChange();
+      }
 
       // Do not reduce constructors with break points.
       if (function.shared().HasBreakInfo()) return NoChange();
@@ -3959,7 +3901,11 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
       }
     } else if (target_ref.IsJSBoundFunction()) {
       JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
-      function.Serialize();
+      if (FLAG_concurrent_inlining && !function.serialized()) {
+        TRACE_BROKER_MISSING(broker(),
+                             "function, not serialized: " << function);
+        return NoChange();
+      }
 
       ObjectRef bound_target_function = function.bound_target_function();
       FixedArrayRef bound_arguments = function.bound_arguments();
@@ -3989,7 +3935,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
       // Update the JSConstruct operator on {node}.
       NodeProperties::ChangeOp(
           node,
-          javascript()->Construct(arity + 2, p.frequency(), VectorSlotPair()));
+          javascript()->Construct(arity + 2, p.frequency(), FeedbackSource()));
 
       // Try to further reduce the JSConstruct {node}.
       Reduction const reduction = ReduceJSConstruct(node);
@@ -4030,7 +3976,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
     // Update the JSConstruct operator on {node}.
     NodeProperties::ChangeOp(
         node,
-        javascript()->Construct(arity + 2, p.frequency(), VectorSlotPair()));
+        javascript()->Construct(arity + 2, p.frequency(), FeedbackSource()));
 
     // Try to further reduce the JSConstruct {node}.
     Reduction const reduction = ReduceJSConstruct(node);
@@ -4350,9 +4296,8 @@ Reduction JSCallReducer::ReduceStringPrototypeSubstr(Node* node) {
 Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
   DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
   CallFrequency frequency = CallFrequencyOf(node->op());
-  VectorSlotPair feedback;
   return ReduceCallOrConstructWithArrayLikeOrSpread(node, 1, frequency,
-                                                    feedback);
+                                                    FeedbackSource());
 }
 
 Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
@@ -4361,7 +4306,7 @@ Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
   DCHECK_LE(3u, p.arity());
   int arity = static_cast<int>(p.arity() - 2);
   CallFrequency frequency = p.frequency();
-  VectorSlotPair feedback = p.feedback();
+  FeedbackSource feedback = p.feedback();
   return ReduceCallOrConstructWithArrayLikeOrSpread(node, arity, frequency,
                                                     feedback);
 }
@@ -4382,7 +4327,7 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
   Node* frame_state =
       NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
   Node* deoptimize = graph()->NewNode(
-      common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+      common()->Deoptimize(DeoptimizeKind::kSoft, reason, FeedbackSource()),
       frame_state, effect, control);
   // TODO(bmeurer): This should be on the AdvancedReducer somehow.
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
@@ -4440,6 +4385,8 @@ void JSCallReducer::CheckIfElementsKind(Node* receiver_elements_kind,
 
 // ES6 section 22.1.3.18 Array.prototype.push ( )
 Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
   if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -4456,7 +4403,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
   MapHandles const& receiver_maps = inference.GetMaps();
 
   std::vector<ElementsKind> kinds;
-  if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds, true)) {
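+  // The kinds vector is now passed by pointer to make the out parameter
+  // explicit.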
+  if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds, true)) {
     return inference.NoChange();
   }
   if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
@@ -4574,6 +4521,8 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
 
 // ES6 section 22.1.3.17 Array.prototype.pop ( )
 Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
   if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -4589,7 +4538,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
   MapHandles const& receiver_maps = inference.GetMaps();
 
   std::vector<ElementsKind> kinds;
-  if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
+  if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
     return inference.NoChange();
   }
   if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
@@ -4707,6 +4656,8 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
 
 // ES6 section 22.1.3.22 Array.prototype.shift ( )
 Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
   if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -4725,7 +4676,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
   MapHandles const& receiver_maps = inference.GetMaps();
 
   std::vector<ElementsKind> kinds;
-  if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
+  if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
     return inference.NoChange();
   }
   if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
@@ -4923,6 +4874,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
 
 // ES6 section 22.1.3.23 Array.prototype.slice ( )
 Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   if (!FLAG_turbo_inline_array_builtins) return NoChange();
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -4999,6 +4952,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
 
 // ES6 section 22.1.2.2 Array.isArray ( arg )
 Reduction JSCallReducer::ReduceArrayIsArray(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   // We certainly know that undefined is not an array.
   if (node->op()->ValueInputCount() < 3) {
     Node* value = jsgraph()->FalseConstant();
@@ -5022,6 +4977,8 @@ Reduction JSCallReducer::ReduceArrayIsArray(Node* node) {
 }
 
 Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* context = NodeProperties::GetContextInput(node);
@@ -5047,6 +5004,8 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
 
 // ES #sec-%arrayiteratorprototype%.next
 Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
   Node* iterator = NodeProperties::GetValueInput(node, 1);
@@ -5681,10 +5640,14 @@ Node* JSCallReducer::CreateArtificialFrameState(
       bailout_id, OutputFrameStateCombine::Ignore(), state_info);
   const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
   Node* node0 = graph()->NewNode(op0);
+
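+  // Input 0 of the call node is the target; parameters, including the
+  // receiver, start at input 1.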
+  static constexpr int kTargetInputIndex = 0;
+  static constexpr int kReceiverInputIndex = 1;
+  const int parameter_count_with_receiver = parameter_count + 1;
   std::vector<Node*> params;
-  params.reserve(parameter_count + 1);
-  for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
-    params.push_back(node->InputAt(1 + parameter));
+  params.reserve(parameter_count_with_receiver);
+  for (int i = 0; i < parameter_count_with_receiver; i++) {
+    params.push_back(node->InputAt(kReceiverInputIndex + i));
   }
   const Operator* op_param = common()->StateValues(
       static_cast<int>(params.size()), SparseInputMask::Dense());
@@ -5694,7 +5657,7 @@ Node* JSCallReducer::CreateArtificialFrameState(
     context = jsgraph()->UndefinedConstant();
   }
   return graph()->NewNode(op, params_node, node0, node0, context,
-                          node->InputAt(0), outer_frame_state);
+                          node->InputAt(kTargetInputIndex), outer_frame_state);
 }
 
 Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
@@ -5804,7 +5767,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
 
   // 9. Call executor with both resolving functions
   effect = control = graph()->NewNode(
-      javascript()->Call(4, p.frequency(), VectorSlotPair(),
+      javascript()->Call(4, p.frequency(), FeedbackSource(),
                          ConvertReceiverMode::kNullOrUndefined,
                          SpeculationMode::kDisallowSpeculation),
       executor, jsgraph()->UndefinedConstant(), resolve, reject, context,
@@ -5817,7 +5780,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
         common()->IfException(), exception_control, exception_effect);
     // 10a. Call reject if the call to executor threw.
     exception_effect = exception_control = graph()->NewNode(
-        javascript()->Call(3, p.frequency(), VectorSlotPair(),
+        javascript()->Call(3, p.frequency(), FeedbackSource(),
                            ConvertReceiverMode::kNullOrUndefined,
                            SpeculationMode::kDisallowSpeculation),
         reject, jsgraph()->UndefinedConstant(), reason, context, frame_state,
@@ -5928,9 +5891,7 @@ bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
   for (Handle<Map> map : receiver_maps) {
     MapRef receiver_map(broker(), map);
     if (!receiver_map.IsJSPromiseMap()) return false;
-    if (!FLAG_concurrent_inlining) {
-      receiver_map.SerializePrototype();
-    } else if (!receiver_map.serialized_prototype()) {
+    if (FLAG_concurrent_inlining && !receiver_map.serialized_prototype()) {
       TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map);
       return false;
     }
@@ -6109,7 +6070,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
 }
 
 Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
 
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -6177,7 +6138,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
 
 // ES section #sec-promise.resolve
 Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
 
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -6293,8 +6254,13 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
         jsgraph()->Constant(TYPE##_ELEMENTS -                          \
                             FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));   \
     control = graph()->NewNode(common()->Branch(), check, control);    \
-    values.push_back(jsgraph()->HeapConstant(                          \
-        factory()->InternalizeUtf8String(#Type "Array")));             \
+    if (FLAG_concurrent_inlining) {                                    \
+      values.push_back(jsgraph()->Constant(                            \
+          broker()->GetTypedArrayStringTag(TYPE##_ELEMENTS)));         \
+    } else {                                                           \
+      values.push_back(jsgraph()->HeapConstant(                        \
+          factory()->InternalizeUtf8String(#Type "Array")));           \
+    }                                                                  \
     effects.push_back(effect);                                         \
     controls.push_back(graph()->NewNode(common()->IfTrue(), control)); \
     control = graph()->NewNode(common()->IfFalse(), control);          \
@@ -6536,9 +6502,10 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
     MapInference inference(broker(), receiver, effect);
     if (!inference.HaveMaps()) return NoChange();
     MapHandles const& receiver_maps = inference.GetMaps();
-    receiver_instance_type = receiver_maps[0]->instance_type();
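+    // Read instance types through MapRef so the accesses go via the broker
+    // instead of dereferencing raw Map handles.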
+    receiver_instance_type = MapRef(broker(), receiver_maps[0]).instance_type();
     for (size_t i = 1; i < receiver_maps.size(); ++i) {
-      if (receiver_maps[i]->instance_type() != receiver_instance_type) {
+      if (MapRef(broker(), receiver_maps[i]).instance_type() !=
+          receiver_instance_type) {
         return inference.NoChange();
       }
     }
@@ -6799,6 +6766,8 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
 }
 
 Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   Node* value = node->op()->ValueInputCount() >= 3
                     ? NodeProperties::GetValueInput(node, 2)
                     : jsgraph()->UndefinedConstant();
@@ -6811,6 +6780,8 @@ Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) {
 
 Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
     Node* node, InstanceType instance_type, FieldAccess const& access) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
@@ -7142,19 +7113,20 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
   ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
   AccessInfoFactory access_info_factory(broker(), dependencies(),
                                         graph()->zone());
-  if (!FLAG_concurrent_inlining) {
-    // Compute property access info for "exec" on {resolution}.
-    access_info_factory.ComputePropertyAccessInfos(
-        MapHandles(regexp_maps.begin(), regexp_maps.end()),
-        factory()->exec_string(), AccessMode::kLoad, &access_infos);
-  } else {
+  if (FLAG_concurrent_inlining) {
     // Obtain precomputed access infos from the broker.
     for (auto map : regexp_maps) {
       MapRef map_ref(broker(), map);
-      PropertyAccessInfo access_info =
-          broker()->GetAccessInfoForLoadingExec(map_ref);
+      PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
+          map_ref, NameRef(broker(), isolate()->factory()->exec_string()),
+          AccessMode::kLoad);
       access_infos.push_back(access_info);
     }
+  } else {
+    // Compute property access info for "exec" on {resolution}.
+    access_info_factory.ComputePropertyAccessInfos(
+        MapHandles(regexp_maps.begin(), regexp_maps.end()),
+        factory()->exec_string(), AccessMode::kLoad, &access_infos);
   }
 
   PropertyAccessInfo ai_exec =
@@ -7171,7 +7143,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
     JSObjectRef holder_ref(broker(), holder);
 
     // Bail out if the exec method is not the original one.
-    base::Optional<ObjectRef> constant = holder_ref.GetOwnProperty(
+    base::Optional<ObjectRef> constant = holder_ref.GetOwnDataProperty(
         ai_exec.field_representation(), ai_exec.field_index());
     if (!constant.has_value() ||
         !constant->equals(native_context().regexp_exec_function())) {
@@ -7287,7 +7259,7 @@ Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
 Factory* JSCallReducer::factory() const { return isolate()->factory(); }
 
 NativeContextRef JSCallReducer::native_context() const {
-  return broker()->native_context();
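+  // Renamed broker accessor: this is the native context of the compilation
+  // target.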
+  return broker()->target_native_context();
 }
 
 CommonOperatorBuilder* JSCallReducer::common() const {
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index bf3676c5b2202f..66c42cfb63dbd9 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -17,7 +17,6 @@ namespace internal {
 // Forward declarations.
 class Factory;
 class JSGlobalProxy;
-class VectorSlotPair;
 
 namespace compiler {
 
@@ -25,6 +24,7 @@ namespace compiler {
 class CallFrequency;
 class CommonOperatorBuilder;
 class CompilationDependencies;
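+// FeedbackSource, a plain (vector, slot) pair, replaces VectorSlotPair.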
+struct FeedbackSource;
 struct FieldAccess;
 class JSGraph;
 class JSHeapBroker;
@@ -106,7 +106,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
 
   Reduction ReduceCallOrConstructWithArrayLikeOrSpread(
       Node* node, int arity, CallFrequency const& frequency,
-      VectorSlotPair const& feedback);
+      FeedbackSource const& feedback);
   Reduction ReduceJSConstruct(Node* node);
   Reduction ReduceJSConstructWithArrayLike(Node* node);
   Reduction ReduceJSConstructWithSpread(Node* node);
@@ -156,7 +156,6 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
   Reduction ReduceMathImul(Node* node);
   Reduction ReduceMathClz32(Node* node);
   Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value);
-  Reduction ReduceMathHypot(Node* node);
 
   Reduction ReduceNumberIsFinite(Node* node);
   Reduction ReduceNumberIsInteger(Node* node);
@@ -234,7 +233,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
   // k is thus changed, and the effect is changed as well.
   Node* SafeLoadElement(ElementsKind kind, Node* receiver, Node* control,
                         Node** effect, Node** k,
-                        const VectorSlotPair& feedback);
+                        const FeedbackSource& feedback);
 
   Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
                                    int parameter_count, BailoutId bailout_id,
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 4e69db6b9bca6c..cb52ccaccb17f4 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -127,7 +127,7 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
   AllocationBuilder a(jsgraph(), effect, control);
   a.Allocate(slack_tracking_prediction.instance_size());
   a.Store(AccessBuilder::ForMap(), *initial_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
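+  // The empty fixed array is known to be a pointer (never a Smi hash), so
+  // the KnownPointer variant of the properties-or-hash accessor can be used.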
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
           jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(),
           jsgraph()->EmptyFixedArrayConstant());
@@ -180,11 +180,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
                 : native_context().sloppy_arguments_map());
         // Actually allocate and initialize the arguments object.
         AllocationBuilder a(jsgraph(), effect, control);
-        Node* properties = jsgraph()->EmptyFixedArrayConstant();
         STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kTaggedSize);
         a.Allocate(JSSloppyArgumentsObject::kSize);
         a.Store(AccessBuilder::ForMap(), arguments_map);
-        a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+        a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+                jsgraph()->EmptyFixedArrayConstant());
         a.Store(AccessBuilder::ForJSObjectElements(), elements);
         a.Store(AccessBuilder::ForArgumentsLength(), arguments_length);
         a.Store(AccessBuilder::ForArgumentsCallee(), callee);
@@ -209,11 +209,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
             jsgraph()->Constant(native_context().strict_arguments_map());
         // Actually allocate and initialize the arguments object.
         AllocationBuilder a(jsgraph(), effect, control);
-        Node* properties = jsgraph()->EmptyFixedArrayConstant();
         STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kTaggedSize);
         a.Allocate(JSStrictArgumentsObject::kSize);
         a.Store(AccessBuilder::ForMap(), arguments_map);
-        a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+        a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+                jsgraph()->EmptyFixedArrayConstant());
         a.Store(AccessBuilder::ForJSObjectElements(), elements);
         a.Store(AccessBuilder::ForArgumentsLength(), arguments_length);
         RelaxControls(node);
@@ -239,11 +239,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
             native_context().js_array_packed_elements_map());
         // Actually allocate and initialize the jsarray.
         AllocationBuilder a(jsgraph(), effect, control);
-        Node* properties = jsgraph()->EmptyFixedArrayConstant();
         STATIC_ASSERT(JSArray::kSize == 4 * kTaggedSize);
         a.Allocate(JSArray::kSize);
         a.Store(AccessBuilder::ForMap(), jsarray_map);
-        a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+        a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+                jsgraph()->EmptyFixedArrayConstant());
         a.Store(AccessBuilder::ForJSObjectElements(), elements);
         a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), rest_length);
         RelaxControls(node);
@@ -284,12 +284,12 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
                                 : native_context().sloppy_arguments_map());
       // Actually allocate and initialize the arguments object.
       AllocationBuilder a(jsgraph(), effect, control);
-      Node* properties = jsgraph()->EmptyFixedArrayConstant();
       int length = args_state_info.parameter_count() - 1;  // Minus receiver.
       STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kTaggedSize);
       a.Allocate(JSSloppyArgumentsObject::kSize);
       a.Store(AccessBuilder::ForMap(), arguments_map);
-      a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+      a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+              jsgraph()->EmptyFixedArrayConstant());
       a.Store(AccessBuilder::ForJSObjectElements(), elements);
       a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
       a.Store(AccessBuilder::ForArgumentsCallee(), callee);
@@ -320,12 +320,12 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
           jsgraph()->Constant(native_context().strict_arguments_map());
       // Actually allocate and initialize the arguments object.
       AllocationBuilder a(jsgraph(), effect, control);
-      Node* properties = jsgraph()->EmptyFixedArrayConstant();
       int length = args_state_info.parameter_count() - 1;  // Minus receiver.
       STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kTaggedSize);
       a.Allocate(JSStrictArgumentsObject::kSize);
       a.Store(AccessBuilder::ForMap(), arguments_map);
-      a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+      a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+              jsgraph()->EmptyFixedArrayConstant());
       a.Store(AccessBuilder::ForJSObjectElements(), elements);
       a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
       RelaxControls(node);
@@ -357,7 +357,6 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
           jsgraph()->Constant(native_context().js_array_packed_elements_map());
       // Actually allocate and initialize the jsarray.
       AllocationBuilder a(jsgraph(), effect, control);
-      Node* properties = jsgraph()->EmptyFixedArrayConstant();
 
       // -1 to exclude the receiver.
       int argument_count = args_state_info.parameter_count() - 1;
@@ -365,7 +364,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
       STATIC_ASSERT(JSArray::kSize == 4 * kTaggedSize);
       a.Allocate(JSArray::kSize);
       a.Store(AccessBuilder::ForMap(), jsarray_map);
-      a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+      a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+              jsgraph()->EmptyFixedArrayConstant());
       a.Store(AccessBuilder::ForJSObjectElements(), elements);
       a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS),
               jsgraph()->Constant(length));
@@ -406,7 +406,7 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
     int size = parameter_count_no_receiver +
                shared.GetBytecodeArray().register_count();
     AllocationBuilder ab(jsgraph(), effect, control);
-    ab.AllocateArray(size, factory()->fixed_array_map());
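+    // AllocationBuilder::AllocateArray now takes a MapRef, keeping the map
+    // access behind the broker.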
+    ab.AllocateArray(size, MapRef(broker(), factory()->fixed_array_map()));
     for (int i = 0; i < size; ++i) {
       ab.Store(AccessBuilder::ForFixedArraySlot(i),
                jsgraph()->UndefinedConstant());
@@ -416,11 +416,12 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
     // Emit code to allocate the JS[Async]GeneratorObject instance.
     AllocationBuilder a(jsgraph(), effect, control);
     a.Allocate(slack_tracking_prediction.instance_size());
-    Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
     Node* undefined = jsgraph()->UndefinedConstant();
     a.Store(AccessBuilder::ForMap(), initial_map);
-    a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array);
-    a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+    a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+            jsgraph()->EmptyFixedArrayConstant());
+    a.Store(AccessBuilder::ForJSObjectElements(),
+            jsgraph()->EmptyFixedArrayConstant());
     a.Store(AccessBuilder::ForJSGeneratorObjectContext(), context);
     a.Store(AccessBuilder::ForJSGeneratorObjectFunction(), closure);
     a.Store(AccessBuilder::ForJSGeneratorObjectReceiver(), receiver);
@@ -470,7 +471,7 @@ Reduction JSCreateLowering::ReduceNewArray(
   // This has to be kept in sync with src/runtime/runtime-array.cc,
   // where this limit is protected.
   length = effect = graph()->NewNode(
-      simplified()->CheckBounds(VectorSlotPair()), length,
+      simplified()->CheckBounds(FeedbackSource()), length,
       jsgraph()->Constant(JSArray::kInitialMaxFastElementArray), effect,
       control);
 
@@ -480,13 +481,13 @@ Reduction JSCreateLowering::ReduceNewArray(
                            ? simplified()->NewDoubleElements(allocation)
                            : simplified()->NewSmiOrObjectElements(allocation),
                        length, effect, control);
-  Node* properties = jsgraph()->EmptyFixedArrayConstant();
 
   // Perform the allocation of the actual JSArray object.
   AllocationBuilder a(jsgraph(), effect, control);
   a.Allocate(slack_tracking_prediction.instance_size(), allocation);
   a.Store(AccessBuilder::ForMap(), initial_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+          jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(), elements);
   a.Store(AccessBuilder::ForJSArrayLength(initial_map.elements_kind()), length);
   for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
@@ -526,13 +527,13 @@ Reduction JSCreateLowering::ReduceNewArray(
     elements = effect =
         AllocateElements(effect, control, elements_kind, capacity, allocation);
   }
-  Node* properties = jsgraph()->EmptyFixedArrayConstant();
 
   // Perform the allocation of the actual JSArray object.
   AllocationBuilder a(jsgraph(), effect, control);
   a.Allocate(slack_tracking_prediction.instance_size(), allocation);
   a.Store(AccessBuilder::ForMap(), initial_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+          jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(), elements);
   a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
   for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
@@ -565,14 +566,14 @@ Reduction JSCreateLowering::ReduceNewArray(
     for (auto& value : values) {
       if (!NodeProperties::GetType(value).Is(Type::SignedSmall())) {
         value = effect = graph()->NewNode(
-            simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
+            simplified()->CheckSmi(FeedbackSource()), value, effect, control);
       }
     }
   } else if (IsDoubleElementsKind(elements_kind)) {
     for (auto& value : values) {
       if (!NodeProperties::GetType(value).Is(Type::Number())) {
         value = effect =
-            graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+            graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value,
                              effect, control);
       }
       // Make sure we do not store signaling NaNs into double arrays.
@@ -583,14 +584,14 @@ Reduction JSCreateLowering::ReduceNewArray(
   // Setup elements, properties and length.
   Node* elements = effect =
       AllocateElements(effect, control, elements_kind, values, allocation);
-  Node* properties = jsgraph()->EmptyFixedArrayConstant();
   Node* length = jsgraph()->Constant(static_cast<int>(values.size()));
 
   // Perform the allocation of the actual JSArray object.
   AllocationBuilder a(jsgraph(), effect, control);
   a.Allocate(slack_tracking_prediction.instance_size(), allocation);
   a.Store(AccessBuilder::ForMap(), initial_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+          jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(), elements);
   a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
   for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
@@ -735,7 +736,7 @@ Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
              Type::OtherObject());
   a.Store(AccessBuilder::ForMap(),
           native_context().initial_array_iterator_map());
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
           jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(),
           jsgraph()->EmptyFixedArrayConstant());
@@ -761,7 +762,8 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) {
 
   // Create the register file.
   AllocationBuilder ab(jsgraph(), effect, control);
-  ab.AllocateArray(register_count, factory()->fixed_array_map());
+  ab.AllocateArray(register_count,
+                   MapRef(broker(), factory()->fixed_array_map()));
   for (int i = 0; i < register_count; ++i) {
     ab.Store(AccessBuilder::ForFixedArraySlot(i),
              jsgraph()->UndefinedConstant());
@@ -771,11 +773,12 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) {
   // Create the JSAsyncFunctionObject result.
   AllocationBuilder a(jsgraph(), effect, control);
   a.Allocate(JSAsyncFunctionObject::kSize);
-  Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
   a.Store(AccessBuilder::ForMap(),
           native_context().async_function_object_map());
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array);
-  a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+          jsgraph()->EmptyFixedArrayConstant());
+  a.Store(AccessBuilder::ForJSObjectElements(),
+          jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSGeneratorObjectContext(), context);
   a.Store(AccessBuilder::ForJSGeneratorObjectFunction(), closure);
   a.Store(AccessBuilder::ForJSGeneratorObjectReceiver(), receiver);
@@ -844,7 +847,7 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
   a.Store(AccessBuilder::ForMap(),
           MapForCollectionIterationKind(native_context(), p.collection_kind(),
                                         p.iteration_kind()));
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
           jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(),
           jsgraph()->EmptyFixedArrayConstant());
@@ -871,7 +874,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
   Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant();
   if (arity > 0) {
     AllocationBuilder a(jsgraph(), effect, control);
-    a.AllocateArray(arity, factory()->fixed_array_map());
+    a.AllocateArray(arity, MapRef(broker(), factory()->fixed_array_map()));
     for (int i = 0; i < arity; ++i) {
       a.Store(AccessBuilder::ForFixedArraySlot(i),
               NodeProperties::GetValueInput(node, 2 + i));
@@ -884,7 +887,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
   a.Allocate(JSBoundFunction::kSize, AllocationType::kYoung,
              Type::BoundFunction());
   a.Store(AccessBuilder::ForMap(), map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
           jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(),
           jsgraph()->EmptyFixedArrayConstant());
@@ -936,7 +939,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
   AllocationBuilder a(jsgraph(), effect, control);
   a.Allocate(function_map.instance_size(), allocation, Type::Function());
   a.Store(AccessBuilder::ForMap(), function_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
           jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(),
           jsgraph()->EmptyFixedArrayConstant());
@@ -972,7 +975,7 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
   AllocationBuilder a(jsgraph(), effect, graph()->start());
   a.Allocate(JSIteratorResult::kSize);
   a.Store(AccessBuilder::ForMap(), iterator_result_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
           jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(),
           jsgraph()->EmptyFixedArrayConstant());
@@ -995,7 +998,7 @@ Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
   a.Allocate(JSStringIterator::kSize, AllocationType::kYoung,
              Type::OtherObject());
   a.Store(AccessBuilder::ForMap(), map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
           jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(),
           jsgraph()->EmptyFixedArrayConstant());
@@ -1014,11 +1017,10 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
 
   Node* array_map =
       jsgraph()->Constant(native_context().js_array_packed_elements_map());
-  Node* properties = jsgraph()->EmptyFixedArrayConstant();
   Node* length = jsgraph()->Constant(2);
 
   AllocationBuilder aa(jsgraph(), effect, graph()->start());
-  aa.AllocateArray(2, factory()->fixed_array_map());
+  aa.AllocateArray(2, MapRef(broker(), factory()->fixed_array_map()));
   aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
            jsgraph()->ZeroConstant(), key);
   aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
@@ -1028,7 +1030,8 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
   AllocationBuilder a(jsgraph(), elements, graph()->start());
   a.Allocate(JSArray::kSize);
   a.Store(AccessBuilder::ForMap(), array_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+          jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(), elements);
   a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), length);
   STATIC_ASSERT(JSArray::kSize == 4 * kTaggedSize);
@@ -1045,7 +1048,7 @@ Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
   AllocationBuilder a(jsgraph(), effect, graph()->start());
   a.Allocate(promise_map.instance_size());
   a.Store(AccessBuilder::ForMap(), promise_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
           jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(),
           jsgraph()->EmptyFixedArrayConstant());
@@ -1071,8 +1074,12 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  FeedbackVectorRef feedback_vector(broker(), p.feedback().vector());
-  ObjectRef feedback = feedback_vector.get(p.feedback().slot());
+  FeedbackVectorRef feedback_vector(broker(), p.feedback().vector);
+  ObjectRef feedback = feedback_vector.get(p.feedback().slot);
+  // TODO(turbofan): We should consider creating a ProcessedFeedback for
+  // allocation sites/boilerplates so that we can use GetFeedback here. Then
+  // we can eventually get rid of the additional copy of feedback slots that
+  // we currently have in FeedbackVectorData.
   if (feedback.IsAllocationSite()) {
     AllocationSiteRef site = feedback.AsAllocationSite();
     if (site.IsFastLiteral()) {
@@ -1094,8 +1101,12 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
 Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode());
   FeedbackParameter const& p = FeedbackParameterOf(node->op());
-  FeedbackVectorRef fv(broker(), p.feedback().vector());
-  ObjectRef feedback = fv.get(p.feedback().slot());
+  FeedbackVectorRef fv(broker(), p.feedback().vector);
+  ObjectRef feedback = fv.get(p.feedback().slot);
+  // TODO(turbofan): We should consider creating a ProcessedFeedback for
+  // allocation sites/boilerplates so that we can use GetFeedback here. Then
+  // we can eventually get rid of the additional copy of feedback slots that
+  // we currently have in FeedbackVectorData.
   if (feedback.IsAllocationSite()) {
     AllocationSiteRef site = feedback.AsAllocationSite();
     DCHECK(!site.PointsToLiteral());
@@ -1128,13 +1139,13 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
 
   // Setup elements and properties.
   Node* elements = jsgraph()->EmptyFixedArrayConstant();
-  Node* properties = jsgraph()->EmptyFixedArrayConstant();
 
   // Perform the allocation of the actual JSArray object.
   AllocationBuilder a(jsgraph(), effect, control);
   a.Allocate(map.instance_size());
   a.Store(AccessBuilder::ForMap(), js_object_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+          jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(), elements);
   for (int i = 0; i < map.GetInObjectProperties(); i++) {
     a.Store(AccessBuilder::ForJSObjectInObjectProperty(map, i),
@@ -1152,8 +1163,12 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  FeedbackVectorRef feedback_vector(broker(), p.feedback().vector());
-  ObjectRef feedback = feedback_vector.get(p.feedback().slot());
+  FeedbackVectorRef feedback_vector(broker(), p.feedback().vector);
+  ObjectRef feedback = feedback_vector.get(p.feedback().slot);
+  // TODO(turbofan): We should consider creating a ProcessedFeedback for
+  // allocation sites/boilerplates so that we can use GetFeedback here. Then
+  // we can eventually get rid of the additional copy of feedback slots that
+  // we currently have in FeedbackVectorData.
   if (feedback.IsJSRegExp()) {
     JSRegExpRef boilerplate = feedback.AsJSRegExp();
     Node* value = effect = AllocateLiteralRegExp(effect, control, boilerplate);
@@ -1192,7 +1207,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
       default:
         UNREACHABLE();
     }
-    a.AllocateContext(context_length, map);
+    a.AllocateContext(context_length, MapRef(broker(), map));
     a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX),
             scope_info);
     a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
@@ -1220,7 +1235,8 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
 
   AllocationBuilder a(jsgraph(), effect, control);
   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-  a.AllocateContext(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
+  a.AllocateContext(Context::MIN_CONTEXT_SLOTS,
+                    MapRef(broker(), factory()->with_context_map()));
   a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX), scope_info);
   a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
   a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -1243,7 +1259,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
   AllocationBuilder a(jsgraph(), effect, control);
   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
   a.AllocateContext(Context::MIN_CONTEXT_SLOTS + 1,
-                    factory()->catch_context_map());
+                    MapRef(broker(), factory()->catch_context_map()));
   a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX), scope_info);
   a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
   a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -1271,7 +1287,8 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
 
     AllocationBuilder a(jsgraph(), effect, control);
     STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-    a.AllocateContext(context_length, factory()->block_context_map());
+    a.AllocateContext(context_length,
+                      MapRef(broker(), factory()->block_context_map()));
     a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX),
             scope_info);
     a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
@@ -1293,12 +1310,13 @@ namespace {
 base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker,
                                           HeapObjectRef prototype) {
   MapRef standard_map =
-      broker->native_context().object_function().initial_map();
+      broker->target_native_context().object_function().initial_map();
   if (prototype.equals(standard_map.prototype())) {
     return standard_map;
   }
   if (prototype.map().oddball_type() == OddballType::kNull) {
-    return broker->native_context().slow_object_with_null_prototype_map();
+    return broker->target_native_context()
+        .slow_object_with_null_prototype_map();
   }
   if (prototype.IsJSObject()) {
     return prototype.AsJSObject().GetObjectCreateMap();
@@ -1401,7 +1419,8 @@ Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
 
   // Actually allocate the backing store.
   AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(argument_count, factory()->fixed_array_map());
+  a.AllocateArray(argument_count,
+                  MapRef(broker(), factory()->fixed_array_map()));
   for (int i = 0; i < argument_count; ++i, ++parameters_it) {
     DCHECK_NOT_NULL((*parameters_it).node);
     a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
@@ -1432,7 +1451,7 @@ Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
 
   // Actually allocate the backing store.
   AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(num_elements, factory()->fixed_array_map());
+  a.AllocateArray(num_elements, MapRef(broker(), factory()->fixed_array_map()));
   for (int i = 0; i < num_elements; ++i, ++parameters_it) {
     DCHECK_NOT_NULL((*parameters_it).node);
     a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
@@ -1471,7 +1490,8 @@ Node* JSCreateLowering::AllocateAliasedArguments(
   // another indirection away and then linked into the parameter map below,
   // whereas mapped argument values are replaced with a hole instead.
   AllocationBuilder aa(jsgraph(), effect, control);
-  aa.AllocateArray(argument_count, factory()->fixed_array_map());
+  aa.AllocateArray(argument_count,
+                   MapRef(broker(), factory()->fixed_array_map()));
   for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
     aa.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
              jsgraph()->TheHoleConstant());
@@ -1485,7 +1505,8 @@ Node* JSCreateLowering::AllocateAliasedArguments(
 
   // Actually allocate the backing store.
   AllocationBuilder a(jsgraph(), arguments, control);
-  a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
+  a.AllocateArray(mapped_count + 2,
+                  MapRef(broker(), factory()->sloppy_arguments_elements_map()));
   a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0),
           context);
   a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1),
@@ -1530,7 +1551,8 @@ Node* JSCreateLowering::AllocateAliasedArguments(
 
   // Actually allocate the backing store.
   AllocationBuilder a(jsgraph(), arguments, control);
-  a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
+  a.AllocateArray(mapped_count + 2,
+                  MapRef(broker(), factory()->sloppy_arguments_elements_map()));
   a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0),
           context);
   a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1),
@@ -1565,7 +1587,7 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
 
   // Actually allocate the backing store.
   AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(capacity, elements_map, allocation);
+  a.AllocateArray(capacity, MapRef(broker(), elements_map), allocation);
   for (int i = 0; i < capacity; ++i) {
     Node* index = jsgraph()->Constant(i);
     a.Store(access, index, value);
@@ -1590,7 +1612,7 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
 
   // Actually allocate the backing store.
   AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(capacity, elements_map, allocation);
+  a.AllocateArray(capacity, MapRef(broker(), elements_map), allocation);
   for (int i = 0; i < capacity; ++i) {
     Node* index = jsgraph()->Constant(i);
     a.Store(access, index, values[i]);
@@ -1601,9 +1623,6 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
 Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
                                             JSObjectRef boilerplate,
                                             AllocationType allocation) {
-  // Setup the properties backing store.
-  Node* properties = jsgraph()->EmptyFixedArrayConstant();
-
   // Compute the in-object properties to store first (might have effects).
   MapRef boilerplate_map = boilerplate.map();
   ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
@@ -1616,6 +1635,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
     DCHECK_EQ(kData, property_details.kind());
     NameRef property_name = boilerplate_map.GetPropertyKey(i);
     FieldIndex index = boilerplate_map.GetFieldIndexFor(i);
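+    // Field constness is now described by ConstFieldInfo, which records the
+    // map that owns the const field (here the boilerplate map).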
+    ConstFieldInfo const_field_info(boilerplate_map.object());
     FieldAccess access = {kTaggedBase,
                           index.offset(),
                           property_name.object(),
@@ -1624,7 +1644,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
                           MachineType::TypeCompressedTagged(),
                           kFullWriteBarrier,
                           LoadSensitivity::kUnsafe,
-                          property_details.constness()};
+                          const_field_info};
     Node* value;
     if (boilerplate_map.IsUnboxedDoubleField(i)) {
       access.machine_type = MachineType::Float64();
@@ -1637,7 +1657,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
         // the field. The hole NaN should therefore be unobservable.
         // Load elimination expects there to be at most one const store to any
         // given field, so we always mark the unobservable ones as mutable.
-        access.constness = PropertyConstness::kMutable;
+        access.const_field_info = ConstFieldInfo::None();
       }
       value = jsgraph()->Constant(bit_cast<double>(value_bits));
     } else {
@@ -1647,19 +1667,19 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
           boilerplate_value.AsHeapObject().map().oddball_type() ==
               OddballType::kUninitialized;
       if (is_uninitialized) {
-        access.constness = PropertyConstness::kMutable;
+        access.const_field_info = ConstFieldInfo::None();
       }
       if (boilerplate_value.IsJSObject()) {
         JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
         value = effect = AllocateFastLiteral(effect, control,
                                              boilerplate_object, allocation);
       } else if (property_details.representation().IsDouble()) {
-        double number = boilerplate_value.AsMutableHeapNumber().value();
+        double number = boilerplate_value.AsHeapNumber().value();
         // Allocate a mutable HeapNumber box and store the value into it.
         AllocationBuilder builder(jsgraph(), effect, control);
         builder.Allocate(HeapNumber::kSize, allocation);
         builder.Store(AccessBuilder::ForMap(),
-                      factory()->mutable_heap_number_map());
+                      MapRef(broker(), factory()->heap_number_map()));
         builder.Store(AccessBuilder::ForHeapNumberValue(),
                       jsgraph()->Constant(number));
         value = effect = builder.Finish();
@@ -1695,7 +1715,8 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
   builder.Allocate(boilerplate_map.instance_size(), allocation,
                    Type::For(boilerplate_map));
   builder.Store(AccessBuilder::ForMap(), boilerplate_map);
-  builder.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+  builder.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+                jsgraph()->EmptyFixedArrayConstant());
   builder.Store(AccessBuilder::ForJSObjectElements(), elements);
   if (boilerplate.IsJSArray()) {
     JSArrayRef boilerplate_array = boilerplate.AsJSArray();
@@ -1751,7 +1772,7 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
 
   // Allocate the backing store array and store the elements.
   AllocationBuilder builder(jsgraph(), effect, control);
-  builder.AllocateArray(elements_length, elements_map.object(), allocation);
+  builder.AllocateArray(elements_length, elements_map, allocation);
   ElementAccess const access =
       (elements_map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE)
           ? AccessBuilder::ForFixedDoubleArrayElement()
@@ -1811,7 +1832,7 @@ SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
 }
 
 NativeContextRef JSCreateLowering::native_context() const {
-  return broker()->native_context();
+  return broker()->target_native_context();
 }
 
 }  // namespace compiler
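The hunks above replace the per-field PropertyConstness in FieldAccess with a ConstFieldInfo, constructed from the owner map (boilerplate_map.object()) or reset via ConstFieldInfo::None() for stores that must stay mutable. A minimal compilable sketch of the shape those call sites imply (stand-in Map type, not the actual V8 definition):

#include <cassert>

// Stand-in for v8::internal::Map; only pointer identity matters here.
struct Map {};

// Sketch: a const field remembers which map owns it (load elimination needs
// this to bound const stores), and None() degrades the access to a plain
// mutable field.
struct ConstFieldInfo {
  const Map* owner_map = nullptr;  // nullptr <=> mutable

  ConstFieldInfo() = default;
  explicit ConstFieldInfo(const Map* owner) : owner_map(owner) {}

  bool IsConst() const { return owner_map != nullptr; }
  static ConstFieldInfo None() { return ConstFieldInfo(); }
};

int main() {
  Map boilerplate_map;
  ConstFieldInfo info(&boilerplate_map);
  assert(info.IsConst());
  // The unobservable-store paths above downgrade to mutable:
  info = ConstFieldInfo::None();
  assert(!info.IsConst());
  return 0;
}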
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 0a6f90975feb38..d2a9b675f96199 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -9,10 +9,12 @@
 #include "src/codegen/code-factory.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/processed-feedback.h"
 #include "src/objects/feedback-cell.h"
 #include "src/objects/feedback-vector.h"
 #include "src/objects/scope-info.h"
@@ -31,8 +33,9 @@ CallDescriptor::Flags FrameStateFlagForCall(Node* node) {
 
 }  // namespace
 
-JSGenericLowering::JSGenericLowering(JSGraph* jsgraph, Editor* editor)
-    : AdvancedReducer(editor), jsgraph_(jsgraph) {}
+JSGenericLowering::JSGenericLowering(JSGraph* jsgraph, Editor* editor,
+                                     JSHeapBroker* broker)
+    : AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {}
 
 JSGenericLowering::~JSGenericLowering() = default;
 
@@ -144,6 +147,22 @@ void JSGenericLowering::LowerJSStrictEqual(Node* node) {
                       Operator::kEliminatable);
 }
 
+namespace {
+bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
+                                     JSHeapBroker* broker) {
+  ProcessedFeedback const& feedback = broker->GetFeedback(source);
+
+  if (feedback.kind() == ProcessedFeedback::kElementAccess) {
+    return feedback.AsElementAccess().transition_groups().empty();
+  } else if (feedback.kind() == ProcessedFeedback::kNamedAccess) {
+    return feedback.AsNamedAccess().maps().empty();
+  } else if (feedback.kind() == ProcessedFeedback::kInsufficient) {
+    return false;
+  }
+  UNREACHABLE();
+}
+}  // namespace
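ShouldUseMegamorphicLoadBuiltin replaces the old p.feedback().ic_state() == MEGAMORPHIC test: with the heap broker in the loop, "megamorphic" is inferred from processed feedback whose map/transition-group sets are empty, while insufficient feedback keeps the regular IC builtin. A toy model with stand-in types (the enum and function names follow the diff, the structures do not):

#include <cassert>

enum class FeedbackKind { kElementAccess, kNamedAccess, kInsufficient };

struct ProcessedFeedback {
  FeedbackKind kind;
  int num_maps;  // size of maps() / transition_groups() in the real code
};

bool ShouldUseMegamorphicLoadBuiltin(const ProcessedFeedback& feedback) {
  switch (feedback.kind) {
    case FeedbackKind::kElementAccess:
    case FeedbackKind::kNamedAccess:
      // Empty feedback sets mean the access site is generic: use the
      // megamorphic builtin and skip the IC's map-tracking machinery.
      return feedback.num_maps == 0;
    case FeedbackKind::kInsufficient:
      // Not enough feedback yet: keep the regular, updating IC builtin.
      return false;
  }
  return false;  // UNREACHABLE() in the real code
}

int main() {
  assert(ShouldUseMegamorphicLoadBuiltin({FeedbackKind::kNamedAccess, 0}));
  assert(!ShouldUseMegamorphicLoadBuiltin({FeedbackKind::kNamedAccess, 2}));
  assert(!ShouldUseMegamorphicLoadBuiltin({FeedbackKind::kInsufficient, 0}));
  return 0;
}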
+
 void JSGenericLowering::LowerJSLoadProperty(Node* node) {
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   const PropertyAccess& p = PropertyAccessOf(node->op());
@@ -152,16 +171,16 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
   if (outer_state->opcode() != IrOpcode::kFrameState) {
     Callable callable = Builtins::CallableFor(
-        isolate(), p.feedback().ic_state() == MEGAMORPHIC
+        isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
                        ? Builtins::kKeyedLoadICTrampoline_Megamorphic
                        : Builtins::kKeyedLoadICTrampoline);
     ReplaceWithStubCall(node, callable, flags);
   } else {
     Callable callable = Builtins::CallableFor(
-        isolate(), p.feedback().ic_state() == MEGAMORPHIC
+        isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
                        ? Builtins::kKeyedLoadIC_Megamorphic
                        : Builtins::kKeyedLoadIC);
-    Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+    Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
     node->InsertInput(zone(), 3, vector);
     ReplaceWithStubCall(node, callable, flags);
   }
@@ -182,16 +201,16 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
   if (outer_state->opcode() != IrOpcode::kFrameState) {
     Callable callable = Builtins::CallableFor(
-        isolate(), p.feedback().ic_state() == MEGAMORPHIC
+        isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
                        ? Builtins::kLoadICTrampoline_Megamorphic
                        : Builtins::kLoadICTrampoline);
     ReplaceWithStubCall(node, callable, flags);
   } else {
-    Callable callable =
-        Builtins::CallableFor(isolate(), p.feedback().ic_state() == MEGAMORPHIC
-                                             ? Builtins::kLoadIC_Megamorphic
-                                             : Builtins::kLoadIC);
-    Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+    Callable callable = Builtins::CallableFor(
+        isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+                       ? Builtins::kLoadIC_Megamorphic
+                       : Builtins::kLoadIC);
+    Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
     node->InsertInput(zone(), 3, vector);
     ReplaceWithStubCall(node, callable, flags);
   }
@@ -210,12 +229,23 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
   } else {
     Callable callable =
         CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
-    Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+    Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
     node->InsertInput(zone(), 2, vector);
     ReplaceWithStubCall(node, callable, flags);
   }
 }
 
+void JSGenericLowering::LowerJSGetIterator(Node* node) {
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  const PropertyAccess& p = PropertyAccessOf(node->op());
+  node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
+  node->InsertInput(zone(), 2, vector);
+  Callable callable =
+      Builtins::CallableFor(isolate(), Builtins::kGetIteratorWithFeedback);
+  ReplaceWithStubCall(node, callable, flags);
+}
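The new LowerJSGetIterator threads the feedback slot and vector into the node before calling the GetIteratorWithFeedback builtin. Modeling InsertInput as vector insertion shows the resulting operand order; input 0 holding the receiver is an assumption for illustration, only the slot/vector positions (1 and 2) come from the diff:

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> inputs = {"receiver", "context"};
  inputs.insert(inputs.begin() + 1, "slot_smi");         // InsertInput(zone, 1, ...)
  inputs.insert(inputs.begin() + 2, "feedback_vector");  // InsertInput(zone, 2, ...)
  assert(inputs[1] == "slot_smi");
  assert(inputs[2] == "feedback_vector");
  return 0;
}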
+
 void JSGenericLowering::LowerJSStoreProperty(Node* node) {
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   PropertyAccess const& p = PropertyAccessOf(node->op());
@@ -229,7 +259,7 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
   } else {
     Callable callable =
         Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
-    Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+    Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
     node->InsertInput(zone(), 4, vector);
     ReplaceWithStubCall(node, callable, flags);
   }
@@ -252,7 +282,7 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
     ReplaceWithStubCall(node, callable, flags);
   } else {
     Callable callable = Builtins::CallableFor(isolate(), Builtins::kStoreIC);
-    Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+    Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
     node->InsertInput(zone(), 4, vector);
     ReplaceWithStubCall(node, callable, flags);
   }
@@ -270,7 +300,7 @@ void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
     ReplaceWithStubCall(node, callable, flags);
   } else {
     Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
-    Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+    Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
     node->InsertInput(zone(), 4, vector);
     ReplaceWithStubCall(node, callable, flags);
   }
@@ -290,7 +320,7 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
   } else {
     Callable callable =
         Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
-    Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+    Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
     node->InsertInput(zone(), 3, vector);
     ReplaceWithStubCall(node, callable, flags);
   }
@@ -298,8 +328,9 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
 
 void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
   FeedbackParameter const& p = FeedbackParameterOf(node->op());
+  RelaxControls(node);
   node->InsertInputs(zone(), 4, 2);
-  node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector()));
+  node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector));
   node->ReplaceInput(5, jsgraph()->SmiConstant(p.feedback().index()));
   ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral);
 }
@@ -311,7 +342,7 @@ void JSGenericLowering::LowerJSStoreInArrayLiteral(Node* node) {
   FeedbackParameter const& p = FeedbackParameterOf(node->op());
   RelaxControls(node);
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  node->InsertInput(zone(), 4, jsgraph()->HeapConstant(p.feedback().vector()));
+  node->InsertInput(zone(), 4, jsgraph()->HeapConstant(p.feedback().vector));
   ReplaceWithStubCall(node, callable, flags);
 }
 
@@ -513,7 +544,7 @@ void JSGenericLowering::LowerJSCreateTypedArray(Node* node) {
 void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
-  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector()));
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
 
@@ -533,7 +564,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
 void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   FeedbackParameter const& p = FeedbackParameterOf(node->op());
-  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector()));
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
   node->RemoveInput(4);  // control
   Callable callable =
@@ -551,7 +582,7 @@ void JSGenericLowering::LowerJSCreateArrayFromIterable(Node* node) {
 void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
-  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector()));
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -576,7 +607,7 @@ void JSGenericLowering::LowerJSCloneObject(Node* node) {
       Builtins::CallableFor(isolate(), Builtins::kCloneObjectIC);
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.flags()));
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
-  node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector()));
+  node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector));
   ReplaceWithStubCall(node, callable, flags);
 }
 
@@ -589,7 +620,7 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   Callable callable =
       Builtins::CallableFor(isolate(), Builtins::kCreateRegExpLiteral);
-  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector()));
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -812,14 +843,13 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  Node* limit = effect = graph()->NewNode(
-      machine()->Load(MachineType::Pointer()),
-      jsgraph()->ExternalConstant(
-          ExternalReference::address_of_stack_limit(isolate())),
-      jsgraph()->IntPtrConstant(0), effect, control);
-  Node* pointer = graph()->NewNode(machine()->LoadStackPointer());
+  Node* limit = effect =
+      graph()->NewNode(machine()->Load(MachineType::Pointer()),
+                       jsgraph()->ExternalConstant(
+                           ExternalReference::address_of_jslimit(isolate())),
+                       jsgraph()->IntPtrConstant(0), effect, control);
 
-  Node* check = graph()->NewNode(machine()->UintLessThan(), limit, pointer);
+  Node* check = graph()->NewNode(machine()->StackPointerGreaterThan(), limit);
   Node* branch =
       graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
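The stack check now loads the dedicated jslimit address and hands it to the single-input StackPointerGreaterThan operator, rather than loading the stack pointer as a separate node and comparing with UintLessThan. In plain C++ the check amounts to roughly the following approximation (a local's address stands in for the real stack pointer):

#include <cassert>
#include <cstdint>

bool StackPointerGreaterThan(uintptr_t limit) {
  int probe = 0;
  uintptr_t sp = reinterpret_cast<uintptr_t>(&probe);  // approximation only
  return sp > limit;
}

int main() {
  assert(StackPointerGreaterThan(0));  // any live stack passes a zero limit
  return 0;
}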
 
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 2a395ca5e86082..2a4ac808b1ad9c 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -23,7 +23,7 @@ class Linkage;
 // Lowers JS-level operators to runtime and IC calls in the "generic" case.
 class JSGenericLowering final : public AdvancedReducer {
  public:
-  JSGenericLowering(JSGraph* jsgraph, Editor* editor);
+  JSGenericLowering(JSGraph* jsgraph, Editor* editor, JSHeapBroker* broker);
   ~JSGenericLowering() final;
 
   const char* reducer_name() const override { return "JSGenericLowering"; }
@@ -48,9 +48,11 @@ class JSGenericLowering final : public AdvancedReducer {
   Graph* graph() const;
   CommonOperatorBuilder* common() const;
   MachineOperatorBuilder* machine() const;
+  JSHeapBroker* broker() const { return broker_; }
 
  private:
   JSGraph* const jsgraph_;
+  JSHeapBroker* const broker_;
 };
 
 }  // namespace compiler
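The header change mirrors the .cc side: the reducer gains a non-owning JSHeapBroker* alongside its JSGraph*, exposed through broker() for the feedback queries above. A minimal sketch of that wiring with stand-in types:

struct JSGraph {};
struct Editor {};
struct JSHeapBroker {};

class GenericLoweringSketch {
 public:
  GenericLoweringSketch(JSGraph* jsgraph, Editor* editor, JSHeapBroker* broker)
      : jsgraph_(jsgraph), broker_(broker) {
    (void)editor;  // the real class forwards this to AdvancedReducer
  }
  JSGraph* jsgraph() const { return jsgraph_; }
  JSHeapBroker* broker() const { return broker_; }

 private:
  JSGraph* const jsgraph_;
  JSHeapBroker* const broker_;
};

int main() {
  JSGraph graph;
  Editor editor;
  JSHeapBroker broker;
  GenericLoweringSketch lowering(&graph, &editor, &broker);
  return (lowering.broker() == &broker && lowering.jsgraph() == &graph) ? 0 : 1;
}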
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 43a4beadeeb754..beed7820b40057 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -46,26 +46,6 @@ Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
                                           argv_mode, builtin_exit_frame));
 }
 
-Node* JSGraph::Constant(Handle<Object> value) {
-  // Dereference the handle to determine if a number constant or other
-  // canonicalized node can be used.
-  if (value->IsNumber()) {
-    return Constant(value->Number());
-  } else if (value->IsUndefined(isolate())) {
-    return UndefinedConstant();
-  } else if (value->IsTrue(isolate())) {
-    return TrueConstant();
-  } else if (value->IsFalse(isolate())) {
-    return FalseConstant();
-  } else if (value->IsNull(isolate())) {
-    return NullConstant();
-  } else if (value->IsTheHole(isolate())) {
-    return TheHoleConstant();
-  } else {
-    return HeapConstant(Handle<HeapObject>::cast(value));
-  }
-}
-
 Node* JSGraph::Constant(const ObjectRef& ref) {
   if (ref.IsSmi()) return Constant(ref.AsSmi());
   OddballType oddball_type =
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index ec36c26034b1ba..83c81b1010b8de 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -46,16 +46,12 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
   // Used for stubs and runtime functions with no context. (alias: SMI zero)
   Node* NoContextConstant() { return ZeroConstant(); }
 
-  // Creates a HeapConstant node, possibly canonicalized, and may access the
-  // heap to inspect the object.
+  // Creates a HeapConstant node, possibly canonicalized.
   Node* HeapConstant(Handle<HeapObject> value);
 
   // Creates a Constant node of the appropriate type for the given object.
-  // Accesses the heap to inspect the object and determine whether one of the
+  // Inspects the (serialized) object and determines whether one of the
   // canonicalized globals or a number constant should be returned.
-  Node* Constant(Handle<Object> value);
-
-  // Like above, but doesn't access the heap directly.
   Node* Constant(const ObjectRef& value);
 
   // Creates a NumberConstant node, usually canonicalized.
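With the Handle<Object> overload gone, every Constant call site dispatches on an ObjectRef whose relevant bits were captured during serialization, so lowering no longer touches the main heap. A toy dispatcher illustrating the idea (stand-in ObjectRef; the real one wraps broker data):

#include <cassert>
#include <string>

struct ObjectRef {
  bool is_smi;
  int smi_value;
  std::string oddball;  // e.g. "Undefined", "Null"; empty if none
};

std::string Constant(const ObjectRef& ref) {
  if (ref.is_smi) return "SmiConstant(" + std::to_string(ref.smi_value) + ")";
  if (!ref.oddball.empty()) return ref.oddball + "Constant";
  return "HeapConstant";
}

int main() {
  assert(Constant({true, 42, ""}) == "SmiConstant(42)");
  assert(Constant({false, 0, "Undefined"}) == "UndefinedConstant");
  assert(Constant({false, 0, ""}) == "HeapConstant");
  return 0;
}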
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index c79c793ae69a63..7466a80f8515ef 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -16,7 +16,6 @@
 #include "src/compiler/bytecode-analysis.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/per-isolate-compiler-cache.h"
-#include "src/compiler/vector-slot-pair.h"
 #include "src/init/bootstrapper.h"
 #include "src/objects/allocation-site-inl.h"
 #include "src/objects/api-callbacks.h"
@@ -26,6 +25,7 @@
 #include "src/objects/js-array-buffer-inl.h"
 #include "src/objects/js-array-inl.h"
 #include "src/objects/js-regexp-inl.h"
+#include "src/objects/literal-objects-inl.h"
 #include "src/objects/module-inl.h"
 #include "src/objects/objects-inl.h"
 #include "src/objects/template-objects-inl.h"
@@ -256,13 +256,14 @@ class JSObjectField {
   uint64_t number_bits_ = 0;
 };
 
-struct FieldIndexHasher {
-  size_t operator()(FieldIndex field_index) const {
-    return field_index.index();
-  }
+class JSReceiverData : public HeapObjectData {
+ public:
+  JSReceiverData(JSHeapBroker* broker, ObjectData** storage,
+                 Handle<JSReceiver> object)
+      : HeapObjectData(broker, storage, object) {}
 };
 
-class JSObjectData : public HeapObjectData {
+class JSObjectData : public JSReceiverData {
  public:
   JSObjectData(JSHeapBroker* broker, ObjectData** storage,
                Handle<JSObject> object);
@@ -277,16 +278,22 @@ class JSObjectData : public HeapObjectData {
   FixedArrayBaseData* elements() const;
 
   void SerializeObjectCreateMap(JSHeapBroker* broker);
-  MapData* object_create_map() const {  // Can be nullptr.
-    CHECK(serialized_object_create_map_);
+
+  MapData* object_create_map(JSHeapBroker* broker) const {  // Can be nullptr.
+    if (!serialized_object_create_map_) {
+      DCHECK_NULL(object_create_map_);
+      TRACE_MISSING(broker, "object_create_map on " << this);
+    }
     return object_create_map_;
   }
 
-  ObjectData* GetOwnConstantElement(JSHeapBroker* broker, uint32_t index,
-                                    bool serialize);
-  ObjectData* GetOwnProperty(JSHeapBroker* broker,
-                             Representation representation,
-                             FieldIndex field_index, bool serialize);
+  ObjectData* GetOwnConstantElement(
+      JSHeapBroker* broker, uint32_t index,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+  ObjectData* GetOwnDataProperty(
+      JSHeapBroker* broker, Representation representation,
+      FieldIndex field_index,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 
   // This method is only used to assert our invariants.
   bool cow_or_empty_elements_tenured() const;
@@ -316,7 +323,9 @@ class JSObjectData : public HeapObjectData {
   // (2) are known not to (possibly they don't exist at all).
   // In case (2), the second pair component is nullptr.
   // For simplicity, this may in theory overlap with inobject_fields_.
-  ZoneUnorderedMap<FieldIndex, ObjectData*, FieldIndexHasher> own_properties_;
+  // The keys of the map are the property_index() values of the
+  // respective property FieldIndex values.
+  ZoneUnorderedMap<int, ObjectData*> own_properties_;
 };
 
 void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
@@ -353,24 +362,25 @@ base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
   return base::nullopt;
 }
 
-ObjectRef GetOwnPropertyFromHeap(JSHeapBroker* broker,
-                                 Handle<JSObject> receiver,
-                                 Representation representation,
-                                 FieldIndex field_index) {
+ObjectRef GetOwnDataPropertyFromHeap(JSHeapBroker* broker,
+                                     Handle<JSObject> receiver,
+                                     Representation representation,
+                                     FieldIndex field_index) {
   Handle<Object> constant =
       JSObject::FastPropertyAt(receiver, representation, field_index);
   return ObjectRef(broker, constant);
 }
+
 }  // namespace
 
 ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
                                                 uint32_t index,
-                                                bool serialize) {
+                                                SerializationPolicy policy) {
   for (auto const& p : own_constant_elements_) {
     if (p.first == index) return p.second;
   }
 
-  if (!serialize) {
+  if (policy == SerializationPolicy::kAssumeSerialized) {
     TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
     return nullptr;
   }
@@ -382,24 +392,24 @@ ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
   return result;
 }
 
-ObjectData* JSObjectData::GetOwnProperty(JSHeapBroker* broker,
-                                         Representation representation,
-                                         FieldIndex field_index,
-                                         bool serialize) {
-  auto p = own_properties_.find(field_index);
+ObjectData* JSObjectData::GetOwnDataProperty(JSHeapBroker* broker,
+                                             Representation representation,
+                                             FieldIndex field_index,
+                                             SerializationPolicy policy) {
+  auto p = own_properties_.find(field_index.property_index());
   if (p != own_properties_.end()) return p->second;
 
-  if (!serialize) {
+  if (policy == SerializationPolicy::kAssumeSerialized) {
     TRACE_MISSING(broker, "knowledge about property with index "
                               << field_index.property_index() << " on "
                               << this);
     return nullptr;
   }
 
-  ObjectRef property = GetOwnPropertyFromHeap(
+  ObjectRef property = GetOwnDataPropertyFromHeap(
       broker, Handle<JSObject>::cast(object()), representation, field_index);
   ObjectData* result(property.data());
-  own_properties_.insert(std::make_pair(field_index, result));
+  own_properties_.insert(std::make_pair(field_index.property_index(), result));
   return result;
 }
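GetOwnDataProperty shows two refactorings that recur throughout this file: the bare `bool serialize` flag becomes a named SerializationPolicy (defaulting to kAssumeSerialized), and the own-properties cache is keyed by the plain int FieldIndex::property_index(), which lets the custom FieldIndexHasher go away. A compilable sketch of both under stand-in types (enumerator names follow the diff):

#include <cassert>
#include <unordered_map>

enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };

struct FieldIndex {
  int value;
  int property_index() const { return value; }
};

struct ObjectData {};

class JSObjectDataSketch {
 public:
  ObjectData* GetOwnDataProperty(
      FieldIndex field_index,
      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized) {
    auto it = own_properties_.find(field_index.property_index());
    if (it != own_properties_.end()) return it->second;
    if (policy == SerializationPolicy::kAssumeSerialized) {
      return nullptr;  // the real code emits TRACE_MISSING here
    }
    // kSerializeIfNeeded: fetch from the heap and cache the result.
    own_properties_.emplace(field_index.property_index(), &data_);
    return &data_;
  }

 private:
  std::unordered_map<int, ObjectData*> own_properties_;
  ObjectData data_;
};

int main() {
  JSObjectDataSketch data;
  FieldIndex idx{3};
  assert(data.GetOwnDataProperty(idx) == nullptr);
  assert(data.GetOwnDataProperty(
             idx, SerializationPolicy::kSerializeIfNeeded) != nullptr);
  assert(data.GetOwnDataProperty(idx) != nullptr);  // cached now
  return 0;
}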
 
@@ -446,6 +456,31 @@ void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
   }
 }
 
+class ArrayBoilerplateDescriptionData : public HeapObjectData {
+ public:
+  ArrayBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
+                                  Handle<ArrayBoilerplateDescription> object)
+      : HeapObjectData(broker, storage, object),
+        constants_elements_length_(object->constant_elements().length()) {}
+
+  int constants_elements_length() const { return constants_elements_length_; }
+
+ private:
+  int const constants_elements_length_;
+};
+
+class ObjectBoilerplateDescriptionData : public HeapObjectData {
+ public:
+  ObjectBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
+                                   Handle<ObjectBoilerplateDescription> object)
+      : HeapObjectData(broker, storage, object), size_(object->size()) {}
+
+  int size() const { return size_; }
+
+ private:
+  int const size_;
+};
+
 class JSDataViewData : public JSObjectData {
  public:
   JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
@@ -465,6 +500,7 @@ class JSBoundFunctionData : public JSObjectData {
                       Handle<JSBoundFunction> object);
 
   void Serialize(JSHeapBroker* broker);
+  bool serialized() const { return serialized_; }
 
   ObjectData* bound_target_function() const { return bound_target_function_; }
   ObjectData* bound_this() const { return bound_this_; }
@@ -557,18 +593,6 @@ class HeapNumberData : public HeapObjectData {
   double const value_;
 };
 
-class MutableHeapNumberData : public HeapObjectData {
- public:
-  MutableHeapNumberData(JSHeapBroker* broker, ObjectData** storage,
-                        Handle<MutableHeapNumber> object)
-      : HeapObjectData(broker, storage, object), value_(object->value()) {}
-
-  double value() const { return value_; }
-
- private:
-  double const value_;
-};
-
 class ContextData : public HeapObjectData {
  public:
   ContextData(JSHeapBroker* broker, ObjectData** storage,
@@ -576,12 +600,15 @@ class ContextData : public HeapObjectData {
 
   // {previous} will return the closest valid context possible to desired
   // {depth}, decrementing {depth} for each previous link successfully followed.
-  // If {serialize} is true, it will serialize contexts along the way.
-  ContextData* previous(JSHeapBroker* broker, size_t* depth, bool serialize);
+  ContextData* previous(
+      JSHeapBroker* broker, size_t* depth,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 
-  // Returns nullptr if the slot index isn't valid or wasn't serialized
-  // (unless {serialize} is true).
-  ObjectData* GetSlot(JSHeapBroker* broker, int index, bool serialize);
+  // Returns nullptr if the slot index isn't valid or wasn't serialized,
+  // unless {policy} is {kSerializeIfNeeded}.
+  ObjectData* GetSlot(
+      JSHeapBroker* broker, int index,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 
  private:
   ZoneMap<int, ObjectData*> slots_;
@@ -593,10 +620,11 @@ ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
     : HeapObjectData(broker, storage, object), slots_(broker->zone()) {}
 
 ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth,
-                                   bool serialize) {
+                                   SerializationPolicy policy) {
   if (*depth == 0) return this;
 
-  if (serialize && previous_ == nullptr) {
+  if (policy == SerializationPolicy::kSerializeIfNeeded &&
+      previous_ == nullptr) {
     TraceScope tracer(broker, this, "ContextData::previous");
     Handle<Context> context = Handle<Context>::cast(object());
     Object prev = context->unchecked_previous();
@@ -607,20 +635,20 @@ ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth,
 
   if (previous_ != nullptr) {
     *depth = *depth - 1;
-    return previous_->previous(broker, depth, serialize);
+    return previous_->previous(broker, depth, policy);
   }
   return this;
 }
 
 ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
-                                 bool serialize) {
+                                 SerializationPolicy policy) {
   CHECK_GE(index, 0);
   auto search = slots_.find(index);
   if (search != slots_.end()) {
     return search->second;
   }
 
-  if (serialize) {
+  if (policy == SerializationPolicy::kSerializeIfNeeded) {
     Handle<Context> context = Handle<Context>::cast(object());
     if (index < context->length()) {
       TraceScope tracer(broker, this, "ContextData::GetSlot");
@@ -680,8 +708,9 @@ class StringData : public NameData {
   bool is_external_string() const { return is_external_string_; }
   bool is_seq_string() const { return is_seq_string_; }
 
-  StringData* GetCharAsString(JSHeapBroker* broker, uint32_t index,
-                              bool serialize);
+  StringData* GetCharAsString(
+      JSHeapBroker* broker, uint32_t index,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 
  private:
   int const length_;
@@ -730,14 +759,14 @@ class InternalizedStringData : public StringData {
 };
 
 StringData* StringData::GetCharAsString(JSHeapBroker* broker, uint32_t index,
-                                        bool serialize) {
+                                        SerializationPolicy policy) {
   if (index >= static_cast<uint32_t>(length())) return nullptr;
 
   for (auto const& p : chars_as_strings_) {
     if (p.first == index) return p.second;
   }
 
-  if (!serialize) {
+  if (policy == SerializationPolicy::kAssumeSerialized) {
     TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
     return nullptr;
   }
@@ -842,6 +871,12 @@ bool IsInlinableFastLiteral(Handle<JSObject> boilerplate) {
 
 }  // namespace
 
+class AccessorInfoData : public HeapObjectData {
+ public:
+  AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
+                   Handle<AccessorInfo> object);
+};
+
 class AllocationSiteData : public HeapObjectData {
  public:
   AllocationSiteData(JSHeapBroker* broker, ObjectData** storage,
@@ -891,6 +926,7 @@ class ScriptContextTableData : public HeapObjectData {
 
 struct PropertyDescriptor {
   NameData* key = nullptr;
+  ObjectData* value = nullptr;
   PropertyDetails details = PropertyDetails::Empty();
   FieldIndex field_index;
   MapData* field_owner = nullptr;
@@ -926,8 +962,11 @@ class MapData : public HeapObjectData {
   bool supports_fast_array_resize() const {
     return supports_fast_array_resize_;
   }
-  bool IsMapOfCurrentGlobalProxy() const {
-    return is_map_of_current_global_proxy_;
+  bool IsMapOfTargetGlobalProxy() const {
+    return is_map_of_target_global_proxy_;
+  }
+  bool is_abandoned_prototype_map() const {
+    return is_abandoned_prototype_map_;
   }
 
   // Extra information.
@@ -942,10 +981,14 @@ class MapData : public HeapObjectData {
   // on field owner(s).
   void SerializeOwnDescriptor(JSHeapBroker* broker, int descriptor_index);
   void SerializeOwnDescriptors(JSHeapBroker* broker);
+  ObjectData* GetStrongValue(int descriptor_index) const;
   DescriptorArrayData* instance_descriptors() const {
     return instance_descriptors_;
   }
 
+  void SerializeRootMap(JSHeapBroker* broker);
+  MapData* FindRootMap() const;
+
   void SerializeConstructor(JSHeapBroker* broker);
   ObjectData* GetConstructor() const {
     CHECK(serialized_constructor_);
@@ -984,7 +1027,8 @@ class MapData : public HeapObjectData {
   int const unused_property_fields_;
   bool const supports_fast_array_iteration_;
   bool const supports_fast_array_resize_;
-  bool const is_map_of_current_global_proxy_;
+  bool const is_map_of_target_global_proxy_;
+  bool const is_abandoned_prototype_map_;
 
   bool serialized_elements_kind_generalizations_ = false;
   ZoneVector<MapData*> elements_kind_generalizations_;
@@ -1001,11 +1045,18 @@ class MapData : public HeapObjectData {
   bool serialized_prototype_ = false;
   ObjectData* prototype_ = nullptr;
 
+  bool serialized_root_map_ = false;
+  MapData* root_map_ = nullptr;
+
   bool serialized_for_element_load_ = false;
 
   bool serialized_for_element_store_ = false;
 };
 
+AccessorInfoData::AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
+                                   Handle<AccessorInfo> object)
+    : HeapObjectData(broker, storage, object) {}
+
 AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
                                        ObjectData** storage,
                                        Handle<AllocationSite> object)
@@ -1103,8 +1154,9 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
           SupportsFastArrayIteration(broker->isolate(), object)),
       supports_fast_array_resize_(
           SupportsFastArrayResize(broker->isolate(), object)),
-      is_map_of_current_global_proxy_(
-          object->IsMapOfGlobalProxy(broker->isolate()->native_context())),
+      is_map_of_target_global_proxy_(
+          object->IsMapOfGlobalProxy(broker->target_native_context().object())),
+      is_abandoned_prototype_map_(object->is_abandoned_prototype_map()),
       elements_kind_generalizations_(broker->zone()) {}
 
 JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
@@ -1210,28 +1262,52 @@ FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
 
 class FeedbackVectorData : public HeapObjectData {
  public:
-  const ZoneVector<ObjectData*>& feedback() { return feedback_; }
-
   FeedbackVectorData(JSHeapBroker* broker, ObjectData** storage,
                      Handle<FeedbackVector> object);
 
-  void SerializeSlots(JSHeapBroker* broker);
+  double invocation_count() const { return invocation_count_; }
+
+  void Serialize(JSHeapBroker* broker);
+  const ZoneVector<ObjectData*>& feedback() { return feedback_; }
+  FeedbackCellData* GetClosureFeedbackCell(JSHeapBroker* broker,
+                                           int index) const;
 
  private:
+  double const invocation_count_;
+
   bool serialized_ = false;
   ZoneVector<ObjectData*> feedback_;
+  ZoneVector<ObjectData*> closure_feedback_cell_array_;
 };
 
 FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
                                        ObjectData** storage,
                                        Handle<FeedbackVector> object)
-    : HeapObjectData(broker, storage, object), feedback_(broker->zone()) {}
+    : HeapObjectData(broker, storage, object),
+      invocation_count_(object->invocation_count()),
+      feedback_(broker->zone()),
+      closure_feedback_cell_array_(broker->zone()) {}
+
+FeedbackCellData* FeedbackVectorData::GetClosureFeedbackCell(
+    JSHeapBroker* broker, int index) const {
+  CHECK_GE(index, 0);
+
+  size_t cell_array_size = closure_feedback_cell_array_.size();
+  if (!serialized_) {
+    DCHECK_EQ(cell_array_size, 0);
+    TRACE_BROKER_MISSING(broker,
+                         " closure feedback cell array for vector " << this);
+    return nullptr;
+  }
+  CHECK_LT(index, cell_array_size);
+  return closure_feedback_cell_array_[index]->AsFeedbackCell();
+}
 
-void FeedbackVectorData::SerializeSlots(JSHeapBroker* broker) {
+void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
   if (serialized_) return;
   serialized_ = true;
 
-  TraceScope tracer(broker, this, "FeedbackVectorData::SerializeSlots");
+  TraceScope tracer(broker, this, "FeedbackVectorData::Serialize");
   Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
   DCHECK(feedback_.empty());
   feedback_.reserve(vector->length());
@@ -1252,6 +1328,16 @@ void FeedbackVectorData::SerializeSlots(JSHeapBroker* broker) {
   }
   DCHECK_EQ(vector->length(), feedback_.size());
   TRACE(broker, "Copied " << feedback_.size() << " slots");
+
+  DCHECK(closure_feedback_cell_array_.empty());
+  int length = vector->closure_feedback_cell_array().length();
+  closure_feedback_cell_array_.reserve(length);
+  for (int i = 0; i < length; ++i) {
+    Handle<FeedbackCell> cell = vector->GetClosureFeedbackCell(i);
+    ObjectData* cell_data = broker->GetOrCreateData(cell);
+    closure_feedback_cell_array_.push_back(cell_data);
+  }
+  TRACE(broker, "Copied " << length << " feedback cells");
 }
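Serialize now also copies the closure feedback cell array, and GetClosureFeedbackCell reports a miss (nullptr) when called before serialization instead of indexing out of bounds. A toy version of that contract with stand-in types:

#include <cassert>
#include <vector>

struct FeedbackCellData {};

class FeedbackVectorDataSketch {
 public:
  void Serialize(int length) {
    if (serialized_) return;
    serialized_ = true;
    cells_.assign(length, &cell_);  // copy of the closure feedback cells
  }
  FeedbackCellData* GetClosureFeedbackCell(int index) const {
    assert(index >= 0);
    if (!serialized_) return nullptr;  // TRACE_BROKER_MISSING in real code
    assert(index < static_cast<int>(cells_.size()));
    return cells_[index];
  }

 private:
  bool serialized_ = false;
  std::vector<FeedbackCellData*> cells_;
  FeedbackCellData cell_;
};

int main() {
  FeedbackVectorDataSketch data;
  assert(data.GetClosureFeedbackCell(0) == nullptr);  // pre-serialization miss
  data.Serialize(2);
  assert(data.GetClosureFeedbackCell(1) != nullptr);
  return 0;
}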
 
 class FixedArrayBaseData : public HeapObjectData {
@@ -1300,21 +1386,26 @@ void JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
   Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
 
   DCHECK_NULL(bound_target_function_);
-  DCHECK_NULL(bound_this_);
-  DCHECK_NULL(bound_arguments_);
-
   bound_target_function_ =
       broker->GetOrCreateData(function->bound_target_function());
-  bound_this_ = broker->GetOrCreateData(function->bound_this());
+  if (bound_target_function_->IsJSBoundFunction()) {
+    bound_target_function_->AsJSBoundFunction()->Serialize(broker);
+  } else if (bound_target_function_->IsJSFunction()) {
+    bound_target_function_->AsJSFunction()->Serialize(broker);
+  }
+
+  DCHECK_NULL(bound_arguments_);
   bound_arguments_ =
       broker->GetOrCreateData(function->bound_arguments())->AsFixedArray();
-
   bound_arguments_->SerializeContents(broker);
+
+  DCHECK_NULL(bound_this_);
+  bound_this_ = broker->GetOrCreateData(function->bound_this());
 }
 
 JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
                            Handle<JSObject> object)
-    : HeapObjectData(broker, storage, object),
+    : JSReceiverData(broker, storage, object),
       inobject_fields_(broker->zone()),
       own_constant_elements_(broker->zone()),
       own_properties_(broker->zone()) {}
@@ -1494,8 +1585,9 @@ class JSArrayData : public JSObjectData {
   void Serialize(JSHeapBroker* broker);
   ObjectData* length() const { return length_; }
 
-  ObjectData* GetOwnElement(JSHeapBroker* broker, uint32_t index,
-                            bool serialize);
+  ObjectData* GetOwnElement(
+      JSHeapBroker* broker, uint32_t index,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 
  private:
   bool serialized_ = false;
@@ -1524,12 +1616,12 @@ void JSArrayData::Serialize(JSHeapBroker* broker) {
 }
 
 ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index,
-                                       bool serialize) {
+                                       SerializationPolicy policy) {
   for (auto const& p : own_elements_) {
     if (p.first == index) return p.second;
   }
 
-  if (!serialize) {
+  if (policy == SerializationPolicy::kAssumeSerialized) {
     TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
     return nullptr;
   }
@@ -1654,7 +1746,7 @@ class SourceTextModuleData : public HeapObjectData {
                        Handle<SourceTextModule> object);
   void Serialize(JSHeapBroker* broker);
 
-  CellData* GetCell(int cell_index) const;
+  CellData* GetCell(JSHeapBroker* broker, int cell_index) const;
 
  private:
   bool serialized_ = false;
@@ -1669,8 +1761,14 @@ SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker,
       imports_(broker->zone()),
       exports_(broker->zone()) {}
 
-CellData* SourceTextModuleData::GetCell(int cell_index) const {
-  CHECK(serialized_);
+CellData* SourceTextModuleData::GetCell(JSHeapBroker* broker,
+                                        int cell_index) const {
+  if (!serialized_) {
+    DCHECK(imports_.empty());
+    TRACE_BROKER_MISSING(broker,
+                         "module cell " << cell_index << " on " << this);
+    return nullptr;
+  }
   CellData* cell;
   switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
     case SourceTextModuleDescriptor::kImport:
@@ -1741,13 +1839,25 @@ void CellData::Serialize(JSHeapBroker* broker) {
   value_ = broker->GetOrCreateData(cell->value());
 }
 
+class JSGlobalObjectData : public JSObjectData {
+ public:
+  JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
+                     Handle<JSGlobalObject> object);
+};
+
+JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker,
+                                       ObjectData** storage,
+                                       Handle<JSGlobalObject> object)
+    : JSObjectData(broker, storage, object) {}
+
 class JSGlobalProxyData : public JSObjectData {
  public:
   JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
                     Handle<JSGlobalProxy> object);
 
-  PropertyCellData* GetPropertyCell(JSHeapBroker* broker, NameData* name,
-                                    bool serialize);
+  PropertyCellData* GetPropertyCell(
+      JSHeapBroker* broker, NameData* name,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 
  private:
   // Properties that either
@@ -1764,10 +1874,11 @@ JSGlobalProxyData::JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
 namespace {
 base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
                                                         Handle<Name> name) {
-  LookupIterator it(broker->isolate(),
-                    handle(broker->native_context().object()->global_object(),
-                           broker->isolate()),
-                    name, LookupIterator::OWN);
+  LookupIterator it(
+      broker->isolate(),
+      handle(broker->target_native_context().object()->global_object(),
+             broker->isolate()),
+      name, LookupIterator::OWN);
   it.TryLookupCachedProperty();
   if (it.state() == LookupIterator::DATA &&
       it.GetHolder<JSObject>()->IsJSGlobalObject()) {
@@ -1777,15 +1888,14 @@ base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
 }
 }  // namespace
 
-PropertyCellData* JSGlobalProxyData::GetPropertyCell(JSHeapBroker* broker,
-                                                     NameData* name,
-                                                     bool serialize) {
+PropertyCellData* JSGlobalProxyData::GetPropertyCell(
+    JSHeapBroker* broker, NameData* name, SerializationPolicy policy) {
   CHECK_NOT_NULL(name);
   for (auto const& p : properties_) {
     if (p.first == name) return p.second;
   }
 
-  if (!serialize) {
+  if (policy == SerializationPolicy::kAssumeSerialized) {
     TRACE_MISSING(broker, "knowledge about global property " << name);
     return nullptr;
   }
@@ -1896,6 +2006,13 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
   }
 }
 
+ObjectData* MapData::GetStrongValue(int descriptor_index) const {
+  auto data = instance_descriptors_->contents().find(descriptor_index);
+  if (data == instance_descriptors_->contents().end()) return nullptr;
+
+  return data->second.value;
+}
+
 void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
                                      int descriptor_index) {
   TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
@@ -1907,7 +2024,7 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
   }
 
   ZoneMap<int, PropertyDescriptor>& contents =
-      instance_descriptors_->contents();
+      instance_descriptors()->contents();
   CHECK_LT(descriptor_index, map->NumberOfOwnDescriptors());
   if (contents.find(descriptor_index) != contents.end()) return;
 
@@ -1919,6 +2036,11 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
   PropertyDescriptor d;
   d.key =
       broker->GetOrCreateData(descriptors->GetKey(descriptor_index))->AsName();
+  MaybeObject value = descriptors->GetValue(descriptor_index);
+  HeapObject obj;
+  if (value.GetHeapObjectIfStrong(&obj)) {
+    d.value = broker->GetOrCreateData(handle(obj, broker->isolate()));
+  }
   d.details = descriptors->GetDetails(descriptor_index);
   if (d.details.location() == kField) {
     d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
@@ -1941,6 +2063,19 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
                                      << contents.size() << " total)");
 }
 
+void MapData::SerializeRootMap(JSHeapBroker* broker) {
+  if (serialized_root_map_) return;
+  serialized_root_map_ = true;
+
+  TraceScope tracer(broker, this, "MapData::SerializeRootMap");
+  Handle<Map> map = Handle<Map>::cast(object());
+  DCHECK_NULL(root_map_);
+  root_map_ =
+      broker->GetOrCreateData(map->FindRootMap(broker->isolate()))->AsMap();
+}
+
+MapData* MapData::FindRootMap() const { return root_map_; }
+
 void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
                                                    int depth) {
   if (serialized_as_boilerplate_) return;
@@ -2029,15 +2164,16 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
     } else {
       Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
                            isolate);
-      // In case of unboxed double fields we use a sentinel NaN value to mark
+      // In case of double fields we use a sentinel NaN value to mark
       // uninitialized fields. A boilerplate value with such a field may migrate
-      // from its unboxed double to a tagged representation. In the process the
-      // raw double is converted to a heap number. The sentinel value carries no
-      // special meaning when it occurs in a heap number, so we would like to
-      // recover the uninitialized value.
-      // We check for the sentinel here, specifically, since migrations might
-      // have been triggered as part of boilerplate serialization.
-      if (value->IsHeapNumber() &&
+      // from its double to a tagged representation. If the double is unboxed,
+      // the raw double is converted to a heap number, otherwise the (boxed)
+      // double ceases to be mutable, and becomes a normal heap number. The
+      // sentinel value carries no special meaning when it occurs in a heap
+      // number, so we would like to recover the uninitialized value. We check
+      // for the sentinel here, specifically, since migrations might have been
+      // triggered as part of boilerplate serialization.
+      if (!details.representation().IsDouble() && value->IsHeapNumber() &&
           HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
         value = isolate->factory()->uninitialized_value();
       }
@@ -2079,7 +2215,8 @@ bool ObjectRef::equals(const ObjectRef& other) const {
 
 Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
 
-ContextRef ContextRef::previous(size_t* depth, bool serialize) const {
+ContextRef ContextRef::previous(size_t* depth,
+                                SerializationPolicy policy) const {
   DCHECK_NOT_NULL(depth);
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleAllocation handle_allocation;
@@ -2092,10 +2229,11 @@ ContextRef ContextRef::previous(size_t* depth, bool serialize) const {
     return ContextRef(broker(), handle(current, broker()->isolate()));
   }
   ContextData* current = this->data()->AsContext();
-  return ContextRef(broker(), current->previous(broker(), depth, serialize));
+  return ContextRef(broker(), current->previous(broker(), depth, policy));
 }
 
-base::Optional<ObjectRef> ContextRef::get(int index, bool serialize) const {
+base::Optional<ObjectRef> ContextRef::get(int index,
+                                          SerializationPolicy policy) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleAllocation handle_allocation;
     AllowHandleDereference handle_dereference;
@@ -2103,7 +2241,7 @@ base::Optional<ObjectRef> ContextRef::get(int index, bool serialize) const {
     return ObjectRef(broker(), value);
   }
   ObjectData* optional_slot =
-      data()->AsContext()->GetSlot(broker(), index, serialize);
+      data()->AsContext()->GetSlot(broker(), index, policy);
   if (optional_slot != nullptr) {
     return ObjectRef(broker(), optional_slot);
   }
@@ -2121,13 +2259,13 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
       tracing_enabled_(tracing_enabled),
       feedback_(zone()),
       bytecode_analyses_(zone()),
-      ais_for_loading_then_(zone()),
-      ais_for_loading_exec_(zone()) {
-  // Note that this initialization of the refs_ pointer with the minimal
-  // initial capacity is redundant in the normal use case (concurrent
-  // compilation enabled, standard objects to be serialized), as the map
-  // is going to be replaced immediatelly with a larger capacity one.
-  // It doesn't seem to affect the performance in a noticeable way though.
+      property_access_infos_(zone()),
+      typed_array_string_tags_(zone()) {
+  // Note that this initialization of {refs_} with the minimal initial capacity
+  // is redundant in the normal use case (concurrent compilation enabled,
+  // standard objects to be serialized), as the map is going to be replaced
+  // immediately with a larger-capacity one.  It doesn't seem to affect the
+  // performance in a noticeable way though.
   TRACE(this, "Constructing heap broker");
 }
 
@@ -2136,13 +2274,6 @@ std::ostream& JSHeapBroker::Trace() {
                     << std::string(trace_indentation_ * 2, ' ');
 }
 
-void JSHeapBroker::StartSerializing() {
-  CHECK_EQ(mode_, kDisabled);
-  TRACE(this, "Starting serialization");
-  mode_ = kSerializing;
-  refs_->Clear();
-}
-
 void JSHeapBroker::StopSerializing() {
   CHECK_EQ(mode_, kSerializing);
   TRACE(this, "Stopping serialization");
@@ -2157,39 +2288,54 @@ void JSHeapBroker::Retire() {
 
 bool JSHeapBroker::SerializingAllowed() const { return mode() == kSerializing; }
 
-void JSHeapBroker::SetNativeContextRef() {
-  native_context_ = NativeContextRef(this, isolate()->native_context());
+void JSHeapBroker::SetTargetNativeContextRef(
+    Handle<NativeContext> native_context) {
+  // The MapData constructor uses {target_native_context_}. This creates a
+  // benign cycle that we break by setting {target_native_context_} right before
+  // starting to serialize (thus creating dummy data), and then again properly
+  // right after.
+  DCHECK((mode() == kDisabled && !target_native_context_.has_value()) ||
+         (mode() == kSerializing &&
+          target_native_context_->object().equals(native_context) &&
+          target_native_context_->data_->kind() == kUnserializedHeapObject));
+  target_native_context_ = NativeContextRef(this, native_context);
 }
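A sketch of the benign cycle the comment above describes, under stand-in types: the ref is first assigned while still unserialized (dummy data that MapData construction can already consult) and then assigned again once serialization can produce the real thing. This illustrates the ordering only, not the broker's actual API:

#include <cassert>
#include <optional>

struct NativeContextRefSketch {
  bool serialized;
};

struct BrokerSketch {
  std::optional<NativeContextRefSketch> target_native_context_;

  void InitializeAndStartSerializing() {
    // First assignment: dummy, unserialized data.
    target_native_context_ = NativeContextRefSketch{false};
    // ... context serialization happens here in the real broker ...
    // Second assignment: the properly serialized ref.
    target_native_context_ = NativeContextRefSketch{true};
  }
};

int main() {
  BrokerSketch broker;
  assert(!broker.target_native_context_.has_value());
  broker.InitializeAndStartSerializing();
  assert(broker.target_native_context_->serialized);
  return 0;
}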
 
 bool IsShareable(Handle<Object> object, Isolate* isolate) {
-  Builtins* const b = isolate->builtins();
-
   int index;
   RootIndex root_index;
-  return (object->IsHeapObject() &&
-          b->IsBuiltinHandle(Handle<HeapObject>::cast(object), &index)) ||
+  bool is_builtin_handle =
+      object->IsHeapObject() && isolate->builtins()->IsBuiltinHandle(
+                                    Handle<HeapObject>::cast(object), &index);
+  return is_builtin_handle ||
          isolate->roots_table().IsRootHandle(object, &root_index);
 }
 
-void JSHeapBroker::SerializeShareableObjects() {
+void JSHeapBroker::InitializeRefsMap() {
+  TraceScope tracer(this, "JSHeapBroker::InitializeRefsMap");
+
+  DCHECK_NULL(compiler_cache_);
   PerIsolateCompilerCache::Setup(isolate());
   compiler_cache_ = isolate()->compiler_cache();
 
   if (compiler_cache_->HasSnapshot()) {
-    RefsMap* snapshot = compiler_cache_->GetSnapshot();
-
-    refs_ = new (zone()) RefsMap(snapshot, zone());
+    TRACE(this, "Importing existing RefsMap snapshot");
+    DCHECK_NULL(refs_);
+    refs_ = new (zone()) RefsMap(compiler_cache_->GetSnapshot(), zone());
     return;
   }
 
-  TraceScope tracer(
-      this, "JSHeapBroker::SerializeShareableObjects (building snapshot)");
-
+  TRACE(this, "Building RefsMap snapshot");
+  DCHECK_NULL(refs_);
   refs_ =
       new (zone()) RefsMap(kInitialRefsBucketCount, AddressMatcher(), zone());
 
+  // Temporarily use the "compiler zone" for serialization, such that the
+  // serialized data survives this compilation.
+  DCHECK_EQ(current_zone_, broker_zone_);
   current_zone_ = compiler_cache_->zone();
 
+  // Serialize various builtins.
   Builtins* const b = isolate()->builtins();
   {
     Builtins::Name builtins[] = {
@@ -2199,17 +2345,28 @@ void JSHeapBroker::SerializeShareableObjects() {
         Builtins::kAllocateRegularInOldGeneration,
         Builtins::kArgumentsAdaptorTrampoline,
         Builtins::kArrayConstructorImpl,
+        Builtins::kArrayIncludesHoleyDoubles,
+        Builtins::kArrayIncludesPackedDoubles,
+        Builtins::kArrayIncludesSmiOrObject,
+        Builtins::kArrayIndexOfHoleyDoubles,
+        Builtins::kArrayIndexOfPackedDoubles,
+        Builtins::kArrayIndexOfSmiOrObject,
+        Builtins::kCallApiCallback,
         Builtins::kCallFunctionForwardVarargs,
         Builtins::kCallFunction_ReceiverIsAny,
         Builtins::kCallFunction_ReceiverIsNotNullOrUndefined,
         Builtins::kCallFunction_ReceiverIsNullOrUndefined,
+        Builtins::kCloneFastJSArray,
+        Builtins::kCompileLazy,
         Builtins::kConstructFunctionForwardVarargs,
         Builtins::kForInFilter,
+        Builtins::kGetProperty,
+        Builtins::kIncBlockCounter,
         Builtins::kJSBuiltinsConstructStub,
         Builtins::kJSConstructStubGeneric,
         Builtins::kStringAdd_CheckNone,
-        Builtins::kStringAdd_ConvertLeft,
-        Builtins::kStringAdd_ConvertRight,
+        Builtins::kStringAddConvertLeft,
+        Builtins::kStringAddConvertRight,
         Builtins::kToNumber,
         Builtins::kToObject,
     };
@@ -2223,12 +2380,13 @@ void JSHeapBroker::SerializeShareableObjects() {
     }
   }
 
+  // TODO(mslekova): Serialize root objects (from factory).
+
+  // Verify.
   for (RefsMap::Entry* p = refs_->Start(); p != nullptr; p = refs_->Next(p)) {
     CHECK(IsShareable(p->value->object(), isolate()));
   }
 
-  // TODO(mslekova):
-  // Serialize root objects (from factory).
   compiler_cache()->SetSnapshot(refs_);
   current_zone_ = broker_zone_;
 }
@@ -2252,6 +2410,25 @@ void JSHeapBroker::CollectArrayAndObjectPrototypes() {
   CHECK(!array_and_object_prototypes_.empty());
 }
 
+void JSHeapBroker::SerializeTypedArrayStringTags() {
+#define TYPED_ARRAY_STRING_TAG(Type, type, TYPE, ctype)              \
+  do {                                                               \
+    ObjectData* data = GetOrCreateData(                              \
+        isolate()->factory()->InternalizeUtf8String(#Type "Array")); \
+    typed_array_string_tags_.push_back(data);                        \
+  } while (false);
+
+  TYPED_ARRAYS(TYPED_ARRAY_STRING_TAG)
+#undef TYPED_ARRAY_STRING_TAG
+}
+
+StringRef JSHeapBroker::GetTypedArrayStringTag(ElementsKind kind) {
+  DCHECK(IsTypedArrayElementsKind(kind));
+  size_t idx = kind - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
+  CHECK_LT(idx, typed_array_string_tags_.size());
+  return StringRef(this, typed_array_string_tags_[idx]);
+}
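GetTypedArrayStringTag relies on the tags having been pushed in ElementsKind order by SerializeTypedArrayStringTags, so the lookup is a subtraction of the first typed-array kind. A toy model (kind values are illustrative, not V8's real numbering):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

enum ElementsKind { UINT8_ELEMENTS = 20, INT8_ELEMENTS, UINT16_ELEMENTS };
constexpr ElementsKind kFirstTypedArrayKind = UINT8_ELEMENTS;

int main() {
  // Tags were pushed in kind order by the serialization step.
  std::vector<std::string> tags = {"Uint8Array", "Int8Array", "Uint16Array"};
  ElementsKind kind = INT8_ELEMENTS;
  std::size_t idx = static_cast<std::size_t>(kind - kFirstTypedArrayKind);
  assert(idx < tags.size());
  assert(tags[idx] == "Int8Array");
  return 0;
}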
+
 bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const {
   if (mode() == kDisabled) {
     return isolate()->IsInAnyContext(*object.object(),
@@ -2264,22 +2441,29 @@ bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const {
          array_and_object_prototypes_.end();
 }
 
-void JSHeapBroker::SerializeStandardObjects() {
-  if (mode() == kDisabled) return;
-  CHECK_EQ(mode(), kSerializing);
+void JSHeapBroker::InitializeAndStartSerializing(
+    Handle<NativeContext> native_context) {
+  TraceScope tracer(this, "JSHeapBroker::InitializeAndStartSerializing");
 
-  SerializeShareableObjects();
+  CHECK_EQ(mode_, kDisabled);
+  mode_ = kSerializing;
 
-  TraceScope tracer(this, "JSHeapBroker::SerializeStandardObjects");
+  // Throw away the dummy data that we created while disabled.
+  refs_->Clear();
+  refs_ = nullptr;
 
-  CollectArrayAndObjectPrototypes();
+  InitializeRefsMap();
 
-  SetNativeContextRef();
-  native_context().Serialize();
+  SetTargetNativeContextRef(native_context);
+  target_native_context().Serialize();
 
-  Factory* const f = isolate()->factory();
+  CollectArrayAndObjectPrototypes();
+  SerializeTypedArrayStringTags();
 
-  // Maps, strings, oddballs
+  // Serialize standard objects.
+  //
+  // - Maps, strings, oddballs
+  Factory* const f = isolate()->factory();
   GetOrCreateData(f->arguments_marker_map());
   GetOrCreateData(f->bigint_string());
   GetOrCreateData(f->block_context_map());
@@ -2300,7 +2484,6 @@ void JSHeapBroker::SerializeStandardObjects() {
   GetOrCreateData(f->length_string());
   GetOrCreateData(f->many_closures_cell_map());
   GetOrCreateData(f->minus_zero_value());
-  GetOrCreateData(f->mutable_heap_number_map());
   GetOrCreateData(f->name_dictionary_map());
   GetOrCreateData(f->NaN_string());
   GetOrCreateData(f->null_map());
@@ -2312,6 +2495,8 @@ void JSHeapBroker::SerializeStandardObjects() {
   GetOrCreateData(f->optimized_out());
   GetOrCreateData(f->optimized_out_map());
   GetOrCreateData(f->property_array_map());
+  GetOrCreateData(f->ReflectGet_string());
+  GetOrCreateData(f->ReflectHas_string());
   GetOrCreateData(f->sloppy_arguments_elements_map());
   GetOrCreateData(f->stale_register());
   GetOrCreateData(f->stale_register_map());
@@ -2328,8 +2513,7 @@ void JSHeapBroker::SerializeStandardObjects() {
   GetOrCreateData(f->uninitialized_map());
   GetOrCreateData(f->with_context_map());
   GetOrCreateData(f->zero_string());
-
-  // Protector cells
+  // - Cells
   GetOrCreateData(f->array_buffer_detaching_protector())
       ->AsPropertyCell()
       ->Serialize(this);
@@ -2340,6 +2524,7 @@ void JSHeapBroker::SerializeStandardObjects() {
   GetOrCreateData(f->array_species_protector())
       ->AsPropertyCell()
       ->Serialize(this);
+  GetOrCreateData(f->many_closures_cell())->AsFeedbackCell();
   GetOrCreateData(f->no_elements_protector())
       ->AsPropertyCell()
       ->Serialize(this);
@@ -2353,8 +2538,7 @@ void JSHeapBroker::SerializeStandardObjects() {
       ->AsPropertyCell()
       ->Serialize(this);
   GetOrCreateData(f->string_length_protector())->AsCell()->Serialize(this);
-
-  // CEntry stub
+  // - CEntry stub
   GetOrCreateData(
       CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true));
 
@@ -2425,7 +2609,7 @@ base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
       return base::Optional<MapRef>();
     }
   }
-  MapData* map_data = data()->AsJSObject()->object_create_map();
+  MapData* map_data = data()->AsJSObject()->object_create_map(broker());
   return map_data != nullptr ? MapRef(broker(), map_data)
                              : base::Optional<MapRef>();
 }
@@ -2535,13 +2719,14 @@ bool MapRef::supports_fast_array_resize() const {
   return data()->AsMap()->supports_fast_array_resize();
 }
 
-bool MapRef::IsMapOfCurrentGlobalProxy() const {
+bool MapRef::IsMapOfTargetGlobalProxy() const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleDereference allow_handle_dereference;
     AllowHandleAllocation handle_allocation;
-    return object()->IsMapOfGlobalProxy(broker()->isolate()->native_context());
+    return object()->IsMapOfGlobalProxy(
+        broker()->target_native_context().object());
   }
-  return data()->AsMap()->IsMapOfCurrentGlobalProxy();
+  return data()->AsMap()->IsMapOfTargetGlobalProxy();
 }
 
 int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
@@ -2612,6 +2797,18 @@ ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const {
   return ObjectRef(broker(), data()->AsFeedbackVector()->feedback().at(i));
 }
 
+FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleAllocation handle_allocation;
+    AllowHandleDereference handle_dereference;
+    return FeedbackCellRef(broker(), object()->GetClosureFeedbackCell(index));
+  }
+
+  return FeedbackCellRef(
+      broker(),
+      data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index));
+}
+
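
GetClosureFeedbackCell above follows the bimodal shape that nearly every Ref method in this file shares, and that the BIMODAL_ACCESSOR* macros further down stamp out mechanically: with the broker disabled, read the heap directly (guarded by AllowHandleDereference/AllowHandleAllocation); otherwise answer only from the serialized ObjectData. A heavily reduced standalone model of that dispatch, with all names invented:

#include <cassert>

enum class BrokerMode { kDisabled, kSerialized };

struct HeapThing { int length; };          // stand-in for an on-heap object
struct ThingData { int cached_length; };   // stand-in for serialized data

class ThingRef {
 public:
  ThingRef(BrokerMode mode, const HeapThing* object, const ThingData* data)
      : mode_(mode), object_(object), data_(data) {}

  int length() const {
    if (mode_ == BrokerMode::kDisabled) {
      // Disabled broker: main thread only, so reading the heap is safe.
      return object_->length;
    }
    // Active broker: the heap must not be touched; use the snapshot.
    assert(data_ != nullptr && "object was never serialized");
    return data_->cached_length;
  }

 private:
  BrokerMode mode_;
  const HeapThing* object_;
  const ThingData* data_;
};
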
 double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleDereference handle_dereference;
@@ -2789,6 +2986,22 @@ base::Optional<double> StringRef::ToNumber() {
   return data()->AsString()->to_number();
 }
 
+int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    return object()->constant_elements().length();
+  }
+  return data()->AsArrayBoilerplateDescription()->constants_elements_length();
+}
+
+int ObjectBoilerplateDescriptionRef::size() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    return object()->size();
+  }
+  return data()->AsObjectBoilerplateDescription()->size();
+}
+
 ObjectRef FixedArrayRef::get(int i) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleAllocation handle_allocation;
@@ -2954,11 +3167,13 @@ BIMODAL_ACCESSOR_C(BytecodeArray, interpreter::Register,
 
 BIMODAL_ACCESSOR(Cell, Object, value)
 
+BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count)
+
 BIMODAL_ACCESSOR(HeapObject, Map, map)
 
 BIMODAL_ACCESSOR(JSArray, Object, length)
 
-BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_target_function)
+BIMODAL_ACCESSOR(JSBoundFunction, JSReceiver, bound_target_function)
 BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_this)
 BIMODAL_ACCESSOR(JSBoundFunction, FixedArray, bound_arguments)
 
@@ -3003,6 +3218,7 @@ BIMODAL_ACCESSOR(Map, HeapObject, prototype)
 BIMODAL_ACCESSOR_C(Map, InstanceType, instance_type)
 BIMODAL_ACCESSOR(Map, Object, GetConstructor)
 BIMODAL_ACCESSOR(Map, HeapObject, GetBackPointer)
+BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
 
 #define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
   BIMODAL_ACCESSOR(NativeContext, type, name)
@@ -3047,7 +3263,7 @@ bool FunctionTemplateInfoRef::has_call_code() const {
 BIMODAL_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
 
 HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
-    MapRef receiver_map, bool serialize) {
+    MapRef receiver_map, SerializationPolicy policy) {
   const HolderLookupResult not_found;
 
   if (broker()->mode() == JSHeapBroker::kDisabled) {
@@ -3083,7 +3299,7 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
   if (lookup_it != fti_data->known_receivers().cend()) {
     return lookup_it->second;
   }
-  if (!serialize) {
+  if (policy == SerializationPolicy::kAssumeSerialized) {
     TRACE_BROKER_MISSING(broker(),
                          "holder for receiver with map " << receiver_map);
     return not_found;
@@ -3129,6 +3345,37 @@ BIMODAL_ACCESSOR_C(String, int, length)
 
 BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value)
 
+ObjectRef MapRef::GetStrongValue(int descriptor_index) const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    return ObjectRef(broker(),
+                     handle(object()->instance_descriptors().GetStrongValue(
+                                descriptor_index),
+                            broker()->isolate()));
+  }
+  return ObjectRef(broker(), data()->AsMap()->GetStrongValue(descriptor_index));
+}
+
+void MapRef::SerializeRootMap() {
+  if (broker()->mode() == JSHeapBroker::kDisabled) return;
+  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+  data()->AsMap()->SerializeRootMap(broker());
+}
+
+base::Optional<MapRef> MapRef::FindRootMap() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    return MapRef(broker(), handle(object()->FindRootMap(broker()->isolate()),
+                                   broker()->isolate()));
+  }
+  MapData* map_data = data()->AsMap()->FindRootMap();
+  if (map_data) {
+    return MapRef(broker(), map_data);
+  }
+  TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
+  return base::nullopt;
+}
+
 void* JSTypedArrayRef::external_pointer() const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleDereference allow_handle_dereference;
@@ -3297,7 +3544,7 @@ Maybe<double> ObjectRef::OddballToNumber() const {
 }
 
 base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
-    uint32_t index, bool serialize) const {
+    uint32_t index, SerializationPolicy policy) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     return (IsJSObject() || IsString())
                ? GetOwnElementFromHeap(broker(), object(), index, true)
@@ -3306,35 +3553,36 @@ base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
   ObjectData* element = nullptr;
   if (IsJSObject()) {
     element =
-        data()->AsJSObject()->GetOwnConstantElement(broker(), index, serialize);
+        data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
   } else if (IsString()) {
-    element = data()->AsString()->GetCharAsString(broker(), index, serialize);
+    element = data()->AsString()->GetCharAsString(broker(), index, policy);
   }
   if (element == nullptr) return base::nullopt;
   return ObjectRef(broker(), element);
 }
 
-base::Optional<ObjectRef> JSObjectRef::GetOwnProperty(
+base::Optional<ObjectRef> JSObjectRef::GetOwnDataProperty(
     Representation field_representation, FieldIndex index,
-    bool serialize) const {
+    SerializationPolicy policy) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
-    return GetOwnPropertyFromHeap(broker(), Handle<JSObject>::cast(object()),
-                                  field_representation, index);
+    return GetOwnDataPropertyFromHeap(broker(),
+                                      Handle<JSObject>::cast(object()),
+                                      field_representation, index);
   }
-  ObjectData* property = data()->AsJSObject()->GetOwnProperty(
-      broker(), field_representation, index, serialize);
+  ObjectData* property = data()->AsJSObject()->GetOwnDataProperty(
+      broker(), field_representation, index, policy);
   if (property == nullptr) return base::nullopt;
   return ObjectRef(broker(), property);
 }
 
-base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(uint32_t index,
-                                                       bool serialize) const {
+base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
+    uint32_t index, SerializationPolicy policy) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     if (!object()->elements().IsCowArray()) return base::nullopt;
     return GetOwnElementFromHeap(broker(), object(), index, false);
   }
 
-  if (serialize) {
+  if (policy == SerializationPolicy::kSerializeIfNeeded) {
     data()->AsJSObject()->SerializeElements(broker());
   } else if (!data()->AsJSObject()->serialized_elements()) {
     TRACE(broker(), "'elements' on " << this);
@@ -3343,7 +3591,7 @@ base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(uint32_t index,
   if (!elements().map().IsFixedCowArrayMap()) return base::nullopt;
 
   ObjectData* element =
-      data()->AsJSArray()->GetOwnElement(broker(), index, serialize);
+      data()->AsJSArray()->GetOwnElement(broker(), index, policy);
   if (element == nullptr) return base::nullopt;
   return ObjectRef(broker(), element);
 }
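
GetOwnCowElement shows this patch's recurring signature change: a bare `bool serialize` parameter becomes the two-valued SerializationPolicy (kAssumeSerialized vs. kSerializeIfNeeded), which keeps call sites self-describing and lets the compile-phase default be written once in the declaration. A standalone sketch of the idiom under those enumerator names (the cache itself is invented):

#include <iostream>
#include <optional>

enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };

// Invented one-entry cache; keys are assumed non-negative.
class LazyCache {
 public:
  std::optional<int> Get(int key,
                         SerializationPolicy policy =
                             SerializationPolicy::kAssumeSerialized) {
    if (key == cached_key_) return cached_value_;
    if (policy == SerializationPolicy::kAssumeSerialized) {
      // Compile phase: the entry was expected to exist; report the miss and
      // bail out, in the spirit of TRACE_BROKER_MISSING.
      std::cerr << "missing entry for key " << key << "\n";
      return std::nullopt;
    }
    // Serialization phase: compute and remember the entry now.
    cached_key_ = key;
    cached_value_ = key * 2;  // stand-in for real serialization work
    return cached_value_;
  }

 private:
  int cached_key_ = -1;  // -1 means empty
  int cached_value_ = 0;
};
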
@@ -3353,27 +3601,25 @@ double HeapNumberRef::value() const {
   return data()->AsHeapNumber()->value();
 }
 
-double MutableHeapNumberRef::value() const {
-  IF_BROKER_DISABLED_ACCESS_HANDLE_C(MutableHeapNumber, value);
-  return data()->AsMutableHeapNumber()->value();
-}
-
 uint64_t BigIntRef::AsUint64() const {
   IF_BROKER_DISABLED_ACCESS_HANDLE_C(BigInt, AsUint64);
   return data()->AsBigInt()->AsUint64();
 }
 
-CellRef SourceTextModuleRef::GetCell(int cell_index) const {
+base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleAllocation handle_allocation;
     AllowHandleDereference allow_handle_dereference;
     return CellRef(broker(),
                    handle(object()->GetCell(cell_index), broker()->isolate()));
   }
-  return CellRef(broker(), data()->AsSourceTextModule()->GetCell(cell_index));
+  CellData* cell = data()->AsSourceTextModule()->GetCell(broker(), cell_index);
+  if (cell == nullptr) return base::nullopt;
+  return CellRef(broker(), cell);
 }
 
-ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
+ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object,
+                     bool check_type)
     : broker_(broker) {
   switch (broker->mode()) {
     case JSHeapBroker::kSerialized:
@@ -3398,6 +3644,10 @@ ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
     case JSHeapBroker::kRetired:
       UNREACHABLE();
   }
+  if (!data_) {  // TODO(mslekova): Remove once we're on the background thread.
+    AllowHandleDereference handle_dereference;
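+    // Print the object to aid debugging, since the CHECK below will fail.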
+    object->Print();
+  }
   CHECK_WITH_MSG(data_ != nullptr, "Object is not known to the heap broker");
 }
 
@@ -3489,8 +3739,8 @@ Float64 FixedDoubleArrayData::Get(int i) const {
   return contents_[i];
 }
 
-void FeedbackVectorRef::SerializeSlots() {
-  data()->AsFeedbackVector()->SerializeSlots(broker());
+void FeedbackVectorRef::Serialize() {
+  data()->AsFeedbackVector()->Serialize(broker());
 }
 
 bool NameRef::IsUniqueName() const {
@@ -3597,8 +3847,13 @@ void JSFunctionRef::Serialize() {
   data()->AsJSFunction()->Serialize(broker());
 }
 
+bool JSBoundFunctionRef::serialized() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) return true;
+  return data()->AsJSBoundFunction()->serialized();
+}
+
 bool JSFunctionRef::serialized() const {
-  CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
+  if (broker()->mode() == JSHeapBroker::kDisabled) return true;
   return data()->AsJSFunction()->serialized();
 }
 
@@ -3614,10 +3869,9 @@ bool JSFunctionRef::IsSerializedForCompilation() const {
          shared().IsSerializedForCompilation(feedback_vector());
 }
 
-JSArrayRef SharedFunctionInfoRef::GetTemplateObject(ObjectRef description,
-                                                    FeedbackVectorRef vector,
-                                                    FeedbackSlot slot,
-                                                    bool serialize) {
+JSArrayRef SharedFunctionInfoRef::GetTemplateObject(
+    ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot,
+    SerializationPolicy policy) {
   // Look in the feedback vector for the array. A Smi indicates that it's
   // not yet cached here.
   ObjectRef candidate = vector.get(slot);
@@ -3632,22 +3886,22 @@ JSArrayRef SharedFunctionInfoRef::GetTemplateObject(ObjectRef description,
         Handle<TemplateObjectDescription>::cast(description.object());
     Handle<JSArray> template_object =
         TemplateObjectDescription::GetTemplateObject(
-            broker()->isolate(), broker()->native_context().object(), tod,
-            object(), slot.ToInt());
+            broker()->isolate(), broker()->target_native_context().object(),
+            tod, object(), slot.ToInt());
     return JSArrayRef(broker(), template_object);
   }
 
   JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot);
   if (array != nullptr) return JSArrayRef(broker(), array);
 
-  CHECK(serialize);
+  CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded);
   CHECK(broker()->SerializingAllowed());
 
   Handle<TemplateObjectDescription> tod =
       Handle<TemplateObjectDescription>::cast(description.object());
   Handle<JSArray> template_object =
       TemplateObjectDescription::GetTemplateObject(
-          broker()->isolate(), broker()->native_context().object(), tod,
+          broker()->isolate(), broker()->target_native_context().object(), tod,
           object(), slot.ToInt());
   array = broker()->GetOrCreateData(template_object)->AsJSArray();
   data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array);
@@ -3663,15 +3917,17 @@ void SharedFunctionInfoRef::SetSerializedForCompilation(
 
 void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
   CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
-
   data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker());
 }
 
 base::Optional<FunctionTemplateInfoRef>
 SharedFunctionInfoRef::function_template_info() const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
-    return FunctionTemplateInfoRef(
-        broker(), handle(object()->function_data(), broker()->isolate()));
+    if (object()->IsApiFunction()) {
+      return FunctionTemplateInfoRef(
+          broker(), handle(object()->function_data(), broker()->isolate()));
+    }
+    return base::nullopt;
   }
   FunctionTemplateInfoData* function_template_info =
       data()->AsSharedFunctionInfo()->function_template_info();
@@ -3703,6 +3959,16 @@ void MapRef::SerializeOwnDescriptor(int descriptor_index) {
   data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index);
 }
 
+bool MapRef::serialized_own_descriptor(int descriptor_index) const {
+  CHECK_LT(descriptor_index, NumberOfOwnDescriptors());
+  if (broker()->mode() == JSHeapBroker::kDisabled) return true;
+  DescriptorArrayData* desc_array_data =
+      data()->AsMap()->instance_descriptors();
+  if (!desc_array_data) return false;
+  return desc_array_data->contents().find(descriptor_index) !=
+         desc_array_data->contents().end();
+}
+
 void MapRef::SerializeBackPointer() {
   if (broker()->mode() == JSHeapBroker::kDisabled) return;
   CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -3762,13 +4028,13 @@ void FunctionTemplateInfoRef::SerializeCallCode() {
 }
 
 base::Optional<PropertyCellRef> JSGlobalProxyRef::GetPropertyCell(
-    NameRef const& name, bool serialize) const {
+    NameRef const& name, SerializationPolicy policy) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     return GetPropertyCellFromHeap(broker(), name.object());
   }
   PropertyCellData* property_cell_data =
-      data()->AsJSGlobalProxy()->GetPropertyCell(
-          broker(), name.data()->AsName(), serialize);
+      data()->AsJSGlobalProxy()->GetPropertyCell(broker(),
+                                                 name.data()->AsName(), policy);
   if (property_cell_data == nullptr) return base::nullopt;
   return PropertyCellRef(broker(), property_cell_data);
 }
@@ -3787,64 +4053,133 @@ bool CanInlineElementAccess(MapRef const& map) {
   return false;
 }
 
-InsufficientFeedback::InsufficientFeedback()
-    : ProcessedFeedback(kInsufficient) {}
+ProcessedFeedback::ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind)
+    : kind_(kind), slot_kind_(slot_kind) {}
+
+KeyedAccessMode ElementAccessFeedback::keyed_mode() const {
+  return keyed_mode_;
+}
+
+ZoneVector<ElementAccessFeedback::TransitionGroup> const&
+ElementAccessFeedback::transition_groups() const {
+  return transition_groups_;
+}
+
+ElementAccessFeedback const& ElementAccessFeedback::Refine(
+    ZoneVector<Handle<Map>> const& inferred_maps, Zone* zone) const {
+  ElementAccessFeedback& refined_feedback =
+      *new (zone) ElementAccessFeedback(zone, keyed_mode(), slot_kind());
+  if (inferred_maps.empty()) return refined_feedback;
+
+  ZoneUnorderedSet<Handle<Map>, Handle<Map>::hash, Handle<Map>::equal_to>
+      inferred(zone);
+  inferred.insert(inferred_maps.begin(), inferred_maps.end());
+
+  for (auto const& group : transition_groups()) {
+    DCHECK(!group.empty());
+    TransitionGroup new_group(zone);
+    for (size_t i = 1; i < group.size(); ++i) {
+      Handle<Map> source = group[i];
+      if (inferred.find(source) != inferred.end()) {
+        new_group.push_back(source);
+      }
+    }
+
+    Handle<Map> target = group.front();
+    bool const keep_target =
+        inferred.find(target) != inferred.end() || new_group.size() > 1;
+    if (keep_target) {
+      new_group.push_back(target);
+      // The target must be at the front; the order of sources doesn't matter.
+      std::swap(new_group[0], new_group[new_group.size() - 1]);
+    }
+
+    if (!new_group.empty()) {
+      DCHECK(new_group.size() == 1 || new_group.front().equals(target));
+      refined_feedback.transition_groups_.push_back(std::move(new_group));
+    }
+  }
+  return refined_feedback;
+}
+
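
The refinement rule above: a source map survives only if it is in the inferred set; the group's target survives if it was itself inferred or if more than one source survived; a single surviving source is kept as a singleton group, i.e. the transition to the target is dropped. A toy re-implementation over strings (plain std containers instead of Zone types) makes the rule concrete:

#include <algorithm>
#include <string>
#include <unordered_set>
#include <vector>

// A group stores its transition target first, then the source maps.
using Group = std::vector<std::string>;

std::vector<Group> Refine(const std::vector<Group>& groups,
                          const std::unordered_set<std::string>& inferred) {
  std::vector<Group> result;
  for (const Group& group : groups) {
    Group new_group;
    for (size_t i = 1; i < group.size(); ++i) {  // the sources
      if (inferred.count(group[i])) new_group.push_back(group[i]);
    }
    const std::string& target = group.front();
    bool keep_target = inferred.count(target) > 0 || new_group.size() > 1;
    if (keep_target) {
      new_group.push_back(target);
      std::swap(new_group.front(), new_group.back());  // target goes first
    }
    if (!new_group.empty()) result.push_back(std::move(new_group));
  }
  return result;
}

// With one group {A <- B, C}: inferring {B, C} yields {A, B, C};
// inferring {B} alone yields just {B}; inferring {A} alone yields {A}.
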
+InsufficientFeedback::InsufficientFeedback(FeedbackSlotKind slot_kind)
+    : ProcessedFeedback(kInsufficient, slot_kind) {}
 
-GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell)
-    : ProcessedFeedback(kGlobalAccess),
+GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
+                                           FeedbackSlotKind slot_kind)
+    : ProcessedFeedback(kGlobalAccess, slot_kind),
       cell_or_context_(cell),
-      index_and_immutable_(0 /* doesn't matter */) {}
+      index_and_immutable_(0 /* doesn't matter */) {
+  DCHECK(IsGlobalICKind(slot_kind));
+}
+
+GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
+    : ProcessedFeedback(kGlobalAccess, slot_kind),
+      index_and_immutable_(0 /* doesn't matter */) {
+  DCHECK(IsGlobalICKind(slot_kind));
+}
 
 GlobalAccessFeedback::GlobalAccessFeedback(ContextRef script_context,
-                                           int slot_index, bool immutable)
-    : ProcessedFeedback(kGlobalAccess),
+                                           int slot_index, bool immutable,
+                                           FeedbackSlotKind slot_kind)
+    : ProcessedFeedback(kGlobalAccess, slot_kind),
       cell_or_context_(script_context),
       index_and_immutable_(FeedbackNexus::SlotIndexBits::encode(slot_index) |
                            FeedbackNexus::ImmutabilityBit::encode(immutable)) {
   DCHECK_EQ(this->slot_index(), slot_index);
   DCHECK_EQ(this->immutable(), immutable);
+  DCHECK(IsGlobalICKind(slot_kind));
 }
 
+bool GlobalAccessFeedback::IsMegamorphic() const {
+  return !cell_or_context_.has_value();
+}
 bool GlobalAccessFeedback::IsPropertyCell() const {
-  return cell_or_context_.IsPropertyCell();
+  return cell_or_context_.has_value() && cell_or_context_->IsPropertyCell();
+}
+bool GlobalAccessFeedback::IsScriptContextSlot() const {
+  return cell_or_context_.has_value() && cell_or_context_->IsContext();
 }
 PropertyCellRef GlobalAccessFeedback::property_cell() const {
-  DCHECK(IsPropertyCell());
-  return cell_or_context_.AsPropertyCell();
+  CHECK(IsPropertyCell());
+  return cell_or_context_->AsPropertyCell();
 }
 ContextRef GlobalAccessFeedback::script_context() const {
-  DCHECK(IsScriptContextSlot());
-  return cell_or_context_.AsContext();
+  CHECK(IsScriptContextSlot());
+  return cell_or_context_->AsContext();
 }
 int GlobalAccessFeedback::slot_index() const {
-  CHECK(IsScriptContextSlot());
+  DCHECK(IsScriptContextSlot());
   return FeedbackNexus::SlotIndexBits::decode(index_and_immutable_);
 }
 bool GlobalAccessFeedback::immutable() const {
-  CHECK(IsScriptContextSlot());
+  DCHECK(IsScriptContextSlot());
   return FeedbackNexus::ImmutabilityBit::decode(index_and_immutable_);
 }
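
slot_index and immutable above unpack a script-context location that was encoded into a single integer payload via FeedbackNexus bitfields (SlotIndexBits, ImmutabilityBit); ReadFeedbackForGlobalAccess later decodes the same payload straight from the feedback Smi. A standalone sketch of the pack/unpack idea, with invented field widths (V8's real bitfield layout differs):

#include <cassert>
#include <cstdint>

// Invented layout: 12 bits context index, 12 bits slot index, 1 flag bit.
struct PackedSlot {
  static constexpr uint32_t kContextBits = 12;
  static constexpr uint32_t kSlotBits = 12;

  static uint32_t Encode(uint32_t context, uint32_t slot, bool immutable) {
    assert(context < (1u << kContextBits));
    assert(slot < (1u << kSlotBits));
    return context | (slot << kContextBits) |
           (static_cast<uint32_t>(immutable) << (kContextBits + kSlotBits));
  }
  static uint32_t ContextIndex(uint32_t p) {
    return p & ((1u << kContextBits) - 1);
  }
  static uint32_t SlotIndex(uint32_t p) {
    return (p >> kContextBits) & ((1u << kSlotBits) - 1);
  }
  static bool Immutable(uint32_t p) {
    return ((p >> (kContextBits + kSlotBits)) & 1u) != 0;
  }
};
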
 
 base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const {
-  if (IsScriptContextSlot()) {
-    if (immutable()) return script_context().get(slot_index());
-  } else {
+  if (IsPropertyCell()) {
     return property_cell().value();
+  } else if (IsScriptContextSlot() && immutable()) {
+    return script_context().get(slot_index());
+  } else {
+    return base::nullopt;
   }
-  return {};
 }
 
 KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) {
-  if (IsKeyedLoadICKind(nexus.kind())) {
+  FeedbackSlotKind kind = nexus.kind();
+  if (IsKeyedLoadICKind(kind)) {
     return KeyedAccessMode(AccessMode::kLoad, nexus.GetKeyedAccessLoadMode());
   }
-  if (IsKeyedHasICKind(nexus.kind())) {
+  if (IsKeyedHasICKind(kind)) {
     return KeyedAccessMode(AccessMode::kHas, nexus.GetKeyedAccessLoadMode());
   }
-  if (IsKeyedStoreICKind(nexus.kind())) {
+  if (IsKeyedStoreICKind(kind)) {
     return KeyedAccessMode(AccessMode::kStore, nexus.GetKeyedAccessStoreMode());
   }
-  if (IsStoreInArrayLiteralICKind(nexus.kind())) {
+  if (IsStoreInArrayLiteralICKind(kind) ||
+      IsStoreDataPropertyInLiteralKind(kind)) {
     return KeyedAccessMode(AccessMode::kStoreInLiteral,
                            nexus.GetKeyedAccessStoreMode());
   }
@@ -3890,59 +4225,40 @@ KeyedAccessMode::KeyedAccessMode(AccessMode access_mode,
 }
 
 ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
-                                             KeyedAccessMode const& keyed_mode)
-    : ProcessedFeedback(kElementAccess),
-      receiver_maps(zone),
-      transitions(zone),
-      keyed_mode(keyed_mode) {}
-
-ElementAccessFeedback::MapIterator::MapIterator(
-    ElementAccessFeedback const& processed, JSHeapBroker* broker)
-    : processed_(processed), broker_(broker) {
-  CHECK_LT(processed.receiver_maps.size(),
-           std::numeric_limits<size_t>::max() - processed.transitions.size());
-}
-
-bool ElementAccessFeedback::MapIterator::done() const {
-  return index_ >=
-         processed_.receiver_maps.size() + processed_.transitions.size();
-}
-
-void ElementAccessFeedback::MapIterator::advance() { index_++; }
-
-MapRef ElementAccessFeedback::MapIterator::current() const {
-  CHECK(!done());
-  size_t receiver_maps_size = processed_.receiver_maps.size();
-  Handle<Map> map;
-  if (index_ < receiver_maps_size) {
-    map = processed_.receiver_maps[index_];
-  } else {
-    map = processed_.transitions[index_ - receiver_maps_size].first;
+                                             KeyedAccessMode const& keyed_mode,
+                                             FeedbackSlotKind slot_kind)
+    : ProcessedFeedback(kElementAccess, slot_kind),
+      keyed_mode_(keyed_mode),
+      transition_groups_(zone) {
+  DCHECK(IsKeyedLoadICKind(slot_kind) || IsKeyedHasICKind(slot_kind) ||
+         IsStoreDataPropertyInLiteralKind(slot_kind) ||
+         IsKeyedStoreICKind(slot_kind) ||
+         IsStoreInArrayLiteralICKind(slot_kind));
+}
+
+bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
+  for (auto const& group : transition_groups()) {
+    for (Handle<Map> map : group) {
+      if (!MapRef(broker, map).IsStringMap()) return false;
+    }
   }
-  return MapRef(broker_, map);
-}
-
-ElementAccessFeedback::MapIterator ElementAccessFeedback::all_maps(
-    JSHeapBroker* broker) const {
-  return MapIterator(*this, broker);
+  return true;
 }
 
-NamedAccessFeedback::NamedAccessFeedback(
-    NameRef const& name, ZoneVector<PropertyAccessInfo> const& access_infos)
-    : ProcessedFeedback(kNamedAccess),
-      name_(name),
-      access_infos_(access_infos) {
-  CHECK(!access_infos.empty());
+NamedAccessFeedback::NamedAccessFeedback(NameRef const& name,
+                                         ZoneVector<Handle<Map>> const& maps,
+                                         FeedbackSlotKind slot_kind)
+    : ProcessedFeedback(kNamedAccess, slot_kind), name_(name), maps_(maps) {
+  DCHECK(IsLoadICKind(slot_kind) || IsStoreICKind(slot_kind) ||
+         IsStoreOwnICKind(slot_kind) || IsKeyedLoadICKind(slot_kind) ||
+         IsKeyedHasICKind(slot_kind) || IsKeyedStoreICKind(slot_kind) ||
+         IsStoreInArrayLiteralICKind(slot_kind) ||
+         IsStoreDataPropertyInLiteralKind(slot_kind));
 }
 
-FeedbackSource::FeedbackSource(FeedbackNexus const& nexus)
-    : vector(nexus.vector_handle()), slot(nexus.slot()) {}
-
-FeedbackSource::FeedbackSource(VectorSlotPair const& pair)
-    : vector(pair.vector()), slot(pair.slot()) {}
-
 void JSHeapBroker::SetFeedback(FeedbackSource const& source,
                                ProcessedFeedback const* feedback) {
+  CHECK(source.IsValid());
   auto insertion = feedback_.insert({source, feedback});
   CHECK(insertion.second);
 }
@@ -3951,80 +4267,90 @@ bool JSHeapBroker::HasFeedback(FeedbackSource const& source) const {
   return feedback_.find(source) != feedback_.end();
 }
 
-ProcessedFeedback const* JSHeapBroker::GetFeedback(
+ProcessedFeedback const& JSHeapBroker::GetFeedback(
     FeedbackSource const& source) const {
+  DCHECK(source.IsValid());
   auto it = feedback_.find(source);
   CHECK_NE(it, feedback_.end());
-  return it->second;
+  return *it->second;
 }
 
-GlobalAccessFeedback const* JSHeapBroker::GetGlobalAccessFeedback(
+FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind(
     FeedbackSource const& source) const {
-  ProcessedFeedback const* feedback = GetFeedback(source);
-  if (feedback == nullptr) return nullptr;
-  CHECK_EQ(feedback->kind(), ProcessedFeedback::kGlobalAccess);
-  return static_cast<GlobalAccessFeedback const*>(feedback);
-}
-
-ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
-    MapHandles const& maps, KeyedAccessMode const& keyed_mode) {
-  DCHECK(!maps.empty());
-
-  // Collect possible transition targets.
-  MapHandles possible_transition_targets;
-  possible_transition_targets.reserve(maps.size());
-  for (Handle<Map> map : maps) {
-    if (CanInlineElementAccess(MapRef(this, map)) &&
-        IsFastElementsKind(map->elements_kind()) &&
-        GetInitialFastElementsKind() != map->elements_kind()) {
-      possible_transition_targets.push_back(map);
-    }
+  if (FLAG_concurrent_inlining) {
+    ProcessedFeedback const& processed = GetFeedback(source);
+    return processed.slot_kind();
   }
+  FeedbackNexus nexus(source.vector, source.slot);
+  return nexus.kind();
+}
 
-  ElementAccessFeedback* result =
-      new (zone()) ElementAccessFeedback(zone(), keyed_mode);
+bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
+  return FLAG_concurrent_inlining
+             ? GetFeedback(source).IsInsufficient()
+             : FeedbackNexus(source.vector, source.slot).IsUninitialized();
+}
 
-  // Separate the actual receiver maps and the possible transition sources.
+namespace {
+MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapHandles const& maps) {
+  MapHandles result;
   for (Handle<Map> map : maps) {
-    // Don't generate elements kind transitions from stable maps.
-    Map transition_target = map->is_stable()
-                                ? Map()
-                                : map->FindElementsKindTransitionedMap(
-                                      isolate(), possible_transition_targets);
-    if (transition_target.is_null()) {
-      result->receiver_maps.push_back(map);
-    } else {
-      result->transitions.emplace_back(map,
-                                       handle(transition_target, isolate()));
+    if (Map::TryUpdate(isolate, map).ToHandle(&map) &&
+        !map->is_abandoned_prototype_map()) {
+      DCHECK(!map->is_deprecated());
+      result.push_back(map);
     }
   }
+  return result;
+}
+}  // namespace
 
-#ifdef ENABLE_SLOW_DCHECKS
-  // No transition sources appear in {receiver_maps}.
-  // All transition targets appear in {receiver_maps}.
-  for (auto& transition : result->transitions) {
-    CHECK(std::none_of(
-        result->receiver_maps.cbegin(), result->receiver_maps.cend(),
-        [&](Handle<Map> map) { return map.equals(transition.first); }));
-    CHECK(std::any_of(
-        result->receiver_maps.cbegin(), result->receiver_maps.cend(),
-        [&](Handle<Map> map) { return map.equals(transition.second); }));
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
+    FeedbackSource const& source, AccessMode mode,
+    base::Optional<NameRef> static_name) {
+  FeedbackNexus nexus(source.vector, source.slot);
+  FeedbackSlotKind kind = nexus.kind();
+  if (nexus.IsUninitialized()) return *new (zone()) InsufficientFeedback(kind);
+
+  MapHandles maps;
+  nexus.ExtractMaps(&maps);
+  DCHECK_NE(nexus.ic_state(), PREMONOMORPHIC);
+  if (!maps.empty()) {
+    maps = GetRelevantReceiverMaps(isolate(), maps);
+    if (maps.empty()) return *new (zone()) InsufficientFeedback(kind);
+  }
+
+  base::Optional<NameRef> name =
+      static_name.has_value() ? static_name : GetNameFeedback(nexus);
+  if (name.has_value()) {
+    return *new (zone()) NamedAccessFeedback(
+        *name, ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()), kind);
+  } else if (nexus.GetKeyType() == ELEMENT && !maps.empty()) {
+    return ProcessFeedbackMapsForElementAccess(
+        maps, KeyedAccessMode::FromNexus(nexus), kind);
+  } else {
+    // No actionable feedback.
+    DCHECK(maps.empty());
+    // TODO(neis): Investigate if we really want to treat cleared feedback
+    // the same as megamorphic feedback (also for global accesses).
+    // TODO(neis): Using ElementAccessFeedback here is kind of an abuse.
+    return *new (zone())
+        ElementAccessFeedback(zone(), KeyedAccessMode::FromNexus(nexus), kind);
   }
-#endif
-  CHECK(!result->receiver_maps.empty());
-
-  return result;
 }
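
ReadFeedbackForPropertyAccess classifies a slot in a fixed order: uninitialized feedback is insufficient; feedback whose receiver maps all died during filtering is insufficient; a name (static or from the nexus) produces NamedAccessFeedback; an element key with live maps produces ElementAccessFeedback; anything else falls through to the cleared/megamorphic case. The branch order, distilled into a standalone decision function (booleans stand in for the real nexus queries):

enum class FeedbackKind {
  kInsufficient, kNamedAccess, kElementAccess, kMegamorphic
};

// Mirrors the branch order of ReadFeedbackForPropertyAccess above.
FeedbackKind ClassifyPropertyAccess(bool uninitialized, bool all_maps_stale,
                                    bool have_name, bool element_key,
                                    bool have_live_maps) {
  if (uninitialized) return FeedbackKind::kInsufficient;
  if (all_maps_stale) return FeedbackKind::kInsufficient;
  if (have_name) return FeedbackKind::kNamedAccess;
  if (element_key && have_live_maps) return FeedbackKind::kElementAccess;
  return FeedbackKind::kMegamorphic;  // modeled as an empty element group
}
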
 
-GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess(
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
     FeedbackSource const& source) {
   FeedbackNexus nexus(source.vector, source.slot);
   DCHECK(nexus.kind() == FeedbackSlotKind::kLoadGlobalInsideTypeof ||
          nexus.kind() == FeedbackSlotKind::kLoadGlobalNotInsideTypeof ||
          nexus.kind() == FeedbackSlotKind::kStoreGlobalSloppy ||
          nexus.kind() == FeedbackSlotKind::kStoreGlobalStrict);
+  if (nexus.IsUninitialized()) {
+    return *new (zone()) InsufficientFeedback(nexus.kind());
+  }
   if (nexus.ic_state() != MONOMORPHIC || nexus.GetFeedback()->IsCleared()) {
-    return nullptr;
+    return *new (zone()) GlobalAccessFeedback(nexus.kind());
   }
 
   Handle<Object> feedback_value(nexus.GetFeedback()->GetHeapObjectOrSmi(),
@@ -4039,7 +4365,7 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess(
     int const context_slot_index = FeedbackNexus::SlotIndexBits::decode(number);
     bool const immutable = FeedbackNexus::ImmutabilityBit::decode(number);
     Handle<Context> context = ScriptContextTable::GetContext(
-        isolate(), native_context().script_context_table().object(),
+        isolate(), target_native_context().script_context_table().object(),
         script_context_index);
     {
       ObjectRef contents(this,
@@ -4049,10 +4375,11 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess(
     }
     ContextRef context_ref(this, context);
     if (immutable) {
-      context_ref.get(context_slot_index, true);
+      context_ref.get(context_slot_index,
+                      SerializationPolicy::kSerializeIfNeeded);
     }
-    return new (zone())
-        GlobalAccessFeedback(context_ref, context_slot_index, immutable);
+    return *new (zone()) GlobalAccessFeedback(context_ref, context_slot_index,
+                                              immutable, nexus.kind());
   }
 
   CHECK(feedback_value->IsPropertyCell());
@@ -4060,11 +4387,275 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess(
   // object and the feedback is the cell holding its value.
   PropertyCellRef cell(this, Handle<PropertyCell>::cast(feedback_value));
   cell.Serialize();
-  return new (zone()) GlobalAccessFeedback(cell);
+  return *new (zone()) GlobalAccessFeedback(cell, nexus.kind());
+}
+
+BinaryOperationHint JSHeapBroker::ReadFeedbackForBinaryOperation(
+    FeedbackSource const& source) const {
+  return FeedbackNexus(source.vector, source.slot).GetBinaryOperationFeedback();
+}
+
+CompareOperationHint JSHeapBroker::ReadFeedbackForCompareOperation(
+    FeedbackSource const& source) const {
+  return FeedbackNexus(source.vector, source.slot)
+      .GetCompareOperationFeedback();
+}
+
+ForInHint JSHeapBroker::ReadFeedbackForForIn(
+    FeedbackSource const& source) const {
+  return FeedbackNexus(source.vector, source.slot).GetForInFeedback();
+}
+
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
+    FeedbackSource const& source) {
+  FeedbackNexus nexus(source.vector, source.slot);
+  if (nexus.IsUninitialized())
+    return *new (zone()) InsufficientFeedback(nexus.kind());
+
+  base::Optional<JSObjectRef> optional_constructor;
+  {
+    MaybeHandle<JSObject> maybe_constructor = nexus.GetConstructorFeedback();
+    Handle<JSObject> constructor;
+    if (maybe_constructor.ToHandle(&constructor)) {
+      optional_constructor = JSObjectRef(this, constructor);
+    }
+  }
+  return *new (zone()) InstanceOfFeedback(optional_constructor, nexus.kind());
+}
+
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
+    FeedbackSource const& source) {
+  FeedbackNexus nexus(source.vector, source.slot);
+  if (nexus.IsUninitialized())
+    return *new (zone()) InsufficientFeedback(nexus.kind());
+
+  base::Optional<HeapObjectRef> target_ref;
+  {
+    MaybeObject maybe_target = nexus.GetFeedback();
+    HeapObject target_object;
+    if (maybe_target->GetHeapObject(&target_object)) {
+      target_ref = HeapObjectRef(this, handle(target_object, isolate()));
+    }
+  }
+  float frequency = nexus.ComputeCallFrequency();
+  SpeculationMode mode = nexus.GetSpeculationMode();
+  return *new (zone()) CallFeedback(target_ref, frequency, mode, nexus.kind());
+}
+
+BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
+    FeedbackSource const& source) {
+  ProcessedFeedback const& feedback =
+      FLAG_concurrent_inlining ? GetFeedback(source)
+                               : ProcessFeedbackForBinaryOperation(source);
+  return feedback.IsInsufficient() ? BinaryOperationHint::kNone
+                                   : feedback.AsBinaryOperation().value();
+}
+
+CompareOperationHint JSHeapBroker::GetFeedbackForCompareOperation(
+    FeedbackSource const& source) {
+  ProcessedFeedback const& feedback =
+      FLAG_concurrent_inlining ? GetFeedback(source)
+                               : ProcessFeedbackForCompareOperation(source);
+  return feedback.IsInsufficient() ? CompareOperationHint::kNone
+                                   : feedback.AsCompareOperation().value();
+}
+
+ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) {
+  ProcessedFeedback const& feedback = FLAG_concurrent_inlining
+                                          ? GetFeedback(source)
+                                          : ProcessFeedbackForForIn(source);
+  return feedback.IsInsufficient() ? ForInHint::kNone
+                                   : feedback.AsForIn().value();
+}
+
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess(
+    FeedbackSource const& source, AccessMode mode,
+    base::Optional<NameRef> static_name) {
+  return FLAG_concurrent_inlining
+             ? GetFeedback(source)
+             : ProcessFeedbackForPropertyAccess(source, mode, static_name);
+}
+
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf(
+    FeedbackSource const& source) {
+  return FLAG_concurrent_inlining ? GetFeedback(source)
+                                  : ProcessFeedbackForInstanceOf(source);
+}
+
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall(
+    FeedbackSource const& source) {
+  return FLAG_concurrent_inlining ? GetFeedback(source)
+                                  : ProcessFeedbackForCall(source);
+}
+
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
+    FeedbackSource const& source) {
+  return FLAG_concurrent_inlining ? GetFeedback(source)
+                                  : ProcessFeedbackForGlobalAccess(source);
+}
+
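
The Get*/Process*/Read* triples above all encode the same contract: Read* is the only layer that touches the raw FeedbackNexus, Process* memoizes the decoded result in the broker, and Get* either trusts the memoized copy (under FLAG_concurrent_inlining, when the nexus may no longer be read) or processes on demand. A reduced standalone model of the three layers, with one invented feedback kind:

#include <cassert>
#include <map>

struct Feedback { int hint; };

class FeedbackBroker {
 public:
  explicit FeedbackBroker(bool concurrent_inlining)
      : concurrent_inlining_(concurrent_inlining) {}

  // Get*: what compiler phases call. Under concurrent inlining the slot
  // must already have been processed during serialization.
  const Feedback& GetFeedback(int slot) {
    return concurrent_inlining_ ? Cached(slot) : ProcessFeedback(slot);
  }

  // Process*: memoize the decoded feedback; the first caller wins.
  const Feedback& ProcessFeedback(int slot) {
    auto it = cache_.find(slot);
    if (it != cache_.end()) return it->second;
    return cache_.emplace(slot, ReadFeedback(slot)).first->second;
  }

 private:
  // Read*: the only layer that touches the raw feedback storage.
  Feedback ReadFeedback(int slot) const { return Feedback{slot * 10}; }

  const Feedback& Cached(int slot) const {
    auto it = cache_.find(slot);
    assert(it != cache_.end() && "slot not processed before compilation");
    return it->second;
  }

  bool concurrent_inlining_;
  std::map<int, Feedback> cache_;
};
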
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForBinaryOperation(
+    FeedbackSource const& source) {
+  if (HasFeedback(source)) return GetFeedback(source);
+  BinaryOperationHint hint = ReadFeedbackForBinaryOperation(source);
+  ProcessedFeedback const* feedback;
+  if (hint == BinaryOperationHint::kNone) {
+    feedback =
+        new (zone()) InsufficientFeedback(source.vector->GetKind(source.slot));
+  } else {
+    feedback = new (zone())
+        BinaryOperationFeedback(hint, source.vector->GetKind(source.slot));
+  }
+  SetFeedback(source, feedback);
+  return *feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCompareOperation(
+    FeedbackSource const& source) {
+  if (HasFeedback(source)) return GetFeedback(source);
+  CompareOperationHint hint = ReadFeedbackForCompareOperation(source);
+  ProcessedFeedback const* feedback;
+  if (hint == CompareOperationHint::kNone) {
+    feedback =
+        new (zone()) InsufficientFeedback(source.vector->GetKind(source.slot));
+  } else {
+    feedback = new (zone())
+        CompareOperationFeedback(hint, source.vector->GetKind(source.slot));
+  }
+  SetFeedback(source, feedback);
+  return *feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForForIn(
+    FeedbackSource const& source) {
+  if (HasFeedback(source)) return GetFeedback(source);
+  ForInHint hint = ReadFeedbackForForIn(source);
+  ProcessedFeedback const* feedback;
+  if (hint == ForInHint::kNone) {
+    feedback =
+        new (zone()) InsufficientFeedback(source.vector->GetKind(source.slot));
+  } else {
+    feedback =
+        new (zone()) ForInFeedback(hint, source.vector->GetKind(source.slot));
+  }
+  SetFeedback(source, feedback);
+  return *feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForPropertyAccess(
+    FeedbackSource const& source, AccessMode mode,
+    base::Optional<NameRef> static_name) {
+  if (HasFeedback(source)) return GetFeedback(source);
+  ProcessedFeedback const& feedback =
+      ReadFeedbackForPropertyAccess(source, mode, static_name);
+  SetFeedback(source, &feedback);
+  return feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForInstanceOf(
+    FeedbackSource const& source) {
+  if (HasFeedback(source)) return GetFeedback(source);
+  ProcessedFeedback const& feedback = ReadFeedbackForInstanceOf(source);
+  SetFeedback(source, &feedback);
+  return feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCall(
+    FeedbackSource const& source) {
+  if (HasFeedback(source)) return GetFeedback(source);
+  ProcessedFeedback const& feedback = ReadFeedbackForCall(source);
+  SetFeedback(source, &feedback);
+  return feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForGlobalAccess(
+    FeedbackSource const& source) {
+  if (HasFeedback(source)) return GetFeedback(source);
+  ProcessedFeedback const& feedback = ReadFeedbackForGlobalAccess(source);
+  SetFeedback(source, &feedback);
+  return feedback;
+}
+
+ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
+    MapHandles const& maps, KeyedAccessMode const& keyed_mode,
+    FeedbackSlotKind slot_kind) {
+  DCHECK(!maps.empty());
+
+  // Collect possible transition targets.
+  MapHandles possible_transition_targets;
+  possible_transition_targets.reserve(maps.size());
+  for (Handle<Map> map : maps) {
+    MapRef map_ref(this, map);
+    map_ref.SerializeRootMap();
+
+    if (CanInlineElementAccess(map_ref) &&
+        IsFastElementsKind(map->elements_kind()) &&
+        GetInitialFastElementsKind() != map->elements_kind()) {
+      possible_transition_targets.push_back(map);
+    }
+  }
+
+  using TransitionGroup = ElementAccessFeedback::TransitionGroup;
+  ZoneUnorderedMap<Handle<Map>, TransitionGroup, Handle<Map>::hash,
+                   Handle<Map>::equal_to>
+      transition_groups(zone());
+
+  // Separate the actual receiver maps and the possible transition sources.
+  for (Handle<Map> map : maps) {
+    // Don't generate elements kind transitions from stable maps.
+    Map transition_target = map->is_stable()
+                                ? Map()
+                                : map->FindElementsKindTransitionedMap(
+                                      isolate(), possible_transition_targets);
+    if (transition_target.is_null()) {
+      TransitionGroup group(1, map, zone());
+      transition_groups.insert({map, group});
+    } else {
+      Handle<Map> target(transition_target, isolate());
+      TransitionGroup new_group(1, target, zone());
+      TransitionGroup& actual_group =
+          transition_groups.insert({target, new_group}).first->second;
+      actual_group.push_back(map);
+    }
+  }
+
+  ElementAccessFeedback* result =
+      new (zone()) ElementAccessFeedback(zone(), keyed_mode, slot_kind);
+  for (auto entry : transition_groups) {
+    result->AddGroup(std::move(entry.second));
+  }
+
+  CHECK(!result->transition_groups().empty());
+  return *result;
+}
+
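
The grouping step above keys each group by its transition target: a map with no usable elements-kind transition starts (or joins) a group as the target itself, while a transitioning map is appended behind its target, so every group ends up with the target at the front followed by its sources. The same bookkeeping in plain C++ (strings instead of map handles, std::unordered_map instead of ZoneUnorderedMap):

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using Group = std::vector<std::string>;  // target first, then sources

// Each input pair is (map, transition target), with "" meaning no target.
std::vector<Group> BuildTransitionGroups(
    const std::vector<std::pair<std::string, std::string>>& maps) {
  std::unordered_map<std::string, Group> by_target;
  for (const auto& [map, target] : maps) {
    if (target.empty()) {
      by_target.emplace(map, Group{map});  // the map is its own target
    } else {
      // Create a fresh {target} group only if none exists, then append.
      auto it = by_target.emplace(target, Group{target}).first;
      it->second.push_back(map);
    }
  }
  std::vector<Group> groups;
  for (auto& entry : by_target) groups.push_back(std::move(entry.second));
  return groups;
}
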
+void ElementAccessFeedback::AddGroup(TransitionGroup&& group) {
+  CHECK(!group.empty());
+  transition_groups_.push_back(std::move(group));
+
+#ifdef ENABLE_SLOW_DCHECKS
+  // Check that each of the group's maps occurs exactly once in the whole
+  // feedback. This implies that "a source is not a target".
+  for (Handle<Map> map : group) {
+    int count = 0;
+    for (TransitionGroup const& some_group : transition_groups()) {
+      count += std::count_if(
+          some_group.begin(), some_group.end(),
+          [&](Handle<Map> some_map) { return some_map.equals(map); });
+    }
+    CHECK_EQ(count, 1);
+  }
+#endif
 }
 
 std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
-  return os << ref.data();
+  if (ref.broker()->mode() == JSHeapBroker::kDisabled) {
+    // If the broker is disabled, we cannot be in a background thread, so it
+    // is safe to read the heap.
+    return os << ref.data() << " {" << ref.object() << "}";
+  } else {
+    return os << ref.data();
+  }
 }
 
 base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
@@ -4074,67 +4665,77 @@ base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
   return NameRef(this, handle(raw_name, isolate()));
 }
 
-PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingThen(MapRef map) {
-  auto access_info = ais_for_loading_then_.find(map);
-  if (access_info == ais_for_loading_then_.end()) {
-    TRACE_BROKER_MISSING(
-        this, "access info for reducing JSResolvePromise with map " << map);
+PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
+    MapRef map, NameRef name, AccessMode access_mode,
+    CompilationDependencies* dependencies, SerializationPolicy policy) {
+  PropertyAccessTarget target({map, name, access_mode});
+  auto it = property_access_infos_.find(target);
+  if (it != property_access_infos_.end()) return it->second;
+
+  if (policy == SerializationPolicy::kAssumeSerialized) {
+    TRACE_BROKER_MISSING(this, "PropertyAccessInfo for "
+                                   << access_mode << " of property " << name
+                                   << " on map " << map);
     return PropertyAccessInfo::Invalid(zone());
   }
-  return access_info->second;
-}
 
-void JSHeapBroker::CreateAccessInfoForLoadingThen(
-    MapRef map, CompilationDependencies* dependencies) {
-  auto access_info = ais_for_loading_then_.find(map);
-  if (access_info == ais_for_loading_then_.end()) {
-    AccessInfoFactory access_info_factory(this, dependencies, zone());
-    Handle<Name> then_string = isolate()->factory()->then_string();
-    ais_for_loading_then_.insert(
-        std::make_pair(map, access_info_factory.ComputePropertyAccessInfo(
-                                map.object(), then_string, AccessMode::kLoad)));
+  CHECK_NOT_NULL(dependencies);
+  AccessInfoFactory factory(this, dependencies, zone());
+  PropertyAccessInfo access_info = factory.ComputePropertyAccessInfo(
+      map.object(), name.object(), access_mode);
+  if (FLAG_concurrent_inlining) {
+    CHECK(SerializingAllowed());
+    TRACE(this, "Storing PropertyAccessInfo for "
+                    << access_mode << " of property " << name << " on map "
+                    << map);
+    property_access_infos_.insert({target, access_info});
   }
+  return access_info;
 }
 
-PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingExec(MapRef map) {
-  auto access_info = ais_for_loading_exec_.find(map);
-  if (access_info == ais_for_loading_exec_.end()) {
-    TRACE_BROKER_MISSING(this,
-                         "access info for property 'exec' on map " << map);
-    return PropertyAccessInfo::Invalid(zone());
-  }
-  return access_info->second;
+BinaryOperationFeedback const& ProcessedFeedback::AsBinaryOperation() const {
+  CHECK_EQ(kBinaryOperation, kind());
+  return *static_cast<BinaryOperationFeedback const*>(this);
 }
 
-PropertyAccessInfo const& JSHeapBroker::CreateAccessInfoForLoadingExec(
-    MapRef map, CompilationDependencies* dependencies) {
-  auto access_info = ais_for_loading_exec_.find(map);
-  if (access_info != ais_for_loading_exec_.end()) {
-    return access_info->second;
-  }
-
-  ZoneVector<PropertyAccessInfo> access_infos(zone());
-  AccessInfoFactory access_info_factory(this, dependencies, zone());
-  PropertyAccessInfo ai_exec = access_info_factory.ComputePropertyAccessInfo(
-      map.object(), isolate()->factory()->exec_string(), AccessMode::kLoad);
+CallFeedback const& ProcessedFeedback::AsCall() const {
+  CHECK_EQ(kCall, kind());
+  return *static_cast<CallFeedback const*>(this);
+}
 
-  auto inserted_ai = ais_for_loading_exec_.insert(std::make_pair(map, ai_exec));
-  return inserted_ai.first->second;
+CompareOperationFeedback const& ProcessedFeedback::AsCompareOperation() const {
+  CHECK_EQ(kCompareOperation, kind());
+  return *static_cast<CompareOperationFeedback const*>(this);
 }
 
-ElementAccessFeedback const* ProcessedFeedback::AsElementAccess() const {
+ElementAccessFeedback const& ProcessedFeedback::AsElementAccess() const {
   CHECK_EQ(kElementAccess, kind());
-  return static_cast<ElementAccessFeedback const*>(this);
+  return *static_cast<ElementAccessFeedback const*>(this);
+}
+
+ForInFeedback const& ProcessedFeedback::AsForIn() const {
+  CHECK_EQ(kForIn, kind());
+  return *static_cast<ForInFeedback const*>(this);
+}
+
+GlobalAccessFeedback const& ProcessedFeedback::AsGlobalAccess() const {
+  CHECK_EQ(kGlobalAccess, kind());
+  return *static_cast<GlobalAccessFeedback const*>(this);
+}
+
+InstanceOfFeedback const& ProcessedFeedback::AsInstanceOf() const {
+  CHECK_EQ(kInstanceOf, kind());
+  return *static_cast<InstanceOfFeedback const*>(this);
 }
 
-NamedAccessFeedback const* ProcessedFeedback::AsNamedAccess() const {
+NamedAccessFeedback const& ProcessedFeedback::AsNamedAccess() const {
   CHECK_EQ(kNamedAccess, kind());
-  return static_cast<NamedAccessFeedback const*>(this);
+  return *static_cast<NamedAccessFeedback const*>(this);
 }
 
 BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
     Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id,
-    bool analyze_liveness, bool serialize) {
+    bool analyze_liveness, SerializationPolicy policy) {
   ObjectData* bytecode_array_data = GetData(bytecode_array);
   CHECK_NOT_NULL(bytecode_array_data);
 
@@ -4154,7 +4755,7 @@ BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
     return *it->second;
   }
 
-  CHECK(serialize);
+  CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded);
   BytecodeAnalysis* analysis = new (zone()) BytecodeAnalysis(
       bytecode_array, zone(), osr_bailout_id, analyze_liveness);
   DCHECK_EQ(analysis->osr_bailout_id(), osr_bailout_id);
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index ffc10d2b93a023..8c2622bf488c7c 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -9,6 +9,8 @@
 #include "src/base/optional.h"
 #include "src/common/globals.h"
 #include "src/compiler/access-info.h"
+#include "src/compiler/feedback-source.h"
+#include "src/compiler/processed-feedback.h"
 #include "src/compiler/refs-map.h"
 #include "src/handles/handles.h"
 #include "src/interpreter/bytecode-array-accessor.h"
@@ -26,29 +28,6 @@ class BytecodeAnalysis;
 class ObjectRef;
 std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
 
-struct FeedbackSource {
-  FeedbackSource(Handle<FeedbackVector> vector_, FeedbackSlot slot_)
-      : vector(vector_), slot(slot_) {}
-  explicit FeedbackSource(FeedbackNexus const& nexus);
-  explicit FeedbackSource(VectorSlotPair const& pair);
-
-  Handle<FeedbackVector> const vector;
-  FeedbackSlot const slot;
-
-  struct Hash {
-    size_t operator()(FeedbackSource const& source) const {
-      return base::hash_combine(source.vector.address(), source.slot);
-    }
-  };
-
-  struct Equal {
-    bool operator()(FeedbackSource const& lhs,
-                    FeedbackSource const& rhs) const {
-      return lhs.vector.equals(rhs.vector) && lhs.slot == rhs.slot;
-    }
-  };
-};
-
 #define TRACE_BROKER(broker, x)                                      \
   do {                                                               \
     if (broker->tracing_enabled() && FLAG_trace_heap_broker_verbose) \
@@ -58,25 +37,51 @@ struct FeedbackSource {
 #define TRACE_BROKER_MISSING(broker, x)                             \
   do {                                                              \
     if (broker->tracing_enabled())                                  \
-      broker->Trace() << __FUNCTION__ << ": missing " << x << '\n'; \
+      broker->Trace() << "Missing " << x << " (" << __FILE__ << ":" \
+                      << __LINE__ << ")" << std::endl;              \
   } while (false)
 
+struct PropertyAccessTarget {
+  MapRef map;
+  NameRef name;
+  AccessMode mode;
+
+  struct Hash {
+    size_t operator()(const PropertyAccessTarget& pair) const {
+      return base::hash_combine(
+          base::hash_combine(pair.map.object().address(),
+                             pair.name.object().address()),
+          static_cast<int>(pair.mode));
+    }
+  };
+  struct Equal {
+    bool operator()(const PropertyAccessTarget& lhs,
+                    const PropertyAccessTarget& rhs) const {
+      return lhs.map.equals(rhs.map) && lhs.name.equals(rhs.name) &&
+             lhs.mode == rhs.mode;
+    }
+  };
+};
+
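
PropertyAccessTarget exists so that GetPropertyAccessInfo can memoize one PropertyAccessInfo per (map, name, access mode) triple: Hash chains base::hash_combine over the two handle addresses and the mode, and Equal compares handle identity. An equivalent composite key in standard C++, with std::hash plus a simple mixer standing in for base::hash_combine:

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>

struct AccessKey {
  std::string map_id;  // stand-ins for the two handle addresses
  std::string name;
  int mode;

  struct Hash {
    size_t operator()(const AccessKey& k) const {
      size_t h = std::hash<std::string>()(k.map_id);
      h = h * 31 + std::hash<std::string>()(k.name);  // cf. base::hash_combine
      h = h * 31 + std::hash<int>()(k.mode);
      return h;
    }
  };
  struct Equal {
    bool operator()(const AccessKey& a, const AccessKey& b) const {
      return a.map_id == b.map_id && a.name == b.name && a.mode == b.mode;
    }
  };
};

// The memoization table then mirrors property_access_infos_:
using AccessInfoCache =
    std::unordered_map<AccessKey, int /*PropertyAccessInfo stand-in*/,
                       AccessKey::Hash, AccessKey::Equal>;
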
 class V8_EXPORT_PRIVATE JSHeapBroker {
  public:
   JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled);
 
-  void SetNativeContextRef();
-  void SerializeStandardObjects();
+  // The compilation target's native context. We need the setter because at
+  // broker construction time we don't yet have the canonical handle.
+  NativeContextRef target_native_context() const {
+    return target_native_context_.value();
+  }
+  void SetTargetNativeContextRef(Handle<NativeContext> native_context);
+
+  void InitializeAndStartSerializing(Handle<NativeContext> native_context);
 
   Isolate* isolate() const { return isolate_; }
   Zone* zone() const { return current_zone_; }
   bool tracing_enabled() const { return tracing_enabled_; }
-  NativeContextRef native_context() const { return native_context_.value(); }
-  PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
 
   enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
   BrokerMode mode() const { return mode_; }
-  void StartSerializing();
   void StopSerializing();
   void Retire();
   bool SerializingAllowed() const;
@@ -93,36 +98,64 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   bool IsArrayOrObjectPrototype(const JSObjectRef& object) const;
 
   bool HasFeedback(FeedbackSource const& source) const;
-  // The processed {feedback} can be {nullptr}, indicating that the original
-  // feedback didn't contain information relevant for Turbofan.
   void SetFeedback(FeedbackSource const& source,
                    ProcessedFeedback const* feedback);
-  ProcessedFeedback const* GetFeedback(FeedbackSource const& source) const;
-
-  // Convenience wrappers around GetFeedback.
-  GlobalAccessFeedback const* GetGlobalAccessFeedback(
-      FeedbackSource const& source) const;
+  ProcessedFeedback const& GetFeedback(FeedbackSource const& source) const;
+  FeedbackSlotKind GetFeedbackSlotKind(FeedbackSource const& source) const;
 
   // TODO(neis): Move these into serializer when we're always in the background.
-  ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
-      MapHandles const& maps, KeyedAccessMode const& keyed_mode);
-  GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(
-      FeedbackSource const& source);
-
+  ElementAccessFeedback const& ProcessFeedbackMapsForElementAccess(
+      MapHandles const& maps, KeyedAccessMode const& keyed_mode,
+      FeedbackSlotKind slot_kind);
   BytecodeAnalysis const& GetBytecodeAnalysis(
       Handle<BytecodeArray> bytecode_array, BailoutId osr_offset,
-      bool analyze_liveness, bool serialize);
+      bool analyze_liveness,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+
+  // Binary, comparison and for-in hints can be fully expressed via
+  // an enum. Insufficient feedback is signaled by <Hint enum>::kNone.
+  BinaryOperationHint GetFeedbackForBinaryOperation(
+      FeedbackSource const& source);
+  CompareOperationHint GetFeedbackForCompareOperation(
+      FeedbackSource const& source);
+  ForInHint GetFeedbackForForIn(FeedbackSource const& source);
+
+  ProcessedFeedback const& GetFeedbackForCall(FeedbackSource const& source);
+  ProcessedFeedback const& GetFeedbackForGlobalAccess(
+      FeedbackSource const& source);
+  ProcessedFeedback const& GetFeedbackForInstanceOf(
+      FeedbackSource const& source);
+  ProcessedFeedback const& GetFeedbackForPropertyAccess(
+      FeedbackSource const& source, AccessMode mode,
+      base::Optional<NameRef> static_name);
+
+  ProcessedFeedback const& ProcessFeedbackForBinaryOperation(
+      FeedbackSource const& source);
+  ProcessedFeedback const& ProcessFeedbackForCall(FeedbackSource const& source);
+  ProcessedFeedback const& ProcessFeedbackForCompareOperation(
+      FeedbackSource const& source);
+  ProcessedFeedback const& ProcessFeedbackForForIn(
+      FeedbackSource const& source);
+  ProcessedFeedback const& ProcessFeedbackForGlobalAccess(
+      FeedbackSource const& source);
+  ProcessedFeedback const& ProcessFeedbackForInstanceOf(
+      FeedbackSource const& source);
+  ProcessedFeedback const& ProcessFeedbackForPropertyAccess(
+      FeedbackSource const& source, AccessMode mode,
+      base::Optional<NameRef> static_name);
+
+  bool FeedbackIsInsufficient(FeedbackSource const& source) const;
 
   base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus);
 
-  // If there is no result stored for {map}, we return an Invalid
-  // PropertyAccessInfo.
-  PropertyAccessInfo GetAccessInfoForLoadingThen(MapRef map);
-  void CreateAccessInfoForLoadingThen(MapRef map,
-                                      CompilationDependencies* dependencies);
-  PropertyAccessInfo GetAccessInfoForLoadingExec(MapRef map);
-  PropertyAccessInfo const& CreateAccessInfoForLoadingExec(
-      MapRef map, CompilationDependencies* dependencies);
+  // If {policy} is {kAssumeSerialized} and the broker doesn't know about the
+  // combination of {map}, {name}, and {access_mode}, returns Invalid.
+  PropertyAccessInfo GetPropertyAccessInfo(
+      MapRef map, NameRef name, AccessMode access_mode,
+      CompilationDependencies* dependencies = nullptr,
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+
+  StringRef GetTypedArrayStringTag(ElementsKind kind);
 
   std::ostream& Trace();
   void IncrementTracingIndentation();
@@ -133,13 +166,33 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   friend class ObjectRef;
   friend class ObjectData;
 
-  void SerializeShareableObjects();
+  // Bottleneck FeedbackNexus access here, for storage in the broker
+  // or on-the-fly usage elsewhere in the compiler.
+  ForInHint ReadFeedbackForForIn(FeedbackSource const& source) const;
+  CompareOperationHint ReadFeedbackForCompareOperation(
+      FeedbackSource const& source) const;
+  BinaryOperationHint ReadFeedbackForBinaryOperation(
+      FeedbackSource const& source) const;
+
+  ProcessedFeedback const& ReadFeedbackForCall(FeedbackSource const& source);
+  ProcessedFeedback const& ReadFeedbackForGlobalAccess(
+      FeedbackSource const& source);
+  ProcessedFeedback const& ReadFeedbackForInstanceOf(
+      FeedbackSource const& source);
+  ProcessedFeedback const& ReadFeedbackForPropertyAccess(
+      FeedbackSource const& source, AccessMode mode,
+      base::Optional<NameRef> static_name);
+
+  void InitializeRefsMap();
   void CollectArrayAndObjectPrototypes();
+  void SerializeTypedArrayStringTags();
+
+  PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
 
   Isolate* const isolate_;
   Zone* const broker_zone_;
-  Zone* current_zone_;
-  base::Optional<NativeContextRef> native_context_;
+  Zone* current_zone_ = nullptr;
+  base::Optional<NativeContextRef> target_native_context_;
   RefsMap* refs_;
   ZoneUnorderedSet<Handle<JSObject>, Handle<JSObject>::hash,
                    Handle<JSObject>::equal_to>
@@ -148,16 +201,16 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   bool const tracing_enabled_;
   StdoutStream trace_out_;
   unsigned trace_indentation_ = 0;
-  PerIsolateCompilerCache* compiler_cache_;
+  PerIsolateCompilerCache* compiler_cache_ = nullptr;
   ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
                    FeedbackSource::Hash, FeedbackSource::Equal>
       feedback_;
   ZoneUnorderedMap<ObjectData*, BytecodeAnalysis*> bytecode_analyses_;
-  typedef ZoneUnorderedMap<MapRef, PropertyAccessInfo, ObjectRef::Hash,
-                           ObjectRef::Equal>
-      MapToAccessInfos;
-  MapToAccessInfos ais_for_loading_then_;
-  MapToAccessInfos ais_for_loading_exec_;
+  ZoneUnorderedMap<PropertyAccessTarget, PropertyAccessInfo,
+                   PropertyAccessTarget::Hash, PropertyAccessTarget::Equal>
+      property_access_infos_;
+
+  ZoneVector<ObjectData*> typed_array_string_tags_;
 
   static const size_t kMinimalRefsBucketCount = 8;     // must be power of 2
   static const size_t kInitialRefsBucketCount = 1024;  // must be power of 2
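
The broker header above folds the two name-specific caches (ais_for_loading_then_, ais_for_loading_exec_) into one table keyed by (map, name, access mode) through PropertyAccessTarget::Hash/Equal. A minimal standalone sketch of that keying pattern, using std::unordered_map and stand-in types rather than V8's ZoneUnorderedMap, Ref classes, and hash combiner:

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <unordered_map>

    enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };

    // Stand-in for PropertyAccessTarget: raw addresses play the role of
    // map.object().address() and name.object().address().
    struct Target {
      uintptr_t map_address;
      uintptr_t name_address;
      AccessMode mode;

      struct Hash {
        size_t operator()(const Target& t) const {
          size_t h = std::hash<uintptr_t>()(t.map_address);
          h = h * 31 + std::hash<uintptr_t>()(t.name_address);
          return h * 31 + static_cast<size_t>(t.mode);
        }
      };
      struct Equal {
        bool operator()(const Target& lhs, const Target& rhs) const {
          return lhs.map_address == rhs.map_address &&
                 lhs.name_address == rhs.name_address && lhs.mode == rhs.mode;
        }
      };
    };

    struct AccessInfo {};  // stand-in for PropertyAccessInfo

    // One cache for every (map, name, mode) combination, instead of one
    // cache per hard-coded name.
    using AccessInfoCache =
        std::unordered_map<Target, AccessInfo, Target::Hash, Target::Equal>;

    int main() {
      AccessInfoCache cache;
      cache[{0x1000, 0x2000, AccessMode::kLoad}] = AccessInfo{};
      return cache.size() == 1 ? 0 : 1;
    }
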
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 7e7c9e3a0e1e52..bf4b79bf92cbbf 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -27,113 +27,175 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
 Reduction JSHeapCopyReducer::Reduce(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kHeapConstant: {
-      ObjectRef object(broker(), HeapConstantOf(node->op()));
-      if (object.IsJSFunction()) object.AsJSFunction().Serialize();
-      if (object.IsJSObject()) object.AsJSObject().SerializeObjectCreateMap();
-      if (object.IsSourceTextModule()) object.AsSourceTextModule().Serialize();
+      if (!FLAG_concurrent_inlining) {
+        ObjectRef object(broker(), HeapConstantOf(node->op()));
+        if (object.IsJSFunction()) object.AsJSFunction().Serialize();
+        if (object.IsJSObject()) {
+          object.AsJSObject().SerializeObjectCreateMap();
+        }
+        if (object.IsSourceTextModule()) {
+          object.AsSourceTextModule().Serialize();
+        }
+      }
       break;
     }
     case IrOpcode::kJSCreateArray: {
-      CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
-      Handle<AllocationSite> site;
-      if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
+      if (!FLAG_concurrent_inlining) {
+        CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+        Handle<AllocationSite> site;
+        if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
+      }
       break;
     }
     case IrOpcode::kJSCreateArguments: {
-      Node* const frame_state = NodeProperties::GetFrameStateInput(node);
-      FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
-      SharedFunctionInfoRef shared(broker(),
-                                   state_info.shared_info().ToHandleChecked());
+      if (!FLAG_concurrent_inlining) {
+        Node* const frame_state = NodeProperties::GetFrameStateInput(node);
+        FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+        SharedFunctionInfoRef shared(
+            broker(), state_info.shared_info().ToHandleChecked());
+      }
       break;
     }
     case IrOpcode::kJSCreateBlockContext: {
-      ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+      if (!FLAG_concurrent_inlining) {
+        ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+      }
       break;
     }
     case IrOpcode::kJSCreateBoundFunction: {
-      CreateBoundFunctionParameters const& p =
-          CreateBoundFunctionParametersOf(node->op());
-      MapRef(broker(), p.map());
+      if (!FLAG_concurrent_inlining) {
+        CreateBoundFunctionParameters const& p =
+            CreateBoundFunctionParametersOf(node->op());
+        MapRef(broker(), p.map());
+      }
       break;
     }
     case IrOpcode::kJSCreateCatchContext: {
-      ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+      if (!FLAG_concurrent_inlining) {
+        ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+      }
       break;
     }
     case IrOpcode::kJSCreateClosure: {
-      CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
-      SharedFunctionInfoRef(broker(), p.shared_info());
-      FeedbackCellRef(broker(), p.feedback_cell());
-      HeapObjectRef(broker(), p.code());
+      if (!FLAG_concurrent_inlining) {
+        CreateClosureParameters const& p =
+            CreateClosureParametersOf(node->op());
+        SharedFunctionInfoRef(broker(), p.shared_info());
+        FeedbackCellRef(broker(), p.feedback_cell());
+        HeapObjectRef(broker(), p.code());
+      }
       break;
     }
     case IrOpcode::kJSCreateEmptyLiteralArray: {
-      FeedbackParameter const& p = FeedbackParameterOf(node->op());
-      FeedbackVectorRef(broker(), p.feedback().vector()).SerializeSlots();
+      if (!FLAG_concurrent_inlining) {
+        FeedbackParameter const& p = FeedbackParameterOf(node->op());
+        FeedbackVectorRef(broker(), p.feedback().vector).Serialize();
+      }
       break;
     }
     case IrOpcode::kJSCreateFunctionContext: {
-      CreateFunctionContextParameters const& p =
-          CreateFunctionContextParametersOf(node->op());
-      ScopeInfoRef(broker(), p.scope_info());
+      if (!FLAG_concurrent_inlining) {
+        CreateFunctionContextParameters const& p =
+            CreateFunctionContextParametersOf(node->op());
+        ScopeInfoRef(broker(), p.scope_info());
+      }
       break;
     }
     case IrOpcode::kJSCreateLiteralArray:
     case IrOpcode::kJSCreateLiteralObject: {
-      CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
-      FeedbackVectorRef(broker(), p.feedback().vector()).SerializeSlots();
+      if (!FLAG_concurrent_inlining) {
+        CreateLiteralParameters const& p =
+            CreateLiteralParametersOf(node->op());
+        FeedbackVectorRef(broker(), p.feedback().vector).Serialize();
+      }
       break;
     }
     case IrOpcode::kJSCreateLiteralRegExp: {
-      CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
-      FeedbackVectorRef(broker(), p.feedback().vector()).SerializeSlots();
+      if (!FLAG_concurrent_inlining) {
+        CreateLiteralParameters const& p =
+            CreateLiteralParametersOf(node->op());
+        FeedbackVectorRef(broker(), p.feedback().vector).Serialize();
+      }
       break;
     }
     case IrOpcode::kJSCreateWithContext: {
-      ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+      if (!FLAG_concurrent_inlining) {
+        ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+      }
+      break;
+    }
+    case IrOpcode::kJSLoadNamed: {
+      if (!FLAG_concurrent_inlining) {
+        NamedAccess const& p = NamedAccessOf(node->op());
+        NameRef name(broker(), p.name());
+        if (p.feedback().IsValid()) {
+          broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
+                                                     AccessMode::kLoad, name);
+        }
+      }
       break;
     }
-    case IrOpcode::kJSLoadNamed:
     case IrOpcode::kJSStoreNamed: {
-      NamedAccess const& p = NamedAccessOf(node->op());
-      NameRef(broker(), p.name());
+      if (!FLAG_concurrent_inlining) {
+        NamedAccess const& p = NamedAccessOf(node->op());
+        NameRef name(broker(), p.name());
+      }
       break;
     }
     case IrOpcode::kStoreField:
     case IrOpcode::kLoadField: {
-      FieldAccess access = FieldAccessOf(node->op());
-      Handle<Map> map_handle;
-      if (access.map.ToHandle(&map_handle)) {
-        MapRef(broker(), map_handle);
-      }
-      Handle<Name> name_handle;
-      if (access.name.ToHandle(&name_handle)) {
-        NameRef(broker(), name_handle);
+      if (!FLAG_concurrent_inlining) {
+        FieldAccess access = FieldAccessOf(node->op());
+        Handle<Map> map_handle;
+        if (access.map.ToHandle(&map_handle)) {
+          MapRef(broker(), map_handle);
+        }
+        Handle<Name> name_handle;
+        if (access.name.ToHandle(&name_handle)) {
+          NameRef(broker(), name_handle);
+        }
       }
       break;
     }
     case IrOpcode::kMapGuard: {
-      ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
-      for (Handle<Map> map : maps) {
-        MapRef(broker(), map);
+      if (!FLAG_concurrent_inlining) {
+        ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
+        for (Handle<Map> map : maps) {
+          MapRef(broker(), map);
+        }
       }
       break;
     }
     case IrOpcode::kCheckMaps: {
-      ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps();
-      for (Handle<Map> map : maps) {
-        MapRef(broker(), map);
+      if (!FLAG_concurrent_inlining) {
+        ZoneHandleSet<Map> const& maps =
+            CheckMapsParametersOf(node->op()).maps();
+        for (Handle<Map> map : maps) {
+          MapRef(broker(), map);
+        }
       }
       break;
     }
     case IrOpcode::kCompareMaps: {
-      ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
-      for (Handle<Map> map : maps) {
-        MapRef(broker(), map);
+      if (!FLAG_concurrent_inlining) {
+        ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
+        for (Handle<Map> map : maps) {
+          MapRef(broker(), map);
+        }
+      }
+      break;
+    }
+    case IrOpcode::kJSLoadProperty: {
+      if (!FLAG_concurrent_inlining) {
+        PropertyAccess const& p = PropertyAccessOf(node->op());
+        AccessMode access_mode = AccessMode::kLoad;
+        if (p.feedback().IsValid()) {
+          broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode,
+                                                     base::nullopt);
+        }
       }
       break;
     }
-
     default:
       break;
   }
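
Every opcode case in the copy reducer is now wrapped in the same !FLAG_concurrent_inlining guard: eager serialization through this reducer happens only while the compiler is still allowed to touch the heap; under concurrent inlining that work is assumed to have been done by the serializer beforehand. The shape of the pattern, reduced to a toy with stand-in names:

    #include <iostream>

    static bool FLAG_concurrent_inlining = false;  // stand-in for V8's flag

    enum class Opcode { kHeapConstant, kOther };

    void Reduce(Opcode opcode) {
      switch (opcode) {
        case Opcode::kHeapConstant:
          if (!FLAG_concurrent_inlining) {
            // Eager serialization: only legal when heap access is permitted.
            std::cout << "serializing heap constant" << std::endl;
          }
          break;
        default:
          break;
      }
    }

    int main() {
      Reduce(Opcode::kHeapConstant);   // prints: serializing heap constant
      FLAG_concurrent_inlining = true;
      Reduce(Opcode::kHeapConstant);   // no heap access at all
    }
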
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index e11d6b59a30349..ae271b3af9e986 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -114,8 +114,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
   Handle<SharedFunctionInfo> frame_shared_info;
   for (int i = 0; i < candidate.num_functions; ++i) {
     if (!candidate.bytecode[i].has_value()) {
-      // We're already missing critical data which wouldn't allow us to
-      // continue the inlining checks. Log a warning and continue.
+      // Can't inline without bytecode.
+      // TODO(neis): Should this even be a broker message?
       if (candidate.functions[i].has_value()) {
         TRACE_BROKER(broker(),
                      "Missing bytecode array trying to inline JSFunction "
@@ -205,6 +205,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
 }
 
 void JSInliningHeuristic::Finalize() {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
   if (candidates_.empty()) return;  // Nothing to do without candidates.
   if (FLAG_trace_turbo_inlining) PrintCandidates();
 
@@ -730,18 +732,22 @@ bool JSInliningHeuristic::CandidateCompare::operator()(
 
 void JSInliningHeuristic::PrintCandidates() {
   StdoutStream os;
-  os << "Candidates for inlining (size=" << candidates_.size() << "):\n";
+  os << candidates_.size() << " candidate(s) for inlining:" << std::endl;
   for (const Candidate& candidate : candidates_) {
-    os << "  #" << candidate.node->id() << ":"
-       << candidate.node->op()->mnemonic()
-       << ", frequency: " << candidate.frequency << std::endl;
+    os << "- candidate: " << candidate.node->op()->mnemonic() << " node #"
+       << candidate.node->id() << " with frequency " << candidate.frequency
+       << ", " << candidate.num_functions << " target(s):" << std::endl;
     for (int i = 0; i < candidate.num_functions; ++i) {
-      SharedFunctionInfoRef shared =
-          candidate.functions[i].has_value()
-              ? candidate.functions[i].value().shared()
-              : candidate.shared_info.value();
-      PrintF("  - size:%d, name: %s\n", candidate.bytecode[i].value().length(),
-             shared.object()->DebugName().ToCString().get());
+      SharedFunctionInfoRef shared = candidate.functions[i].has_value()
+                                         ? candidate.functions[i]->shared()
+                                         : candidate.shared_info.value();
+      os << "  - target: " << shared;
+      if (candidate.bytecode[i].has_value()) {
+        os << ", bytecode size: " << candidate.bytecode[i]->length();
+      } else {
+        os << ", no bytecode";
+      }
+      os << std::endl;
     }
   }
 }
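
Finalize() above now opens with DisallowHeapAccessIf, a guard that is armed only when its condition is true. A sketch of that conditional-RAII idea under assumed semantics (V8's real guard plugs into its per-thread assert scopes; this is not that implementation):

    #include <cassert>

    class DisallowIf {
     public:
      explicit DisallowIf(bool condition) : armed_(condition) {
        if (armed_) ++forbidden_depth_;
      }
      ~DisallowIf() {
        if (armed_) --forbidden_depth_;
      }
      static bool IsAllowed() { return forbidden_depth_ == 0; }

     private:
      bool armed_;
      static thread_local int forbidden_depth_;
    };

    thread_local int DisallowIf::forbidden_depth_ = 0;

    int main() {
      assert(DisallowIf::IsAllowed());
      {
        DisallowIf guard(true);  // cf. DisallowHeapAccessIf(FLAG_concurrent_inlining)
        assert(!DisallowIf::IsAllowed());
      }
      assert(DisallowIf::IsAllowed());
    }
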
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 91cbea2346a37b..51179f1956f880 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -247,9 +247,13 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
       bailout_id, OutputFrameStateCombine::Ignore(), state_info);
   const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
   Node* node0 = graph()->NewNode(op0);
+
+  static constexpr int kTargetInputIndex = 0;
+  static constexpr int kReceiverInputIndex = 1;
+  const int parameter_count_with_receiver = parameter_count + 1;
   NodeVector params(local_zone_);
-  for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
-    params.push_back(node->InputAt(1 + parameter));
+  for (int i = 0; i < parameter_count_with_receiver; i++) {
+    params.push_back(node->InputAt(kReceiverInputIndex + i));
   }
   const Operator* op_param = common()->StateValues(
       static_cast<int>(params.size()), SparseInputMask::Dense());
@@ -259,7 +263,7 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
     context = jsgraph()->UndefinedConstant();
   }
   return graph()->NewNode(op, params_node, node0, node0, context,
-                          node->InputAt(0), outer_frame_state);
+                          node->InputAt(kTargetInputIndex), outer_frame_state);
 }
 
 namespace {
@@ -301,7 +305,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
     // TODO(turbofan): We might want to revisit this restriction later when we
     // have a need for this, and we know how to model different native contexts
     // in the same graph in a compositional way.
-    if (!function.native_context().equals(broker()->native_context())) {
+    if (!function.native_context().equals(broker()->target_native_context())) {
       return base::nullopt;
     }
 
@@ -332,7 +336,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
 //  - context         : The context (as SSA value) bound by the call target.
 //  - feedback_vector : The target is guaranteed to use this feedback vector.
 FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
-                                                  Node*& context_out) {
+                                                  Node** context_out) {
   DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
   HeapObjectMatcher match(node->InputAt(0));
 
@@ -342,7 +346,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
     CHECK(function.has_feedback_vector());
 
     // The inlinee specializes to the context from the JSFunction object.
-    context_out = jsgraph()->Constant(function.context());
+    *context_out = jsgraph()->Constant(function.context());
     return function.feedback_vector();
   }
 
@@ -354,7 +358,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
     FeedbackCellRef cell(FeedbackCellRef(broker(), p.feedback_cell()));
 
     // The inlinee uses the locally provided context at instantiation.
-    context_out = NodeProperties::GetContextInput(match.node());
+    *context_out = NodeProperties::GetContextInput(match.node());
     return cell.value().AsFeedbackVector();
   }
 
@@ -369,13 +373,14 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   // Determine the call target.
   base::Optional<SharedFunctionInfoRef> shared_info(DetermineCallTarget(node));
   if (!shared_info.has_value()) return NoChange();
-
   DCHECK(shared_info->IsInlineable());
 
+  SharedFunctionInfoRef outer_shared_info(broker(), info_->shared_info());
+
   // Constructor must be constructable.
   if (node->opcode() == IrOpcode::kJSConstruct &&
       !IsConstructable(shared_info->kind())) {
-    TRACE("Not inlining " << *shared_info << " into " << info_->shared_info()
+    TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
                           << " because constructor is not constructable.");
     return NoChange();
   }
@@ -384,7 +389,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
   if (node->opcode() == IrOpcode::kJSCall &&
       IsClassConstructor(shared_info->kind())) {
-    TRACE("Not inlining " << *shared_info << " into " << info_->shared_info()
+    TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
                           << " because callee is a class constructor.");
     return NoChange();
   }
@@ -398,7 +403,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
     nesting_level++;
     if (nesting_level > kMaxDepthForInlining) {
       TRACE("Not inlining "
-            << *shared_info << " into " << info_->shared_info()
+            << *shared_info << " into " << outer_shared_info
             << " because call has exceeded the maximum depth for function "
                "inlining.");
       return NoChange();
@@ -413,38 +418,38 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   // passing the IsInlineable check, the broker holds a reference to the
   // bytecode array, which prevents it from getting flushed.
   // Therefore, the following check should always hold true.
-  CHECK(shared_info.value().is_compiled());
+  CHECK(shared_info->is_compiled());
 
   if (!FLAG_concurrent_inlining && info_->is_source_positions_enabled()) {
     SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(),
                                                        shared_info->object());
   }
 
-  TRACE("Inlining " << *shared_info << " into " << info_->shared_info()
+  TRACE("Inlining " << *shared_info << " into " << outer_shared_info
                     << ((exception_target != nullptr) ? " (inside try-block)"
                                                       : ""));
   // Determine the target's feedback vector and its context.
   Node* context;
-  FeedbackVectorRef feedback_vector = DetermineCallContext(node, context);
+  FeedbackVectorRef feedback_vector = DetermineCallContext(node, &context);
 
-  if (FLAG_concurrent_inlining) {
-    if (!shared_info.value().IsSerializedForCompilation(feedback_vector)) {
-      TRACE("Missed opportunity to inline a function ("
-            << *shared_info << " with " << feedback_vector << ")");
-      return NoChange();
-    }
+  if (FLAG_concurrent_inlining &&
+      !shared_info->IsSerializedForCompilation(feedback_vector)) {
+    // TODO(neis): Should this be a broker message?
+    TRACE("Missed opportunity to inline a function ("
+          << *shared_info << " with " << feedback_vector << ")");
+    return NoChange();
   }
 
   // ----------------------------------------------------------------
   // After this point, we've made a decision to inline this function.
   // We shall not bailout from inlining if we got here.
 
-  BytecodeArrayRef bytecode_array = shared_info.value().GetBytecodeArray();
+  BytecodeArrayRef bytecode_array = shared_info->GetBytecodeArray();
 
   // Remember that we inlined this function.
-  int inlining_id = info_->AddInlinedFunction(
-      shared_info.value().object(), bytecode_array.object(),
-      source_positions_->GetSourcePosition(node));
+  int inlining_id =
+      info_->AddInlinedFunction(shared_info->object(), bytecode_array.object(),
+                                source_positions_->GetSourcePosition(node));
 
   // Create the subgraph for the inlinee.
   Node* start;
@@ -461,20 +466,11 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
       flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
     }
     {
-      // TODO(mslekova): Remove the following once bytecode graph builder
-      // is brokerized. Also, remove the context argument from
-      // BuildGraphFromBytecode and extract it from the broker there.
-      AllowHandleDereference allow_handle_deref;
-      AllowHandleAllocation allow_handle_alloc;
-      AllowHeapAllocation allow_heap_alloc;
-      AllowCodeDependencyChange allow_code_dep_change;
       CallFrequency frequency = call.frequency();
-      Handle<NativeContext> native_context(info_->native_context(), isolate());
-      BuildGraphFromBytecode(
-          broker(), zone(), bytecode_array.object(),
-          shared_info.value().object(), feedback_vector.object(),
-          BailoutId::None(), jsgraph(), frequency, source_positions_,
-          native_context, inlining_id, flags, &info_->tick_counter());
+      BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_vector,
+                             BailoutId::None(), jsgraph(), frequency,
+                             source_positions_, inlining_id, flags,
+                             &info_->tick_counter());
     }
 
     // Extract the inlinee start/end nodes.
@@ -522,13 +518,13 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
     // where execution continues at {construct_stub_create_deopt_pc_offset}).
     Node* receiver = jsgraph()->TheHoleConstant();  // Implicit receiver.
     Node* context = NodeProperties::GetContextInput(node);
-    if (NeedsImplicitReceiver(shared_info.value())) {
+    if (NeedsImplicitReceiver(*shared_info)) {
       Node* effect = NodeProperties::GetEffectInput(node);
       Node* control = NodeProperties::GetControlInput(node);
       Node* frame_state_inside = CreateArtificialFrameState(
           node, frame_state, call.formal_arguments(),
           BailoutId::ConstructStubCreate(), FrameStateType::kConstructStub,
-          shared_info.value(), context);
+          *shared_info, context);
       Node* create =
           graph()->NewNode(javascript()->Create(), call.target(), new_target,
                            context, frame_state_inside, effect, control);
@@ -583,7 +579,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
     frame_state = CreateArtificialFrameState(
         node, frame_state, call.formal_arguments(),
         BailoutId::ConstructStubInvoke(), FrameStateType::kConstructStub,
-        shared_info.value(), context);
+        *shared_info, context);
   }
 
   // Insert a JSConvertReceiver node for sloppy callees. Note that the context
@@ -593,8 +589,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
     Node* effect = NodeProperties::GetEffectInput(node);
     if (NodeProperties::CanBePrimitive(broker(), call.receiver(), effect)) {
       CallParameters const& p = CallParametersOf(node->op());
-      Node* global_proxy =
-          jsgraph()->Constant(broker()->native_context().global_proxy_object());
+      Node* global_proxy = jsgraph()->Constant(
+          broker()->target_native_context().global_proxy_object());
       Node* receiver = effect =
           graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
                            call.receiver(), global_proxy, effect, start);
@@ -612,7 +608,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   if (call.formal_arguments() != parameter_count) {
     frame_state = CreateArtificialFrameState(
         node, frame_state, call.formal_arguments(), BailoutId::None(),
-        FrameStateType::kArgumentsAdaptor, shared_info.value());
+        FrameStateType::kArgumentsAdaptor, *shared_info);
   }
 
   return InlineCall(node, new_target, context, frame_state, start, end,
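
The DetermineCallContext signature changes from Node*& to Node**, so the write through the out-parameter shows up as &context at the call site (which is why the NOLINT(runtime/references) in js-inlining.h below can go away). A minimal illustration of the difference:

    #include <cassert>

    struct Node { int id; };

    void DetermineByRef(Node*& context_out) { static Node n{1}; context_out = &n; }
    void DetermineByPtr(Node** context_out) { static Node n{2}; *context_out = &n; }

    int main() {
      Node* context = nullptr;
      DetermineByRef(context);   // old style: the write is invisible here
      DetermineByPtr(&context);  // new style: &context flags the out-parameter
      assert(context->id == 2);
    }
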
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index f50f7b591d2559..f60d53dbc9de92 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -59,8 +59,7 @@ class JSInliner final : public AdvancedReducer {
   SourcePositionTable* const source_positions_;
 
   base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
-  FeedbackVectorRef DetermineCallContext(
-      Node* node, Node*& context_out);  // NOLINT(runtime/references)
+  FeedbackVectorRef DetermineCallContext(Node* node, Node** context_out);
 
   Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
                                    int parameter_count, BailoutId bailout_id,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 970a7e3ed63c69..ccb0622017d46b 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -21,10 +21,13 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph)
-    : AdvancedReducer(editor), jsgraph_(jsgraph) {}
+JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
+                                         JSHeapBroker* broker)
+    : AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {}
 
 Reduction JSIntrinsicLowering::Reduce(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
   if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
   const Runtime::Function* const f =
       Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id());
@@ -108,7 +111,7 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
   // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
   Node* deoptimize = graph()->NewNode(
       common()->Deoptimize(DeoptimizeKind::kEager,
-                           DeoptimizeReason::kDeoptimizeNow, VectorSlotPair()),
+                           DeoptimizeReason::kDeoptimizeNow, FeedbackSource()),
       frame_state, effect, control);
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
   Revisit(graph()->end());
@@ -307,7 +310,7 @@ Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
 Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
   // ToString is unnecessary if the input is a string.
   HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
-  if (m.HasValue() && m.Value()->IsString()) {
+  if (m.HasValue() && m.Ref(broker()).IsString()) {
     ReplaceWithValue(node, m.node());
     return Replace(m.node());
   }
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 844e051d0a300d..f32b53b5863766 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -31,7 +31,7 @@ class SimplifiedOperatorBuilder;
 class V8_EXPORT_PRIVATE JSIntrinsicLowering final
     : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
-  JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph);
+  JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker);
   ~JSIntrinsicLowering() final = default;
 
   const char* reducer_name() const override { return "JSIntrinsicLowering"; }
@@ -81,12 +81,14 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
+  JSHeapBroker* broker() const { return broker_; }
   Isolate* isolate() const;
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
   SimplifiedOperatorBuilder* simplified() const;
 
   JSGraph* const jsgraph_;
+  JSHeapBroker* const broker_;
 };
 
 }  // namespace compiler
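
JSIntrinsicLowering now receives the broker at construction, so ReduceToString can test m.Ref(broker()).IsString() against broker-side data instead of dereferencing the handle via m.Value()->IsString(). The injection shape, with a stub standing in for the V8 class:

    struct JSHeapBroker {};  // stub

    class JSIntrinsicLoweringSketch {
     public:
      explicit JSIntrinsicLoweringSketch(JSHeapBroker* broker) : broker_(broker) {}

      // Reductions consult broker-side data instead of touching the heap.
      JSHeapBroker* broker() const { return broker_; }

     private:
      JSHeapBroker* const broker_;
    };

    int main() {
      JSHeapBroker broker;
      JSIntrinsicLoweringSketch lowering(&broker);  // injected once, used everywhere
      return lowering.broker() == &broker ? 0 : 1;
    }
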
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 8f7552baa18458..9f950c808c2d83 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -19,7 +19,6 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/property-access-builder.h"
 #include "src/compiler/type-cache.h"
-#include "src/compiler/vector-slot-pair.h"
 #include "src/execution/isolate-inl.h"
 #include "src/numbers/dtoa.h"
 #include "src/objects/feedback-vector.h"
@@ -52,35 +51,26 @@ bool HasOnlyJSArrayMaps(JSHeapBroker* broker,
   return true;
 }
 
-void TryUpdateThenDropDeprecated(Isolate* isolate, MapHandles* maps) {
-  for (auto it = maps->begin(); it != maps->end();) {
-    if (Map::TryUpdate(isolate, *it).ToHandle(&*it)) {
-      DCHECK(!(*it)->is_deprecated());
-      ++it;
-    } else {
-      it = maps->erase(it);
-    }
-  }
-}
-
 }  // namespace
 
 JSNativeContextSpecialization::JSNativeContextSpecialization(
     Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker, Flags flags,
-    Handle<Context> native_context, CompilationDependencies* dependencies,
-    Zone* zone, Zone* shared_zone)
+    CompilationDependencies* dependencies, Zone* zone, Zone* shared_zone)
     : AdvancedReducer(editor),
       jsgraph_(jsgraph),
       broker_(broker),
       flags_(flags),
-      global_object_(native_context->global_object(), jsgraph->isolate()),
-      global_proxy_(native_context->global_proxy(), jsgraph->isolate()),
+      global_object_(broker->target_native_context().global_object().object()),
+      global_proxy_(
+          broker->target_native_context().global_proxy_object().object()),
       dependencies_(dependencies),
       zone_(zone),
       shared_zone_(shared_zone),
       type_cache_(TypeCache::Get()) {}
 
 Reduction JSNativeContextSpecialization::Reduce(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   switch (node->opcode()) {
     case IrOpcode::kJSAdd:
       return ReduceJSAdd(node);
@@ -128,6 +118,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
       return ReduceJSToObject(node);
     case IrOpcode::kJSToString:
       return ReduceJSToString(node);
+    case IrOpcode::kJSGetIterator:
+      return ReduceJSGetIterator(node);
     default:
       break;
   }
@@ -236,11 +228,12 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
 
   // Create the JSAsyncFunctionObject based on the SharedFunctionInfo
   // extracted from the top-most frame in {frame_state}.
-  Handle<SharedFunctionInfo> shared =
-      FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked();
-  DCHECK(shared->is_compiled());
-  int register_count = shared->internal_formal_parameter_count() +
-                       shared->GetBytecodeArray().register_count();
+  SharedFunctionInfoRef shared(
+      broker(),
+      FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked());
+  DCHECK(shared.is_compiled());
+  int register_count = shared.internal_formal_parameter_count() +
+                       shared.GetBytecodeArray().register_count();
   Node* value = effect =
       graph()->NewNode(javascript()->CreateAsyncFunctionObject(register_count),
                        closure, receiver, promise, context, effect, control);
@@ -360,9 +353,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
   if (!m.HasValue()) return NoChange();
   JSFunctionRef function = m.Ref(broker()).AsJSFunction();
   MapRef function_map = function.map();
-  if (!FLAG_concurrent_inlining) {
-    function_map.SerializePrototype();
-  } else if (!function_map.serialized_prototype()) {
+  if (FLAG_concurrent_inlining && !function_map.serialized_prototype()) {
     TRACE_BROKER_MISSING(broker(), "data for map " << function_map);
     return NoChange();
   }
@@ -396,22 +387,37 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
   // we have feedback from the InstanceOfIC.
   Handle<JSObject> receiver;
   HeapObjectMatcher m(constructor);
-  if (m.HasValue() && m.Value()->IsJSObject()) {
-    receiver = Handle<JSObject>::cast(m.Value());
+  if (m.HasValue() && m.Ref(broker()).IsJSObject()) {
+    receiver = m.Ref(broker()).AsJSObject().object();
   } else if (p.feedback().IsValid()) {
-    FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-    if (!nexus.GetConstructorFeedback().ToHandle(&receiver)) return NoChange();
+    ProcessedFeedback const& feedback =
+        broker()->GetFeedbackForInstanceOf(FeedbackSource(p.feedback()));
+    if (feedback.IsInsufficient()) return NoChange();
+    base::Optional<JSObjectRef> maybe_receiver =
+        feedback.AsInstanceOf().value();
+    if (!maybe_receiver.has_value()) return NoChange();
+    receiver = maybe_receiver->object();
   } else {
     return NoChange();
   }
-  Handle<Map> receiver_map(receiver->map(), isolate());
 
-  // Compute property access info for @@hasInstance on the constructor.
-  AccessInfoFactory access_info_factory(broker(), dependencies(),
-                                        graph()->zone());
-  PropertyAccessInfo access_info =
-      access_info_factory.ComputePropertyAccessInfo(
-          receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad);
+  JSObjectRef receiver_ref(broker(), receiver);
+  MapRef receiver_map = receiver_ref.map();
+
+  PropertyAccessInfo access_info = PropertyAccessInfo::Invalid(graph()->zone());
+  if (FLAG_concurrent_inlining) {
+    access_info = broker()->GetPropertyAccessInfo(
+        receiver_map,
+        NameRef(broker(), isolate()->factory()->has_instance_symbol()),
+        AccessMode::kLoad);
+  } else {
+    AccessInfoFactory access_info_factory(broker(), dependencies(),
+                                          graph()->zone());
+    access_info = access_info_factory.ComputePropertyAccessInfo(
+        receiver_map.object(), factory()->has_instance_symbol(),
+        AccessMode::kLoad);
+  }
+
   if (access_info.IsInvalid()) return NoChange();
   access_info.RecordDependencies(dependencies());
 
@@ -420,7 +426,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
   if (access_info.IsNotFound()) {
     // If there's no @@hasInstance handler, the OrdinaryHasInstance operation
     // takes over, but that requires the constructor to be callable.
-    if (!receiver_map->is_callable()) return NoChange();
+    if (!receiver_map.is_callable()) return NoChange();
 
     dependencies()->DependOnStablePrototypeChains(access_info.receiver_maps(),
                                                   kStartAtPrototype);
@@ -439,17 +445,15 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
   }
 
   if (access_info.IsDataConstant()) {
-    // Determine actual holder.
     Handle<JSObject> holder;
     bool found_on_proto = access_info.holder().ToHandle(&holder);
-    if (!found_on_proto) holder = receiver;
-
-    FieldIndex field_index = access_info.field_index();
-    Handle<Object> constant = JSObject::FastPropertyAt(
-        holder, access_info.field_representation(), field_index);
-    if (!constant->IsCallable()) {
+    JSObjectRef holder_ref =
+        found_on_proto ? JSObjectRef(broker(), holder) : receiver_ref;
+    base::Optional<ObjectRef> constant = holder_ref.GetOwnDataProperty(
+        access_info.field_representation(), access_info.field_index());
+    if (!constant.has_value() || !constant->IsHeapObject() ||
+        !constant->AsHeapObject().map().is_callable())
       return NoChange();
-    }
 
     if (found_on_proto) {
       dependencies()->DependOnStablePrototypeChains(
@@ -457,8 +461,6 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
           JSObjectRef(broker(), holder));
     }
 
-    DCHECK(constant->IsCallable());
-
     // Check that {constructor} is actually {receiver}.
     constructor =
         access_builder.BuildCheckValue(constructor, &effect, control, receiver);
@@ -478,14 +480,14 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
         0, frame_state, ContinuationFrameStateMode::LAZY);
 
     // Call the @@hasInstance handler.
-    Node* target = jsgraph()->Constant(constant);
+    Node* target = jsgraph()->Constant(*constant);
     node->InsertInput(graph()->zone(), 0, target);
     node->ReplaceInput(1, constructor);
     node->ReplaceInput(2, object);
     node->ReplaceInput(4, continuation_frame_state);
     node->ReplaceInput(5, effect);
     NodeProperties::ChangeOp(
-        node, javascript()->Call(3, CallFrequency(), VectorSlotPair(),
+        node, javascript()->Call(3, CallFrequency(), FeedbackSource(),
                                  ConvertReceiverMode::kNotNullOrUndefined));
 
     // Rewire the value uses of {node} to ToBoolean conversion of the result.
@@ -504,7 +506,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
 
 JSNativeContextSpecialization::InferHasInPrototypeChainResult
 JSNativeContextSpecialization::InferHasInPrototypeChain(
-    Node* receiver, Node* effect, Handle<HeapObject> prototype) {
+    Node* receiver, Node* effect, HeapObjectRef const& prototype) {
   ZoneHandleSet<Map> receiver_maps;
   NodeProperties::InferReceiverMapsResult result =
       NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
@@ -517,28 +519,31 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
   bool all = true;
   bool none = true;
   for (size_t i = 0; i < receiver_maps.size(); ++i) {
-    Handle<Map> receiver_map = receiver_maps[i];
-    if (receiver_map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
-      return kMayBeInPrototypeChain;
-    }
-    if (result == NodeProperties::kUnreliableReceiverMaps &&
-        !receiver_map->is_stable()) {
+    MapRef map(broker(), receiver_maps[i]);
+    if (result == NodeProperties::kUnreliableReceiverMaps && !map.is_stable()) {
       return kMayBeInPrototypeChain;
     }
-    for (PrototypeIterator it(isolate(), receiver_map);; it.Advance()) {
-      if (it.IsAtEnd()) {
+    while (true) {
+      if (IsSpecialReceiverInstanceType(map.instance_type())) {
+        return kMayBeInPrototypeChain;
+      }
+      if (!map.IsJSObjectMap()) {
         all = false;
         break;
       }
-      Handle<HeapObject> current =
-          PrototypeIterator::GetCurrent<HeapObject>(it);
-      if (current.is_identical_to(prototype)) {
+      if (FLAG_concurrent_inlining && !map.serialized_prototype()) {
+        TRACE_BROKER_MISSING(broker(), "prototype data for map " << map);
+        return kMayBeInPrototypeChain;
+      }
+      if (map.prototype().equals(prototype)) {
         none = false;
         break;
       }
-      if (!current->map().is_stable() ||
-          current->map().instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
-        return kMayBeInPrototypeChain;
+      map = map.prototype().map();
+      if (!map.is_stable()) return kMayBeInPrototypeChain;
+      if (map.oddball_type() == OddballType::kNull) {
+        all = false;
+        break;
       }
     }
   }
@@ -554,8 +559,8 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
       // might be a different object each time, so it's much simpler to include
       // {prototype}. That does, however, mean that we must check {prototype}'s
       // map stability.
-      if (!prototype->map().is_stable()) return kMayBeInPrototypeChain;
-      last_prototype.emplace(broker(), Handle<JSObject>::cast(prototype));
+      if (!prototype.map().is_stable()) return kMayBeInPrototypeChain;
+      last_prototype = prototype.AsJSObject();
     }
     WhereToStart start = result == NodeProperties::kUnreliableReceiverMaps
                              ? kStartAtReceiver
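
The rewritten InferHasInPrototypeChain above walks broker-side MapRef data instead of a heap-allocated PrototypeIterator. Its core control flow, reduced to a standalone sketch with stand-in types (nullptr plays the null oddball; any unstable map on the chain forces the conservative answer):

    #include <cassert>

    struct Map;
    struct Object { const Map* map; };
    struct Map {
      bool stable;
      const Object* prototype;  // nullptr stands in for the null oddball
    };

    enum Result { kIsInChain, kNotInChain, kMayBeInChain };

    Result HasInPrototypeChain(const Object* object, const Object* target) {
      for (const Map* map = object->map;;) {
        if (!map->stable) return kMayBeInChain;  // cf. kMayBeInPrototypeChain
        if (map->prototype == nullptr) return kNotInChain;
        if (map->prototype == target) return kIsInChain;
        map = map->prototype->map;
      }
    }

    int main() {
      Map end_map{true, nullptr};
      Object proto{&end_map};
      Map obj_map{true, &proto};
      Object obj{&obj_map};
      assert(HasInPrototypeChain(&obj, &proto) == kIsInChain);
      assert(HasInPrototypeChain(&proto, &obj) == kNotInChain);
    }
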
@@ -580,7 +585,7 @@ Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain(
   HeapObjectMatcher m(prototype);
   if (m.HasValue()) {
     InferHasInPrototypeChainResult result =
-        InferHasInPrototypeChain(value, effect, m.Value());
+        InferHasInPrototypeChain(value, effect, m.Ref(broker()));
     if (result != kMayBeInPrototypeChain) {
       Node* value = jsgraph()->BooleanConstant(result == kIsInPrototypeChain);
       ReplaceWithValue(node, value);
@@ -601,34 +606,41 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
   HeapObjectMatcher m(constructor);
   if (!m.HasValue()) return NoChange();
 
-  // Check if the {constructor} is a JSBoundFunction.
-  if (m.Value()->IsJSBoundFunction()) {
-    // OrdinaryHasInstance on bound functions turns into a recursive
-    // invocation of the instanceof operator again.
-    // ES6 section 7.3.19 OrdinaryHasInstance (C, O) step 2.
-    Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(m.Value());
-    Handle<JSReceiver> bound_target_function(function->bound_target_function(),
-                                             isolate());
+  if (m.Ref(broker()).IsJSBoundFunction()) {
+    // OrdinaryHasInstance on bound functions turns into a recursive invocation
+    // of the instanceof operator again.
+    JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction();
+    if (FLAG_concurrent_inlining && !function.serialized()) {
+      TRACE_BROKER_MISSING(broker(), "data for JSBoundFunction " << function);
+      return NoChange();
+    }
+
+    JSReceiverRef bound_target_function = function.bound_target_function();
+
     NodeProperties::ReplaceValueInput(node, object, 0);
     NodeProperties::ReplaceValueInput(
-        node, jsgraph()->HeapConstant(bound_target_function), 1);
-    NodeProperties::ChangeOp(node, javascript()->InstanceOf(VectorSlotPair()));
+        node, jsgraph()->Constant(bound_target_function), 1);
+    NodeProperties::ChangeOp(node, javascript()->InstanceOf(FeedbackSource()));
     Reduction const reduction = ReduceJSInstanceOf(node);
     return reduction.Changed() ? reduction : Changed(node);
   }
 
-  // Optimize if we currently know the "prototype" property.
-  if (m.Value()->IsJSFunction()) {
+  if (m.Ref(broker()).IsJSFunction()) {
+    // Optimize if we currently know the "prototype" property.
+
     JSFunctionRef function = m.Ref(broker()).AsJSFunction();
-    // TODO(neis): This is a temporary hack needed because the copy reducer
-    // runs only after this pass.
-    function.Serialize();
+    if (FLAG_concurrent_inlining && !function.serialized()) {
+      TRACE_BROKER_MISSING(broker(), "data for JSFunction " << function);
+      return NoChange();
+    }
+
     // TODO(neis): Remove the has_prototype_slot condition once the broker is
     // always enabled.
     if (!function.map().has_prototype_slot() || !function.has_prototype() ||
         function.PrototypeRequiresRuntimeLookup()) {
       return NoChange();
     }
+
     ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function);
     Node* prototype_constant = jsgraph()->Constant(prototype);
 
@@ -656,7 +668,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
   // Check if the {constructor} is the %Promise% function.
   HeapObjectMatcher m(constructor);
   if (!m.HasValue() ||
-      !m.Ref(broker()).equals(broker()->native_context().promise_function())) {
+      !m.Ref(broker()).equals(native_context().promise_function())) {
     return NoChange();
   }
 
@@ -680,7 +692,6 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
 
 // ES section #sec-promise-resolve-functions
 Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode());
   Node* promise = NodeProperties::GetValueInput(node, 0);
   Node* resolution = NodeProperties::GetValueInput(node, 1);
@@ -705,7 +716,9 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
     // Obtain pre-computed access infos from the broker.
     for (auto map : resolution_maps) {
       MapRef map_ref(broker(), map);
-      access_infos.push_back(broker()->GetAccessInfoForLoadingThen(map_ref));
+      access_infos.push_back(broker()->GetPropertyAccessInfo(
+          map_ref, NameRef(broker(), isolate()->factory()->then_string()),
+          AccessMode::kLoad));
     }
   }
   PropertyAccessInfo access_info =
@@ -948,7 +961,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
         } else {
           // Check that the {value} is a Smi.
           value = effect = graph()->NewNode(
-              simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
+              simplified()->CheckSmi(FeedbackSource()), value, effect, control);
           property_cell_value_type = Type::SignedSmall();
           representation = MachineType::RepCompressedTaggedSigned();
         }
@@ -978,70 +991,85 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
 }
 
 Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
   LoadGlobalParameters const& p = LoadGlobalParametersOf(node->op());
   if (!p.feedback().IsValid()) return NoChange();
-  FeedbackSource source(p.feedback());
 
-  // TODO(neis): Make consistent with other feedback processing code.
-  GlobalAccessFeedback const* processed =
-      FLAG_concurrent_inlining
-          ? broker()->GetGlobalAccessFeedback(source)
-          : broker()->ProcessFeedbackForGlobalAccess(source);
-  if (processed == nullptr) return NoChange();
+  ProcessedFeedback const& processed =
+      broker()->GetFeedbackForGlobalAccess(FeedbackSource(p.feedback()));
+  if (processed.IsInsufficient()) return NoChange();
 
-  if (processed->IsScriptContextSlot()) {
+  GlobalAccessFeedback const& feedback = processed.AsGlobalAccess();
+  if (feedback.IsScriptContextSlot()) {
     Node* effect = NodeProperties::GetEffectInput(node);
-    Node* script_context = jsgraph()->Constant(processed->script_context());
+    Node* script_context = jsgraph()->Constant(feedback.script_context());
     Node* value = effect =
-        graph()->NewNode(javascript()->LoadContext(0, processed->slot_index(),
-                                                   processed->immutable()),
+        graph()->NewNode(javascript()->LoadContext(0, feedback.slot_index(),
+                                                   feedback.immutable()),
                          script_context, effect);
     ReplaceWithValue(node, value, effect);
     return Replace(value);
+  } else if (feedback.IsPropertyCell()) {
+    return ReduceGlobalAccess(node, nullptr, nullptr,
+                              NameRef(broker(), p.name()), AccessMode::kLoad,
+                              nullptr, feedback.property_cell());
+  } else {
+    DCHECK(feedback.IsMegamorphic());
+    return NoChange();
   }
-
-  CHECK(processed->IsPropertyCell());
-  return ReduceGlobalAccess(node, nullptr, nullptr, NameRef(broker(), p.name()),
-                            AccessMode::kLoad, nullptr,
-                            processed->property_cell());
 }
 
 Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
   Node* value = NodeProperties::GetValueInput(node, 0);
-
   StoreGlobalParameters const& p = StoreGlobalParametersOf(node->op());
   if (!p.feedback().IsValid()) return NoChange();
-  FeedbackSource source(p.feedback());
 
-  GlobalAccessFeedback const* processed =
-      FLAG_concurrent_inlining
-          ? broker()->GetGlobalAccessFeedback(source)
-          : broker()->ProcessFeedbackForGlobalAccess(source);
-  if (processed == nullptr) return NoChange();
+  ProcessedFeedback const& processed =
+      broker()->GetFeedbackForGlobalAccess(FeedbackSource(p.feedback()));
+  if (processed.IsInsufficient()) return NoChange();
 
-  if (processed->IsScriptContextSlot()) {
-    if (processed->immutable()) return NoChange();
+  GlobalAccessFeedback const& feedback = processed.AsGlobalAccess();
+  if (feedback.IsScriptContextSlot()) {
+    if (feedback.immutable()) return NoChange();
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* control = NodeProperties::GetControlInput(node);
-    Node* script_context = jsgraph()->Constant(processed->script_context());
+    Node* script_context = jsgraph()->Constant(feedback.script_context());
     effect =
-        graph()->NewNode(javascript()->StoreContext(0, processed->slot_index()),
+        graph()->NewNode(javascript()->StoreContext(0, feedback.slot_index()),
                          value, script_context, effect, control);
     ReplaceWithValue(node, value, effect, control);
     return Replace(value);
-  }
-
-  if (processed->IsPropertyCell()) {
+  } else if (feedback.IsPropertyCell()) {
     return ReduceGlobalAccess(node, nullptr, value, NameRef(broker(), p.name()),
                               AccessMode::kStore, nullptr,
-                              processed->property_cell());
+                              feedback.property_cell());
+  } else {
+    DCHECK(feedback.IsMegamorphic());
+    return NoChange();
   }
+}
 
-  UNREACHABLE();
+void JSNativeContextSpecialization::FilterMapsAndGetPropertyAccessInfos(
+    NamedAccessFeedback const& feedback, AccessMode access_mode, Node* receiver,
+    Node* effect, ZoneVector<PropertyAccessInfo>* access_infos) {
+  ZoneVector<Handle<Map>> receiver_maps(zone());
+
+  // Either infer maps from the graph or use the feedback.
+  if (!InferReceiverMaps(receiver, effect, &receiver_maps)) {
+    receiver_maps = feedback.maps();
+  }
+  RemoveImpossibleReceiverMaps(receiver, &receiver_maps);
+
+  for (Handle<Map> map_handle : receiver_maps) {
+    MapRef map(broker(), map_handle);
+    if (map.is_deprecated()) continue;
+    PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
+        map, feedback.name(), access_mode, dependencies(),
+        FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized
+                                 : SerializationPolicy::kSerializeIfNeeded);
+    access_infos->push_back(access_info);
+  }
 }
 
 Reduction JSNativeContextSpecialization::ReduceNamedAccess(
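
FilterMapsAndGetPropertyAccessInfos relies on GetPropertyAccessInfo's documented contract: under kAssumeSerialized a cache miss returns Invalid, while kSerializeIfNeeded may compute and cache the result on the spot. A sketch of that two-policy lookup against a deliberately simplified cache:

    #include <map>

    enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };

    struct AccessInfo {
      bool invalid = true;  // stand-in for PropertyAccessInfo::Invalid
    };

    static std::map<int, AccessInfo> cache;  // stand-in for property_access_infos_

    AccessInfo GetPropertyAccessInfo(int target, SerializationPolicy policy) {
      auto it = cache.find(target);
      if (it != cache.end()) return it->second;
      if (policy == SerializationPolicy::kSerializeIfNeeded) {
        AccessInfo computed{false};  // main-thread computation, then cache
        cache.emplace(target, computed);
        return computed;
      }
      return AccessInfo{};  // kAssumeSerialized miss: Invalid
    }

    int main() {
      bool miss_is_invalid =
          GetPropertyAccessInfo(1, SerializationPolicy::kAssumeSerialized).invalid;
      bool computed_is_valid =
          !GetPropertyAccessInfo(1, SerializationPolicy::kSerializeIfNeeded).invalid;
      return (miss_is_invalid && computed_is_valid) ? 0 : 1;
    }
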
@@ -1052,18 +1080,23 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
          node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty ||
          node->opcode() == IrOpcode::kJSStoreNamedOwn ||
-         node->opcode() == IrOpcode::kJSHasProperty);
+         node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
+         node->opcode() == IrOpcode::kJSHasProperty ||
+         node->opcode() == IrOpcode::kJSGetIterator);
   Node* receiver = NodeProperties::GetValueInput(node, 0);
   Node* context = NodeProperties::GetContextInput(node);
   Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
+  ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
   ZoneVector<PropertyAccessInfo> access_infos(zone());
+  FilterMapsAndGetPropertyAccessInfos(feedback, access_mode, receiver, effect,
+                                      &access_infos_for_feedback);
   AccessInfoFactory access_info_factory(broker(), dependencies(),
                                         graph()->zone());
   if (!access_info_factory.FinalizePropertyAccessInfos(
-          feedback.access_infos(), access_mode, &access_infos)) {
+          access_infos_for_feedback, access_mode, &access_infos)) {
     return NoChange();
   }
 
@@ -1072,7 +1105,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
   // to the current native context's global object instead.
   if (access_infos.size() == 1 && access_infos[0].receiver_maps().size() == 1) {
     MapRef receiver_map(broker(), access_infos[0].receiver_maps()[0]);
-    if (receiver_map.IsMapOfCurrentGlobalProxy()) {
+    if (receiver_map.IsMapOfTargetGlobalProxy()) {
       return ReduceGlobalAccess(node, receiver, value, feedback.name(),
                                 access_mode, key);
     }
@@ -1318,7 +1351,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
 }
 
 Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
   NamedAccess const& p = NamedAccessOf(node->op());
   Node* const receiver = NodeProperties::GetValueInput(node, 0);
@@ -1332,9 +1364,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
         name.equals(ObjectRef(broker(), factory()->prototype_string()))) {
       // Optimize "prototype" property of functions.
       JSFunctionRef function = object.AsJSFunction();
-      if (!FLAG_concurrent_inlining) {
-        function.Serialize();
-      } else if (!function.serialized()) {
+      if (FLAG_concurrent_inlining && !function.serialized()) {
         TRACE_BROKER_MISSING(broker(), "data for function " << function);
         return NoChange();
       }
@@ -1363,8 +1393,16 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
                                     AccessMode::kLoad);
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSGetIterator, node->opcode());
+  PropertyAccess const& p = PropertyAccessOf(node->op());
+  NameRef name(broker(), factory()->iterator_symbol());
+
+  return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
+                              FeedbackSource(p.feedback()), AccessMode::kLoad);
+}
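ReduceJSGetIterator reuses the generic property-access reduction by pinning the property name to the well-known iterator symbol. A standalone sketch of that delegate-with-fixed-key shape, under simplified stand-in types (none of these names are V8's):

    // Standalone sketch (not V8 code): a dedicated "get iterator" operation
    // implemented by fixing the property name and delegating to the generic
    // property-access path, as ReduceJSGetIterator does with @@iterator.
    #include <iostream>
    #include <map>
    #include <string>

    using Object = std::map<std::string, std::string>;

    std::string ReducePropertyAccess(const Object& obj, const std::string& name) {
      auto it = obj.find(name);
      return it == obj.end() ? "<missing>" : it->second;
    }

    std::string ReduceGetIterator(const Object& obj) {
      // The name is not taken from the node; it is always the iterator symbol.
      return ReducePropertyAccess(obj, "Symbol.iterator");
    }

    int main() {
      Object obj = {{"Symbol.iterator", "[native iterator]"}};
      std::cout << ReduceGetIterator(obj) << "\n";
    }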
+
 Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
   NamedAccess const& p = NamedAccessOf(node->op());
   Node* const value = NodeProperties::GetValueInput(node, 1);
@@ -1376,7 +1414,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
 }
 
 Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode());
   StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
   Node* const value = NodeProperties::GetValueInput(node, 1);
@@ -1401,7 +1438,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
 
   // Ensure that the {receiver} is actually a String.
   receiver = effect = graph()->NewNode(
-      simplified()->CheckString(VectorSlotPair()), receiver, effect, control);
+      simplified()->CheckString(FeedbackSource()), receiver, effect, control);
 
   // Determine the {receiver} length.
   Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
@@ -1428,13 +1465,50 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
 }
 }  // namespace
 
+void JSNativeContextSpecialization::RemoveImpossibleReceiverMaps(
+    Node* receiver, ZoneVector<Handle<Map>>* receiver_maps) const {
+  base::Optional<MapRef> root_map = InferReceiverRootMap(receiver);
+  if (root_map.has_value()) {
+    DCHECK(!root_map->is_abandoned_prototype_map());
+    receiver_maps->erase(
+        std::remove_if(receiver_maps->begin(), receiver_maps->end(),
+                       [root_map, this](Handle<Map> map) {
+                         MapRef map_ref(broker(), map);
+                         return map_ref.is_abandoned_prototype_map() ||
+                                (map_ref.FindRootMap().has_value() &&
+                                 !map_ref.FindRootMap()->equals(*root_map));
+                       }),
+        receiver_maps->end());
+  }
+}
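A compilable sketch of the erase/remove_if filtering performed by RemoveImpossibleReceiverMaps, assuming a simplified Map with a precomputed root and an abandoned flag (illustrative only, not V8 code):

    // Standalone sketch (not V8 code): drop maps that are abandoned prototype
    // maps or whose root differs from the root inferred for the receiver.
    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct Map {
      int root;        // Identifies this map's root map.
      bool abandoned;  // Abandoned prototype maps are never valid receivers.
    };

    void RemoveImpossibleMaps(int inferred_root, std::vector<Map>* maps) {
      maps->erase(
          std::remove_if(maps->begin(), maps->end(),
                         [inferred_root](const Map& map) {
                           return map.abandoned || map.root != inferred_root;
                         }),
          maps->end());
    }

    int main() {
      std::vector<Map> maps = {{1, false}, {2, false}, {1, true}};
      RemoveImpossibleMaps(1, &maps);
      std::cout << maps.size() << "\n";  // 1: only the live map with root 1.
    }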
+
+// Possibly refine the feedback using inferred map information from the graph.
+ElementAccessFeedback const&
+JSNativeContextSpecialization::TryRefineElementAccessFeedback(
+    ElementAccessFeedback const& feedback, Node* receiver, Node* effect) const {
+  AccessMode access_mode = feedback.keyed_mode().access_mode();
+  bool use_inference =
+      access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas;
+  if (!use_inference) return feedback;
+
+  ZoneVector<Handle<Map>> inferred_maps(zone());
+  if (!InferReceiverMaps(receiver, effect, &inferred_maps)) return feedback;
+
+  RemoveImpossibleReceiverMaps(receiver, &inferred_maps);
+  // TODO(neis): After Refine, the resulting feedback can still contain
+  // impossible maps when a target is kept only because more than one of its
+  // sources was inferred. Think of a way to completely rule out impossible
+  // maps.
+  return feedback.Refine(inferred_maps, zone());
+}
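The TODO above notes that Refine can keep an impossible transition target alive, because only a group's source maps are intersected with the inferred maps. A small self-contained model of that behavior (the types and Refine below are simplified stand-ins, not V8's):

    // Standalone sketch (not V8 code): a transition group survives as long as
    // any one of its sources is still possible, so an impossible target can
    // outlive refinement.
    #include <iostream>
    #include <set>
    #include <vector>

    struct Group {
      std::vector<int> sources;
      int target;
    };

    std::vector<Group> Refine(const std::vector<Group>& groups,
                              const std::set<int>& inferred) {
      std::vector<Group> result;
      for (const Group& group : groups) {
        Group refined{{}, group.target};
        for (int source : group.sources) {
          if (inferred.count(source)) refined.sources.push_back(source);
        }
        // The target is kept whenever at least one source survived.
        if (!refined.sources.empty()) result.push_back(refined);
      }
      return result;
    }

    int main() {
      // Target 30 stays because source 10 was inferred, whether or not 30
      // itself is a possible receiver map.
      std::vector<Group> groups = {{{10, 20}, 30}};
      std::cout << Refine(groups, {10}).size() << "\n";  // Prints 1.
    }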
+
 Reduction JSNativeContextSpecialization::ReduceElementAccess(
     Node* node, Node* index, Node* value,
-    ElementAccessFeedback const& processed) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+    ElementAccessFeedback const& feedback) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty ||
          node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
+         node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
          node->opcode() == IrOpcode::kJSHasProperty);
 
   Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1443,30 +1517,34 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
   Node* frame_state =
       NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
 
-  AccessMode access_mode = processed.keyed_mode.access_mode();
+  // TODO(neis): It's odd that we do optimizations below that don't really care
+  // about the feedback, but we don't do them when the feedback is megamorphic.
+  if (feedback.transition_groups().empty()) return NoChange();
+
+  ElementAccessFeedback const& refined_feedback =
+      TryRefineElementAccessFeedback(feedback, receiver, effect);
+
+  AccessMode access_mode = refined_feedback.keyed_mode().access_mode();
   if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) &&
       receiver->opcode() == IrOpcode::kHeapConstant) {
-    Reduction reduction = ReduceKeyedLoadFromHeapConstant(
-        node, index, access_mode, processed.keyed_mode.load_mode());
+    Reduction reduction = ReduceElementLoadFromHeapConstant(
+        node, index, access_mode, refined_feedback.keyed_mode().load_mode());
     if (reduction.Changed()) return reduction;
   }
 
-  if (HasOnlyStringMaps(broker(), processed.receiver_maps)) {
-    DCHECK(processed.transitions.empty());
+  if (!refined_feedback.transition_groups().empty() &&
+      refined_feedback.HasOnlyStringMaps(broker())) {
     return ReduceElementAccessOnString(node, index, value,
-                                       processed.keyed_mode);
+                                       refined_feedback.keyed_mode());
   }
 
-  // Compute element access infos for the receiver maps.
   AccessInfoFactory access_info_factory(broker(), dependencies(),
                                         graph()->zone());
   ZoneVector<ElementAccessInfo> access_infos(zone());
-  if (!access_info_factory.ComputeElementAccessInfos(processed, access_mode,
-                                                     &access_infos)) {
+  if (!access_info_factory.ComputeElementAccessInfos(refined_feedback,
+                                                     &access_infos) ||
+      access_infos.empty()) {
     return NoChange();
-  } else if (access_infos.empty()) {
-    return ReduceSoftDeoptimize(
-        node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
   }
 
   // For holey stores or growing stores, we need to check that the prototype
@@ -1485,7 +1563,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
         // then we need to check that all prototypes have stable maps with
         // fast elements (and we need to guard against changes to that below).
         if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) ||
-             IsGrowStoreMode(processed.keyed_mode.store_mode())) &&
+             IsGrowStoreMode(feedback.keyed_mode().store_mode())) &&
             !receiver_map.HasOnlyStablePrototypesWithFastElements(
                 &prototype_maps)) {
           return NoChange();
@@ -1514,9 +1592,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
     base::Optional<JSTypedArrayRef> typed_array =
         GetTypedArrayConstant(broker(), receiver);
     if (typed_array.has_value()) {
-      if (!FLAG_concurrent_inlining) {
-        typed_array->Serialize();
-      } else if (!typed_array->serialized()) {
+      if (FLAG_concurrent_inlining && !typed_array->serialized()) {
         TRACE_BROKER_MISSING(broker(), "data for typed array " << *typed_array);
         return NoChange();
       }
@@ -1558,7 +1634,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
     // Access the actual element.
     ValueEffectControl continuation =
         BuildElementAccess(receiver, index, value, effect, control, access_info,
-                           processed.keyed_mode);
+                           feedback.keyed_mode());
     value = continuation.value();
     effect = continuation.effect();
     control = continuation.control();
@@ -1625,7 +1701,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
       // Access the actual element.
       ValueEffectControl continuation =
           BuildElementAccess(this_receiver, this_index, this_value, this_effect,
-                             this_control, access_info, processed.keyed_mode);
+                             this_control, access_info, feedback.keyed_mode());
       values.push_back(continuation.value());
       effects.push_back(continuation.effect());
       controls.push_back(continuation.control());
@@ -1658,7 +1734,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
   return Replace(value);
 }
 
-Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
+Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
     Node* node, Node* key, AccessMode access_mode,
     KeyedAccessLoadMode load_mode) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -1733,67 +1809,35 @@ Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
 Reduction JSNativeContextSpecialization::ReducePropertyAccess(
     Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
     FeedbackSource const& source, AccessMode access_mode) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(key == nullptr, static_name.has_value());
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty ||
          node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
+         node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
          node->opcode() == IrOpcode::kJSHasProperty ||
          node->opcode() == IrOpcode::kJSLoadNamed ||
          node->opcode() == IrOpcode::kJSStoreNamed ||
-         node->opcode() == IrOpcode::kJSStoreNamedOwn);
-
-  Node* receiver = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-
-  ProcessedFeedback const* processed = nullptr;
-  if (FLAG_concurrent_inlining) {
-    processed = broker()->GetFeedback(source);
-    // TODO(neis): Infer maps from the graph and consolidate with feedback/hints
-    // and filter impossible candidates based on inferred root map.
-  } else {
-    // TODO(neis): Try to unify this with the similar code in the serializer.
-    FeedbackNexus nexus(source.vector, source.slot);
-    if (nexus.ic_state() == UNINITIALIZED) {
-      processed = new (zone()) InsufficientFeedback();
-    } else {
-      MapHandles receiver_maps;
-      if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
-        processed = new (zone()) InsufficientFeedback();
-      } else if (!receiver_maps.empty()) {
-        base::Optional<NameRef> name = static_name.has_value()
-                                           ? static_name
-                                           : broker()->GetNameFeedback(nexus);
-        if (name.has_value()) {
-          ZoneVector<PropertyAccessInfo> access_infos(zone());
-          AccessInfoFactory access_info_factory(broker(), dependencies(),
-                                                graph()->zone());
-          access_info_factory.ComputePropertyAccessInfos(
-              receiver_maps, name->object(), access_mode, &access_infos);
-          processed = new (zone()) NamedAccessFeedback(*name, access_infos);
-        } else if (nexus.GetKeyType() == ELEMENT &&
-                   MEGAMORPHIC != nexus.ic_state()) {
-          processed = broker()->ProcessFeedbackMapsForElementAccess(
-              receiver_maps, KeyedAccessMode::FromNexus(nexus));
-        }
-      }
-    }
-  }
+         node->opcode() == IrOpcode::kJSStoreNamedOwn ||
+         node->opcode() == IrOpcode::kJSGetIterator);
+  DCHECK_GE(node->op()->ControlOutputCount(), 1);
 
-  if (processed == nullptr) return NoChange();
-  switch (processed->kind()) {
+  ProcessedFeedback const& feedback =
+      broker()->GetFeedbackForPropertyAccess(source, access_mode, static_name);
+  switch (feedback.kind()) {
     case ProcessedFeedback::kInsufficient:
       return ReduceSoftDeoptimize(
           node,
           DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
     case ProcessedFeedback::kNamedAccess:
-      return ReduceNamedAccess(node, value, *processed->AsNamedAccess(),
+      return ReduceNamedAccess(node, value, feedback.AsNamedAccess(),
                                access_mode, key);
     case ProcessedFeedback::kElementAccess:
-      CHECK_EQ(processed->AsElementAccess()->keyed_mode.access_mode(),
-               access_mode);
-      return ReduceElementAccess(node, key, value,
-                                 *processed->AsElementAccess());
-    case ProcessedFeedback::kGlobalAccess:
+      DCHECK_EQ(feedback.AsElementAccess().keyed_mode().access_mode(),
+                access_mode);
+      return ReduceElementAccess(node, key, value, feedback.AsElementAccess());
+    default:
       UNREACHABLE();
   }
 }
@@ -1807,7 +1851,7 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
   Node* frame_state =
       NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
   Node* deoptimize = graph()->NewNode(
-      common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+      common()->Deoptimize(DeoptimizeKind::kSoft, reason, FeedbackSource()),
       frame_state, effect, control);
   // TODO(bmeurer): This should be on the AdvancedReducer somehow.
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
@@ -1818,7 +1862,6 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
 }
 
 Reduction JSNativeContextSpecialization::ReduceJSHasProperty(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSHasProperty, node->opcode());
   PropertyAccess const& p = PropertyAccessOf(node->op());
   Node* key = NodeProperties::GetValueInput(node, 1);
@@ -1936,7 +1979,6 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
 }
 
 Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
   PropertyAccess const& p = PropertyAccessOf(node->op());
   Node* name = NodeProperties::GetValueInput(node, 1);
@@ -1953,7 +1995,6 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
 }
 
 Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
   PropertyAccess const& p = PropertyAccessOf(node->op());
   Node* const key = NodeProperties::GetValueInput(node, 1);
@@ -1975,7 +2016,7 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
   Node* value;
   if (constant.IsJSFunction()) {
     value = *effect = *control = graph()->NewNode(
-        jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(),
+        jsgraph()->javascript()->Call(2, CallFrequency(), FeedbackSource(),
                                       ConvertReceiverMode::kNotNullOrUndefined),
         target, receiver, context, frame_state, *effect, *control);
   } else {
@@ -2012,7 +2053,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
   // Introduce the call to the setter function.
   if (constant.IsJSFunction()) {
     *effect = *control = graph()->NewNode(
-        jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(),
+        jsgraph()->javascript()->Call(3, CallFrequency(), FeedbackSource(),
                                       ConvertReceiverMode::kNotNullOrUndefined),
         target, receiver, value, context, frame_state, *effect, *control);
   } else {
@@ -2197,12 +2238,10 @@ JSNativeContextSpecialization::BuildPropertyStore(
     Node* storage = receiver;
     if (!field_index.is_inobject()) {
       storage = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForJSObjectPropertiesOrHash()),
+          simplified()->LoadField(
+              AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer()),
           storage, effect, control);
     }
-    PropertyConstness constness = access_info.IsDataConstant()
-                                      ? PropertyConstness::kConst
-                                      : PropertyConstness::kMutable;
     bool store_to_existing_constant_field = access_info.IsDataConstant() &&
                                             access_mode == AccessMode::kStore &&
                                             !access_info.HasTransitionMap();
@@ -2215,24 +2254,25 @@ JSNativeContextSpecialization::BuildPropertyStore(
         MachineType::TypeForRepresentation(field_representation),
         kFullWriteBarrier,
         LoadSensitivity::kUnsafe,
-        constness};
+        access_info.GetConstFieldInfo(),
+        access_mode == AccessMode::kStoreInLiteral};
 
     switch (field_representation) {
       case MachineRepresentation::kFloat64: {
         value = effect =
-            graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+            graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value,
                              effect, control);
         if (!field_index.is_inobject() || !FLAG_unbox_double_fields) {
           if (access_info.HasTransitionMap()) {
-            // Allocate a MutableHeapNumber for the new property.
+            // Allocate a HeapNumber for the new property.
             AllocationBuilder a(jsgraph(), effect, control);
             a.Allocate(HeapNumber::kSize, AllocationType::kYoung,
                        Type::OtherInternal());
             a.Store(AccessBuilder::ForMap(),
-                    factory()->mutable_heap_number_map());
+                    MapRef(broker(), factory()->heap_number_map()));
             FieldAccess value_field_access =
                 AccessBuilder::ForHeapNumberValue();
-            value_field_access.constness = field_access.constness;
+            value_field_access.const_field_info = field_access.const_field_info;
             a.Store(value_field_access, value);
             value = effect = a.Finish();
 
@@ -2241,7 +2281,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
                 MachineType::TypeCompressedTaggedPointer();
             field_access.write_barrier_kind = kPointerWriteBarrier;
           } else {
-            // We just store directly to the MutableHeapNumber.
+            // We just store directly to the HeapNumber.
             FieldAccess const storage_access = {
                 kTaggedBase,
                 field_index.offset(),
@@ -2251,7 +2291,8 @@ JSNativeContextSpecialization::BuildPropertyStore(
                 MachineType::TypeCompressedTaggedPointer(),
                 kPointerWriteBarrier,
                 LoadSensitivity::kUnsafe,
-                constness};
+                access_info.GetConstFieldInfo(),
+                access_mode == AccessMode::kStoreInLiteral};
             storage = effect =
                 graph()->NewNode(simplified()->LoadField(storage_access),
                                  storage, effect, control);
@@ -2300,7 +2341,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
         if (field_representation == MachineRepresentation::kTaggedSigned ||
             field_representation == MachineRepresentation::kCompressedSigned) {
           value = effect = graph()->NewNode(
-              simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
+              simplified()->CheckSmi(FeedbackSource()), value, effect, control);
           field_access.write_barrier_kind = kNoWriteBarrier;
 
         } else if (field_representation ==
@@ -2356,7 +2397,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
                                   storage, value, effect, control);
 
         // Atomically switch to the new properties below.
-        field_access = AccessBuilder::ForJSObjectPropertiesOrHash();
+        field_access = AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer();
         value = storage;
         storage = receiver;
       }
@@ -2382,80 +2423,18 @@ JSNativeContextSpecialization::BuildPropertyStore(
 Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
     Node* node) {
   DCHECK_EQ(IrOpcode::kJSStoreDataPropertyInLiteral, node->opcode());
-
   FeedbackParameter const& p = FeedbackParameterOf(node->op());
+  Node* const key = NodeProperties::GetValueInput(node, 1);
+  Node* const value = NodeProperties::GetValueInput(node, 2);
 
   if (!p.feedback().IsValid()) return NoChange();
-
-  FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-  if (nexus.IsUninitialized()) {
-    return NoChange();
-  }
-
-  if (nexus.ic_state() == MEGAMORPHIC) {
-    return NoChange();
-  }
-
-  DCHECK_EQ(MONOMORPHIC, nexus.ic_state());
-
-  Map map = nexus.GetFirstMap();
-  if (map.is_null()) {
-    // Maps are weakly held in the type feedback vector, we may not have one.
-    return NoChange();
-  }
-
-  Handle<Map> receiver_map(map, isolate());
-  if (!Map::TryUpdate(isolate(), receiver_map).ToHandle(&receiver_map))
-    return NoChange();
-
-  NameRef cached_name(
-      broker(),
-      handle(Name::cast(nexus.GetFeedbackExtra()->GetHeapObjectAssumeStrong()),
-             isolate()));
-
-  AccessInfoFactory access_info_factory(broker(), dependencies(),
-                                        graph()->zone());
-  PropertyAccessInfo access_info =
-      access_info_factory.ComputePropertyAccessInfo(
-          receiver_map, cached_name.object(), AccessMode::kStoreInLiteral);
-  if (access_info.IsInvalid()) return NoChange();
-  access_info.RecordDependencies(dependencies());
-
-  Node* receiver = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // Monomorphic property access.
-  PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
-  access_builder.BuildCheckMaps(receiver, &effect, control,
-                                access_info.receiver_maps());
-
-  // Ensure that {name} matches the cached name.
-  Node* name = NodeProperties::GetValueInput(node, 1);
-  Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
-                                 jsgraph()->Constant(cached_name));
-  effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongName),
-                            check, effect, control);
-
-  Node* value = NodeProperties::GetValueInput(node, 2);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
-
-  // Generate the actual property access.
-  ValueEffectControl continuation = BuildPropertyAccess(
-      receiver, value, context, frame_state_lazy, effect, control, cached_name,
-      nullptr, access_info, AccessMode::kStoreInLiteral);
-  value = continuation.value();
-  effect = continuation.effect();
-  control = continuation.control();
-
-  ReplaceWithValue(node, value, effect, control);
-  return Replace(value);
+  return ReducePropertyAccess(node, key, base::nullopt, value,
+                              FeedbackSource(p.feedback()),
+                              AccessMode::kStoreInLiteral);
 }
 
 Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
     Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode());
   FeedbackParameter const& p = FeedbackParameterOf(node->op());
   Node* const index = NodeProperties::GetValueInput(node, 1);
@@ -2591,7 +2570,7 @@ JSNativeContextSpecialization::BuildElementAccess(
       // bounds check below and just skip the property access if it's out of
       // bounds for the {receiver}.
       index = effect = graph()->NewNode(
-          simplified()->CheckSmi(VectorSlotPair()), index, effect, control);
+          simplified()->CheckSmi(FeedbackSource()), index, effect, control);
 
       // Cast the {index} to Unsigned32 range, so that the bounds checks
       // below are performed on unsigned values, which means that all the
@@ -2600,7 +2579,7 @@ JSNativeContextSpecialization::BuildElementAccess(
     } else {
       // Check that the {index} is in the valid range for the {receiver}.
       index = effect =
-          graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+          graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
                            length, effect, control);
     }
 
@@ -2660,7 +2639,7 @@ JSNativeContextSpecialization::BuildElementAccess(
         // and truncate it to a Number appropriately.
         value = effect = graph()->NewNode(
             simplified()->SpeculativeToNumber(
-                NumberOperationHint::kNumberOrOddball, VectorSlotPair()),
+                NumberOperationHint::kNumberOrOddball, FeedbackSource()),
             value, effect, control);
 
         // Introduce the appropriate truncation for {value}. Currently we
@@ -2756,12 +2735,12 @@ JSNativeContextSpecialization::BuildElementAccess(
       // bounds check below and just skip the store below if it's out of
       // bounds for the {receiver}.
       index = effect = graph()->NewNode(
-          simplified()->CheckBounds(VectorSlotPair()), index,
+          simplified()->CheckBounds(FeedbackSource()), index,
           jsgraph()->Constant(Smi::kMaxValue), effect, control);
     } else {
       // Check that the {index} is in the valid range for the {receiver}.
       index = effect =
-          graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+          graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
                            length, effect, control);
     }
 
@@ -2825,7 +2804,7 @@ JSNativeContextSpecialization::BuildElementAccess(
             // truncating.
             vtrue = etrue = graph()->NewNode(
                 simplified()->CheckFloat64Hole(
-                    CheckFloat64HoleMode::kAllowReturnHole, VectorSlotPair()),
+                    CheckFloat64HoleMode::kAllowReturnHole, FeedbackSource()),
                 vtrue, etrue, if_true);
           }
         }
@@ -2874,7 +2853,7 @@ JSNativeContextSpecialization::BuildElementAccess(
             mode = CheckFloat64HoleMode::kAllowReturnHole;
           }
           value = effect = graph()->NewNode(
-              simplified()->CheckFloat64Hole(mode, VectorSlotPair()), value,
+              simplified()->CheckFloat64Hole(mode, FeedbackSource()), value,
               effect, control);
         }
       }
@@ -2905,7 +2884,7 @@ JSNativeContextSpecialization::BuildElementAccess(
         Node* etrue = effect;
 
         Node* checked = etrue =
-            graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+            graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
                              length, etrue, if_true);
 
         Node* element = etrue =
@@ -2936,7 +2915,7 @@ JSNativeContextSpecialization::BuildElementAccess(
           } else {
             etrue = graph()->NewNode(
                 simplified()->CheckFloat64Hole(
-                    CheckFloat64HoleMode::kNeverReturnHole, VectorSlotPair()),
+                    CheckFloat64HoleMode::kNeverReturnHole, FeedbackSource()),
                 element, etrue, if_true);
           }
 
@@ -2956,10 +2935,10 @@ JSNativeContextSpecialization::BuildElementAccess(
 
       if (IsSmiElementsKind(elements_kind)) {
         value = effect = graph()->NewNode(
-            simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
+            simplified()->CheckSmi(FeedbackSource()), value, effect, control);
       } else if (IsDoubleElementsKind(elements_kind)) {
         value = effect =
-            graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+            graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value,
                              effect, control);
         // Make sure we do not store signalling NaNs into double arrays.
         value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
@@ -2994,7 +2973,7 @@ JSNativeContextSpecialization::BuildElementAccess(
                 : graph()->NewNode(simplified()->NumberAdd(), length,
                                    jsgraph()->OneConstant());
         index = effect =
-            graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+            graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
                              limit, effect, control);
 
         // Grow {elements} backing store if necessary.
@@ -3003,7 +2982,7 @@ JSNativeContextSpecialization::BuildElementAccess(
                 ? GrowFastElementsMode::kDoubleElements
                 : GrowFastElementsMode::kSmiOrObjectElements;
         elements = effect = graph()->NewNode(
-            simplified()->MaybeGrowFastElements(mode, VectorSlotPair()),
+            simplified()->MaybeGrowFastElements(mode, FeedbackSource()),
             receiver, elements, index, elements_length, effect, control);
 
         // If we didn't grow {elements}, it might still be COW, in which case we
@@ -3063,7 +3042,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
       dependencies()->DependOnNoElementsProtector()) {
     // Ensure that the {index} is a valid String length.
     index = *effect = graph()->NewNode(
-        simplified()->CheckBounds(VectorSlotPair()), index,
+        simplified()->CheckBounds(FeedbackSource()), index,
         jsgraph()->Constant(String::kMaxLength), *effect, *control);
 
     // Load the single character string from {receiver} or yield
@@ -3095,7 +3074,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
   } else {
     // Ensure that {index} is less than {receiver} length.
     index = *effect =
-        graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+        graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
                          length, *effect, *control);
 
     Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
@@ -3196,7 +3175,6 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
   // native contexts, as the global Array protector works isolate-wide).
   for (Handle<Map> map : receiver_maps) {
     MapRef receiver_map(broker(), map);
-    if (!FLAG_concurrent_inlining) receiver_map.SerializePrototype();
     ObjectRef receiver_prototype = receiver_map.prototype();
     if (!receiver_prototype.IsJSObject() ||
         !broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
@@ -3208,47 +3186,9 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
   return dependencies()->DependOnNoElementsProtector();
 }
 
-// Returns false iff we have insufficient feedback (uninitialized or obsolete).
-bool JSNativeContextSpecialization::ExtractReceiverMaps(
-    Node* receiver, Node* effect, FeedbackNexus const& nexus,
-    MapHandles* receiver_maps) {
-  DCHECK(receiver_maps->empty());
-  if (nexus.IsUninitialized()) return false;
-
-  // See if we can infer a concrete type for the {receiver}. Solely relying on
-  // the inference is not safe for keyed stores, because we would potentially
-  // miss out on transitions that need to be performed.
-  {
-    FeedbackSlotKind kind = nexus.kind();
-    bool use_inference =
-        !IsKeyedStoreICKind(kind) && !IsStoreInArrayLiteralICKind(kind);
-    if (use_inference && InferReceiverMaps(receiver, effect, receiver_maps)) {
-      TryUpdateThenDropDeprecated(isolate(), receiver_maps);
-      return true;
-    }
-  }
-
-  if (nexus.ExtractMaps(receiver_maps) == 0) return true;
-
-  // Try to filter impossible candidates based on inferred root map.
-  Handle<Map> root_map;
-  if (InferReceiverRootMap(receiver).ToHandle(&root_map)) {
-    DCHECK(!root_map->is_abandoned_prototype_map());
-    Isolate* isolate = this->isolate();
-    receiver_maps->erase(
-        std::remove_if(receiver_maps->begin(), receiver_maps->end(),
-                       [root_map, isolate](Handle<Map> map) {
-                         return map->is_abandoned_prototype_map() ||
-                                map->FindRootMap(isolate) != *root_map;
-                       }),
-        receiver_maps->end());
-  }
-  TryUpdateThenDropDeprecated(isolate(), receiver_maps);
-  return !receiver_maps->empty();
-}
-
 bool JSNativeContextSpecialization::InferReceiverMaps(
-    Node* receiver, Node* effect, MapHandles* receiver_maps) {
+    Node* receiver, Node* effect,
+    ZoneVector<Handle<Map>>* receiver_maps) const {
   ZoneHandleSet<Map> maps;
   NodeProperties::InferReceiverMapsResult result =
       NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
@@ -3273,21 +3213,24 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
   return false;
 }
 
-MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
-    Node* receiver) {
+base::Optional<MapRef> JSNativeContextSpecialization::InferReceiverRootMap(
+    Node* receiver) const {
   HeapObjectMatcher m(receiver);
   if (m.HasValue()) {
-    return handle(m.Value()->map().FindRootMap(isolate()), isolate());
+    MapRef map = m.Ref(broker()).map();
+    return map.FindRootMap();
   } else if (m.IsJSCreate()) {
     base::Optional<MapRef> initial_map =
         NodeProperties::GetJSCreateMap(broker(), receiver);
     if (initial_map.has_value()) {
-      DCHECK_EQ(*initial_map->object(),
-                initial_map->object()->FindRootMap(isolate()));
-      return initial_map->object();
+      if (!initial_map->FindRootMap().has_value()) {
+        return base::nullopt;
+      }
+      DCHECK(initial_map->equals(*initial_map->FindRootMap()));
+      return *initial_map;
     }
   }
-  return MaybeHandle<Map>();
+  return base::nullopt;
 }
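InferReceiverRootMap now returns base::Optional<MapRef> instead of MaybeHandle<Map>, so callers test has_value() rather than ToHandle(), and an absent root map simply propagates. A sketch of the same calling convention using std::optional (illustrative names, not V8 code):

    // Standalone sketch (not V8 code): optional-based "maybe a root map"
    // plumbing, modeled with std::optional in place of base::Optional.
    #include <iostream>
    #include <optional>

    struct MapRef {
      int id;
    };

    std::optional<MapRef> FindRootMap(int id) {
      if (id < 0) return std::nullopt;  // e.g. data not yet serialized.
      return MapRef{id};
    }

    std::optional<MapRef> InferReceiverRootMap(int receiver_id) {
      std::optional<MapRef> root = FindRootMap(receiver_id);
      if (!root.has_value()) return std::nullopt;  // Absence propagates.
      return *root;
    }

    int main() {
      std::cout << (InferReceiverRootMap(7).has_value() ? "found" : "absent")
                << "\n";
    }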
 
 Graph* JSNativeContextSpecialization::graph() const {
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 8510c76bfc3d59..a0707b98303d46 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -53,7 +53,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
 
   JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph,
                                 JSHeapBroker* broker, Flags flags,
-                                Handle<Context> native_context,
                                 CompilationDependencies* dependencies,
                                 Zone* zone, Zone* shared_zone);
 
@@ -84,6 +83,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
   Reduction ReduceJSLoadGlobal(Node* node);
   Reduction ReduceJSStoreGlobal(Node* node);
   Reduction ReduceJSLoadNamed(Node* node);
+  Reduction ReduceJSGetIterator(Node* node);
   Reduction ReduceJSStoreNamed(Node* node);
   Reduction ReduceJSHasProperty(Node* node);
   Reduction ReduceJSLoadProperty(Node* node);
@@ -114,9 +114,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
   Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
                                NameRef const& name, AccessMode access_mode,
                                Node* key, PropertyCellRef const& property_cell);
-  Reduction ReduceKeyedLoadFromHeapConstant(Node* node, Node* key,
-                                            AccessMode access_mode,
-                                            KeyedAccessLoadMode load_mode);
+  Reduction ReduceElementLoadFromHeapConstant(Node* node, Node* key,
+                                              AccessMode access_mode,
+                                              KeyedAccessLoadMode load_mode);
   Reduction ReduceElementAccessOnString(Node* node, Node* index, Node* value,
                                         KeyedAccessMode const& keyed_mode);
 
@@ -212,18 +212,25 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
   // code dependencies and might use the array protector cell.
   bool CanTreatHoleAsUndefined(ZoneVector<Handle<Map>> const& receiver_maps);
 
-  // Extract receiver maps from {nexus} and filter based on {receiver} if
-  // possible.
-  bool ExtractReceiverMaps(Node* receiver, Node* effect,
-                           FeedbackNexus const& nexus,
-                           MapHandles* receiver_maps);
+  void RemoveImpossibleReceiverMaps(
+      Node* receiver, ZoneVector<Handle<Map>>* receiver_maps) const;
+
+  ElementAccessFeedback const& TryRefineElementAccessFeedback(
+      ElementAccessFeedback const& feedback, Node* receiver,
+      Node* effect) const;
+
+  void FilterMapsAndGetPropertyAccessInfos(
+      NamedAccessFeedback const& feedback, AccessMode access_mode,
+      Node* receiver, Node* effect,
+      ZoneVector<PropertyAccessInfo>* access_infos);
 
   // Try to infer maps for the given {receiver} at the current {effect}.
   bool InferReceiverMaps(Node* receiver, Node* effect,
-                         MapHandles* receiver_maps);
+                         ZoneVector<Handle<Map>>* receiver_maps) const;
+
   // Try to infer a root map for the {receiver} independent of the current
   // program location.
-  MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
+  base::Optional<MapRef> InferReceiverRootMap(Node* receiver) const;
 
   // Checks if we know at compile time that the {receiver} either definitely
   // has the {prototype} in its prototype chain, or the {receiver} definitely
@@ -234,7 +241,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
     kMayBeInPrototypeChain
   };
   InferHasInPrototypeChainResult InferHasInPrototypeChain(
-      Node* receiver, Node* effect, Handle<HeapObject> prototype);
+      Node* receiver, Node* effect, HeapObjectRef const& prototype);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -248,7 +255,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
   Flags flags() const { return flags_; }
   Handle<JSGlobalObject> global_object() const { return global_object_; }
   Handle<JSGlobalProxy> global_proxy() const { return global_proxy_; }
-  NativeContextRef native_context() const { return broker()->native_context(); }
+  NativeContextRef native_context() const {
+    return broker()->target_native_context();
+  }
   CompilationDependencies* dependencies() const { return dependencies_; }
   Zone* zone() const { return zone_; }
   Zone* shared_zone() const { return shared_zone_; }
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index e0f97922b2ced0..d0581b59a5af3a 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -9,7 +9,6 @@
 #include "src/base/lazy-instance.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
-#include "src/compiler/vector-slot-pair.h"
 #include "src/handles/handles-inl.h"
 #include "src/objects/objects-inl.h"
 
@@ -51,7 +50,8 @@ bool operator!=(ConstructParameters const& lhs,
 }
 
 size_t hash_value(ConstructParameters const& p) {
-  return base::hash_combine(p.arity(), p.frequency(), p.feedback());
+  return base::hash_combine(p.arity(), p.frequency(),
+                            FeedbackSource::Hash()(p.feedback()));
 }
 
 std::ostream& operator<<(std::ostream& os, ConstructParameters const& p) {
@@ -198,7 +198,8 @@ bool operator!=(StoreNamedOwnParameters const& lhs,
 }
 
 size_t hash_value(StoreNamedOwnParameters const& p) {
-  return base::hash_combine(p.name().location(), p.feedback());
+  return base::hash_combine(p.name().location(),
+                            FeedbackSource::Hash()(p.feedback()));
 }
 
 std::ostream& operator<<(std::ostream& os, StoreNamedOwnParameters const& p) {
@@ -219,7 +220,7 @@ bool operator!=(FeedbackParameter const& lhs, FeedbackParameter const& rhs) {
 }
 
 size_t hash_value(FeedbackParameter const& p) {
-  return base::hash_combine(p.feedback());
+  return FeedbackSource::Hash()(p.feedback());
 }
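These hash_value overloads now apply FeedbackSource::Hash explicitly because FeedbackSource exposes a hash functor rather than a free hash_value overload that base::hash_combine could find on its own. A standalone model of that pattern (hash_combine below is a simplified stand-in for V8's base::hash_combine):

    // Standalone sketch (not V8 code): a type carrying a nested hash functor
    // is hashed explicitly, then combined as a plain size_t.
    #include <cstddef>
    #include <functional>
    #include <iostream>

    struct FeedbackSource {
      int vector_id;
      int slot;
      struct Hash {
        size_t operator()(const FeedbackSource& s) const {
          return std::hash<int>()(s.vector_id) ^
                 (std::hash<int>()(s.slot) << 1);
        }
      };
    };

    size_t hash_combine(size_t a, size_t b) {
      return a ^ (b + 0x9e3779b9 + (a << 6) + (a >> 2));  // Boost-style mix.
    }

    int main() {
      FeedbackSource source{1, 2};
      // The functor is applied first; its result feeds the generic combiner.
      std::cout << hash_combine(42, FeedbackSource::Hash()(source)) << "\n";
    }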
 
 std::ostream& operator<<(std::ostream& os, FeedbackParameter const& p) {
@@ -248,7 +249,7 @@ bool operator!=(NamedAccess const& lhs, NamedAccess const& rhs) {
 
 size_t hash_value(NamedAccess const& p) {
   return base::hash_combine(p.name().location(), p.language_mode(),
-                            p.feedback());
+                            FeedbackSource::Hash()(p.feedback()));
 }
 
 
@@ -283,13 +284,15 @@ bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) {
 PropertyAccess const& PropertyAccessOf(const Operator* op) {
   DCHECK(op->opcode() == IrOpcode::kJSHasProperty ||
          op->opcode() == IrOpcode::kJSLoadProperty ||
-         op->opcode() == IrOpcode::kJSStoreProperty);
+         op->opcode() == IrOpcode::kJSStoreProperty ||
+         op->opcode() == IrOpcode::kJSGetIterator);
   return OpParameter<PropertyAccess>(op);
 }
 
 
 size_t hash_value(PropertyAccess const& p) {
-  return base::hash_combine(p.language_mode(), p.feedback());
+  return base::hash_combine(p.language_mode(),
+                            FeedbackSource::Hash()(p.feedback()));
 }
 
 
@@ -339,7 +342,7 @@ bool operator!=(StoreGlobalParameters const& lhs,
 
 size_t hash_value(StoreGlobalParameters const& p) {
   return base::hash_combine(p.language_mode(), p.name().location(),
-                            p.feedback());
+                            FeedbackSource::Hash()(p.feedback()));
 }
 
 
@@ -518,7 +521,8 @@ bool operator!=(CreateLiteralParameters const& lhs,
 
 
 size_t hash_value(CreateLiteralParameters const& p) {
-  return base::hash_combine(p.constant().location(), p.feedback(), p.length(),
+  return base::hash_combine(p.constant().location(),
+                            FeedbackSource::Hash()(p.feedback()), p.length(),
                             p.flags());
 }
 
@@ -546,7 +550,7 @@ bool operator!=(CloneObjectParameters const& lhs,
 }
 
 size_t hash_value(CloneObjectParameters const& p) {
-  return base::hash_combine(p.feedback(), p.flags());
+  return base::hash_combine(FeedbackSource::Hash()(p.feedback()), p.flags());
 }
 
 std::ostream& operator<<(std::ostream& os, CloneObjectParameters const& p) {
@@ -795,18 +799,18 @@ COMPARE_OP_LIST(COMPARE_OP)
 #undef COMPARE_OP
 
 const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
-    const VectorSlotPair& feedback) {
+    const FeedbackSource& feedback) {
   FeedbackParameter parameters(feedback);
   return new (zone()) Operator1<FeedbackParameter>(  // --
       IrOpcode::kJSStoreDataPropertyInLiteral,
       Operator::kNoThrow,              // opcode
       "JSStoreDataPropertyInLiteral",  // name
-      4, 1, 1, 0, 1, 0,                // counts
+      4, 1, 1, 0, 1, 1,                // counts
       parameters);                     // parameter
 }
 
 const Operator* JSOperatorBuilder::StoreInArrayLiteral(
-    const VectorSlotPair& feedback) {
+    const FeedbackSource& feedback) {
   FeedbackParameter parameters(feedback);
   return new (zone()) Operator1<FeedbackParameter>(  // --
       IrOpcode::kJSStoreInArrayLiteral,
@@ -828,7 +832,7 @@ const Operator* JSOperatorBuilder::CallForwardVarargs(size_t arity,
 
 const Operator* JSOperatorBuilder::Call(size_t arity,
                                         CallFrequency const& frequency,
-                                        VectorSlotPair const& feedback,
+                                        FeedbackSource const& feedback,
                                         ConvertReceiverMode convert_mode,
                                         SpeculationMode speculation_mode) {
   DCHECK_IMPLIES(speculation_mode == SpeculationMode::kAllowSpeculation,
@@ -853,7 +857,7 @@ const Operator* JSOperatorBuilder::CallWithArrayLike(
 
 const Operator* JSOperatorBuilder::CallWithSpread(
     uint32_t arity, CallFrequency const& frequency,
-    VectorSlotPair const& feedback, SpeculationMode speculation_mode) {
+    FeedbackSource const& feedback, SpeculationMode speculation_mode) {
   DCHECK_IMPLIES(speculation_mode == SpeculationMode::kAllowSpeculation,
                  feedback.IsValid());
   CallParameters parameters(arity, frequency, feedback,
@@ -903,7 +907,7 @@ const Operator* JSOperatorBuilder::ConstructForwardVarargs(
 // on AIX (v8:8193).
 const Operator* JSOperatorBuilder::Construct(uint32_t arity,
                                              CallFrequency const& frequency,
-                                             VectorSlotPair const& feedback) {
+                                             FeedbackSource const& feedback) {
   ConstructParameters parameters(arity, frequency, feedback);
   return new (zone()) Operator1<ConstructParameters>(   // --
       IrOpcode::kJSConstruct, Operator::kNoProperties,  // opcode
@@ -924,7 +928,7 @@ const Operator* JSOperatorBuilder::ConstructWithArrayLike(
 
 const Operator* JSOperatorBuilder::ConstructWithSpread(
     uint32_t arity, CallFrequency const& frequency,
-    VectorSlotPair const& feedback) {
+    FeedbackSource const& feedback) {
   ConstructParameters parameters(arity, frequency, feedback);
   return new (zone()) Operator1<ConstructParameters>(             // --
       IrOpcode::kJSConstructWithSpread, Operator::kNoProperties,  // opcode
@@ -934,7 +938,7 @@ const Operator* JSOperatorBuilder::ConstructWithSpread(
 }
 
 const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
-                                             const VectorSlotPair& feedback) {
+                                             const FeedbackSource& feedback) {
   NamedAccess access(LanguageMode::kSloppy, name, feedback);
   return new (zone()) Operator1<NamedAccess>(           // --
       IrOpcode::kJSLoadNamed, Operator::kNoProperties,  // opcode
@@ -944,7 +948,7 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
 }
 
 const Operator* JSOperatorBuilder::LoadProperty(
-    VectorSlotPair const& feedback) {
+    FeedbackSource const& feedback) {
   PropertyAccess access(LanguageMode::kSloppy, feedback);
   return new (zone()) Operator1<PropertyAccess>(           // --
       IrOpcode::kJSLoadProperty, Operator::kNoProperties,  // opcode
@@ -953,7 +957,16 @@ const Operator* JSOperatorBuilder::LoadProperty(
       access);                                             // parameter
 }
 
-const Operator* JSOperatorBuilder::HasProperty(VectorSlotPair const& feedback) {
+const Operator* JSOperatorBuilder::GetIterator(FeedbackSource const& feedback) {
+  PropertyAccess access(LanguageMode::kSloppy, feedback);
+  return new (zone()) Operator1<PropertyAccess>(          // --
+      IrOpcode::kJSGetIterator, Operator::kNoProperties,  // opcode
+      "JSGetIterator",                                    // name
+      1, 1, 1, 1, 1, 2,                                   // counts
+      access);                                            // parameter
+}
+
+const Operator* JSOperatorBuilder::HasProperty(FeedbackSource const& feedback) {
   PropertyAccess access(LanguageMode::kSloppy, feedback);
   return new (zone()) Operator1<PropertyAccess>(          // --
       IrOpcode::kJSHasProperty, Operator::kNoProperties,  // opcode
@@ -962,7 +975,7 @@ const Operator* JSOperatorBuilder::HasProperty(VectorSlotPair const& feedback) {
       access);                                            // parameter
 }
 
-const Operator* JSOperatorBuilder::InstanceOf(VectorSlotPair const& feedback) {
+const Operator* JSOperatorBuilder::InstanceOf(FeedbackSource const& feedback) {
   FeedbackParameter parameter(feedback);
   return new (zone()) Operator1<FeedbackParameter>(      // --
       IrOpcode::kJSInstanceOf, Operator::kNoProperties,  // opcode
@@ -1021,7 +1034,7 @@ int RestoreRegisterIndexOf(const Operator* op) {
 
 const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
                                               Handle<Name> name,
-                                              VectorSlotPair const& feedback) {
+                                              FeedbackSource const& feedback) {
   NamedAccess access(language_mode, name, feedback);
   return new (zone()) Operator1<NamedAccess>(            // --
       IrOpcode::kJSStoreNamed, Operator::kNoProperties,  // opcode
@@ -1030,9 +1043,8 @@ const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
       access);                                           // parameter
 }
 
-
 const Operator* JSOperatorBuilder::StoreProperty(
-    LanguageMode language_mode, VectorSlotPair const& feedback) {
+    LanguageMode language_mode, FeedbackSource const& feedback) {
   PropertyAccess access(language_mode, feedback);
   return new (zone()) Operator1<PropertyAccess>(            // --
       IrOpcode::kJSStoreProperty, Operator::kNoProperties,  // opcode
@@ -1042,7 +1054,7 @@ const Operator* JSOperatorBuilder::StoreProperty(
 }
 
 const Operator* JSOperatorBuilder::StoreNamedOwn(
-    Handle<Name> name, VectorSlotPair const& feedback) {
+    Handle<Name> name, FeedbackSource const& feedback) {
   StoreNamedOwnParameters parameters(name, feedback);
   return new (zone()) Operator1<StoreNamedOwnParameters>(   // --
       IrOpcode::kJSStoreNamedOwn, Operator::kNoProperties,  // opcode
@@ -1066,7 +1078,7 @@ const Operator* JSOperatorBuilder::CreateGeneratorObject() {
 }
 
 const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
-                                              const VectorSlotPair& feedback,
+                                              const FeedbackSource& feedback,
                                               TypeofMode typeof_mode) {
   LoadGlobalParameters parameters(name, feedback, typeof_mode);
   return new (zone()) Operator1<LoadGlobalParameters>(   // --
@@ -1076,10 +1088,9 @@ const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
       parameters);                                       // parameter
 }
 
-
 const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
                                                const Handle<Name>& name,
-                                               const VectorSlotPair& feedback) {
+                                               const FeedbackSource& feedback) {
   StoreGlobalParameters parameters(language_mode, feedback, name);
   return new (zone()) Operator1<StoreGlobalParameters>(   // --
       IrOpcode::kJSStoreGlobal, Operator::kNoProperties,  // opcode
@@ -1088,7 +1099,6 @@ const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
       parameters);                                        // parameter
 }
 
-
 const Operator* JSOperatorBuilder::LoadContext(size_t depth, size_t index,
                                                bool immutable) {
   ContextAccess access(depth, index, immutable);
@@ -1203,7 +1213,7 @@ const Operator* JSOperatorBuilder::CreateClosure(
 
 const Operator* JSOperatorBuilder::CreateLiteralArray(
     Handle<ArrayBoilerplateDescription> description,
-    VectorSlotPair const& feedback, int literal_flags, int number_of_elements) {
+    FeedbackSource const& feedback, int literal_flags, int number_of_elements) {
   CreateLiteralParameters parameters(description, feedback, number_of_elements,
                                      literal_flags);
   return new (zone()) Operator1<CreateLiteralParameters>(  // --
@@ -1215,7 +1225,7 @@ const Operator* JSOperatorBuilder::CreateLiteralArray(
 }
 
 const Operator* JSOperatorBuilder::CreateEmptyLiteralArray(
-    VectorSlotPair const& feedback) {
+    FeedbackSource const& feedback) {
   FeedbackParameter parameters(feedback);
   return new (zone()) Operator1<FeedbackParameter>(  // --
       IrOpcode::kJSCreateEmptyLiteralArray,          // opcode
@@ -1235,7 +1245,7 @@ const Operator* JSOperatorBuilder::CreateArrayFromIterable() {
 
 const Operator* JSOperatorBuilder::CreateLiteralObject(
     Handle<ObjectBoilerplateDescription> constant_properties,
-    VectorSlotPair const& feedback, int literal_flags,
+    FeedbackSource const& feedback, int literal_flags,
     int number_of_properties) {
   CreateLiteralParameters parameters(constant_properties, feedback,
                                      number_of_properties, literal_flags);
@@ -1247,7 +1257,7 @@ const Operator* JSOperatorBuilder::CreateLiteralObject(
       parameters);                                         // parameter
 }
 
-const Operator* JSOperatorBuilder::CloneObject(VectorSlotPair const& feedback,
+const Operator* JSOperatorBuilder::CloneObject(FeedbackSource const& feedback,
                                                int literal_flags) {
   CloneObjectParameters parameters(feedback, literal_flags);
   return new (zone()) Operator1<CloneObjectParameters>(  // --
@@ -1267,7 +1277,7 @@ const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
 }
 
 const Operator* JSOperatorBuilder::CreateLiteralRegExp(
-    Handle<String> constant_pattern, VectorSlotPair const& feedback,
+    Handle<String> constant_pattern, FeedbackSource const& feedback,
     int literal_flags) {
   CreateLiteralParameters parameters(constant_pattern, feedback, -1,
                                      literal_flags);
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index e7d9acb152acfe..f795a2f4029eef 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -7,7 +7,7 @@
 
 #include "src/base/compiler-specific.h"
 #include "src/common/globals.h"
-#include "src/compiler/vector-slot-pair.h"
+#include "src/compiler/feedback-source.h"
 #include "src/handles/maybe-handles.h"
 #include "src/objects/type-hints.h"
 #include "src/runtime/runtime.h"
@@ -102,17 +102,17 @@ ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
 class ConstructParameters final {
  public:
   ConstructParameters(uint32_t arity, CallFrequency const& frequency,
-                      VectorSlotPair const& feedback)
+                      FeedbackSource const& feedback)
       : arity_(arity), frequency_(frequency), feedback_(feedback) {}
 
   uint32_t arity() const { return arity_; }
   CallFrequency const& frequency() const { return frequency_; }
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
  private:
   uint32_t const arity_;
   CallFrequency const frequency_;
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
 };
 
 bool operator==(ConstructParameters const&, ConstructParameters const&);
@@ -163,7 +163,7 @@ CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
 class CallParameters final {
  public:
   CallParameters(size_t arity, CallFrequency const& frequency,
-                 VectorSlotPair const& feedback,
+                 FeedbackSource const& feedback,
                  ConvertReceiverMode convert_mode,
                  SpeculationMode speculation_mode)
       : bit_field_(ArityField::encode(arity) |
@@ -177,7 +177,7 @@ class CallParameters final {
   ConvertReceiverMode convert_mode() const {
     return ConvertReceiverModeField::decode(bit_field_);
   }
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
   SpeculationMode speculation_mode() const {
     return SpeculationModeField::decode(bit_field_);
@@ -192,7 +192,9 @@ class CallParameters final {
 
  private:
   friend size_t hash_value(CallParameters const& p) {
-    return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_);
+    FeedbackSource::Hash feedback_hash;
+    return base::hash_combine(p.bit_field_, p.frequency_,
+                              feedback_hash(p.feedback_));
   }
 
   using ArityField = BitField<size_t, 0, 28>;
@@ -201,7 +203,7 @@ class CallParameters final {
 
   uint32_t const bit_field_;
   CallFrequency const frequency_;
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
 };
 
 size_t hash_value(CallParameters const&);
@@ -297,15 +299,15 @@ CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
 // Defines parameters for JSStoreNamedOwn operator.
 class StoreNamedOwnParameters final {
  public:
-  StoreNamedOwnParameters(Handle<Name> name, VectorSlotPair const& feedback)
+  StoreNamedOwnParameters(Handle<Name> name, FeedbackSource const& feedback)
       : name_(name), feedback_(feedback) {}
 
   Handle<Name> name() const { return name_; }
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
  private:
   Handle<Name> const name_;
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
 };
 
 bool operator==(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&);
@@ -322,13 +324,13 @@ const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op);
 // and JSStoreDataPropertyInLiteral operators.
 class FeedbackParameter final {
  public:
-  explicit FeedbackParameter(VectorSlotPair const& feedback)
+  explicit FeedbackParameter(FeedbackSource const& feedback)
       : feedback_(feedback) {}
 
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
  private:
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
 };
 
 bool operator==(FeedbackParameter const&, FeedbackParameter const&);
@@ -345,16 +347,16 @@ const FeedbackParameter& FeedbackParameterOf(const Operator* op);
 class NamedAccess final {
  public:
   NamedAccess(LanguageMode language_mode, Handle<Name> name,
-              VectorSlotPair const& feedback)
+              FeedbackSource const& feedback)
       : name_(name), feedback_(feedback), language_mode_(language_mode) {}
 
   Handle<Name> name() const { return name_; }
   LanguageMode language_mode() const { return language_mode_; }
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
  private:
   Handle<Name> const name_;
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
   LanguageMode const language_mode_;
 };
 
@@ -372,18 +374,18 @@ const NamedAccess& NamedAccessOf(const Operator* op);
 // used as a parameter by JSLoadGlobal operator.
 class LoadGlobalParameters final {
  public:
-  LoadGlobalParameters(const Handle<Name>& name, const VectorSlotPair& feedback,
+  LoadGlobalParameters(const Handle<Name>& name, const FeedbackSource& feedback,
                        TypeofMode typeof_mode)
       : name_(name), feedback_(feedback), typeof_mode_(typeof_mode) {}
 
   const Handle<Name>& name() const { return name_; }
   TypeofMode typeof_mode() const { return typeof_mode_; }
 
-  const VectorSlotPair& feedback() const { return feedback_; }
+  const FeedbackSource& feedback() const { return feedback_; }
 
  private:
   const Handle<Name> name_;
-  const VectorSlotPair feedback_;
+  const FeedbackSource feedback_;
   const TypeofMode typeof_mode_;
 };
 
@@ -402,18 +404,18 @@ const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op);
 class StoreGlobalParameters final {
  public:
   StoreGlobalParameters(LanguageMode language_mode,
-                        const VectorSlotPair& feedback,
+                        const FeedbackSource& feedback,
                         const Handle<Name>& name)
       : language_mode_(language_mode), name_(name), feedback_(feedback) {}
 
   LanguageMode language_mode() const { return language_mode_; }
-  const VectorSlotPair& feedback() const { return feedback_; }
+  const FeedbackSource& feedback() const { return feedback_; }
   const Handle<Name>& name() const { return name_; }
 
  private:
   const LanguageMode language_mode_;
   const Handle<Name> name_;
-  const VectorSlotPair feedback_;
+  const FeedbackSource feedback_;
 };
 
 bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&);
@@ -430,14 +432,14 @@ const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op);
 // as a parameter by the JSLoadProperty and JSStoreProperty operators.
 class PropertyAccess final {
  public:
-  PropertyAccess(LanguageMode language_mode, VectorSlotPair const& feedback)
+  PropertyAccess(LanguageMode language_mode, FeedbackSource const& feedback)
       : feedback_(feedback), language_mode_(language_mode) {}
 
   LanguageMode language_mode() const { return language_mode_; }
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
  private:
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
   LanguageMode const language_mode_;
 };
 
@@ -602,20 +604,20 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
 class CreateLiteralParameters final {
  public:
   CreateLiteralParameters(Handle<HeapObject> constant,
-                          VectorSlotPair const& feedback, int length, int flags)
+                          FeedbackSource const& feedback, int length, int flags)
       : constant_(constant),
         feedback_(feedback),
         length_(length),
         flags_(flags) {}
 
   Handle<HeapObject> constant() const { return constant_; }
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
   int length() const { return length_; }
   int flags() const { return flags_; }
 
  private:
   Handle<HeapObject> const constant_;
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
   int const length_;
   int const flags_;
 };
@@ -631,14 +633,14 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
 
 class CloneObjectParameters final {
  public:
-  CloneObjectParameters(VectorSlotPair const& feedback, int flags)
+  CloneObjectParameters(FeedbackSource const& feedback, int flags)
       : feedback_(feedback), flags_(flags) {}
 
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
   int flags() const { return flags_; }
 
  private:
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
   int const flags_;
 };
 
@@ -735,32 +737,32 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
   const Operator* CreateTypedArray();
   const Operator* CreateLiteralArray(
       Handle<ArrayBoilerplateDescription> constant,
-      VectorSlotPair const& feedback, int literal_flags,
+      FeedbackSource const& feedback, int literal_flags,
       int number_of_elements);
-  const Operator* CreateEmptyLiteralArray(VectorSlotPair const& feedback);
+  const Operator* CreateEmptyLiteralArray(FeedbackSource const& feedback);
   const Operator* CreateArrayFromIterable();
   const Operator* CreateEmptyLiteralObject();
 
   const Operator* CreateLiteralObject(
       Handle<ObjectBoilerplateDescription> constant,
-      VectorSlotPair const& feedback, int literal_flags,
+      FeedbackSource const& feedback, int literal_flags,
       int number_of_properties);
-  const Operator* CloneObject(VectorSlotPair const& feedback,
+  const Operator* CloneObject(FeedbackSource const& feedback,
                               int literal_flags);
   const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
-                                      VectorSlotPair const& feedback,
+                                      FeedbackSource const& feedback,
                                       int literal_flags);
 
   const Operator* CallForwardVarargs(size_t arity, uint32_t start_index);
   const Operator* Call(
       size_t arity, CallFrequency const& frequency = CallFrequency(),
-      VectorSlotPair const& feedback = VectorSlotPair(),
+      FeedbackSource const& feedback = FeedbackSource(),
       ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
       SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
   const Operator* CallWithArrayLike(CallFrequency const& frequency);
   const Operator* CallWithSpread(
       uint32_t arity, CallFrequency const& frequency = CallFrequency(),
-      VectorSlotPair const& feedback = VectorSlotPair(),
+      FeedbackSource const& feedback = FeedbackSource(),
       SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
   const Operator* CallRuntime(Runtime::FunctionId id);
   const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
@@ -769,39 +771,39 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
   const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index);
   const Operator* Construct(uint32_t arity,
                             CallFrequency const& frequency = CallFrequency(),
-                            VectorSlotPair const& feedback = VectorSlotPair());
+                            FeedbackSource const& feedback = FeedbackSource());
   const Operator* ConstructWithArrayLike(CallFrequency const& frequency);
   const Operator* ConstructWithSpread(
       uint32_t arity, CallFrequency const& frequency = CallFrequency(),
-      VectorSlotPair const& feedback = VectorSlotPair());
+      FeedbackSource const& feedback = FeedbackSource());
 
-  const Operator* LoadProperty(VectorSlotPair const& feedback);
-  const Operator* LoadNamed(Handle<Name> name, VectorSlotPair const& feedback);
+  const Operator* LoadProperty(FeedbackSource const& feedback);
+  const Operator* LoadNamed(Handle<Name> name, FeedbackSource const& feedback);
 
   const Operator* StoreProperty(LanguageMode language_mode,
-                                VectorSlotPair const& feedback);
+                                FeedbackSource const& feedback);
   const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
-                             VectorSlotPair const& feedback);
+                             FeedbackSource const& feedback);
 
   const Operator* StoreNamedOwn(Handle<Name> name,
-                                VectorSlotPair const& feedback);
-  const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback);
-  const Operator* StoreInArrayLiteral(const VectorSlotPair& feedback);
+                                FeedbackSource const& feedback);
+  const Operator* StoreDataPropertyInLiteral(const FeedbackSource& feedback);
+  const Operator* StoreInArrayLiteral(const FeedbackSource& feedback);
 
   const Operator* DeleteProperty();
 
-  const Operator* HasProperty(VectorSlotPair const& feedback);
+  const Operator* HasProperty(FeedbackSource const& feedback);
 
   const Operator* GetSuperConstructor();
 
   const Operator* CreateGeneratorObject();
 
   const Operator* LoadGlobal(const Handle<Name>& name,
-                             const VectorSlotPair& feedback,
+                             const FeedbackSource& feedback,
                              TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
   const Operator* StoreGlobal(LanguageMode language_mode,
                               const Handle<Name>& name,
-                              const VectorSlotPair& feedback);
+                              const FeedbackSource& feedback);
 
   const Operator* LoadContext(size_t depth, size_t index, bool immutable);
   const Operator* StoreContext(size_t depth, size_t index);
@@ -810,7 +812,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
   const Operator* StoreModule(int32_t cell_index);
 
   const Operator* HasInPrototypeChain();
-  const Operator* InstanceOf(const VectorSlotPair& feedback);
+  const Operator* InstanceOf(const FeedbackSource& feedback);
   const Operator* OrdinaryHasInstance();
 
   const Operator* AsyncFunctionEnter();
@@ -854,6 +856,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
   const Operator* ParseInt();
   const Operator* RegExpTest();
 
+  const Operator* GetIterator(FeedbackSource const& feedback);
+
  private:
   Zone* zone() const { return zone_; }
 
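
Note: one non-mechanical detail in the header hunks above is the hash. base::hash_combine cannot consume a FeedbackSource directly, so hash_value(CallParameters) now routes the member through the nested FeedbackSource::Hash functor first. A self-contained sketch of that pattern (stand-in types; the mixing step approximates hash_combine with the usual golden-ratio constant):

    #include <cstddef>
    #include <cstdint>
    #include <functional>

    struct SourceLike {
      const void* vector = nullptr;
      int slot = -1;
      struct Hash {  // explicit functor, analogous to FeedbackSource::Hash
        std::size_t operator()(const SourceLike& s) const {
          std::size_t h = std::hash<const void*>()(s.vector);
          return h ^
                 (std::hash<int>()(s.slot) + 0x9e3779b9 + (h << 6) + (h >> 2));
        }
      };
    };

    struct CallParamsLike {
      uint32_t bit_field = 0;
      SourceLike feedback;
    };

    // Mirrors base::hash_combine(p.bit_field_, ..., feedback_hash(p.feedback_)).
    std::size_t hash_value(const CallParamsLike& p) {
      SourceLike::Hash feedback_hash;
      std::size_t h = std::hash<uint32_t>()(p.bit_field);
      return h ^ (feedback_hash(p.feedback) + 0x9e3779b9 + (h << 6) + (h >> 2));
    }
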
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index f3696bcc4887f4..e1ff928cec646c 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -6,6 +6,7 @@
 
 #include "src/compiler/access-builder.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/objects/feedback-vector.h"
@@ -78,16 +79,6 @@ class JSSpeculativeBinopBuilder final {
         control_(control),
         slot_(slot) {}
 
-  BinaryOperationHint GetBinaryOperationHint() {
-    FeedbackNexus nexus(feedback_vector(), slot_);
-    return nexus.GetBinaryOperationFeedback();
-  }
-
-  CompareOperationHint GetCompareOperationHint() {
-    FeedbackNexus nexus(feedback_vector(), slot_);
-    return nexus.GetCompareOperationFeedback();
-  }
-
   bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
     return BinaryOperationHintToNumberOperationHint(GetBinaryOperationHint(),
                                                     hint);
@@ -239,34 +230,52 @@ class JSSpeculativeBinopBuilder final {
   JSOperatorBuilder* javascript() { return jsgraph()->javascript(); }
   SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
   CommonOperatorBuilder* common() { return jsgraph()->common(); }
-  const Handle<FeedbackVector>& feedback_vector() const {
-    return lowering_->feedback_vector();
-  }
 
  private:
-  const JSTypeHintLowering* lowering_;
-  const Operator* op_;
+  BinaryOperationHint GetBinaryOperationHint() {
+    return lowering_->GetBinaryOperationHint(slot_);
+  }
+
+  CompareOperationHint GetCompareOperationHint() {
+    return lowering_->GetCompareOperationHint(slot_);
+  }
+
+  JSTypeHintLowering const* const lowering_;
+  Operator const* const op_;
   Node* left_;
   Node* right_;
-  Node* effect_;
-  Node* control_;
-  FeedbackSlot slot_;
+  Node* const effect_;
+  Node* const control_;
+  FeedbackSlot const slot_;
 };
 
-JSTypeHintLowering::JSTypeHintLowering(JSGraph* jsgraph,
-                                       Handle<FeedbackVector> feedback_vector,
+JSTypeHintLowering::JSTypeHintLowering(JSHeapBroker* broker, JSGraph* jsgraph,
+                                       FeedbackVectorRef feedback_vector,
                                        Flags flags)
-    : jsgraph_(jsgraph), flags_(flags), feedback_vector_(feedback_vector) {}
+    : broker_(broker),
+      jsgraph_(jsgraph),
+      flags_(flags),
+      feedback_vector_(feedback_vector) {}
 
 Isolate* JSTypeHintLowering::isolate() const { return jsgraph()->isolate(); }
 
+BinaryOperationHint JSTypeHintLowering::GetBinaryOperationHint(
+    FeedbackSlot slot) const {
+  FeedbackSource source(feedback_vector(), slot);
+  return broker()->GetFeedbackForBinaryOperation(source);
+}
+
+CompareOperationHint JSTypeHintLowering::GetCompareOperationHint(
+    FeedbackSlot slot) const {
+  FeedbackSource source(feedback_vector(), slot);
+  return broker()->GetFeedbackForCompareOperation(source);
+}
+
 JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
     const Operator* op, Node* operand, Node* effect, Node* control,
     FeedbackSlot slot) const {
-  DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
   if (Node* node = TryBuildSoftDeopt(
-          nexus, effect, control,
+          slot, effect, control,
           DeoptimizeReason::kInsufficientTypeFeedbackForUnaryOperation)) {
     return LoweringResult::Exit(node);
   }
@@ -309,9 +318,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
                                   control, slot);
       node = b.TryBuildNumberBinop();
       if (!node) {
-        FeedbackNexus nexus(feedback_vector(), slot);
-        if (nexus.GetBinaryOperationFeedback() ==
-            BinaryOperationHint::kBigInt) {
+        if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
           const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate(
               BigIntOperationHint::kBigInt);
           node = jsgraph()->graph()->NewNode(op, operand, effect, control);
@@ -335,10 +342,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
     FeedbackSlot slot) const {
   switch (op->opcode()) {
     case IrOpcode::kJSStrictEqual: {
-      DCHECK(!slot.IsInvalid());
-      FeedbackNexus nexus(feedback_vector(), slot);
       if (Node* node = TryBuildSoftDeopt(
-              nexus, effect, control,
+              slot, effect, control,
               DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
         return LoweringResult::Exit(node);
       }
@@ -351,10 +356,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
     case IrOpcode::kJSGreaterThan:
     case IrOpcode::kJSLessThanOrEqual:
     case IrOpcode::kJSGreaterThanOrEqual: {
-      DCHECK(!slot.IsInvalid());
-      FeedbackNexus nexus(feedback_vector(), slot);
       if (Node* node = TryBuildSoftDeopt(
-              nexus, effect, control,
+              slot, effect, control,
               DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
         return LoweringResult::Exit(node);
       }
@@ -365,10 +368,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
       break;
     }
     case IrOpcode::kJSInstanceOf: {
-      DCHECK(!slot.IsInvalid());
-      FeedbackNexus nexus(feedback_vector(), slot);
       if (Node* node = TryBuildSoftDeopt(
-              nexus, effect, control,
+              slot, effect, control,
               DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
         return LoweringResult::Exit(node);
       }
@@ -387,10 +388,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
     case IrOpcode::kJSMultiply:
     case IrOpcode::kJSDivide:
     case IrOpcode::kJSModulus: {
-      DCHECK(!slot.IsInvalid());
-      FeedbackNexus nexus(feedback_vector(), slot);
       if (Node* node = TryBuildSoftDeopt(
-              nexus, effect, control,
+              slot, effect, control,
               DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation)) {
         return LoweringResult::Exit(node);
       }
@@ -406,6 +405,11 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
       break;
     }
     case IrOpcode::kJSExponentiate: {
+      if (Node* node = TryBuildSoftDeopt(
+              slot, effect, control,
+              DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation)) {
+        return LoweringResult::Exit(node);
+      }
       // TODO(neis): Introduce a SpeculativeNumberPow operator?
       break;
     }
@@ -418,10 +422,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
 JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceForInNextOperation(
     Node* receiver, Node* cache_array, Node* cache_type, Node* index,
     Node* effect, Node* control, FeedbackSlot slot) const {
-  DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
   if (Node* node = TryBuildSoftDeopt(
-          nexus, effect, control,
+          slot, effect, control,
           DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
     return LoweringResult::Exit(node);
   }
@@ -432,10 +434,8 @@ JSTypeHintLowering::LoweringResult
 JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect,
                                                 Node* control,
                                                 FeedbackSlot slot) const {
-  DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
   if (Node* node = TryBuildSoftDeopt(
-          nexus, effect, control,
+          slot, effect, control,
           DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
     return LoweringResult::Exit(node);
   }
@@ -445,12 +445,11 @@ JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect,
 JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceToNumberOperation(
     Node* input, Node* effect, Node* control, FeedbackSlot slot) const {
   DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
   NumberOperationHint hint;
-  if (BinaryOperationHintToNumberOperationHint(
-          nexus.GetBinaryOperationFeedback(), &hint)) {
+  if (BinaryOperationHintToNumberOperationHint(GetBinaryOperationHint(slot),
+                                               &hint)) {
     Node* node = jsgraph()->graph()->NewNode(
-        jsgraph()->simplified()->SpeculativeToNumber(hint, VectorSlotPair()),
+        jsgraph()->simplified()->SpeculativeToNumber(hint, FeedbackSource()),
         input, effect, control);
     return LoweringResult::SideEffectFree(node, node, control);
   }
@@ -462,10 +461,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceCallOperation(
     Node* control, FeedbackSlot slot) const {
   DCHECK(op->opcode() == IrOpcode::kJSCall ||
          op->opcode() == IrOpcode::kJSCallWithSpread);
-  DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
   if (Node* node = TryBuildSoftDeopt(
-          nexus, effect, control,
+          slot, effect, control,
           DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
     return LoweringResult::Exit(node);
   }
@@ -477,10 +474,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation(
     Node* control, FeedbackSlot slot) const {
   DCHECK(op->opcode() == IrOpcode::kJSConstruct ||
          op->opcode() == IrOpcode::kJSConstructWithSpread);
-  DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
   if (Node* node = TryBuildSoftDeopt(
-          nexus, effect, control,
+          slot, effect, control,
           DeoptimizeReason::kInsufficientTypeFeedbackForConstruct)) {
     return LoweringResult::Exit(node);
   }
@@ -490,11 +485,11 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation(
 JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation(
     const Operator* op, Node* receiver, Node* effect, Node* control,
     FeedbackSlot slot) const {
-  DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
-  DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
+  // JSGetIterator involves a named load of the Symbol.iterator property.
+  DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
+         op->opcode() == IrOpcode::kJSGetIterator);
   if (Node* node = TryBuildSoftDeopt(
-          nexus, effect, control,
+          slot, effect, control,
           DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
     return LoweringResult::Exit(node);
   }
@@ -505,10 +500,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadKeyedOperation(
     const Operator* op, Node* obj, Node* key, Node* effect, Node* control,
     FeedbackSlot slot) const {
   DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
-  DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
   if (Node* node = TryBuildSoftDeopt(
-          nexus, effect, control,
+          slot, effect, control,
           DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
     return LoweringResult::Exit(node);
   }
@@ -522,10 +515,8 @@ JSTypeHintLowering::ReduceStoreNamedOperation(const Operator* op, Node* obj,
                                               FeedbackSlot slot) const {
   DCHECK(op->opcode() == IrOpcode::kJSStoreNamed ||
          op->opcode() == IrOpcode::kJSStoreNamedOwn);
-  DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
   if (Node* node = TryBuildSoftDeopt(
-          nexus, effect, control,
+          slot, effect, control,
           DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
     return LoweringResult::Exit(node);
   }
@@ -538,31 +529,32 @@ JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj,
                                               Node* effect, Node* control,
                                               FeedbackSlot slot) const {
   DCHECK(op->opcode() == IrOpcode::kJSStoreProperty ||
-         op->opcode() == IrOpcode::kJSStoreInArrayLiteral);
-  DCHECK(!slot.IsInvalid());
-  FeedbackNexus nexus(feedback_vector(), slot);
+         op->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
+         op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
   if (Node* node = TryBuildSoftDeopt(
-          nexus, effect, control,
+          slot, effect, control,
           DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
     return LoweringResult::Exit(node);
   }
   return LoweringResult::NoChange();
 }
 
-Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect,
+Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackSlot slot, Node* effect,
                                             Node* control,
                                             DeoptimizeReason reason) const {
-  if ((flags() & kBailoutOnUninitialized) && nexus.IsUninitialized()) {
-    Node* deoptimize = jsgraph()->graph()->NewNode(
-        jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason,
-                                        VectorSlotPair()),
-        jsgraph()->Dead(), effect, control);
-    Node* frame_state =
-        NodeProperties::FindFrameStateBefore(deoptimize, jsgraph()->Dead());
-    deoptimize->ReplaceInput(0, frame_state);
-    return deoptimize;
-  }
-  return nullptr;
+  if (!(flags() & kBailoutOnUninitialized)) return nullptr;
+
+  FeedbackSource source(feedback_vector(), slot);
+  if (!broker()->FeedbackIsInsufficient(source)) return nullptr;
+
+  Node* deoptimize = jsgraph()->graph()->NewNode(
+      jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason,
+                                      FeedbackSource()),
+      jsgraph()->Dead(), effect, control);
+  Node* frame_state =
+      NodeProperties::FindFrameStateBefore(deoptimize, jsgraph()->Dead());
+  deoptimize->ReplaceInput(0, frame_state);
+  return deoptimize;
 }
 
 }  // namespace compiler
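
Note: taken together, the js-type-hint-lowering.cc hunks replace every locally constructed FeedbackNexus with a plain FeedbackSlot that flows into TryBuildSoftDeopt, which builds a FeedbackSource and asks the heap broker via FeedbackIsInsufficient. A control-flow sketch with stand-in types (the graph surgery is elided to a marker string):

    struct BrokerLike {
      bool insufficient = false;  // what the broker recorded for this source
      bool FeedbackIsInsufficient(int /*slot*/) const { return insufficient; }
    };

    // Returns a marker for the soft-deopt node, or nullptr to keep lowering;
    // the two early returns mirror the restructured TryBuildSoftDeopt above.
    const char* TryBuildSoftDeoptSketch(const BrokerLike& broker,
                                        bool bailout_on_uninitialized,
                                        int slot) {
      if (!bailout_on_uninitialized) return nullptr;
      if (!broker.FeedbackIsInsufficient(slot)) return nullptr;
      return "Deoptimize(kSoft, reason)";  // stands in for the real node build
    }
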
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index a74c0193558734..3e46fb2ec2a19c 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -41,8 +41,8 @@ class JSTypeHintLowering {
   enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 1 };
   using Flags = base::Flags<Flag>;
 
-  JSTypeHintLowering(JSGraph* jsgraph, Handle<FeedbackVector> feedback_vector,
-                     Flags flags);
+  JSTypeHintLowering(JSHeapBroker* broker, JSGraph* jsgraph,
+                     FeedbackVectorRef feedback_vector, Flags flags);
 
   // {LoweringResult} describes the result of lowering. The following outcomes
   // are possible:
@@ -153,20 +153,22 @@ class JSTypeHintLowering {
 
  private:
   friend class JSSpeculativeBinopBuilder;
-  Node* TryBuildSoftDeopt(FeedbackNexus& nexus,  // NOLINT(runtime/references)
-                          Node* effect, Node* control,
+
+  BinaryOperationHint GetBinaryOperationHint(FeedbackSlot slot) const;
+  CompareOperationHint GetCompareOperationHint(FeedbackSlot slot) const;
+  Node* TryBuildSoftDeopt(FeedbackSlot slot, Node* effect, Node* control,
                           DeoptimizeReason reason) const;
 
+  JSHeapBroker* broker() const { return broker_; }
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
   Flags flags() const { return flags_; }
-  const Handle<FeedbackVector>& feedback_vector() const {
-    return feedback_vector_;
-  }
+  FeedbackVectorRef const& feedback_vector() const { return feedback_vector_; }
 
-  JSGraph* jsgraph_;
+  JSHeapBroker* const broker_;
+  JSGraph* const jsgraph_;
   Flags const flags_;
-  Handle<FeedbackVector> feedback_vector_;
+  FeedbackVectorRef const feedback_vector_;
 
   DISALLOW_COPY_AND_ASSIGN(JSTypeHintLowering);
 };
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 3190fc993056c1..8caafe6aadfc40 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -200,14 +200,14 @@ class JSBinopReduction final {
   void CheckInputsToString() {
     if (!left_type().Is(Type::String())) {
       Node* left_input =
-          graph()->NewNode(simplified()->CheckString(VectorSlotPair()), left(),
+          graph()->NewNode(simplified()->CheckString(FeedbackSource()), left(),
                            effect(), control());
       node_->ReplaceInput(0, left_input);
       update_effect(left_input);
     }
     if (!right_type().Is(Type::String())) {
       Node* right_input =
-          graph()->NewNode(simplified()->CheckString(VectorSlotPair()), right(),
+          graph()->NewNode(simplified()->CheckString(FeedbackSource()), right(),
                            effect(), control());
       node_->ReplaceInput(1, right_input);
       update_effect(right_input);
@@ -576,7 +576,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
       // and thus potentially reduces the number of live ranges and allows for
       // more truncations.
       length = effect = graph()->NewNode(
-          simplified()->CheckBounds(VectorSlotPair()), length,
+          simplified()->CheckBounds(FeedbackSource()), length,
           jsgraph()->Constant(String::kMaxLength + 1), effect, control);
     } else {
       // Check if we would overflow the allowed maximum string length.
@@ -1320,7 +1320,7 @@ Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
   for (size_t i = 0; i < access.depth(); ++i) {
     context = effect = graph()->NewNode(
         simplified()->LoadField(
-            AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+            AccessBuilder::ForContextSlotKnownPointer(Context::PREVIOUS_INDEX)),
         context, effect, control);
   }
   node->ReplaceInput(0, context);
@@ -1342,7 +1342,7 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
   for (size_t i = 0; i < access.depth(); ++i) {
     context = effect = graph()->NewNode(
         simplified()->LoadField(
-            AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+            AccessBuilder::ForContextSlotKnownPointer(Context::PREVIOUS_INDEX)),
         context, effect, control);
   }
   node->ReplaceInput(0, context);
@@ -1367,8 +1367,8 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
   if (module_type.IsHeapConstant()) {
     SourceTextModuleRef module_constant =
         module_type.AsHeapConstant()->Ref().AsSourceTextModule();
-    CellRef cell_constant = module_constant.GetCell(cell_index);
-    return jsgraph()->Constant(cell_constant);
+    base::Optional<CellRef> cell_constant = module_constant.GetCell(cell_index);
+    if (cell_constant.has_value()) return jsgraph()->Constant(*cell_constant);
   }
 
   FieldAccess field_access;
@@ -1554,21 +1554,21 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
   if (target_type.IsHeapConstant() &&
       target_type.AsHeapConstant()->Ref().IsJSFunction()) {
     JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction();
-    SharedFunctionInfoRef shared = function.shared();
 
     // Only optimize [[Construct]] here if {function} is a Constructor.
     if (!function.map().is_constructor()) return NoChange();
 
-    CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+    if (!function.serialized()) {
+      TRACE_BROKER_MISSING(broker(), "data for function " << function);
+      return NoChange();
+    }
 
     // Patch {node} to an indirect call via the {function}s construct stub.
-    bool use_builtin_construct_stub = shared.construct_as_builtin();
-
+    bool use_builtin_construct_stub = function.shared().construct_as_builtin();
     CodeRef code(broker(),
                  use_builtin_construct_stub
                      ? BUILTIN_CODE(isolate(), JSBuiltinsConstructStub)
                      : BUILTIN_CODE(isolate(), JSConstructStubGeneric));
-
     node->RemoveInput(arity + 1);
     node->InsertInput(graph()->zone(), 0, jsgraph()->Constant(code));
     node->InsertInput(graph()->zone(), 2, new_target);
@@ -1576,10 +1576,9 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
     node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
     node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
     NodeProperties::ChangeOp(
-        node,
-        common()->Call(Linkage::GetStubCallDescriptor(
-            graph()->zone(), ConstructStubDescriptor{}, 1 + arity, flags)));
-
+        node, common()->Call(Linkage::GetStubCallDescriptor(
+                  graph()->zone(), ConstructStubDescriptor{}, 1 + arity,
+                  CallDescriptor::kNeedsFrameState)));
     return Changed(node);
   }
 
@@ -1637,12 +1636,15 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
   if (target_type.IsHeapConstant() &&
       target_type.AsHeapConstant()->Ref().IsJSFunction()) {
     JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction();
-    SharedFunctionInfoRef shared = function.shared();
 
-    if (shared.HasBreakInfo()) {
-      // Do not inline the call if we need to check whether to break at entry.
+    if (!function.serialized()) {
+      TRACE_BROKER_MISSING(broker(), "data for function " << function);
       return NoChange();
     }
+    SharedFunctionInfoRef shared = function.shared();
+
+    // Do not inline the call if we need to check whether to break at entry.
+    if (shared.HasBreakInfo()) return NoChange();
 
     // Class constructors are callable, but [[Call]] will raise an exception.
     // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
@@ -1652,7 +1654,8 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
     // require data from a foreign native context.
     if (is_sloppy(shared.language_mode()) && !shared.native() &&
         !receiver_type.Is(Type::Receiver())) {
-      if (!function.native_context().equals(broker()->native_context())) {
+      if (!function.native_context().equals(
+              broker()->target_native_context())) {
         return NoChange();
       }
       Node* global_proxy =
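
Note: both ReduceJSConstruct and ReduceJSCall now check function.serialized() before reading function.shared(). With the heap broker mediating heap access, only data captured during serialization may be consulted here; a miss is traced (TRACE_BROKER_MISSING) and answered with NoChange(). A sketch of the guard under stand-in types:

    struct JSFunctionRefLike {
      bool serialized = false;
      bool has_break_info = false;  // only meaningful once serialized
    };

    // Returns false (i.e. NoChange) when the broker lacks data for the function.
    bool TryReduceCallSketch(const JSFunctionRefLike& function) {
      if (!function.serialized) {
        // TRACE_BROKER_MISSING(broker, "data for function " << function);
        return false;
      }
      if (function.has_break_info) return false;  // don't inline debuggable calls
      return true;  // safe to continue with the serialized data
    }
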
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 1d88a27a5f758e..39c93c03289363 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -7,9 +7,7 @@
 #include "src/codegen/assembler-inl.h"
 #include "src/codegen/macro-assembler.h"
 #include "src/codegen/optimized-compilation-info.h"
-#include "src/compiler/common-operator.h"
 #include "src/compiler/frame.h"
-#include "src/compiler/node.h"
 #include "src/compiler/osr.h"
 #include "src/compiler/pipeline.h"
 
@@ -75,15 +73,6 @@ MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const {
   return new (zone) MachineSignature(return_count, param_count, types);
 }
 
-bool CallDescriptor::HasSameReturnLocationsAs(
-    const CallDescriptor* other) const {
-  if (ReturnCount() != other->ReturnCount()) return false;
-  for (size_t i = 0; i < ReturnCount(); ++i) {
-    if (GetReturnLocation(i) != other->GetReturnLocation(i)) return false;
-  }
-  return true;
-}
-
 int CallDescriptor::GetFirstUnusedStackSlot() const {
   int slots_above_sp = 0;
   for (size_t i = 0; i < InputCount(); ++i) {
@@ -104,19 +93,16 @@ int CallDescriptor::GetStackParameterDelta(
   int callee_slots_above_sp = GetFirstUnusedStackSlot();
   int tail_caller_slots_above_sp = tail_caller->GetFirstUnusedStackSlot();
   int stack_param_delta = callee_slots_above_sp - tail_caller_slots_above_sp;
-  if (kPadArguments) {
-    // Adjust stack delta when it is odd.
-    if (stack_param_delta % 2 != 0) {
-      if (callee_slots_above_sp % 2 != 0) {
-        // The delta is odd due to the callee - we will need to add one slot
-        // of padding.
-        ++stack_param_delta;
-      } else {
-        // The delta is odd because of the caller. We already have one slot of
-        // padding that we can reuse for arguments, so we will need one fewer
-        // slot.
-        --stack_param_delta;
-      }
+  if (ShouldPadArguments(stack_param_delta)) {
+    if (callee_slots_above_sp % 2 != 0) {
+      // The delta is odd due to the callee - we will need to add one slot
+      // of padding.
+      ++stack_param_delta;
+    } else {
+      // The delta is odd because of the caller. We already have one slot of
+      // padding that we can reuse for arguments, so we will need one fewer
+      // slot.
+      --stack_param_delta;
     }
   }
   return stack_param_delta;
@@ -133,8 +119,14 @@ int CallDescriptor::GetTaggedParameterSlots() const {
   return result;
 }
 
-bool CallDescriptor::CanTailCall(const Node* node) const {
-  return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
+bool CallDescriptor::CanTailCall(const CallDescriptor* callee) const {
+  if (ReturnCount() != callee->ReturnCount()) return false;
+  for (size_t i = 0; i < ReturnCount(); ++i) {
+    if (!LinkageLocation::IsSameLocation(GetReturnLocation(i),
+                                         callee->GetReturnLocation(i)))
+      return false;
+  }
+  return true;
 }
 
 // TODO(jkummerow, sigurds): Arguably frame size calculation should be
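
Note: CanTailCall used to take the call Node* and compare return locations with raw equality via HasSameReturnLocationsAs; it now takes the callee descriptor directly and defers to LinkageLocation::IsSameLocation (added in the linkage.h hunk below), which tolerates compatible MachineTypes. The loop shape, with stand-in types:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct LocationLike { uint32_t bits; };  // stand-in for LinkageLocation

    bool IsSameLocationSketch(const LocationLike& a, const LocationLike& b) {
      return a.bits == b.bits;  // the real check also reconciles MachineTypes
    }

    bool CanTailCallSketch(const std::vector<LocationLike>& caller_returns,
                           const std::vector<LocationLike>& callee_returns) {
      if (caller_returns.size() != callee_returns.size()) return false;
      for (std::size_t i = 0; i < caller_returns.size(); ++i) {
        if (!IsSameLocationSketch(caller_returns[i], callee_returns[i]))
          return false;
      }
      return true;
    }
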
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 05eb0e7d11732f..69e7fbfa427fb8 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -28,20 +28,33 @@ namespace compiler {
 
 const RegList kNoCalleeSaved = 0;
 
-class Node;
 class OsrHelper;
 
 // Describes the location for a parameter or a return value to a call.
 class LinkageLocation {
  public:
   bool operator==(const LinkageLocation& other) const {
-    return bit_field_ == other.bit_field_;
+    return bit_field_ == other.bit_field_ &&
+           machine_type_ == other.machine_type_;
   }
 
   bool operator!=(const LinkageLocation& other) const {
     return !(*this == other);
   }
 
+  static bool IsSameLocation(const LinkageLocation& a,
+                             const LinkageLocation& b) {
+    // Different MachineTypes may end up at the same physical location. With the
+    // sub-type check we make sure that types like {AnyTagged} and
+    // {TaggedPointer} which would end up with the same physical location are
+    // considered equal here.
+    return (a.bit_field_ == b.bit_field_) &&
+           (IsSubtype(a.machine_type_.representation(),
+                      b.machine_type_.representation()) ||
+            IsSubtype(b.machine_type_.representation(),
+                      a.machine_type_.representation()));
+  }
+
   static LinkageLocation ForAnyRegister(
       MachineType type = MachineType::None()) {
     return LinkageLocation(REGISTER, ANY_REGISTER, type);
@@ -144,8 +157,8 @@ class LinkageLocation {
  private:
   enum LocationType { REGISTER, STACK_SLOT };
 
-  class TypeField : public BitField<LocationType, 0, 1> {};
-  class LocationField : public BitField<int32_t, TypeField::kNext, 31> {};
+  using TypeField = BitField<LocationType, 0, 1>;
+  using LocationField = TypeField::Next<int32_t, 31>;
 
   static constexpr int32_t ANY_REGISTER = -1;
   static constexpr int32_t MAX_STACK_SLOT = 32767;
@@ -197,7 +210,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
     // Use the kJavaScriptCallCodeStartRegister (fixed) register for the
     // indirect target address when calling.
     kFixedTargetRegister = 1u << 7,
-    kAllowCallThroughSlot = 1u << 8
+    kAllowCallThroughSlot = 1u << 8,
+    kCallerSavedRegisters = 1u << 9
   };
   using Flags = base::Flags<Flag>;
 
@@ -276,6 +290,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
   bool InitializeRootRegister() const {
     return flags() & kInitializeRootRegister;
   }
+  bool NeedsCallerSavedRegisters() const {
+    return flags() & kCallerSavedRegisters;
+  }
 
   LinkageLocation GetReturnLocation(size_t index) const {
     return location_sig_->GetReturn(index);
@@ -314,8 +331,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
 
   bool UsesOnlyRegisters() const;
 
-  bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
-
   // Returns the first stack slot that is not used by the stack parameters.
   int GetFirstUnusedStackSlot() const;
 
@@ -323,7 +338,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
 
   int GetTaggedParameterSlots() const;
 
-  bool CanTailCall(const Node* call) const;
+  bool CanTailCall(const CallDescriptor* callee) const;
 
   int CalculateFixedFrameSize(Code::Kind code_kind) const;
 
@@ -418,7 +433,7 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
   // structs, pointers to members, etc.
   static CallDescriptor* GetSimplifiedCDescriptor(
       Zone* zone, const MachineSignature* sig,
-      bool set_initialize_root_flag = false);
+      CallDescriptor::Flags flags = CallDescriptor::kNoFlags);
 
   // Get the location of an (incoming) parameter to this function.
   LinkageLocation GetParameterLocation(int index) const {
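
Note: the reason operator== no longer suffices for tail-call checks is that it now also compares machine_type_, which is stricter than physical identity. IsSameLocation relaxes that with a symmetric sub-type test so that representations such as AnyTagged and TaggedPointer, which occupy the same physical slot, still match. A toy illustration (the enum and the sub-type lattice here are hypothetical and far smaller than V8's MachineRepresentation):

    #include <cstdint>

    enum class RepLike { kAnyTagged, kTaggedPointer, kFloat64 };

    // Toy lattice: TaggedPointer is a sub-type of AnyTagged; nothing else relates.
    bool IsSubtypeSketch(RepLike sub, RepLike super) {
      return sub == super ||
             (sub == RepLike::kTaggedPointer && super == RepLike::kAnyTagged);
    }

    bool IsSameLocationSketch(uint32_t bits_a, RepLike rep_a,
                              uint32_t bits_b, RepLike rep_b) {
      // Same bit field, and representations agree up to sub-typing in either
      // direction, as in LinkageLocation::IsSameLocation above.
      return bits_a == bits_b && (IsSubtypeSketch(rep_a, rep_b) ||
                                  IsSubtypeSketch(rep_b, rep_a));
    }
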
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index f9998723f387da..3778775e9bca1c 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -8,7 +8,6 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
 #include "src/heap/factory.h"
 #include "src/objects/objects-inl.h"
 
@@ -284,6 +283,28 @@ class LoadElimination::AliasStateInfo {
   MaybeHandle<Map> map_;
 };
 
+LoadElimination::AbstractField const* LoadElimination::AbstractField::KillConst(
+    Node* object, Zone* zone) const {
+  for (auto pair : this->info_for_node_) {
+    if (pair.first->IsDead()) continue;
+    // If we previously recorded information about a const store on the given
+    // 'object', we might not have done it on the same node; e.g. we might now
+    // identify the object by a FinishRegion node, whereas the initial const
+    // store was performed on the Allocate node. We therefore remove information
+    // on all nodes that must alias with 'object'.
+    if (MustAlias(object, pair.first)) {
+      AbstractField* that = new (zone) AbstractField(zone);
+      for (auto pair : this->info_for_node_) {
+        if (!MustAlias(object, pair.first)) {
+          that->info_for_node_.insert(pair);
+        }
+      }
+      return that;
+    }
+  }
+  return this;
+}
+
 LoadElimination::AbstractField const* LoadElimination::AbstractField::Kill(
     const AliasStateInfo& alias_info, MaybeHandle<Name> name,
     Zone* zone) const {
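
Note: the comment inside KillConst above is the crux of the new method. Const-store information may have been recorded on a different node (e.g. the Allocate node) than the one being invalidated now (e.g. a FinishRegion wrapping it), so invalidation works on must-alias classes rather than node identity, and the map is copied only if something actually has to be dropped. A sketch of that copy-on-write filter (stand-in types; the real version returns `this` to share the untouched state):

    #include <functional>
    #include <map>

    using NodeId = int;
    using InfoMap = std::map<NodeId, int>;

    // Returns the input unchanged unless some tracked key must-aliases
    // `object`; otherwise returns a copy with every must-aliasing entry
    // dropped.
    InfoMap KillConstSketch(
        const InfoMap& info, NodeId object,
        const std::function<bool(NodeId, NodeId)>& must_alias) {
      for (const auto& pair : info) {
        if (must_alias(object, pair.first)) {
          InfoMap filtered;
          for (const auto& keep : info) {
            if (!must_alias(object, keep.first)) filtered.insert(keep);
          }
          return filtered;
        }
      }
      return info;  // common case: nothing aliases, no copy made
    }
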
@@ -527,38 +548,60 @@ LoadElimination::AbstractState::KillElement(Node* object, Node* index,
 }
 
 LoadElimination::AbstractState const* LoadElimination::AbstractState::AddField(
-    Node* object, size_t index, LoadElimination::FieldInfo info,
-    PropertyConstness constness, Zone* zone) const {
+    Node* object, IndexRange index_range, LoadElimination::FieldInfo info,
+    Zone* zone) const {
   AbstractState* that = new (zone) AbstractState(*this);
-  AbstractFields& fields = constness == PropertyConstness::kConst
-                               ? that->const_fields_
-                               : that->fields_;
-  if (fields[index]) {
-    fields[index] = fields[index]->Extend(object, info, zone);
-  } else {
-    fields[index] = new (zone) AbstractField(object, info, zone);
+  AbstractFields& fields =
+      info.const_field_info.IsConst() ? that->const_fields_ : that->fields_;
+  for (int index : index_range) {
+    if (fields[index]) {
+      fields[index] = fields[index]->Extend(object, info, zone);
+    } else {
+      fields[index] = new (zone) AbstractField(object, info, zone);
+    }
   }
   return that;
 }
 
-LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField(
-    Node* object, size_t index, MaybeHandle<Name> name, Zone* zone) const {
+LoadElimination::AbstractState const*
+LoadElimination::AbstractState::KillConstField(Node* object,
+                                               IndexRange index_range,
+                                               Zone* zone) const {
   AliasStateInfo alias_info(this, object);
-  return KillField(alias_info, index, name, zone);
+  AbstractState* that = nullptr;
+  for (int index : index_range) {
+    if (AbstractField const* this_field = this->const_fields_[index]) {
+      this_field = this_field->KillConst(object, zone);
+      if (this->const_fields_[index] != this_field) {
+        if (!that) that = new (zone) AbstractState(*this);
+        that->const_fields_[index] = this_field;
+      }
+    }
+  }
+  return that ? that : this;
 }
 
 LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField(
-    const AliasStateInfo& alias_info, size_t index, MaybeHandle<Name> name,
+    Node* object, IndexRange index_range, MaybeHandle<Name> name,
     Zone* zone) const {
-  if (AbstractField const* this_field = this->fields_[index]) {
-    this_field = this_field->Kill(alias_info, name, zone);
-    if (this->fields_[index] != this_field) {
-      AbstractState* that = new (zone) AbstractState(*this);
-      that->fields_[index] = this_field;
-      return that;
+  AliasStateInfo alias_info(this, object);
+  return KillField(alias_info, index_range, name, zone);
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField(
+    const AliasStateInfo& alias_info, IndexRange index_range,
+    MaybeHandle<Name> name, Zone* zone) const {
+  AbstractState* that = nullptr;
+  for (int index : index_range) {
+    if (AbstractField const* this_field = this->fields_[index]) {
+      this_field = this_field->Kill(alias_info, name, zone);
+      if (this->fields_[index] != this_field) {
+        if (!that) that = new (zone) AbstractState(*this);
+        that->fields_[index] = this_field;
+      }
     }
   }
-  return this;
+  return that ? that : this;
 }
 
 LoadElimination::AbstractState const*
@@ -598,13 +641,38 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::KillAll(
 }
 
 LoadElimination::FieldInfo const* LoadElimination::AbstractState::LookupField(
-    Node* object, size_t index, PropertyConstness constness) const {
-  AbstractFields const& fields =
-      constness == PropertyConstness::kConst ? const_fields_ : fields_;
-  if (AbstractField const* this_field = fields[index]) {
-    return this_field->Lookup(object);
+    Node* object, IndexRange index_range,
+    ConstFieldInfo const_field_info) const {
+  // Check if all the indices in {index_range} contain identical information.
+  // If not, a partially overlapping access has invalidated part of the value.
+  base::Optional<LoadElimination::FieldInfo const*> result;
+  for (int index : index_range) {
+    LoadElimination::FieldInfo const* info = nullptr;
+    if (const_field_info.IsConst()) {
+      if (AbstractField const* this_field = const_fields_[index]) {
+        info = this_field->Lookup(object);
+      }
+      if (!(info && info->const_field_info == const_field_info)) return nullptr;
+    } else {
+      if (AbstractField const* this_field = fields_[index]) {
+        info = this_field->Lookup(object);
+      }
+      if (!info) return nullptr;
+    }
+    if (!result.has_value()) {
+      result = info;
+    } else {
+      // We detected a partially overlapping access here.
+      // We currently don't seem to have such accesses, so this code path is
+      // unreachable, but if we eventually have them, it is safe to return
+      // nullptr and continue the analysis. But store-store elimination is
+      // currently unsafe for such overlapping accesses, so when we remove
+      // this check, we should double-check that store-store elimination can
+      // handle it too.
+      DCHECK_EQ(**result, *info);
+    }
   }
-  return nullptr;
+  return *result;
 }
 
 bool LoadElimination::AliasStateInfo::MayAlias(Node* other) const {
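
Note: the LookupField hunk above keys field bookkeeping by an IndexRange rather than a single index, because one access can span several tagged-size words; that is also why the FieldIndexOf calls in the hunks below now pass the access size (kTaggedSize) explicitly. LookupField consequently reuses a value only when every index in the range reports identical information. A sketch of that rule with stand-in types; where the real code DCHECKs that partial overlaps do not occur yet, the sketch conservatively returns nullptr:

    #include <vector>

    struct FieldInfoLike { int value; };
    inline bool operator==(const FieldInfoLike& a, const FieldInfoLike& b) {
      return a.value == b.value;
    }

    // Reuse a tracked value only if indices [begin, end) all hold the same info.
    const FieldInfoLike* LookupRangeSketch(
        const std::vector<const FieldInfoLike*>& fields, int begin, int end) {
      const FieldInfoLike* result = nullptr;
      for (int i = begin; i < end; ++i) {
        const FieldInfoLike* info = fields[i];
        if (!info) return nullptr;  // a gap in the range: nothing to reuse
        if (result && !(*result == *info)) return nullptr;  // partial overlap
        result = info;
      }
      return result;
    }
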
@@ -733,12 +801,13 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
   // We know that the resulting elements have the fixed array map.
   state = state->SetMaps(node, fixed_array_maps, zone());
   // Kill the previous elements on {object}.
-  state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
+  state = state->KillField(object,
+                           FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
                            MaybeHandle<Name>(), zone());
   // Add the new elements on {object}.
-  state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset),
-                          {node, MachineType::RepCompressedTaggedPointer()},
-                          PropertyConstness::kMutable, zone());
+  state = state->AddField(
+      object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
+      {node, MachineType::RepCompressedTaggedPointer()}, zone());
   return UpdateState(node, state);
 }
 
@@ -760,12 +829,13 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
     state = state->SetMaps(node, fixed_array_maps, zone());
   }
   // Kill the previous elements on {object}.
-  state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
+  state = state->KillField(object,
+                           FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
                            MaybeHandle<Name>(), zone());
   // Add the new elements on {object}.
-  state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset),
-                          {node, MachineType::RepCompressedTaggedPointer()},
-                          PropertyConstness::kMutable, zone());
+  state = state->AddField(
+      object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
+      {node, MachineType::RepCompressedTaggedPointer()}, zone());
   return UpdateState(node, state);
 }
 
@@ -783,9 +853,9 @@ Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
     case ElementsTransition::kSlowTransition:
       // Kill the elements as well.
       AliasStateInfo alias_info(state, object, source_map);
-      state =
-          state->KillField(alias_info, FieldIndexOf(JSObject::kElementsOffset),
-                           MaybeHandle<Name>(), zone());
+      state = state->KillField(
+          alias_info, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
+          MaybeHandle<Name>(), zone());
       break;
   }
   ZoneHandleSet<Map> object_maps;
@@ -828,7 +898,8 @@ Reduction LoadElimination::ReduceTransitionAndStoreElement(Node* node) {
     state = state->SetMaps(object, object_maps, zone());
   }
   // Kill the elements as well.
-  state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
+  state = state->KillField(object,
+                           FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
                            MaybeHandle<Name>(), zone());
   return UpdateState(node, state);
 }
@@ -851,16 +922,17 @@ Reduction LoadElimination::ReduceLoadField(Node* node,
       return Replace(value);
     }
   } else {
-    int field_index = FieldIndexOf(access);
-    if (field_index >= 0) {
-      PropertyConstness constness = access.constness;
+    IndexRange field_index = FieldIndexOf(access);
+    if (field_index != IndexRange::Invalid()) {
       MachineRepresentation representation =
           access.machine_type.representation();
       FieldInfo const* lookup_result =
-          state->LookupField(object, field_index, constness);
-      if (!lookup_result && constness == PropertyConstness::kConst) {
-        lookup_result = state->LookupField(object, field_index,
-                                           PropertyConstness::kMutable);
+          state->LookupField(object, field_index, access.const_field_info);
+      if (!lookup_result && access.const_field_info.IsConst()) {
+        // If the access is const and we didn't find anything, also try to look
+        // up information from mutable stores.
+        lookup_result =
+            state->LookupField(object, field_index, ConstFieldInfo::None());
       }
       if (lookup_result) {
         // Make sure we don't reuse values that were recorded with a different
@@ -884,8 +956,9 @@ Reduction LoadElimination::ReduceLoadField(Node* node,
           return Replace(replacement);
         }
       }
-      FieldInfo info(node, access.name, representation);
-      state = state->AddField(object, field_index, info, constness, zone());
+      FieldInfo info(node, representation, access.name,
+                     access.const_field_info);
+      state = state->AddField(object, field_index, info, zone());
     }
   }
   Handle<Map> field_map;
@@ -910,26 +983,26 @@ Reduction LoadElimination::ReduceStoreField(Node* node,
     Type const new_value_type = NodeProperties::GetType(new_value);
     if (new_value_type.IsHeapConstant()) {
       // Record the new {object} map information.
-      AllowHandleDereference handle_dereference;
       ZoneHandleSet<Map> object_maps(
-          Handle<Map>::cast(new_value_type.AsHeapConstant()->Value()));
+          new_value_type.AsHeapConstant()->Ref().AsMap().object());
       state = state->SetMaps(object, object_maps, zone());
     }
   } else {
-    int field_index = FieldIndexOf(access);
-    if (field_index >= 0) {
-      PropertyConstness constness = access.constness;
+    IndexRange field_index = FieldIndexOf(access);
+    if (field_index != IndexRange::Invalid()) {
+      bool is_const_store = access.const_field_info.IsConst();
       MachineRepresentation representation =
           access.machine_type.representation();
       FieldInfo const* lookup_result =
-          state->LookupField(object, field_index, constness);
+          state->LookupField(object, field_index, access.const_field_info);
 
-      if (lookup_result && (constness == PropertyConstness::kMutable ||
-                            V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL)) {
+      if (lookup_result &&
+          (!is_const_store || V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL)) {
         // At runtime, we should never encounter
         // - any store replacing existing info with a different, incompatible
         //   representation, nor
-        // - two consecutive const stores.
+        // - two consecutive const stores, unless the latter is a store into
+        //   a literal.
         // However, we may see such code statically, so we guard against
         // executing it by emitting Unreachable.
         // TODO(gsps): Re-enable the double const store check even for
@@ -939,8 +1012,9 @@ Reduction LoadElimination::ReduceStoreField(Node* node,
         bool incompatible_representation =
             !lookup_result->name.is_null() &&
             !IsCompatible(representation, lookup_result->representation);
-        if (incompatible_representation ||
-            constness == PropertyConstness::kConst) {
+        bool illegal_double_const_store =
+            is_const_store && !access.is_store_in_literal;
+        if (incompatible_representation || illegal_double_const_store) {
           Node* control = NodeProperties::GetControlInput(node);
           Node* unreachable =
               graph()->NewNode(common()->Unreachable(), effect, control);
@@ -953,16 +1027,22 @@ Reduction LoadElimination::ReduceStoreField(Node* node,
       }
 
       // Kill all potentially aliasing fields and record the new value.
-      FieldInfo new_info(new_value, access.name, representation);
+      FieldInfo new_info(new_value, representation, access.name,
+                         access.const_field_info);
+      if (is_const_store && access.is_store_in_literal) {
+        // We only kill const information when there is a chance that we
+        // previously stored information about the given const field (namely,
+        // when we observe const stores to literals).
+        state = state->KillConstField(object, field_index, zone());
+      }
       state = state->KillField(object, field_index, access.name, zone());
-      state = state->AddField(object, field_index, new_info,
-                              PropertyConstness::kMutable, zone());
-      if (constness == PropertyConstness::kConst) {
+      state = state->AddField(object, field_index, new_info, zone());
+      if (is_const_store) {
         // For const stores, we track information in both the const and the
         // mutable world to guard against field accesses that should have
         // been marked const, but were not.
-        state =
-            state->AddField(object, field_index, new_info, constness, zone());
+        new_info.const_field_info = ConstFieldInfo::None();
+        state = state->AddField(object, field_index, new_info, zone());
       }
     } else {
       // Unsupported StoreField operator.
@@ -1180,8 +1260,8 @@ LoadElimination::ComputeLoopStateForStoreField(
     // Invalidate what we know about the {object}s map.
     state = state->KillMaps(object, zone());
   } else {
-    int field_index = FieldIndexOf(access);
-    if (field_index < 0) {
+    IndexRange field_index = FieldIndexOf(access);
+    if (field_index == IndexRange::Invalid()) {
       state = state->KillFields(object, access.name, zone());
     } else {
       state = state->KillField(object, field_index, access.name, zone());
@@ -1197,9 +1277,12 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
     ElementsTransition transition;
     Node* object;
   };
-  ZoneVector<TransitionElementsKindInfo> element_transitions_(zone());
-  ZoneQueue<Node*> queue(zone());
-  ZoneSet<Node*> visited(zone());
+  // Allocate zone data structures in a temporary zone with a lifetime limited
+  // to this function to avoid blowing up the size of the stage-global zone.
+  Zone temp_zone(zone()->allocator(), "Temporary scoped zone");
+  ZoneVector<TransitionElementsKindInfo> element_transitions_(&temp_zone);
+  ZoneQueue<Node*> queue(&temp_zone);
+  ZoneSet<Node*> visited(&temp_zone);
   visited.insert(node);
   for (int i = 1; i < control->InputCount(); ++i) {
     queue.push(node->InputAt(i));
@@ -1213,16 +1296,16 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
         switch (current->opcode()) {
           case IrOpcode::kEnsureWritableFastElements: {
             Node* const object = NodeProperties::GetValueInput(current, 0);
-            state = state->KillField(object,
-                                     FieldIndexOf(JSObject::kElementsOffset),
-                                     MaybeHandle<Name>(), zone());
+            state = state->KillField(
+                object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
+                MaybeHandle<Name>(), zone());
             break;
           }
           case IrOpcode::kMaybeGrowFastElements: {
             Node* const object = NodeProperties::GetValueInput(current, 0);
-            state = state->KillField(object,
-                                     FieldIndexOf(JSObject::kElementsOffset),
-                                     MaybeHandle<Name>(), zone());
+            state = state->KillField(
+                object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
+                MaybeHandle<Name>(), zone());
             break;
           }
           case IrOpcode::kTransitionElementsKind: {
@@ -1241,9 +1324,9 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
             // Invalidate what we know about the {object}s map.
             state = state->KillMaps(object, zone());
             // Kill the elements as well.
-            state = state->KillField(object,
-                                     FieldIndexOf(JSObject::kElementsOffset),
-                                     MaybeHandle<Name>(), zone());
+            state = state->KillField(
+                object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
+                MaybeHandle<Name>(), zone());
             break;
           }
           case IrOpcode::kStoreField: {
@@ -1305,9 +1388,9 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
         break;
       case ElementsTransition::kSlowTransition: {
         AliasStateInfo alias_info(state, t.object, t.transition.source());
-        state = state->KillField(alias_info,
-                                 FieldIndexOf(JSObject::kElementsOffset),
-                                 MaybeHandle<Name>(), zone());
+        state = state->KillField(
+            alias_info, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize),
+            MaybeHandle<Name>(), zone());
         break;
       }
     }
@@ -1316,55 +1399,49 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
 }
 
 // static
-int LoadElimination::FieldIndexOf(int offset) {
+LoadElimination::IndexRange LoadElimination::FieldIndexOf(
+    int offset, int representation_size) {
   DCHECK(IsAligned(offset, kTaggedSize));
-  int field_index = offset / kTaggedSize;
-  if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
-  DCHECK_LT(0, field_index);
-  return field_index - 1;
+  int field_index = offset / kTaggedSize - 1;
+  DCHECK_EQ(0, representation_size % kTaggedSize);
+  return IndexRange(field_index, representation_size / kTaggedSize);
 }
 
 // static
-int LoadElimination::FieldIndexOf(FieldAccess const& access) {
+LoadElimination::IndexRange LoadElimination::FieldIndexOf(
+    FieldAccess const& access) {
   MachineRepresentation rep = access.machine_type.representation();
   switch (rep) {
     case MachineRepresentation::kNone:
     case MachineRepresentation::kBit:
     case MachineRepresentation::kSimd128:
       UNREACHABLE();
-    case MachineRepresentation::kWord32:
-      if (kInt32Size != kTaggedSize) {
-        return -1;  // We currently only track tagged pointer size fields.
-      }
-      break;
-    case MachineRepresentation::kWord64:
-      if (kInt64Size != kTaggedSize) {
-        return -1;  // We currently only track tagged pointer size fields.
-      }
-      break;
     case MachineRepresentation::kWord8:
     case MachineRepresentation::kWord16:
     case MachineRepresentation::kFloat32:
-      return -1;  // Currently untracked.
+      // Currently untracked.
+      return IndexRange::Invalid();
     case MachineRepresentation::kFloat64:
-      if (kDoubleSize != kTaggedSize) {
-        return -1;  // We currently only track tagged pointer size fields.
-      }
-      break;
+    case MachineRepresentation::kWord32:
+    case MachineRepresentation::kWord64:
     case MachineRepresentation::kTaggedSigned:
     case MachineRepresentation::kTaggedPointer:
     case MachineRepresentation::kTagged:
     case MachineRepresentation::kCompressedSigned:
     case MachineRepresentation::kCompressedPointer:
     case MachineRepresentation::kCompressed:
-      // TODO(bmeurer): Check that we never do overlapping load/stores of
-      // individual parts of Float64 values.
       break;
   }
+  int representation_size = ElementSizeInBytes(rep);
+  // We currently only track fields that are at least tagged pointer sized.
+  if (representation_size < kTaggedSize) return IndexRange::Invalid();
+  DCHECK_EQ(0, representation_size % kTaggedSize);
+
   if (access.base_is_tagged != kTaggedBase) {
-    return -1;  // We currently only track tagged objects.
+    // We currently only track tagged objects.
+    return IndexRange::Invalid();
   }
-  return FieldIndexOf(access.offset);
+  return FieldIndexOf(access.offset, representation_size);
 }
 
 CommonOperatorBuilder* LoadElimination::common() const {
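
The dual bookkeeping in ReduceStoreField is the subtle part of this hunk: a const store is recorded in the const world and, defensively, in the mutable world as well. Below is a minimal standalone model of that policy, with integer ids standing in for Node* and all names illustrative rather than V8's:

#include <cassert>
#include <map>
#include <tuple>

// (object id, field index, const world?) -> tracked value. A stand-in for
// LoadElimination's AbstractState; plain ints replace Node*.
using Key = std::tuple<int, int, bool>;
static std::map<Key, int> state;

void StoreField(int object, int index, int value, bool is_const) {
  // Mirrors ReduceStoreField: a const store is tracked in the mutable world
  // too, guarding against accesses that should have been marked const but
  // were not.
  state[{object, index, false}] = value;
  if (is_const) state[{object, index, true}] = value;
}

const int* LookupField(int object, int index, bool is_const) {
  auto it = state.find({object, index, is_const});
  return it == state.end() ? nullptr : &it->second;
}

int main() {
  StoreField(/*object=*/1, /*index=*/0, /*value=*/42, /*is_const=*/true);
  assert(*LookupField(1, 0, /*is_const=*/true) == 42);   // const load elided
  assert(*LookupField(1, 0, /*is_const=*/false) == 42);  // mutable load too
}

Under this policy a later load of the field is eliminable whether or not it was correctly marked const, which is exactly the guard the comment in the hunk describes.
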
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 4ad1fa64a201ef..b97fd7b8834028 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -9,6 +9,7 @@
 #include "src/codegen/machine-type.h"
 #include "src/common/globals.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/handles/maybe-handles.h"
 #include "src/zone/zone-handle-set.h"
 
@@ -100,20 +101,25 @@ class V8_EXPORT_PRIVATE LoadElimination final
 
   struct FieldInfo {
     FieldInfo() = default;
-    FieldInfo(Node* value, MachineRepresentation representation)
-        : value(value), name(), representation(representation) {}
-    FieldInfo(Node* value, MaybeHandle<Name> name,
-              MachineRepresentation representation)
-        : value(value), name(name), representation(representation) {}
+    FieldInfo(Node* value, MachineRepresentation representation,
+              MaybeHandle<Name> name = {},
+              ConstFieldInfo const_field_info = ConstFieldInfo::None())
+        : value(value),
+          representation(representation),
+          name(name),
+          const_field_info(const_field_info) {}
 
     bool operator==(const FieldInfo& other) const {
-      return value == other.value && name.address() == other.name.address() &&
-             representation == other.representation;
+      return value == other.value && representation == other.representation &&
+             name.address() == other.name.address() &&
+             const_field_info == other.const_field_info;
     }
+    bool operator!=(const FieldInfo& other) const { return !(*this == other); }
 
     Node* value = nullptr;
-    MaybeHandle<Name> name;
     MachineRepresentation representation = MachineRepresentation::kNone;
+    MaybeHandle<Name> name;
+    ConstFieldInfo const_field_info;
   };
 
   // Abstract state to approximate the current state of a certain field along
@@ -134,6 +140,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
       return that;
     }
     FieldInfo const* Lookup(Node* object) const;
+    AbstractField const* KillConst(Node* object, Zone* zone) const;
     AbstractField const* Kill(const AliasStateInfo& alias_info,
                               MaybeHandle<Name> name, Zone* zone) const;
     bool Equals(AbstractField const* that) const {
@@ -186,6 +193,39 @@ class V8_EXPORT_PRIVATE LoadElimination final
     ZoneMap<Node*, ZoneHandleSet<Map>> info_for_node_;
   };
 
+  class IndexRange {
+   public:
+    IndexRange(int begin, int size) : begin_(begin), end_(begin + size) {
+      DCHECK_LE(0, begin);
+      DCHECK_LE(1, size);
+      if (end_ > static_cast<int>(kMaxTrackedFields)) {
+        *this = IndexRange::Invalid();
+      }
+    }
+    static IndexRange Invalid() { return IndexRange(); }
+
+    bool operator==(const IndexRange& other) {
+      return begin_ == other.begin_ && end_ == other.end_;
+    }
+    bool operator!=(const IndexRange& other) { return !(*this == other); }
+
+    struct Iterator {
+      int i;
+      int operator*() { return i; }
+      void operator++() { ++i; }
+      bool operator!=(Iterator other) { return i != other.i; }
+    };
+
+    Iterator begin() { return {begin_}; }
+    Iterator end() { return {end_}; }
+
+   private:
+    int begin_;
+    int end_;
+
+    IndexRange() : begin_(-1), end_(-1) {}
+  };
+
   class AbstractState final : public ZoneObject {
    public:
     AbstractState() {}
@@ -200,19 +240,20 @@ class V8_EXPORT_PRIVATE LoadElimination final
                                   Zone* zone) const;
     bool LookupMaps(Node* object, ZoneHandleSet<Map>* object_maps) const;
 
-    AbstractState const* AddField(Node* object, size_t index, FieldInfo info,
-                                  PropertyConstness constness,
-                                  Zone* zone) const;
+    AbstractState const* AddField(Node* object, IndexRange index,
+                                  FieldInfo info, Zone* zone) const;
+    AbstractState const* KillConstField(Node* object, IndexRange index_range,
+                                        Zone* zone) const;
     AbstractState const* KillField(const AliasStateInfo& alias_info,
-                                   size_t index, MaybeHandle<Name> name,
+                                   IndexRange index, MaybeHandle<Name> name,
                                    Zone* zone) const;
-    AbstractState const* KillField(Node* object, size_t index,
+    AbstractState const* KillField(Node* object, IndexRange index,
                                    MaybeHandle<Name> name, Zone* zone) const;
     AbstractState const* KillFields(Node* object, MaybeHandle<Name> name,
                                     Zone* zone) const;
     AbstractState const* KillAll(Zone* zone) const;
-    FieldInfo const* LookupField(Node* object, size_t index,
-                                 PropertyConstness constness) const;
+    FieldInfo const* LookupField(Node* object, IndexRange index,
+                                 ConstFieldInfo const_field_info) const;
 
     AbstractState const* AddElement(Node* object, Node* index, Node* value,
                                     MachineRepresentation representation,
@@ -280,8 +321,8 @@ class V8_EXPORT_PRIVATE LoadElimination final
   AbstractState const* UpdateStateForPhi(AbstractState const* state,
                                          Node* effect_phi, Node* phi);
 
-  static int FieldIndexOf(int offset);
-  static int FieldIndexOf(FieldAccess const& access);
+  static IndexRange FieldIndexOf(int offset, int representation_size);
+  static IndexRange FieldIndexOf(FieldAccess const& access);
 
   static AbstractState const* empty_state() {
     return AbstractState::empty_state();
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 80205f80b64685..4c7ee1d1410f36 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -60,8 +60,7 @@ class MachineRepresentationInferrer {
         CHECK_LE(index, static_cast<size_t>(1));
         return index == 0 ? MachineRepresentation::kWord64
                           : MachineRepresentation::kBit;
-      case IrOpcode::kCall:
-      case IrOpcode::kCallWithCallerSavedRegisters: {
+      case IrOpcode::kCall: {
         auto call_descriptor = CallDescriptorOf(input->op());
         return call_descriptor->GetReturnType(index).representation();
       }
@@ -128,7 +127,6 @@ class MachineRepresentationInferrer {
             representation_vector_[node->id()] = PromoteRepresentation(
                 LoadRepresentationOf(node->op()).representation());
             break;
-          case IrOpcode::kLoadStackPointer:
           case IrOpcode::kLoadFramePointer:
           case IrOpcode::kLoadParentFramePointer:
             representation_vector_[node->id()] =
@@ -142,8 +140,7 @@ class MachineRepresentationInferrer {
             representation_vector_[node->id()] =
                 PhiRepresentationOf(node->op());
             break;
-          case IrOpcode::kCall:
-          case IrOpcode::kCallWithCallerSavedRegisters: {
+          case IrOpcode::kCall: {
             auto call_descriptor = CallDescriptorOf(node->op());
             if (call_descriptor->ReturnCount() > 0) {
               representation_vector_[node->id()] =
@@ -235,6 +232,10 @@ class MachineRepresentationInferrer {
           case IrOpcode::kWord64PoisonOnSpeculation:
             representation_vector_[node->id()] = MachineRepresentation::kWord64;
             break;
+          case IrOpcode::kCompressedHeapConstant:
+            representation_vector_[node->id()] =
+                MachineRepresentation::kCompressedPointer;
+            break;
           case IrOpcode::kExternalConstant:
             representation_vector_[node->id()] =
                 MachineType::PointerRepresentation();
@@ -248,6 +249,13 @@ class MachineRepresentationInferrer {
             representation_vector_[node->id()] =
                 MachineRepresentation::kTaggedSigned;
             break;
+          case IrOpcode::kBitcastWord32ToCompressedSigned:
+            representation_vector_[node->id()] =
+                MachineRepresentation::kCompressedSigned;
+            break;
+          case IrOpcode::kBitcastCompressedSignedToWord32:
+            representation_vector_[node->id()] = MachineRepresentation::kWord32;
+            break;
           case IrOpcode::kWord32Equal:
           case IrOpcode::kInt32LessThan:
           case IrOpcode::kInt32LessThanOrEqual:
@@ -265,6 +273,7 @@ class MachineRepresentationInferrer {
           case IrOpcode::kFloat64LessThan:
           case IrOpcode::kFloat64LessThanOrEqual:
           case IrOpcode::kChangeTaggedToBit:
+          case IrOpcode::kStackPointerGreaterThan:
             representation_vector_[node->id()] = MachineRepresentation::kBit;
             break;
 #define LABEL(opcode) case IrOpcode::k##opcode:
@@ -373,7 +382,6 @@ class MachineRepresentationChecker {
         }
         switch (node->opcode()) {
           case IrOpcode::kCall:
-          case IrOpcode::kCallWithCallerSavedRegisters:
           case IrOpcode::kTailCall:
             CheckCallInputs(node);
             break;
@@ -433,6 +441,13 @@ class MachineRepresentationChecker {
           case IrOpcode::kTaggedPoisonOnSpeculation:
             CheckValueInputIsTagged(node, 0);
             break;
+          case IrOpcode::kBitcastWord32ToCompressedSigned:
+            CheckValueInputRepresentationIs(node, 0,
+                                            MachineRepresentation::kWord32);
+            break;
+          case IrOpcode::kBitcastCompressedSignedToWord32:
+            CheckValueInputIsCompressed(node, 0);
+            break;
           case IrOpcode::kTruncateFloat64ToWord32:
           case IrOpcode::kTruncateFloat64ToUint32:
           case IrOpcode::kTruncateFloat64ToFloat32:
@@ -699,6 +714,10 @@ class MachineRepresentationChecker {
             }
             break;
           }
+          case IrOpcode::kStackPointerGreaterThan:
+            CheckValueInputRepresentationIs(
+                node, 0, MachineType::PointerRepresentation());
+            break;
           case IrOpcode::kThrow:
           case IrOpcode::kTypedStateValues:
           case IrOpcode::kFrameState:
@@ -751,11 +770,6 @@ class MachineRepresentationChecker {
       case MachineRepresentation::kCompressedPointer:
       case MachineRepresentation::kCompressedSigned:
         return;
-      case MachineRepresentation::kNone:
-        if (input->opcode() == IrOpcode::kCompressedHeapConstant) {
-          return;
-        }
-        break;
       default:
         break;
     }
@@ -858,17 +872,6 @@ class MachineRepresentationChecker {
       case MachineRepresentation::kCompressedSigned:
       case MachineRepresentation::kCompressedPointer:
         return;
-      case MachineRepresentation::kNone: {
-        if (input->opcode() == IrOpcode::kCompressedHeapConstant) {
-          return;
-        }
-        std::ostringstream str;
-        str << "TypeError: node #" << input->id() << ":" << *input->op()
-            << " is untyped.";
-        PrintDebugHelp(str, node);
-        FATAL("%s", str.str().c_str());
-        break;
-      }
       default:
         break;
     }
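
The verifier changes boil down to giving every compression-related node a definite representation, which is why the kNone escape hatches for kCompressedHeapConstant could go. Restated as a toy table (abbreviated enums, not the V8 types):

#include <cassert>

// Abbreviated stand-ins for IrOpcode / MachineRepresentation.
enum class Opcode {
  kCompressedHeapConstant,
  kBitcastWord32ToCompressedSigned,
  kBitcastCompressedSignedToWord32,
  kStackPointerGreaterThan,
};
enum class Rep { kCompressedPointer, kCompressedSigned, kWord32, kBit };

Rep InferRepresentation(Opcode op) {
  switch (op) {
    case Opcode::kCompressedHeapConstant:
      return Rep::kCompressedPointer;  // previously untyped (kNone)
    case Opcode::kBitcastWord32ToCompressedSigned:
      return Rep::kCompressedSigned;
    case Opcode::kBitcastCompressedSignedToWord32:
      return Rep::kWord32;
    case Opcode::kStackPointerGreaterThan:
      return Rep::kBit;  // it is a comparison
  }
  return Rep::kBit;  // unreachable
}

int main() {
  assert(InferRepresentation(Opcode::kCompressedHeapConstant) ==
         Rep::kCompressedPointer);
}
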
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index f720c2908461ba..11124579f61d5c 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -34,17 +34,14 @@ Node* MachineOperatorReducer::Float32Constant(volatile float value) {
   return graph()->NewNode(common()->Float32Constant(value));
 }
 
-
 Node* MachineOperatorReducer::Float64Constant(volatile double value) {
   return mcgraph()->Float64Constant(value);
 }
 
-
 Node* MachineOperatorReducer::Int32Constant(int32_t value) {
   return mcgraph()->Int32Constant(value);
 }
 
-
 Node* MachineOperatorReducer::Int64Constant(int64_t value) {
   return graph()->NewNode(common()->Int64Constant(value));
 }
@@ -70,23 +67,27 @@ Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
   return reduction.Changed() ? reduction.replacement() : node;
 }
 
-
 Node* MachineOperatorReducer::Word32Sar(Node* lhs, uint32_t rhs) {
   if (rhs == 0) return lhs;
   return graph()->NewNode(machine()->Word32Sar(), lhs, Uint32Constant(rhs));
 }
 
-
 Node* MachineOperatorReducer::Word32Shr(Node* lhs, uint32_t rhs) {
   if (rhs == 0) return lhs;
   return graph()->NewNode(machine()->Word32Shr(), lhs, Uint32Constant(rhs));
 }
 
-
 Node* MachineOperatorReducer::Word32Equal(Node* lhs, Node* rhs) {
   return graph()->NewNode(machine()->Word32Equal(), lhs, rhs);
 }
 
+Node* MachineOperatorReducer::BitcastWord32ToCompressedSigned(Node* value) {
+  return graph()->NewNode(machine()->BitcastWord32ToCompressedSigned(), value);
+}
+
+Node* MachineOperatorReducer::BitcastCompressedSignedToWord32(Node* value) {
+  return graph()->NewNode(machine()->BitcastCompressedSignedToWord32(), value);
+}
 
 Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) {
   Node* const node = graph()->NewNode(machine()->Int32Add(), lhs, rhs);
@@ -94,19 +95,16 @@ Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) {
   return reduction.Changed() ? reduction.replacement() : node;
 }
 
-
 Node* MachineOperatorReducer::Int32Sub(Node* lhs, Node* rhs) {
   Node* const node = graph()->NewNode(machine()->Int32Sub(), lhs, rhs);
   Reduction const reduction = ReduceInt32Sub(node);
   return reduction.Changed() ? reduction.replacement() : node;
 }
 
-
 Node* MachineOperatorReducer::Int32Mul(Node* lhs, Node* rhs) {
   return graph()->NewNode(machine()->Int32Mul(), lhs, rhs);
 }
 
-
 Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) {
   DCHECK_NE(0, divisor);
   DCHECK_NE(std::numeric_limits<int32_t>::min(), divisor);
@@ -122,7 +120,6 @@ Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) {
   return Int32Add(Word32Sar(quotient, mag.shift), Word32Shr(dividend, 31));
 }
 
-
 Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
   DCHECK_LT(0u, divisor);
   // If the divisor is even, we can avoid using the expensive fixup by shifting
@@ -146,7 +143,6 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
   return quotient;
 }
 
-
 // Perform constant folding and strength reduction on machine operators.
 Reduction MachineOperatorReducer::Reduce(Node* node) {
   switch (node->opcode()) {
@@ -664,6 +660,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
       break;
     }
+    case IrOpcode::kChangeTaggedToCompressed: {
+      Int64Matcher m(node->InputAt(0));
+      if (m.IsBitcastWordToTaggedSigned()) {
+        Int64Matcher n(m.node()->InputAt(0));
+        if (n.IsChangeInt32ToInt64()) {
+          DCHECK(machine()->Is64() && SmiValuesAre31Bits());
+          return Replace(BitcastWord32ToCompressedSigned(n.node()->InputAt(0)));
+        }
+      }
+      break;
+    }
     case IrOpcode::kTruncateFloat64ToWord32: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
@@ -674,6 +681,13 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       Int64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
       if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
+      if (m.IsBitcastTaggedSignedToWord()) {
+        Int64Matcher n(m.node()->InputAt(0));
+        if (n.IsChangeCompressedToTagged()) {
+          DCHECK(machine()->Is64() && SmiValuesAre31Bits());
+          return Replace(BitcastCompressedSignedToWord32(n.node()->InputAt(0)));
+        }
+      }
       break;
     }
     case IrOpcode::kTruncateFloat64ToFloat32: {
@@ -871,7 +885,6 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
   return NoChange();
 }
 
-
 Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
   Uint32BinopMatcher m(node);
   if (m.left().Is(0)) return Replace(m.left().node());    // 0 / x => 0
@@ -900,7 +913,6 @@ Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
   return NoChange();
 }
 
-
 Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) return Replace(m.left().node());    // 0 % x  => 0
@@ -937,7 +949,6 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
   return NoChange();
 }
 
-
 Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
   Uint32BinopMatcher m(node);
   if (m.left().Is(0)) return Replace(m.left().node());    // 0 % x => 0
@@ -967,7 +978,6 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
   return NoChange();
 }
 
-
 Reduction MachineOperatorReducer::ReduceStore(Node* node) {
   NodeMatcher nm(node);
   MachineRepresentation rep;
@@ -1015,7 +1025,6 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
   return NoChange();
 }
 
-
 Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kInt32AddWithOverflow: {
@@ -1069,7 +1078,6 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
   return NoChange();
 }
 
-
 Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
   DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||
          (node->opcode() == IrOpcode::kWord32Shr) ||
@@ -1089,7 +1097,6 @@ Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
   return NoChange();
 }
 
-
 Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
   DCHECK_EQ(IrOpcode::kWord32Shl, node->opcode());
   Int32BinopMatcher m(node);
@@ -1399,7 +1406,6 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
   return NoChange();
 }
 
-
 Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
   DCHECK_EQ(IrOpcode::kFloat64InsertHighWord32, node->opcode());
   Float64Matcher mlhs(node->InputAt(0));
@@ -1412,7 +1418,6 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
   return NoChange();
 }
 
-
 namespace {
 
 bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
@@ -1492,7 +1497,6 @@ CommonOperatorBuilder* MachineOperatorReducer::common() const {
   return mcgraph()->common();
 }
 
-
 MachineOperatorBuilder* MachineOperatorReducer::machine() const {
   return mcgraph()->machine();
 }
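
The two new reductions recognize that, on 64-bit targets with 31-bit Smis, compressing a Smi that was just built from a word32 (and, symmetrically, truncating a Smi that was just decompressed) is a round trip, so each chain collapses to a single 32-bit bitcast. A toy matcher for the first pattern, assuming a simplified unary node type:

#include <cassert>
#include <string>

// A toy unary node, just enough to express the chain the reducer matches.
struct Node {
  std::string op;
  Node* input = nullptr;
};

// ChangeTaggedToCompressed(BitcastWordToTaggedSigned(ChangeInt32ToInt64(x)))
//   => BitcastWord32ToCompressedSigned(x)
Node* Reduce(Node* n) {
  if (n->op == "ChangeTaggedToCompressed" && n->input &&
      n->input->op == "BitcastWordToTaggedSigned" && n->input->input &&
      n->input->input->op == "ChangeInt32ToInt64") {
    // Valid only on 64-bit targets with 31-bit Smis, per the DCHECK above.
    return new Node{"BitcastWord32ToCompressedSigned", n->input->input->input};
  }
  return n;
}

int main() {
  Node x{"Parameter"};
  Node widen{"ChangeInt32ToInt64", &x};
  Node tag{"BitcastWordToTaggedSigned", &widen};
  Node compress{"ChangeTaggedToCompressed", &tag};
  Node* r = Reduce(&compress);
  assert(r->op == "BitcastWord32ToCompressedSigned" && r->input == &x);
}

The TruncateInt64ToInt32 reduction is the mirror image: BitcastTaggedSignedToWord over ChangeCompressedToTagged collapses to BitcastCompressedSignedToWord32.
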
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index a8e4cd5749566f..6eab08653e6031 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -51,6 +51,8 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   Node* Word32Sar(Node* lhs, uint32_t rhs);
   Node* Word32Shr(Node* lhs, uint32_t rhs);
   Node* Word32Equal(Node* lhs, Node* rhs);
+  Node* BitcastWord32ToCompressedSigned(Node* value);
+  Node* BitcastCompressedSignedToWord32(Node* value);
   Node* Int32Add(Node* lhs, Node* rhs);
   Node* Int32Sub(Node* lhs, Node* rhs);
   Node* Int32Mul(Node* lhs, Node* rhs);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index f447861aca758b..0355534408dbf9 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -89,6 +89,8 @@ MachineType AtomicOpType(Operator const* op) {
   return OpParameter<MachineType>(op);
 }
 
+// The format is:
+// V(Name, properties, value_input_count, control_input_count, output_count)
 #define PURE_BINARY_OP_LIST_32(V)                                           \
   V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
   V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
@@ -112,6 +114,8 @@ MachineType AtomicOpType(Operator const* op) {
   V(Uint32Mod, Operator::kNoProperties, 2, 1, 1)                            \
   V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)
 
+// The format is:
+// V(Name, properties, value_input_count, control_input_count, output_count)
 #define PURE_BINARY_OP_LIST_64(V)                                        \
   V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
@@ -133,6 +137,8 @@ MachineType AtomicOpType(Operator const* op) {
   V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1)                    \
   V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)
 
+// The format is:
+// V(Name, properties, value_input_count, control_input_count, output_count)
 #define MACHINE_PURE_OP_LIST(V)                                               \
   PURE_BINARY_OP_LIST_32(V)                                                   \
   PURE_BINARY_OP_LIST_64(V)                                                   \
@@ -142,6 +148,8 @@ MachineType AtomicOpType(Operator const* op) {
   V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1)                     \
   V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1)              \
   V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1)              \
+  V(BitcastWord32ToCompressedSigned, Operator::kNoProperties, 1, 0, 1)        \
+  V(BitcastCompressedSignedToWord32, Operator::kNoProperties, 1, 0, 1)        \
   V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1)                \
   V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
   V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
@@ -236,7 +244,6 @@ MachineType AtomicOpType(Operator const* op) {
   V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1)               \
   V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1)                 \
   V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1)                \
-  V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)                       \
   V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)                       \
   V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)                 \
   V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2)                           \
@@ -248,6 +255,12 @@ MachineType AtomicOpType(Operator const* op) {
   V(F64x2Splat, Operator::kNoProperties, 1, 0, 1)                             \
   V(F64x2Abs, Operator::kNoProperties, 1, 0, 1)                               \
   V(F64x2Neg, Operator::kNoProperties, 1, 0, 1)                               \
+  V(F64x2Add, Operator::kCommutative, 2, 0, 1)                                \
+  V(F64x2Sub, Operator::kNoProperties, 2, 0, 1)                               \
+  V(F64x2Mul, Operator::kCommutative, 2, 0, 1)                                \
+  V(F64x2Div, Operator::kNoProperties, 2, 0, 1)                               \
+  V(F64x2Min, Operator::kCommutative, 2, 0, 1)                                \
+  V(F64x2Max, Operator::kCommutative, 2, 0, 1)                                \
   V(F64x2Eq, Operator::kCommutative, 2, 0, 1)                                 \
   V(F64x2Ne, Operator::kCommutative, 2, 0, 1)                                 \
   V(F64x2Lt, Operator::kNoProperties, 2, 0, 1)                                \
@@ -263,6 +276,7 @@ MachineType AtomicOpType(Operator const* op) {
   V(F32x4AddHoriz, Operator::kNoProperties, 2, 0, 1)                          \
   V(F32x4Sub, Operator::kNoProperties, 2, 0, 1)                               \
   V(F32x4Mul, Operator::kCommutative, 2, 0, 1)                                \
+  V(F32x4Div, Operator::kNoProperties, 2, 0, 1)                               \
   V(F32x4Min, Operator::kCommutative, 2, 0, 1)                                \
   V(F32x4Max, Operator::kCommutative, 2, 0, 1)                                \
   V(F32x4Eq, Operator::kCommutative, 2, 0, 1)                                 \
@@ -271,13 +285,20 @@ MachineType AtomicOpType(Operator const* op) {
   V(F32x4Le, Operator::kNoProperties, 2, 0, 1)                                \
   V(I64x2Splat, Operator::kNoProperties, 1, 0, 1)                             \
   V(I64x2Neg, Operator::kNoProperties, 1, 0, 1)                               \
+  V(I64x2Shl, Operator::kNoProperties, 2, 0, 1)                               \
+  V(I64x2ShrS, Operator::kNoProperties, 2, 0, 1)                              \
   V(I64x2Add, Operator::kCommutative, 2, 0, 1)                                \
   V(I64x2Sub, Operator::kNoProperties, 2, 0, 1)                               \
   V(I64x2Mul, Operator::kCommutative, 2, 0, 1)                                \
+  V(I64x2MinS, Operator::kCommutative, 2, 0, 1)                               \
+  V(I64x2MaxS, Operator::kCommutative, 2, 0, 1)                               \
   V(I64x2Eq, Operator::kCommutative, 2, 0, 1)                                 \
   V(I64x2Ne, Operator::kCommutative, 2, 0, 1)                                 \
   V(I64x2GtS, Operator::kNoProperties, 2, 0, 1)                               \
   V(I64x2GeS, Operator::kNoProperties, 2, 0, 1)                               \
+  V(I64x2ShrU, Operator::kNoProperties, 2, 0, 1)                              \
+  V(I64x2MinU, Operator::kCommutative, 2, 0, 1)                               \
+  V(I64x2MaxU, Operator::kCommutative, 2, 0, 1)                               \
   V(I64x2GtU, Operator::kNoProperties, 2, 0, 1)                               \
   V(I64x2GeU, Operator::kNoProperties, 2, 0, 1)                               \
   V(I32x4Splat, Operator::kNoProperties, 1, 0, 1)                             \
@@ -285,6 +306,8 @@ MachineType AtomicOpType(Operator const* op) {
   V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1)                  \
   V(I32x4SConvertI16x8High, Operator::kNoProperties, 1, 0, 1)                 \
   V(I32x4Neg, Operator::kNoProperties, 1, 0, 1)                               \
+  V(I32x4Shl, Operator::kNoProperties, 2, 0, 1)                               \
+  V(I32x4ShrS, Operator::kNoProperties, 2, 0, 1)                              \
   V(I32x4Add, Operator::kCommutative, 2, 0, 1)                                \
   V(I32x4AddHoriz, Operator::kNoProperties, 2, 0, 1)                          \
   V(I32x4Sub, Operator::kNoProperties, 2, 0, 1)                               \
@@ -298,6 +321,7 @@ MachineType AtomicOpType(Operator const* op) {
   V(I32x4UConvertF32x4, Operator::kNoProperties, 1, 0, 1)                     \
   V(I32x4UConvertI16x8Low, Operator::kNoProperties, 1, 0, 1)                  \
   V(I32x4UConvertI16x8High, Operator::kNoProperties, 1, 0, 1)                 \
+  V(I32x4ShrU, Operator::kNoProperties, 2, 0, 1)                              \
   V(I32x4MinU, Operator::kCommutative, 2, 0, 1)                               \
   V(I32x4MaxU, Operator::kCommutative, 2, 0, 1)                               \
   V(I32x4GtU, Operator::kNoProperties, 2, 0, 1)                               \
@@ -306,6 +330,8 @@ MachineType AtomicOpType(Operator const* op) {
   V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1)                  \
   V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1)                 \
   V(I16x8Neg, Operator::kNoProperties, 1, 0, 1)                               \
+  V(I16x8Shl, Operator::kNoProperties, 2, 0, 1)                               \
+  V(I16x8ShrS, Operator::kNoProperties, 2, 0, 1)                              \
   V(I16x8SConvertI32x4, Operator::kNoProperties, 2, 0, 1)                     \
   V(I16x8Add, Operator::kCommutative, 2, 0, 1)                                \
   V(I16x8AddSaturateS, Operator::kCommutative, 2, 0, 1)                       \
@@ -321,6 +347,7 @@ MachineType AtomicOpType(Operator const* op) {
   V(I16x8GeS, Operator::kNoProperties, 2, 0, 1)                               \
   V(I16x8UConvertI8x16Low, Operator::kNoProperties, 1, 0, 1)                  \
   V(I16x8UConvertI8x16High, Operator::kNoProperties, 1, 0, 1)                 \
+  V(I16x8ShrU, Operator::kNoProperties, 2, 0, 1)                              \
   V(I16x8UConvertI32x4, Operator::kNoProperties, 2, 0, 1)                     \
   V(I16x8AddSaturateU, Operator::kCommutative, 2, 0, 1)                       \
   V(I16x8SubSaturateU, Operator::kNoProperties, 2, 0, 1)                      \
@@ -330,6 +357,8 @@ MachineType AtomicOpType(Operator const* op) {
   V(I16x8GeU, Operator::kNoProperties, 2, 0, 1)                               \
   V(I8x16Splat, Operator::kNoProperties, 1, 0, 1)                             \
   V(I8x16Neg, Operator::kNoProperties, 1, 0, 1)                               \
+  V(I8x16Shl, Operator::kNoProperties, 2, 0, 1)                               \
+  V(I8x16ShrS, Operator::kNoProperties, 2, 0, 1)                              \
   V(I8x16SConvertI16x8, Operator::kNoProperties, 2, 0, 1)                     \
   V(I8x16Add, Operator::kCommutative, 2, 0, 1)                                \
   V(I8x16AddSaturateS, Operator::kCommutative, 2, 0, 1)                       \
@@ -342,6 +371,7 @@ MachineType AtomicOpType(Operator const* op) {
   V(I8x16Ne, Operator::kCommutative, 2, 0, 1)                                 \
   V(I8x16GtS, Operator::kNoProperties, 2, 0, 1)                               \
   V(I8x16GeS, Operator::kNoProperties, 2, 0, 1)                               \
+  V(I8x16ShrU, Operator::kNoProperties, 2, 0, 1)                              \
   V(I8x16UConvertI16x8, Operator::kNoProperties, 2, 0, 1)                     \
   V(I8x16AddSaturateU, Operator::kCommutative, 2, 0, 1)                       \
   V(I8x16SubSaturateU, Operator::kNoProperties, 2, 0, 1)                      \
@@ -364,8 +394,11 @@ MachineType AtomicOpType(Operator const* op) {
   V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1)                            \
   V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1)                            \
   V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1)                           \
-  V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1)
+  V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1)                           \
+  V(StackPointerGreaterThan, Operator::kNoProperties, 1, 0, 1)
 
+// The format is:
+// V(Name, properties, value_input_count, control_input_count, output_count)
 #define PURE_OPTIONAL_OP_LIST(V)                            \
   V(Word32Ctz, Operator::kNoProperties, 1, 0, 1)            \
   V(Word64Ctz, Operator::kNoProperties, 1, 0, 1)            \
@@ -385,6 +418,8 @@ MachineType AtomicOpType(Operator const* op) {
   V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
   V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
 
+// The format is:
+// V(Name, properties, value_input_count, control_input_count, output_count)
 #define OVERFLOW_OP_LIST(V)                                                \
   V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \
   V(Int32SubWithOverflow, Operator::kNoProperties)                         \
@@ -467,12 +502,6 @@ MachineType AtomicOpType(Operator const* op) {
   V(I16x8, 8)                \
   V(I8x16, 16)
 
-#define SIMD_FORMAT_LIST(V) \
-  V(64x2, 64)               \
-  V(32x4, 32)               \
-  V(16x8, 16)               \
-  V(8x16, 8)
-
 #define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
   V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
 
@@ -1305,28 +1334,6 @@ const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
 SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
 #undef SIMD_LANE_OPS
 
-#define SIMD_SHIFT_OPS(format, bits)                                           \
-  const Operator* MachineOperatorBuilder::I##format##Shl(int32_t shift) {      \
-    DCHECK(0 <= shift && shift < bits);                                        \
-    return new (zone_)                                                         \
-        Operator1<int32_t>(IrOpcode::kI##format##Shl, Operator::kPure,         \
-                           "Shift left", 1, 0, 0, 1, 0, 0, shift);             \
-  }                                                                            \
-  const Operator* MachineOperatorBuilder::I##format##ShrS(int32_t shift) {     \
-    DCHECK(0 < shift && shift <= bits);                                        \
-    return new (zone_)                                                         \
-        Operator1<int32_t>(IrOpcode::kI##format##ShrS, Operator::kPure,        \
-                           "Arithmetic shift right", 1, 0, 0, 1, 0, 0, shift); \
-  }                                                                            \
-  const Operator* MachineOperatorBuilder::I##format##ShrU(int32_t shift) {     \
-    DCHECK(0 <= shift && shift < bits);                                        \
-    return new (zone_)                                                         \
-        Operator1<int32_t>(IrOpcode::kI##format##ShrU, Operator::kPure,        \
-                           "Shift right", 1, 0, 0, 1, 0, 0, shift);            \
-  }
-SIMD_FORMAT_LIST(SIMD_SHIFT_OPS)
-#undef SIMD_SHIFT_OPS
-
 const Operator* MachineOperatorBuilder::S8x16Shuffle(
     const uint8_t shuffle[16]) {
   uint8_t* array = zone_->NewArray<uint8_t>(16);
@@ -1354,7 +1361,6 @@ const uint8_t* S8x16ShuffleOf(Operator const* op) {
 #undef ATOMIC_REPRESENTATION_LIST
 #undef ATOMIC64_REPRESENTATION_LIST
 #undef SIMD_LANE_OP_LIST
-#undef SIMD_FORMAT_LIST
 #undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
 
 }  // namespace compiler
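
Besides the new F64x2 arithmetic operators, the notable change here is that SIMD shifts stop being Operator1<int32_t> immediates (the deleted SIMD_SHIFT_OPS block) and become ordinary two-input pure operators, so the shift count can be a runtime value. A plain C++ reading of the new I32x4ShrU shape follows; masking the count to the lane width is my assumption from wasm shift semantics, not something this hunk states:

#include <array>
#include <cassert>
#include <cstdint>

// One runtime shift count, masked to the lane width and applied to every
// lane: the shape of the new two-input I32x4ShrU.
std::array<uint32_t, 4> I32x4ShrU(std::array<uint32_t, 4> v, uint32_t shift) {
  for (uint32_t& lane : v) lane >>= (shift & 31);
  return v;
}

int main() {
  auto r = I32x4ShrU({8, 16, 32, 64}, 3);
  assert((r == std::array<uint32_t, 4>{1, 2, 4, 8}));
}
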
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 0f8130120693f9..17db145f58e449 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -314,6 +314,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   // This operator reinterprets the bits of a word as a Smi.
   const Operator* BitcastWordToTaggedSigned();
 
+  // This operator reinterprets the bits of a word32 as a Compressed Smi.
+  const Operator* BitcastWord32ToCompressedSigned();
+
+  // This operator reinterprets the bits of a Compressed Smi as a word32.
+  const Operator* BitcastCompressedSignedToWord32();
+
   // JavaScript float64 to int32/uint32 truncation.
   const Operator* TruncateFloat64ToWord32();
 
@@ -471,7 +477,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* F64x2Splat();
   const Operator* F64x2Abs();
   const Operator* F64x2Neg();
+  const Operator* F64x2Add();
+  const Operator* F64x2Sub();
+  const Operator* F64x2Mul();
+  const Operator* F64x2Div();
   const Operator* F64x2ExtractLane(int32_t);
+  const Operator* F64x2Min();
+  const Operator* F64x2Max();
   const Operator* F64x2ReplaceLane(int32_t);
   const Operator* F64x2Eq();
   const Operator* F64x2Ne();
@@ -503,16 +515,20 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* I64x2ExtractLane(int32_t);
   const Operator* I64x2ReplaceLane(int32_t);
   const Operator* I64x2Neg();
-  const Operator* I64x2Shl(int32_t);
-  const Operator* I64x2ShrS(int32_t);
+  const Operator* I64x2Shl();
+  const Operator* I64x2ShrS();
   const Operator* I64x2Add();
   const Operator* I64x2Sub();
   const Operator* I64x2Mul();
+  const Operator* I64x2MinS();
+  const Operator* I64x2MaxS();
   const Operator* I64x2Eq();
   const Operator* I64x2Ne();
   const Operator* I64x2GtS();
   const Operator* I64x2GeS();
-  const Operator* I64x2ShrU(int32_t);
+  const Operator* I64x2ShrU();
+  const Operator* I64x2MinU();
+  const Operator* I64x2MaxU();
   const Operator* I64x2GtU();
   const Operator* I64x2GeU();
 
@@ -523,8 +539,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* I32x4SConvertI16x8Low();
   const Operator* I32x4SConvertI16x8High();
   const Operator* I32x4Neg();
-  const Operator* I32x4Shl(int32_t);
-  const Operator* I32x4ShrS(int32_t);
+  const Operator* I32x4Shl();
+  const Operator* I32x4ShrS();
   const Operator* I32x4Add();
   const Operator* I32x4AddHoriz();
   const Operator* I32x4Sub();
@@ -539,7 +555,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* I32x4UConvertF32x4();
   const Operator* I32x4UConvertI16x8Low();
   const Operator* I32x4UConvertI16x8High();
-  const Operator* I32x4ShrU(int32_t);
+  const Operator* I32x4ShrU();
   const Operator* I32x4MinU();
   const Operator* I32x4MaxU();
   const Operator* I32x4GtU();
@@ -551,8 +567,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* I16x8SConvertI8x16Low();
   const Operator* I16x8SConvertI8x16High();
   const Operator* I16x8Neg();
-  const Operator* I16x8Shl(int32_t);
-  const Operator* I16x8ShrS(int32_t);
+  const Operator* I16x8Shl();
+  const Operator* I16x8ShrS();
   const Operator* I16x8SConvertI32x4();
   const Operator* I16x8Add();
   const Operator* I16x8AddSaturateS();
@@ -569,7 +585,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
 
   const Operator* I16x8UConvertI8x16Low();
   const Operator* I16x8UConvertI8x16High();
-  const Operator* I16x8ShrU(int32_t);
+  const Operator* I16x8ShrU();
   const Operator* I16x8UConvertI32x4();
   const Operator* I16x8AddSaturateU();
   const Operator* I16x8SubSaturateU();
@@ -582,8 +598,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* I8x16ExtractLane(int32_t);
   const Operator* I8x16ReplaceLane(int32_t);
   const Operator* I8x16Neg();
-  const Operator* I8x16Shl(int32_t);
-  const Operator* I8x16ShrS(int32_t);
+  const Operator* I8x16Shl();
+  const Operator* I8x16ShrS();
   const Operator* I8x16SConvertI16x8();
   const Operator* I8x16Add();
   const Operator* I8x16AddSaturateS();
@@ -597,7 +613,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* I8x16GtS();
   const Operator* I8x16GeS();
 
-  const Operator* I8x16ShrU(int32_t);
+  const Operator* I8x16ShrU();
   const Operator* I8x16UConvertI16x8();
   const Operator* I8x16AddSaturateU();
   const Operator* I8x16SubSaturateU();
@@ -651,10 +667,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* Word64PoisonOnSpeculation();
 
   // Access to the machine stack.
-  const Operator* LoadStackPointer();
   const Operator* LoadFramePointer();
   const Operator* LoadParentFramePointer();
 
+  // Compares: stack_pointer > value.
+  const Operator* StackPointerGreaterThan();
+
   // Memory barrier.
   const Operator* MemBarrier();
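
StackPointerGreaterThan is the replacement for the deleted LoadStackPointer: instead of materializing the stack pointer and comparing it in generic machine IR, the comparison is a single operator with one value input (the limit) and a bit output, which is also why the stack-check matchers in node-matchers.h disappear below. A conceptual model only, with a local's address standing in for the stack pointer:

#include <cassert>
#include <cstdint>

// Model of StackPointerGreaterThan: one value input (the limit), one bit
// output. A local's address approximates the stack pointer here.
bool StackPointerGreaterThan(uintptr_t limit) {
  char probe;
  return reinterpret_cast<uintptr_t>(&probe) > limit;
}

int main() {
  // With a limit of 0 the check trivially passes on common platforms.
  assert(StackPointerGreaterThan(0));
}
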
 
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index 07ac95b4f7a539..1e2434f4ae67ce 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -5,9 +5,9 @@
 #include "src/compiler/map-inference.h"
 
 #include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/simplified-operator.h"
-#include "src/compiler/vector-slot-pair.h"
 #include "src/objects/map-inl.h"
 #include "src/zone/zone-handle-set.h"
 
@@ -93,7 +93,7 @@ MapHandles const& MapInference::GetMaps() {
 
 void MapInference::InsertMapChecks(JSGraph* jsgraph, Node** effect,
                                    Node* control,
-                                   const VectorSlotPair& feedback) {
+                                   const FeedbackSource& feedback) {
   CHECK(HaveMaps());
   CHECK(feedback.IsValid());
   ZoneHandleSet<Map> maps;
@@ -112,7 +112,7 @@ bool MapInference::RelyOnMapsViaStability(
 
 bool MapInference::RelyOnMapsPreferStability(
     CompilationDependencies* dependencies, JSGraph* jsgraph, Node** effect,
-    Node* control, const VectorSlotPair& feedback) {
+    Node* control, const FeedbackSource& feedback) {
   CHECK(HaveMaps());
   if (Safe()) return false;
   if (RelyOnMapsViaStability(dependencies)) return true;
@@ -123,7 +123,7 @@ bool MapInference::RelyOnMapsPreferStability(
 bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies,
                                     JSGraph* jsgraph, Node** effect,
                                     Node* control,
-                                    const VectorSlotPair& feedback) {
+                                    const FeedbackSource& feedback) {
   if (Safe()) return true;
 
   auto is_stable = [this](Handle<Map> map) {
diff --git a/deps/v8/src/compiler/map-inference.h b/deps/v8/src/compiler/map-inference.h
index 64cec77f2b0188..acba2eb0f2ff08 100644
--- a/deps/v8/src/compiler/map-inference.h
+++ b/deps/v8/src/compiler/map-inference.h
@@ -13,11 +13,11 @@
 namespace v8 {
 namespace internal {
 
-class VectorSlotPair;
 
 namespace compiler {
 
 class CompilationDependencies;
+struct FeedbackSource;
 class JSGraph;
 class JSHeapBroker;
 class Node;
@@ -67,10 +67,10 @@ class MapInference {
   // dependencies were taken.
   bool RelyOnMapsPreferStability(CompilationDependencies* dependencies,
                                  JSGraph* jsgraph, Node** effect, Node* control,
-                                 const VectorSlotPair& feedback);
+                                 const FeedbackSource& feedback);
   // Inserts map checks even if maps were already reliable.
   void InsertMapChecks(JSGraph* jsgraph, Node** effect, Node* control,
-                       const VectorSlotPair& feedback);
+                       const FeedbackSource& feedback);
 
   // Internally marks the maps as reliable (thus bypassing the safety check) and
   // returns the NoChange reduction. USE THIS ONLY WHEN RETURNING, e.g.:
@@ -98,7 +98,7 @@ class MapInference {
       std::function<bool(InstanceType)> f) const;
   V8_WARN_UNUSED_RESULT bool RelyOnMapsHelper(
       CompilationDependencies* dependencies, JSGraph* jsgraph, Node** effect,
-      Node* control, const VectorSlotPair& feedback);
+      Node* control, const FeedbackSource& feedback);
 };
 
 }  // namespace compiler
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 368c060c1d90e8..8684f2ce3cf5bc 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -101,6 +101,12 @@ bool CanAllocate(const Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kBitcastTaggedToWord:
     case IrOpcode::kBitcastWordToTagged:
+    case IrOpcode::kChangeCompressedToTagged:
+    case IrOpcode::kChangeCompressedSignedToTaggedSigned:
+    case IrOpcode::kChangeCompressedPointerToTaggedPointer:
+    case IrOpcode::kChangeTaggedToCompressed:
+    case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+    case IrOpcode::kChangeTaggedPointerToCompressedPointer:
     case IrOpcode::kComment:
     case IrOpcode::kAbortCSAAssert:
     case IrOpcode::kDebugBreak:
@@ -161,7 +167,6 @@ bool CanAllocate(const Node* node) {
       return false;
 
     case IrOpcode::kCall:
-    case IrOpcode::kCallWithCallerSavedRegisters:
       return !(CallDescriptorOf(node->op())->flags() &
                CallDescriptor::kNoAllocate);
     default:
@@ -231,8 +236,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
       return VisitAllocateRaw(node, state);
     case IrOpcode::kCall:
       return VisitCall(node, state);
-    case IrOpcode::kCallWithCallerSavedRegisters:
-      return VisitCallWithCallerSavedRegisters(node, state);
     case IrOpcode::kLoadFromObject:
       return VisitLoadFromObject(node, state);
     case IrOpcode::kLoadElement:
@@ -258,6 +261,35 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
 
 #define __ gasm()->
 
+bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
+                                                     const Edge edge) {
+  if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) {
+    // In Pointer Compression we might have a Compress node between an
+    // AllocateRaw and the value used as input. This case is trickier since we
+    // have to check all of the Compress node edges to test for a StoreField.
+    for (Edge const new_edge : node->use_edges()) {
+      if (AllocationTypeNeedsUpdateToOld(new_edge.from(), new_edge)) {
+        return true;
+      }
+    }
+
+    // Reaching this point means we tested all edges of the Compress node
+    // and found no reason to update the AllocationType.
+    return false;
+  }
+
+  // Test to see if we need to update the AllocationType.
+  if (node->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
+    Node* parent = node->InputAt(0);
+    if (parent->opcode() == IrOpcode::kAllocateRaw &&
+        AllocationTypeOf(parent->op()) == AllocationType::kOld) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
 void MemoryOptimizer::VisitAllocateRaw(Node* node,
                                        AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
@@ -278,8 +310,17 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
   if (allocation_type == AllocationType::kOld) {
     for (Edge const edge : node->use_edges()) {
       Node* const user = edge.from();
+
       if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
-        Node* const child = user->InputAt(1);
+        Node* child = user->InputAt(1);
+        // In Pointer Compression we might have a Compress node between an
+        // AllocateRaw and the value used as input. If so, we need to look
+        // through the Compress node to reach the allocation it wraps.
+        if (COMPRESS_POINTERS_BOOL &&
+            IrOpcode::IsCompressOpcode(child->opcode())) {
+          child = child->InputAt(0);
+        }
+
         if (child->opcode() == IrOpcode::kAllocateRaw &&
             AllocationTypeOf(child->op()) == AllocationType::kYoung) {
           NodeProperties::ChangeOp(child, node->op());
@@ -291,13 +332,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
     DCHECK_EQ(AllocationType::kYoung, allocation_type);
     for (Edge const edge : node->use_edges()) {
       Node* const user = edge.from();
-      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
-        Node* const parent = user->InputAt(0);
-        if (parent->opcode() == IrOpcode::kAllocateRaw &&
-            AllocationTypeOf(parent->op()) == AllocationType::kOld) {
-          allocation_type = AllocationType::kOld;
-          break;
-        }
+      if (AllocationTypeNeedsUpdateToOld(user, edge)) {
+        allocation_type = AllocationType::kOld;
+        break;
       }
     }
   }
@@ -523,16 +560,6 @@ void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
   EnqueueUses(node, state);
 }
 
-void MemoryOptimizer::VisitCallWithCallerSavedRegisters(
-    Node* node, AllocationState const* state) {
-  DCHECK_EQ(IrOpcode::kCallWithCallerSavedRegisters, node->opcode());
-  // If the call can allocate, we start with a fresh state.
-  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
-    state = empty_state();
-  }
-  EnqueueUses(node, state);
-}
-
 void MemoryOptimizer::VisitLoadElement(Node* node,
                                        AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
@@ -540,9 +567,7 @@ void MemoryOptimizer::VisitLoadElement(Node* node,
   Node* index = node->InputAt(1);
   node->ReplaceInput(1, ComputeIndex(access, index));
   MachineType type = access.machine_type;
-  if (NeedsPoisoning(access.load_sensitivity) &&
-      type.representation() != MachineRepresentation::kTaggedPointer &&
-      type.representation() != MachineRepresentation::kCompressedPointer) {
+  if (NeedsPoisoning(access.load_sensitivity)) {
     NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
   } else {
     NodeProperties::ChangeOp(node, machine()->Load(type));
@@ -556,9 +581,7 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
   Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
   node->InsertInput(graph()->zone(), 1, offset);
   MachineType type = access.machine_type;
-  if (NeedsPoisoning(access.load_sensitivity) &&
-      type.representation() != MachineRepresentation::kTaggedPointer &&
-      type.representation() != MachineRepresentation::kCompressedPointer) {
+  if (NeedsPoisoning(access.load_sensitivity)) {
     NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
   } else {
     NodeProperties::ChangeOp(node, machine()->Load(type));
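
AllocationTypeNeedsUpdateToOld factors out the old inline test and adds the pointer-compression twist: the value stored into an old object may be wrapped in a Compress node, so the check recurses through that node's use edges before giving up. A stripped-down model of the walk (toy graph types, not V8's):

#include <cassert>
#include <utility>
#include <vector>

enum class Op { kAllocateRaw, kStoreField, kCompress, kOther };
enum class AllocationType { kYoung, kOld };

struct Node {
  Op op;
  AllocationType alloc = AllocationType::kYoung;
  std::vector<Node*> inputs;
  std::vector<std::pair<Node*, int>> use_edges;  // (user, input index)
};

bool NeedsUpdateToOld(Node* user, int edge_index) {
  if (user->op == Op::kCompress) {
    // Look through the compress node: test all of its own use edges.
    for (auto& [u, i] : user->use_edges)
      if (NeedsUpdateToOld(u, i)) return true;
    return false;
  }
  // Stored as the value (input 1) of a StoreField into an old allocation?
  return user->op == Op::kStoreField && edge_index == 1 &&
         user->inputs[0]->op == Op::kAllocateRaw &&
         user->inputs[0]->alloc == AllocationType::kOld;
}

int main() {
  Node old_obj{Op::kAllocateRaw, AllocationType::kOld, {}, {}};
  Node young{Op::kAllocateRaw, AllocationType::kYoung, {}, {}};
  Node compress{Op::kCompress, AllocationType::kYoung, {&young}, {}};
  Node store{Op::kStoreField, AllocationType::kYoung, {&old_obj, &compress}, {}};
  compress.use_edges = {{&store, 1}};
  // The young allocation reaches an old-object store through the compress
  // node, so its AllocationType must be promoted to kOld.
  assert(NeedsUpdateToOld(&compress, /*edge_index=*/0));
}
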
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 71f33fa3d7d7df..a663bf07ed6a49 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -118,7 +118,6 @@ class MemoryOptimizer final {
   void VisitNode(Node*, AllocationState const*);
   void VisitAllocateRaw(Node*, AllocationState const*);
   void VisitCall(Node*, AllocationState const*);
-  void VisitCallWithCallerSavedRegisters(Node*, AllocationState const*);
   void VisitLoadFromObject(Node*, AllocationState const*);
   void VisitLoadElement(Node*, AllocationState const*);
   void VisitLoadField(Node*, AllocationState const*);
@@ -142,6 +141,11 @@ class MemoryOptimizer final {
 
   bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
 
+  // Returns true if the AllocationType of the AllocateRaw node currently
+  // being visited needs to be updated to kOld, because tenuring propagates
+  // from outer to inner allocations.
+  bool AllocationTypeNeedsUpdateToOld(Node* const user, const Edge edge);
+
   AllocationState const* empty_state() const { return empty_state_; }
   Graph* graph() const;
   Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 7c0c702e3fa5a3..20698f4cd6d637 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -761,66 +761,6 @@ struct V8_EXPORT_PRIVATE DiamondMatcher
   Node* if_false_;
 };
 
-template <class BinopMatcher, IrOpcode::Value expected_opcode>
-struct WasmStackCheckMatcher {
-  explicit WasmStackCheckMatcher(Node* compare) : compare_(compare) {}
-
-  bool Matched() {
-    if (compare_->opcode() != expected_opcode) return false;
-    BinopMatcher m(compare_);
-    return MatchedInternal(m.left(), m.right());
-  }
-
- private:
-  bool MatchedInternal(const typename BinopMatcher::LeftMatcher& l,
-                       const typename BinopMatcher::RightMatcher& r) {
-    // In wasm, the stack check is performed by loading the value given by
-    // the address of a field stored in the instance object. That object is
-    // passed as a parameter.
-    if (l.IsLoad() && r.IsLoadStackPointer()) {
-      LoadMatcher<LoadMatcher<NodeMatcher>> mleft(l.node());
-      if (mleft.object().IsLoad() && mleft.index().Is(0) &&
-          mleft.object().object().IsParameter()) {
-        return true;
-      }
-    }
-    return false;
-  }
-  Node* compare_;
-};
-
-template <class BinopMatcher, IrOpcode::Value expected_opcode>
-struct StackCheckMatcher {
-  StackCheckMatcher(Isolate* isolate, Node* compare)
-      : isolate_(isolate), compare_(compare) {
-    DCHECK_NOT_NULL(isolate);
-  }
-  bool Matched() {
-    // TODO(jgruber): Ideally, we could be more flexible here and also match the
-    // same pattern with switched operands (i.e.: left is LoadStackPointer and
-    // right is the js_stack_limit load). But to be correct in all cases, we'd
-    // then have to invert the outcome of the stack check comparison.
-    if (compare_->opcode() != expected_opcode) return false;
-    BinopMatcher m(compare_);
-    return MatchedInternal(m.left(), m.right());
-  }
-
- private:
-  bool MatchedInternal(const typename BinopMatcher::LeftMatcher& l,
-                       const typename BinopMatcher::RightMatcher& r) {
-    if (l.IsLoad() && r.IsLoadStackPointer()) {
-      LoadMatcher<ExternalReferenceMatcher> mleft(l.node());
-      ExternalReference js_stack_limit =
-          ExternalReference::address_of_stack_limit(isolate_);
-      if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) return true;
-    }
-    return false;
-  }
-
-  Isolate* isolate_;
-  Node* compare_;
-};
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 1e00ec00f48a29..7ba3a59f6f98d5 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -380,7 +380,10 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
     ObjectRef target = mtarget.Ref(broker);
     JSFunctionRef newtarget = mnewtarget.Ref(broker).AsJSFunction();
     if (newtarget.map().has_prototype_slot() && newtarget.has_initial_map()) {
-      if (broker->mode() == JSHeapBroker::kSerializing) newtarget.Serialize();
+      if (!newtarget.serialized()) {
+        TRACE_BROKER_MISSING(broker, "initial map on " << newtarget);
+        return base::nullopt;
+      }
       MapRef initial_map = newtarget.initial_map();
       if (initial_map.GetConstructor().equals(target)) {
         DCHECK(target.AsJSFunction().map().is_constructor());
@@ -449,7 +452,7 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
       }
       case IrOpcode::kJSCreatePromise: {
         if (IsSame(receiver, effect)) {
-          *maps_return = ZoneHandleSet<Map>(broker->native_context()
+          *maps_return = ZoneHandleSet<Map>(broker->target_native_context()
                                                 .promise_function()
                                                 .initial_map()
                                                 .object());
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 7688379e9f317d..525ce33c84b1ce 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -22,10 +22,12 @@ Node::OutOfLineInputs* Node::OutOfLineInputs::New(Zone* zone, int capacity) {
 
 void Node::OutOfLineInputs::ExtractFrom(Use* old_use_ptr, Node** old_input_ptr,
                                         int count) {
+  DCHECK_GE(count, 0);
   // Extract the inputs from the old use and input pointers and copy them
   // to this out-of-line-storage.
   Use* new_use_ptr = reinterpret_cast<Use*>(this) - 1;
   Node** new_input_ptr = inputs();
+  CHECK_IMPLIES(count > 0, Use::InputIndexField::is_valid(count - 1));
   for (int current = 0; current < count; current++) {
     new_use_ptr->bit_field_ =
         Use::InputIndexField::encode(current) | Use::InlineField::encode(false);
@@ -51,6 +53,8 @@ void Node::OutOfLineInputs::ExtractFrom(Use* old_use_ptr, Node** old_input_ptr,
 
 Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
                 Node* const* inputs, bool has_extensible_inputs) {
+  DCHECK_GE(input_count, 0);
+
   Node** input_ptr;
   Use* use_ptr;
   Node* node;
@@ -102,6 +106,8 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
   }
 
   // Initialize the input pointers and the uses.
+  CHECK_IMPLIES(input_count > 0,
+                Use::InputIndexField::is_valid(input_count - 1));
   for (int current = 0; current < input_count; ++current) {
     Node* to = *inputs++;
     input_ptr[current] = to;
@@ -137,19 +143,20 @@ void Node::AppendInput(Zone* zone, Node* new_to) {
   DCHECK_NOT_NULL(zone);
   DCHECK_NOT_NULL(new_to);
 
-  int inline_count = InlineCountField::decode(bit_field_);
-  int inline_capacity = InlineCapacityField::decode(bit_field_);
+  int const inline_count = InlineCountField::decode(bit_field_);
+  int const inline_capacity = InlineCapacityField::decode(bit_field_);
   if (inline_count < inline_capacity) {
     // Append inline input.
     bit_field_ = InlineCountField::update(bit_field_, inline_count + 1);
     *GetInputPtr(inline_count) = new_to;
     Use* use = GetUsePtr(inline_count);
+    STATIC_ASSERT(InlineCapacityField::kMax <= Use::InputIndexField::kMax);
     use->bit_field_ = Use::InputIndexField::encode(inline_count) |
                       Use::InlineField::encode(true);
     new_to->AppendUse(use);
   } else {
     // Append out-of-line input.
-    int input_count = InputCount();
+    int const input_count = InputCount();
     OutOfLineInputs* outline = nullptr;
     if (inline_count != kOutlineMarker) {
      // Switch to out-of-line inputs.
@@ -172,6 +179,7 @@ void Node::AppendInput(Zone* zone, Node* new_to) {
     outline->count_++;
     *GetInputPtr(input_count) = new_to;
     Use* use = GetUsePtr(input_count);
+    CHECK(Use::InputIndexField::is_valid(input_count));
     use->bit_field_ = Use::InputIndexField::encode(input_count) |
                       Use::InlineField::encode(false);
     new_to->AppendUse(use);
@@ -336,9 +344,13 @@ Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
       bit_field_(IdField::encode(id) | InlineCountField::encode(inline_count) |
                  InlineCapacityField::encode(inline_capacity)),
       first_use_(nullptr) {
+  // Check that the id didn't overflow.
+  STATIC_ASSERT(IdField::kMax < std::numeric_limits<NodeId>::max());
+  CHECK(IdField::is_valid(id));
+
   // Inputs must either be out of line or within the inline capacity.
-  DCHECK_GE(kMaxInlineCapacity, inline_capacity);
   DCHECK(inline_count == kOutlineMarker || inline_count <= inline_capacity);
+  DCHECK_LE(inline_capacity, kMaxInlineCapacity);
 }
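
The new CHECKs and STATIC_ASSERTs above all guard the same invariant: an input index must fit in Use::InputIndexField, or the bitfield encoding silently wraps. A minimal sketch of the encode/is_valid contract being checked (simplified from, and not identical to, V8's BitField template):

    #include <cassert>
    #include <cstdint>

    // Toy BitField: a value of type T stored in `kSize` bits at `kShift`.
    template <class T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMax = (1u << kSize) - 1;
      static constexpr bool is_valid(T value) {
        return (static_cast<uint32_t>(value) & ~kMax) == 0;
      }
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t field) {
        return static_cast<T>((field >> kShift) & kMax);
      }
    };

    using InputIndexField = BitField<unsigned, 1, 17>;  // the pre-patch width

    int main() {
      // An index of kMax still round-trips; kMax + 1 would wrap, which is
      // exactly what CHECK(Use::InputIndexField::is_valid(...)) now rejects.
      assert(InputIndexField::decode(InputIndexField::encode(
                 InputIndexField::kMax)) == InputIndexField::kMax);
      assert(!InputIndexField::is_valid(InputIndexField::kMax + 1));
    }
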
 
 
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index d7daca38ef612a..76ea4bb1a9ed86 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -201,9 +201,7 @@ class V8_EXPORT_PRIVATE Node final {
     }
 
     using InlineField = BitField<bool, 0, 1>;
-    using InputIndexField = BitField<unsigned, 1, 17>;
-    // Leaving some space in the bitset in case we ever decide to record
-    // the output index.
+    using InputIndexField = BitField<unsigned, 1, 31>;
   };
 
   //============================================================================
@@ -291,7 +289,6 @@ class V8_EXPORT_PRIVATE Node final {
   using InlineCountField = BitField<unsigned, 24, 4>;
   using InlineCapacityField = BitField<unsigned, 28, 4>;
   static const int kOutlineMarker = InlineCountField::kMax;
-  static const int kMaxInlineCount = InlineCountField::kMax - 1;
   static const int kMaxInlineCapacity = InlineCapacityField::kMax - 1;
 
   const Operator* op_;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index d621e23e3a3c42..fe45d9276ac4de 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -66,7 +66,6 @@
   V(ObjectId)                     \
   V(TypedObjectState)             \
   V(Call)                         \
-  V(CallWithCallerSavedRegisters) \
   V(Parameter)                    \
   V(OsrValue)                     \
   V(LoopExit)                     \
@@ -204,6 +203,7 @@
   V(JSForInEnumerate)                  \
   V(JSForInNext)                       \
   V(JSForInPrepare)                    \
+  V(JSGetIterator)                     \
   V(JSLoadMessage)                     \
   V(JSStoreMessage)                    \
   V(JSLoadModule)                      \
@@ -617,15 +617,33 @@
   V(Float64Mod)                       \
   V(Float64Pow)
 
-#define MACHINE_WORD64_ATOMIC_OP_LIST(V) \
-  V(Word64AtomicLoad)                    \
-  V(Word64AtomicStore)                   \
-  V(Word64AtomicAdd)                     \
-  V(Word64AtomicSub)                     \
-  V(Word64AtomicAnd)                     \
-  V(Word64AtomicOr)                      \
-  V(Word64AtomicXor)                     \
-  V(Word64AtomicExchange)                \
+#define MACHINE_ATOMIC_OP_LIST(V)    \
+  V(Word32AtomicLoad)                \
+  V(Word32AtomicStore)               \
+  V(Word32AtomicExchange)            \
+  V(Word32AtomicCompareExchange)     \
+  V(Word32AtomicAdd)                 \
+  V(Word32AtomicSub)                 \
+  V(Word32AtomicAnd)                 \
+  V(Word32AtomicOr)                  \
+  V(Word32AtomicXor)                 \
+  V(Word32AtomicPairLoad)            \
+  V(Word32AtomicPairStore)           \
+  V(Word32AtomicPairAdd)             \
+  V(Word32AtomicPairSub)             \
+  V(Word32AtomicPairAnd)             \
+  V(Word32AtomicPairOr)              \
+  V(Word32AtomicPairXor)             \
+  V(Word32AtomicPairExchange)        \
+  V(Word32AtomicPairCompareExchange) \
+  V(Word64AtomicLoad)                \
+  V(Word64AtomicStore)               \
+  V(Word64AtomicAdd)                 \
+  V(Word64AtomicSub)                 \
+  V(Word64AtomicAnd)                 \
+  V(Word64AtomicOr)                  \
+  V(Word64AtomicXor)                 \
+  V(Word64AtomicExchange)            \
   V(Word64AtomicCompareExchange)
 
 #define MACHINE_OP_LIST(V)                  \
@@ -637,7 +655,7 @@
   MACHINE_FLOAT32_UNOP_LIST(V)              \
   MACHINE_FLOAT64_BINOP_LIST(V)             \
   MACHINE_FLOAT64_UNOP_LIST(V)              \
-  MACHINE_WORD64_ATOMIC_OP_LIST(V)          \
+  MACHINE_ATOMIC_OP_LIST(V)                 \
   V(AbortCSAAssert)                         \
   V(DebugBreak)                             \
   V(Comment)                                \
@@ -656,6 +674,8 @@
   V(BitcastTaggedSignedToWord)              \
   V(BitcastWordToTagged)                    \
   V(BitcastWordToTaggedSigned)              \
+  V(BitcastWord32ToCompressedSigned)        \
+  V(BitcastCompressedSignedToWord32)        \
   V(TruncateFloat64ToWord32)                \
   V(ChangeFloat32ToFloat64)                 \
   V(ChangeFloat64ToInt32)                   \
@@ -702,7 +722,6 @@
   V(TaggedPoisonOnSpeculation)              \
   V(Word32PoisonOnSpeculation)              \
   V(Word64PoisonOnSpeculation)              \
-  V(LoadStackPointer)                       \
   V(LoadFramePointer)                       \
   V(LoadParentFramePointer)                 \
   V(UnalignedLoad)                          \
@@ -716,30 +735,13 @@
   V(ProtectedLoad)                          \
   V(ProtectedStore)                         \
   V(MemoryBarrier)                          \
-  V(Word32AtomicLoad)                       \
-  V(Word32AtomicStore)                      \
-  V(Word32AtomicExchange)                   \
-  V(Word32AtomicCompareExchange)            \
-  V(Word32AtomicAdd)                        \
-  V(Word32AtomicSub)                        \
-  V(Word32AtomicAnd)                        \
-  V(Word32AtomicOr)                         \
-  V(Word32AtomicXor)                        \
-  V(Word32AtomicPairLoad)                   \
-  V(Word32AtomicPairStore)                  \
-  V(Word32AtomicPairAdd)                    \
-  V(Word32AtomicPairSub)                    \
-  V(Word32AtomicPairAnd)                    \
-  V(Word32AtomicPairOr)                     \
-  V(Word32AtomicPairXor)                    \
-  V(Word32AtomicPairExchange)               \
-  V(Word32AtomicPairCompareExchange)        \
   V(SignExtendWord8ToInt32)                 \
   V(SignExtendWord16ToInt32)                \
   V(SignExtendWord8ToInt64)                 \
   V(SignExtendWord16ToInt64)                \
   V(SignExtendWord32ToInt64)                \
-  V(UnsafePointerAdd)
+  V(UnsafePointerAdd)                       \
+  V(StackPointerGreaterThan)
 
 #define MACHINE_SIMD_OP_LIST(V) \
   V(F64x2Splat)                 \
@@ -747,6 +749,12 @@
   V(F64x2ReplaceLane)           \
   V(F64x2Abs)                   \
   V(F64x2Neg)                   \
+  V(F64x2Add)                   \
+  V(F64x2Sub)                   \
+  V(F64x2Mul)                   \
+  V(F64x2Div)                   \
+  V(F64x2Min)                   \
+  V(F64x2Max)                   \
   V(F64x2Eq)                    \
   V(F64x2Ne)                    \
   V(F64x2Lt)                    \
@@ -764,6 +772,7 @@
   V(F32x4AddHoriz)              \
   V(F32x4Sub)                   \
   V(F32x4Mul)                   \
+  V(F32x4Div)                   \
   V(F32x4Min)                   \
   V(F32x4Max)                   \
   V(F32x4Eq)                    \
@@ -781,11 +790,15 @@
   V(I64x2Add)                   \
   V(I64x2Sub)                   \
   V(I64x2Mul)                   \
+  V(I64x2MinS)                  \
+  V(I64x2MaxS)                  \
   V(I64x2Eq)                    \
   V(I64x2Ne)                    \
   V(I64x2GtS)                   \
   V(I64x2GeS)                   \
   V(I64x2ShrU)                  \
+  V(I64x2MinU)                  \
+  V(I64x2MaxU)                  \
   V(I64x2GtU)                   \
   V(I64x2GeU)                   \
   V(I32x4Splat)                 \
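
These V(...) lists are X-macros: each consumer defines V to stamp out a different artifact (enum values, switch cases, name tables) from one authoritative list, which is why the atomic ops can be consolidated into MACHINE_ATOMIC_OP_LIST without touching its users. A toy example of the pattern, with a made-up two-entry list rather than the real one:

    // Define the list once...
    #define MY_OP_LIST(V) \
      V(Word32AtomicLoad) \
      V(Word64AtomicLoad)

    // ...then expand it into an enum...
    enum class Opcode {
    #define DECLARE_OPCODE(Name) k##Name,
      MY_OP_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
    };

    // ...and into a parallel name table that can never get out of sync.
    constexpr const char* kOpcodeNames[] = {
    #define OPCODE_NAME(Name) #Name,
        MY_OP_LIST(OPCODE_NAME)
    #undef OPCODE_NAME
    };

    int main() { return static_cast<int>(Opcode::kWord32AtomicLoad); }
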
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 959e7433693f1d..1fcc12291d9136 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -54,6 +54,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
     case IrOpcode::kJSStackCheck:
     case IrOpcode::kJSStoreGlobal:
     case IrOpcode::kJSStoreMessage:
+    case IrOpcode::kJSGetIterator:
       return false;
 
     case IrOpcode::kJSCallRuntime:
@@ -237,6 +238,9 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
     case IrOpcode::kJSPerformPromiseThen:
     case IrOpcode::kJSObjectIsArray:
     case IrOpcode::kJSRegExpTest:
+
+    // Iterator protocol operations
+    case IrOpcode::kJSGetIterator:
       return true;
 
     default:
diff --git a/deps/v8/src/compiler/per-isolate-compiler-cache.h b/deps/v8/src/compiler/per-isolate-compiler-cache.h
index b715950c0c7ecf..f4f74381284076 100644
--- a/deps/v8/src/compiler/per-isolate-compiler-cache.h
+++ b/deps/v8/src/compiler/per-isolate-compiler-cache.h
@@ -19,41 +19,41 @@ namespace compiler {
 
 class ObjectData;
 
-// This class serves as a per-isolate container of data that should be
-// persisted between compiler runs. For now it stores the code builtins
-// so they are not serialized on each compiler run.
+// This class serves as a container of data that should persist across all
+// (optimizing) compiler runs in an isolate. For now it stores serialized data
+// for various common objects such as builtins, so that these objects don't have
+// to be serialized in each compilation job. See JSHeapBroker::InitializeRefsMap
+// for details.
 class PerIsolateCompilerCache : public ZoneObject {
  public:
   explicit PerIsolateCompilerCache(Zone* zone)
       : zone_(zone), refs_snapshot_(nullptr) {}
 
-  RefsMap* GetSnapshot() { return refs_snapshot_; }
+  bool HasSnapshot() const { return refs_snapshot_ != nullptr; }
+  RefsMap* GetSnapshot() {
+    DCHECK(HasSnapshot());
+    return refs_snapshot_;
+  }
   void SetSnapshot(RefsMap* refs) {
-    DCHECK_NULL(refs_snapshot_);
+    DCHECK(!HasSnapshot());
     DCHECK(!refs->IsEmpty());
     refs_snapshot_ = new (zone_) RefsMap(refs, zone_);
+    DCHECK(HasSnapshot());
   }
 
-  bool HasSnapshot() const { return refs_snapshot_; }
-
   Zone* zone() const { return zone_; }
 
   static void Setup(Isolate* isolate) {
-    if (isolate->compiler_cache()) return;
-
-    // The following zone is supposed to contain compiler-related objects
-    // that should live through all compilations, as opposed to the
-    // broker_zone which holds per-compilation data. It's not meant for
-    // per-compilation or heap broker data.
-    Zone* compiler_zone = new Zone(isolate->allocator(), "Compiler zone");
-    PerIsolateCompilerCache* compiler_cache =
-        new (compiler_zone) PerIsolateCompilerCache(compiler_zone);
-    isolate->set_compiler_utils(compiler_cache, compiler_zone);
+    if (isolate->compiler_cache() == nullptr) {
+      Zone* zone = new Zone(isolate->allocator(), "Compiler zone");
+      PerIsolateCompilerCache* cache = new (zone) PerIsolateCompilerCache(zone);
+      isolate->set_compiler_utils(cache, zone);
+    }
+    DCHECK_NOT_NULL(isolate->compiler_cache());
   }
 
  private:
   Zone* const zone_;
-
   RefsMap* refs_snapshot_;
 };
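
The `new (zone) PerIsolateCompilerCache(zone)` and `new (zone_) RefsMap(refs, zone_)` calls above are placement new into a Zone: objects are bump-allocated and reclaimed wholesale when the zone is destroyed, never deleted individually. A greatly simplified sketch of that mechanism (real zones add segment chaining, alignment, and accounting; this is not V8's implementation):

    #include <cstddef>
    #include <cstdlib>

    // Toy bump allocator standing in for v8::internal::Zone.
    class Zone {
     public:
      explicit Zone(size_t capacity)
          : buffer_(static_cast<char*>(std::malloc(capacity))), top_(buffer_) {}
      ~Zone() { std::free(buffer_); }  // releases every zone object at once
      void* Allocate(size_t size) {
        char* result = top_;
        top_ += size;  // bounds and alignment checks omitted in this sketch
        return result;
      }
     private:
      char* buffer_;
      char* top_;
    };

    // Placement-new hook so `new (zone) T(...)` allocates inside the zone.
    void* operator new(size_t size, Zone* zone) { return zone->Allocate(size); }

    struct Snapshot { int entries = 0; };

    int main() {
      Zone zone(1024);
      Snapshot* s = new (&zone) Snapshot();  // zone-owned, never deleted
      s->entries = 1;
    }  // ~Zone frees the snapshot along with everything else
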
 
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index eb060b71e1fcd3..8b2f4247898918 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -110,6 +110,9 @@ class PipelineData {
         may_have_unverifiable_graph_(false),
         zone_stats_(zone_stats),
         pipeline_statistics_(pipeline_statistics),
+        roots_relative_addressing_enabled_(
+            !isolate->serializer_enabled() &&
+            !isolate->IsGeneratingEmbeddedBuiltins()),
         graph_zone_scope_(zone_stats_, ZONE_NAME),
         graph_zone_(graph_zone_scope_.zone()),
         instruction_zone_scope_(zone_stats_, ZONE_NAME),
@@ -173,12 +176,12 @@ class PipelineData {
 
   // For CodeStubAssembler and machine graph testing entry point.
   PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
-               Isolate* isolate, Graph* graph, Schedule* schedule,
-               SourcePositionTable* source_positions,
+               Isolate* isolate, AccountingAllocator* allocator, Graph* graph,
+               Schedule* schedule, SourcePositionTable* source_positions,
                NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
                const AssemblerOptions& assembler_options)
       : isolate_(isolate),
-        allocator_(isolate->allocator()),
+        allocator_(allocator),
         info_(info),
         debug_name_(info_->GetDebugName()),
         zone_stats_(zone_stats),
@@ -320,6 +323,13 @@ class PipelineData {
     return assembler_options_;
   }
 
+  size_t* address_of_max_unoptimized_frame_height() {
+    return &max_unoptimized_frame_height_;
+  }
+  size_t max_unoptimized_frame_height() const {
+    return max_unoptimized_frame_height_;
+  }
+
   CodeTracer* GetCodeTracer() const {
     return wasm_engine_ == nullptr ? isolate_->GetCodeTracer()
                                    : wasm_engine_->GetCodeTracer();
@@ -434,7 +444,8 @@ class PipelineData {
         codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
         osr_helper_, start_source_position_, jump_optimization_info_,
         info()->GetPoisoningMitigationLevel(), assembler_options_,
-        info_->builtin_index(), std::move(buffer));
+        info_->builtin_index(), max_unoptimized_frame_height(),
+        std::move(buffer));
   }
 
   void BeginPhaseKind(const char* phase_kind_name) {
@@ -451,6 +462,10 @@ class PipelineData {
 
   const char* debug_name() const { return debug_name_.get(); }
 
+  bool roots_relative_addressing_enabled() {
+    return roots_relative_addressing_enabled_;
+  }
+
  private:
   Isolate* const isolate_;
   wasm::WasmEngine* const wasm_engine_ = nullptr;
@@ -468,6 +483,7 @@ class PipelineData {
   CodeGenerator* code_generator_ = nullptr;
   Typer* typer_ = nullptr;
   Typer::Flags typer_flags_ = Typer::kNoFlags;
+  bool roots_relative_addressing_enabled_ = false;
 
   // All objects in the following group of fields are allocated in graph_zone_.
   // They are all set to nullptr when the graph_zone_ is destroyed.
@@ -516,6 +532,11 @@ class PipelineData {
   JumpOptimizationInfo* jump_optimization_info_ = nullptr;
   AssemblerOptions assembler_options_;
 
+  // The maximal combined height of all inlined frames in their unoptimized
+  // state. Calculated during instruction selection, applied during code
+  // generation.
+  size_t max_unoptimized_frame_height_ = 0;
+
   DISALLOW_COPY_AND_ASSIGN(PipelineData);
 };
 
@@ -893,9 +914,7 @@ PipelineCompilationJob::PipelineCompilationJob(
     // Note that the OptimizedCompilationInfo is not initialized at the time
     // we pass it to the CompilationJob constructor, but it is not
     // dereferenced there.
-    : OptimizedCompilationJob(
-          function->GetIsolate()->stack_guard()->real_climit(),
-          &compilation_info_, "TurboFan"),
+    : OptimizedCompilationJob(&compilation_info_, "TurboFan"),
       zone_(function->GetIsolate()->allocator(), ZONE_NAME),
       zone_stats_(function->GetIsolate()->allocator()),
       compilation_info_(&zone_, function->GetIsolate(), shared_info, function),
@@ -973,11 +992,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
   linkage_ = new (compilation_info()->zone()) Linkage(
       Linkage::ComputeIncoming(compilation_info()->zone(), compilation_info()));
 
-  if (!pipeline_.CreateGraph()) {
-    if (isolate->has_pending_exception()) return FAILED;  // Stack overflowed.
-    return AbortOptimization(BailoutReason::kGraphBuildingFailed);
-  }
-
   if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
 
   // Make sure that we have generated the deopt entries code.  This is in order
@@ -985,6 +999,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
   // assembly.
   Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
 
+  if (!pipeline_.CreateGraph()) {
+    CHECK(!isolate->has_pending_exception());
+    return AbortOptimization(BailoutReason::kGraphBuildingFailed);
+  }
+
   return SUCCEEDED;
 }
 
@@ -1048,7 +1067,8 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
 
 class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
  public:
-  WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
+  WasmHeapStubCompilationJob(Isolate* isolate, wasm::WasmEngine* wasm_engine,
+                             CallDescriptor* call_descriptor,
                              std::unique_ptr<Zone> zone, Graph* graph,
                              Code::Kind kind,
                              std::unique_ptr<char[]> debug_name,
@@ -1057,17 +1077,19 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
       // Note that the OptimizedCompilationInfo is not initialized at the time
       // we pass it to the CompilationJob constructor, but it is not
       // dereferenced there.
-      : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_,
-                                "TurboFan"),
+      : OptimizedCompilationJob(&info_, "TurboFan",
+                                CompilationJob::State::kReadyToExecute),
         debug_name_(std::move(debug_name)),
         info_(CStrVector(debug_name_.get()), graph->zone(), kind),
         call_descriptor_(call_descriptor),
-        zone_stats_(isolate->allocator()),
+        zone_stats_(zone->allocator()),
         zone_(std::move(zone)),
         graph_(graph),
-        data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions,
+        data_(&zone_stats_, &info_, isolate, wasm_engine->allocator(), graph_,
+              nullptr, source_positions,
               new (zone_.get()) NodeOriginTable(graph_), nullptr, options),
-        pipeline_(&data_) {}
+        pipeline_(&data_),
+        wasm_engine_(wasm_engine) {}
 
   ~WasmHeapStubCompilationJob() = default;
 
@@ -1085,30 +1107,33 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
   Graph* graph_;
   PipelineData data_;
   PipelineImpl pipeline_;
+  wasm::WasmEngine* wasm_engine_;
 
   DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob);
 };
 
 // static
 std::unique_ptr<OptimizedCompilationJob>
-Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate,
-                                        CallDescriptor* call_descriptor,
-                                        std::unique_ptr<Zone> zone,
-                                        Graph* graph, Code::Kind kind,
-                                        std::unique_ptr<char[]> debug_name,
-                                        const AssemblerOptions& options,
-                                        SourcePositionTable* source_positions) {
+Pipeline::NewWasmHeapStubCompilationJob(
+    Isolate* isolate, wasm::WasmEngine* wasm_engine,
+    CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
+    Code::Kind kind, std::unique_ptr<char[]> debug_name,
+    const AssemblerOptions& options, SourcePositionTable* source_positions) {
   return base::make_unique<WasmHeapStubCompilationJob>(
-      isolate, call_descriptor, std::move(zone), graph, kind,
+      isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind,
       std::move(debug_name), options, source_positions);
 }
 
 CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
     Isolate* isolate) {
+  UNREACHABLE();
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
   std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
     pipeline_statistics.reset(new PipelineStatistics(
-        &info_, isolate->GetTurboStatistics(), &zone_stats_));
+        &info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_));
     pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
   }
   if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) {
@@ -1130,10 +1155,6 @@ CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
             << "\", \"source\":\"\",\n\"phases\":[";
   }
   pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
-  return CompilationJob::SUCCEEDED;
-}
-
-CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
   pipeline_.ComputeScheduledGraph();
   if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
     return CompilationJob::SUCCEEDED;
@@ -1144,8 +1165,11 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
 CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
     Isolate* isolate) {
   Handle<Code> code;
-  if (pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code) &&
-      pipeline_.CommitDependencies(code)) {
+  if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
+    V8::FatalProcessOutOfMemory(isolate,
+                                "WasmHeapStubCompilationJob::FinalizeJobImpl");
+  }
+  if (pipeline_.CommitDependencies(code)) {
     info_.SetCode(code);
 #ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_opt_code) {
@@ -1177,14 +1201,14 @@ struct GraphBuilderPhase {
     if (data->info()->is_bailout_on_uninitialized()) {
       flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
     }
+
+    JSFunctionRef closure(data->broker(), data->info()->closure());
     CallFrequency frequency(1.0f);
     BuildGraphFromBytecode(
-        data->broker(), temp_zone, data->info()->bytecode_array(),
-        data->info()->shared_info(),
-        handle(data->info()->closure()->feedback_vector(), data->isolate()),
+        data->broker(), temp_zone, closure.shared(), closure.feedback_vector(),
         data->info()->osr_offset(), data->jsgraph(), frequency,
-        data->source_positions(), data->native_context(),
-        SourcePosition::kNotInlined, flags, &data->info()->tick_counter());
+        data->source_positions(), SourcePosition::kNotInlined, flags,
+        &data->info()->tick_counter());
   }
 };
 
@@ -1253,14 +1277,15 @@ struct InliningPhase {
     // that need to live until code generation.
     JSNativeContextSpecialization native_context_specialization(
         &graph_reducer, data->jsgraph(), data->broker(), flags,
-        data->native_context(), data->dependencies(), temp_zone, info->zone());
+        data->dependencies(), temp_zone, info->zone());
     JSInliningHeuristic inlining(&graph_reducer,
                                  data->info()->is_inlining_enabled()
                                      ? JSInliningHeuristic::kGeneralInlining
                                      : JSInliningHeuristic::kRestrictedInlining,
                                  temp_zone, data->info(), data->jsgraph(),
                                  data->broker(), data->source_positions());
-    JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph());
+    JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
+                                           data->broker());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &checkpoint_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
@@ -1323,11 +1348,11 @@ struct UntyperPhase {
   }
 };
 
-struct SerializeStandardObjectsPhase {
-  static const char* phase_name() { return "V8.TFSerializeStandardObjects"; }
+struct HeapBrokerInitializationPhase {
+  static const char* phase_name() { return "V8.TFHeapBrokerInitialization"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
-    data->broker()->SerializeStandardObjects();
+    data->broker()->InitializeAndStartSerializing(data->native_context());
   }
 };
 
@@ -1349,11 +1374,8 @@ struct CopyMetadataForConcurrentCompilePhase {
   }
 };
 
-// TODO(turbofan): Move all calls from CopyMetaDataForConcurrentCompilePhase
-// here. Also all the calls to Serialize* methods that are currently sprinkled
-// over inlining will move here as well.
 struct SerializationPhase {
-  static const char* phase_name() { return "V8.TFSerializeBytecode"; }
+  static const char* phase_name() { return "V8.TFSerialization"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
     SerializerForBackgroundCompilationFlags flags;
@@ -1488,7 +1510,8 @@ struct GenericLoweringPhase {
     GraphReducer graph_reducer(temp_zone, data->graph(),
                                &data->info()->tick_counter(),
                                data->jsgraph()->Dead());
-    JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer);
+    JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer,
+                                       data->broker());
     AddReducer(data, &graph_reducer, &generic_lowering);
     graph_reducer.ReduceGraph();
   }
@@ -1613,7 +1636,8 @@ struct LoadEliminationPhase {
                                &data->info()->tick_counter(),
                                data->jsgraph()->Dead());
     BranchElimination branch_condition_elimination(&graph_reducer,
-                                                   data->jsgraph(), temp_zone);
+                                                   data->jsgraph(), temp_zone,
+                                                   BranchElimination::kEARLY);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common(), temp_zone);
     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
@@ -1849,6 +1873,7 @@ struct InstructionSelectionPhase {
             ? InstructionSelector::kEnableSwitchJumpTable
             : InstructionSelector::kDisableSwitchJumpTable,
         &data->info()->tick_counter(),
+        data->address_of_max_unoptimized_frame_height(),
         data->info()->is_source_positions_enabled()
             ? InstructionSelector::kAllSourcePositions
             : InstructionSelector::kCallSourcePositions,
@@ -1856,10 +1881,9 @@ struct InstructionSelectionPhase {
         FLAG_turbo_instruction_scheduling
             ? InstructionSelector::kEnableScheduling
             : InstructionSelector::kDisableScheduling,
-        !data->isolate() || data->isolate()->serializer_enabled() ||
-                data->isolate()->IsGeneratingEmbeddedBuiltins()
-            ? InstructionSelector::kDisableRootsRelativeAddressing
-            : InstructionSelector::kEnableRootsRelativeAddressing,
+        data->roots_relative_addressing_enabled()
+            ? InstructionSelector::kEnableRootsRelativeAddressing
+            : InstructionSelector::kDisableRootsRelativeAddressing,
         data->info()->GetPoisoningMitigationLevel(),
         data->info()->trace_turbo_json_enabled()
             ? InstructionSelector::kEnableTraceTurboJson
@@ -2175,12 +2199,10 @@ bool PipelineImpl::CreateGraph() {
     data->node_origins()->AddDecorator();
   }
 
+  data->broker()->SetTargetNativeContextRef(data->native_context());
   if (FLAG_concurrent_inlining) {
-    data->broker()->StartSerializing();
-    Run<SerializeStandardObjectsPhase>();
+    Run<HeapBrokerInitializationPhase>();
     Run<SerializationPhase>();
-  } else {
-    data->broker()->SetNativeContextRef();
   }
 
   Run<GraphBuilderPhase>();
@@ -2219,8 +2241,7 @@ bool PipelineImpl::CreateGraph() {
       Run<CopyMetadataForConcurrentCompilePhase>();
       data->broker()->StopSerializing();
     } else {
-      data->broker()->StartSerializing();
-      Run<SerializeStandardObjectsPhase>();
+      Run<HeapBrokerInitializationPhase>();
       Run<CopyMetadataForConcurrentCompilePhase>();
       data->broker()->StopSerializing();
     }
@@ -2356,8 +2377,8 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
   JumpOptimizationInfo jump_opt;
   bool should_optimize_jumps =
       isolate->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
-  PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
-                    source_positions, &node_origins,
+  PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
+                    nullptr, source_positions, &node_origins,
                     should_optimize_jumps ? &jump_opt : nullptr, options);
   data.set_verify_graph(FLAG_verify_csa);
   std::unique_ptr<PipelineStatistics> pipeline_statistics;
@@ -2402,10 +2423,10 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
   // First run code generation on a copy of the pipeline, in order to be able to
   // repeat it for jump optimization. The first run has to happen on a temporary
   // pipeline to avoid deletion of zones on the main pipeline.
-  PipelineData second_data(&zone_stats, &info, isolate, data.graph(),
-                           data.schedule(), data.source_positions(),
-                           data.node_origins(), data.jump_optimization_info(),
-                           options);
+  PipelineData second_data(&zone_stats, &info, isolate, isolate->allocator(),
+                           data.graph(), data.schedule(),
+                           data.source_positions(), data.node_origins(),
+                           data.jump_optimization_info(), options);
   second_data.set_verify_graph(FLAG_verify_csa);
   PipelineImpl second_pipeline(&second_data);
   second_pipeline.SelectInstructionsAndAssemble(call_descriptor);
@@ -2421,6 +2442,23 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
   return code;
 }
 
+struct BlockStartsAsJSON {
+  const ZoneVector<int>* block_starts;
+};
+
+std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
+  out << ", \"blockIdToOffset\": {";
+  bool need_comma = false;
+  for (size_t i = 0; i < s.block_starts->size(); ++i) {
+    if (need_comma) out << ", ";
+    int offset = (*s.block_starts)[i];
+    out << "\"" << i << "\":" << offset;
+    need_comma = true;
+  }
+  out << "},";
+  return out;
+}
+
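
For reference, the fragment this operator<< contributes to the Turbolizer JSON has the following shape, mapping basic-block ids to code offsets (the offsets here are illustrative, not from a real compile):

    , "blockIdToOffset": {"0":0, "1":24, "2":56},
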
 // static
 wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
     wasm::WasmEngine* wasm_engine, CallDescriptor* call_descriptor,
@@ -2491,7 +2529,9 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
 
   if (info.trace_turbo_json_enabled()) {
     TurboJsonFile json_of(&info, std::ios_base::app);
-    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
+            << BlockStartsAsJSON{&code_generator->block_starts()}
+            << "\"data\":\"";
 #ifdef ENABLE_DISASSEMBLER
     std::stringstream disassembler_stream;
     Disassembler::Decode(
@@ -2551,8 +2591,8 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
   // Construct a pipeline for scheduling and code generation.
   ZoneStats zone_stats(isolate->allocator());
   NodeOriginTable* node_positions = new (info->zone()) NodeOriginTable(graph);
-  PipelineData data(&zone_stats, info, isolate, graph, schedule, nullptr,
-                    node_positions, nullptr, options);
+  PipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph,
+                    schedule, nullptr, node_positions, nullptr, options);
   std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
     pipeline_statistics.reset(new PipelineStatistics(
@@ -2684,7 +2724,9 @@ void Pipeline::GenerateCodeForWasmFunction(
 
   if (data.info()->trace_turbo_json_enabled()) {
     TurboJsonFile json_of(data.info(), std::ios_base::app);
-    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
+            << BlockStartsAsJSON{&code_generator->block_starts()}
+            << "\"data\":\"";
 #ifdef ENABLE_DISASSEMBLER
     std::stringstream disassembler_stream;
     Disassembler::Decode(
@@ -2888,7 +2930,7 @@ void PipelineImpl::VerifyGeneratedCodeIsIdempotent() {
 }
 
 struct InstructionStartsAsJSON {
-  const ZoneVector<int>* instr_starts;
+  const ZoneVector<TurbolizerInstructionStartInfo>* instr_starts;
 };
 
 std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
@@ -2896,14 +2938,39 @@ std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
   bool need_comma = false;
   for (size_t i = 0; i < s.instr_starts->size(); ++i) {
     if (need_comma) out << ", ";
-    int offset = (*s.instr_starts)[i];
-    out << "\"" << i << "\":" << offset;
+    const TurbolizerInstructionStartInfo& info = (*s.instr_starts)[i];
+    out << "\"" << i << "\": {";
+    out << "\"gap\": " << info.gap_pc_offset;
+    out << ", \"arch\": " << info.arch_instr_pc_offset;
+    out << ", \"condition\": " << info.condition_pc_offset;
+    out << "}";
     need_comma = true;
   }
   out << "}";
   return out;
 }
 
+struct TurbolizerCodeOffsetsInfoAsJSON {
+  const TurbolizerCodeOffsetsInfo* offsets_info;
+};
+
+std::ostream& operator<<(std::ostream& out,
+                         const TurbolizerCodeOffsetsInfoAsJSON& s) {
+  out << ", \"codeOffsetsInfo\": {";
+  out << "\"codeStartRegisterCheck\": "
+      << s.offsets_info->code_start_register_check << ", ";
+  out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", ";
+  out << "\"initPoison\": " << s.offsets_info->init_poison << ", ";
+  out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", ";
+  out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", ";
+  out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits
+      << ", ";
+  out << "\"pools\": " << s.offsets_info->pools << ", ";
+  out << "\"jumpTables\": " << s.offsets_info->jump_tables;
+  out << "}";
+  return out;
+}
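
Taken together with InstructionStartsAsJSON above, this emits per-instruction offset triples plus a code-layout summary into the Turbolizer JSON. The fragments have the following shape (all offsets made up for illustration):

    "0": {"gap": 0, "arch": 4, "condition": 8}, "1": {"gap": 12, "arch": 16, "condition": 20}
    , "codeOffsetsInfo": {"codeStartRegisterCheck": 0, "deoptCheck": 8, "initPoison": 16,
                          "blocksStart": 24, "outOfLineCode": 96, "deoptimizationExits": 128,
                          "pools": 160, "jumpTables": 160}
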
+
 void PipelineImpl::AssembleCode(Linkage* linkage,
                                 std::unique_ptr<AssemblerBuffer> buffer) {
   PipelineData* data = this->data_;
@@ -2915,30 +2982,15 @@ void PipelineImpl::AssembleCode(Linkage* linkage,
     TurboJsonFile json_of(data->info(), std::ios_base::app);
     json_of << "{\"name\":\"code generation\""
             << ", \"type\":\"instructions\""
-            << InstructionStartsAsJSON{&data->code_generator()->instr_starts()};
+            << InstructionStartsAsJSON{&data->code_generator()->instr_starts()}
+            << TurbolizerCodeOffsetsInfoAsJSON{
+                   &data->code_generator()->offsets_info()};
     json_of << "},\n";
   }
   data->DeleteInstructionZone();
   data->EndPhaseKind();
 }
 
-struct BlockStartsAsJSON {
-  const ZoneVector<int>* block_starts;
-};
-
-std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
-  out << ", \"blockIdToOffset\": {";
-  bool need_comma = false;
-  for (size_t i = 0; i < s.block_starts->size(); ++i) {
-    if (need_comma) out << ", ";
-    int offset = (*s.block_starts)[i];
-    out << "\"" << i << "\":" << offset;
-    need_comma = true;
-  }
-  out << "},";
-  return out;
-}
-
 MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
   PipelineData* data = this->data_;
   if (data->broker() && retire_broker) {
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 6898faaad0d432..3707bfb06e5d59 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -61,9 +61,10 @@ class Pipeline : public AllStatic {
 
   // Returns a new compilation job for a wasm heap stub.
   static std::unique_ptr<OptimizedCompilationJob> NewWasmHeapStubCompilationJob(
-      Isolate* isolate, CallDescriptor* call_descriptor,
-      std::unique_ptr<Zone> zone, Graph* graph, Code::Kind kind,
-      std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
+      Isolate* isolate, wasm::WasmEngine* wasm_engine,
+      CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
+      Code::Kind kind, std::unique_ptr<char[]> debug_name,
+      const AssemblerOptions& options,
       SourcePositionTable* source_positions = nullptr);
 
   // Run the pipeline on a machine graph and generate code.
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
new file mode 100644
index 00000000000000..17829863de1a09
--- /dev/null
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -0,0 +1,226 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PROCESSED_FEEDBACK_H_
+#define V8_COMPILER_PROCESSED_FEEDBACK_H_
+
+#include "src/compiler/heap-refs.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BinaryOperationFeedback;
+class CallFeedback;
+class CompareOperationFeedback;
+class ElementAccessFeedback;
+class ForInFeedback;
+class GlobalAccessFeedback;
+class InstanceOfFeedback;
+class NamedAccessFeedback;
+
+class ProcessedFeedback : public ZoneObject {
+ public:
+  enum Kind {
+    kInsufficient,
+    kBinaryOperation,
+    kCall,
+    kCompareOperation,
+    kElementAccess,
+    kForIn,
+    kGlobalAccess,
+    kInstanceOf,
+    kNamedAccess,
+  };
+  Kind kind() const { return kind_; }
+
+  FeedbackSlotKind slot_kind() const { return slot_kind_; }
+  bool IsInsufficient() const { return kind() == kInsufficient; }
+
+  BinaryOperationFeedback const& AsBinaryOperation() const;
+  CallFeedback const& AsCall() const;
+  CompareOperationFeedback const& AsCompareOperation() const;
+  ElementAccessFeedback const& AsElementAccess() const;
+  ForInFeedback const& AsForIn() const;
+  GlobalAccessFeedback const& AsGlobalAccess() const;
+  InstanceOfFeedback const& AsInstanceOf() const;
+  NamedAccessFeedback const& AsNamedAccess() const;
+
+ protected:
+  ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind);
+
+ private:
+  Kind const kind_;
+  FeedbackSlotKind const slot_kind_;
+};
+
+class InsufficientFeedback final : public ProcessedFeedback {
+ public:
+  explicit InsufficientFeedback(FeedbackSlotKind slot_kind);
+};
+
+class GlobalAccessFeedback : public ProcessedFeedback {
+ public:
+  GlobalAccessFeedback(PropertyCellRef cell, FeedbackSlotKind slot_kind);
+  GlobalAccessFeedback(ContextRef script_context, int slot_index,
+                       bool immutable, FeedbackSlotKind slot_kind);
+  explicit GlobalAccessFeedback(FeedbackSlotKind slot_kind);  // Megamorphic
+
+  bool IsMegamorphic() const;
+
+  bool IsPropertyCell() const;
+  PropertyCellRef property_cell() const;
+
+  bool IsScriptContextSlot() const;
+  ContextRef script_context() const;
+  int slot_index() const;
+  bool immutable() const;
+
+  base::Optional<ObjectRef> GetConstantHint() const;
+
+ private:
+  base::Optional<ObjectRef> const cell_or_context_;
+  int const index_and_immutable_;
+};
+
+class KeyedAccessMode {
+ public:
+  static KeyedAccessMode FromNexus(FeedbackNexus const& nexus);
+
+  AccessMode access_mode() const;
+  bool IsLoad() const;
+  bool IsStore() const;
+  KeyedAccessLoadMode load_mode() const;
+  KeyedAccessStoreMode store_mode() const;
+
+ private:
+  AccessMode const access_mode_;
+  union LoadStoreMode {
+    LoadStoreMode(KeyedAccessLoadMode load_mode);
+    LoadStoreMode(KeyedAccessStoreMode store_mode);
+    KeyedAccessLoadMode load_mode;
+    KeyedAccessStoreMode store_mode;
+  } const load_store_mode_;
+
+  KeyedAccessMode(AccessMode access_mode, KeyedAccessLoadMode load_mode);
+  KeyedAccessMode(AccessMode access_mode, KeyedAccessStoreMode store_mode);
+};
+
+class ElementAccessFeedback : public ProcessedFeedback {
+ public:
+  ElementAccessFeedback(Zone* zone, KeyedAccessMode const& keyed_mode,
+                        FeedbackSlotKind slot_kind);
+
+  KeyedAccessMode keyed_mode() const;
+
+  // A transition group is a target and a possibly empty set of sources that can
+  // transition to the target. It is represented as a non-empty vector with the
+  // target at index 0.
+  using TransitionGroup = ZoneVector<Handle<Map>>;
+  ZoneVector<TransitionGroup> const& transition_groups() const;
+
+  bool HasOnlyStringMaps(JSHeapBroker* broker) const;
+
+  void AddGroup(TransitionGroup&& group);
+
+  // Refine {this} by trying to restrict it to the maps in {inferred_maps}. A
+  // transition group's target is kept iff it is in {inferred_maps} or if more
+  // than one of its sources is in {inferred_maps}. Here's an (unrealistic)
+  // example showing all the possible situations:
+  //
+  // inferred_maps = [a0, a2, c1, c2, d1, e0, e1]
+  //
+  // Groups before:                     Groups after:
+  // [a0, a1, a2]                       [a0, a2]
+  // [b0]
+  // [c0, c1, c2, c3]                   [c0, c1, c2]
+  // [d0, d1]                           [d1]
+  // [e0, e1]                           [e0, e1]
+  //
+  ElementAccessFeedback const& Refine(
+      ZoneVector<Handle<Map>> const& inferred_maps, Zone* zone) const;
+
+ private:
+  KeyedAccessMode const keyed_mode_;
+  ZoneVector<TransitionGroup> transition_groups_;
+};
+
+class NamedAccessFeedback : public ProcessedFeedback {
+ public:
+  NamedAccessFeedback(NameRef const& name, ZoneVector<Handle<Map>> const& maps,
+                      FeedbackSlotKind slot_kind);
+
+  NameRef const& name() const { return name_; }
+  ZoneVector<Handle<Map>> const& maps() const { return maps_; }
+
+ private:
+  NameRef const name_;
+  ZoneVector<Handle<Map>> const maps_;
+};
+
+class CallFeedback : public ProcessedFeedback {
+ public:
+  CallFeedback(base::Optional<HeapObjectRef> target, float frequency,
+               SpeculationMode mode, FeedbackSlotKind slot_kind)
+      : ProcessedFeedback(kCall, slot_kind),
+        target_(target),
+        frequency_(frequency),
+        mode_(mode) {}
+
+  base::Optional<HeapObjectRef> target() const { return target_; }
+  float frequency() const { return frequency_; }
+  SpeculationMode speculation_mode() const { return mode_; }
+
+ private:
+  base::Optional<HeapObjectRef> const target_;
+  float const frequency_;
+  SpeculationMode const mode_;
+};
+
+template <class T, ProcessedFeedback::Kind K>
+class SingleValueFeedback : public ProcessedFeedback {
+ public:
+  explicit SingleValueFeedback(T value, FeedbackSlotKind slot_kind)
+      : ProcessedFeedback(K, slot_kind), value_(value) {
+    DCHECK(
+        (K == kBinaryOperation && slot_kind == FeedbackSlotKind::kBinaryOp) ||
+        (K == kCompareOperation && slot_kind == FeedbackSlotKind::kCompareOp) ||
+        (K == kForIn && slot_kind == FeedbackSlotKind::kForIn) ||
+        (K == kInstanceOf && slot_kind == FeedbackSlotKind::kInstanceOf));
+  }
+
+  T value() const { return value_; }
+
+ private:
+  T const value_;
+};
+
+class InstanceOfFeedback
+    : public SingleValueFeedback<base::Optional<JSObjectRef>,
+                                 ProcessedFeedback::kInstanceOf> {
+  using SingleValueFeedback::SingleValueFeedback;
+};
+
+class BinaryOperationFeedback
+    : public SingleValueFeedback<BinaryOperationHint,
+                                 ProcessedFeedback::kBinaryOperation> {
+  using SingleValueFeedback::SingleValueFeedback;
+};
+
+class CompareOperationFeedback
+    : public SingleValueFeedback<CompareOperationHint,
+                                 ProcessedFeedback::kCompareOperation> {
+  using SingleValueFeedback::SingleValueFeedback;
+};
+
+class ForInFeedback
+    : public SingleValueFeedback<ForInHint, ProcessedFeedback::kForIn> {
+  using SingleValueFeedback::SingleValueFeedback;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_PROCESSED_FEEDBACK_H_
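
A hedged sketch of how a consumer typically dispatches on this hierarchy, using only the accessors declared in the header above; the HandleCall/HandleNamedAccess helpers are hypothetical names, not V8 API:

    // Illustration only: not standalone-compilable outside of V8, since it
    // uses the compiler-internal types declared in processed-feedback.h.
    void Dispatch(ProcessedFeedback const& feedback) {
      if (feedback.IsInsufficient()) return;  // no usable type information yet
      switch (feedback.kind()) {
        case ProcessedFeedback::kCall: {
          CallFeedback const& call = feedback.AsCall();
          HandleCall(call.target(), call.frequency(), call.speculation_mode());
          break;
        }
        case ProcessedFeedback::kNamedAccess: {
          NamedAccessFeedback const& access = feedback.AsNamedAccess();
          HandleNamedAccess(access.name(), access.maps());
          break;
        }
        default:
          break;  // remaining kinds elided from this sketch
      }
    }
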
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 99a06ef874a289..6c33ae227fdc00 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -61,7 +61,7 @@ bool PropertyAccessBuilder::TryBuildStringCheck(
     // Monomorphic string access (ignoring the fact that there are multiple
     // String maps).
     *receiver = *effect =
-        graph()->NewNode(simplified()->CheckString(VectorSlotPair()), *receiver,
+        graph()->NewNode(simplified()->CheckString(FeedbackSource()), *receiver,
                          *effect, control);
     return true;
   }
@@ -74,7 +74,7 @@ bool PropertyAccessBuilder::TryBuildNumberCheck(
   if (HasOnlyNumberMaps(broker, maps)) {
     // Monomorphic number access (we also deal with Smis here).
     *receiver = *effect =
-        graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), *receiver,
+        graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), *receiver,
                          *effect, control);
     return true;
   }
@@ -151,14 +151,6 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
 Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
     NameRef const& name, PropertyAccessInfo const& access_info,
     Node* receiver) {
-  // TODO(neis): Eliminate FastPropertyAt call below by doing the lookup during
-  // acccess info computation. Requires extra care in the case where the
-  // receiver is the holder.
-  AllowCodeDependencyChange dependency_change_;
-  AllowHandleAllocation handle_allocation_;
-  AllowHandleDereference handle_dereference_;
-  AllowHeapAllocation heap_allocation_;
-
   if (!access_info.IsDataConstant()) return nullptr;
 
   // First, determine if we have a constant holder to load from.
@@ -174,17 +166,21 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
     MapRef receiver_map = m.Ref(broker()).map();
     if (std::find_if(access_info.receiver_maps().begin(),
                      access_info.receiver_maps().end(), [&](Handle<Map> map) {
-                       return map.equals(receiver_map.object());
+                       return MapRef(broker(), map).equals(receiver_map);
                      }) == access_info.receiver_maps().end()) {
       // The map of the receiver is not in the feedback, let us bail out.
       return nullptr;
     }
-    holder = Handle<JSObject>::cast(m.Value());
+    holder = m.Ref(broker()).AsJSObject().object();
   }
 
-  Handle<Object> value = JSObject::FastPropertyAt(
-      holder, access_info.field_representation(), access_info.field_index());
-  return jsgraph()->Constant(value);
+  JSObjectRef holder_ref(broker(), holder);
+  base::Optional<ObjectRef> value = holder_ref.GetOwnDataProperty(
+      access_info.field_representation(), access_info.field_index());
+  if (!value.has_value()) {
+    return nullptr;
+  }
+  return jsgraph()->Constant(*value);
 }
 
 Node* PropertyAccessBuilder::BuildLoadDataField(
@@ -203,12 +199,10 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
   Node* storage = ResolveHolder(access_info, receiver);
   if (!field_index.is_inobject()) {
     storage = *effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSObjectPropertiesOrHash()),
+        simplified()->LoadField(
+            AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer()),
         storage, *effect, *control);
   }
-  PropertyConstness constness = access_info.IsDataConstant()
-                                    ? PropertyConstness::kConst
-                                    : PropertyConstness::kMutable;
   FieldAccess field_access = {
       kTaggedBase,
       field_index.offset(),
@@ -218,7 +212,7 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
       MachineType::TypeForRepresentation(field_representation),
       kFullWriteBarrier,
       LoadSensitivity::kCritical,
-      constness};
+      access_info.GetConstFieldInfo()};
   if (field_representation == MachineRepresentation::kFloat64) {
     if (!field_index.is_inobject() || !FLAG_unbox_double_fields) {
       FieldAccess const storage_access = {
@@ -230,7 +224,7 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
           MachineType::TypeCompressedTaggedPointer(),
           kPointerWriteBarrier,
           LoadSensitivity::kCritical,
-          constness};
+          access_info.GetConstFieldInfo()};
       storage = *effect = graph()->NewNode(
           simplified()->LoadField(storage_access), storage, *effect, *control);
       field_access.offset = HeapNumber::kValueOffset;
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 277c89c932e92f..e399b9c4f6b424 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -77,7 +77,7 @@ Node* RawMachineAssembler::OptimizedAllocate(
       size);
 }
 
-Schedule* RawMachineAssembler::Export() {
+Schedule* RawMachineAssembler::ExportForTest() {
   // Compute the correct codegen order.
   DCHECK(schedule_->rpo_order()->empty());
   if (FLAG_trace_turbo_scheduler) {
@@ -106,6 +106,7 @@ Graph* RawMachineAssembler::ExportForOptimization() {
     StdoutStream{} << *schedule_;
   }
   schedule_->EnsureCFGWellFormedness();
+  OptimizeControlFlow(schedule_, graph(), common());
   Scheduler::ComputeSpecialRPO(zone(), schedule_);
   if (FLAG_trace_turbo_scheduler) {
     PrintF("--- SCHEDULE BEFORE GRAPH CREATION -------------------------\n");
@@ -117,6 +118,99 @@ Graph* RawMachineAssembler::ExportForOptimization() {
   return graph();
 }
 
+void RawMachineAssembler::OptimizeControlFlow(Schedule* schedule, Graph* graph,
+                                              CommonOperatorBuilder* common) {
+  for (bool changed = true; changed;) {
+    changed = false;
+    for (size_t i = 0; i < schedule->all_blocks()->size(); ++i) {
+      BasicBlock* block = (*schedule->all_blocks())[i];
+      if (block == nullptr) continue;
+
+      // Short-circuit a goto if the succeeding block is not a control-flow
+      // merge. This is not really useful on its own since graph construction
+      // has the same effect, but combining blocks improves the pattern-match on
+      // their structure below.
+      if (block->control() == BasicBlock::kGoto) {
+        DCHECK_EQ(block->SuccessorCount(), 1);
+        BasicBlock* successor = block->SuccessorAt(0);
+        if (successor->PredecessorCount() == 1) {
+          DCHECK_EQ(successor->PredecessorAt(0), block);
+          for (Node* node : *successor) {
+            schedule->SetBlockForNode(nullptr, node);
+            schedule->AddNode(block, node);
+          }
+          block->set_control(successor->control());
+          Node* control_input = successor->control_input();
+          block->set_control_input(control_input);
+          if (control_input) {
+            schedule->SetBlockForNode(block, control_input);
+          }
+          if (successor->deferred()) block->set_deferred(true);
+          block->ClearSuccessors();
+          schedule->MoveSuccessors(successor, block);
+          schedule->ClearBlockById(successor->id());
+          changed = true;
+          --i;
+          continue;
+        }
+      }
+      // Block-cloning in the simple case where a block consists only of a phi
+      // node and a branch on that phi. This just duplicates the branch block
+      // for each predecessor, replacing the phi node with the corresponding phi
+      // input.
+      if (block->control() == BasicBlock::kBranch && block->NodeCount() == 1) {
+        Node* phi = block->NodeAt(0);
+        if (phi->opcode() != IrOpcode::kPhi) continue;
+        Node* branch = block->control_input();
+        DCHECK_EQ(branch->opcode(), IrOpcode::kBranch);
+        if (NodeProperties::GetValueInput(branch, 0) != phi) continue;
+        if (phi->UseCount() != 1) continue;
+        DCHECK_EQ(phi->op()->ValueInputCount(), block->PredecessorCount());
+
+        // Turn projection blocks into normal blocks.
+        DCHECK_EQ(block->SuccessorCount(), 2);
+        BasicBlock* true_block = block->SuccessorAt(0);
+        BasicBlock* false_block = block->SuccessorAt(1);
+        DCHECK_EQ(true_block->NodeAt(0)->opcode(), IrOpcode::kIfTrue);
+        DCHECK_EQ(false_block->NodeAt(0)->opcode(), IrOpcode::kIfFalse);
+        (*true_block->begin())->Kill();
+        true_block->RemoveNode(true_block->begin());
+        (*false_block->begin())->Kill();
+        false_block->RemoveNode(false_block->begin());
+        true_block->ClearPredecessors();
+        false_block->ClearPredecessors();
+
+        size_t arity = block->PredecessorCount();
+        for (size_t i = 0; i < arity; ++i) {
+          BasicBlock* predecessor = block->PredecessorAt(i);
+          predecessor->ClearSuccessors();
+          if (block->deferred()) predecessor->set_deferred(true);
+          Node* branch_clone = graph->CloneNode(branch);
+          int phi_input = static_cast<int>(i);
+          NodeProperties::ReplaceValueInput(
+              branch_clone, NodeProperties::GetValueInput(phi, phi_input), 0);
+          BasicBlock* new_true_block = schedule->NewBasicBlock();
+          BasicBlock* new_false_block = schedule->NewBasicBlock();
+          new_true_block->AddNode(
+              graph->NewNode(common->IfTrue(), branch_clone));
+          new_false_block->AddNode(
+              graph->NewNode(common->IfFalse(), branch_clone));
+          schedule->AddGoto(new_true_block, true_block);
+          schedule->AddGoto(new_false_block, false_block);
+          DCHECK_EQ(predecessor->control(), BasicBlock::kGoto);
+          predecessor->set_control(BasicBlock::kNone);
+          schedule->AddBranch(predecessor, branch_clone, new_true_block,
+                              new_false_block);
+        }
+        branch->Kill();
+        schedule->ClearBlockById(block->id());
+        changed = true;
+        continue;
+      }
+    }
+  }
+}
+
 void RawMachineAssembler::MakeReschedulable() {
   std::vector<Node*> block_final_control(schedule_->all_blocks_.size());
   std::vector<Node*> block_final_effect(schedule_->all_blocks_.size());
@@ -619,8 +713,10 @@ Node* CallCFunctionImpl(
   builder.AddReturn(return_type);
   for (const auto& arg : args) builder.AddParam(arg.first);
 
-  auto call_descriptor =
-      Linkage::GetSimplifiedCDescriptor(rasm->zone(), builder.Build());
+  auto call_descriptor = Linkage::GetSimplifiedCDescriptor(
+      rasm->zone(), builder.Build(),
+      caller_saved_regs ? CallDescriptor::kCallerSavedRegisters
+                        : CallDescriptor::kNoFlags);
 
   if (caller_saved_regs) call_descriptor->set_save_fp_mode(mode);
 
@@ -631,10 +727,8 @@ Node* CallCFunctionImpl(
       [](const RawMachineAssembler::CFunctionArg& arg) { return arg.second; });
 
   auto common = rasm->common();
-  return rasm->AddNode(
-      caller_saved_regs ? common->CallWithCallerSavedRegisters(call_descriptor)
-                        : common->Call(call_descriptor),
-      static_cast<int>(nodes.size()), nodes.begin());
+  return rasm->AddNode(common->Call(call_descriptor),
+                       static_cast<int>(nodes.size()), nodes.begin());
 }
 
 }  // namespace
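
The two rewrites in OptimizeControlFlow above (goto short-circuiting and phi/branch block cloning) are easiest to see on a toy CFG. Below is a self-contained sketch of just the goto-merging step, with plain structs standing in for V8's Schedule and BasicBlock:

```cpp
#include <cstdio>
#include <vector>

// Toy CFG, not V8's Schedule: each block owns node ids, knows its single
// goto successor (-1 = none), and tracks its predecessor count.
struct Block {
  std::vector<int> nodes;
  int goto_succ = -1;
  int pred_count = 0;
  bool dead = false;
};

// Mirrors the goto short-circuiting loop above: a block ending in a goto
// absorbs its successor whenever that successor has no other predecessors,
// and the absorbed block is tombstoned (like ClearBlockById), not erased.
void MergeGotos(std::vector<Block>* g) {
  for (bool changed = true; changed;) {
    changed = false;
    for (Block& b : *g) {
      if (b.dead || b.goto_succ < 0) continue;
      Block& s = (*g)[b.goto_succ];
      if (s.pred_count != 1) continue;
      b.nodes.insert(b.nodes.end(), s.nodes.begin(), s.nodes.end());
      b.goto_succ = s.goto_succ;  // inherit the successor's control flow
      s.dead = true;
      changed = true;
    }
  }
}

int main() {
  //  b0 --goto--> b1 --goto--> b2   collapses into a single block.
  std::vector<Block> g(3);
  g[0] = {{10, 11}, 1, 0, false};
  g[1] = {{12}, 2, 1, false};
  g[2] = {{13}, -1, 1, false};
  MergeGotos(&g);
  for (int id : g[0].nodes) std::printf("%d ", id);  // 10 11 12 13
}
```
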
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 890c38c5515af7..46940df44f88f5 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -65,9 +65,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   CallDescriptor* call_descriptor() const { return call_descriptor_; }
   PoisoningMitigationLevel poisoning_level() const { return poisoning_level_; }
 
-  // Finalizes the schedule and exports it to be used for code generation. Note
-  // that this RawMachineAssembler becomes invalid after export.
-  Schedule* Export();
+  // Only used for tests: Finalizes the schedule and exports it to be used for
+  // code generation. Note that this RawMachineAssembler becomes invalid after
+  // export.
+  Schedule* ExportForTest();
   // Finalizes the schedule and transforms it into a graph that is suitable
   // for Turbofan optimization and re-scheduling. Note that this
   // RawMachineAssembler becomes invalid after export.
@@ -577,6 +578,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Node* Word32PairSar(Node* low_word, Node* high_word, Node* shift) {
     return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
   }
+  Node* StackPointerGreaterThan(Node* value) {
+    return AddNode(machine()->StackPointerGreaterThan(), value);
+  }
 
 #define INTPTR_BINOP(prefix, name)                           \
   Node* IntPtr##name(Node* a, Node* b) {                     \
@@ -907,7 +911,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   }
 
   // Stack operations.
-  Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
   Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
   Node* LoadParentFramePointer() {
     return AddNode(machine()->LoadParentFramePointer());
@@ -1091,6 +1094,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Schedule* schedule() { return schedule_; }
   size_t parameter_count() const { return call_descriptor_->ParameterCount(); }
 
+  static void OptimizeControlFlow(Schedule* schedule, Graph* graph,
+                                  CommonOperatorBuilder* common);
+
   Isolate* isolate_;
   Graph* graph_;
   Schedule* schedule_;
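
The header changes above remove the raw LoadStackPointer accessor in favor of a single StackPointerGreaterThan predicate. A hedged, standalone analogue of that API shape (approximating the stack pointer with a local's address, which is not how the backend implements it):

```cpp
#include <cstdint>
#include <cstdio>

// Toy analogue of the API change: instead of exposing the raw stack pointer
// and letting each caller build the comparison, expose one predicate that
// keeps the comparison inside the implementation. The address of a local is
// only an approximation of the machine stack pointer.
namespace toy {
uintptr_t ApproxStackPointer() {
  int probe = 0;
  return reinterpret_cast<uintptr_t>(&probe);
}
bool StackPointerGreaterThan(uintptr_t limit) {
  return ApproxStackPointer() > limit;
}
}  // namespace toy

int main() {
  // A fake limit 64 KiB below the current position (stacks grow down on
  // mainstream platforms), so the check should pass.
  uintptr_t limit = toy::ApproxStackPointer() - 64 * 1024;
  std::printf("stack ok: %d\n", toy::StackPointerGreaterThan(limit));
}
```
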
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 7a4577b799a0e7..fd0cbabe6685d1 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -316,12 +316,12 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
       node = InsertChangeFloat64ToUint32(node);
       op = simplified()->CheckedUint32ToTaggedSigned(use_info.feedback());
     } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
-      op = simplified()->CheckedFloat64ToInt32(
+      node = InsertCheckedFloat64ToInt32(
+          node,
           output_type.Maybe(Type::MinusZero())
               ? CheckForMinusZeroMode::kCheckForMinusZero
               : CheckForMinusZeroMode::kDontCheckForMinusZero,
-          use_info.feedback());
-      node = InsertConversion(node, op, use_node);
+          use_info.feedback(), use_node);
       if (SmiValuesAre32Bits()) {
         op = simplified()->ChangeInt32ToTagged();
       } else {
@@ -333,14 +333,13 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
-      op = machine()->ChangeFloat32ToFloat64();
-      node = InsertConversion(node, op, use_node);
-      op = simplified()->CheckedFloat64ToInt32(
+      node = InsertChangeFloat32ToFloat64(node);
+      node = InsertCheckedFloat64ToInt32(
+          node,
           output_type.Maybe(Type::MinusZero())
               ? CheckForMinusZeroMode::kCheckForMinusZero
               : CheckForMinusZeroMode::kDontCheckForMinusZero,
-          use_info.feedback());
-      node = InsertConversion(node, op, use_node);
+          use_info.feedback(), use_node);
       if (SmiValuesAre32Bits()) {
         op = simplified()->ChangeInt32ToTagged();
       } else {
@@ -475,7 +474,7 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
   } else if (output_rep == MachineRepresentation::kCompressedPointer) {
     if (use_info.type_check() == TypeCheckKind::kBigInt &&
         !output_type.Is(Type::BigInt())) {
-      node = InsertChangeCompressedToTagged(node);
+      node = InsertChangeCompressedPointerToTaggedPointer(node);
       op = simplified()->CheckBigInt(use_info.feedback());
     } else {
       op = machine()->ChangeCompressedPointerToTaggedPointer();
@@ -671,13 +670,48 @@ Node* RepresentationChanger::GetCompressedSignedRepresentationFor(
                                             use_node, use_info);
     op = machine()->ChangeTaggedSignedToCompressedSigned();
   } else if (output_rep == MachineRepresentation::kFloat32) {
-    node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
-                                            use_node, use_info);
-    op = machine()->ChangeTaggedSignedToCompressedSigned();
+    // float32 -> float64 -> int32 -> compressed signed
+    if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+      node = InsertChangeFloat32ToFloat64(node);
+      node = InsertCheckedFloat64ToInt32(
+          node,
+          output_type.Maybe(Type::MinusZero())
+              ? CheckForMinusZeroMode::kCheckForMinusZero
+              : CheckForMinusZeroMode::kDontCheckForMinusZero,
+          use_info.feedback(), use_node);
+      op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback());
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kCompressedSigned);
+    }
   } else if (output_rep == MachineRepresentation::kFloat64) {
-    node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
-                                            use_node, use_info);
-    op = machine()->ChangeTaggedSignedToCompressedSigned();
+    if (output_type.Is(Type::Signed31())) {
+      // float64 -> int32 -> compressed signed
+      node = InsertChangeFloat64ToInt32(node);
+      op = simplified()->ChangeInt31ToCompressedSigned();
+    } else if (output_type.Is(Type::Signed32())) {
+      // float64 -> int32 -> compressed signed
+      node = InsertChangeFloat64ToInt32(node);
+      if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+        op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback());
+      } else {
+        return TypeError(node, output_rep, output_type,
+                         MachineRepresentation::kCompressedSigned);
+      }
+    } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+      node = InsertCheckedFloat64ToInt32(
+          node,
+          output_type.Maybe(Type::MinusZero())
+              ? CheckForMinusZeroMode::kCheckForMinusZero
+              : CheckForMinusZeroMode::kDontCheckForMinusZero,
+          use_info.feedback(), use_node);
+      op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback());
+    } else {
+      // TODO(v8:8977): specialize here and below. Missing the unsigned case.
+      node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
+                                              use_node, use_info);
+      op = machine()->ChangeTaggedSignedToCompressedSigned();
+    }
   } else {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kCompressedSigned);
@@ -830,20 +864,17 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
     }
   } else if (output_rep == MachineRepresentation::kCompressed) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedToTagged();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedToTagged(node);
     return GetFloat32RepresentationFor(node, MachineRepresentation::kTagged,
                                        output_type, truncation);
   } else if (output_rep == MachineRepresentation::kCompressedSigned) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedSignedToTaggedSigned();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedSignedToTaggedSigned(node);
     return GetFloat32RepresentationFor(
         node, MachineRepresentation::kTaggedSigned, output_type, truncation);
   } else if (output_rep == MachineRepresentation::kCompressedPointer) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedPointerToTaggedPointer();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedPointerToTaggedPointer(node);
     return GetFloat32RepresentationFor(
         node, MachineRepresentation::kTaggedPointer, output_type, truncation);
   } else if (output_rep == MachineRepresentation::kFloat64) {
@@ -948,21 +979,18 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
     }
   } else if (output_rep == MachineRepresentation::kCompressed) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedToTagged();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedToTagged(node);
     return GetFloat64RepresentationFor(node, MachineRepresentation::kTagged,
                                        output_type, use_node, use_info);
   } else if (output_rep == MachineRepresentation::kCompressedSigned) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedSignedToTaggedSigned();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedSignedToTaggedSigned(node);
     return GetFloat64RepresentationFor(node,
                                        MachineRepresentation::kTaggedSigned,
                                        output_type, use_node, use_info);
   } else if (output_rep == MachineRepresentation::kCompressedPointer) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedPointerToTaggedPointer();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedPointerToTaggedPointer(node);
     return GetFloat64RepresentationFor(node,
                                        MachineRepresentation::kTaggedPointer,
                                        output_type, use_node, use_info);
@@ -1116,8 +1144,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
     }
   } else if (output_rep == MachineRepresentation::kCompressed) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedToTagged();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedToTagged(node);
     return GetWord32RepresentationFor(node, MachineRepresentation::kTagged,
                                       output_type, use_node, use_info);
   } else if (output_rep == MachineRepresentation::kCompressedSigned) {
@@ -1125,16 +1152,14 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
     if (output_type.Is(Type::SignedSmall())) {
       op = simplified()->ChangeCompressedSignedToInt32();
     } else {
-      op = machine()->ChangeCompressedSignedToTaggedSigned();
-      node = jsgraph()->graph()->NewNode(op, node);
+      node = InsertChangeCompressedSignedToTaggedSigned(node);
       return GetWord32RepresentationFor(node,
                                         MachineRepresentation::kTaggedSigned,
                                         output_type, use_node, use_info);
     }
   } else if (output_rep == MachineRepresentation::kCompressedPointer) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedPointerToTaggedPointer();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedPointerToTaggedPointer(node);
     return GetWord32RepresentationFor(node,
                                       MachineRepresentation::kTaggedPointer,
                                       output_type, use_node, use_info);
@@ -1253,20 +1278,17 @@ Node* RepresentationChanger::GetBitRepresentationFor(
                                        jsgraph()->Int32Constant(0));
   } else if (output_rep == MachineRepresentation::kCompressed) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedToTagged();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedToTagged(node);
     return GetBitRepresentationFor(node, MachineRepresentation::kTagged,
                                    output_type);
   } else if (output_rep == MachineRepresentation::kCompressedSigned) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedSignedToTaggedSigned();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedSignedToTaggedSigned(node);
     return GetBitRepresentationFor(node, MachineRepresentation::kTaggedSigned,
                                    output_type);
   } else if (output_rep == MachineRepresentation::kCompressedPointer) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedPointerToTaggedPointer();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedPointerToTaggedPointer(node);
     return GetBitRepresentationFor(node, MachineRepresentation::kTaggedPointer,
                                    output_type);
   } else if (IsWord(output_rep)) {
@@ -1423,21 +1445,18 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
     }
   } else if (output_rep == MachineRepresentation::kCompressed) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedToTagged();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedToTagged(node);
     return GetWord64RepresentationFor(node, MachineRepresentation::kTagged,
                                       output_type, use_node, use_info);
   } else if (output_rep == MachineRepresentation::kCompressedSigned) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedSignedToTaggedSigned();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedSignedToTaggedSigned(node);
     return GetWord64RepresentationFor(node,
                                       MachineRepresentation::kTaggedSigned,
                                       output_type, use_node, use_info);
   } else if (output_rep == MachineRepresentation::kCompressedPointer) {
     // TODO(v8:8977): Specialise here
-    op = machine()->ChangeCompressedPointerToTaggedPointer();
-    node = jsgraph()->graph()->NewNode(op, node);
+    node = InsertChangeCompressedPointerToTaggedPointer(node);
     return GetWord64RepresentationFor(node,
                                       MachineRepresentation::kTaggedPointer,
                                       output_type, use_node, use_info);
@@ -1741,11 +1760,30 @@ Node* RepresentationChanger::InsertTruncateInt64ToInt32(Node* node) {
   return jsgraph()->graph()->NewNode(machine()->TruncateInt64ToInt32(), node);
 }
 
+Node* RepresentationChanger::InsertChangeCompressedPointerToTaggedPointer(
+    Node* node) {
+  return jsgraph()->graph()->NewNode(
+      machine()->ChangeCompressedPointerToTaggedPointer(), node);
+}
+
+Node* RepresentationChanger::InsertChangeCompressedSignedToTaggedSigned(
+    Node* node) {
+  return jsgraph()->graph()->NewNode(
+      machine()->ChangeCompressedSignedToTaggedSigned(), node);
+}
+
 Node* RepresentationChanger::InsertChangeCompressedToTagged(Node* node) {
   return jsgraph()->graph()->NewNode(machine()->ChangeCompressedToTagged(),
                                      node);
 }
 
+Node* RepresentationChanger::InsertCheckedFloat64ToInt32(
+    Node* node, CheckForMinusZeroMode check, const FeedbackSource& feedback,
+    Node* use_node) {
+  return InsertConversion(
+      node, simplified()->CheckedFloat64ToInt32(check, feedback), use_node);
+}
+
 Isolate* RepresentationChanger::isolate() const { return broker_->isolate(); }
 
 }  // namespace compiler
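
Most of the churn in this file is mechanical: the repeated two-line pattern `op = machine()->ChangeX(); node = jsgraph()->graph()->NewNode(op, node);` collapses into named InsertChangeX helpers. A toy illustration of the same refactor, with strings standing in for graph nodes:

```cpp
#include <cstdio>
#include <string>

// Toy graph node: just a textual description of the conversion chain.
struct Node { std::string desc; };

Node NewNode(const std::string& op, const Node& input) {
  return Node{op + "(" + input.desc + ")"};
}

// The helper-extraction pattern from this hunk: each repeated
//   op = ...; node = NewNode(op, node);
// pair becomes one named single-step helper at the call site.
Node InsertChangeCompressedToTagged(const Node& n) {
  return NewNode("ChangeCompressedToTagged", n);
}
Node InsertChangeFloat32ToFloat64(const Node& n) {
  return NewNode("ChangeFloat32ToFloat64", n);
}

int main() {
  Node n{"x"};
  n = InsertChangeFloat32ToFloat64(n);
  n = InsertChangeCompressedToTagged(n);
  std::printf("%s\n", n.desc.c_str());
  // Prints: ChangeCompressedToTagged(ChangeFloat32ToFloat64(x))
}
```
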
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index d3386676032b7a..43e85085badc9b 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -5,6 +5,7 @@
 #ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
 #define V8_COMPILER_REPRESENTATION_CHANGE_H_
 
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/simplified-operator.h"
 
@@ -165,7 +166,7 @@ class UseInfo {
  public:
   UseInfo(MachineRepresentation representation, Truncation truncation,
           TypeCheckKind type_check = TypeCheckKind::kNone,
-          const VectorSlotPair& feedback = VectorSlotPair())
+          const FeedbackSource& feedback = FeedbackSource())
       : representation_(representation),
         truncation_(truncation),
         type_check_(type_check),
@@ -176,7 +177,7 @@ class UseInfo {
   static UseInfo TruncatingWord64() {
     return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
   }
-  static UseInfo CheckedBigIntTruncatingWord64(const VectorSlotPair& feedback) {
+  static UseInfo CheckedBigIntTruncatingWord64(const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kWord64, Truncation::Word64(),
                    TypeCheckKind::kBigInt, feedback);
   }
@@ -219,59 +220,59 @@ class UseInfo {
 
   // Possibly deoptimizing conversions.
   static UseInfo CheckedHeapObjectAsTaggedPointer(
-      const VectorSlotPair& feedback) {
+      const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
                    TypeCheckKind::kHeapObject, feedback);
   }
 
-  static UseInfo CheckedBigIntAsTaggedPointer(const VectorSlotPair& feedback) {
+  static UseInfo CheckedBigIntAsTaggedPointer(const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
                    TypeCheckKind::kBigInt, feedback);
   }
 
   static UseInfo CheckedSignedSmallAsTaggedSigned(
-      const VectorSlotPair& feedback,
+      const FeedbackSource& feedback,
       IdentifyZeros identify_zeros = kDistinguishZeros) {
     return UseInfo(MachineRepresentation::kTaggedSigned,
                    Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall,
                    feedback);
   }
   static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros,
-                                            const VectorSlotPair& feedback) {
+                                            const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kWord32,
                    Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall,
                    feedback);
   }
   static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros,
-                                         const VectorSlotPair& feedback) {
+                                         const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kWord32,
                    Truncation::Any(identify_zeros), TypeCheckKind::kSigned32,
                    feedback);
   }
   static UseInfo CheckedSigned64AsWord64(IdentifyZeros identify_zeros,
-                                         const VectorSlotPair& feedback) {
+                                         const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kWord64,
                    Truncation::Any(identify_zeros), TypeCheckKind::kSigned64,
                    feedback);
   }
   static UseInfo CheckedNumberAsFloat64(IdentifyZeros identify_zeros,
-                                        const VectorSlotPair& feedback) {
+                                        const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kFloat64,
                    Truncation::Any(identify_zeros), TypeCheckKind::kNumber,
                    feedback);
   }
-  static UseInfo CheckedNumberAsWord32(const VectorSlotPair& feedback) {
+  static UseInfo CheckedNumberAsWord32(const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
                    TypeCheckKind::kNumber, feedback);
   }
   static UseInfo CheckedNumberOrOddballAsFloat64(
-      IdentifyZeros identify_zeros, const VectorSlotPair& feedback) {
+      IdentifyZeros identify_zeros, const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kFloat64,
                    Truncation::Any(identify_zeros),
                    TypeCheckKind::kNumberOrOddball, feedback);
   }
   static UseInfo CheckedNumberOrOddballAsWord32(
-      const VectorSlotPair& feedback) {
+      const FeedbackSource& feedback) {
     return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
                    TypeCheckKind::kNumberOrOddball, feedback);
   }
@@ -297,13 +298,13 @@ class UseInfo {
                ? CheckForMinusZeroMode::kDontCheckForMinusZero
                : CheckForMinusZeroMode::kCheckForMinusZero;
   }
-  const VectorSlotPair& feedback() const { return feedback_; }
+  const FeedbackSource& feedback() const { return feedback_; }
 
  private:
   MachineRepresentation representation_;
   Truncation truncation_;
   TypeCheckKind type_check_;
-  VectorSlotPair feedback_;
+  FeedbackSource feedback_;
 };
 
 // Contains logic related to changing the representation of values for constants
@@ -395,7 +396,12 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
   Node* InsertChangeTaggedSignedToInt32(Node* node);
   Node* InsertChangeTaggedToFloat64(Node* node);
   Node* InsertChangeUint32ToFloat64(Node* node);
+  Node* InsertChangeCompressedPointerToTaggedPointer(Node* node);
+  Node* InsertChangeCompressedSignedToTaggedSigned(Node* node);
   Node* InsertChangeCompressedToTagged(Node* node);
+  Node* InsertCheckedFloat64ToInt32(Node* node, CheckForMinusZeroMode check,
+                                    const FeedbackSource& feedback,
+                                    Node* use_node);
   Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
   Node* InsertTruncateInt64ToInt32(Node* node);
   Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason);
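
Every UseInfo factory above swaps VectorSlotPair for FeedbackSource. The shape of that migration, modeled with toy types (the members shown here are assumptions for the sketch, not V8's definitions):

```cpp
#include <cstdio>

// Toy stand-in for FeedbackSource (fields are assumptions for this sketch):
// a default-constructed value means "no feedback available".
struct FeedbackSource {
  int slot = -1;
  bool IsValid() const { return slot >= 0; }
};

// Toy UseInfo: only the feedback plumbing is modeled. The migration is a
// pure parameter-type change, so every factory signature moves in lockstep
// and a default argument keeps feedback-free call sites compiling.
class UseInfo {
 public:
  explicit UseInfo(FeedbackSource feedback = FeedbackSource())
      : feedback_(feedback) {}
  static UseInfo CheckedNumberAsWord32(const FeedbackSource& feedback) {
    return UseInfo(feedback);
  }
  const FeedbackSource& feedback() const { return feedback_; }

 private:
  FeedbackSource feedback_;
};

int main() {
  UseInfo u = UseInfo::CheckedNumberAsWord32(FeedbackSource{7});
  std::printf("valid=%d slot=%d\n", u.feedback().IsValid(), u.feedback().slot);
}
```
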
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 84d74b4685407b..3b335f9712f5c4 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -163,6 +163,11 @@ BasicBlock* Schedule::GetBlockById(BasicBlock::Id block_id) {
   return all_blocks_[block_id.ToSize()];
 }
 
+void Schedule::ClearBlockById(BasicBlock::Id block_id) {
+  DCHECK_LT(block_id.ToSize(), all_blocks_.size());
+  all_blocks_[block_id.ToSize()] = nullptr;
+}
+
 bool Schedule::SameBasicBlock(Node* a, Node* b) const {
   BasicBlock* block = this->block(a);
   return block != nullptr && block == this->block(b);
@@ -210,7 +215,6 @@ bool IsPotentiallyThrowingCall(IrOpcode::Value opcode) {
     JS_OP_LIST(BUILD_BLOCK_JS_CASE)
 #undef BUILD_BLOCK_JS_CASE
     case IrOpcode::kCall:
-    case IrOpcode::kCallWithCallerSavedRegisters:
       return true;
     default:
       return false;
@@ -321,9 +325,6 @@ void Schedule::EnsureCFGWellFormedness() {
       if (block != end_) {
         EnsureSplitEdgeForm(block);
       }
-      if (block->deferred()) {
-        EnsureDeferredCodeSingleEntryPoint(block);
-      }
     }
   }
 
@@ -356,6 +357,7 @@ void Schedule::EliminateRedundantPhiNodes() {
           }
           if (!inputs_equal) continue;
           node->ReplaceUses(first_input);
+          node->Kill();
           block->RemoveNode(block->begin() + node_pos);
           --node_pos;
           reached_fixed_point = false;
@@ -376,43 +378,6 @@ void Schedule::EnsureSplitEdgeForm(BasicBlock* block) {
 #endif
 }
 
-void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
-  // If a deferred block has multiple predecessors, they have to
-  // all be deferred. Otherwise, we can run into a situation where a range
-  // that spills only in deferred blocks inserts its spill in the block, but
-  // other ranges need moves inserted by ResolveControlFlow in the predecessors,
-  // which may clobber the register of this range.
-  // To ensure that, when a deferred block has multiple predecessors, and some
-  // are not deferred, we add a non-deferred block to collect all such edges.
-
-  DCHECK(block->deferred() && block->PredecessorCount() > 1);
-  bool all_deferred = true;
-  for (auto current_pred = block->predecessors().begin();
-       current_pred != block->predecessors().end(); ++current_pred) {
-    BasicBlock* pred = *current_pred;
-    if (!pred->deferred()) {
-      all_deferred = false;
-      break;
-    }
-  }
-
-  if (all_deferred) return;
-  BasicBlock* merger = NewBasicBlock();
-  merger->set_control(BasicBlock::kGoto);
-  merger->successors().push_back(block);
-  for (auto current_pred = block->predecessors().begin();
-       current_pred != block->predecessors().end(); ++current_pred) {
-    BasicBlock* pred = *current_pred;
-    merger->predecessors().push_back(pred);
-    pred->successors().clear();
-    pred->successors().push_back(merger);
-  }
-  merger->set_deferred(false);
-  block->predecessors().clear();
-  block->predecessors().push_back(merger);
-  MovePhis(block, merger);
-}
-
 void Schedule::MovePhis(BasicBlock* from, BasicBlock* to) {
   for (size_t i = 0; i < from->NodeCount();) {
     Node* node = from->NodeAt(i);
@@ -481,6 +446,7 @@ void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
 std::ostream& operator<<(std::ostream& os, const Schedule& s) {
   for (BasicBlock* block :
        ((s.RpoBlockCount() == 0) ? *s.all_blocks() : *s.rpo_order())) {
+    if (block == nullptr) continue;
     if (block->rpo_number() == -1) {
       os << "--- BLOCK id:" << block->id().ToInt();
     } else {
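
ClearBlockById nulls the slot instead of erasing it, so block ids stay stable; the cost is that every walk over all_blocks() now needs the nullptr guard added above. The same scheme in miniature:

```cpp
#include <cstdio>
#include <vector>

// Toy version of the tombstoning scheme: clearing a block leaves a nullptr
// hole (ids stay stable because nothing shifts), and iteration skips holes.
struct Block { int id; };

int main() {
  Block b0{0}, b1{1}, b2{2};
  std::vector<Block*> all_blocks = {&b0, &b1, &b2};
  all_blocks[1] = nullptr;  // ClearBlockById(1): tombstone, don't erase
  for (Block* b : all_blocks) {
    if (b == nullptr) continue;  // the guard added to operator<< above
    std::printf("block %d\n", b->id);
  }
}
```
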
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index aae2cd3ad839da..ea42951d50db3c 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -200,6 +200,7 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
 
   bool IsScheduled(Node* node);
   BasicBlock* GetBlockById(BasicBlock::Id block_id);
+  void ClearBlockById(BasicBlock::Id block_id);
 
   size_t BasicBlockCount() const { return all_blocks_.size(); }
   size_t RpoBlockCount() const { return rpo_order_.size(); }
@@ -280,8 +281,6 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
   void EliminateRedundantPhiNodes();
   // Ensure split-edge form for a hand-assembled schedule.
   void EnsureSplitEdgeForm(BasicBlock* block);
-  // Ensure entry into a deferred block happens from a single hot block.
-  void EnsureDeferredCodeSingleEntryPoint(BasicBlock* block);
   // Move Phi operands to newly created merger blocks.
   void MovePhis(BasicBlock* from, BasicBlock* to);
   // Copy deferred block markers down as far as possible.
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 25919bb3b3a35f..bf23e436f68f1e 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -359,7 +359,6 @@ class CFGBuilder : public ZoneObject {
 // JS opcodes are just like calls => fall through.
 #undef BUILD_BLOCK_JS_CASE
       case IrOpcode::kCall:
-      case IrOpcode::kCallWithCallerSavedRegisters:
         if (NodeProperties::IsExceptionalCall(node)) {
           BuildBlocksForSuccessors(node);
         }
@@ -404,7 +403,6 @@ class CFGBuilder : public ZoneObject {
 // JS opcodes are just like calls => fall through.
 #undef CONNECT_BLOCK_JS_CASE
       case IrOpcode::kCall:
-      case IrOpcode::kCallWithCallerSavedRegisters:
         if (NodeProperties::IsExceptionalCall(node)) {
           scheduler_->UpdatePlacement(node, Scheduler::kFixed);
           ConnectCall(node);
@@ -820,7 +818,7 @@ class SpecialRPONumberer : public ZoneObject {
     if (num_loops > static_cast<int>(loops_.size())) {
       // Otherwise, compute the loop information from the backedges in order
       // to perform a traversal that groups loop bodies together.
-      ComputeLoopInfo(stack_, num_loops, &backedges_);
+      ComputeLoopInfo(&stack_, num_loops, &backedges_);
 
       // Initialize the "loop stack". Note the entry could be a loop header.
       LoopInfo* loop =
@@ -962,9 +960,8 @@ class SpecialRPONumberer : public ZoneObject {
   }
 
   // Computes loop membership from the backedges of the control flow graph.
-  void ComputeLoopInfo(
-      ZoneVector<SpecialRPOStackFrame>& queue,  // NOLINT(runtime/references)
-      size_t num_loops, ZoneVector<Backedge>* backedges) {
+  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>* queue,
+                       size_t num_loops, ZoneVector<Backedge>* backedges) {
     // Extend existing loop membership vectors.
     for (LoopInfo& loop : loops_) {
       loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
@@ -993,19 +990,19 @@ class SpecialRPONumberer : public ZoneObject {
         if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
           loops_[loop_num].members->Add(member->id().ToInt());
         }
-        queue[queue_length++].block = member;
+        (*queue)[queue_length++].block = member;
       }
 
       // Propagate loop membership backwards. All predecessors of M up to the
       // loop header H are members of the loop too. O(|blocks between M and H|).
       while (queue_length > 0) {
-        BasicBlock* block = queue[--queue_length].block;
+        BasicBlock* block = (*queue)[--queue_length].block;
         for (size_t i = 0; i < block->PredecessorCount(); i++) {
           BasicBlock* pred = block->PredecessorAt(i);
           if (pred != header) {
             if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
               loops_[loop_num].members->Add(pred->id().ToInt());
-              queue[queue_length++].block = pred;
+              (*queue)[queue_length++].block = pred;
             }
           }
         }
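
The ComputeLoopInfo change is a pure style fix: the mutable reference parameter that needed a NOLINT(runtime/references) suppression becomes a pointer, making mutation visible at the call site. The pattern in isolation:

```cpp
#include <cstdio>
#include <vector>

// Out-parameters by pointer (Google C++ style): the caller writes '&', so
// it is obvious at the call site that the argument will be mutated.
void FillSquares(std::vector<int>* out, int n) {
  out->clear();
  for (int i = 0; i < n; ++i) out->push_back(i * i);
}

int main() {
  std::vector<int> squares;
  FillSquares(&squares, 4);  // '&' flags mutation; a reference would hide it
  for (int v : squares) std::printf("%d ", v);  // 0 1 4 9
}
```
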
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 5597850b0612c4..20d405b77579f9 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -11,13 +11,13 @@
 #include "src/compiler/bytecode-analysis.h"
 #include "src/compiler/compilation-dependencies.h"
 #include "src/compiler/js-heap-broker.h"
-#include "src/compiler/vector-slot-pair.h"
 #include "src/handles/handles-inl.h"
 #include "src/ic/call-optimization.h"
 #include "src/interpreter/bytecode-array-iterator.h"
 #include "src/objects/code.h"
 #include "src/objects/js-array-inl.h"
 #include "src/objects/js-regexp-inl.h"
+#include "src/objects/literal-objects-inl.h"
 #include "src/objects/shared-function-info-inl.h"
 #include "src/zone/zone-containers.h"
 #include "src/zone/zone.h"
@@ -38,63 +38,21 @@ namespace compiler {
   V(Throw)
 
 #define CLEAR_ACCUMULATOR_LIST(V) \
-  V(Add)                          \
-  V(AddSmi)                       \
-  V(BitwiseAnd)                   \
-  V(BitwiseAndSmi)                \
-  V(BitwiseNot)                   \
-  V(BitwiseOr)                    \
-  V(BitwiseOrSmi)                 \
-  V(BitwiseXor)                   \
-  V(BitwiseXorSmi)                \
   V(CallRuntime)                  \
   V(CloneObject)                  \
   V(CreateArrayFromIterable)      \
-  V(CreateArrayLiteral)           \
   V(CreateEmptyArrayLiteral)      \
   V(CreateEmptyObjectLiteral)     \
   V(CreateMappedArguments)        \
-  V(CreateObjectLiteral)          \
-  V(CreateRegExpLiteral)          \
   V(CreateRestParameter)          \
   V(CreateUnmappedArguments)      \
-  V(Dec)                          \
   V(DeletePropertySloppy)         \
   V(DeletePropertyStrict)         \
-  V(Div)                          \
-  V(DivSmi)                       \
-  V(Exp)                          \
-  V(ExpSmi)                       \
   V(ForInContinue)                \
   V(ForInEnumerate)               \
-  V(ForInNext)                    \
   V(ForInStep)                    \
-  V(Inc)                          \
-  V(LdaLookupSlot)                \
-  V(LdaLookupSlotInsideTypeof)    \
   V(LogicalNot)                   \
-  V(Mod)                          \
-  V(ModSmi)                       \
-  V(Mul)                          \
-  V(MulSmi)                       \
-  V(Negate)                       \
   V(SetPendingMessage)            \
-  V(ShiftLeft)                    \
-  V(ShiftLeftSmi)                 \
-  V(ShiftRight)                   \
-  V(ShiftRightLogical)            \
-  V(ShiftRightLogicalSmi)         \
-  V(ShiftRightSmi)                \
-  V(StaLookupSlot)                \
-  V(Sub)                          \
-  V(SubSmi)                       \
-  V(TestEqual)                    \
-  V(TestEqualStrict)              \
-  V(TestGreaterThan)              \
-  V(TestGreaterThanOrEqual)       \
-  V(TestInstanceOf)               \
-  V(TestLessThan)                 \
-  V(TestLessThanOrEqual)          \
   V(TestNull)                     \
   V(TestReferenceEqual)           \
   V(TestTypeOf)                   \
@@ -102,8 +60,6 @@ namespace compiler {
   V(TestUndetectable)             \
   V(ToBooleanLogicalNot)          \
   V(ToName)                       \
-  V(ToNumber)                     \
-  V(ToNumeric)                    \
   V(ToString)                     \
   V(TypeOf)
 
@@ -130,15 +86,13 @@ namespace compiler {
   V(JumpIfTrue)                   \
   V(JumpIfTrueConstant)           \
   V(JumpIfUndefined)              \
-  V(JumpIfUndefinedConstant)
+  V(JumpIfUndefinedConstant)      \
+  V(JumpIfUndefinedOrNull)        \
+  V(JumpIfUndefinedOrNullConstant)
 
 #define IGNORED_BYTECODE_LIST(V)      \
-  V(CallNoFeedback)                   \
   V(IncBlockCounter)                  \
-  V(LdaNamedPropertyNoFeedback)       \
   V(StackCheck)                       \
-  V(StaNamedPropertyNoFeedback)       \
-  V(ThrowReferenceErrorIfHole)        \
   V(ThrowSuperAlreadyCalledIfNotHole) \
   V(ThrowSuperNotCalledIfHole)
 
@@ -147,9 +101,50 @@ namespace compiler {
   V(Illegal)                         \
   V(Wide)
 
+#define BINARY_OP_LIST(V) \
+  V(Add)                  \
+  V(AddSmi)               \
+  V(BitwiseAnd)           \
+  V(BitwiseAndSmi)        \
+  V(BitwiseOr)            \
+  V(BitwiseOrSmi)         \
+  V(BitwiseXor)           \
+  V(BitwiseXorSmi)        \
+  V(Div)                  \
+  V(DivSmi)               \
+  V(Exp)                  \
+  V(ExpSmi)               \
+  V(Mod)                  \
+  V(ModSmi)               \
+  V(Mul)                  \
+  V(MulSmi)               \
+  V(ShiftLeft)            \
+  V(ShiftLeftSmi)         \
+  V(ShiftRight)           \
+  V(ShiftRightSmi)        \
+  V(ShiftRightLogical)    \
+  V(ShiftRightLogicalSmi) \
+  V(Sub)                  \
+  V(SubSmi)
+
+#define UNARY_OP_LIST(V) \
+  V(BitwiseNot)          \
+  V(Dec)                 \
+  V(Inc)                 \
+  V(Negate)
+
+#define COMPARE_OP_LIST(V)  \
+  V(TestEqual)              \
+  V(TestEqualStrict)        \
+  V(TestGreaterThan)        \
+  V(TestGreaterThanOrEqual) \
+  V(TestLessThan)           \
+  V(TestLessThanOrEqual)
+
 #define SUPPORTED_BYTECODE_LIST(V)    \
   V(CallAnyReceiver)                  \
   V(CallJSRuntime)                    \
+  V(CallNoFeedback)                   \
   V(CallProperty)                     \
   V(CallProperty0)                    \
   V(CallProperty1)                    \
@@ -161,12 +156,18 @@ namespace compiler {
   V(CallWithSpread)                   \
   V(Construct)                        \
   V(ConstructWithSpread)              \
+  V(CreateArrayLiteral)               \
   V(CreateBlockContext)               \
   V(CreateCatchContext)               \
   V(CreateClosure)                    \
   V(CreateEvalContext)                \
   V(CreateFunctionContext)            \
+  V(CreateObjectLiteral)              \
+  V(CreateRegExpLiteral)              \
   V(CreateWithContext)                \
+  V(ForInNext)                        \
+  V(ForInPrepare)                     \
+  V(GetIterator)                      \
   V(GetSuperConstructor)              \
   V(GetTemplateObject)                \
   V(InvokeIntrinsic)                  \
@@ -184,7 +185,10 @@ namespace compiler {
   V(LdaLookupContextSlotInsideTypeof) \
   V(LdaLookupGlobalSlot)              \
   V(LdaLookupGlobalSlotInsideTypeof)  \
+  V(LdaLookupSlot)                    \
+  V(LdaLookupSlotInsideTypeof)        \
   V(LdaNamedProperty)                 \
+  V(LdaNamedPropertyNoFeedback)       \
   V(LdaNull)                          \
   V(Ldar)                             \
   V(LdaSmi)                           \
@@ -198,21 +202,31 @@ namespace compiler {
   V(Return)                           \
   V(StaContextSlot)                   \
   V(StaCurrentContextSlot)            \
+  V(StaDataPropertyInLiteral)         \
   V(StaGlobal)                        \
   V(StaInArrayLiteral)                \
   V(StaKeyedProperty)                 \
+  V(StaLookupSlot)                    \
   V(StaModuleVariable)                \
   V(StaNamedOwnProperty)              \
   V(StaNamedProperty)                 \
+  V(StaNamedPropertyNoFeedback)       \
   V(Star)                             \
   V(SwitchOnGeneratorState)           \
   V(SwitchOnSmiNoFeedback)            \
   V(TestIn)                           \
+  V(TestInstanceOf)                   \
+  V(ThrowReferenceErrorIfHole)        \
+  V(ToNumber)                         \
+  V(ToNumeric)                        \
+  BINARY_OP_LIST(V)                   \
+  COMPARE_OP_LIST(V)                  \
   CLEAR_ACCUMULATOR_LIST(V)           \
   CLEAR_ENVIRONMENT_LIST(V)           \
   CONDITIONAL_JUMPS_LIST(V)           \
   IGNORED_BYTECODE_LIST(V)            \
   KILL_ENVIRONMENT_LIST(V)            \
+  UNARY_OP_LIST(V)                    \
   UNCONDITIONAL_JUMPS_LIST(V)         \
   UNREACHABLE_BYTECODE_LIST(V)
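
The regrouping above (BINARY_OP_LIST, UNARY_OP_LIST, and COMPARE_OP_LIST folded into SUPPORTED_BYTECODE_LIST) relies on the X-macro idiom: one list macro drives every generated declaration, so moving a bytecode between lists is a one-line change. A compilable miniature:

```cpp
#include <cstdio>

// X-macro miniature: the list macro enumerates entries once, and each
// consumer macro expands them into a different artifact (enum, name table,
// visitor declarations, ...), exactly as SUPPORTED_BYTECODE_LIST does.
#define TOY_OP_LIST(V) \
  V(Add)               \
  V(Sub)               \
  V(Mul)

#define DECLARE_ENUM(Name) kToy##Name,
enum ToyOp { TOY_OP_LIST(DECLARE_ENUM) kToyOpCount };
#undef DECLARE_ENUM

#define DECLARE_NAME(Name) #Name,
const char* kToyOpNames[] = {TOY_OP_LIST(DECLARE_NAME)};
#undef DECLARE_NAME

int main() {
  for (int i = 0; i < kToyOpCount; ++i) std::printf("%s\n", kToyOpNames[i]);
}
```
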
 
@@ -247,6 +261,8 @@ class Hints {
  public:
   explicit Hints(Zone* zone);
 
+  static Hints SingleConstant(Handle<Object> constant, Zone* zone);
+
   const ConstantsSet& constants() const;
   const MapsSet& maps() const;
   const BlueprintsSet& function_blueprints() const;
@@ -340,7 +356,7 @@ class SerializerForBackgroundCompilation {
       const HintsVector& arguments,
       SerializerForBackgroundCompilationFlags flags);
 
-  bool BailoutOnUninitialized(FeedbackSlot slot);
+  bool BailoutOnUninitialized(ProcessedFeedback const& feedback);
 
   void TraverseBytecode();
 
@@ -349,55 +365,87 @@ class SerializerForBackgroundCompilation {
   SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
 #undef DECLARE_VISIT_BYTECODE
 
+  // Returns whether the callee with the given SFI should be processed further,
+  // i.e. whether it's inlineable.
+  bool ProcessSFIForCallOrConstruct(Handle<SharedFunctionInfo> shared,
+                                    const HintsVector& arguments,
+                                    SpeculationMode speculation_mode);
+  // Returns whether {function} should be serialized for compilation.
+  bool ProcessCalleeForCallOrConstruct(Handle<JSFunction> function,
+                                       const HintsVector& arguments,
+                                       SpeculationMode speculation_mode);
   void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
                               const HintsVector& arguments, FeedbackSlot slot,
                               bool with_spread = false);
-  void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
-                          ConvertReceiverMode receiver_mode,
+  void ProcessCallVarArgs(ConvertReceiverMode receiver_mode,
+                          Hints const& callee, interpreter::Register first_reg,
+                          int reg_count, FeedbackSlot slot,
                           bool with_spread = false);
   void ProcessApiCall(Handle<SharedFunctionInfo> target,
                       const HintsVector& arguments);
-  void ProcessReceiverMapForApiCall(
-      FunctionTemplateInfoRef& target,  // NOLINT(runtime/references)
-      Handle<Map> receiver);
+  void ProcessReceiverMapForApiCall(FunctionTemplateInfoRef target,
+                                    Handle<Map> receiver);
   void ProcessBuiltinCall(Handle<SharedFunctionInfo> target,
-                          const HintsVector& arguments);
+                          const HintsVector& arguments,
+                          SpeculationMode speculation_mode);
 
   void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
 
   void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key,
-                                  FeedbackSlot slot, AccessMode mode);
-  void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator,
-                                  AccessMode mode);
-  void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name,
-                                  FeedbackSlot slot, AccessMode mode);
+                                  FeedbackSlot slot, AccessMode access_mode,
+                                  bool honor_bailout_on_uninitialized);
+  void ProcessNamedPropertyAccess(Hints receiver, NameRef const& name,
+                                  FeedbackSlot slot, AccessMode access_mode);
+  void ProcessNamedAccess(Hints receiver, NamedAccessFeedback const& feedback,
+                          AccessMode access_mode, Hints* new_accumulator_hints);
+  void ProcessElementAccess(Hints receiver, Hints key,
+                            ElementAccessFeedback const& feedback,
+                            AccessMode access_mode);
+
+  void ProcessModuleVariableAccess(
+      interpreter::BytecodeArrayIterator* iterator);
+
+  void ProcessHintsForObjectCreate(Hints const& prototype);
   void ProcessMapHintsForPromises(Hints const& receiver_hints);
   void ProcessHintsForPromiseResolve(Hints const& resolution_hints);
+  void ProcessHintsForHasInPrototypeChain(Hints const& instance_hints);
   void ProcessHintsForRegExpTest(Hints const& regexp_hints);
   PropertyAccessInfo ProcessMapForRegExpTest(MapRef map);
   void ProcessHintsForFunctionCall(Hints const& target_hints);
+  void ProcessHintsForFunctionBind(Hints const& receiver_hints);
+  void ProcessHintsForObjectGetPrototype(Hints const& object_hints);
+  void ProcessConstantForOrdinaryHasInstance(HeapObjectRef const& constructor,
+                                             bool* walk_prototypes);
+  void ProcessConstantForInstanceOf(ObjectRef const& constant,
+                                    bool* walk_prototypes);
+  void ProcessHintsForOrdinaryHasInstance(Hints const& constructor_hints,
+                                          Hints const& instance_hints);
+
+  void ProcessGlobalAccess(FeedbackSlot slot, bool is_load);
+
+  void ProcessCompareOperation(FeedbackSlot slot);
+  void ProcessForIn(FeedbackSlot slot);
+  void ProcessUnaryOrBinaryOperation(FeedbackSlot slot,
+                                     bool honor_bailout_on_uninitialized);
+
+  PropertyAccessInfo ProcessMapForNamedPropertyAccess(
+      MapRef receiver_map, NameRef const& name, AccessMode access_mode,
+      base::Optional<JSObjectRef> receiver, Hints* new_accumulator_hints);
+
+  void ProcessCreateContext(interpreter::BytecodeArrayIterator* iterator,
+                            int scopeinfo_operand_index);
 
-  GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot);
-  NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess(
-      const MapHandles& maps, AccessMode mode, NameRef const& name);
-  ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
-      const MapHandles& maps, AccessMode mode,
-      KeyedAccessMode const& keyed_mode);
-  void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode,
-                                        base::Optional<NameRef> static_name);
-  void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name);
-
-  void ProcessCreateContext();
   enum ContextProcessingMode {
     kIgnoreSlot,
     kSerializeSlot,
-    kSerializeSlotAndAddToAccumulator
   };
 
-  void ProcessContextAccess(const Hints& context_hints, int slot, int depth,
-                            ContextProcessingMode mode);
-  void ProcessImmutableLoad(ContextRef& context,  // NOLINT(runtime/references)
-                            int slot, ContextProcessingMode mode);
+  void ProcessContextAccess(Hints const& context_hints, int slot, int depth,
+                            ContextProcessingMode mode,
+                            Hints* result_hints = nullptr);
+  void ProcessImmutableLoad(ContextRef const& context, int slot,
+                            ContextProcessingMode mode,
+                            Hints* new_accumulator_hints);
   void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator);
   void ProcessLdaLookupContextSlot(
       interpreter::BytecodeArrayIterator* iterator);
@@ -420,8 +468,10 @@ class SerializerForBackgroundCompilation {
   void ContributeToJumpTargetEnvironment(int target_offset);
   void IncorporateJumpTargetEnvironment(int target_offset);
 
+  Handle<FeedbackVector> feedback_vector() const;
   Handle<BytecodeArray> bytecode_array() const;
-  BytecodeAnalysis const& GetBytecodeAnalysis(bool serialize);
+  BytecodeAnalysis const& GetBytecodeAnalysis(
+      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 
   JSHeapBroker* broker() const { return broker_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
@@ -496,6 +546,12 @@ bool Hints::Equals(Hints const& other) const {
 }
 #endif
 
+Hints Hints::SingleConstant(Handle<Object> constant, Zone* zone) {
+  Hints result(zone);
+  result.AddConstant(constant);
+  return result;
+}
+
 const ConstantsSet& Hints::constants() const { return constants_; }
 
 const MapsSet& Hints::maps() const { return maps_; }
@@ -628,7 +684,7 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
 
   // Appends the hints for the given register range to {dst} (in order).
   void ExportRegisterHints(interpreter::Register first, size_t count,
-                           HintsVector& dst);  // NOLINT(runtime/references)
+                           HintsVector* dst);
 
  private:
   friend std::ostream& operator<<(std::ostream& out, const Environment& env);
@@ -693,8 +749,8 @@ SerializerForBackgroundCompilation::Environment::Environment(
   }
 
   // Pad the rest with "undefined".
-  Hints undefined_hint(zone);
-  undefined_hint.AddConstant(isolate->factory()->undefined_value());
+  Hints undefined_hint =
+      Hints::SingleConstant(isolate->factory()->undefined_value(), zone);
   for (size_t i = arguments.size(); i < param_count; ++i) {
     ephemeral_hints_[i] = undefined_hint;
   }
@@ -826,7 +882,7 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
 }
 
 bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
-    FeedbackSlot slot) {
+    ProcessedFeedback const& feedback) {
   DCHECK(!environment()->IsDead());
   if (!(flags() &
         SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) {
@@ -837,16 +893,7 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
     // OSR entry point. TODO(neis): Support OSR?
     return false;
   }
-  FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
-  if (!slot.IsInvalid() && nexus.IsUninitialized()) {
-    FeedbackSource source(nexus);
-    if (broker()->HasFeedback(source)) {
-      DCHECK_EQ(broker()->GetFeedback(source)->kind(),
-                ProcessedFeedback::kInsufficient);
-    } else {
-      broker()->SetFeedback(source,
-                            new (broker()->zone()) InsufficientFeedback());
-    }
+  if (feedback.IsInsufficient()) {
     environment()->Kill();
     return true;
   }
@@ -856,15 +903,14 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
 Hints SerializerForBackgroundCompilation::Run() {
   TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run");
   SharedFunctionInfoRef shared(broker(), environment()->function().shared());
-  FeedbackVectorRef feedback_vector(
-      broker(), environment()->function().feedback_vector());
-  if (shared.IsSerializedForCompilation(feedback_vector)) {
+  FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector());
+  if (shared.IsSerializedForCompilation(feedback_vector_ref)) {
     TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo "
                                << Brief(*shared.object())
                                << ", bailing out.\n");
     return Hints(zone());
   }
-  shared.SetSerializedForCompilation(feedback_vector);
+  shared.SetSerializedForCompilation(feedback_vector_ref);
 
   // We eagerly call {EnsureSourcePositionsAvailable} for all serialized
   // SFIs while still on the main thread. Source positions will later be used
@@ -875,7 +921,7 @@ Hints SerializerForBackgroundCompilation::Run() {
                                                        shared.object());
   }
 
-  feedback_vector.SerializeSlots();
+  feedback_vector_ref.Serialize();
   TraverseBytecode();
   return environment()->return_value_hints();
 }
@@ -909,6 +955,11 @@ class ExceptionHandlerMatcher {
   std::set<int>::const_iterator handlers_iterator_;
 };
 
+Handle<FeedbackVector> SerializerForBackgroundCompilation::feedback_vector()
+    const {
+  return environment()->function().feedback_vector();
+}
+
 Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
     const {
   return handle(environment()->function().shared()->GetBytecodeArray(),
@@ -916,22 +967,28 @@ Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
 }
 
 BytecodeAnalysis const& SerializerForBackgroundCompilation::GetBytecodeAnalysis(
-    bool serialize) {
+    SerializationPolicy policy) {
   return broker()->GetBytecodeAnalysis(
       bytecode_array(), osr_offset(),
       flags() &
           SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness,
-      serialize);
+      policy);
 }
 
 void SerializerForBackgroundCompilation::TraverseBytecode() {
-  BytecodeAnalysis const& bytecode_analysis = GetBytecodeAnalysis(true);
+  BytecodeAnalysis const& bytecode_analysis =
+      GetBytecodeAnalysis(SerializationPolicy::kSerializeIfNeeded);
   BytecodeArrayRef(broker(), bytecode_array()).SerializeForCompilation();
 
   BytecodeArrayIterator iterator(bytecode_array());
   ExceptionHandlerMatcher handler_matcher(iterator, bytecode_array());
 
+  bool has_one_shot_bytecode = false;
   for (; !iterator.done(); iterator.Advance()) {
+    has_one_shot_bytecode =
+        has_one_shot_bytecode ||
+        interpreter::Bytecodes::IsOneShotBytecode(iterator.current_bytecode());
+
     int const current_offset = iterator.current_offset();
     IncorporateJumpTargetEnvironment(current_offset);
 
@@ -970,6 +1027,21 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
       }
     }
   }
+
+  if (has_one_shot_bytecode) {
+    broker()->isolate()->CountUsage(
+        v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
+  }
+}
+
+void SerializerForBackgroundCompilation::VisitGetIterator(
+    BytecodeArrayIterator* iterator) {
+  AccessMode mode = AccessMode::kLoad;
+  Hints const& receiver =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  Handle<Name> name = broker()->isolate()->factory()->iterator_symbol();
+  FeedbackSlot slot = iterator->GetSlotOperand(1);
+  ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode);
 }
 
 void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
@@ -995,11 +1067,11 @@ void SerializerForBackgroundCompilation::VisitGetTemplateObject(
   ObjectRef description(
       broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
   FeedbackSlot slot = iterator->GetSlotOperand(1);
-  FeedbackVectorRef feedback_vector(
-      broker(), environment()->function().feedback_vector());
+  FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector());
   SharedFunctionInfoRef shared(broker(), environment()->function().shared());
   JSArrayRef template_object =
-      shared.GetTemplateObject(description, feedback_vector, slot, true);
+      shared.GetTemplateObject(description, feedback_vector_ref, slot,
+                               SerializationPolicy::kSerializeIfNeeded);
   environment()->accumulator_hints().Clear();
   environment()->accumulator_hints().AddConstant(template_object.object());
 }
@@ -1058,25 +1130,92 @@ void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
   Runtime::FunctionId functionId = iterator->GetIntrinsicIdOperand(0);
   // For JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve and
   // JSNativeContextSpecialization::ReduceJSResolvePromise.
-  if (functionId == Runtime::kInlineAsyncFunctionResolve) {
-    interpreter::Register first_reg = iterator->GetRegisterOperand(1);
-    size_t reg_count = iterator->GetRegisterCountOperand(2);
-    CHECK_EQ(reg_count, 3);
-    HintsVector arguments(zone());
-    environment()->ExportRegisterHints(first_reg, reg_count, arguments);
-    Hints const& resolution_hints = arguments[1];  // The resolution object.
-    ProcessHintsForPromiseResolve(resolution_hints);
-    environment()->accumulator_hints().Clear();
-    return;
+  switch (functionId) {
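+    // The temporary ObjectRefs below are created purely for their side
+    // effect of serializing the corresponding builtin into the broker.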
+    case Runtime::kInlineAsyncFunctionResolve: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncFunctionResolve));
+      interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+      size_t reg_count = iterator->GetRegisterCountOperand(2);
+      CHECK_EQ(reg_count, 3);
+      HintsVector arguments(zone());
+      environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
+      Hints const& resolution_hints = arguments[1];  // The resolution object.
+      ProcessHintsForPromiseResolve(resolution_hints);
+      environment()->accumulator_hints().Clear();
+      return;
+    }
+    case Runtime::kInlineAsyncGeneratorReject:
+    case Runtime::kAsyncGeneratorReject: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncGeneratorReject));
+      break;
+    }
+    case Runtime::kInlineAsyncGeneratorResolve:
+    case Runtime::kAsyncGeneratorResolve: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncGeneratorResolve));
+      break;
+    }
+    case Runtime::kInlineAsyncGeneratorYield:
+    case Runtime::kAsyncGeneratorYield: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncGeneratorYield));
+      break;
+    }
+    case Runtime::kInlineAsyncGeneratorAwaitUncaught:
+    case Runtime::kAsyncGeneratorAwaitUncaught: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncGeneratorAwaitUncaught));
+      break;
+    }
+    case Runtime::kInlineAsyncGeneratorAwaitCaught:
+    case Runtime::kAsyncGeneratorAwaitCaught: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncGeneratorAwaitCaught));
+      break;
+    }
+    case Runtime::kInlineAsyncFunctionAwaitUncaught:
+    case Runtime::kAsyncFunctionAwaitUncaught: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncFunctionAwaitUncaught));
+      break;
+    }
+    case Runtime::kInlineAsyncFunctionAwaitCaught:
+    case Runtime::kAsyncFunctionAwaitCaught: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncFunctionAwaitCaught));
+      break;
+    }
+    case Runtime::kInlineAsyncFunctionReject:
+    case Runtime::kAsyncFunctionReject: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncFunctionReject));
+      break;
+    }
+    case Runtime::kAsyncFunctionResolve: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kAsyncFunctionResolve));
+      break;
+    }
+    case Runtime::kInlineCopyDataProperties:
+    case Runtime::kCopyDataProperties: {
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kCopyDataProperties));
+      break;
+    }
+    default: {
+      break;
+    }
   }
   environment()->ClearEphemeralHints();
 }
 
 void SerializerForBackgroundCompilation::VisitLdaConstant(
     BytecodeArrayIterator* iterator) {
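+  // Creating the ObjectRef serializes the constant into the broker before
+  // it is added as an accumulator hint.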
+  ObjectRef object(
+      broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
   environment()->accumulator_hints().Clear();
-  environment()->accumulator_hints().AddConstant(
-      iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+  environment()->accumulator_hints().AddConstant(object.object());
 }
 
 void SerializerForBackgroundCompilation::VisitPushContext(
@@ -1088,7 +1227,7 @@ void SerializerForBackgroundCompilation::VisitPushContext(
   saved_context_hints.Clear();
   saved_context_hints.Add(current_context_hints);
 
-  // New Context is in the accumulator. Put those hints into the current context
+  // New context is in the accumulator. Put those hints into the current context
   // register hints.
   current_context_hints.Clear();
   current_context_hints.Add(environment()->accumulator_hints());
@@ -1104,19 +1243,21 @@ void SerializerForBackgroundCompilation::VisitPopContext(
 }
 
 void SerializerForBackgroundCompilation::ProcessImmutableLoad(
-    ContextRef& context_ref, int slot, ContextProcessingMode mode) {
-  DCHECK(mode == kSerializeSlot || mode == kSerializeSlotAndAddToAccumulator);
-  base::Optional<ObjectRef> slot_value = context_ref.get(slot, true);
+    ContextRef const& context_ref, int slot, ContextProcessingMode mode,
+    Hints* result_hints) {
+  DCHECK_EQ(mode, kSerializeSlot);
+  base::Optional<ObjectRef> slot_value =
+      context_ref.get(slot, SerializationPolicy::kSerializeIfNeeded);
 
-  // Also, put the object into the constant hints for the accumulator.
-  if (mode == kSerializeSlotAndAddToAccumulator && slot_value.has_value()) {
-    environment()->accumulator_hints().AddConstant(slot_value.value().object());
+  // If requested, record the object as a hint for the result value.
+  if (result_hints != nullptr && slot_value.has_value()) {
+    result_hints->AddConstant(slot_value.value().object());
   }
 }
 
 void SerializerForBackgroundCompilation::ProcessContextAccess(
-    const Hints& context_hints, int slot, int depth,
-    ContextProcessingMode mode) {
+    Hints const& context_hints, int slot, int depth, ContextProcessingMode mode,
+    Hints* result_hints) {
   // This function is for JSContextSpecialization::ReduceJSLoadContext and
   // ReduceJSStoreContext. Those reductions attempt to eliminate as many
   // loads as possible by making use of constant Context objects. In the
@@ -1127,9 +1268,10 @@ void SerializerForBackgroundCompilation::ProcessContextAccess(
       // Walk this context to the given depth and serialize the slot found.
       ContextRef context_ref(broker(), x);
       size_t remaining_depth = depth;
-      context_ref = context_ref.previous(&remaining_depth, true);
+      context_ref = context_ref.previous(
+          &remaining_depth, SerializationPolicy::kSerializeIfNeeded);
       if (remaining_depth == 0 && mode != kIgnoreSlot) {
-        ProcessImmutableLoad(context_ref, slot, mode);
+        ProcessImmutableLoad(context_ref, slot, mode, result_hints);
       }
     }
   }
@@ -1137,9 +1279,10 @@ void SerializerForBackgroundCompilation::ProcessContextAccess(
     if (x.distance <= static_cast<unsigned int>(depth)) {
       ContextRef context_ref(broker(), x.context);
       size_t remaining_depth = depth - x.distance;
-      context_ref = context_ref.previous(&remaining_depth, true);
+      context_ref = context_ref.previous(
+          &remaining_depth, SerializationPolicy::kSerializeIfNeeded);
       if (remaining_depth == 0 && mode != kIgnoreSlot) {
-        ProcessImmutableLoad(context_ref, slot, mode);
+        ProcessImmutableLoad(context_ref, slot, mode, result_hints);
       }
     }
   }
@@ -1147,67 +1290,92 @@ void SerializerForBackgroundCompilation::ProcessContextAccess(
 
 void SerializerForBackgroundCompilation::VisitLdaContextSlot(
     BytecodeArrayIterator* iterator) {
-  Hints& context_hints =
+  Hints const& context_hints =
       environment()->register_hints(iterator->GetRegisterOperand(0));
   const int slot = iterator->GetIndexOperand(1);
   const int depth = iterator->GetUnsignedImmediateOperand(2);
+  Hints new_accumulator_hints(zone());
+  ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot,
+                       &new_accumulator_hints);
   environment()->accumulator_hints().Clear();
-  ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
+  environment()->accumulator_hints().Add(new_accumulator_hints);
 }
 
 void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot(
     BytecodeArrayIterator* iterator) {
   const int slot = iterator->GetIndexOperand(0);
   const int depth = 0;
-  Hints& context_hints = environment()->current_context_hints();
+  Hints const& context_hints = environment()->current_context_hints();
+  Hints new_accumulator_hints(zone());
+  ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot,
+                       &new_accumulator_hints);
   environment()->accumulator_hints().Clear();
-  ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
+  environment()->accumulator_hints().Add(new_accumulator_hints);
 }
 
 void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot(
     BytecodeArrayIterator* iterator) {
   const int slot = iterator->GetIndexOperand(1);
   const int depth = iterator->GetUnsignedImmediateOperand(2);
-  Hints& context_hints =
+  Hints const& context_hints =
       environment()->register_hints(iterator->GetRegisterOperand(0));
+  Hints new_accumulator_hints(zone());
+  ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
+                       &new_accumulator_hints);
   environment()->accumulator_hints().Clear();
-  ProcessContextAccess(context_hints, slot, depth,
-                       kSerializeSlotAndAddToAccumulator);
+  environment()->accumulator_hints().Add(new_accumulator_hints);
 }
 
 void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot(
     BytecodeArrayIterator* iterator) {
   const int slot = iterator->GetIndexOperand(0);
   const int depth = 0;
-  Hints& context_hints = environment()->current_context_hints();
+  Hints const& context_hints = environment()->current_context_hints();
+  Hints new_accumulator_hints(zone());
+  ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
+                       &new_accumulator_hints);
   environment()->accumulator_hints().Clear();
-  ProcessContextAccess(context_hints, slot, depth,
-                       kSerializeSlotAndAddToAccumulator);
+  environment()->accumulator_hints().Add(new_accumulator_hints);
 }
 
-void SerializerForBackgroundCompilation::VisitLdaModuleVariable(
+void SerializerForBackgroundCompilation::ProcessModuleVariableAccess(
     BytecodeArrayIterator* iterator) {
+  const int slot = Context::EXTENSION_INDEX;
   const int depth = iterator->GetUnsignedImmediateOperand(1);
+  Hints const& context_hints = environment()->current_context_hints();
 
-  // TODO(mvstanton): If we have a constant module, should we serialize the
-  // cell as well? Then we could put the value in the accumulator.
-  environment()->accumulator_hints().Clear();
-  ProcessContextAccess(environment()->current_context_hints(),
-                       Context::EXTENSION_INDEX, depth, kSerializeSlot);
+  Hints result_hints(zone());
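+  // The module object lives in the context's extension slot; any constant
+  // module found there is serialized so the module cell load can be reduced.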
+  ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
+                       &result_hints);
+  for (Handle<Object> constant : result_hints.constants()) {
+    ObjectRef object(broker(), constant);
+    // For JSTypedLowering::BuildGetModuleCell.
+    if (object.IsSourceTextModule()) object.AsSourceTextModule().Serialize();
+  }
+}
+
+void SerializerForBackgroundCompilation::VisitLdaModuleVariable(
+    BytecodeArrayIterator* iterator) {
+  ProcessModuleVariableAccess(iterator);
 }
 
 void SerializerForBackgroundCompilation::VisitStaModuleVariable(
     BytecodeArrayIterator* iterator) {
-  const int depth = iterator->GetUnsignedImmediateOperand(1);
-  ProcessContextAccess(environment()->current_context_hints(),
-                       Context::EXTENSION_INDEX, depth, kSerializeSlot);
+  ProcessModuleVariableAccess(iterator);
+}
+
+void SerializerForBackgroundCompilation::VisitStaLookupSlot(
+    BytecodeArrayIterator* iterator) {
+  ObjectRef(broker(),
+            iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+  environment()->accumulator_hints().Clear();
 }
 
 void SerializerForBackgroundCompilation::VisitStaContextSlot(
     BytecodeArrayIterator* iterator) {
   const int slot = iterator->GetIndexOperand(1);
   const int depth = iterator->GetUnsignedImmediateOperand(2);
-  Hints& register_hints =
+  Hints const& register_hints =
       environment()->register_hints(iterator->GetRegisterOperand(0));
   ProcessContextAccess(register_hints, slot, depth, kIgnoreSlot);
 }
@@ -1216,7 +1384,7 @@ void SerializerForBackgroundCompilation::VisitStaCurrentContextSlot(
     BytecodeArrayIterator* iterator) {
   const int slot = iterator->GetIndexOperand(0);
   const int depth = 0;
-  Hints& context_hints = environment()->current_context_hints();
+  Hints const& context_hints = environment()->current_context_hints();
   ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
 }
 
@@ -1242,35 +1410,80 @@ void SerializerForBackgroundCompilation::VisitMov(
   environment()->register_hints(dst).Add(environment()->register_hints(src));
 }
 
+void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral(
+    BytecodeArrayIterator* iterator) {
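+  // As in the other literal visitors below, creating the ref serializes the
+  // constant operand for later background use.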
+  Handle<String> constant_pattern = Handle<String>::cast(
+      iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+  StringRef description(broker(), constant_pattern);
+  environment()->accumulator_hints().Clear();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateArrayLiteral(
+    BytecodeArrayIterator* iterator) {
+  Handle<ArrayBoilerplateDescription> array_boilerplate_description =
+      Handle<ArrayBoilerplateDescription>::cast(
+          iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+  ArrayBoilerplateDescriptionRef description(broker(),
+                                             array_boilerplate_description);
+  environment()->accumulator_hints().Clear();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateObjectLiteral(
+    BytecodeArrayIterator* iterator) {
+  Handle<ObjectBoilerplateDescription> constant_properties =
+      Handle<ObjectBoilerplateDescription>::cast(
+          iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+  ObjectBoilerplateDescriptionRef description(broker(), constant_properties);
+  environment()->accumulator_hints().Clear();
+}
+
 void SerializerForBackgroundCompilation::VisitCreateFunctionContext(
     BytecodeArrayIterator* iterator) {
-  ProcessCreateContext();
+  ProcessCreateContext(iterator, 0);
 }
 
 void SerializerForBackgroundCompilation::VisitCreateBlockContext(
     BytecodeArrayIterator* iterator) {
-  ProcessCreateContext();
+  ProcessCreateContext(iterator, 0);
 }
 
 void SerializerForBackgroundCompilation::VisitCreateEvalContext(
     BytecodeArrayIterator* iterator) {
-  ProcessCreateContext();
+  ProcessCreateContext(iterator, 0);
 }
 
 void SerializerForBackgroundCompilation::VisitCreateWithContext(
     BytecodeArrayIterator* iterator) {
-  ProcessCreateContext();
+  ProcessCreateContext(iterator, 1);
 }
 
 void SerializerForBackgroundCompilation::VisitCreateCatchContext(
     BytecodeArrayIterator* iterator) {
-  ProcessCreateContext();
+  ProcessCreateContext(iterator, 1);
+}
+
+void SerializerForBackgroundCompilation::VisitForInNext(
+    BytecodeArrayIterator* iterator) {
+  FeedbackSlot slot = iterator->GetSlotOperand(3);
+  ProcessForIn(slot);
+}
+
+void SerializerForBackgroundCompilation::VisitForInPrepare(
+    BytecodeArrayIterator* iterator) {
+  FeedbackSlot slot = iterator->GetSlotOperand(1);
+  ProcessForIn(slot);
 }
 
-void SerializerForBackgroundCompilation::ProcessCreateContext() {
+void SerializerForBackgroundCompilation::ProcessCreateContext(
+    interpreter::BytecodeArrayIterator* iterator, int scopeinfo_operand_index) {
+  Handle<ScopeInfo> scope_info =
+      Handle<ScopeInfo>::cast(iterator->GetConstantForIndexOperand(
+          scopeinfo_operand_index, broker()->isolate()));
+  ScopeInfoRef scope_info_ref(broker(), scope_info);
+
+  Hints const& current_context_hints = environment()->current_context_hints();
   Hints& accumulator_hints = environment()->accumulator_hints();
   accumulator_hints.Clear();
-  Hints& current_context_hints = environment()->current_context_hints();
 
   // For each constant context, we must create a virtual context from
   // it of distance one.
@@ -1291,31 +1504,33 @@ void SerializerForBackgroundCompilation::ProcessCreateContext() {
 
 void SerializerForBackgroundCompilation::VisitCreateClosure(
     BytecodeArrayIterator* iterator) {
+  environment()->accumulator_hints().Clear();
+
   Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(
       iterator->GetConstantForIndexOperand(0, broker()->isolate()));
-
   Handle<FeedbackCell> feedback_cell =
-      environment()->function().feedback_vector()->GetClosureFeedbackCell(
-          iterator->GetIndexOperand(1));
+      feedback_vector()->GetClosureFeedbackCell(iterator->GetIndexOperand(1));
   FeedbackCellRef feedback_cell_ref(broker(), feedback_cell);
   Handle<Object> cell_value(feedback_cell->value(), broker()->isolate());
   ObjectRef cell_value_ref(broker(), cell_value);
 
-  environment()->accumulator_hints().Clear();
   if (cell_value->IsFeedbackVector()) {
-    // Gather the context hints from the current context register hint
-    // structure.
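+    // A blueprint can only be created once the closure's feedback vector
+    // exists; it captures the current context hints for the nested function.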
     FunctionBlueprint blueprint(shared,
                                 Handle<FeedbackVector>::cast(cell_value),
                                 environment()->current_context_hints());
-
     environment()->accumulator_hints().AddFunctionBlueprint(blueprint);
   }
 }
 
 void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver(
     BytecodeArrayIterator* iterator) {
-  ProcessCallVarArgs(iterator, ConvertReceiverMode::kNullOrUndefined);
+  const Hints& callee =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
+  FeedbackSlot slot = iterator->GetSlotOperand(3);
+  ProcessCallVarArgs(ConvertReceiverMode::kNullOrUndefined, callee, first_reg,
+                     reg_count, slot);
 }
 
 void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0(
@@ -1324,9 +1539,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0(
       environment()->register_hints(iterator->GetRegisterOperand(0));
   FeedbackSlot slot = iterator->GetSlotOperand(1);
 
-  Hints receiver(zone());
-  receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
-
+  Hints receiver = Hints::SingleConstant(
+      broker()->isolate()->factory()->undefined_value(), zone());
   HintsVector parameters({receiver}, zone());
   ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
 }
@@ -1339,9 +1553,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
       environment()->register_hints(iterator->GetRegisterOperand(1));
   FeedbackSlot slot = iterator->GetSlotOperand(2);
 
-  Hints receiver(zone());
-  receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
-
+  Hints receiver = Hints::SingleConstant(
+      broker()->isolate()->factory()->undefined_value(), zone());
   HintsVector parameters({receiver, arg0}, zone());
   ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
 }
@@ -1356,21 +1569,42 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2(
       environment()->register_hints(iterator->GetRegisterOperand(2));
   FeedbackSlot slot = iterator->GetSlotOperand(3);
 
-  Hints receiver(zone());
-  receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
-
+  Hints receiver = Hints::SingleConstant(
+      broker()->isolate()->factory()->undefined_value(), zone());
   HintsVector parameters({receiver, arg0, arg1}, zone());
   ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
 }
 
 void SerializerForBackgroundCompilation::VisitCallAnyReceiver(
     BytecodeArrayIterator* iterator) {
-  ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny);
+  const Hints& callee =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
+  FeedbackSlot slot = iterator->GetSlotOperand(3);
+  ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count,
+                     slot);
+}
+
+void SerializerForBackgroundCompilation::VisitCallNoFeedback(
+    BytecodeArrayIterator* iterator) {
+  const Hints& callee =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
+  ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count,
+                     FeedbackSlot::Invalid());
 }
 
 void SerializerForBackgroundCompilation::VisitCallProperty(
     BytecodeArrayIterator* iterator) {
-  ProcessCallVarArgs(iterator, ConvertReceiverMode::kNullOrUndefined);
+  const Hints& callee =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
+  FeedbackSlot slot = iterator->GetSlotOperand(3);
+  ProcessCallVarArgs(ConvertReceiverMode::kNotNullOrUndefined, callee,
+                     first_reg, reg_count, slot);
 }
 
 void SerializerForBackgroundCompilation::VisitCallProperty0(
@@ -1417,17 +1651,28 @@ void SerializerForBackgroundCompilation::VisitCallProperty2(
 
 void SerializerForBackgroundCompilation::VisitCallWithSpread(
     BytecodeArrayIterator* iterator) {
-  ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny, true);
+  const Hints& callee =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
+  FeedbackSlot slot = iterator->GetSlotOperand(3);
+  ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count,
+                     slot, true);
 }
 
 void SerializerForBackgroundCompilation::VisitCallJSRuntime(
     BytecodeArrayIterator* iterator) {
-  environment()->accumulator_hints().Clear();
-
-  // BytecodeGraphBuilder::VisitCallJSRuntime needs the {runtime_index}
-  // slot in the native context to be serialized.
   const int runtime_index = iterator->GetNativeContextIndexOperand(0);
-  broker()->native_context().get(runtime_index, true);
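+  // The callee is a well-known function in the native context; serialize it
+  // and process the call like an ordinary undefined-receiver call.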
+  ObjectRef constant =
+      broker()
+          ->target_native_context()
+          .get(runtime_index, SerializationPolicy::kSerializeIfNeeded)
+          .value();
+  Hints callee = Hints::SingleConstant(constant.object(), zone());
+  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
+  ProcessCallVarArgs(ConvertReceiverMode::kNullOrUndefined, callee, first_reg,
+                     reg_count, FeedbackSlot::Invalid());
 }
 
 Hints SerializerForBackgroundCompilation::RunChildSerializer(
@@ -1456,107 +1701,168 @@ Hints SerializerForBackgroundCompilation::RunChildSerializer(
   return child_serializer.Run();
 }
 
+bool SerializerForBackgroundCompilation::ProcessSFIForCallOrConstruct(
+    Handle<SharedFunctionInfo> shared, const HintsVector& arguments,
+    SpeculationMode speculation_mode) {
+  if (shared->IsApiFunction()) {
+    ProcessApiCall(shared, arguments);
+    DCHECK(!shared->IsInlineable());
+  } else if (shared->HasBuiltinId()) {
+    ProcessBuiltinCall(shared, arguments, speculation_mode);
+    DCHECK(!shared->IsInlineable());
+  }
+  return shared->IsInlineable();
+}
+
+bool SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
+    Handle<JSFunction> function, const HintsVector& arguments,
+    SpeculationMode speculation_mode) {
+  JSFunctionRef(broker(), function).Serialize();
+
+  Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate());
+
+  return ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode) &&
+         function->has_feedback_vector();
+}
+
 namespace {
-base::Optional<HeapObjectRef> GetHeapObjectFeedback(
-    JSHeapBroker* broker, Handle<FeedbackVector> feedback_vector,
-    FeedbackSlot slot) {
-  if (slot.IsInvalid()) return base::nullopt;
-  FeedbackNexus nexus(feedback_vector, slot);
-  VectorSlotPair feedback(feedback_vector, slot, nexus.ic_state());
-  DCHECK(feedback.IsValid());
-  if (nexus.IsUninitialized()) return base::nullopt;
-  HeapObject object;
-  if (!nexus.GetFeedback()->GetHeapObject(&object)) return base::nullopt;
-  return HeapObjectRef(broker, handle(object, broker->isolate()));
+// Returns the innermost bound target if it is a JSFunction, and inserts
+// all bound arguments and {original_arguments} into {expanded_arguments}
+// in the appropriate order.
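+// E.g. for f.bind(a, b).bind(c), the loop below collects [c, b, a] into
+// {reversed_bound_arguments}; reversing this gives [a, b, c], i.e. the
+// innermost bound_this first, then its bound arguments, then outer levels,
+// followed by {original_arguments}.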
+MaybeHandle<JSFunction> UnrollBoundFunction(
+    JSBoundFunctionRef const& bound_function, JSHeapBroker* broker,
+    const HintsVector& original_arguments, HintsVector* expanded_arguments) {
+  DCHECK(expanded_arguments->empty());
+
+  JSReceiverRef target = bound_function.AsJSReceiver();
+  HintsVector reversed_bound_arguments(broker->zone());
+  for (; target.IsJSBoundFunction();
+       target = target.AsJSBoundFunction().bound_target_function()) {
+    for (int i = target.AsJSBoundFunction().bound_arguments().length() - 1;
+         i >= 0; --i) {
+      Hints arg = Hints::SingleConstant(
+          target.AsJSBoundFunction().bound_arguments().get(i).object(),
+          broker->zone());
+      reversed_bound_arguments.push_back(arg);
+    }
+    Hints arg = Hints::SingleConstant(
+        target.AsJSBoundFunction().bound_this().object(), broker->zone());
+    reversed_bound_arguments.push_back(arg);
+  }
+
+  if (!target.IsJSFunction()) return MaybeHandle<JSFunction>();
+
+  expanded_arguments->insert(expanded_arguments->end(),
+                             reversed_bound_arguments.rbegin(),
+                             reversed_bound_arguments.rend());
+  expanded_arguments->insert(expanded_arguments->end(),
+                             original_arguments.begin(),
+                             original_arguments.end());
+
+  return target.AsJSFunction().object();
 }
 }  // namespace
 
 void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
     Hints callee, base::Optional<Hints> new_target,
     const HintsVector& arguments, FeedbackSlot slot, bool with_spread) {
-  // TODO(neis): Make this part of ProcessFeedback*?
-  if (BailoutOnUninitialized(slot)) return;
-
-  // Incorporate feedback into hints.
-  base::Optional<HeapObjectRef> feedback = GetHeapObjectFeedback(
-      broker(), environment()->function().feedback_vector(), slot);
-  if (feedback.has_value() && feedback->map().is_callable()) {
-    if (new_target.has_value()) {
-      // Construct; feedback is new_target, which often is also the callee.
-      new_target->AddConstant(feedback->object());
-      callee.AddConstant(feedback->object());
-    } else {
-      // Call; feedback is callee.
-      callee.AddConstant(feedback->object());
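+  // Speculation defaults to disallowed; usable call feedback below may relax
+  // this, which in turn enables more builtin-specific serialization.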
+  SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation;
+  if (!slot.IsInvalid()) {
+    FeedbackSource source(feedback_vector(), slot);
+    ProcessedFeedback const& feedback =
+        broker()->ProcessFeedbackForCall(source);
+    if (BailoutOnUninitialized(feedback)) return;
+
+    // Incorporate the feedback into the hints copy to simplify processing.
+    if (!feedback.IsInsufficient()) {
+      speculation_mode = feedback.AsCall().speculation_mode();
+      base::Optional<HeapObjectRef> target = feedback.AsCall().target();
+      if (target.has_value() && target->map().is_callable()) {
+        // TODO(mvstanton): if the map isn't callable then we have an allocation
+        // site, and it may make sense to add the Array JSFunction constant.
+        if (new_target.has_value()) {
+          // Construct; feedback is new_target, which often is also the callee.
+          new_target->AddConstant(target->object());
+          callee.AddConstant(target->object());
+        } else {
+          // Call; target is callee.
+          callee.AddConstant(target->object());
+        }
+      }
     }
   }
 
   environment()->accumulator_hints().Clear();
 
+  // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct.
   for (auto hint : callee.constants()) {
-    if (!hint->IsJSFunction()) continue;
-
-    Handle<JSFunction> function = Handle<JSFunction>::cast(hint);
-    JSFunctionRef(broker(), function).Serialize();
-
-    Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate());
-
-    if (shared->IsApiFunction()) {
-      ProcessApiCall(shared, arguments);
-      DCHECK(!shared->IsInlineable());
-    } else if (shared->HasBuiltinId()) {
-      ProcessBuiltinCall(shared, arguments);
-      DCHECK(!shared->IsInlineable());
+    const HintsVector* actual_arguments = &arguments;
+    Handle<JSFunction> function;
+    HintsVector expanded_arguments(zone());
+    if (hint->IsJSBoundFunction()) {
+      JSBoundFunctionRef bound_function(broker(),
+                                        Handle<JSBoundFunction>::cast(hint));
+      bound_function.Serialize();
+
+      MaybeHandle<JSFunction> maybe_function = UnrollBoundFunction(
+          bound_function, broker(), arguments, &expanded_arguments);
+      if (maybe_function.is_null()) continue;
+      function = maybe_function.ToHandleChecked();
+      actual_arguments = &expanded_arguments;
+    } else if (hint->IsJSFunction()) {
+      function = Handle<JSFunction>::cast(hint);
+    } else {
+      continue;
     }
 
-    if (!shared->IsInlineable() || !function->has_feedback_vector()) continue;
-
-    environment()->accumulator_hints().Add(RunChildSerializer(
-        CompilationSubject(function, broker()->isolate(), zone()), new_target,
-        arguments, with_spread));
+    if (ProcessCalleeForCallOrConstruct(function, *actual_arguments,
+                                        speculation_mode)) {
+      environment()->accumulator_hints().Add(RunChildSerializer(
+          CompilationSubject(function, broker()->isolate(), zone()), new_target,
+          *actual_arguments, with_spread));
+    }
   }
 
+  // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct.
   for (auto hint : callee.function_blueprints()) {
     Handle<SharedFunctionInfo> shared = hint.shared();
-
-    if (shared->IsApiFunction()) {
-      ProcessApiCall(shared, arguments);
-      DCHECK(!shared->IsInlineable());
-    } else if (shared->HasBuiltinId()) {
-      ProcessBuiltinCall(shared, arguments);
-      DCHECK(!shared->IsInlineable());
+    if (!ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode)) {
+      continue;
     }
 
-    if (!shared->IsInlineable()) continue;
     environment()->accumulator_hints().Add(RunChildSerializer(
         CompilationSubject(hint), new_target, arguments, with_spread));
   }
 }
 
 void SerializerForBackgroundCompilation::ProcessCallVarArgs(
-    BytecodeArrayIterator* iterator, ConvertReceiverMode receiver_mode,
+    ConvertReceiverMode receiver_mode, Hints const& callee,
+    interpreter::Register first_reg, int reg_count, FeedbackSlot slot,
     bool with_spread) {
-  const Hints& callee =
-      environment()->register_hints(iterator->GetRegisterOperand(0));
-  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
-  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
-  FeedbackSlot slot = iterator->GetSlotOperand(3);
-
   HintsVector arguments(zone());
   // The receiver is either given in the first register or it is implicitly
   // the {undefined} value.
   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
-    Hints receiver(zone());
-    receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
-    arguments.push_back(receiver);
+    arguments.push_back(Hints::SingleConstant(
+        broker()->isolate()->factory()->undefined_value(), zone()));
   }
-  environment()->ExportRegisterHints(first_reg, reg_count, arguments);
+  environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
 
   ProcessCallOrConstruct(callee, base::nullopt, arguments, slot);
 }
 
 void SerializerForBackgroundCompilation::ProcessApiCall(
     Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
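+  // Serialize the CallFunctionTemplate builtins that the call reducer may
+  // embed to check access and receiver compatibility for API calls.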
+  ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                          Builtins::kCallFunctionTemplate_CheckAccess));
+  ObjectRef(broker(),
+            broker()->isolate()->builtins()->builtin_handle(
+                Builtins::kCallFunctionTemplate_CheckCompatibleReceiver));
+  ObjectRef(
+      broker(),
+      broker()->isolate()->builtins()->builtin_handle(
+          Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver));
+
   FunctionTemplateInfoRef target_template_info(
       broker(), handle(target->function_data(), broker()->isolate()));
   if (!target_template_info.has_call_code()) return;
@@ -1576,7 +1882,7 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
     if (hint->IsUndefined()) {
       // The receiver is the global proxy.
       Handle<JSGlobalProxy> global_proxy =
-          broker()->native_context().global_proxy_object().object();
+          broker()->target_native_context().global_proxy_object().object();
       ProcessReceiverMapForApiCall(
           target_template_info,
           handle(global_proxy->map(), broker()->isolate()));
@@ -1596,40 +1902,62 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
 }
 
 void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall(
-    FunctionTemplateInfoRef& target, Handle<Map> receiver) {
-  if (receiver->is_access_check_needed()) {
-    return;
+    FunctionTemplateInfoRef target, Handle<Map> receiver) {
+  if (!receiver->is_access_check_needed()) {
+    MapRef receiver_map(broker(), receiver);
+    TRACE_BROKER(broker(), "Serializing holder for target:" << target);
+    target.LookupHolderOfExpectedType(receiver_map,
+                                      SerializationPolicy::kSerializeIfNeeded);
   }
+}
 
-  MapRef receiver_map(broker(), receiver);
-  TRACE_BROKER(broker(), "Serializing holder for target:" << target);
-
-  target.LookupHolderOfExpectedType(receiver_map, true);
+void SerializerForBackgroundCompilation::ProcessHintsForObjectCreate(
+    Hints const& prototype) {
+  for (Handle<Object> constant_handle : prototype.constants()) {
+    ObjectRef constant(broker(), constant_handle);
+    if (constant.IsJSObject()) constant.AsJSObject().SerializeObjectCreateMap();
+  }
 }
 
 void SerializerForBackgroundCompilation::ProcessBuiltinCall(
-    Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
+    Handle<SharedFunctionInfo> target, const HintsVector& arguments,
+    SpeculationMode speculation_mode) {
   DCHECK(target->HasBuiltinId());
   const int builtin_id = target->builtin_id();
   const char* name = Builtins::name(builtin_id);
   TRACE_BROKER(broker(), "Serializing for call to builtin " << name);
   switch (builtin_id) {
+    case Builtins::kObjectCreate: {
+      if (arguments.size() >= 2) {
+        ProcessHintsForObjectCreate(arguments[1]);
+      } else {
+        ProcessHintsForObjectCreate(Hints::SingleConstant(
+            broker()->isolate()->factory()->undefined_value(), zone()));
+      }
+      break;
+    }
     case Builtins::kPromisePrototypeCatch: {
       // For JSCallReducer::ReducePromisePrototypeCatch.
-      CHECK_GE(arguments.size(), 1);
-      ProcessMapHintsForPromises(arguments[0]);
+      if (speculation_mode != SpeculationMode::kDisallowSpeculation) {
+        CHECK_GE(arguments.size(), 1);
+        ProcessMapHintsForPromises(arguments[0]);
+      }
       break;
     }
     case Builtins::kPromisePrototypeFinally: {
       // For JSCallReducer::ReducePromisePrototypeFinally.
-      CHECK_GE(arguments.size(), 1);
-      ProcessMapHintsForPromises(arguments[0]);
+      if (speculation_mode != SpeculationMode::kDisallowSpeculation) {
+        CHECK_GE(arguments.size(), 1);
+        ProcessMapHintsForPromises(arguments[0]);
+      }
       break;
     }
     case Builtins::kPromisePrototypeThen: {
       // For JSCallReducer::ReducePromisePrototypeThen.
-      CHECK_GE(arguments.size(), 1);
-      ProcessMapHintsForPromises(arguments[0]);
+      if (speculation_mode != SpeculationMode::kDisallowSpeculation) {
+        CHECK_GE(arguments.size(), 1);
+        ProcessMapHintsForPromises(arguments[0]);
+      }
       break;
     }
     case Builtins::kPromiseResolveTrampoline:
@@ -1648,30 +1976,142 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
         ProcessHintsForPromiseResolve(resolution_hints);
       }
       break;
-    case Builtins::kRegExpPrototypeTest: {
+    case Builtins::kRegExpPrototypeTest:
       // For JSCallReducer::ReduceRegExpPrototypeTest.
-      if (arguments.size() >= 1) {
+      if (arguments.size() >= 1 &&
+          speculation_mode != SpeculationMode::kDisallowSpeculation) {
         Hints const& regexp_hints = arguments[0];
         ProcessHintsForRegExpTest(regexp_hints);
       }
       break;
-    }
+    case Builtins::kArrayEvery:
+    case Builtins::kArrayFilter:
+    case Builtins::kArrayForEach:
+    case Builtins::kArrayPrototypeFind:
+    case Builtins::kArrayPrototypeFindIndex:
+    case Builtins::kArrayMap:
+    case Builtins::kArrayReduce:
+    case Builtins::kArrayReduceRight:
+    case Builtins::kArraySome:
+      if (arguments.size() >= 2 &&
+          speculation_mode != SpeculationMode::kDisallowSpeculation) {
+        Hints const& callback_hints = arguments[1];
+        ProcessHintsForFunctionCall(callback_hints);
+      }
+      break;
+    case Builtins::kFunctionPrototypeApply:
     case Builtins::kFunctionPrototypeCall:
+    case Builtins::kPromiseConstructor:
+      // TODO(mslekova): Since the reducers for all of these introduce a
+      // JSCall/JSConstruct that will again get optimized by the JSCallReducer,
+      // we might have to do all the serialization that we do for that here
+      // as well. The only difference is that the new JSCall/JSConstruct
+      // has speculation disabled, causing the JSCallReducer to do much less
+      // work. To account for that, ProcessCallOrConstruct should have a way of
+      // taking the speculation mode as an argument rather than getting that
+      // from the feedback. (Also applies to Reflect.apply and
+      // Reflect.construct.)
       if (arguments.size() >= 1) {
-        Hints const& target_hints = arguments[0];
-        ProcessHintsForFunctionCall(target_hints);
+        ProcessHintsForFunctionCall(arguments[0]);
+      }
+      break;
+    case Builtins::kReflectApply:
+    case Builtins::kReflectConstruct:
+      if (arguments.size() >= 2) {
+        ProcessHintsForFunctionCall(arguments[1]);
+      }
+      break;
+    case Builtins::kObjectPrototypeIsPrototypeOf:
+      if (arguments.size() >= 2) {
+        ProcessHintsForHasInPrototypeChain(arguments[1]);
       }
       break;
+    case Builtins::kFunctionPrototypeHasInstance:
+      // For JSCallReducer::ReduceFunctionPrototypeHasInstance.
+      if (arguments.size() >= 2) {
+        ProcessHintsForOrdinaryHasInstance(arguments[0], arguments[1]);
+      }
+      break;
+    case Builtins::kFastFunctionPrototypeBind:
+      if (arguments.size() >= 1 &&
+          speculation_mode != SpeculationMode::kDisallowSpeculation) {
+        ProcessHintsForFunctionBind(arguments[0]);
+      }
+      break;
+    case Builtins::kObjectGetPrototypeOf:
+    case Builtins::kReflectGetPrototypeOf:
+      if (arguments.size() >= 2) {
+        ProcessHintsForObjectGetPrototype(arguments[1]);
+      } else {
+        Hints undefined_hint = Hints::SingleConstant(
+            broker()->isolate()->factory()->undefined_value(), zone());
+        ProcessHintsForObjectGetPrototype(undefined_hint);
+      }
+      break;
+    case Builtins::kObjectPrototypeGetProto:
+      if (arguments.size() >= 1) {
+        ProcessHintsForObjectGetPrototype(arguments[0]);
+      }
+      break;
+    case Builtins::kMapIteratorPrototypeNext:
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kOrderedHashTableHealIndex));
+      ObjectRef(broker(),
+                broker()->isolate()->factory()->empty_ordered_hash_map());
+      break;
+    case Builtins::kSetIteratorPrototypeNext:
+      ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(
+                              Builtins::kOrderedHashTableHealIndex));
+      ObjectRef(broker(),
+                broker()->isolate()->factory()->empty_ordered_hash_set());
+      break;
     default:
       break;
   }
 }
 
+void SerializerForBackgroundCompilation::ProcessHintsForOrdinaryHasInstance(
+    Hints const& constructor_hints, Hints const& instance_hints) {
+  bool walk_prototypes = false;
+  for (Handle<Object> constructor : constructor_hints.constants()) {
+    // For JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance.
+    if (constructor->IsHeapObject()) {
+      ProcessConstantForOrdinaryHasInstance(
+          HeapObjectRef(broker(), constructor), &walk_prototypes);
+    }
+  }
+  // For JSNativeContextSpecialization::ReduceJSHasInPrototypeChain.
+  if (walk_prototypes) ProcessHintsForHasInPrototypeChain(instance_hints);
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForHasInPrototypeChain(
+    Hints const& instance_hints) {
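+  // Walk each hinted map up its prototype chain, serializing every JSObject
+  // map's prototype so the chain walk can later be done concurrently.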
+  auto processMap = [&](Handle<Map> map_handle) {
+    MapRef map(broker(), map_handle);
+    while (map.IsJSObjectMap()) {
+      map.SerializePrototype();
+      map = map.prototype().map();
+    }
+  };
+
+  for (auto hint : instance_hints.constants()) {
+    if (!hint->IsHeapObject()) continue;
+    Handle<HeapObject> object(Handle<HeapObject>::cast(hint));
+    processMap(handle(object->map(), broker()->isolate()));
+  }
+  for (auto map_hint : instance_hints.maps()) {
+    processMap(map_hint);
+  }
+}
+
 void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve(
     Hints const& resolution_hints) {
   auto processMap = [&](Handle<Map> map) {
-    broker()->CreateAccessInfoForLoadingThen(MapRef(broker(), map),
-                                             dependencies());
+    broker()->GetPropertyAccessInfo(
+        MapRef(broker(), map),
+        NameRef(broker(), broker()->isolate()->factory()->then_string()),
+        AccessMode::kLoad, dependencies(),
+        SerializationPolicy::kSerializeIfNeeded);
   };
 
   for (auto hint : resolution_hints.constants()) {
@@ -1701,15 +2141,18 @@ void SerializerForBackgroundCompilation::ProcessMapHintsForPromises(
 
 PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest(
     MapRef map) {
-  PropertyAccessInfo ai_exec =
-      broker()->CreateAccessInfoForLoadingExec(map, dependencies());
+  PropertyAccessInfo ai_exec = broker()->GetPropertyAccessInfo(
+      map, NameRef(broker(), broker()->isolate()->factory()->exec_string()),
+      AccessMode::kLoad, dependencies(),
+      SerializationPolicy::kSerializeIfNeeded);
 
   Handle<JSObject> holder;
   if (ai_exec.IsDataConstant() && ai_exec.holder().ToHandle(&holder)) {
     // The property is on the prototype chain.
     JSObjectRef holder_ref(broker(), holder);
-    holder_ref.GetOwnProperty(ai_exec.field_representation(),
-                              ai_exec.field_index(), true);
+    holder_ref.GetOwnDataProperty(ai_exec.field_representation(),
+                                  ai_exec.field_index(),
+                                  SerializationPolicy::kSerializeIfNeeded);
   }
   return ai_exec;
 }
@@ -1726,8 +2169,9 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
     if (ai_exec.IsDataConstant() && !ai_exec.holder().ToHandle(&holder)) {
       // The property is on the object itself.
       JSObjectRef holder_ref(broker(), regexp);
-      holder_ref.GetOwnProperty(ai_exec.field_representation(),
-                                ai_exec.field_index(), true);
+      holder_ref.GetOwnDataProperty(ai_exec.field_representation(),
+                                    ai_exec.field_index(),
+                                    SerializationPolicy::kSerializeIfNeeded);
     }
   }
 
@@ -1740,9 +2184,50 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
 void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall(
     Hints const& target_hints) {
   for (auto constant : target_hints.constants()) {
+    if (constant->IsJSFunction()) JSFunctionRef(broker(), constant).Serialize();
+  }
+}
+
+namespace {
+void ProcessMapForFunctionBind(MapRef map) {
+  map.SerializePrototype();
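+  // Bound functions read the target's "length" and "name" from fixed
+  // descriptor indices; serialize those descriptors if the map has them.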
+  int min_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex,
+                                   JSFunction::kNameDescriptorIndex) +
+                            1;
+  if (map.NumberOfOwnDescriptors() >= min_nof_descriptors) {
+    map.SerializeOwnDescriptor(JSFunction::kLengthDescriptorIndex);
+    map.SerializeOwnDescriptor(JSFunction::kNameDescriptorIndex);
+  }
+}
+}  // namespace
+
+void SerializerForBackgroundCompilation::ProcessHintsForFunctionBind(
+    Hints const& receiver_hints) {
+  for (auto constant : receiver_hints.constants()) {
     if (!constant->IsJSFunction()) continue;
-    JSFunctionRef func(broker(), constant);
-    func.Serialize();
+    JSFunctionRef function(broker(), constant);
+    function.Serialize();
+    ProcessMapForFunctionBind(function.map());
+  }
+
+  for (auto map : receiver_hints.maps()) {
+    if (!map->IsJSFunctionMap()) continue;
+    MapRef map_ref(broker(), map);
+    ProcessMapForFunctionBind(map_ref);
+  }
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForObjectGetPrototype(
+    Hints const& object_hints) {
+  for (auto constant : object_hints.constants()) {
+    if (!constant->IsHeapObject()) continue;
+    HeapObjectRef object(broker(), constant);
+    object.map().SerializePrototype();
+  }
+
+  for (auto map : object_hints.maps()) {
+    MapRef map_ref(broker(), map);
+    map_ref.SerializePrototype();
   }
 }
 
@@ -1791,79 +2276,77 @@ void SerializerForBackgroundCompilation::VisitSwitchOnSmiNoFeedback(
 
 void SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState(
     interpreter::BytecodeArrayIterator* iterator) {
-  for (const auto& target : GetBytecodeAnalysis(false).resume_jump_targets()) {
+  for (const auto& target : GetBytecodeAnalysis().resume_jump_targets()) {
     ContributeToJumpTargetEnvironment(target.target_offset());
   }
 }
 
 void SerializerForBackgroundCompilation::Environment::ExportRegisterHints(
-    interpreter::Register first, size_t count, HintsVector& dst) {
+    interpreter::Register first, size_t count, HintsVector* dst) {
   const int reg_base = first.index();
   for (int i = 0; i < static_cast<int>(count); ++i) {
-    dst.push_back(register_hints(interpreter::Register(reg_base + i)));
+    dst->push_back(register_hints(interpreter::Register(reg_base + i)));
   }
 }
 
 void SerializerForBackgroundCompilation::VisitConstruct(
     BytecodeArrayIterator* iterator) {
-  const Hints& callee =
+  Hints const& new_target = environment()->accumulator_hints();
+  Hints const& callee =
       environment()->register_hints(iterator->GetRegisterOperand(0));
   interpreter::Register first_reg = iterator->GetRegisterOperand(1);
   size_t reg_count = iterator->GetRegisterCountOperand(2);
   FeedbackSlot slot = iterator->GetSlotOperand(3);
-  const Hints& new_target = environment()->accumulator_hints();
 
   HintsVector arguments(zone());
-  environment()->ExportRegisterHints(first_reg, reg_count, arguments);
+  environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
 
   ProcessCallOrConstruct(callee, new_target, arguments, slot);
 }
 
 void SerializerForBackgroundCompilation::VisitConstructWithSpread(
     BytecodeArrayIterator* iterator) {
-  const Hints& callee =
+  Hints const& new_target = environment()->accumulator_hints();
+  Hints const& callee =
       environment()->register_hints(iterator->GetRegisterOperand(0));
   interpreter::Register first_reg = iterator->GetRegisterOperand(1);
   size_t reg_count = iterator->GetRegisterCountOperand(2);
   FeedbackSlot slot = iterator->GetSlotOperand(3);
-  const Hints& new_target = environment()->accumulator_hints();
 
   HintsVector arguments(zone());
-  environment()->ExportRegisterHints(first_reg, reg_count, arguments);
+  environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
 
   ProcessCallOrConstruct(callee, new_target, arguments, slot, true);
 }
 
-GlobalAccessFeedback const*
-SerializerForBackgroundCompilation::ProcessFeedbackForGlobalAccess(
-    FeedbackSlot slot) {
-  if (slot.IsInvalid()) return nullptr;
-  if (environment()->function().feedback_vector().is_null()) return nullptr;
-  FeedbackSource source(environment()->function().feedback_vector(), slot);
+void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot,
+                                                             bool is_load) {
+  if (slot.IsInvalid() || feedback_vector().is_null()) return;
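+  // Loads and stores share the feedback lookup; only loads can contribute
+  // constant hints to the accumulator.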
+  FeedbackSource source(feedback_vector(), slot);
+  ProcessedFeedback const& feedback =
+      broker()->ProcessFeedbackForGlobalAccess(source);
 
-  if (broker()->HasFeedback(source)) {
-    return broker()->GetGlobalAccessFeedback(source);
+  if (is_load) {
+    environment()->accumulator_hints().Clear();
+    if (feedback.kind() == ProcessedFeedback::kGlobalAccess) {
+      // We may be able to contribute to accumulator constant hints.
+      base::Optional<ObjectRef> value =
+          feedback.AsGlobalAccess().GetConstantHint();
+      if (value.has_value()) {
+        environment()->accumulator_hints().AddConstant(value->object());
+      }
+    } else {
+      DCHECK(feedback.IsInsufficient());
+    }
   }
-
-  const GlobalAccessFeedback* feedback =
-      broker()->ProcessFeedbackForGlobalAccess(source);
-  broker()->SetFeedback(source, feedback);
-  return feedback;
 }
 
 void SerializerForBackgroundCompilation::VisitLdaGlobal(
     BytecodeArrayIterator* iterator) {
+  NameRef(broker(),
+          iterator->GetConstantForIndexOperand(0, broker()->isolate()));
   FeedbackSlot slot = iterator->GetSlotOperand(1);
-
-  environment()->accumulator_hints().Clear();
-  GlobalAccessFeedback const* feedback = ProcessFeedbackForGlobalAccess(slot);
-  if (feedback != nullptr) {
-    // We may be able to contribute to accumulator constant hints.
-    base::Optional<ObjectRef> value = feedback->GetConstantHint();
-    if (value.has_value()) {
-      environment()->accumulator_hints().AddConstant(value->object());
-    }
-  }
+  ProcessGlobalAccess(slot, true);
 }
 
 void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof(
@@ -1871,6 +2354,20 @@ void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof(
   VisitLdaGlobal(iterator);
 }
 
+void SerializerForBackgroundCompilation::VisitLdaLookupSlot(
+    BytecodeArrayIterator* iterator) {
+  ObjectRef(broker(),
+            iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+  environment()->accumulator_hints().Clear();
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupSlotInsideTypeof(
+    BytecodeArrayIterator* iterator) {
+  ObjectRef(broker(),
+            iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+  environment()->accumulator_hints().Clear();
+}
+
 void SerializerForBackgroundCompilation::ProcessCheckContextExtensions(
     int depth) {
   // For BytecodeGraphBuilder::CheckContextExtensions.
@@ -1900,18 +2397,22 @@ void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof(
 
 void SerializerForBackgroundCompilation::VisitStaGlobal(
     BytecodeArrayIterator* iterator) {
+  NameRef(broker(),
+          iterator->GetConstantForIndexOperand(0, broker()->isolate()));
   FeedbackSlot slot = iterator->GetSlotOperand(1);
-  ProcessFeedbackForGlobalAccess(slot);
+  ProcessGlobalAccess(slot, false);
 }
 
 void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot(
     BytecodeArrayIterator* iterator) {
   const int slot_index = iterator->GetIndexOperand(1);
   const int depth = iterator->GetUnsignedImmediateOperand(2);
+  NameRef(broker(),
+          iterator->GetConstantForIndexOperand(0, broker()->isolate()));
   ProcessCheckContextExtensions(depth);
-  Hints& context_hints = environment()->current_context_hints();
   environment()->accumulator_hints().Clear();
-  ProcessContextAccess(context_hints, slot_index, depth, kIgnoreSlot);
+  ProcessContextAccess(environment()->current_context_hints(), slot_index,
+                       depth, kIgnoreSlot);
 }
 
 void SerializerForBackgroundCompilation::VisitLdaLookupContextSlot(
@@ -1924,6 +2425,7 @@ void SerializerForBackgroundCompilation::VisitLdaLookupContextSlotInsideTypeof(
   ProcessLdaLookupContextSlot(iterator);
 }
 
+// TODO(neis): Avoid duplicating this.
 namespace {
 template <class MapContainer>
 MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
@@ -1939,220 +2441,334 @@ MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
 }
 }  // namespace
 
-ElementAccessFeedback const*
-SerializerForBackgroundCompilation::ProcessFeedbackMapsForElementAccess(
-    const MapHandles& maps, AccessMode mode,
-    KeyedAccessMode const& keyed_mode) {
-  ElementAccessFeedback const* result =
-      broker()->ProcessFeedbackMapsForElementAccess(maps, keyed_mode);
-  for (ElementAccessFeedback::MapIterator it = result->all_maps(broker());
-       !it.done(); it.advance()) {
-    switch (mode) {
-      case AccessMode::kHas:
-      case AccessMode::kLoad:
-        it.current().SerializeForElementLoad();
-        break;
-      case AccessMode::kStore:
-        it.current().SerializeForElementStore();
-        break;
-      case AccessMode::kStoreInLiteral:
-        // This operation is fairly local and simple, nothing to serialize.
-        break;
-    }
+void SerializerForBackgroundCompilation::ProcessCompareOperation(
+    FeedbackSlot slot) {
+  if (slot.IsInvalid() || feedback_vector().is_null()) return;
+  FeedbackSource source(feedback_vector(), slot);
+  ProcessedFeedback const& feedback =
+      broker()->ProcessFeedbackForCompareOperation(source);
+  if (BailoutOnUninitialized(feedback)) return;
+  environment()->accumulator_hints().Clear();
+}
+
+void SerializerForBackgroundCompilation::ProcessForIn(FeedbackSlot slot) {
+  if (slot.IsInvalid() || feedback_vector().is_null()) return;
+  FeedbackSource source(feedback_vector(), slot);
+  ProcessedFeedback const& feedback = broker()->ProcessFeedbackForForIn(source);
+  if (BailoutOnUninitialized(feedback)) return;
+  environment()->accumulator_hints().Clear();
+}
+
+void SerializerForBackgroundCompilation::ProcessUnaryOrBinaryOperation(
+    FeedbackSlot slot, bool honor_bailout_on_uninitialized) {
+  if (slot.IsInvalid() || feedback_vector().is_null()) return;
+  FeedbackSource source(feedback_vector(), slot);
+  // Internally, V8 uses binary-op feedback for unary ops as well.
+  ProcessedFeedback const& feedback =
+      broker()->ProcessFeedbackForBinaryOperation(source);
+  if (honor_bailout_on_uninitialized && BailoutOnUninitialized(feedback)) {
+    return;
   }
-  return result;
+  environment()->accumulator_hints().Clear();
 }
 
-NamedAccessFeedback const*
-SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
-    const MapHandles& maps, AccessMode mode, NameRef const& name) {
-  ZoneVector<PropertyAccessInfo> access_infos(broker()->zone());
-  for (Handle<Map> map : maps) {
-    MapRef map_ref(broker(), map);
-    ProcessMapForNamedPropertyAccess(map_ref, name);
-    AccessInfoFactory access_info_factory(broker(), dependencies(),
-                                          broker()->zone());
-    PropertyAccessInfo info(access_info_factory.ComputePropertyAccessInfo(
-        map, name.object(), mode));
-    access_infos.push_back(info);
-
-    // TODO(turbofan): We want to take receiver hints into account as well,
-    // not only the feedback maps.
-    // For JSNativeContextSpecialization::InlinePropertySetterCall
-    // and InlinePropertyGetterCall.
-    if (info.IsAccessorConstant() && !info.constant().is_null()) {
-      if (info.constant()->IsJSFunction()) {
-        // For JSCallReducer::ReduceCallApiFunction.
-        Handle<SharedFunctionInfo> sfi(
-            handle(Handle<JSFunction>::cast(info.constant())->shared(),
-                   broker()->isolate()));
-        if (sfi->IsApiFunction()) {
-          FunctionTemplateInfoRef fti_ref(
-              broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
-          if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
-          ProcessReceiverMapForApiCall(fti_ref, map);
-        }
-      } else {
+PropertyAccessInfo
+SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
+    MapRef receiver_map, NameRef const& name, AccessMode access_mode,
+    base::Optional<JSObjectRef> receiver, Hints* new_accumulator_hints) {
+  // For JSNativeContextSpecialization::InferReceiverRootMap.
+  receiver_map.SerializeRootMap();
+
+  // For JSNativeContextSpecialization::ReduceNamedAccess.
+  if (receiver_map.IsMapOfTargetGlobalProxy()) {
+    broker()->target_native_context().global_proxy_object().GetPropertyCell(
+        name, SerializationPolicy::kSerializeIfNeeded);
+  }
+
+  PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
+      receiver_map, name, access_mode, dependencies(),
+      SerializationPolicy::kSerializeIfNeeded);
+
+  // For JSNativeContextSpecialization::InlinePropertySetterCall
+  // and InlinePropertyGetterCall.
+  if (access_info.IsAccessorConstant() && !access_info.constant().is_null()) {
+    if (access_info.constant()->IsJSFunction()) {
+      JSFunctionRef function(broker(), access_info.constant());
+
+      // For JSCallReducer::ReduceJSCall.
+      function.Serialize();
+
+      // For JSCallReducer::ReduceCallApiFunction.
+      Handle<SharedFunctionInfo> sfi = function.shared().object();
+      if (sfi->IsApiFunction()) {
         FunctionTemplateInfoRef fti_ref(
-            broker(), Handle<FunctionTemplateInfo>::cast(info.constant()));
+            broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
         if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
+        ProcessReceiverMapForApiCall(fti_ref, receiver_map.object());
       }
+    } else if (access_info.constant()->IsJSBoundFunction()) {
+      JSBoundFunctionRef function(broker(), access_info.constant());
+
+      // For JSCallReducer::ReduceJSCall.
+      function.Serialize();
+    } else {
+      FunctionTemplateInfoRef fti(broker(), access_info.constant());
+      if (fti.has_call_code()) fti.SerializeCallCode();
     }
   }
 
-  DCHECK(!access_infos.empty());
-  return new (broker()->zone()) NamedAccessFeedback(name, access_infos);
-}
+  // For PropertyAccessBuilder::TryBuildLoadConstantDataField.
+  if (access_mode == AccessMode::kLoad) {
+    if (access_info.IsDataConstant()) {
+      base::Optional<JSObjectRef> holder;
+      Handle<JSObject> prototype;
+      if (access_info.holder().ToHandle(&prototype)) {
+        holder = JSObjectRef(broker(), prototype);
+      } else {
+        CHECK_IMPLIES(receiver.has_value(),
+                      receiver->map().equals(receiver_map));
+        holder = receiver;
+      }
 
-void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
-    FeedbackSlot slot, AccessMode mode, base::Optional<NameRef> static_name) {
-  if (slot.IsInvalid()) return;
-  if (environment()->function().feedback_vector().is_null()) return;
+      if (holder.has_value()) {
+        base::Optional<ObjectRef> constant(holder->GetOwnDataProperty(
+            access_info.field_representation(), access_info.field_index(),
+            SerializationPolicy::kSerializeIfNeeded));
+        if (constant.has_value()) {
+          new_accumulator_hints->AddConstant(constant->object());
+        }
+      }
+    }
+  }
+
+  return access_info;
+}
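
The kLoad branch above feeds TryBuildLoadConstantDataField: when the property is a constant data field, its value can be recorded as an accumulator hint, read either from the holder named by the access info or, failing that, from the concrete receiver if the serializer knows one. A simplified model of that holder fallback (Obj and ConstantFieldValue are illustrative, not V8 types):

    #include <iostream>
    #include <optional>
    #include <string>

    // Illustrative model of the holder fallback above.
    struct Obj {
      std::string constant_field;
    };

    std::optional<std::string> ConstantFieldValue(
        std::optional<Obj> proto_holder, std::optional<Obj> receiver) {
      // Prefer the prototype holder named by the access info; otherwise the
      // concrete receiver, if known, is the holder.
      std::optional<Obj> holder = proto_holder ? proto_holder : receiver;
      if (!holder) return std::nullopt;  // receiver unknown: nothing to record
      return holder->constant_field;
    }

    int main() {
      Obj proto{"on-proto"};
      Obj recv{"on-receiver"};
      std::cout << *ConstantFieldValue(proto, recv) << "\n";  // on-proto
      std::cout << *ConstantFieldValue(std::nullopt, recv) << "\n";  // on-receiver
    }
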
 
-  FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
-  FeedbackSource source(nexus);
-  if (broker()->HasFeedback(source)) return;
+void SerializerForBackgroundCompilation::VisitLdaKeyedProperty(
+    BytecodeArrayIterator* iterator) {
+  Hints const& key = environment()->accumulator_hints();
+  Hints const& receiver =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  FeedbackSlot slot = iterator->GetSlotOperand(1);
+  ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kLoad, true);
+}
 
-  if (nexus.ic_state() == UNINITIALIZED) {
-    broker()->SetFeedback(source,
-                          new (broker()->zone()) InsufficientFeedback());
+void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess(
+    Hints const& receiver, Hints const& key, FeedbackSlot slot,
+    AccessMode access_mode, bool honor_bailout_on_uninitialized) {
+  if (slot.IsInvalid() || feedback_vector().is_null()) return;
+  FeedbackSource source(feedback_vector(), slot);
+  ProcessedFeedback const& feedback =
+      broker()->ProcessFeedbackForPropertyAccess(source, access_mode,
+                                                 base::nullopt);
+  if (honor_bailout_on_uninitialized && BailoutOnUninitialized(feedback)) {
     return;
   }
 
-  MapHandles maps;
-  if (nexus.ExtractMaps(&maps) == 0) {  // Megamorphic.
-    broker()->SetFeedback(source, nullptr);
-    return;
+  Hints new_accumulator_hints(zone());
+  switch (feedback.kind()) {
+    case ProcessedFeedback::kElementAccess:
+      ProcessElementAccess(receiver, key, feedback.AsElementAccess(),
+                           access_mode);
+      break;
+    case ProcessedFeedback::kNamedAccess:
+      ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode,
+                         &new_accumulator_hints);
+      break;
+    case ProcessedFeedback::kInsufficient:
+      break;
+    default:
+      UNREACHABLE();
   }
 
-  maps = GetRelevantReceiverMaps(broker()->isolate(), maps);
-  if (maps.empty()) {
-    broker()->SetFeedback(source,
-                          new (broker()->zone()) InsufficientFeedback());
-    return;
+  if (access_mode == AccessMode::kLoad) {
+    environment()->accumulator_hints().Clear();
+    environment()->accumulator_hints().Add(new_accumulator_hints);
+  } else {
+    DCHECK(new_accumulator_hints.IsEmpty());
+  }
+}
+
+void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
+    Hints receiver, NameRef const& name, FeedbackSlot slot,
+    AccessMode access_mode) {
+  if (slot.IsInvalid() || feedback_vector().is_null()) return;
+  FeedbackSource source(feedback_vector(), slot);
+  ProcessedFeedback const& feedback =
+      broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name);
+  if (BailoutOnUninitialized(feedback)) return;
+
+  Hints new_accumulator_hints(zone());
+  switch (feedback.kind()) {
+    case ProcessedFeedback::kNamedAccess:
+      DCHECK(name.equals(feedback.AsNamedAccess().name()));
+      ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode,
+                         &new_accumulator_hints);
+      break;
+    case ProcessedFeedback::kInsufficient:
+      break;
+    default:
+      UNREACHABLE();
   }
 
-  ProcessedFeedback const* processed = nullptr;
-  base::Optional<NameRef> name =
-      static_name.has_value() ? static_name : broker()->GetNameFeedback(nexus);
-  if (name.has_value()) {
-    processed = ProcessFeedbackMapsForNamedAccess(maps, mode, *name);
-  } else if (nexus.GetKeyType() == ELEMENT) {
-    DCHECK_NE(nexus.ic_state(), MEGAMORPHIC);
-    processed = ProcessFeedbackMapsForElementAccess(
-        maps, mode, KeyedAccessMode::FromNexus(nexus));
+  if (access_mode == AccessMode::kLoad) {
+    environment()->accumulator_hints().Clear();
+    environment()->accumulator_hints().Add(new_accumulator_hints);
+  } else {
+    DCHECK(new_accumulator_hints.IsEmpty());
   }
-  broker()->SetFeedback(source, processed);
 }
 
-void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess(
-    Hints const& receiver, Hints const& key, FeedbackSlot slot,
-    AccessMode mode) {
-  if (BailoutOnUninitialized(slot)) return;
-  ProcessFeedbackForPropertyAccess(slot, mode, base::nullopt);
+void SerializerForBackgroundCompilation::ProcessNamedAccess(
+    Hints receiver, NamedAccessFeedback const& feedback, AccessMode access_mode,
+    Hints* new_accumulator_hints) {
+  for (Handle<Map> map : feedback.AsNamedAccess().maps()) {
+    MapRef map_ref(broker(), map);
+    ProcessMapForNamedPropertyAccess(map_ref, feedback.name(), access_mode,
+                                     base::nullopt, new_accumulator_hints);
+  }
+
+  for (Handle<Map> map :
+       GetRelevantReceiverMaps(broker()->isolate(), receiver.maps())) {
+    MapRef map_ref(broker(), map);
+    ProcessMapForNamedPropertyAccess(map_ref, feedback.name(), access_mode,
+                                     base::nullopt, new_accumulator_hints);
+  }
+
+  JSGlobalProxyRef global_proxy =
+      broker()->target_native_context().global_proxy_object();
+  for (Handle<Object> hint : receiver.constants()) {
+    ObjectRef object(broker(), hint);
+    if (access_mode == AccessMode::kLoad && object.IsJSObject()) {
+      MapRef map_ref = object.AsJSObject().map();
+      ProcessMapForNamedPropertyAccess(map_ref, feedback.name(), access_mode,
+                                       object.AsJSObject(),
+                                       new_accumulator_hints);
+    }
+    // For JSNativeContextSpecialization::ReduceNamedAccessFromNexus.
+    if (object.equals(global_proxy)) {
+      // TODO(neis): Record accumulator hint? Also for string.length and maybe
+      // more.
+      global_proxy.GetPropertyCell(feedback.name(),
+                                   SerializationPolicy::kSerializeIfNeeded);
+    }
+    // For JSNativeContextSpecialization::ReduceJSLoadNamed.
+    if (access_mode == AccessMode::kLoad && object.IsJSFunction() &&
+        feedback.name().equals(ObjectRef(
+            broker(), broker()->isolate()->factory()->prototype_string()))) {
+      JSFunctionRef function = object.AsJSFunction();
+      function.Serialize();
+      if (new_accumulator_hints != nullptr && function.has_prototype()) {
+        new_accumulator_hints->AddConstant(function.prototype().object());
+      }
+    }
+  }
+}
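
Among the constant-receiver special cases above, the "prototype" load is the one that grows the accumulator hints: loading the prototype property from a known function lets ReduceJSLoadNamed later fold the load to the prototype object itself. A toy model of that hint recording (Function and AddPrototypeHint are illustrative):

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy model: a load of "prototype" from a constant function records the
    // prototype object as a constant accumulator hint for later bytecodes.
    struct Function {
      std::string prototype;
    };

    void AddPrototypeHint(const Function& fn, const std::string& name,
                          std::vector<std::string>* hints) {
      if (name == "prototype") hints->push_back(fn.prototype);
    }

    int main() {
      std::vector<std::string> hints;
      AddPrototypeHint(Function{"MyClass.prototype"}, "prototype", &hints);
      std::cout << hints.size() << "\n";  // 1
    }
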
+
+void SerializerForBackgroundCompilation::ProcessElementAccess(
+    Hints receiver, Hints key, ElementAccessFeedback const& feedback,
+    AccessMode access_mode) {
+  for (auto const& group : feedback.transition_groups()) {
+    for (Handle<Map> map_handle : group) {
+      MapRef map(broker(), map_handle);
+      switch (access_mode) {
+        case AccessMode::kHas:
+        case AccessMode::kLoad:
+          map.SerializeForElementLoad();
+          break;
+        case AccessMode::kStore:
+          map.SerializeForElementStore();
+          break;
+        case AccessMode::kStoreInLiteral:
+          // This operation is fairly local and simple; nothing to serialize.
+          break;
+      }
+    }
+  }
 
   for (Handle<Object> hint : receiver.constants()) {
     ObjectRef receiver_ref(broker(), hint);
 
+    // For JSNativeContextSpecialization::InferReceiverRootMap.
+    if (receiver_ref.IsHeapObject()) {
+      receiver_ref.AsHeapObject().map().SerializeRootMap();
+    }
+
     // For JSNativeContextSpecialization::ReduceElementAccess.
     if (receiver_ref.IsJSTypedArray()) {
       receiver_ref.AsJSTypedArray().Serialize();
     }
 
-    // For JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant.
-    if (mode == AccessMode::kLoad || mode == AccessMode::kHas) {
+    // For JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant.
+    if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
       for (Handle<Object> hint : key.constants()) {
         ObjectRef key_ref(broker(), hint);
         // TODO(neis): Do this for integer-HeapNumbers too?
         if (key_ref.IsSmi() && key_ref.AsSmi() >= 0) {
           base::Optional<ObjectRef> element =
-              receiver_ref.GetOwnConstantElement(key_ref.AsSmi(), true);
+              receiver_ref.GetOwnConstantElement(
+                  key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
           if (!element.has_value() && receiver_ref.IsJSArray()) {
            // We didn't find a constant element, but if the receiver is a
            // copy-on-write (cow) array, we can exploit the fact that any
            // future write to an element will replace the whole elements
            // storage.
-            receiver_ref.AsJSArray().GetOwnCowElement(key_ref.AsSmi(), true);
+            receiver_ref.AsJSArray().GetOwnCowElement(
+                key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
           }
         }
       }
     }
   }
 
-  environment()->accumulator_hints().Clear();
-}
-
-void SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
-    MapRef const& map, NameRef const& name) {
-  // For JSNativeContextSpecialization::ReduceNamedAccess.
-  if (map.IsMapOfCurrentGlobalProxy()) {
-    broker()->native_context().global_proxy_object().GetPropertyCell(name,
-                                                                     true);
+  // For JSNativeContextSpecialization::InferReceiverRootMap.
+  for (Handle<Map> map : receiver.maps()) {
+    MapRef map_ref(broker(), map);
+    map_ref.SerializeRootMap();
   }
 }
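
The copy-on-write remark in the load path above relies on COW semantics: a mutation never edits the shared backing store in place, it installs a fresh one, so an element read from the old store remains a valid constant. A self-contained model of that invariant (CowArray is illustrative, not V8's COW elements):

    #include <cassert>
    #include <cstddef>
    #include <memory>
    #include <vector>

    // Copy-on-write model: a write installs a fresh backing store, so any
    // snapshot of the old store stays stable.
    struct CowArray {
      std::shared_ptr<const std::vector<int>> elements;
      void Set(std::size_t i, int v) {
        auto copy = std::make_shared<std::vector<int>>(*elements);
        (*copy)[i] = v;
        elements = std::move(copy);  // the old store is left untouched
      }
    };

    int main() {
      CowArray a{std::make_shared<const std::vector<int>>(
          std::vector<int>{1, 2})};
      auto snapshot = a.elements;  // what the serializer recorded
      a.Set(0, 42);
      assert((*snapshot)[0] == 1);  // unaffected by the later write
      return 0;
    }
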
 
-void SerializerForBackgroundCompilation::VisitLdaKeyedProperty(
+void SerializerForBackgroundCompilation::VisitLdaNamedProperty(
     BytecodeArrayIterator* iterator) {
-  Hints const& key = environment()->accumulator_hints();
   Hints const& receiver =
       environment()->register_hints(iterator->GetRegisterOperand(0));
-  FeedbackSlot slot = iterator->GetSlotOperand(1);
-  ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kLoad);
+  NameRef name(broker(),
+               iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+  FeedbackSlot slot = iterator->GetSlotOperand(2);
+  ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kLoad);
 }
 
-void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
-    Hints const& receiver, NameRef const& name, FeedbackSlot slot,
-    AccessMode mode) {
-  if (BailoutOnUninitialized(slot)) return;
-  ProcessFeedbackForPropertyAccess(slot, mode, name);
-
-  for (Handle<Map> map :
-       GetRelevantReceiverMaps(broker()->isolate(), receiver.maps())) {
-    ProcessMapForNamedPropertyAccess(MapRef(broker(), map), name);
-  }
-
-  JSGlobalProxyRef global_proxy =
-      broker()->native_context().global_proxy_object();
-
-  for (Handle<Object> hint : receiver.constants()) {
-    ObjectRef object(broker(), hint);
-    // For JSNativeContextSpecialization::ReduceNamedAccessFromNexus.
-    if (object.equals(global_proxy)) {
-      global_proxy.GetPropertyCell(name, true);
-    }
-    // For JSNativeContextSpecialization::ReduceJSLoadNamed.
-    if (mode == AccessMode::kLoad && object.IsJSFunction() &&
-        name.equals(ObjectRef(
-            broker(), broker()->isolate()->factory()->prototype_string()))) {
-      object.AsJSFunction().Serialize();
-    }
-  }
-
-  environment()->accumulator_hints().Clear();
+// TODO(neis): Do feedback-independent serialization also for *NoFeedback
+// bytecodes.
+void SerializerForBackgroundCompilation::VisitLdaNamedPropertyNoFeedback(
+    BytecodeArrayIterator* iterator) {
+  NameRef(broker(),
+          iterator->GetConstantForIndexOperand(1, broker()->isolate()));
 }
 
-void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
-    BytecodeArrayIterator* iterator, AccessMode mode) {
+void SerializerForBackgroundCompilation::VisitStaNamedProperty(
+    BytecodeArrayIterator* iterator) {
   Hints const& receiver =
       environment()->register_hints(iterator->GetRegisterOperand(0));
-  Handle<Name> name = Handle<Name>::cast(
-      iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+  NameRef name(broker(),
+               iterator->GetConstantForIndexOperand(1, broker()->isolate()));
   FeedbackSlot slot = iterator->GetSlotOperand(2);
-  ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode);
-}
-
-void SerializerForBackgroundCompilation::VisitLdaNamedProperty(
-    BytecodeArrayIterator* iterator) {
-  ProcessNamedPropertyAccess(iterator, AccessMode::kLoad);
+  ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStore);
 }
 
-void SerializerForBackgroundCompilation::VisitStaNamedProperty(
+void SerializerForBackgroundCompilation::VisitStaNamedPropertyNoFeedback(
     BytecodeArrayIterator* iterator) {
-  ProcessNamedPropertyAccess(iterator, AccessMode::kStore);
+  NameRef(broker(),
+          iterator->GetConstantForIndexOperand(1, broker()->isolate()));
 }
 
 void SerializerForBackgroundCompilation::VisitStaNamedOwnProperty(
     BytecodeArrayIterator* iterator) {
-  ProcessNamedPropertyAccess(iterator, AccessMode::kStoreInLiteral);
+  Hints const& receiver =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  NameRef name(broker(),
+               iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+  FeedbackSlot slot = iterator->GetSlotOperand(2);
+  ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStoreInLiteral);
 }
 
 void SerializerForBackgroundCompilation::VisitTestIn(
@@ -2161,7 +2777,113 @@ void SerializerForBackgroundCompilation::VisitTestIn(
   Hints const& key =
       environment()->register_hints(iterator->GetRegisterOperand(0));
   FeedbackSlot slot = iterator->GetSlotOperand(1);
-  ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kHas);
+  ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kHas, false);
+}
+
+// For JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance.
+void SerializerForBackgroundCompilation::ProcessConstantForOrdinaryHasInstance(
+    HeapObjectRef const& constructor, bool* walk_prototypes) {
+  if (constructor.IsJSBoundFunction()) {
+    constructor.AsJSBoundFunction().Serialize();
+    ProcessConstantForInstanceOf(
+        constructor.AsJSBoundFunction().bound_target_function(),
+        walk_prototypes);
+  } else if (constructor.IsJSFunction()) {
+    constructor.AsJSFunction().Serialize();
+    *walk_prototypes =
+        *walk_prototypes ||
+        (constructor.map().has_prototype_slot() &&
+         constructor.AsJSFunction().has_prototype() &&
+         !constructor.AsJSFunction().PrototypeRequiresRuntimeLookup());
+  }
+}
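
ProcessConstantForOrdinaryHasInstance unwraps JSBoundFunction chains by recursing (via ProcessConstantForInstanceOf) into each bound target, serializing every level so the background thread can replay OrdinaryHasInstance. A minimal model of the unwrapping, collapsing the mutual recursion into one function (Fn and UnwrapBound are illustrative, not V8 types):

    #include <iostream>
    #include <memory>
    #include <string>

    // OrdinaryHasInstance on a bound function recurses into its bound
    // target, so the whole chain is walked eagerly.
    struct Fn {
      std::string name;
      std::shared_ptr<Fn> bound_target;  // null for an ordinary function
    };

    const Fn& UnwrapBound(const Fn& fn) {
      return fn.bound_target ? UnwrapBound(*fn.bound_target) : fn;
    }

    int main() {
      auto base = std::make_shared<Fn>(Fn{"base", nullptr});
      Fn bound_twice{"b2", std::make_shared<Fn>(Fn{"b1", base})};
      std::cout << UnwrapBound(bound_twice).name << "\n";  // base
    }
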
+
+void SerializerForBackgroundCompilation::ProcessConstantForInstanceOf(
+    ObjectRef const& constructor, bool* walk_prototypes) {
+  if (!constructor.IsHeapObject()) return;
+  HeapObjectRef constructor_heap_object = constructor.AsHeapObject();
+
+  PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
+      constructor_heap_object.map(),
+      NameRef(broker(), broker()->isolate()->factory()->has_instance_symbol()),
+      AccessMode::kLoad, dependencies(),
+      SerializationPolicy::kSerializeIfNeeded);
+
+  if (access_info.IsNotFound()) {
+    ProcessConstantForOrdinaryHasInstance(constructor_heap_object,
+                                          walk_prototypes);
+  } else if (access_info.IsDataConstant()) {
+    Handle<JSObject> holder;
+    bool found_on_proto = access_info.holder().ToHandle(&holder);
+    JSObjectRef holder_ref = found_on_proto ? JSObjectRef(broker(), holder)
+                                            : constructor.AsJSObject();
+    base::Optional<ObjectRef> constant = holder_ref.GetOwnDataProperty(
+        access_info.field_representation(), access_info.field_index(),
+        SerializationPolicy::kSerializeIfNeeded);
+    CHECK(constant.has_value());
+    if (constant->IsJSFunction()) {
+      JSFunctionRef function = constant->AsJSFunction();
+      function.Serialize();
+      if (function.shared().HasBuiltinId() &&
+          function.shared().builtin_id() ==
+              Builtins::kFunctionPrototypeHasInstance) {
+        // For JSCallReducer::ReduceFunctionPrototypeHasInstance.
+        ProcessConstantForOrdinaryHasInstance(constructor_heap_object,
+                                              walk_prototypes);
+      }
+    }
+  }
+}
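
ProcessConstantForInstanceOf mirrors the runtime dispatch of instanceof: look up @@hasInstance on the constructor; if it is absent, or it is the default Function.prototype[@@hasInstance] builtin, the ordinary prototype-chain walk applies, and only a genuinely custom handler takes the call path. A sketch of that dispatch (HasInstanceSlot and InstanceOfStrategy are illustrative):

    #include <iostream>
    #include <string>

    // Simplified dispatch model for the @@hasInstance lookup above.
    enum class HasInstanceSlot { kNotFound, kDefaultBuiltin, kCustom };

    std::string InstanceOfStrategy(HasInstanceSlot slot) {
      switch (slot) {
        case HasInstanceSlot::kNotFound:
        case HasInstanceSlot::kDefaultBuiltin:
          return "walk prototype chain";
        case HasInstanceSlot::kCustom:
          return "call custom @@hasInstance";
      }
      return "";
    }

    int main() {
      std::cout << InstanceOfStrategy(HasInstanceSlot::kDefaultBuiltin) << "\n";
    }
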
+
+void SerializerForBackgroundCompilation::VisitTestInstanceOf(
+    BytecodeArrayIterator* iterator) {
+  Hints const& lhs =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  Hints rhs = environment()->accumulator_hints();
+  FeedbackSlot slot = iterator->GetSlotOperand(1);
+  Hints new_accumulator_hints(zone());
+
+  if (slot.IsInvalid() || feedback_vector().is_null()) return;
+  FeedbackSource source(feedback_vector(), slot);
+  ProcessedFeedback const& feedback =
+      broker()->ProcessFeedbackForInstanceOf(source);
+
+  // Incorporate the feedback about the rhs into a copy of its hints to
+  // simplify processing.
+  if (!feedback.IsInsufficient()) {
+    InstanceOfFeedback const& rhs_feedback = feedback.AsInstanceOf();
+    if (rhs_feedback.value().has_value()) {
+      Handle<JSObject> constructor = rhs_feedback.value()->object();
+      rhs.AddConstant(constructor);
+    }
+  }
+
+  bool walk_prototypes = false;
+  for (Handle<Object> constant : rhs.constants()) {
+    ProcessConstantForInstanceOf(ObjectRef(broker(), constant),
+                                 &walk_prototypes);
+  }
+  if (walk_prototypes) ProcessHintsForHasInPrototypeChain(lhs);
+
+  environment()->accumulator_hints().Clear();
+  environment()->accumulator_hints().Add(new_accumulator_hints);
+}
+
+void SerializerForBackgroundCompilation::VisitToNumeric(
+    BytecodeArrayIterator* iterator) {
+  FeedbackSlot slot = iterator->GetSlotOperand(0);
+  ProcessUnaryOrBinaryOperation(slot, false);
+}
+
+void SerializerForBackgroundCompilation::VisitToNumber(
+    BytecodeArrayIterator* iterator) {
+  FeedbackSlot slot = iterator->GetSlotOperand(0);
+  ProcessUnaryOrBinaryOperation(slot, false);
+}
+
+void SerializerForBackgroundCompilation::VisitThrowReferenceErrorIfHole(
+    BytecodeArrayIterator* iterator) {
+  ObjectRef(broker(),
+            iterator->GetConstantForIndexOperand(0, broker()->isolate()));
 }
 
 void SerializerForBackgroundCompilation::VisitStaKeyedProperty(
@@ -2171,7 +2893,7 @@ void SerializerForBackgroundCompilation::VisitStaKeyedProperty(
   Hints const& key =
       environment()->register_hints(iterator->GetRegisterOperand(1));
   FeedbackSlot slot = iterator->GetSlotOperand(2);
-  ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStore);
+  ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStore, true);
 }
 
 void SerializerForBackgroundCompilation::VisitStaInArrayLiteral(
@@ -2181,7 +2903,19 @@ void SerializerForBackgroundCompilation::VisitStaInArrayLiteral(
   Hints const& key =
       environment()->register_hints(iterator->GetRegisterOperand(1));
   FeedbackSlot slot = iterator->GetSlotOperand(2);
-  ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral);
+  ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral,
+                             true);
+}
+
+void SerializerForBackgroundCompilation::VisitStaDataPropertyInLiteral(
+    BytecodeArrayIterator* iterator) {
+  Hints const& receiver =
+      environment()->register_hints(iterator->GetRegisterOperand(0));
+  Hints const& key =
+      environment()->register_hints(iterator->GetRegisterOperand(1));
+  FeedbackSlot slot = iterator->GetSlotOperand(3);
+  ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral,
+                             false);
 }
 
 #define DEFINE_CLEAR_ENVIRONMENT(name, ...)             \
@@ -2239,14 +2973,44 @@ UNREACHABLE_BYTECODE_LIST(DEFINE_UNREACHABLE)
 KILL_ENVIRONMENT_LIST(DEFINE_KILL)
 #undef DEFINE_KILL
 
-#undef CLEAR_ENVIRONMENT_LIST
-#undef KILL_ENVIRONMENT_LIST
+#define DEFINE_BINARY_OP(name, ...)                     \
+  void SerializerForBackgroundCompilation::Visit##name( \
+      BytecodeArrayIterator* iterator) {                \
+    FeedbackSlot slot = iterator->GetSlotOperand(1);    \
+    ProcessUnaryOrBinaryOperation(slot, true);          \
+  }
+BINARY_OP_LIST(DEFINE_BINARY_OP)
+#undef DEFINE_BINARY_OP
+
+#define DEFINE_COMPARE_OP(name, ...)                    \
+  void SerializerForBackgroundCompilation::Visit##name( \
+      BytecodeArrayIterator* iterator) {                \
+    FeedbackSlot slot = iterator->GetSlotOperand(1);    \
+    ProcessCompareOperation(slot);                      \
+  }
+COMPARE_OP_LIST(DEFINE_COMPARE_OP)
+#undef DEFINE_COMPARE_OP
+
+#define DEFINE_UNARY_OP(name, ...)                      \
+  void SerializerForBackgroundCompilation::Visit##name( \
+      BytecodeArrayIterator* iterator) {                \
+    FeedbackSlot slot = iterator->GetSlotOperand(0);    \
+    ProcessUnaryOrBinaryOperation(slot, true);          \
+  }
+UNARY_OP_LIST(DEFINE_UNARY_OP)
+#undef DEFINE_UNARY_OP
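
Each DEFINE_BINARY_OP instantiation above expands to a Visit##name that reads the feedback slot from operand 1 and calls ProcessUnaryOrBinaryOperation(slot, true); the unary variant reads operand 0 instead. A self-contained demo of the X-macro pattern itself (the list entries here are illustrative, not the real bytecode lists):

    #include <iostream>

    // The op list takes a per-entry macro V, and a DEFINE_* macro stamps out
    // one visitor per entry.
    #define DEMO_BINARY_OP_LIST(V) V(Add) V(Sub) V(Mul)

    #define DEFINE_DEMO_OP(name) \
      void Visit##name() { std::cout << "visit " #name "\n"; }
    DEMO_BINARY_OP_LIST(DEFINE_DEMO_OP)
    #undef DEFINE_DEMO_OP

    int main() {
      VisitAdd();  // expanded from the Add entry
      VisitSub();
      VisitMul();
    }
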
+
+#undef BINARY_OP_LIST
 #undef CLEAR_ACCUMULATOR_LIST
-#undef UNCONDITIONAL_JUMPS_LIST
+#undef CLEAR_ENVIRONMENT_LIST
+#undef COMPARE_OP_LIST
 #undef CONDITIONAL_JUMPS_LIST
 #undef IGNORED_BYTECODE_LIST
-#undef UNREACHABLE_BYTECODE_LIST
+#undef KILL_ENVIRONMENT_LIST
 #undef SUPPORTED_BYTECODE_LIST
+#undef UNARY_OP_LIST
+#undef UNCONDITIONAL_JUMPS_LIST
+#undef UNREACHABLE_BYTECODE_LIST
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 6deba2b00291c1..783f3bcc113f19 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -138,6 +138,7 @@ void SimdScalarLowering::LowerGraph() {
   V(F32x4AddHoriz)                  \
   V(F32x4Sub)                       \
   V(F32x4Mul)                       \
+  V(F32x4Div)                       \
   V(F32x4Min)                       \
   V(F32x4Max)
 
@@ -1207,6 +1208,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
       F32X4_BINOP_CASE(Add)
       F32X4_BINOP_CASE(Sub)
       F32X4_BINOP_CASE(Mul)
+      F32X4_BINOP_CASE(Div)
       F32X4_BINOP_CASE(Min)
       F32X4_BINOP_CASE(Max)
 #undef F32X4_BINOP_CASE
@@ -1390,7 +1392,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
       int input_num_lanes = NumLanes(input_rep_type);
       Node** rep = GetReplacements(node->InputAt(0));
       Node** rep_node = zone()->NewArray<Node*>(num_lanes);
-      Node* true_node = mcgraph_->Int32Constant(-1);
+      Node* true_node = mcgraph_->Int32Constant(1);
       Node* false_node = mcgraph_->Int32Constant(0);
       Node* tmp_result = false_node;
       if (node->opcode() == IrOpcode::kS1x4AllTrue ||
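
The Int32Constant change in this hunk makes the scalar lowering of the S1x4 any/all-true reductions produce a boolean 0/1 rather than an all-ones mask. A scalar model of the all-true fold under that encoding (AllTrue here is an illustrative stand-in for the select chain the lowering emits):

    #include <array>
    #include <cstdint>
    #include <iostream>

    // The result is the boolean 1/0 selected per lane, matching the change
    // of true_node from Int32Constant(-1) to Int32Constant(1) above.
    int32_t AllTrue(const std::array<int32_t, 4>& lanes) {
      int32_t result = 1;  // true_node
      for (int32_t lane : lanes) {
        if (lane == 0) result = 0;  // false_node once any lane is zero
      }
      return result;
    }

    int main() {
      std::cout << AllTrue({1, 2, 3, 4}) << AllTrue({1, 0, 3, 4}) << "\n";  // 10
    }
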
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index b028a76bb0d8ea..1ca7bfe707e3b4 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -92,7 +92,7 @@ MachineRepresentation MachineRepresentationFromArrayType(
 }
 
 UseInfo CheckedUseInfoAsWord32FromHint(
-    NumberOperationHint hint, const VectorSlotPair& feedback = VectorSlotPair(),
+    NumberOperationHint hint, const FeedbackSource& feedback = FeedbackSource(),
     IdentifyZeros identify_zeros = kDistinguishZeros) {
   switch (hint) {
     case NumberOperationHint::kSignedSmall:
@@ -109,7 +109,7 @@ UseInfo CheckedUseInfoAsWord32FromHint(
 }
 
 UseInfo CheckedUseInfoAsFloat64FromHint(
-    NumberOperationHint hint, const VectorSlotPair& feedback,
+    NumberOperationHint hint, const FeedbackSource& feedback,
     IdentifyZeros identify_zeros = kDistinguishZeros) {
   switch (hint) {
     case NumberOperationHint::kSignedSmall:
@@ -1092,7 +1092,7 @@ class RepresentationSelector {
       if (lower()) DeferReplacement(node, node->InputAt(0));
     } else {
       VisitUnop(node,
-                UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()),
+                UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
                 MachineRepresentation::kTaggedPointer);
     }
   }
@@ -1299,9 +1299,7 @@ class RepresentationSelector {
     if (base_taggedness == kTaggedBase &&
         CanBeTaggedOrCompressedPointer(field_representation)) {
       Type value_type = NodeProperties::GetType(value);
-      if (field_representation == MachineRepresentation::kTaggedSigned ||
-          value_representation == MachineRepresentation::kTaggedSigned ||
-          field_representation == MachineRepresentation::kCompressedSigned ||
+      if (value_representation == MachineRepresentation::kTaggedSigned ||
           value_representation == MachineRepresentation::kCompressedSigned) {
         // Write barriers are only for stores of heap objects.
         return kNoWriteBarrier;
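
This hunk narrows the no-barrier condition so that only the representation of the stored value can prove a Smi store; the field's declared representation alone no longer disables the barrier. A reduced, purely descriptive model of the before/after predicates (representations and the decision are simplified):

    #include <iostream>

    enum Rep { kSignedSmall, kAnyTagged };

    // Before: either the field or the value being Smi-typed skipped the
    // barrier. After: only the value's representation counts.
    bool SkipBarrierBefore(Rep field_rep, Rep value_rep) {
      return field_rep == kSignedSmall || value_rep == kSignedSmall;
    }
    bool SkipBarrierAfter(Rep /*field_rep*/, Rep value_rep) {
      return value_rep == kSignedSmall;
    }

    int main() {
      // A Smi-typed field receiving a value of unknown representation now
      // keeps its write barrier, where before it dropped it.
      std::cout << SkipBarrierBefore(kSignedSmall, kAnyTagged) << " "
                << SkipBarrierAfter(kSignedSmall, kAnyTagged) << "\n";  // 1 0
    }
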
@@ -1444,13 +1442,13 @@ class RepresentationSelector {
           !right_feedback_type.Maybe(Type::MinusZero())) {
         left_identify_zeros = kIdentifyZeros;
       }
-      UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(),
+      UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(),
                                                         left_identify_zeros);
       // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
       // a minus zero check for the right hand side, since we already
       // know that the left hand side is a proper Signed32 value,
       // potentially guarded by a check.
-      UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(),
+      UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(),
                                                          kIdentifyZeros);
       VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
                  Type::Signed32());
@@ -1483,7 +1481,7 @@ class RepresentationSelector {
     // default case => Float64Add/Sub
     VisitBinop(node,
                UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
-                                                        VectorSlotPair()),
+                                                        FeedbackSource()),
                MachineRepresentation::kFloat64, Type::Number());
     if (lower()) {
       ChangeToPureOp(node, Float64Op(node));
@@ -1546,9 +1544,9 @@ class RepresentationSelector {
       // right hand side doesn't matter anyways, so in particular there's
       // no observable difference between a 0 and a -0 then.
       UseInfo const lhs_use = CheckedUseInfoAsWord32FromHint(
-          hint, VectorSlotPair(), truncation.identify_zeros());
+          hint, FeedbackSource(), truncation.identify_zeros());
       UseInfo const rhs_use = CheckedUseInfoAsWord32FromHint(
-          hint, VectorSlotPair(), kIdentifyZeros);
+          hint, FeedbackSource(), kIdentifyZeros);
       if (truncation.IsUsedAsWord32()) {
         VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32);
         if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
@@ -1589,9 +1587,9 @@ class RepresentationSelector {
     // right hand side doesn't matter anyways, so in particular there's
     // no observable difference between a 0 and a -0 then.
     UseInfo const lhs_use = UseInfo::CheckedNumberOrOddballAsFloat64(
-        truncation.identify_zeros(), VectorSlotPair());
+        truncation.identify_zeros(), FeedbackSource());
     UseInfo const rhs_use = UseInfo::CheckedNumberOrOddballAsFloat64(
-        kIdentifyZeros, VectorSlotPair());
+        kIdentifyZeros, FeedbackSource());
     VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kFloat64,
                Type::Number());
     if (lower()) ChangeToPureOp(node, Float64Op(node));
@@ -1931,7 +1929,7 @@ class RepresentationSelector {
           case NumberOperationHint::kSignedSmall:
             if (propagate()) {
               VisitBinop(node,
-                         CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(),
+                         CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(),
                                                         kIdentifyZeros),
                          MachineRepresentation::kBit);
             } else if (retype()) {
@@ -1944,7 +1942,7 @@ class RepresentationSelector {
                   IsNodeRepresentationTagged(rhs)) {
                 VisitBinop(node,
                            UseInfo::CheckedSignedSmallAsTaggedSigned(
-                               VectorSlotPair(), kIdentifyZeros),
+                               FeedbackSource(), kIdentifyZeros),
                            MachineRepresentation::kBit);
                 ChangeToPureOp(
                     node, changer_->TaggedSignedOperatorFor(node->opcode()));
@@ -1952,7 +1950,7 @@ class RepresentationSelector {
               } else {
                 VisitBinop(node,
                            CheckedUseInfoAsWord32FromHint(
-                               hint, VectorSlotPair(), kIdentifyZeros),
+                               hint, FeedbackSource(), kIdentifyZeros),
                            MachineRepresentation::kBit);
                 ChangeToPureOp(node, Int32Op(node));
               }
@@ -1969,7 +1967,7 @@ class RepresentationSelector {
             V8_FALLTHROUGH;
           case NumberOperationHint::kNumber:
             VisitBinop(node,
-                       CheckedUseInfoAsFloat64FromHint(hint, VectorSlotPair(),
+                       CheckedUseInfoAsFloat64FromHint(hint, FeedbackSource(),
                                                        kIdentifyZeros),
                        MachineRepresentation::kBit);
             if (lower()) ChangeToPureOp(node, Float64Op(node));
@@ -2054,7 +2052,7 @@ class RepresentationSelector {
         // Checked float64 x float64 => float64
         VisitBinop(node,
                    UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
-                                                            VectorSlotPair()),
+                                                            FeedbackSource()),
                    MachineRepresentation::kFloat64, Type::Number());
         if (lower()) ChangeToPureOp(node, Float64Op(node));
         return;
@@ -2150,7 +2148,7 @@ class RepresentationSelector {
         // default case => Float64Div
         VisitBinop(node,
                    UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
-                                                            VectorSlotPair()),
+                                                            FeedbackSource()),
                    MachineRepresentation::kFloat64, Type::Number());
         if (lower()) ChangeToPureOp(node, Float64Op(node));
         return;
@@ -2320,7 +2318,7 @@ class RepresentationSelector {
           if (lower()) {
             node->RemoveInput(1);
             NodeProperties::ChangeOp(
-                node, simplified()->CheckedUint32ToInt32(VectorSlotPair()));
+                node, simplified()->CheckedUint32ToInt32(FeedbackSource()));
           }
           return;
         }
@@ -2707,14 +2705,14 @@ class RepresentationSelector {
       case IrOpcode::kSpeculativeBigIntAdd: {
         if (truncation.IsUsedAsWord64()) {
           VisitBinop(node,
-                     UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}),
+                     UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
                      MachineRepresentation::kWord64);
           if (lower()) {
             ChangeToPureOp(node, lowering->machine()->Int64Add());
           }
         } else {
           VisitBinop(node,
-                     UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}),
+                     UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
                      MachineRepresentation::kTaggedPointer);
           if (lower()) {
             NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd());
@@ -2725,7 +2723,7 @@ class RepresentationSelector {
       case IrOpcode::kSpeculativeBigIntNegate: {
         if (truncation.IsUsedAsWord64()) {
           VisitUnop(node,
-                    UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}),
+                    UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
                     MachineRepresentation::kWord64);
           if (lower()) {
             ChangeUnaryToPureBinaryOp(node, lowering->machine()->Int64Sub(), 0,
@@ -2733,7 +2731,7 @@ class RepresentationSelector {
           }
         } else {
           VisitUnop(node,
-                    UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}),
+                    UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
                     MachineRepresentation::kTaggedPointer);
           if (lower()) {
             ChangeToPureOp(node, lowering->simplified()->BigIntNegate());
@@ -2822,7 +2820,7 @@ class RepresentationSelector {
                     MachineRepresentation::kTaggedPointer);
         } else {
           VisitUnop(node,
-                    UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()),
+                    UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
                     MachineRepresentation::kTaggedPointer);
         }
         if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -3417,12 +3415,12 @@ class RepresentationSelector {
       }
       case IrOpcode::kTransitionElementsKind: {
         return VisitUnop(
-            node, UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()),
+            node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
             MachineRepresentation::kNone);
       }
       case IrOpcode::kCompareMaps:
         return VisitUnop(
-            node, UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()),
+            node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
             MachineRepresentation::kBit);
       case IrOpcode::kEnsureWritableFastElements:
         return VisitBinop(node, UseInfo::AnyTagged(),
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index c3cca499ace799..885a86286ebb83 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -106,6 +106,11 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
       if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
         return Replace(m.InputAt(0));
       }
+      if (m.IsChangeCompressedSignedToTaggedSigned()) {
+        Node* new_node = graph()->NewNode(
+            simplified()->ChangeCompressedSignedToInt32(), m.InputAt(0));
+        return Replace(new_node);
+      }
       break;
     }
     case IrOpcode::kChangeTaggedToUint32: {
@@ -143,6 +148,40 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
       }
       break;
     }
+    case IrOpcode::kChangeTaggedSignedToCompressedSigned: {
+      DCHECK(COMPRESS_POINTERS_BOOL);
+      NodeMatcher m(node->InputAt(0));
+      if (m.IsChangeInt31ToTaggedSigned()) {
+        Node* new_node = graph()->NewNode(
+            simplified()->ChangeInt31ToCompressedSigned(), m.InputAt(0));
+        return Replace(new_node);
+      } else if (m.IsCheckedInt32ToTaggedSigned()) {
+        // Create a new checked node that outputs CompressedSigned values, with
+        // an explicit decompression after it.
+        Node* new_checked = graph()->CloneNode(m.node());
+        NodeProperties::ChangeOp(
+            new_checked, simplified()->CheckedInt32ToCompressedSigned(
+                             CheckParametersOf(m.node()->op()).feedback()));
+        Node* new_decompression = graph()->NewNode(
+            machine()->ChangeCompressedSignedToTaggedSigned(), new_checked);
+
+        // For all uses of the old checked node, instead insert the new "checked
+        // + decompression". Also, update control and effect.
+        ReplaceWithValue(m.node(), new_decompression, new_checked, new_checked);
+
+        // In the current node, we can skip the decompression since we are going
+        // to have a Decompression + Compression combo.
+        return Replace(new_checked);
+      }
+      break;
+    }
+    case IrOpcode::kChangeCompressedSignedToInt32: {
+      NodeMatcher m(node->InputAt(0));
+      if (m.IsCheckedInt32ToCompressedSigned()) {
+        return Replace(m.InputAt(0));
+      }
+      break;
+    }
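
Both compression cases above hinge on decompress/compress round trips being erasable: the TaggedSigned case returns the new checked compressed node directly, skipping a decompression that the current node would only compress again, and the Int32 case drops a compress/convert pair outright. A toy model of why that is sound for signed values, with plain 32/64-bit truncation and sign extension standing in for V8's actual pointer-compression scheme:

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-ins: Decompress sign-extends the low 32 bits to a
    // full word; Compress truncates back. Neither is V8's real scheme (which
    // also involves the isolate root), but the shape of the identity is the
    // same.
    uint64_t Decompress(uint32_t c) {
      return static_cast<uint64_t>(
          static_cast<int64_t>(static_cast<int32_t>(c)));
    }
    uint32_t Compress(uint64_t t) { return static_cast<uint32_t>(t); }

    int main() {
      const uint32_t cases[] = {0, 1, 0x7fffffffu, 0x80000000u};
      for (uint32_t c : cases) {
        assert(Compress(Decompress(c)) == c);  // the combo is the identity
      }
      return 0;
    }
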
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedTaggedSignedToInt32: {
       NodeMatcher m(node->InputAt(0));
@@ -152,6 +191,14 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
       }
       break;
     }
+    case IrOpcode::kCheckedTaggedToTaggedPointer: {
+      NodeMatcher m(node->InputAt(0));
+      if (m.IsChangeCompressedPointerToTaggedPointer()) {
+        RelaxEffectsAndControls(node);
+        return Replace(m.node());
+      }
+      break;
+    }
     case IrOpcode::kCheckIf: {
       HeapObjectMatcher m(node->InputAt(0));
       if (m.Is(factory()->true_value())) {
@@ -267,6 +314,10 @@ MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
   return jsgraph()->machine();
 }
 
+SimplifiedOperatorBuilder* SimplifiedOperatorReducer::simplified() const {
+  return jsgraph()->simplified();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 4f83635422eb9b..6b86a95e01b2db 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -31,13 +31,34 @@ std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
   UNREACHABLE();
 }
 
+std::ostream& operator<<(std::ostream& os,
+                         ConstFieldInfo const& const_field_info) {
+  if (const_field_info.IsConst()) {
+    return os << "const (field owner: " << const_field_info.owner_map.address()
+              << ")";
+  } else {
+    return os << "mutable";
+  }
+  UNREACHABLE();
+}
+
+bool operator==(ConstFieldInfo const& lhs, ConstFieldInfo const& rhs) {
+  return lhs.owner_map.address() == rhs.owner_map.address();
+}
+
+size_t hash_value(ConstFieldInfo const& const_field_info) {
+  return static_cast<size_t>(const_field_info.owner_map.address());
+}
+
 bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
   // On purpose we don't include the write barrier kind here, as this method is
   // really only relevant for eliminating loads and they don't care about the
   // write barrier mode.
   return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
          lhs.map.address() == rhs.map.address() &&
-         lhs.machine_type == rhs.machine_type;
+         lhs.machine_type == rhs.machine_type &&
+         lhs.const_field_info == rhs.const_field_info &&
+         lhs.is_store_in_literal == rhs.is_store_in_literal;
 }
 
 size_t hash_value(FieldAccess const& access) {
@@ -45,7 +66,8 @@ size_t hash_value(FieldAccess const& access) {
   // really only relevant for eliminating loads and they don't care about the
   // write barrier mode.
   return base::hash_combine(access.base_is_tagged, access.offset,
-                            access.machine_type);
+                            access.machine_type, access.const_field_info,
+                            access.is_store_in_literal);
 }
 
 size_t hash_value(LoadSensitivity load_sensitivity) {
@@ -78,7 +100,10 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
   }
 #endif
   os << access.type << ", " << access.machine_type << ", "
-     << access.write_barrier_kind << ", " << access.constness;
+     << access.write_barrier_kind << ", " << access.const_field_info;
+  if (access.is_store_in_literal) {
+    os << " (store in literal)";
+  }
   if (FLAG_untrusted_code_mitigations) {
     os << ", " << access.load_sensitivity;
   }
@@ -199,7 +224,8 @@ std::ostream& operator<<(std::ostream& os,
 }
 
 size_t hash_value(const CheckFloat64HoleParameters& params) {
-  return base::hash_combine(params.mode(), params.feedback());
+  FeedbackSource::Hash feedback_hash;
+  return base::hash_combine(params.mode(), feedback_hash(params.feedback()));
 }
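
These hash_value hunks all switch from hashing the feedback directly to naming a FeedbackSource::Hash functor first, because FeedbackSource supplies its hash as a nested functor type rather than a free hash_value overload. A self-contained model of that shape (Source and HashParams are stand-ins, and the combining below uses std::hash instead of base::hash_combine):

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <string>

    // Hashing goes through a nested functor type, so call sites name the
    // functor explicitly before combining, as the hunks above do.
    struct Source {
      std::string vector;
      int slot = -1;
      struct Hash {
        std::size_t operator()(const Source& s) const {
          return std::hash<std::string>{}(s.vector) ^
                 (std::hash<int>{}(s.slot) << 1);
        }
      };
    };

    std::size_t HashParams(int mode, const Source& feedback) {
      Source::Hash feedback_hash;  // same shape as the call sites above
      return std::hash<int>{}(mode) ^ (feedback_hash(feedback) << 1);
    }

    int main() {
      std::cout << HashParams(0, Source{"fv", 2}) << "\n";
    }
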
 
 bool operator==(CheckFloat64HoleParameters const& lhs,
@@ -249,7 +275,8 @@ bool operator==(CheckMapsParameters const& lhs,
 }
 
 size_t hash_value(CheckMapsParameters const& p) {
-  return base::hash_combine(p.flags(), p.maps(), p.feedback());
+  FeedbackSource::Hash feedback_hash;
+  return base::hash_combine(p.flags(), p.maps(), feedback_hash(p.feedback()));
 }
 
 std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
@@ -305,7 +332,8 @@ bool operator==(const GrowFastElementsParameters& lhs,
 }
 
 inline size_t hash_value(const GrowFastElementsParameters& params) {
-  return base::hash_combine(params.mode(), params.feedback());
+  FeedbackSource::Hash feedback_hash;
+  return base::hash_combine(params.mode(), feedback_hash(params.feedback()));
 }
 
 std::ostream& operator<<(std::ostream& os,
@@ -550,7 +578,8 @@ bool operator==(NumberOperationParameters const& lhs,
 }
 
 size_t hash_value(NumberOperationParameters const& p) {
-  return base::hash_combine(p.hint(), p.feedback());
+  FeedbackSource::Hash feedback_hash;
+  return base::hash_combine(p.hint(), feedback_hash(p.feedback()));
 }
 
 std::ostream& operator<<(std::ostream& os, NumberOperationParameters const& p) {
@@ -619,7 +648,8 @@ std::ostream& operator<<(std::ostream& os,
 }
 
 size_t hash_value(const CheckTaggedInputParameters& params) {
-  return base::hash_combine(params.mode(), params.feedback());
+  FeedbackSource::Hash feedback_hash;
+  return base::hash_combine(params.mode(), feedback_hash(params.feedback()));
 }
 
 bool operator==(CheckTaggedInputParameters const& lhs,
@@ -645,7 +675,8 @@ std::ostream& operator<<(std::ostream& os,
 }
 
 size_t hash_value(const CheckMinusZeroParameters& params) {
-  return base::hash_combine(params.mode(), params.feedback());
+  FeedbackSource::Hash feedback_hash;
+  return base::hash_combine(params.mode(), feedback_hash(params.feedback()));
 }
 
 bool operator==(CheckMinusZeroParameters const& lhs,
@@ -878,7 +909,7 @@ struct SimplifiedOperatorGlobalCache final {
         : Operator1<CheckParameters>(                                      \
               IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \
               #Name, value_input_count, 1, 1, value_output_count, 1, 0,    \
-              CheckParameters(VectorSlotPair())) {}                        \
+              CheckParameters(FeedbackSource())) {}                        \
   };                                                                       \
   Name##Operator k##Name;
   CHECKED_WITH_FEEDBACK_OP_LIST(CHECKED_WITH_FEEDBACK)
@@ -886,16 +917,16 @@ struct SimplifiedOperatorGlobalCache final {
 
 #define CHECKED_BOUNDS(Name)                                                  \
   struct Name##Operator final : public Operator1<CheckBoundsParameters> {     \
-    Name##Operator(VectorSlotPair feedback, CheckBoundsParameters::Mode mode) \
+    Name##Operator(FeedbackSource feedback, CheckBoundsParameters::Mode mode) \
         : Operator1<CheckBoundsParameters>(                                   \
               IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow,    \
               #Name, 2, 1, 1, 1, 1, 0,                                        \
               CheckBoundsParameters(feedback, mode)) {}                       \
   };                                                                          \
   Name##Operator k##Name##Deopting = {                                        \
-      VectorSlotPair(), CheckBoundsParameters::kDeoptOnOutOfBounds};          \
+      FeedbackSource(), CheckBoundsParameters::kDeoptOnOutOfBounds};          \
   Name##Operator k##Name##Aborting = {                                        \
-      VectorSlotPair(), CheckBoundsParameters::kAbortOnOutOfBounds};
+      FeedbackSource(), CheckBoundsParameters::kAbortOnOutOfBounds};
   CHECKED_BOUNDS_OP_LIST(CHECKED_BOUNDS)
 #undef CHECKED_BOUNDS
 
@@ -905,7 +936,7 @@ struct SimplifiedOperatorGlobalCache final {
         : Operator1<CheckIfParameters>(
               IrOpcode::kCheckIf, Operator::kFoldable | Operator::kNoThrow,
               "CheckIf", 1, 1, 1, 0, 1, 0,
-              CheckIfParameters(kDeoptimizeReason, VectorSlotPair())) {}
+              CheckIfParameters(kDeoptimizeReason, FeedbackSource())) {}
   };
 #define CHECK_IF(Name, message) \
   CheckIfOperator<DeoptimizeReason::k##Name> kCheckIf##Name;
@@ -970,7 +1001,7 @@ struct SimplifiedOperatorGlobalCache final {
               IrOpcode::kCheckedFloat64ToInt32,
               Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32",
               1, 1, 1, 1, 1, 0,
-              CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
+              CheckMinusZeroParameters(kMode, FeedbackSource())) {}
   };
   CheckedFloat64ToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
       kCheckedFloat64ToInt32CheckForMinusZeroOperator;
@@ -985,7 +1016,7 @@ struct SimplifiedOperatorGlobalCache final {
               IrOpcode::kCheckedFloat64ToInt64,
               Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt64",
               1, 1, 1, 1, 1, 0,
-              CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
+              CheckMinusZeroParameters(kMode, FeedbackSource())) {}
   };
   CheckedFloat64ToInt64Operator<CheckForMinusZeroMode::kCheckForMinusZero>
       kCheckedFloat64ToInt64CheckForMinusZeroOperator;
@@ -1000,7 +1031,7 @@ struct SimplifiedOperatorGlobalCache final {
               IrOpcode::kCheckedTaggedToInt32,
               Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt32",
               1, 1, 1, 1, 1, 0,
-              CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
+              CheckMinusZeroParameters(kMode, FeedbackSource())) {}
   };
   CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
       kCheckedTaggedToInt32CheckForMinusZeroOperator;
@@ -1015,7 +1046,7 @@ struct SimplifiedOperatorGlobalCache final {
               IrOpcode::kCheckedTaggedToInt64,
               Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt64",
               1, 1, 1, 1, 1, 0,
-              CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
+              CheckMinusZeroParameters(kMode, FeedbackSource())) {}
   };
   CheckedTaggedToInt64Operator<CheckForMinusZeroMode::kCheckForMinusZero>
       kCheckedTaggedToInt64CheckForMinusZeroOperator;
@@ -1030,7 +1061,7 @@ struct SimplifiedOperatorGlobalCache final {
               IrOpcode::kCheckedTaggedToFloat64,
               Operator::kFoldable | Operator::kNoThrow,
               "CheckedTaggedToFloat64", 1, 1, 1, 1, 1, 0,
-              CheckTaggedInputParameters(kMode, VectorSlotPair())) {}
+              CheckTaggedInputParameters(kMode, FeedbackSource())) {}
   };
   CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumber>
       kCheckedTaggedToFloat64NumberOperator;
@@ -1045,7 +1076,7 @@ struct SimplifiedOperatorGlobalCache final {
               IrOpcode::kCheckedTruncateTaggedToWord32,
               Operator::kFoldable | Operator::kNoThrow,
               "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0,
-              CheckTaggedInputParameters(kMode, VectorSlotPair())) {}
+              CheckTaggedInputParameters(kMode, FeedbackSource())) {}
   };
   CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumber>
       kCheckedTruncateTaggedToWord32NumberOperator;
@@ -1077,7 +1108,7 @@ struct SimplifiedOperatorGlobalCache final {
               IrOpcode::kCheckFloat64Hole,
               Operator::kFoldable | Operator::kNoThrow, "CheckFloat64Hole", 1,
               1, 1, 1, 1, 0,
-              CheckFloat64HoleParameters(kMode, VectorSlotPair())) {}
+              CheckFloat64HoleParameters(kMode, FeedbackSource())) {}
   };
   CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kAllowReturnHole>
       kCheckFloat64HoleAllowReturnHoleOperator;
@@ -1100,7 +1131,7 @@ struct SimplifiedOperatorGlobalCache final {
     GrowFastElementsOperator()
         : Operator1(IrOpcode::kMaybeGrowFastElements, Operator::kNoThrow,
                     "MaybeGrowFastElements", 4, 1, 1, 1, 1, 0,
-                    GrowFastElementsParameters(kMode, VectorSlotPair())) {}
+                    GrowFastElementsParameters(kMode, FeedbackSource())) {}
   };
 
   GrowFastElementsOperator<GrowFastElementsMode::kDoubleElements>
@@ -1145,7 +1176,7 @@ struct SimplifiedOperatorGlobalCache final {
               IrOpcode::kSpeculativeToNumber,
               Operator::kFoldable | Operator::kNoThrow, "SpeculativeToNumber",
               1, 1, 1, 1, 1, 0,
-              NumberOperationParameters(kHint, VectorSlotPair())) {}
+              NumberOperationParameters(kHint, FeedbackSource())) {}
   };
   SpeculativeToNumberOperator<NumberOperationHint::kSignedSmall>
       kSpeculativeToNumberSignedSmallOperator;
@@ -1179,7 +1210,7 @@ GET_FROM_CACHE(LoadFieldByIndex)
 #define GET_FROM_CACHE_WITH_FEEDBACK(Name, value_input_count,               \
                                      value_output_count)                    \
   const Operator* SimplifiedOperatorBuilder::Name(                          \
-      const VectorSlotPair& feedback) {                                     \
+      const FeedbackSource& feedback) {                                     \
     if (!feedback.IsValid()) {                                              \
       return &cache_.k##Name;                                               \
     }                                                                       \
@@ -1193,7 +1224,7 @@ CHECKED_WITH_FEEDBACK_OP_LIST(GET_FROM_CACHE_WITH_FEEDBACK)
 
 #define GET_FROM_CACHE_WITH_FEEDBACK(Name)                                \
   const Operator* SimplifiedOperatorBuilder::Name(                        \
-      const VectorSlotPair& feedback, CheckBoundsParameters::Mode mode) { \
+      const FeedbackSource& feedback, CheckBoundsParameters::Mode mode) { \
     if (!feedback.IsValid()) {                                            \
       switch (mode) {                                                     \
         case CheckBoundsParameters::kDeoptOnOutOfBounds:                  \
@@ -1242,7 +1273,7 @@ const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckIf(
-    DeoptimizeReason reason, const VectorSlotPair& feedback) {
+    DeoptimizeReason reason, const FeedbackSource& feedback) {
   if (!feedback.IsValid()) {
     switch (reason) {
 #define CHECK_IF(Name, message)   \
@@ -1280,7 +1311,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
-    CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+    CheckForMinusZeroMode mode, const FeedbackSource& feedback) {
   if (!feedback.IsValid()) {
     switch (mode) {
       case CheckForMinusZeroMode::kCheckForMinusZero:
@@ -1296,7 +1327,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt64(
-    CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+    CheckForMinusZeroMode mode, const FeedbackSource& feedback) {
   if (!feedback.IsValid()) {
     switch (mode) {
       case CheckForMinusZeroMode::kCheckForMinusZero:
@@ -1312,7 +1343,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt64(
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
-    CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+    CheckForMinusZeroMode mode, const FeedbackSource& feedback) {
   if (!feedback.IsValid()) {
     switch (mode) {
       case CheckForMinusZeroMode::kCheckForMinusZero:
@@ -1328,7 +1359,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt64(
-    CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+    CheckForMinusZeroMode mode, const FeedbackSource& feedback) {
   if (!feedback.IsValid()) {
     switch (mode) {
       case CheckForMinusZeroMode::kCheckForMinusZero:
@@ -1344,7 +1375,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt64(
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
-    CheckTaggedInputMode mode, const VectorSlotPair& feedback) {
+    CheckTaggedInputMode mode, const FeedbackSource& feedback) {
   if (!feedback.IsValid()) {
     switch (mode) {
       case CheckTaggedInputMode::kNumber:
@@ -1360,7 +1391,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
-    CheckTaggedInputMode mode, const VectorSlotPair& feedback) {
+    CheckTaggedInputMode mode, const FeedbackSource& feedback) {
   if (!feedback.IsValid()) {
     switch (mode) {
       case CheckTaggedInputMode::kNumber:
@@ -1377,7 +1408,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
 
 const Operator* SimplifiedOperatorBuilder::CheckMaps(
     CheckMapsFlags flags, ZoneHandleSet<Map> maps,
-    const VectorSlotPair& feedback) {
+    const FeedbackSource& feedback) {
   CheckMapsParameters const parameters(flags, maps, feedback);
   return new (zone()) Operator1<CheckMapsParameters>(  // --
       IrOpcode::kCheckMaps,                            // opcode
@@ -1422,7 +1453,7 @@ const Operator* SimplifiedOperatorBuilder::ConvertReceiver(
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
-    CheckFloat64HoleMode mode, VectorSlotPair const& feedback) {
+    CheckFloat64HoleMode mode, FeedbackSource const& feedback) {
   if (!feedback.IsValid()) {
     switch (mode) {
       case CheckFloat64HoleMode::kAllowReturnHole:
@@ -1454,7 +1485,7 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate(
 }
 
 const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
-    NumberOperationHint hint, const VectorSlotPair& feedback) {
+    NumberOperationHint hint, const FeedbackSource& feedback) {
   if (!feedback.IsValid()) {
     switch (hint) {
       case NumberOperationHint::kSignedSmall:
@@ -1480,7 +1511,7 @@ const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
 }
 
 const Operator* SimplifiedOperatorBuilder::MaybeGrowFastElements(
-    GrowFastElementsMode mode, const VectorSlotPair& feedback) {
+    GrowFastElementsMode mode, const FeedbackSource& feedback) {
   if (!feedback.IsValid()) {
     switch (mode) {
       case GrowFastElementsMode::kDoubleElements:
@@ -1556,7 +1587,10 @@ bool operator==(CheckParameters const& lhs, CheckParameters const& rhs) {
   return lhs.feedback() == rhs.feedback();
 }
 
-size_t hash_value(CheckParameters const& p) { return hash_value(p.feedback()); }
+size_t hash_value(CheckParameters const& p) {
+  FeedbackSource::Hash feedback_hash;
+  return feedback_hash(p.feedback());
+}
 
 std::ostream& operator<<(std::ostream& os, CheckParameters const& p) {
   return os << p.feedback();
@@ -1605,7 +1639,8 @@ bool operator==(CheckIfParameters const& lhs, CheckIfParameters const& rhs) {
 }
 
 size_t hash_value(CheckIfParameters const& p) {
-  return base::hash_combine(p.reason(), p.feedback());
+  FeedbackSource::Hash feedback_hash;
+  return base::hash_combine(p.reason(), feedback_hash(p.feedback()));
 }
 
 std::ostream& operator<<(std::ostream& os, CheckIfParameters const& p) {
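
The two hash_value hunks above switch from hashing the feedback directly to calling an explicit nested Hash functor, presumably because FeedbackSource, unlike the old VectorSlotPair, does not come with a free hash_value() overload. A minimal sketch of the pattern, using a generic Key type as a stand-in (the real FeedbackSource is defined in src/compiler/feedback-source.h, which is not part of this diff):

    #include <cstddef>
    #include <functional>

    // Generic stand-in type, not V8's real FeedbackSource.
    struct Key {
      int a = 0;
      int b = 0;

      // Nested hash functor, mirroring FeedbackSource::Hash.
      struct Hash {
        std::size_t operator()(const Key& k) const {
          std::size_t h = std::hash<int>()(k.a);
          // Simple hash combine, in the spirit of base::hash_combine.
          return h ^ (std::hash<int>()(k.b) + 0x9e3779b9u + (h << 6) + (h >> 2));
        }
      };
    };

    // Usage matching the rewritten hash_value(CheckParameters const&) above.
    std::size_t HashParameters(const Key& feedback) {
      Key::Hash feedback_hash;
      return feedback_hash(feedback);
    }
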
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index bdac796adfff69..58e9bfdffbbf17 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -10,9 +10,9 @@
 #include "src/base/compiler-specific.h"
 #include "src/codegen/machine-type.h"
 #include "src/common/globals.h"
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/operator.h"
 #include "src/compiler/types.h"
-#include "src/compiler/vector-slot-pair.h"
 #include "src/compiler/write-barrier-kind.h"
 #include "src/deoptimizer/deoptimize-reason.h"
 #include "src/handles/handles.h"
@@ -44,6 +44,27 @@ size_t hash_value(LoadSensitivity);
 
 std::ostream& operator<<(std::ostream&, LoadSensitivity);
 
+struct ConstFieldInfo {
+  // The map that introduced the const field, if any. An access is considered
+  // mutable iff the handle is null.
+  MaybeHandle<Map> owner_map;
+
+  ConstFieldInfo() : owner_map(MaybeHandle<Map>()) {}
+  explicit ConstFieldInfo(Handle<Map> owner_map) : owner_map(owner_map) {}
+
+  bool IsConst() const { return !owner_map.is_null(); }
+
+  // No const field owner, i.e., a mutable field
+  static ConstFieldInfo None() { return ConstFieldInfo(); }
+};
+
+V8_EXPORT_PRIVATE bool operator==(ConstFieldInfo const&, ConstFieldInfo const&);
+
+size_t hash_value(ConstFieldInfo const&);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+                                           ConstFieldInfo const&);
+
 // An access descriptor for loads/stores of fixed structures like field
 // accesses of heap objects. Accesses from either tagged or untagged base
 // pointers are supported; untagging is done automatically during lowering.
@@ -56,7 +77,9 @@ struct FieldAccess {
   MachineType machine_type;       // machine type of the field.
   WriteBarrierKind write_barrier_kind;  // write barrier hint.
   LoadSensitivity load_sensitivity;     // load safety for poisoning.
-  PropertyConstness constness;  // whether the field is assigned only once
+  ConstFieldInfo const_field_info;      // the constness of this access, and the
+                                        // field owner map, if the access is const
+  bool is_store_in_literal;  // originates from a kStoreInLiteral access
 
   FieldAccess()
       : base_is_tagged(kTaggedBase),
@@ -65,13 +88,15 @@ struct FieldAccess {
         machine_type(MachineType::None()),
         write_barrier_kind(kFullWriteBarrier),
         load_sensitivity(LoadSensitivity::kUnsafe),
-        constness(PropertyConstness::kMutable) {}
+        const_field_info(ConstFieldInfo::None()),
+        is_store_in_literal(false) {}
 
   FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle<Name> name,
               MaybeHandle<Map> map, Type type, MachineType machine_type,
               WriteBarrierKind write_barrier_kind,
               LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe,
-              PropertyConstness constness = PropertyConstness::kMutable)
+              ConstFieldInfo const_field_info = ConstFieldInfo::None(),
+              bool is_store_in_literal = false)
       : base_is_tagged(base_is_tagged),
         offset(offset),
         name(name),
@@ -80,7 +105,8 @@ struct FieldAccess {
         machine_type(machine_type),
         write_barrier_kind(write_barrier_kind),
         load_sensitivity(load_sensitivity),
-        constness(constness) {}
+        const_field_info(const_field_info),
+        is_store_in_literal(is_store_in_literal) {}
 
   int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
 };
@@ -175,13 +201,13 @@ ConvertReceiverMode ConvertReceiverModeOf(Operator const* op)
 // fails, then speculation on that CallIC slot will be disabled.
 class CheckParameters final {
  public:
-  explicit CheckParameters(const VectorSlotPair& feedback)
+  explicit CheckParameters(const FeedbackSource& feedback)
       : feedback_(feedback) {}
 
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
  private:
-  VectorSlotPair feedback_;
+  FeedbackSource feedback_;
 };
 
 bool operator==(CheckParameters const&, CheckParameters const&);
@@ -196,7 +222,7 @@ class CheckBoundsParameters final {
  public:
   enum Mode { kAbortOnOutOfBounds, kDeoptOnOutOfBounds };
 
-  CheckBoundsParameters(const VectorSlotPair& feedback, Mode mode)
+  CheckBoundsParameters(const FeedbackSource& feedback, Mode mode)
       : check_parameters_(feedback), mode_(mode) {}
 
   Mode mode() const { return mode_; }
@@ -219,15 +245,15 @@ CheckBoundsParameters const& CheckBoundsParametersOf(Operator const*)
 class CheckIfParameters final {
  public:
   explicit CheckIfParameters(DeoptimizeReason reason,
-                             const VectorSlotPair& feedback)
+                             const FeedbackSource& feedback)
       : reason_(reason), feedback_(feedback) {}
 
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
   DeoptimizeReason reason() const { return reason_; }
 
  private:
   DeoptimizeReason reason_;
-  VectorSlotPair feedback_;
+  FeedbackSource feedback_;
 };
 
 bool operator==(CheckIfParameters const&, CheckIfParameters const&);
@@ -251,15 +277,15 @@ std::ostream& operator<<(std::ostream&, CheckFloat64HoleMode);
 class CheckFloat64HoleParameters {
  public:
   CheckFloat64HoleParameters(CheckFloat64HoleMode mode,
-                             VectorSlotPair const& feedback)
+                             FeedbackSource const& feedback)
       : mode_(mode), feedback_(feedback) {}
 
   CheckFloat64HoleMode mode() const { return mode_; }
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
  private:
   CheckFloat64HoleMode mode_;
-  VectorSlotPair feedback_;
+  FeedbackSource feedback_;
 };
 
 CheckFloat64HoleParameters const& CheckFloat64HoleParametersOf(Operator const*)
@@ -286,15 +312,15 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
 class CheckTaggedInputParameters {
  public:
   CheckTaggedInputParameters(CheckTaggedInputMode mode,
-                             const VectorSlotPair& feedback)
+                             const FeedbackSource& feedback)
       : mode_(mode), feedback_(feedback) {}
 
   CheckTaggedInputMode mode() const { return mode_; }
-  const VectorSlotPair& feedback() const { return feedback_; }
+  const FeedbackSource& feedback() const { return feedback_; }
 
  private:
   CheckTaggedInputMode mode_;
-  VectorSlotPair feedback_;
+  FeedbackSource feedback_;
 };
 
 const CheckTaggedInputParameters& CheckTaggedInputParametersOf(const Operator*)
@@ -324,15 +350,15 @@ CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*)
 class CheckMinusZeroParameters {
  public:
   CheckMinusZeroParameters(CheckForMinusZeroMode mode,
-                           const VectorSlotPair& feedback)
+                           const FeedbackSource& feedback)
       : mode_(mode), feedback_(feedback) {}
 
   CheckForMinusZeroMode mode() const { return mode_; }
-  const VectorSlotPair& feedback() const { return feedback_; }
+  const FeedbackSource& feedback() const { return feedback_; }
 
  private:
   CheckForMinusZeroMode mode_;
-  VectorSlotPair feedback_;
+  FeedbackSource feedback_;
 };
 
 V8_EXPORT_PRIVATE const CheckMinusZeroParameters& CheckMinusZeroParametersOf(
@@ -363,17 +389,17 @@ std::ostream& operator<<(std::ostream&, CheckMapsFlags);
 class CheckMapsParameters final {
  public:
   CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps,
-                      const VectorSlotPair& feedback)
+                      const FeedbackSource& feedback)
       : flags_(flags), maps_(maps), feedback_(feedback) {}
 
   CheckMapsFlags flags() const { return flags_; }
   ZoneHandleSet<Map> const& maps() const { return maps_; }
-  VectorSlotPair const& feedback() const { return feedback_; }
+  FeedbackSource const& feedback() const { return feedback_; }
 
  private:
   CheckMapsFlags const flags_;
   ZoneHandleSet<Map> const maps_;
-  VectorSlotPair const feedback_;
+  FeedbackSource const feedback_;
 };
 
 bool operator==(CheckMapsParameters const&, CheckMapsParameters const&);
@@ -406,15 +432,15 @@ std::ostream& operator<<(std::ostream&, GrowFastElementsMode);
 class GrowFastElementsParameters {
  public:
   GrowFastElementsParameters(GrowFastElementsMode mode,
-                             const VectorSlotPair& feedback)
+                             const FeedbackSource& feedback)
       : mode_(mode), feedback_(feedback) {}
 
   GrowFastElementsMode mode() const { return mode_; }
-  const VectorSlotPair& feedback() const { return feedback_; }
+  const FeedbackSource& feedback() const { return feedback_; }
 
  private:
   GrowFastElementsMode mode_;
-  VectorSlotPair feedback_;
+  FeedbackSource feedback_;
 };
 
 bool operator==(const GrowFastElementsParameters&,
@@ -490,15 +516,15 @@ V8_EXPORT_PRIVATE NumberOperationHint NumberOperationHintOf(const Operator* op)
 class NumberOperationParameters {
  public:
   NumberOperationParameters(NumberOperationHint hint,
-                            const VectorSlotPair& feedback)
+                            const FeedbackSource& feedback)
       : hint_(hint), feedback_(feedback) {}
 
   NumberOperationHint hint() const { return hint_; }
-  const VectorSlotPair& feedback() const { return feedback_; }
+  const FeedbackSource& feedback() const { return feedback_; }
 
  private:
   NumberOperationHint hint_;
-  VectorSlotPair feedback_;
+  FeedbackSource feedback_;
 };
 
 size_t hash_value(NumberOperationParameters const&);
@@ -692,7 +718,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
   const Operator* FindOrderedHashMapEntryForInt32Key();
 
   const Operator* SpeculativeToNumber(NumberOperationHint hint,
-                                      const VectorSlotPair& feedback);
+                                      const FeedbackSource& feedback);
 
   const Operator* StringToNumber();
   const Operator* PlainPrimitiveToNumber();
@@ -730,67 +756,67 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
   const Operator* CompareMaps(ZoneHandleSet<Map>);
   const Operator* MapGuard(ZoneHandleSet<Map> maps);
 
-  const Operator* CheckBounds(const VectorSlotPair& feedback);
+  const Operator* CheckBounds(const FeedbackSource& feedback);
   const Operator* CheckEqualsInternalizedString();
   const Operator* CheckEqualsSymbol();
-  const Operator* CheckFloat64Hole(CheckFloat64HoleMode, VectorSlotPair const&);
+  const Operator* CheckFloat64Hole(CheckFloat64HoleMode, FeedbackSource const&);
   const Operator* CheckHeapObject();
   const Operator* CheckIf(DeoptimizeReason deoptimize_reason,
-                          const VectorSlotPair& feedback = VectorSlotPair());
+                          const FeedbackSource& feedback = FeedbackSource());
   const Operator* CheckInternalizedString();
   const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
-                            const VectorSlotPair& = VectorSlotPair());
+                            const FeedbackSource& = FeedbackSource());
   const Operator* CheckNotTaggedHole();
-  const Operator* CheckNumber(const VectorSlotPair& feedback);
+  const Operator* CheckNumber(const FeedbackSource& feedback);
   const Operator* CheckReceiver();
   const Operator* CheckReceiverOrNullOrUndefined();
-  const Operator* CheckSmi(const VectorSlotPair& feedback);
-  const Operator* CheckString(const VectorSlotPair& feedback);
+  const Operator* CheckSmi(const FeedbackSource& feedback);
+  const Operator* CheckString(const FeedbackSource& feedback);
   const Operator* CheckSymbol();
 
   const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode,
-                                        const VectorSlotPair& feedback);
+                                        const FeedbackSource& feedback);
   const Operator* CheckedFloat64ToInt64(CheckForMinusZeroMode,
-                                        const VectorSlotPair& feedback);
+                                        const FeedbackSource& feedback);
   const Operator* CheckedInt32Add();
   const Operator* CheckedInt32Div();
   const Operator* CheckedInt32Mod();
   const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
   const Operator* CheckedInt32Sub();
   const Operator* CheckedInt32ToCompressedSigned(
-      const VectorSlotPair& feedback);
-  const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
-  const Operator* CheckedInt64ToInt32(const VectorSlotPair& feedback);
-  const Operator* CheckedInt64ToTaggedSigned(const VectorSlotPair& feedback);
-  const Operator* CheckedTaggedSignedToInt32(const VectorSlotPair& feedback);
+      const FeedbackSource& feedback);
+  const Operator* CheckedInt32ToTaggedSigned(const FeedbackSource& feedback);
+  const Operator* CheckedInt64ToInt32(const FeedbackSource& feedback);
+  const Operator* CheckedInt64ToTaggedSigned(const FeedbackSource& feedback);
+  const Operator* CheckedTaggedSignedToInt32(const FeedbackSource& feedback);
   const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode,
-                                         const VectorSlotPair& feedback);
+                                         const FeedbackSource& feedback);
   const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode,
-                                       const VectorSlotPair& feedback);
+                                       const FeedbackSource& feedback);
   const Operator* CheckedTaggedToInt64(CheckForMinusZeroMode,
-                                       const VectorSlotPair& feedback);
-  const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
-  const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback);
-  const Operator* CheckBigInt(const VectorSlotPair& feedback);
+                                       const FeedbackSource& feedback);
+  const Operator* CheckedTaggedToTaggedPointer(const FeedbackSource& feedback);
+  const Operator* CheckedTaggedToTaggedSigned(const FeedbackSource& feedback);
+  const Operator* CheckBigInt(const FeedbackSource& feedback);
   const Operator* CheckedCompressedToTaggedPointer(
-      const VectorSlotPair& feedback);
+      const FeedbackSource& feedback);
   const Operator* CheckedCompressedToTaggedSigned(
-      const VectorSlotPair& feedback);
+      const FeedbackSource& feedback);
   const Operator* CheckedTaggedToCompressedPointer(
-      const VectorSlotPair& feedback);
+      const FeedbackSource& feedback);
   const Operator* CheckedTaggedToCompressedSigned(
-      const VectorSlotPair& feedback);
+      const FeedbackSource& feedback);
   const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode,
-                                                const VectorSlotPair& feedback);
+                                                const FeedbackSource& feedback);
   const Operator* CheckedUint32Div();
   const Operator* CheckedUint32Mod();
-  const Operator* CheckedUint32Bounds(const VectorSlotPair& feedback,
+  const Operator* CheckedUint32Bounds(const FeedbackSource& feedback,
                                       CheckBoundsParameters::Mode mode);
-  const Operator* CheckedUint32ToInt32(const VectorSlotPair& feedback);
-  const Operator* CheckedUint32ToTaggedSigned(const VectorSlotPair& feedback);
-  const Operator* CheckedUint64Bounds(const VectorSlotPair& feedback);
-  const Operator* CheckedUint64ToInt32(const VectorSlotPair& feedback);
-  const Operator* CheckedUint64ToTaggedSigned(const VectorSlotPair& feedback);
+  const Operator* CheckedUint32ToInt32(const FeedbackSource& feedback);
+  const Operator* CheckedUint32ToTaggedSigned(const FeedbackSource& feedback);
+  const Operator* CheckedUint64Bounds(const FeedbackSource& feedback);
+  const Operator* CheckedUint64ToInt32(const FeedbackSource& feedback);
+  const Operator* CheckedUint64ToTaggedSigned(const FeedbackSource& feedback);
 
   const Operator* ConvertReceiver(ConvertReceiverMode);
 
@@ -839,7 +865,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
 
   // maybe-grow-fast-elements object, elements, index, length
   const Operator* MaybeGrowFastElements(GrowFastElementsMode mode,
-                                        const VectorSlotPair& feedback);
+                                        const FeedbackSource& feedback);
 
   // transition-elements-kind object, from-map, to-map
   const Operator* TransitionElementsKind(ElementsTransition transition);
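
The ConstFieldInfo struct added above replaces the single PropertyConstness enum on FieldAccess: constness is now derived from whether an owner map is present, which also records which map introduced the const field. A self-contained sketch of the same idea, with std::shared_ptr standing in for V8's MaybeHandle<Map> (an assumption for illustration only):

    #include <cassert>
    #include <memory>
    #include <utility>

    struct Map {};  // stand-in for v8::internal::Map

    // Mirrors the ConstFieldInfo added above: a null owner map means the
    // field is mutable; a non-null one records who introduced the const field.
    struct ConstFieldInfo {
      std::shared_ptr<Map> owner_map;  // stand-in for MaybeHandle<Map>

      ConstFieldInfo() = default;
      explicit ConstFieldInfo(std::shared_ptr<Map> map)
          : owner_map(std::move(map)) {}

      bool IsConst() const { return owner_map != nullptr; }
      static ConstFieldInfo None() { return ConstFieldInfo(); }
    };

    int main() {
      assert(!ConstFieldInfo::None().IsConst());  // mutable field access
      ConstFieldInfo const_info(std::make_shared<Map>());
      assert(const_info.IsConst());  // const field, with a known owner map
    }
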
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index b71bcd7e669fb8..bd53fb895fadc5 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -10,7 +10,6 @@
 #include "src/compiler/all-nodes.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
 
 namespace v8 {
 namespace internal {
@@ -42,163 +41,7 @@ namespace compiler {
 #define DCHECK_EXTRA(condition, fmt, ...) ((void)0)
 #endif
 
-// Store-store elimination.
-//
-// The aim of this optimization is to detect the following pattern in the
-// effect graph:
-//
-// - StoreField[+24, kRepTagged](263, ...)
-//
-//   ... lots of nodes from which the field at offset 24 of the object
-//       returned by node #263 cannot be observed ...
-//
-// - StoreField[+24, kRepTagged](263, ...)
-//
-// In such situations, the earlier StoreField cannot be observed, and can be
-// eliminated. This optimization should work for any offset and input node, of
-// course.
-//
-// The optimization also works across splits. It currently does not work for
-// loops, because we tend to put a stack check in loops, and like deopts,
-// stack checks can observe anything.
-
-// Assumption: every byte of a JS object is only ever accessed through one
-// offset. For instance, byte 15 of a given object may be accessed using a
-// two-byte read at offset 14, or a four-byte read at offset 12, but never
-// both in the same program.
-//
-// This implementation needs all dead nodes removed from the graph, and the
-// graph should be trimmed.
-
-namespace {
-
-using StoreOffset = uint32_t;
-
-struct UnobservableStore {
-  NodeId id_;
-  StoreOffset offset_;
-
-  bool operator==(const UnobservableStore) const;
-  bool operator<(const UnobservableStore) const;
-};
-
-}  // namespace
-
-namespace {
-
-// Instances of UnobservablesSet are immutable. They represent either a set of
-// UnobservableStores, or the "unvisited empty set".
-//
-// We apply some sharing to save memory. The class UnobservablesSet is only a
-// pointer wide, and a copy does not use any heap (or temp_zone) memory. Most
-// changes to an UnobservablesSet might allocate in the temp_zone.
-//
-// The size of an instance should be the size of a pointer, plus additional
-// space in the zone in the case of non-unvisited UnobservablesSets. Copying
-// an UnobservablesSet allocates no memory.
-class UnobservablesSet final {
- public:
-  static UnobservablesSet Unvisited();
-  static UnobservablesSet VisitedEmpty(Zone* zone);
-  UnobservablesSet();  // unvisited
-  UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default;
-
-  UnobservablesSet Intersect(const UnobservablesSet& other, Zone* zone) const;
-  UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
-  UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
-
-  const ZoneSet<UnobservableStore>* set() const { return set_; }
-
-  bool IsUnvisited() const { return set_ == nullptr; }
-  bool IsEmpty() const { return set_ == nullptr || set_->empty(); }
-  bool Contains(UnobservableStore obs) const {
-    return set_ != nullptr && (set_->find(obs) != set_->end());
-  }
-
-  bool operator==(const UnobservablesSet&) const;
-  bool operator!=(const UnobservablesSet&) const;
-
- private:
-  explicit UnobservablesSet(const ZoneSet<UnobservableStore>* set)
-      : set_(set) {}
-  const ZoneSet<UnobservableStore>* set_;
-};
-
-}  // namespace
-
-namespace {
-
-class RedundantStoreFinder final {
- public:
-  RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter,
-                       Zone* temp_zone);
-
-  void Find();
-
-  const ZoneSet<Node*>& to_remove_const() { return to_remove_; }
-
-  void Visit(Node* node);
-
- private:
-  void VisitEffectfulNode(Node* node);
-  UnobservablesSet RecomputeUseIntersection(Node* node);
-  UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses);
-  static bool CannotObserveStoreField(Node* node);
-
-  void MarkForRevisit(Node* node);
-  bool HasBeenVisited(Node* node);
-
-  JSGraph* jsgraph() const { return jsgraph_; }
-  Isolate* isolate() { return jsgraph()->isolate(); }
-  Zone* temp_zone() const { return temp_zone_; }
-  ZoneVector<UnobservablesSet>& unobservable() { return unobservable_; }
-  UnobservablesSet& unobservable_for_id(NodeId id) {
-    DCHECK_LT(id, unobservable().size());
-    return unobservable()[id];
-  }
-  ZoneSet<Node*>& to_remove() { return to_remove_; }
-
-  JSGraph* const jsgraph_;
-  TickCounter* const tick_counter_;
-  Zone* const temp_zone_;
-
-  ZoneStack<Node*> revisit_;
-  ZoneVector<bool> in_revisit_;
-  // Maps node IDs to UnobservableNodeSets.
-  ZoneVector<UnobservablesSet> unobservable_;
-  ZoneSet<Node*> to_remove_;
-  const UnobservablesSet unobservables_visited_empty_;
-};
-
-// To safely cast an offset from a FieldAccess, which has a potentially wider
-// range (namely int).
-StoreOffset ToOffset(int offset) {
-  CHECK_LE(0, offset);
-  return static_cast<StoreOffset>(offset);
-}
-
-StoreOffset ToOffset(const FieldAccess& access) {
-  return ToOffset(access.offset);
-}
-
-unsigned int RepSizeOf(MachineRepresentation rep) {
-  return 1u << ElementSizeLog2Of(rep);
-}
-unsigned int RepSizeOf(FieldAccess access) {
-  return RepSizeOf(access.machine_type.representation());
-}
-
-bool AtMostTagged(FieldAccess access) {
-  return RepSizeOf(access) <= RepSizeOf(MachineRepresentation::kTagged);
-}
-
-bool AtLeastTagged(FieldAccess access) {
-  return RepSizeOf(access) >= RepSizeOf(MachineRepresentation::kTagged);
-}
-
-}  // namespace
-
-void RedundantStoreFinder::Find() {
+void StoreStoreElimination::RedundantStoreFinder::Find() {
   Visit(jsgraph()->graph()->end());
 
   while (!revisit_.empty()) {
@@ -222,7 +65,7 @@ void RedundantStoreFinder::Find() {
 #endif
 }
 
-void RedundantStoreFinder::MarkForRevisit(Node* node) {
+void StoreStoreElimination::RedundantStoreFinder::MarkForRevisit(Node* node) {
   DCHECK_LT(node->id(), in_revisit_.size());
   if (!in_revisit_[node->id()]) {
     revisit_.push(node);
@@ -230,7 +73,7 @@ void RedundantStoreFinder::MarkForRevisit(Node* node) {
   }
 }
 
-bool RedundantStoreFinder::HasBeenVisited(Node* node) {
+bool StoreStoreElimination::RedundantStoreFinder::HasBeenVisited(Node* node) {
   return !unobservable_for_id(node->id()).IsUnvisited();
 }
 
@@ -241,7 +84,6 @@ void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter,
   finder.Find();
 
   // Remove superfluous nodes
-
   for (Node* node : finder.to_remove_const()) {
     if (FLAG_trace_store_elimination) {
       PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n",
@@ -254,11 +96,9 @@ void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter,
   }
 }
 
-// Recompute unobservables-set for a node. Will also mark superfluous nodes
-// as to be removed.
-
-UnobservablesSet RedundantStoreFinder::RecomputeSet(
-    Node* node, const UnobservablesSet& uses) {
+StoreStoreElimination::UnobservablesSet
+StoreStoreElimination::RedundantStoreFinder::RecomputeSet(
+    Node* node, const StoreStoreElimination::UnobservablesSet& uses) {
   switch (node->op()->opcode()) {
     case IrOpcode::kStoreField: {
       Node* stored_to = node->InputAt(0);
@@ -266,40 +106,21 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(
       StoreOffset offset = ToOffset(access);
 
       UnobservableStore observation = {stored_to->id(), offset};
-      bool isNotObservable = uses.Contains(observation);
+      bool is_not_observable = uses.Contains(observation);
 
-      if (isNotObservable && AtMostTagged(access)) {
+      if (is_not_observable) {
         TRACE("  #%d is StoreField[+%d,%s](#%d), unobservable", node->id(),
               offset, MachineReprToString(access.machine_type.representation()),
               stored_to->id());
         to_remove().insert(node);
         return uses;
-      } else if (isNotObservable && !AtMostTagged(access)) {
-        TRACE(
-            "  #%d is StoreField[+%d,%s](#%d), repeated in future but too "
-            "big to optimize away",
-            node->id(), offset,
-            MachineReprToString(access.machine_type.representation()),
-            stored_to->id());
-        return uses;
-      } else if (!isNotObservable && AtLeastTagged(access)) {
+      } else {
         TRACE("  #%d is StoreField[+%d,%s](#%d), observable, recording in set",
               node->id(), offset,
               MachineReprToString(access.machine_type.representation()),
               stored_to->id());
         return uses.Add(observation, temp_zone());
-      } else if (!isNotObservable && !AtLeastTagged(access)) {
-        TRACE(
-            "  #%d is StoreField[+%d,%s](#%d), observable but too small to "
-            "record",
-            node->id(), offset,
-            MachineReprToString(access.machine_type.representation()),
-            stored_to->id());
-        return uses;
-      } else {
-        UNREACHABLE();
       }
-      break;
     }
     case IrOpcode::kLoadField: {
       Node* loaded_from = node->InputAt(0);
@@ -314,7 +135,6 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(
           loaded_from->id(), offset);
 
       return uses.RemoveSameOffset(offset, temp_zone());
-      break;
     }
     default:
       if (CannotObserveStoreField(node)) {
@@ -330,36 +150,16 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(
   UNREACHABLE();
 }
 
-bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
-  return node->opcode() == IrOpcode::kLoadElement ||
-         node->opcode() == IrOpcode::kLoad ||
-         node->opcode() == IrOpcode::kStore ||
-         node->opcode() == IrOpcode::kEffectPhi ||
-         node->opcode() == IrOpcode::kStoreElement ||
-         node->opcode() == IrOpcode::kUnsafePointerAdd ||
-         node->opcode() == IrOpcode::kRetain;
+bool StoreStoreElimination::RedundantStoreFinder::CannotObserveStoreField(
+    Node* node) {
+  IrOpcode::Value opcode = node->opcode();
+  return opcode == IrOpcode::kLoadElement || opcode == IrOpcode::kLoad ||
+         opcode == IrOpcode::kStore || opcode == IrOpcode::kEffectPhi ||
+         opcode == IrOpcode::kStoreElement ||
+         opcode == IrOpcode::kUnsafePointerAdd || opcode == IrOpcode::kRetain;
 }
 
-// Initialize unobservable_ with js_graph->graph->NodeCount() empty sets.
-RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph,
-                                           TickCounter* tick_counter,
-                                           Zone* temp_zone)
-    : jsgraph_(js_graph),
-      tick_counter_(tick_counter),
-      temp_zone_(temp_zone),
-      revisit_(temp_zone),
-      in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
-      unobservable_(js_graph->graph()->NodeCount(),
-                    UnobservablesSet::Unvisited(), temp_zone),
-      to_remove_(temp_zone),
-      unobservables_visited_empty_(UnobservablesSet::VisitedEmpty(temp_zone)) {}
-
-void RedundantStoreFinder::Visit(Node* node) {
-  // All effectful nodes should be reachable from End via a sequence of
-  // control, then a sequence of effect edges. In VisitEffectfulNode we mark
-  // all effect inputs for revisiting (if they might have stale state); here
-  // we mark all control inputs at least once.
-
+void StoreStoreElimination::RedundantStoreFinder::Visit(Node* node) {
   if (!HasBeenVisited(node)) {
     for (int i = 0; i < node->op()->ControlInputCount(); i++) {
       Node* control_input = NodeProperties::GetControlInput(node, i);
@@ -369,29 +169,32 @@ void RedundantStoreFinder::Visit(Node* node) {
     }
   }
 
-  bool isEffectful = (node->op()->EffectInputCount() >= 1);
-  if (isEffectful) {
+  bool is_effectful = node->op()->EffectInputCount() >= 1;
+  if (is_effectful) {
+    // Mark all effect inputs for revisiting (if they might have stale state).
     VisitEffectfulNode(node);
     DCHECK(HasBeenVisited(node));
-  }
-
-  if (!HasBeenVisited(node)) {
+  } else if (!HasBeenVisited(node)) {
     // Mark as visited.
     unobservable_for_id(node->id()) = unobservables_visited_empty_;
   }
 }
 
-void RedundantStoreFinder::VisitEffectfulNode(Node* node) {
+void StoreStoreElimination::RedundantStoreFinder::VisitEffectfulNode(
+    Node* node) {
   if (HasBeenVisited(node)) {
     TRACE("- Revisiting: #%d:%s", node->id(), node->op()->mnemonic());
   }
-  UnobservablesSet after_set = RecomputeUseIntersection(node);
-  UnobservablesSet before_set = RecomputeSet(node, after_set);
+  StoreStoreElimination::UnobservablesSet after_set =
+      RecomputeUseIntersection(node);
+  StoreStoreElimination::UnobservablesSet before_set =
+      RecomputeSet(node, after_set);
   DCHECK(!before_set.IsUnvisited());
 
-  UnobservablesSet stored_for_node = unobservable_for_id(node->id());
+  StoreStoreElimination::UnobservablesSet stores_for_node =
+      unobservable_for_id(node->id());
   bool cur_set_changed =
-      (stored_for_node.IsUnvisited() || stored_for_node != before_set);
+      stores_for_node.IsUnvisited() || stores_for_node != before_set;
   if (!cur_set_changed) {
     // We will not be able to update the part of this chain above any more.
     // Exit.
@@ -409,81 +212,78 @@ void RedundantStoreFinder::VisitEffectfulNode(Node* node) {
   }
 }
 
-// Compute the intersection of the UnobservablesSets of all effect uses and
-// return it. This function only works if {node} has an effect use.
-//
-// The result UnobservablesSet will always be visited.
-UnobservablesSet RedundantStoreFinder::RecomputeUseIntersection(Node* node) {
+StoreStoreElimination::UnobservablesSet
+StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection(
+    Node* node) {
+  // If the node has no effect uses, return early.
+  if (node->op()->EffectOutputCount() == 0) {
+    IrOpcode::Value opcode = node->opcode();
+    // List of opcodes that may end this effect chain. The opcodes are not
+    // important to the soundness of this optimization; this serves as a
+    // general sanity check. Add opcodes to this list as it suits you.
+    //
+    // Everything is observable after these opcodes; return the empty set.
+    DCHECK_EXTRA(
+        opcode == IrOpcode::kReturn || opcode == IrOpcode::kTerminate ||
+            opcode == IrOpcode::kDeoptimize || opcode == IrOpcode::kThrow,
+        "for #%d:%s", node->id(), node->op()->mnemonic());
+    USE(opcode);
+
+    return unobservables_visited_empty_;
+  }
+
   // {first} == true indicates that we haven't looked at any elements yet.
   // {first} == false indicates that cur_set is the intersection of at least one
   // thing.
-
   bool first = true;
-  UnobservablesSet cur_set = UnobservablesSet::Unvisited();  // irrelevant
-
+  StoreStoreElimination::UnobservablesSet cur_set =
+      StoreStoreElimination::UnobservablesSet::Unvisited();  // irrelevant
   for (Edge edge : node->use_edges()) {
-    // Skip non-effect edges
     if (!NodeProperties::IsEffectEdge(edge)) {
       continue;
     }
 
+    // Intersect with the new use node.
     Node* use = edge.from();
-    UnobservablesSet new_set = unobservable_for_id(use->id());
-    // Include new_set in the intersection.
+    StoreStoreElimination::UnobservablesSet new_set =
+        unobservable_for_id(use->id());
     if (first) {
-      // Intersection of a one-element set is that one element
       first = false;
       cur_set = new_set;
+      if (cur_set.IsUnvisited()) {
+        cur_set = unobservables_visited_empty_;
+      }
     } else {
-      // Take the intersection of cur_set and new_set.
-      cur_set = cur_set.Intersect(new_set, temp_zone());
+      cur_set =
+          cur_set.Intersect(new_set, unobservables_visited_empty_, temp_zone());
     }
-  }
 
-  if (first) {
-    // There were no effect uses.
-    auto opcode = node->op()->opcode();
-    // List of opcodes that may end this effect chain. The opcodes are not
-    // important to the soundness of this optimization; this serves as a
-    // general sanity check. Add opcodes to this list as it suits you.
-    //
-    // Everything is observable after these opcodes; return the empty set.
-    DCHECK_EXTRA(
-        opcode == IrOpcode::kReturn || opcode == IrOpcode::kTerminate ||
-            opcode == IrOpcode::kDeoptimize || opcode == IrOpcode::kThrow,
-        "for #%d:%s", node->id(), node->op()->mnemonic());
-    USE(opcode);  // silence warning about unused variable in release mode
-
-    return unobservables_visited_empty_;
-  } else {
-    if (cur_set.IsUnvisited()) {
-      cur_set = unobservables_visited_empty_;
+    // Break fast for the empty set since the intersection will always be empty.
+    if (cur_set.IsEmpty()) {
+      break;
     }
-
-    return cur_set;
   }
-}
 
-UnobservablesSet UnobservablesSet::Unvisited() { return UnobservablesSet(); }
+  DCHECK(!cur_set.IsUnvisited());
+  return cur_set;
+}
 
-UnobservablesSet::UnobservablesSet() : set_(nullptr) {}
+StoreStoreElimination::UnobservablesSet::UnobservablesSet() : set_(nullptr) {}
 
-UnobservablesSet UnobservablesSet::VisitedEmpty(Zone* zone) {
-  // Create a new empty UnobservablesSet. This allocates in the zone, and
-  // can probably be optimized to use a global singleton.
+StoreStoreElimination::UnobservablesSet
+StoreStoreElimination::UnobservablesSet::VisitedEmpty(Zone* zone) {
   ZoneSet<UnobservableStore>* empty_set =
       new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
           ZoneSet<UnobservableStore>(zone);
-  return UnobservablesSet(empty_set);
+  return StoreStoreElimination::UnobservablesSet(empty_set);
 }
 
-// Computes the intersection of two UnobservablesSets. May return
-// UnobservablesSet::Unvisited() instead of an empty UnobservablesSet for
-// speed.
-UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other,
-                                             Zone* zone) const {
+StoreStoreElimination::UnobservablesSet
+StoreStoreElimination::UnobservablesSet::Intersect(
+    const StoreStoreElimination::UnobservablesSet& other,
+    const StoreStoreElimination::UnobservablesSet& empty, Zone* zone) const {
   if (IsEmpty() || other.IsEmpty()) {
-    return Unvisited();
+    return empty;
   } else {
     ZoneSet<UnobservableStore>* intersection =
         new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
@@ -493,14 +293,15 @@ UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other,
                      other.set()->end(),
                      std::inserter(*intersection, intersection->end()));
 
-    return UnobservablesSet(intersection);
+    return StoreStoreElimination::UnobservablesSet(intersection);
   }
 }
 
-UnobservablesSet UnobservablesSet::Add(UnobservableStore obs,
-                                       Zone* zone) const {
-  bool present = (set()->find(obs) != set()->end());
-  if (present) {
+StoreStoreElimination::UnobservablesSet
+StoreStoreElimination::UnobservablesSet::Add(UnobservableStore obs,
+                                             Zone* zone) const {
+  bool found = set()->find(obs) != set()->end();
+  if (found) {
     return *this;
   } else {
     // Make a new empty set.
@@ -514,12 +315,13 @@ UnobservablesSet UnobservablesSet::Add(UnobservableStore obs,
     DCHECK(inserted);
     USE(inserted);  // silence warning about unused variable
 
-    return UnobservablesSet(new_set);
+    return StoreStoreElimination::UnobservablesSet(new_set);
   }
 }
 
-UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset,
-                                                    Zone* zone) const {
+StoreStoreElimination::UnobservablesSet
+StoreStoreElimination::UnobservablesSet::RemoveSameOffset(StoreOffset offset,
+                                                          Zone* zone) const {
   // Make a new empty set.
   ZoneSet<UnobservableStore>* new_set =
       new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
@@ -531,30 +333,7 @@ UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset,
     }
   }
 
-  return UnobservablesSet(new_set);
-}
-
-// Used for debugging.
-bool UnobservablesSet::operator==(const UnobservablesSet& other) const {
-  if (IsUnvisited() || other.IsUnvisited()) {
-    return IsEmpty() && other.IsEmpty();
-  } else {
-    // Both pointers guaranteed not to be nullptrs.
-    return *set() == *other.set();
-  }
-}
-
-bool UnobservablesSet::operator!=(const UnobservablesSet& other) const {
-  return !(*this == other);
-}
-
-bool UnobservableStore::operator==(const UnobservableStore other) const {
-  return (id_ == other.id_) && (offset_ == other.offset_);
-}
-
-
-bool UnobservableStore::operator<(const UnobservableStore other) const {
-  return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
+  return StoreStoreElimination::UnobservablesSet(new_set);
 }
 
 #undef TRACE
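
The rewritten RecomputeSet above now treats every unobservable repeated StoreField as removable, dropping the old AtMostTagged/AtLeastTagged size checks and the four-way branch. A toy model of the underlying idea, walking an effect chain backwards and flagging stores whose (object, offset) slot is already known to be unobservable (plain std::set here, not V8's zone-allocated shared sets):

    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <utility>
    #include <vector>

    using Obs = std::pair<uint32_t, uint32_t>;  // (stored-to node id, offset)

    struct Store {
      uint32_t object;
      uint32_t offset;
    };

    int main() {
      // Stores in reverse program order, i.e. as seen walking back from End.
      std::vector<Store> chain = {{263, 24}, {263, 24}, {263, 16}};
      std::set<Obs> unobservable;  // slots whose next observation is a store
      for (const Store& s : chain) {
        Obs obs{s.object, s.offset};
        if (unobservable.count(obs) != 0) {
          // An earlier (in program order) store to the same slot is dead.
          std::cout << "redundant: StoreField[+" << s.offset << "](#"
                    << s.object << ")\n";
        } else {
          unobservable.insert(obs);
        }
        // A real pass would also clear entries on loads, calls, etc.
      }
    }
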
diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h
index 646640a3104fa7..7704938fc0d6dc 100644
--- a/deps/v8/src/compiler/store-store-elimination.h
+++ b/deps/v8/src/compiler/store-store-elimination.h
@@ -7,6 +7,7 @@
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -16,10 +17,203 @@ class TickCounter;
 
 namespace compiler {
 
+// Store-store elimination.
+//
+// The aim of this optimization is to detect the following pattern in the
+// effect graph:
+//
+// - StoreField[+24, kRepTagged](263, ...)
+//
+//   ... lots of nodes from which the field at offset 24 of the object
+//       returned by node #263 cannot be observed ...
+//
+// - StoreField[+24, kRepTagged](263, ...)
+//
+// In such situations, the earlier StoreField cannot be observed, and can be
+// eliminated. This optimization should work for any offset and input node, of
+// course.
+//
+// The optimization also works across splits. It currently does not work for
+// loops, because we tend to put a stack check in loops, and like deopts,
+// stack checks can observe anything.
+
+// Assumption: every byte of a JS object is only ever accessed through one
+// offset. For instance, byte 15 of a given object may be accessed using a
+// two-byte read at offset 14, or a four-byte read at offset 12, but never
+// both in the same program.
+//
+// This implementation needs all dead nodes removed from the graph, and the
+// graph should be trimmed.
 class StoreStoreElimination final {
  public:
   static void Run(JSGraph* js_graph, TickCounter* tick_counter,
                   Zone* temp_zone);
+
+ private:
+  using StoreOffset = uint32_t;
+
+  struct UnobservableStore {
+    NodeId id_;
+    StoreOffset offset_;
+
+    bool operator==(const UnobservableStore other) const {
+      return (id_ == other.id_) && (offset_ == other.offset_);
+    }
+
+    bool operator<(const UnobservableStore other) const {
+      return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
+    }
+  };
+
+  // Instances of UnobservablesSet are immutable. They represent either a set of
+  // UnobservableStores, or the "unvisited empty set".
+  //
+  // We apply some sharing to save memory. The class UnobservablesSet is only a
+  // pointer wide, and a copy does not use any heap (or temp_zone) memory. Most
+  // changes to an UnobservablesSet might allocate in the temp_zone.
+  //
+  // The size of an instance should be the size of a pointer, plus additional
+  // space in the zone in the case of non-unvisited UnobservablesSets. Copying
+  // an UnobservablesSet allocates no memory.
+  class UnobservablesSet final {
+   public:
+    // Creates a new unvisited UnobservablesSet, backed by the null set.
+    static UnobservablesSet Unvisited() { return UnobservablesSet(); }
+
+    // Create a new empty UnobservablesSet. This allocates in the zone, and
+    // can probably be optimized to use a global singleton.
+    static UnobservablesSet VisitedEmpty(Zone* zone);
+    UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default;
+
+    // Computes the intersection of two UnobservablesSets. If either set is
+    // empty, returns the empty set.
+    UnobservablesSet Intersect(const UnobservablesSet& other,
+                               const UnobservablesSet& empty, Zone* zone) const;
+
+    // Returns a set that is the current one plus the observation {obs} passed
+    // as a parameter. If the observation is already in the set, no new set
+    // needs to be created.
+    UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
+
+    // Returns a set that is the current one minus all observations with
+    // offset {off}. This is done by creating a new set and copying over all
+    // observations with a different offset.
+    // This could probably be done better if the observations were stored
+    // first by offset and then by node.
+    // We remove all nodes with offset {off} because different nodes may
+    // alias one another, and we currently have no means of knowing whether
+    // two nodes are definitely the same value.
+    UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
+
+    const ZoneSet<UnobservableStore>* set() const { return set_; }
+
+    bool IsUnvisited() const { return set_ == nullptr; }
+    bool IsEmpty() const { return set_ == nullptr || set_->empty(); }
+    bool Contains(UnobservableStore obs) const {
+      return set_ != nullptr && (set_->find(obs) != set_->end());
+    }
+
+    bool operator==(const UnobservablesSet& other) const {
+      if (IsUnvisited() || other.IsUnvisited()) {
+        return IsEmpty() && other.IsEmpty();
+      } else {
+        // Both pointers guaranteed not to be nullptrs.
+        return *set() == *(other.set());
+      }
+    }
+
+    bool operator!=(const UnobservablesSet& other) const {
+      return !(*this == other);
+    }
+
+   private:
+    UnobservablesSet();
+    explicit UnobservablesSet(const ZoneSet<UnobservableStore>* set)
+        : set_(set) {}
+    const ZoneSet<UnobservableStore>* set_;
+  };
+
+  class RedundantStoreFinder final {
+   public:
+    // Note that we initialize unobservable_ with js_graph->graph()->NodeCount()
+    // empty sets.
+    RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter,
+                         Zone* temp_zone)
+        : jsgraph_(js_graph),
+          tick_counter_(tick_counter),
+          temp_zone_(temp_zone),
+          revisit_(temp_zone),
+          in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
+          unobservable_(js_graph->graph()->NodeCount(),
+                        StoreStoreElimination::UnobservablesSet::Unvisited(),
+                        temp_zone),
+          to_remove_(temp_zone),
+          unobservables_visited_empty_(
+              StoreStoreElimination::UnobservablesSet::VisitedEmpty(
+                  temp_zone)) {}
+
+    // Crawls from the end of the graph to the beginning, with the objective of
+    // finding redundant stores.
+    void Find();
+
+    // Provides const access to the final list of redundant stores that are
+    // then removed from the graph.
+    const ZoneSet<Node*>& to_remove_const() { return to_remove_; }
+
+   private:
+    // Assumption: All effectful nodes are reachable from End via a sequence of
+    // control, then a sequence of effect edges.
+    // Visit goes through the control chain, visiting effectful nodes that it
+    // encounters.
+    void Visit(Node* node);
+
+    // Marks effect inputs for visiting, if we are able to update this path of
+    // the graph.
+    void VisitEffectfulNode(Node* node);
+
+    // Compute the intersection of the UnobservablesSets of all effect uses and
+    // return it.
+    // The result UnobservablesSet will never be null.
+    UnobservablesSet RecomputeUseIntersection(Node* node);
+
+    // Recompute unobservables-set for a node. Will also mark superfluous nodes
+    // as to be removed.
+    UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses);
+
+    // Returns true if node's opcode cannot observe StoreFields.
+    static bool CannotObserveStoreField(Node* node);
+
+    void MarkForRevisit(Node* node);
+    bool HasBeenVisited(Node* node);
+
+    // To safely cast an offset from a FieldAccess, which has a potentially
+    // wider range (namely int).
+    StoreOffset ToOffset(const FieldAccess& access) {
+      DCHECK_GE(access.offset, 0);
+      return static_cast<StoreOffset>(access.offset);
+    }
+
+    JSGraph* jsgraph() const { return jsgraph_; }
+    Isolate* isolate() { return jsgraph()->isolate(); }
+    Zone* temp_zone() const { return temp_zone_; }
+    UnobservablesSet& unobservable_for_id(NodeId id) {
+      DCHECK_LT(id, unobservable_.size());
+      return unobservable_[id];
+    }
+    ZoneSet<Node*>& to_remove() { return to_remove_; }
+
+    JSGraph* const jsgraph_;
+    TickCounter* const tick_counter_;
+    Zone* const temp_zone_;
+
+    ZoneStack<Node*> revisit_;
+    ZoneVector<bool> in_revisit_;
+
+    // Maps node IDs to UnobservableNodeSets.
+    ZoneVector<UnobservablesSet> unobservable_;
+    ZoneSet<Node*> to_remove_;
+    const UnobservablesSet unobservables_visited_empty_;
+  };
 };
 
 }  // namespace compiler
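
RecomputeUseIntersection, as rewritten above, intersects the UnobservablesSets of all effect uses and now breaks out as soon as the running intersection is empty, since intersecting anything further with the empty set stays empty. A standalone sketch of that merge step using plain std::set (the real code shares immutable zone-allocated sets):

    #include <algorithm>
    #include <iterator>
    #include <set>
    #include <utility>
    #include <vector>

    using ObsSet = std::set<int>;  // simplified stand-in for UnobservablesSet

    ObsSet IntersectUses(const std::vector<ObsSet>& uses) {
      ObsSet cur;
      bool first = true;
      for (const ObsSet& use : uses) {
        if (first) {
          // The intersection of a one-element family is that element.
          cur = use;
          first = false;
        } else {
          ObsSet next;
          std::set_intersection(cur.begin(), cur.end(), use.begin(), use.end(),
                                std::inserter(next, next.end()));
          cur = std::move(next);
        }
        if (cur.empty()) break;  // the "break fast" case added in this diff
      }
      return cur;
    }
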
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 5dbbad3dcd64bd..6ba1b39431bd0c 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -10,6 +10,7 @@
 #include "src/codegen/tick-counter.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-heap-broker.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/loop-variable-optimizer.h"
@@ -787,7 +788,13 @@ Type Typer::Visitor::TypeParameter(Node* node) {
   return Type::NonInternal();
 }
 
-Type Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
+Type Typer::Visitor::TypeOsrValue(Node* node) {
+  if (OsrValueIndexOf(node->op()) == Linkage::kOsrContextSpillSlotIndex) {
+    return Type::OtherInternal();
+  } else {
+    return Type::Any();
+  }
+}
 
 Type Typer::Visitor::TypeRetain(Node* node) { UNREACHABLE(); }
 
@@ -999,10 +1006,6 @@ Type Typer::Visitor::TypeTypedObjectState(Node* node) {
 
 Type Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
 
-Type Typer::Visitor::TypeCallWithCallerSavedRegisters(Node* node) {
-  UNREACHABLE();
-}
-
 Type Typer::Visitor::TypeProjection(Node* node) {
   Type const type = Operand(node, 0);
   if (type.Is(Type::None())) return Type::None();
@@ -1524,6 +1527,10 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
     return Type::NonInternal();
   }
   JSFunctionRef function = fun.AsHeapConstant()->Ref().AsJSFunction();
+  if (!function.serialized()) {
+    TRACE_BROKER_MISSING(t->broker(), "data for function " << function);
+    return Type::NonInternal();
+  }
   if (!function.shared().HasBuiltinId()) {
     return Type::NonInternal();
   }
@@ -1564,6 +1571,7 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
     case Builtins::kMathPow:
     case Builtins::kMathMax:
     case Builtins::kMathMin:
+    case Builtins::kMathHypot:
       return Type::Number();
     case Builtins::kMathImul:
       return Type::Signed32();
@@ -2364,6 +2372,8 @@ Type Typer::Visitor::TypeConstant(Handle<Object> value) {
   return Type::NewConstant(typer_->broker(), value, zone());
 }
 
+Type Typer::Visitor::TypeJSGetIterator(Node* node) { return Type::Any(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
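
Two of the typer changes above are easy to miss: JSCallTyper now bails out to Type::NonInternal() when the broker has no serialized data for the callee, and Builtins::kMathHypot joins the group of Math builtins typed as Number. A toy sketch of that dispatch shape (the enumerators and string results are simplified stand-ins for V8's Type lattice):

    #include <string>

    enum class Builtin {
      kMathPow, kMathMax, kMathMin, kMathHypot, kMathImul, kOther
    };

    std::string ResultTypeOf(Builtin builtin) {
      switch (builtin) {
        case Builtin::kMathPow:
        case Builtin::kMathMax:
        case Builtin::kMathMin:
        case Builtin::kMathHypot:  // the case added in this diff
          return "Number";
        case Builtin::kMathImul:
          return "Signed32";
        default:
          return "NonInternal";  // unknown or unserialized callee
      }
    }
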
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index d4267a75fe0f58..018c54c3d57ce8 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -324,7 +324,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
 
     // Remaining instance types are unsupported for now. If any of them do
     // require bit set types, they should get kOtherInternal.
-    case MUTABLE_HEAP_NUMBER_TYPE:
     case FREE_SPACE_TYPE:
     case FILLER_TYPE:
     case ACCESS_CHECK_INFO_TYPE:
@@ -365,7 +364,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
     case PROMISE_FULFILL_REACTION_JOB_TASK_TYPE:
     case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
     case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
-    case FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE:
 #define MAKE_TORQUE_CLASS_TYPE(V) case V:
       TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
 #undef MAKE_TORQUE_CLASS_TYPE
diff --git a/deps/v8/src/compiler/vector-slot-pair.cc b/deps/v8/src/compiler/vector-slot-pair.cc
deleted file mode 100644
index 97f53648a4ae6e..00000000000000
--- a/deps/v8/src/compiler/vector-slot-pair.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/vector-slot-pair.h"
-
-#include "src/objects/feedback-vector.h"
-
-namespace v8 {
-namespace internal {
-
-VectorSlotPair::VectorSlotPair() = default;
-
-int VectorSlotPair::index() const {
-  return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_);
-}
-
-bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
-  return lhs.slot() == rhs.slot() &&
-         lhs.vector().location() == rhs.vector().location() &&
-         lhs.ic_state() == rhs.ic_state();
-}
-
-bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
-  return !(lhs == rhs);
-}
-
-std::ostream& operator<<(std::ostream& os, const VectorSlotPair& p) {
-  if (p.IsValid()) {
-    return os << "VectorSlotPair(" << p.slot() << ", "
-              << InlineCacheState2String(p.ic_state()) << ")";
-  }
-  return os << "VectorSlotPair(INVALID)";
-}
-
-size_t hash_value(VectorSlotPair const& p) {
-  return base::hash_combine(p.slot(), p.vector().location(), p.ic_state());
-}
-
-}  // namespace internal
-}  // namespace v8
diff --git a/deps/v8/src/compiler/vector-slot-pair.h b/deps/v8/src/compiler/vector-slot-pair.h
deleted file mode 100644
index 9944544a136073..00000000000000
--- a/deps/v8/src/compiler/vector-slot-pair.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_VECTOR_SLOT_PAIR_H_
-#define V8_COMPILER_VECTOR_SLOT_PAIR_H_
-
-#include "src/common/globals.h"
-#include "src/handles/handles.h"
-#include "src/utils/utils.h"
-
-namespace v8 {
-namespace internal {
-
-class FeedbackVector;
-
-// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
-// is used to access the type feedback for a certain {Node}.
-class V8_EXPORT_PRIVATE VectorSlotPair {
- public:
-  VectorSlotPair();
-  VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot,
-                 InlineCacheState ic_state)
-      : vector_(vector), slot_(slot), ic_state_(ic_state) {}
-
-  bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
-
-  Handle<FeedbackVector> vector() const { return vector_; }
-  FeedbackSlot slot() const { return slot_; }
-  InlineCacheState ic_state() const { return ic_state_; }
-
-  int index() const;
-
- private:
-  Handle<FeedbackVector> vector_;
-  FeedbackSlot slot_;
-  InlineCacheState ic_state_ = UNINITIALIZED;
-};
-
-bool operator==(VectorSlotPair const&, VectorSlotPair const&);
-bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
-                                           VectorSlotPair const&);
-
-size_t hash_value(VectorSlotPair const&);
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_VECTOR_SLOT_PAIR_H_
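
With vector-slot-pair.{h,cc} deleted, every former VectorSlotPair use in this diff goes through FeedbackSource from src/compiler/feedback-source.h, which is not itself part of this diff. Judging from the deleted class and the call sites above, it carries a feedback vector plus a slot and exposes IsValid() and a nested Hash functor; the member names in this sketch are assumptions, not the real definition:

    #include <cstddef>
    #include <functional>

    struct FeedbackSlot {
      int id = -1;
      bool IsInvalid() const { return id < 0; }
    };

    // Hypothetical shape of FeedbackSource, inferred from the deleted
    // VectorSlotPair and the IsValid()/FeedbackSource::Hash call sites above.
    struct FeedbackSource {
      const void* vector = nullptr;  // stand-in for Handle<FeedbackVector>
      FeedbackSlot slot;

      bool IsValid() const { return vector != nullptr && !slot.IsInvalid(); }

      struct Hash {
        std::size_t operator()(const FeedbackSource& source) const {
          return std::hash<const void*>()(source.vector) ^
                 std::hash<int>()(source.slot.id);
        }
      };
    };
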
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index d3d4d54ea25485..608d6ffee689bd 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -580,7 +580,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
       // TODO(jarin): what are the constraints on these?
       break;
     case IrOpcode::kCall:
-    case IrOpcode::kCallWithCallerSavedRegisters:
       // TODO(rossberg): what are the constraints on these?
       break;
     case IrOpcode::kTailCall:
@@ -766,6 +765,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
       CheckNotTyped(node);
       CHECK(StoreNamedOwnParametersOf(node->op()).feedback().IsValid());
       break;
+    case IrOpcode::kJSGetIterator:
+      // The type can be anything.
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckTypeIs(node, Type::Any());
+      break;
     case IrOpcode::kJSStoreDataPropertyInLiteral:
     case IrOpcode::kJSStoreInArrayLiteral:
       // Type is empty.
@@ -1800,6 +1804,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
     case IrOpcode::kBitcastTaggedSignedToWord:
     case IrOpcode::kBitcastWordToTagged:
     case IrOpcode::kBitcastWordToTaggedSigned:
+    case IrOpcode::kBitcastWord32ToCompressedSigned:
+    case IrOpcode::kBitcastCompressedSignedToWord32:
     case IrOpcode::kChangeInt32ToInt64:
     case IrOpcode::kChangeUint32ToUint64:
     case IrOpcode::kChangeTaggedToCompressed:
@@ -1838,7 +1844,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
     case IrOpcode::kTaggedPoisonOnSpeculation:
     case IrOpcode::kWord32PoisonOnSpeculation:
     case IrOpcode::kWord64PoisonOnSpeculation:
-    case IrOpcode::kLoadStackPointer:
     case IrOpcode::kLoadFramePointer:
     case IrOpcode::kLoadParentFramePointer:
     case IrOpcode::kUnalignedLoad:
@@ -1877,6 +1882,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
     case IrOpcode::kSignExtendWord16ToInt64:
     case IrOpcode::kSignExtendWord32ToInt64:
     case IrOpcode::kStaticAssert:
+    case IrOpcode::kStackPointerGreaterThan:
 
 #define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
       MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 2da7177ece2d54..28f9943e591905 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -25,12 +25,11 @@
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/int64-lowering.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-origin-table.h"
+#include "src/compiler/node-properties.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/simd-scalar-lowering.h"
 #include "src/compiler/zone-stats.h"
@@ -259,25 +258,25 @@ Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
 Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, Node** vals,
                             Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
-  Node** buf = Realloc(vals, count, count + 1);
+  Vector<Node*> buf = Realloc(vals, count, count + 1);
   buf[count] = control;
   return graph()->NewNode(
       mcgraph()->common()->Phi(wasm::ValueTypes::MachineRepresentationFor(type),
                                count),
-      count + 1, buf);
+      count + 1, buf.begin());
 }
 
 Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
                                   Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
-  Node** buf = Realloc(effects, count, count + 1);
+  Vector<Node*> buf = Realloc(effects, count, count + 1);
   buf[count] = control;
   return graph()->NewNode(mcgraph()->common()->EffectPhi(count), count + 1,
-                          buf);
+                          buf.begin());
 }
 
 Node* WasmGraphBuilder::RefNull() {
-  Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+  Node* isolate_root = BuildLoadIsolateRoot();
   return LOAD_TAGGED_POINTER(
       isolate_root, IsolateData::root_slot_offset(RootIndex::kNullValue));
 }
@@ -291,10 +290,17 @@ Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
 }
 
 Node* WasmGraphBuilder::NoContextConstant() {
-  // TODO(titzer): avoiding a dependency on JSGraph here. Refactor.
   return mcgraph()->IntPtrConstant(0);
 }
 
+Node* WasmGraphBuilder::BuildLoadIsolateRoot() {
+  // The IsolateRoot is loaded from the instance node so that the generated
+  // code is Isolate independent. This can be overridden by setting a specific
+  // node in {isolate_root_node_} beforehand.
+  if (isolate_root_node_.is_set()) return isolate_root_node_.get();
+  return LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+}
+
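
BuildLoadIsolateRoot centralizes one small decision: use an explicitly installed isolate-root node if one was set beforehand, otherwise load it from the instance. A minimal sketch of that set-once-override pattern, with standalone stand-in types rather than V8's SetOncePointer:

#include <cassert>

template <typename T>
class SetOnceSketch {
 public:
  void set(T value) {
    assert(!is_set_);  // may only be installed once
    value_ = value;
    is_set_ = true;
  }
  bool is_set() const { return is_set_; }
  T get() const { assert(is_set_); return value_; }

 private:
  T value_{};
  bool is_set_ = false;
};

int LoadFromInstanceSketch();  // hypothetical fallback load

// Mirrors BuildLoadIsolateRoot: prefer the override, else the generic load.
inline int LoadIsolateRootSketch(const SetOnceSketch<int>& override_node) {
  if (override_node.is_set()) return override_node.get();
  return LoadFromInstanceSketch();
}
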
 Node* WasmGraphBuilder::Uint32Constant(uint32_t value) {
   return mcgraph()->Uint32Constant(value);
 }
@@ -320,10 +326,6 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
   if (effect == nullptr) effect = effect_;
   if (control == nullptr) control = control_;
 
-  // This instruction sequence is matched in the instruction selector to
-  // load the stack pointer directly on some platforms. Hence, when modifying
-  // please also fix WasmStackCheckMatcher in node-matchers.h
-
   Node* limit_address = graph()->NewNode(
       mcgraph()->machine()->Load(MachineType::Pointer()), instance_node_.get(),
       mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(StackLimitAddress)),
@@ -332,10 +334,9 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
       mcgraph()->machine()->Load(MachineType::Pointer()), limit_address,
       mcgraph()->IntPtrConstant(0), limit_address, *control);
   *effect = limit;
-  Node* pointer = graph()->NewNode(mcgraph()->machine()->LoadStackPointer());
 
   Node* check =
-      graph()->NewNode(mcgraph()->machine()->UintLessThan(), limit, pointer);
+      graph()->NewNode(mcgraph()->machine()->StackPointerGreaterThan(), limit);
 
   Diamond stack_check(graph(), mcgraph()->common(), check, BranchHint::kTrue);
   stack_check.Chain(*control);
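
This hunk replaces the explicit LoadStackPointer node plus an unsigned compare with the single StackPointerGreaterThan operator, so instruction selection can emit a direct comparison against the stack pointer instead of pattern-matching the old two-node sequence (which is why the WasmStackCheckMatcher comment above could be removed). A sketch of the semantic before/after, with hypothetical helper declarations:

#include <cstdint>

uintptr_t LoadStackPointerSketch();                   // old: materialize sp
bool StackPointerGreaterThanSketch(uintptr_t limit);  // new: fused predicate

// Before: limit < sp, built from two nodes (a load, then UintLessThan).
inline bool StackCheckOld(uintptr_t limit) {
  return limit < LoadStackPointerSketch();
}

// After: one operator; the backend lowers it to a cmp against sp directly.
inline bool StackCheckNew(uintptr_t limit) {
  return StackPointerGreaterThanSketch(limit);
}
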
@@ -1126,12 +1127,13 @@ Node* WasmGraphBuilder::IfDefault(Node* sw) {
   return graph()->NewNode(mcgraph()->common()->IfDefault(), sw);
 }
 
-Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
+Node* WasmGraphBuilder::Return(Vector<Node*> vals) {
   static const int kStackAllocatedNodeBufferSize = 8;
   Node* stack_buffer[kStackAllocatedNodeBufferSize];
   std::vector<Node*> heap_buffer;
 
   Node** buf = stack_buffer;
+  unsigned count = static_cast<unsigned>(vals.size());
   if (count + 3 > kStackAllocatedNodeBufferSize) {
     heap_buffer.resize(count + 3);
     buf = heap_buffer.data();
@@ -1139,7 +1141,7 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
 
   buf[0] = mcgraph()->Int32Constant(0);
   if (count > 0) {
-    memcpy(buf + 1, vals, sizeof(void*) * count);
+    memcpy(buf + 1, vals.begin(), sizeof(void*) * count);
   }
   buf[count + 1] = Effect();
   buf[count + 2] = Control();
@@ -1150,11 +1152,9 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
   return ret;
 }
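
Return() now takes a Vector<Node*> and derives the count from it. The input buffer it assembles is laid out as [Int32Constant(0), values..., effect, control], using a small stack array unless count + 3 exceeds it. A standalone sketch of that stack-or-heap layout, with void* standing in for Node*:

#include <cstring>
#include <vector>

// Assembles the return-node inputs: a leading zero marker, the values,
// then effect and control. Small counts reuse a stack buffer; in the real
// code the filled buffer is handed to graph()->NewNode before returning.
inline size_t BuildReturnInputsSketch(void* const* vals, size_t count,
                                      void* effect, void* control,
                                      std::vector<void*>* heap_buffer) {
  static const size_t kStackSize = 8;
  void* stack_buffer[kStackSize];
  void** buf = stack_buffer;
  if (count + 3 > kStackSize) {
    heap_buffer->resize(count + 3);
    buf = heap_buffer->data();
  }
  buf[0] = nullptr;  // stands in for Int32Constant(0)
  if (count > 0) std::memcpy(buf + 1, vals, sizeof(void*) * count);
  buf[count + 1] = effect;
  buf[count + 2] = control;
  return count + 3;  // total number of inputs
}
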
 
-Node* WasmGraphBuilder::ReturnVoid() { return Return(0, nullptr); }
-
 Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) {
   TrapIfFalse(wasm::TrapReason::kTrapUnreachable, Int32Constant(0), position);
-  ReturnVoid();
+  Return(Vector<Node*>{});
   return nullptr;
 }
 
@@ -2295,13 +2295,13 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
   return BuildCallToRuntime(Runtime::kWasmExceptionGetTag, &except_obj, 1);
 }
 
-Node** WasmGraphBuilder::GetExceptionValues(
+Vector<Node*> WasmGraphBuilder::GetExceptionValues(
     Node* except_obj, const wasm::WasmException* exception) {
   Node* values_array =
       BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1);
   uint32_t index = 0;
   const wasm::WasmExceptionSig* sig = exception->sig;
-  Node** values = Buffer(sig->parameter_count());
+  Vector<Node*> values = Buffer(sig->parameter_count());
   for (size_t i = 0; i < sig->parameter_count(); ++i) {
     Node* value;
     switch (sig->GetParam(i)) {
@@ -2695,7 +2695,7 @@ Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig, Node** args,
   const size_t count = 1 + params + extra;
 
   // Reallocate the buffer to make space for extra inputs.
-  args = Realloc(args, 1 + params, count);
+  args = Realloc(args, 1 + params, count).begin();
 
   // Make room for the instance_node parameter at index 1, just after code.
   memmove(&args[2], &args[1], params * sizeof(Node*));
@@ -2725,7 +2725,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
   size_t ret_count = sig->return_count();
   if (ret_count == 0) return call;  // No return value.
 
-  *rets = Buffer(ret_count);
+  *rets = Buffer(ret_count).begin();
   if (ret_count == 1) {
     // Only a single return value.
     (*rets)[0] = call;
@@ -3183,12 +3183,12 @@ Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep,
   } else if (tnode != fnode) {
     uint32_t count = merge->InputCount();
     // + 1 for the merge node.
-    Node** vals = Buffer(count + 1);
+    Vector<Node*> vals = Buffer(count + 1);
     for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode;
     vals[count - 1] = fnode;
     vals[count] = merge;
     return graph()->NewNode(mcgraph()->common()->Phi(rep, count), count + 1,
-                            vals);
+                            vals.begin());
   }
   return tnode;
 }
@@ -3199,12 +3199,12 @@ Node* WasmGraphBuilder::CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode,
     AppendToPhi(tnode, fnode);
   } else if (tnode != fnode) {
     uint32_t count = merge->InputCount();
-    Node** effects = Buffer(count);
+    Vector<Node*> effects = Buffer(count);
     for (uint32_t j = 0; j < count - 1; j++) {
       effects[j] = tnode;
     }
     effects[count - 1] = fnode;
-    tnode = EffectPhi(count, effects, merge);
+    tnode = EffectPhi(count, effects.begin(), merge);
   }
   return tnode;
 }
@@ -3326,11 +3326,14 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(
   auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
       mcgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
       CallDescriptor::kNoFlags);
-  // The CEntryStub is loaded from the instance_node so that generated code is
+  // The CEntryStub is loaded from the IsolateRoot so that generated code is
   // Isolate independent. At the moment this is only done for CEntryStub(1).
+  Node* isolate_root = BuildLoadIsolateRoot();
   DCHECK_EQ(1, fun->result_size);
-  Node* centry_stub = LOAD_INSTANCE_FIELD(
-      CEntryStub, MachineType::TypeCompressedTaggedPointer());
+  auto centry_id =
+      Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+  Node* centry_stub = LOAD_TAGGED_POINTER(
+      isolate_root, IsolateData::builtin_slot_offset(centry_id));
   // TODO(titzer): allow arbitrary number of runtime arguments
   // At the moment we only allow 5 parameters. If more parameters are needed,
   // increase this constant accordingly.
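
Loading CEntry through the IsolateRoot means the generated code depends only on the isolate_root base pointer plus a compile-time slot offset, rather than on a per-instance CEntryStub field. A standalone model of such a root-table load (the offset and layout here are assumptions, not V8's actual values):

#include <cstdint>

// Hypothetical layout: builtins live in a table at a fixed offset from the
// isolate root, so a builtin's address is root + table_offset + id * ptrsize.
constexpr int kBuiltinTableOffsetSketch = 0x100;  // assumption only

inline int BuiltinSlotOffsetSketch(int builtin_id) {
  return kBuiltinTableOffsetSketch +
         builtin_id * static_cast<int>(sizeof(void*));
}

inline void* LoadBuiltinSketch(uint8_t* isolate_root, int builtin_id) {
  return *reinterpret_cast<void**>(isolate_root +
                                   BuiltinSlotOffsetSketch(builtin_id));
}
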
@@ -3943,30 +3946,43 @@ Graph* WasmGraphBuilder::graph() { return mcgraph()->graph(); }
 
 namespace {
 Signature<MachineRepresentation>* CreateMachineSignature(
-    Zone* zone, wasm::FunctionSig* sig) {
+    Zone* zone, wasm::FunctionSig* sig, WasmGraphBuilder::CallOrigin origin) {
   Signature<MachineRepresentation>::Builder builder(zone, sig->return_count(),
                                                     sig->parameter_count());
   for (auto ret : sig->returns()) {
-    builder.AddReturn(wasm::ValueTypes::MachineRepresentationFor(ret));
+    if (origin == WasmGraphBuilder::kCalledFromJS) {
+      builder.AddReturn(MachineRepresentation::kTagged);
+    } else {
+      builder.AddReturn(wasm::ValueTypes::MachineRepresentationFor(ret));
+    }
   }
 
   for (auto param : sig->parameters()) {
-    builder.AddParam(wasm::ValueTypes::MachineRepresentationFor(param));
+    if (origin == WasmGraphBuilder::kCalledFromJS) {
+      // Parameters coming from JavaScript are always tagged values. In
+      // particular, when the signature declares an I64, JavaScript provides a
+      // single BigInt object rather than two 32-bit parameters.
+      builder.AddParam(MachineRepresentation::kTagged);
+    } else {
+      builder.AddParam(wasm::ValueTypes::MachineRepresentationFor(param));
+    }
   }
   return builder.Build();
 }
 }  // namespace
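
The CallOrigin parameter makes the representation choice explicit: signatures built for calls crossing the JS boundary use tagged values throughout, while wasm-internal calls keep each type's machine representation. In sketch form, with simplified enums rather than V8's types:

enum class RepSketch { kTagged, kWord32, kWord64, kFloat32, kFloat64 };
enum class WasmTypeSketch { kI32, kI64, kF32, kF64 };
enum class CallOriginSketch { kCalledFromJS, kCalledFromWasm };

// Mirrors the branch in CreateMachineSignature: JS callers always see
// tagged values (an i64 arrives as a single BigInt object, for example).
inline RepSketch RepFor(WasmTypeSketch t, CallOriginSketch origin) {
  if (origin == CallOriginSketch::kCalledFromJS) return RepSketch::kTagged;
  switch (t) {
    case WasmTypeSketch::kI32: return RepSketch::kWord32;
    case WasmTypeSketch::kI64: return RepSketch::kWord64;  // lowered on 32-bit
    case WasmTypeSketch::kF32: return RepSketch::kFloat32;
    case WasmTypeSketch::kF64: return RepSketch::kFloat64;
  }
  return RepSketch::kTagged;  // unreachable
}
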
 
-void WasmGraphBuilder::LowerInt64() {
+void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
   if (mcgraph()->machine()->Is64()) return;
   Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
                   mcgraph()->zone(),
-                  CreateMachineSignature(mcgraph()->zone(), sig_));
+                  CreateMachineSignature(mcgraph()->zone(), sig_, origin),
+                  std::move(lowering_special_case_));
   r.LowerGraph();
 }
 
 void WasmGraphBuilder::SimdScalarLoweringForTesting() {
-  SimdScalarLowering(mcgraph(), CreateMachineSignature(mcgraph()->zone(), sig_))
+  SimdScalarLowering(mcgraph(), CreateMachineSignature(mcgraph()->zone(), sig_,
+                                                       kCalledFromWasm))
       .LowerGraph();
 }
 
@@ -3992,6 +4008,24 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
       return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]);
     case wasm::kExprF64x2Neg:
       return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]);
+    case wasm::kExprF64x2Add:
+      return graph()->NewNode(mcgraph()->machine()->F64x2Add(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF64x2Sub:
+      return graph()->NewNode(mcgraph()->machine()->F64x2Sub(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF64x2Mul:
+      return graph()->NewNode(mcgraph()->machine()->F64x2Mul(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF64x2Div:
+      return graph()->NewNode(mcgraph()->machine()->F64x2Div(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF64x2Min:
+      return graph()->NewNode(mcgraph()->machine()->F64x2Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF64x2Max:
+      return graph()->NewNode(mcgraph()->machine()->F64x2Max(), inputs[0],
+                              inputs[1]);
     case wasm::kExprF64x2Eq:
       return graph()->NewNode(mcgraph()->machine()->F64x2Eq(), inputs[0],
                               inputs[1]);
@@ -4040,6 +4074,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
     case wasm::kExprF32x4Mul:
       return graph()->NewNode(mcgraph()->machine()->F32x4Mul(), inputs[0],
                               inputs[1]);
+    case wasm::kExprF32x4Div:
+      return graph()->NewNode(mcgraph()->machine()->F32x4Div(), inputs[0],
+                              inputs[1]);
     case wasm::kExprF32x4Min:
       return graph()->NewNode(mcgraph()->machine()->F32x4Min(), inputs[0],
                               inputs[1]);
@@ -4068,6 +4105,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
       return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
     case wasm::kExprI64x2Neg:
       return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]);
+    case wasm::kExprI64x2Shl:
+      return graph()->NewNode(mcgraph()->machine()->I64x2Shl(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI64x2ShrS:
+      return graph()->NewNode(mcgraph()->machine()->I64x2ShrS(), inputs[0],
+                              inputs[1]);
     case wasm::kExprI64x2Add:
       return graph()->NewNode(mcgraph()->machine()->I64x2Add(), inputs[0],
                               inputs[1]);
@@ -4077,6 +4120,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
     case wasm::kExprI64x2Mul:
       return graph()->NewNode(mcgraph()->machine()->I64x2Mul(), inputs[0],
                               inputs[1]);
+    case wasm::kExprI64x2MinS:
+      return graph()->NewNode(mcgraph()->machine()->I64x2MinS(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI64x2MaxS:
+      return graph()->NewNode(mcgraph()->machine()->I64x2MaxS(), inputs[0],
+                              inputs[1]);
     case wasm::kExprI64x2Eq:
       return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0],
                               inputs[1]);
@@ -4095,6 +4144,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
     case wasm::kExprI64x2GeS:
       return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0],
                               inputs[1]);
+    case wasm::kExprI64x2ShrU:
+      return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI64x2MinU:
+      return graph()->NewNode(mcgraph()->machine()->I64x2MinU(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI64x2MaxU:
+      return graph()->NewNode(mcgraph()->machine()->I64x2MaxU(), inputs[0],
+                              inputs[1]);
     case wasm::kExprI64x2LtU:
       return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[1],
                               inputs[0]);
@@ -4123,6 +4181,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
                               inputs[0]);
     case wasm::kExprI32x4Neg:
       return graph()->NewNode(mcgraph()->machine()->I32x4Neg(), inputs[0]);
+    case wasm::kExprI32x4Shl:
+      return graph()->NewNode(mcgraph()->machine()->I32x4Shl(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4ShrS:
+      return graph()->NewNode(mcgraph()->machine()->I32x4ShrS(), inputs[0],
+                              inputs[1]);
     case wasm::kExprI32x4Add:
       return graph()->NewNode(mcgraph()->machine()->I32x4Add(), inputs[0],
                               inputs[1]);
@@ -4165,6 +4229,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
     case wasm::kExprI32x4UConvertI16x8High:
       return graph()->NewNode(mcgraph()->machine()->I32x4UConvertI16x8High(),
                               inputs[0]);
+    case wasm::kExprI32x4ShrU:
+      return graph()->NewNode(mcgraph()->machine()->I32x4ShrU(), inputs[0],
+                              inputs[1]);
     case wasm::kExprI32x4MinU:
       return graph()->NewNode(mcgraph()->machine()->I32x4MinU(), inputs[0],
                               inputs[1]);
@@ -4191,6 +4258,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
     case wasm::kExprI16x8SConvertI8x16High:
       return graph()->NewNode(mcgraph()->machine()->I16x8SConvertI8x16High(),
                               inputs[0]);
+    case wasm::kExprI16x8Shl:
+      return graph()->NewNode(mcgraph()->machine()->I16x8Shl(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8ShrS:
+      return graph()->NewNode(mcgraph()->machine()->I16x8ShrS(), inputs[0],
+                              inputs[1]);
     case wasm::kExprI16x8Neg:
       return graph()->NewNode(mcgraph()->machine()->I16x8Neg(), inputs[0]);
     case wasm::kExprI16x8SConvertI32x4:
@@ -4247,6 +4320,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
     case wasm::kExprI16x8UConvertI32x4:
       return graph()->NewNode(mcgraph()->machine()->I16x8UConvertI32x4(),
                               inputs[0], inputs[1]);
+    case wasm::kExprI16x8ShrU:
+      return graph()->NewNode(mcgraph()->machine()->I16x8ShrU(), inputs[0],
+                              inputs[1]);
     case wasm::kExprI16x8AddSaturateU:
       return graph()->NewNode(mcgraph()->machine()->I16x8AddSaturateU(),
                               inputs[0], inputs[1]);
@@ -4275,6 +4351,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
       return graph()->NewNode(mcgraph()->machine()->I8x16Splat(), inputs[0]);
     case wasm::kExprI8x16Neg:
       return graph()->NewNode(mcgraph()->machine()->I8x16Neg(), inputs[0]);
+    case wasm::kExprI8x16Shl:
+      return graph()->NewNode(mcgraph()->machine()->I8x16Shl(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16ShrS:
+      return graph()->NewNode(mcgraph()->machine()->I8x16ShrS(), inputs[0],
+                              inputs[1]);
     case wasm::kExprI8x16SConvertI16x8:
       return graph()->NewNode(mcgraph()->machine()->I8x16SConvertI16x8(),
                               inputs[0], inputs[1]);
@@ -4317,6 +4399,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
     case wasm::kExprI8x16GeS:
       return graph()->NewNode(mcgraph()->machine()->I8x16GeS(), inputs[0],
                               inputs[1]);
+    case wasm::kExprI8x16ShrU:
+      return graph()->NewNode(mcgraph()->machine()->I8x16ShrU(), inputs[0],
+                              inputs[1]);
     case wasm::kExprI8x16UConvertI16x8:
       return graph()->NewNode(mcgraph()->machine()->I8x16UConvertI16x8(),
                               inputs[0], inputs[1]);
@@ -4424,47 +4509,6 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
   }
 }
 
-Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
-                                    Node* const* inputs) {
-  has_simd_ = true;
-  switch (opcode) {
-    case wasm::kExprI64x2Shl:
-      return graph()->NewNode(mcgraph()->machine()->I64x2Shl(shift), inputs[0]);
-    case wasm::kExprI64x2ShrS:
-      return graph()->NewNode(mcgraph()->machine()->I64x2ShrS(shift),
-                              inputs[0]);
-    case wasm::kExprI64x2ShrU:
-      return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(shift),
-                              inputs[0]);
-    case wasm::kExprI32x4Shl:
-      return graph()->NewNode(mcgraph()->machine()->I32x4Shl(shift), inputs[0]);
-    case wasm::kExprI32x4ShrS:
-      return graph()->NewNode(mcgraph()->machine()->I32x4ShrS(shift),
-                              inputs[0]);
-    case wasm::kExprI32x4ShrU:
-      return graph()->NewNode(mcgraph()->machine()->I32x4ShrU(shift),
-                              inputs[0]);
-    case wasm::kExprI16x8Shl:
-      return graph()->NewNode(mcgraph()->machine()->I16x8Shl(shift), inputs[0]);
-    case wasm::kExprI16x8ShrS:
-      return graph()->NewNode(mcgraph()->machine()->I16x8ShrS(shift),
-                              inputs[0]);
-    case wasm::kExprI16x8ShrU:
-      return graph()->NewNode(mcgraph()->machine()->I16x8ShrU(shift),
-                              inputs[0]);
-    case wasm::kExprI8x16Shl:
-      return graph()->NewNode(mcgraph()->machine()->I8x16Shl(shift), inputs[0]);
-    case wasm::kExprI8x16ShrS:
-      return graph()->NewNode(mcgraph()->machine()->I8x16ShrS(shift),
-                              inputs[0]);
-    case wasm::kExprI8x16ShrU:
-      return graph()->NewNode(mcgraph()->machine()->I8x16ShrU(shift),
-                              inputs[0]);
-    default:
-      FATAL_UNSUPPORTED_OPCODE(opcode);
-  }
-}
-
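
SimdShiftOp disappears because shift amounts are no longer operator immediates: the machine operators (I64x2Shl and friends, as added in the hunks above) now take the amount as a second value input, so the generic SimdOp switch handles shifts like any other binary op. A standalone illustration of the node-shape change, with a hypothetical node type:

#include <string>
#include <vector>

struct NodeSketch {
  std::string op;
  std::vector<const NodeSketch*> inputs;
};

// Before: the amount was baked into the operator, leaving one value input.
inline NodeSketch ShiftOld(const NodeSketch* v, int shift) {
  return {"I32x4Shl[" + std::to_string(shift) + "]", {v}};
}

// After: a plain binary operator; the amount is an ordinary input node.
inline NodeSketch ShiftNew(const NodeSketch* v, const NodeSketch* amount) {
  return {"I32x4Shl", {v, amount}};
}
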
 Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
                                           Node* const* inputs) {
   has_simd_ = true;
@@ -5010,15 +5054,86 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
 namespace {
 class WasmWrapperGraphBuilder : public WasmGraphBuilder {
  public:
-  WasmWrapperGraphBuilder(Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* sig,
+  WasmWrapperGraphBuilder(Zone* zone, MachineGraph* mcgraph,
+                          wasm::FunctionSig* sig,
                           compiler::SourcePositionTable* spt,
                           StubCallMode stub_mode, wasm::WasmFeatures features)
-      : WasmGraphBuilder(nullptr, zone, jsgraph, sig, spt),
-        isolate_(jsgraph->isolate()),
-        jsgraph_(jsgraph),
+      : WasmGraphBuilder(nullptr, zone, mcgraph, sig, spt),
         stub_mode_(stub_mode),
         enabled_features_(features) {}
 
+  CallDescriptor* GetI32PairToBigIntCallDescriptor() {
+    I32PairToBigIntDescriptor interface_descriptor;
+
+    return Linkage::GetStubCallDescriptor(
+        mcgraph()->zone(),                              // zone
+        interface_descriptor,                           // descriptor
+        interface_descriptor.GetStackParameterCount(),  // stack parameter count
+        CallDescriptor::kNoFlags,                       // flags
+        Operator::kNoProperties,                        // properties
+        stub_mode_);                                    // stub call mode
+  }
+
+  CallDescriptor* GetI64ToBigIntCallDescriptor() {
+    if (!lowering_special_case_) {
+      lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>();
+    }
+
+    if (lowering_special_case_->i64_to_bigint_call_descriptor) {
+      return lowering_special_case_->i64_to_bigint_call_descriptor;
+    }
+
+    I64ToBigIntDescriptor interface_descriptor;
+    auto call_descriptor = Linkage::GetStubCallDescriptor(
+        mcgraph()->zone(),                              // zone
+        interface_descriptor,                           // descriptor
+        interface_descriptor.GetStackParameterCount(),  // stack parameter count
+        CallDescriptor::kNoFlags,                       // flags
+        Operator::kNoProperties,                        // properties
+        stub_mode_);                                    // stub call mode
+
+    lowering_special_case_->i64_to_bigint_call_descriptor = call_descriptor;
+    lowering_special_case_->i32_pair_to_bigint_call_descriptor =
+        GetI32PairToBigIntCallDescriptor();
+    return call_descriptor;
+  }
+
+  CallDescriptor* GetBigIntToI32PairCallDescriptor() {
+    BigIntToI32PairDescriptor interface_descriptor;
+
+    return Linkage::GetStubCallDescriptor(
+        mcgraph()->zone(),                              // zone
+        interface_descriptor,                           // descriptor
+        interface_descriptor.GetStackParameterCount(),  // stack parameter count
+        CallDescriptor::kNoFlags,                       // flags
+        Operator::kNoProperties,                        // properties
+        stub_mode_);                                    // stub call mode
+  }
+
+  CallDescriptor* GetBigIntToI64CallDescriptor() {
+    if (!lowering_special_case_) {
+      lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>();
+    }
+
+    if (lowering_special_case_->bigint_to_i64_call_descriptor) {
+      return lowering_special_case_->bigint_to_i64_call_descriptor;
+    }
+
+    BigIntToI64Descriptor interface_descriptor;
+    auto call_descriptor = Linkage::GetStubCallDescriptor(
+        mcgraph()->zone(),                              // zone
+        interface_descriptor,                           // descriptor
+        interface_descriptor.GetStackParameterCount(),  // stack parameter count
+        CallDescriptor::kNoFlags,                       // flags
+        Operator::kNoProperties,                        // properties
+        stub_mode_);                                    // stub call mode
+
+    lowering_special_case_->bigint_to_i64_call_descriptor = call_descriptor;
+    lowering_special_case_->bigint_to_i32_pair_call_descriptor =
+        GetBigIntToI32PairCallDescriptor();
+    return call_descriptor;
+  }
+
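
The new getters cache their descriptors in lowering_special_case_, and creating the 64-bit descriptor eagerly fills in its 32-bit-pair twin so the Int64Lowering can substitute it later without building descriptors itself. A simplified sketch of that caching shape:

#include <memory>

struct DescriptorSketch {};
DescriptorSketch* MakeDescriptorSketch();  // stands in for GetStubCallDescriptor

struct SpecialCaseSketch {
  DescriptorSketch* i64_to_bigint = nullptr;
  DescriptorSketch* i32_pair_to_bigint = nullptr;
};

inline DescriptorSketch* GetI64ToBigIntSketch(
    std::unique_ptr<SpecialCaseSketch>& special) {
  if (!special) special = std::make_unique<SpecialCaseSketch>();
  if (special->i64_to_bigint) return special->i64_to_bigint;  // cache hit
  special->i64_to_bigint = MakeDescriptorSketch();
  special->i32_pair_to_bigint = MakeDescriptorSketch();  // eager 32-bit twin
  return special->i64_to_bigint;
}
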
   Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) {
     MachineOperatorBuilder* machine = mcgraph()->machine();
     CommonOperatorBuilder* common = mcgraph()->common();
@@ -5027,7 +5142,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
             ? mcgraph()->RelocatableIntPtrConstant(
                   wasm::WasmCode::kWasmAllocateHeapNumber,
                   RelocInfo::WASM_STUB_CALL)
-            : BuildLoadBuiltinFromInstance(Builtins::kAllocateHeapNumber);
+            : BuildLoadBuiltinFromIsolateRoot(Builtins::kAllocateHeapNumber);
     if (!allocate_heap_number_operator_.is_set()) {
       auto call_descriptor = Linkage::GetStubCallDescriptor(
           mcgraph()->zone(), AllocateHeapNumberDescriptor(), 0,
@@ -5084,10 +5199,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     return undefined_value_node_.get();
   }
 
-  Node* BuildLoadBuiltinFromInstance(int builtin_index) {
+  Node* BuildLoadBuiltinFromIsolateRoot(int builtin_index) {
     DCHECK(Builtins::IsBuiltinId(builtin_index));
-    Node* isolate_root =
-        LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+    Node* isolate_root = BuildLoadIsolateRoot();
     return LOAD_TAGGED_POINTER(isolate_root,
                                IsolateData::builtin_slot_offset(builtin_index));
   }
@@ -5213,7 +5327,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
                             vsmi, vbox, merge);
   }
 
-  int AddArgumentNodes(Node** args, int pos, int param_count,
+  int AddArgumentNodes(Vector<Node*> args, int pos, int param_count,
                        wasm::FunctionSig* sig) {
     // Convert wasm numbers to JS values.
     for (int i = 0; i < param_count; ++i) {
@@ -5232,7 +5346,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
             ? mcgraph()->RelocatableIntPtrConstant(
                   wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL)
-            : BuildLoadBuiltinFromInstance(Builtins::kToNumber);
+            : BuildLoadBuiltinFromIsolateRoot(Builtins::kToNumber);
 
     Node* result = SetEffect(
         graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code,
@@ -5317,47 +5431,59 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
   }
 
   Node* BuildChangeInt64ToBigInt(Node* input) {
-    I64ToBigIntDescriptor interface_descriptor;
-
-    auto call_descriptor = Linkage::GetStubCallDescriptor(
-        mcgraph()->zone(),                              // zone
-        interface_descriptor,                           // descriptor
-        interface_descriptor.GetStackParameterCount(),  // stack parameter count
-        CallDescriptor::kNoFlags,                       // flags
-        Operator::kNoProperties,                        // properties
-        stub_mode_);                                    // stub call mode
-
-    Node* target =
-        (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
-            ? mcgraph()->RelocatableIntPtrConstant(
-                  wasm::WasmCode::kWasmI64ToBigInt, RelocInfo::WASM_STUB_CALL)
-            : BuildLoadBuiltinFromInstance(Builtins::kI64ToBigInt);
+    const Operator* call =
+        mcgraph()->common()->Call(GetI64ToBigIntCallDescriptor());
+
+    Node* target;
+    if (mcgraph()->machine()->Is64()) {
+      target =
+          (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+              ? mcgraph()->RelocatableIntPtrConstant(
+                    wasm::WasmCode::kWasmI64ToBigInt, RelocInfo::WASM_STUB_CALL)
+              : BuildLoadBuiltinFromIsolateRoot(Builtins::kI64ToBigInt);
+    } else {
+      DCHECK(mcgraph()->machine()->Is32());
+      // On 32-bit platforms we already set the target to the
+      // I32PairToBigInt builtin here, so that we don't have to replace the
+      // target in the int64-lowering.
+      target =
+          (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+              ? mcgraph()->RelocatableIntPtrConstant(
+                    wasm::WasmCode::kWasmI32PairToBigInt,
+                    RelocInfo::WASM_STUB_CALL)
+              : BuildLoadBuiltinFromIsolateRoot(Builtins::kI32PairToBigInt);
+    }
 
     return SetEffect(
-        SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
-                                    target, input, Effect(), Control())));
+        SetControl(graph()->NewNode(call, target, input, Effect(), Control())));
   }
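
Choosing the builtin up front per word size is the point of this rewrite: on 32-bit targets the call already names the pair-based builtin, so the later int64 lowering only splits the i64 operand into two word32 inputs and never has to rewrite a call target. In sketch form, with hypothetical ids:

enum class BuiltinIdSketch { kI64ToBigInt, kI32PairToBigInt };

// Mirrors the Is64()/Is32() branch in BuildChangeInt64ToBigInt.
inline BuiltinIdSketch ChooseI64ToBigIntTarget(bool is_64bit) {
  return is_64bit ? BuiltinIdSketch::kI64ToBigInt
                  : BuiltinIdSketch::kI32PairToBigInt;
}
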
 
   Node* BuildChangeBigIntToInt64(Node* input, Node* context) {
-    BigIntToI64Descriptor interface_descriptor;
-
-    auto call_descriptor = Linkage::GetStubCallDescriptor(
-        mcgraph()->zone(),                              // zone
-        interface_descriptor,                           // descriptor
-        interface_descriptor.GetStackParameterCount(),  // stack parameter count
-        CallDescriptor::kNoFlags,                       // flags
-        Operator::kNoProperties,                        // properties
-        stub_mode_);                                    // stub call mode
-
-    Node* target =
-        (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
-            ? mcgraph()->RelocatableIntPtrConstant(
-                  wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL)
-            : BuildLoadBuiltinFromInstance(Builtins::kBigIntToI64);
+    const Operator* call =
+        mcgraph()->common()->Call(GetBigIntToI64CallDescriptor());
+
+    Node* target;
+    if (mcgraph()->machine()->Is64()) {
+      target =
+          (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+              ? mcgraph()->RelocatableIntPtrConstant(
+                    wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL)
+              : BuildLoadBuiltinFromIsolateRoot(Builtins::kBigIntToI64);
+    } else {
+      DCHECK(mcgraph()->machine()->Is32());
+      // On 32-bit platforms we already set the target to the
+      // BigIntToI32Pair builtin here, so that we don't have to replace the
+      // target in the int64-lowering.
+      target =
+          (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+              ? mcgraph()->RelocatableIntPtrConstant(
+                    wasm::WasmCode::kWasmBigIntToI32Pair,
+                    RelocInfo::WASM_STUB_CALL)
+              : BuildLoadBuiltinFromIsolateRoot(Builtins::kBigIntToI32Pair);
+    }
 
     return SetEffect(SetControl(
-        graph()->NewNode(mcgraph()->common()->Call(call_descriptor), target,
-                         input, context, Effect(), Control())));
+        graph()->NewNode(call, target, input, context, Effect(), Control())));
   }
 
   Node* FromJS(Node* node, Node* js_context, wasm::ValueType type) {
@@ -5427,8 +5553,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
 
   void BuildModifyThreadInWasmFlag(bool new_value) {
     if (!trap_handler::IsTrapHandlerEnabled()) return;
-    Node* isolate_root =
-        LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+    Node* isolate_root = BuildLoadIsolateRoot();
 
     Node* thread_in_wasm_flag_address =
         LOAD_RAW(isolate_root, Isolate::thread_in_wasm_flag_address_offset(),
@@ -5446,9 +5571,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       Diamond flag_check(graph(), mcgraph()->common(), check,
                          BranchHint::kTrue);
       flag_check.Chain(Control());
-      Node* message_id = jsgraph()->SmiConstant(static_cast<int32_t>(
-          new_value ? AbortReason::kUnexpectedThreadInWasmSet
-                    : AbortReason::kUnexpectedThreadInWasmUnset));
+      Node* message_id = graph()->NewNode(
+          mcgraph()->common()->NumberConstant(static_cast<int32_t>(
+              new_value ? AbortReason::kUnexpectedThreadInWasmSet
+                        : AbortReason::kUnexpectedThreadInWasmUnset)));
 
       Node* effect = Effect();
       BuildCallToRuntimeWithContext(Runtime::kAbort, NoContextConstant(),
@@ -5509,7 +5635,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
 
     // Create the js_closure and js_context parameters.
     Node* js_closure =
-        graph()->NewNode(jsgraph()->common()->Parameter(
+        graph()->NewNode(mcgraph()->common()->Parameter(
                              Linkage::kJSCallClosureParamIndex, "%closure"),
                          graph()->start());
     Node* js_context = graph()->NewNode(
@@ -5525,18 +5651,18 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     instance_node_.set(
         BuildLoadInstanceFromExportedFunctionData(function_data));
 
-    if (!wasm::IsJSCompatibleSignature(sig_, enabled_features_.bigint)) {
+    if (!wasm::IsJSCompatibleSignature(sig_, enabled_features_)) {
       // Throw a TypeError. Use the js_context of the calling javascript
       // function (passed as a parameter), such that the generated code is
       // js_context independent.
       BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
                                     nullptr, 0, effect_, Control());
-      Return(jsgraph()->SmiConstant(0));
+      TerminateThrow(Effect(), Control());
       return;
     }
 
     const int args_count = wasm_count + 1;  // +1 for wasm_code.
-    Node** args = Buffer(args_count);
+    Vector<Node*> args = Buffer(args_count);
     Node** rets;
 
     // Convert JS parameters to wasm numbers.
@@ -5554,8 +5680,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       // Load function index from {WasmExportedFunctionData}.
       Node* function_index =
           BuildLoadFunctionIndexFromExportedFunctionData(function_data);
-      BuildImportCall(sig_, args, &rets, wasm::kNoCodePosition, function_index,
-                      kCallContinues);
+      BuildImportCall(sig_, args.begin(), &rets, wasm::kNoCodePosition,
+                      function_index, kCallContinues);
     } else {
       // Call to a wasm function defined in this module.
       // The call target is the jump table slot for that function.
@@ -5567,16 +5693,35 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
           mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset);
       args[0] = jump_table_slot;
 
-      BuildWasmCall(sig_, args, &rets, wasm::kNoCodePosition, nullptr,
+      BuildWasmCall(sig_, args.begin(), &rets, wasm::kNoCodePosition, nullptr,
                     kNoRetpoline);
     }
 
     // Clear the ThreadInWasm flag.
     BuildModifyThreadInWasmFlag(false);
 
-    Node* jsval = sig_->return_count() == 0 ? jsgraph()->UndefinedConstant()
-                                            : ToJS(rets[0], sig_->GetReturn());
+    Node* jsval;
+    if (sig_->return_count() == 0) {
+      jsval = BuildLoadUndefinedValueFromInstance();
+    } else if (sig_->return_count() == 1) {
+      jsval = ToJS(rets[0], sig_->GetReturn());
+    } else {
+      int32_t return_count = static_cast<int32_t>(sig_->return_count());
+      Node* size =
+          graph()->NewNode(mcgraph()->common()->NumberConstant(return_count));
+      // TODO(thibaudm): Replace runtime calls with TurboFan code.
+      Node* fixed_array =
+          BuildCallToRuntime(Runtime::kWasmNewMultiReturnFixedArray, &size, 1);
+      for (int i = 0; i < return_count; ++i) {
+        Node* value = ToJS(rets[i], sig_->GetReturn(i));
+        STORE_FIXED_ARRAY_SLOT_ANY(fixed_array, i, value);
+      }
+      jsval = BuildCallToRuntimeWithContext(Runtime::kWasmNewMultiReturnJSArray,
+                                            js_context, &fixed_array, 1,
+                                            effect_, Control());
+    }
     Return(jsval);
+    if (ContainsInt64(sig_)) LowerInt64(kCalledFromJS);
   }
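
The multi-return branch converts each wasm return value with ToJS, stores it into a FixedArray obtained from Runtime::kWasmNewMultiReturnFixedArray, and then asks Runtime::kWasmNewMultiReturnJSArray to wrap that array as the JS result. A standalone model of the flow, with hypothetical helpers and double standing in for any wasm value:

#include <vector>

struct JSValueSketch {};
JSValueSketch ToJSSketch(double wasm_value);
JSValueSketch MakeJSArraySketch(std::vector<JSValueSketch> elems);

inline JSValueSketch BuildReturnValueSketch(const std::vector<double>& rets) {
  if (rets.empty()) return JSValueSketch{};  // undefined in the real wrapper
  if (rets.size() == 1) return ToJSSketch(rets[0]);
  std::vector<JSValueSketch> elems;  // the FixedArray from the first call
  elems.reserve(rets.size());
  for (double r : rets) elems.push_back(ToJSSketch(r));
  return MakeJSArraySketch(std::move(elems));  // second runtime call
}
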
 
   bool BuildWasmImportCallWrapper(WasmImportCallKind kind) {
@@ -5597,9 +5742,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
                                     native_context, nullptr, 0, effect_,
                                     Control());
-      // We don't need to return a value here, as the runtime call will not
-      // return anyway (the c entry stub will trigger stack unwinding).
-      ReturnVoid();
+      TerminateThrow(Effect(), Control());
       return false;
     }
 
@@ -5622,7 +5765,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         sloppy_receiver = false;
         V8_FALLTHROUGH;  // fallthru
       case WasmImportCallKind::kJSFunctionArityMatchSloppy: {
-        Node** args = Buffer(wasm_count + 9);
+        Vector<Node*> args = Buffer(wasm_count + 7);
         int pos = 0;
         Node* function_context =
             LOAD_RAW(callable_node,
@@ -5650,8 +5793,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         args[pos++] = Effect();
         args[pos++] = Control();
 
+        DCHECK_EQ(pos, args.size());
         call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
-                                args);
+                                args.begin());
         break;
       }
       // =======================================================================
@@ -5661,14 +5805,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         sloppy_receiver = false;
         V8_FALLTHROUGH;  // fallthru
       case WasmImportCallKind::kJSFunctionArityMismatchSloppy: {
-        Node** args = Buffer(wasm_count + 9);
+        Vector<Node*> args = Buffer(wasm_count + 9);
         int pos = 0;
         Node* function_context =
             LOAD_RAW(callable_node,
                      wasm::ObjectAccess::ContextOffsetInTaggedJSFunction(),
                      MachineType::TypeCompressedTaggedPointer());
-        args[pos++] =
-            BuildLoadBuiltinFromInstance(Builtins::kArgumentsAdaptorTrampoline);
+        args[pos++] = BuildLoadBuiltinFromIsolateRoot(
+            Builtins::kArgumentsAdaptorTrampoline);
         args[pos++] = callable_node;                         // target callable
         args[pos++] = undefined_node;                        // new target
         args[pos++] = mcgraph()->Int32Constant(wasm_count);  // argument count
@@ -5712,26 +5856,27 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         args[pos++] = function_context;
         args[pos++] = Effect();
         args[pos++] = Control();
+
+        DCHECK_EQ(pos, args.size());
         call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
-                                args);
+                                args.begin());
         break;
       }
       // =======================================================================
       // === General case of unknown callable ==================================
       // =======================================================================
       case WasmImportCallKind::kUseCallBuiltin: {
-        Node** args = Buffer(wasm_count + 9);
+        Vector<Node*> args = Buffer(wasm_count + 7);
         int pos = 0;
-        args[pos++] = mcgraph()->RelocatableIntPtrConstant(
-            wasm::WasmCode::kWasmCallJavaScript, RelocInfo::WASM_STUB_CALL);
+        args[pos++] =
+            BuildLoadBuiltinFromIsolateRoot(Builtins::kCall_ReceiverIsAny);
         args[pos++] = callable_node;
         args[pos++] = mcgraph()->Int32Constant(wasm_count);  // argument count
         args[pos++] = undefined_node;                        // receiver
 
         auto call_descriptor = Linkage::GetStubCallDescriptor(
             graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
-            CallDescriptor::kNoFlags, Operator::kNoProperties,
-            StubCallMode::kCallWasmRuntimeStub);
+            CallDescriptor::kNoFlags, Operator::kNoProperties);
 
         // Convert wasm numbers to JS values.
         pos = AddArgumentNodes(args, pos, wasm_count, sig_);
@@ -5745,8 +5890,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         args[pos++] = Effect();
         args[pos++] = Control();
 
+        DCHECK_EQ(pos, args.size());
         call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
-                                args);
+                                args.begin());
         break;
       }
       default:
@@ -5766,6 +5912,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     BuildModifyThreadInWasmFlag(true);
 
     Return(val);
+
+    if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
     return true;
   }
 
@@ -5807,13 +5955,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     Node* sfi_data = LOAD_RAW(
         shared, SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag,
         MachineType::TypeCompressedTagged());
-    Node* host_data = LOAD_RAW(
+    Node* host_data_foreign = LOAD_RAW(
         sfi_data, WasmCapiFunctionData::kEmbedderDataOffset - kHeapObjectTag,
-        MachineType::Pointer());
+        MachineType::TypeCompressedTagged());
 
     BuildModifyThreadInWasmFlag(false);
-    Node* isolate_root =
-        LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+    Node* isolate_root = BuildLoadIsolateRoot();
     Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer());
     STORE_RAW(isolate_root, Isolate::c_entry_fp_offset(), fp_value,
               MachineType::PointerRepresentation(), kNoWriteBarrier);
@@ -5824,11 +5971,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     Node* function =
         graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
 
-    // Parameters: void* data, Address arguments.
+    // Parameters: Address host_data_foreign, Address arguments.
     MachineType host_sig_types[] = {
         MachineType::Pointer(), MachineType::Pointer(), MachineType::Pointer()};
     MachineSignature host_sig(1, 2, host_sig_types);
-    Node* return_value = BuildCCall(&host_sig, function, host_data, values);
+    Node* return_value =
+        BuildCCall(&host_sig, function, host_data_foreign, values);
 
     BuildModifyThreadInWasmFlag(true);
 
@@ -5854,13 +6002,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     SetControl(
         graph()->NewNode(mcgraph()->common()->IfTrue(), exception_branch));
     DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionMultiReturns);
-    int return_count = static_cast<int>(sig_->return_count());
+    size_t return_count = sig_->return_count();
     if (return_count == 0) {
       Return(Int32Constant(0));
     } else {
-      Node** returns = Buffer(return_count);
+      Vector<Node*> returns = Buffer(return_count);
       offset = 0;
-      for (int i = 0; i < return_count; ++i) {
+      for (size_t i = 0; i < return_count; ++i) {
         wasm::ValueType type = sig_->GetReturn(i);
         Node* val = SetEffect(
             graph()->NewNode(GetSafeLoadOperator(offset, type), values,
@@ -5868,10 +6016,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         returns[i] = val;
         offset += wasm::ValueTypes::ElementSizeInBytes(type);
       }
-      Return(return_count, returns);
+      Return(returns);
     }
 
-    if (ContainsInt64(sig_)) LowerInt64();
+    if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
   }
 
   void BuildWasmInterpreterEntry(int func_index) {
@@ -5918,17 +6066,19 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     // We are passing the raw arg_buffer here. To the GC and other parts, it
     // looks like a Smi (lowest bit not set). In the runtime function however,
     // don't call Smi::value on it, but just cast it to a byte pointer.
-    Node* parameters[] = {jsgraph()->SmiConstant(func_index), arg_buffer};
+    Node* parameters[] = {
+        graph()->NewNode(mcgraph()->common()->NumberConstant(func_index)),
+        arg_buffer};
     BuildCallToRuntime(Runtime::kWasmRunInterpreter, parameters,
                        arraysize(parameters));
 
     // Read back the return value.
     DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionMultiReturns);
-    unsigned return_count = static_cast<unsigned>(sig_->return_count());
+    size_t return_count = sig_->return_count();
     if (return_count == 0) {
       Return(Int32Constant(0));
     } else {
-      Node** returns = Buffer(return_count);
+      Vector<Node*> returns = Buffer(return_count);
       offset = 0;
       for (size_t i = 0; i < return_count; ++i) {
         wasm::ValueType type = sig_->GetReturn(i);
@@ -5938,10 +6088,85 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         returns[i] = val;
         offset += wasm::ValueTypes::ElementSizeInBytes(type);
       }
-      Return(return_count, returns);
+      Return(returns);
     }
 
-    if (ContainsInt64(sig_)) LowerInt64();
+    if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
+  }
+
+  void BuildJSToJSWrapper(Isolate* isolate) {
+    int wasm_count = static_cast<int>(sig_->parameter_count());
+
+    // Build the start and the parameter nodes.
+    int param_count = 1 /* closure */ + 1 /* receiver */ + wasm_count +
+                      1 /* new.target */ + 1 /* #arg */ + 1 /* context */;
+    SetEffect(SetControl(Start(param_count)));
+    Node* closure = Param(Linkage::kJSCallClosureParamIndex);
+    Node* context = Param(Linkage::GetJSCallContextParamIndex(wasm_count + 1));
+
+    // Since JS-to-JS wrappers are specific to one Isolate, it is OK to embed
+    // values (for undefined and root) directly into the instruction stream.
+    isolate_root_node_ = mcgraph()->IntPtrConstant(isolate->isolate_root());
+    undefined_value_node_ = graph()->NewNode(mcgraph()->common()->HeapConstant(
+        isolate->factory()->undefined_value()));
+
+    // Throw a TypeError if the signature is incompatible with JavaScript.
+    if (!wasm::IsJSCompatibleSignature(sig_, enabled_features_)) {
+      BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
+                                    nullptr, 0, effect_, Control());
+      TerminateThrow(Effect(), Control());
+      return;
+    }
+
+    // Load the original callable from the closure.
+    Node* shared = LOAD_TAGGED_ANY(
+        closure,
+        wasm::ObjectAccess::ToTagged(JSFunction::kSharedFunctionInfoOffset));
+    Node* func_data = LOAD_TAGGED_ANY(
+        shared,
+        wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset));
+    Node* callable = LOAD_TAGGED_ANY(
+        func_data,
+        wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
+
+    // Call the underlying closure.
+    Vector<Node*> args = Buffer(wasm_count + 7);
+    int pos = 0;
+    args[pos++] = graph()->NewNode(mcgraph()->common()->HeapConstant(
+        BUILTIN_CODE(isolate, Call_ReceiverIsAny)));
+    args[pos++] = callable;
+    args[pos++] = mcgraph()->Int32Constant(wasm_count);   // argument count
+    args[pos++] = BuildLoadUndefinedValueFromInstance();  // receiver
+
+    auto call_descriptor = Linkage::GetStubCallDescriptor(
+        graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
+        CallDescriptor::kNoFlags, Operator::kNoProperties,
+        StubCallMode::kCallCodeObject);
+
+    // Convert parameter JS values to wasm numbers and back to JS values.
+    for (int i = 0; i < wasm_count; ++i) {
+      Node* param = Param(i + 1);  // Start from index 1 to skip receiver.
+      args[pos++] =
+          ToJS(FromJS(param, context, sig_->GetParam(i)), sig_->GetParam(i));
+    }
+
+    args[pos++] = context;
+    args[pos++] = Effect();
+    args[pos++] = Control();
+
+    DCHECK_EQ(pos, args.size());
+    Node* call = SetEffect(graph()->NewNode(
+        mcgraph()->common()->Call(call_descriptor), pos, args.begin()));
+
+    // TODO(wasm): Extend this to support multi-return.
+    DCHECK_LE(sig_->return_count(), 1);
+
+    // Convert return JS values to wasm numbers and back to JS values.
+    Node* jsval =
+        sig_->return_count() == 0
+            ? BuildLoadUndefinedValueFromInstance()
+            : ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn());
+    Return(jsval);
   }
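
The ToJS(FromJS(...)) round trip is what gives a WasmJSFunction wasm-export semantics even though caller and callee are both JavaScript: every argument is coerced to the declared wasm type and converted back. For an i32 parameter that looks roughly like the following (simplified; real ToInt32 also defines out-of-range behavior by modular reduction, which is omitted here):

#include <cmath>
#include <cstdint>

// Simplified i32 round trip: 3.7 becomes 3, mirroring how a wasm export
// would see (and hand back) the coerced value.
inline double RoundTripAsI32Sketch(double js_value) {
  if (std::isnan(js_value)) return 0.0;  // JS ToInt32(NaN) == 0
  int32_t as_wasm = static_cast<int32_t>(js_value);  // FromJS for kWasmI32
  return static_cast<double>(as_wasm);               // ToJS for kWasmI32
}
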
 
   void BuildCWasmEntry() {
@@ -5959,8 +6184,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
               kNoWriteBarrier);
 
     int wasm_arg_count = static_cast<int>(sig_->parameter_count());
-    int arg_count = wasm_arg_count + 4;  // code, object_ref, control, effect
-    Node** args = Buffer(arg_count);
+    Vector<Node*> args = Buffer(wasm_arg_count + 4);
 
     int pos = 0;
     args[pos++] = code_entry;
@@ -5977,13 +6201,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
 
     args[pos++] = Effect();
     args[pos++] = Control();
-    DCHECK_EQ(arg_count, pos);
 
     // Call the wasm code.
     auto call_descriptor = GetWasmCallDescriptor(mcgraph()->zone(), sig_);
 
+    DCHECK_EQ(pos, args.size());
     Node* call = SetEffect(graph()->NewNode(
-        mcgraph()->common()->Call(call_descriptor), arg_count, args));
+        mcgraph()->common()->Call(call_descriptor), pos, args.begin()));
 
     Node* if_success = graph()->NewNode(mcgraph()->common()->IfSuccess(), call);
     Node* if_exception =
@@ -6011,9 +6235,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       pos++;
     }
 
-    Return(jsgraph()->SmiConstant(0));
+    Return(mcgraph()->IntPtrConstant(0));
 
     if (mcgraph()->machine()->Is32() && ContainsInt64(sig_)) {
+      // No special lowering should be requested in the C entry.
+      DCHECK_NULL(lowering_special_case_);
+
       MachineRepresentation sig_reps[] = {
           MachineType::PointerRepresentation(),  // return value
           MachineType::PointerRepresentation(),  // target
@@ -6028,11 +6255,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     }
   }
 
-  JSGraph* jsgraph() { return jsgraph_; }
-
  private:
-  Isolate* const isolate_;
-  JSGraph* jsgraph_;
   StubCallMode stub_mode_;
   SetOncePointer<Node> undefined_value_node_;
   SetOncePointer<const Operator> allocate_heap_number_operator_;
@@ -6058,26 +6281,27 @@ void AppendSignature(char* buffer, size_t max_name_len,
 }  // namespace
 
 std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
-    Isolate* isolate, wasm::FunctionSig* sig, bool is_import) {
+    Isolate* isolate, wasm::WasmEngine* wasm_engine, wasm::FunctionSig* sig,
+    bool is_import, const wasm::WasmFeatures& enabled_features) {
   //----------------------------------------------------------------------------
   // Create the Graph.
   //----------------------------------------------------------------------------
   std::unique_ptr<Zone> zone =
-      base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+      base::make_unique<Zone>(wasm_engine->allocator(), ZONE_NAME);
   Graph* graph = new (zone.get()) Graph(zone.get());
   CommonOperatorBuilder common(zone.get());
   MachineOperatorBuilder machine(
       zone.get(), MachineType::PointerRepresentation(),
       InstructionSelector::SupportedMachineOperatorFlags(),
       InstructionSelector::AlignmentRequirements());
-  JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);
+  MachineGraph mcgraph(graph, &common, &machine);
 
   Node* control = nullptr;
   Node* effect = nullptr;
 
-  WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
+  WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr,
                                   StubCallMode::kCallCodeObject,
-                                  wasm::WasmFeaturesFromIsolate(isolate));
+                                  enabled_features);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
   builder.BuildJSToWasmWrapper(is_import);
@@ -6095,13 +6319,13 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
       zone.get(), false, params + 1, CallDescriptor::kNoFlags);
 
   return Pipeline::NewWasmHeapStubCompilationJob(
-      isolate, incoming, std::move(zone), graph, Code::JS_TO_WASM_FUNCTION,
-      std::move(debug_name), WasmAssemblerOptions());
+      isolate, wasm_engine, incoming, std::move(zone), graph,
+      Code::JS_TO_WASM_FUNCTION, std::move(debug_name), WasmAssemblerOptions());
 }
 
 std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
     Handle<JSReceiver> callable, wasm::FunctionSig* expected_sig,
-    bool has_bigint_feature) {
+    const wasm::WasmFeatures& enabled_features) {
   if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
     auto imported_function = Handle<WasmExportedFunction>::cast(callable);
     auto func_index = imported_function->function_index();
@@ -6136,7 +6360,7 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
     return std::make_pair(WasmImportCallKind::kWasmToCapi, callable);
   }
   // Assuming we are calling to JS, check whether this would be a runtime error.
-  if (!wasm::IsJSCompatibleSignature(expected_sig, has_bigint_feature)) {
+  if (!wasm::IsJSCompatibleSignature(expected_sig, enabled_features)) {
     return std::make_pair(WasmImportCallKind::kRuntimeTypeError, callable);
   }
   // For JavaScript calls, determine whether the target has an arity match
@@ -6176,10 +6400,7 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
         COMPARE_SIG_FOR_BUILTIN_F64(Exp);
         COMPARE_SIG_FOR_BUILTIN_F64(Log);
         COMPARE_SIG_FOR_BUILTIN_F64(Atan2);
-        //===========================================================
-        // TODO(8505): Math.pow for wasm does not match JS.
-        //        COMPARE_SIG_FOR_BUILTIN_F64(Pow);
-        //===========================================================
+        COMPARE_SIG_FOR_BUILTIN_F64(Pow);
         COMPARE_SIG_FOR_BUILTIN_F32_F64(Min);
         COMPARE_SIG_FOR_BUILTIN_F32_F64(Max);
         COMPARE_SIG_FOR_BUILTIN_F32_F64(Abs);
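With the TODO(8505) mismatch resolved, a Math.pow import is matched like the other f64 builtins above. The macro's definition lives earlier in this file; the sketch below only illustrates the shape of the signature check it performs, with the helper name invented here:

```cpp
// Sketch, not the real macro expansion: an import may be treated as the
// f64 Pow builtin only when its declared signature is (f64, f64) -> f64.
bool MatchesF64BinopSig(const wasm::FunctionSig* sig) {
  return sig->parameter_count() == 2 && sig->return_count() == 1 &&
         sig->GetParam(0) == wasm::kWasmF64 &&
         sig->GetParam(1) == wasm::kWasmF64 &&
         sig->GetReturn(0) == wasm::kWasmF64;
}
```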
@@ -6347,7 +6568,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
       &zone, MachineType::PointerRepresentation(),
       InstructionSelector::SupportedMachineOperatorFlags(),
       InstructionSelector::AlignmentRequirements());
-  JSGraph jsgraph(nullptr, &graph, &common, nullptr, nullptr, &machine);
+  MachineGraph mcgraph(&graph, &common, &machine);
 
   Node* control = nullptr;
   Node* effect = nullptr;
@@ -6355,7 +6576,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
   SourcePositionTable* source_position_table =
       source_positions ? new (&zone) SourcePositionTable(&graph) : nullptr;
 
-  WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_position_table,
+  WasmWrapperGraphBuilder builder(&zone, &mcgraph, sig, source_position_table,
                                   StubCallMode::kCallWasmRuntimeStub,
                                   env->enabled_features);
   builder.set_control_ptr(&control);
@@ -6372,7 +6593,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
     incoming = GetI32WasmCallDescriptor(&zone, incoming);
   }
   wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
-      wasm_engine, incoming, &jsgraph, Code::WASM_TO_JS_FUNCTION,
+      wasm_engine, incoming, &mcgraph, Code::WASM_TO_JS_FUNCTION,
       wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
       source_position_table);
   result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
@@ -6395,10 +6616,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
           &zone, MachineType::PointerRepresentation(),
           InstructionSelector::SupportedMachineOperatorFlags(),
           InstructionSelector::AlignmentRequirements()));
-  JSGraph jsgraph(nullptr, mcgraph->graph(), mcgraph->common(), nullptr,
-                  nullptr, mcgraph->machine());
 
-  WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_positions,
+  WasmWrapperGraphBuilder builder(&zone, mcgraph, sig, source_positions,
                                   StubCallMode::kCallWasmRuntimeStub,
                                   native_module->enabled_features());
 
@@ -6448,12 +6667,12 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
       &zone, MachineType::PointerRepresentation(),
       InstructionSelector::SupportedMachineOperatorFlags(),
       InstructionSelector::AlignmentRequirements());
-  JSGraph jsgraph(nullptr, &graph, &common, nullptr, nullptr, &machine);
+  MachineGraph mcgraph(&graph, &common, &machine);
 
   Node* control = nullptr;
   Node* effect = nullptr;
 
-  WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+  WasmWrapperGraphBuilder builder(&zone, &mcgraph, sig, nullptr,
                                   StubCallMode::kCallWasmRuntimeStub,
                                   enabled_features);
   builder.set_control_ptr(&control);
@@ -6471,7 +6690,7 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
       SNPrintF(func_name, "wasm-interpreter-entry#%d", func_index));
 
   wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
-      wasm_engine, incoming, &jsgraph, Code::WASM_INTERPRETER_ENTRY,
+      wasm_engine, incoming, &mcgraph, Code::WASM_INTERPRETER_ENTRY,
       wasm::WasmCode::kInterpreterEntry, func_name.begin(),
       WasmStubAssemblerOptions());
   result.result_tier = wasm::ExecutionTier::kInterpreter;
@@ -6480,6 +6699,54 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
   return result;
 }
 
+MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
+                                       wasm::FunctionSig* sig) {
+  std::unique_ptr<Zone> zone =
+      base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+  Graph* graph = new (zone.get()) Graph(zone.get());
+  CommonOperatorBuilder common(zone.get());
+  MachineOperatorBuilder machine(
+      zone.get(), MachineType::PointerRepresentation(),
+      InstructionSelector::SupportedMachineOperatorFlags(),
+      InstructionSelector::AlignmentRequirements());
+  MachineGraph mcgraph(graph, &common, &machine);
+
+  Node* control = nullptr;
+  Node* effect = nullptr;
+
+  WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr,
+                                  StubCallMode::kCallCodeObject,
+                                  wasm::WasmFeaturesFromIsolate(isolate));
+  builder.set_control_ptr(&control);
+  builder.set_effect_ptr(&effect);
+  builder.BuildJSToJSWrapper(isolate);
+
+  int wasm_count = static_cast<int>(sig->parameter_count());
+  CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
+      zone.get(), false, wasm_count + 1, CallDescriptor::kNoFlags);
+
+  // Build a name in the form "js-to-js-wrapper:<params>:<returns>".
+  static constexpr size_t kMaxNameLen = 128;
+  auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+  memcpy(debug_name.get(), "js-to-js-wrapper:", 18);
+  AppendSignature(debug_name.get(), kMaxNameLen, sig);
+
+  // Run the compilation job synchronously.
+  std::unique_ptr<OptimizedCompilationJob> job(
+      Pipeline::NewWasmHeapStubCompilationJob(
+          isolate, isolate->wasm_engine(), incoming, std::move(zone), graph,
+          Code::JS_TO_JS_FUNCTION, std::move(debug_name),
+          AssemblerOptions::Default(isolate)));
+
+  if (job->ExecuteJob() == CompilationJob::FAILED ||
+      job->FinalizeJob(isolate) == CompilationJob::FAILED) {
+    return {};
+  }
+  Handle<Code> code = job->compilation_info()->code();
+
+  return code;
+}
+
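CompileJSToJSWrapper drives the compilation job by hand; like CompileCWasmEntry below, it skips the PrepareJob phase entirely for heap-stub jobs. The same drive-through, isolated into a helper for illustration (the helper name is invented):

```cpp
// Minimal sketch of the synchronous execute-then-finalize pattern.
MaybeHandle<Code> RunCompilationJobSync(
    Isolate* isolate, std::unique_ptr<OptimizedCompilationJob> job) {
  if (job->ExecuteJob() == CompilationJob::FAILED ||
      job->FinalizeJob(isolate) == CompilationJob::FAILED) {
    return {};  // Failure surfaces as an empty MaybeHandle.
  }
  return job->compilation_info()->code();
}
```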
 MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
   std::unique_ptr<Zone> zone =
       base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
@@ -6489,12 +6756,12 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
       zone.get(), MachineType::PointerRepresentation(),
       InstructionSelector::SupportedMachineOperatorFlags(),
       InstructionSelector::AlignmentRequirements());
-  JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);
+  MachineGraph mcgraph(graph, &common, &machine);
 
   Node* control = nullptr;
   Node* effect = nullptr;
 
-  WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
+  WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr,
                                   StubCallMode::kCallCodeObject,
                                   wasm::WasmFeaturesFromIsolate(isolate));
   builder.set_control_ptr(&control);
@@ -6510,9 +6777,9 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
   MachineSignature incoming_sig(1, 4, sig_types);
   // Traps need the root register for TailCallRuntimeWithCEntry to call
   // Runtime::kThrowWasmError.
-  bool initialize_root_flag = true;
-  CallDescriptor* incoming = Linkage::GetSimplifiedCDescriptor(
-      zone.get(), &incoming_sig, initialize_root_flag);
+  CallDescriptor::Flags flags = CallDescriptor::kInitializeRootRegister;
+  CallDescriptor* incoming =
+      Linkage::GetSimplifiedCDescriptor(zone.get(), &incoming_sig, flags);
 
   // Build a name in the form "c-wasm-entry:<params>:<returns>".
   static constexpr size_t kMaxNameLen = 128;
@@ -6523,11 +6790,11 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
   // Run the compilation job synchronously.
   std::unique_ptr<OptimizedCompilationJob> job(
       Pipeline::NewWasmHeapStubCompilationJob(
-          isolate, incoming, std::move(zone), graph, Code::C_WASM_ENTRY,
-          std::move(debug_name), AssemblerOptions::Default(isolate)));
+          isolate, isolate->wasm_engine(), incoming, std::move(zone), graph,
+          Code::C_WASM_ENTRY, std::move(debug_name),
+          AssemblerOptions::Default(isolate)));
 
-  if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
-      job->ExecuteJob() == CompilationJob::FAILED ||
+  if (job->ExecuteJob() == CompilationJob::FAILED ||
       job->FinalizeJob(isolate) == CompilationJob::FAILED) {
     return {};
   }
@@ -6536,6 +6803,8 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
   return code;
 }
 
+namespace {
+
 bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
                                wasm::CompilationEnv* env,
                                const wasm::FunctionBody& func_body,
@@ -6558,12 +6827,13 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
     return false;
   }
 
-  builder.LowerInt64();
+  builder.LowerInt64(WasmWrapperGraphBuilder::kCalledFromWasm);
 
   if (builder.has_simd() &&
       (!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) {
-    SimdScalarLowering(mcgraph,
-                       CreateMachineSignature(mcgraph->zone(), func_body.sig))
+    SimdScalarLowering(
+        mcgraph, CreateMachineSignature(mcgraph->zone(), func_body.sig,
+                                        WasmGraphBuilder::kCalledFromWasm))
         .LowerGraph();
   }
 
@@ -6574,7 +6844,6 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
   return true;
 }
 
-namespace {
 Vector<const char> GetDebugName(Zone* zone, int index) {
   // TODO(herhut): Use name from module if available.
   constexpr int kBufferLength = 24;
@@ -6587,6 +6856,7 @@ Vector<const char> GetDebugName(Zone* zone, int index) {
   memcpy(index_name, name_vector.begin(), name_len);
   return Vector<const char>(index_name, name_len);
 }
+
 }  // namespace
 
 wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
@@ -6755,7 +7025,7 @@ CallDescriptor* GetWasmCallDescriptor(
                                 wasm::kFpReturnRegisters);
 
   int parameter_slots = params.NumStackSlots();
-  if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
+  if (ShouldPadArguments(parameter_slots)) parameter_slots++;
 
   rets.SetStackOffset(parameter_slots);
 
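The old code rounded the slot count up to a multiple of two whenever kPadArguments was set; the new predicate makes the decision explicit per count. A sketch of the assumed semantics:

```cpp
// Assumed semantics of ShouldPadArguments, sketched: add a single padding
// slot only when the target wants two-slot (16-byte) stack alignment and
// the current count is odd. kTargetPadsArguments is a stand-in constant.
bool ShouldPadArgumentsSketch(int parameter_slots) {
  constexpr bool kTargetPadsArguments = true;  // assumption, e.g. arm64
  return kTargetPadsArguments && (parameter_slots % 2) != 0;
}
```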
@@ -6803,7 +7073,7 @@ CallDescriptor* GetWasmCallDescriptor(
 
 namespace {
 CallDescriptor* ReplaceTypeInCallDescriptorWith(
-    Zone* zone, CallDescriptor* call_descriptor, size_t num_replacements,
+    Zone* zone, const CallDescriptor* call_descriptor, size_t num_replacements,
     MachineType input_type, MachineRepresentation output_type) {
   size_t parameter_count = call_descriptor->ParameterCount();
   size_t return_count = call_descriptor->ReturnCount();
@@ -6819,14 +7089,23 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
   }
   if (parameter_count == call_descriptor->ParameterCount() &&
       return_count == call_descriptor->ReturnCount()) {
-    return call_descriptor;
+    return const_cast<CallDescriptor*>(call_descriptor);
   }
 
   LocationSignature::Builder locations(zone, return_count, parameter_count);
 
+  // The last parameter may be the special callable parameter. In that case we
+  // have to preserve it as the last parameter, i.e. re-allocate it in the new
+  // location signature in the same register.
+  bool has_callable_param =
+      (call_descriptor->GetInputLocation(call_descriptor->InputCount() - 1) ==
+       LinkageLocation::ForRegister(kJSFunctionRegister.code(),
+                                    MachineType::TaggedPointer()));
   LinkageLocationAllocator params(wasm::kGpParamRegisters,
                                   wasm::kFpParamRegisters);
-  for (size_t i = 0; i < call_descriptor->ParameterCount(); i++) {
+  for (size_t i = 0, e = call_descriptor->ParameterCount() -
+                         (has_callable_param ? 1 : 0);
+       i < e; i++) {
     if (call_descriptor->GetParameterType(i) == input_type) {
       for (size_t j = 0; j < num_replacements; j++) {
         locations.AddParam(params.Next(output_type));
@@ -6836,6 +7115,10 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
           params.Next(call_descriptor->GetParameterType(i).representation()));
     }
   }
+  if (has_callable_param) {
+    locations.AddParam(LinkageLocation::ForRegister(
+        kJSFunctionRegister.code(), MachineType::TaggedPointer()));
+  }
 
   LinkageLocationAllocator rets(wasm::kGpReturnRegisters,
                                 wasm::kFpReturnRegisters);
@@ -6867,8 +7150,8 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
 }
 }  // namespace
 
-CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
-                                         CallDescriptor* call_descriptor) {
+CallDescriptor* GetI32WasmCallDescriptor(
+    Zone* zone, const CallDescriptor* call_descriptor) {
   return ReplaceTypeInCallDescriptorWith(zone, call_descriptor, 2,
                                          MachineType::Int64(),
                                          MachineRepresentation::kWord32);
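For reference, the expansion rule the rewrite applies can be stated compactly; this helper is a sketch written for this note, not part of the patch:

```cpp
// Each parameter of the matched type grows into num_replacements slots
// (two kWord32 slots per Int64 above); everything else passes through.
size_t LoweredParameterCount(const CallDescriptor* call_descriptor,
                             MachineType input_type,
                             size_t num_replacements) {
  size_t count = 0;
  for (size_t i = 0; i < call_descriptor->ParameterCount(); i++) {
    count += (call_descriptor->GetParameterType(i) == input_type)
                 ? num_replacements
                 : 1;
  }
  return count;
}
```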
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 315733c396d70c..dd86ea14997359 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -34,6 +34,7 @@ class Operator;
 class SourcePositionTable;
 class WasmDecorator;
 enum class TrapId : uint32_t;
+struct Int64LoweringSpecialCase;
 }  // namespace compiler
 
 namespace wasm {
@@ -47,14 +48,6 @@ struct WasmFeatures;
 
 namespace compiler {
 
-bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
-                               wasm::CompilationEnv* env,
-                               const wasm::FunctionBody& func_body,
-                               int func_index, wasm::WasmFeatures* detected,
-                               MachineGraph* mcgraph,
-                               NodeOriginTable* node_origins,
-                               SourcePositionTable* source_positions);
-
 wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
     wasm::WasmEngine*, wasm::CompilationEnv*, const wasm::FunctionBody&,
     int func_index, Counters*, wasm::WasmFeatures* detected);
@@ -117,7 +110,7 @@ constexpr WasmImportCallKind kDefaultImportCallKind =
 // another target, which is why the ultimate target is returned as well.
 V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
 ResolveWasmImportCall(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
-                      bool has_bigint_feature);
+                      const wasm::WasmFeatures& enabled_features);
 
 // Compiles an import call wrapper, which allows WASM to call imports.
 V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
@@ -131,7 +124,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,
 
 // Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
 std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
-    Isolate* isolate, wasm::FunctionSig* sig, bool is_import);
+    Isolate* isolate, wasm::WasmEngine* wasm_engine, wasm::FunctionSig* sig,
+    bool is_import, const wasm::WasmFeatures& enabled_features);
 
 // Compiles a stub that redirects a call to a wasm function to the wasm
 // interpreter. It's ABI compatible with the compiled wasm function.
@@ -139,6 +133,12 @@ V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmInterpreterEntry(
     wasm::WasmEngine*, const wasm::WasmFeatures& enabled_features,
     uint32_t func_index, wasm::FunctionSig*);
 
+// Compiles a stub with JS linkage that serves as an adapter for function
+// objects constructed via {WebAssembly.Function}. It performs a round-trip
+// simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
+MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
+                                       wasm::FunctionSig* sig);
+
 enum CWasmEntryParameters {
   kCodeEntry,
   kObjectRef,
@@ -179,14 +179,14 @@ class WasmGraphBuilder {
       wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
       wasm::FunctionSig* sig, compiler::SourcePositionTable* spt = nullptr);
 
-  Node** Buffer(size_t count) {
+  Vector<Node*> Buffer(size_t count) {
     if (count > cur_bufsize_) {
       size_t new_size = count + cur_bufsize_ + 5;
       cur_buffer_ =
           reinterpret_cast<Node**>(zone_->New(new_size * sizeof(Node*)));
       cur_bufsize_ = new_size;
     }
-    return cur_buffer_;
+    return {cur_buffer_, count};
   }
 
   //-----------------------------------------------------------------------
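Buffer() returning Vector<Node*> pairs the pointer with its length, so callers stop threading a separate count around. A caller-side sketch under the new shape; the per-slot initialization is a placeholder:

```cpp
// The vector knows its size, so iteration cannot silently overrun the
// underlying zone allocation.
Vector<Node*> rets = Buffer(sig_->return_count());
for (size_t i = 0; i < rets.size(); ++i) {
  rets[i] = nullptr;  // placeholder; filled by the actual lowering code
}
```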
@@ -223,8 +223,8 @@ class WasmGraphBuilder {
   Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag);
   Node* LoadExceptionTagFromTable(uint32_t exception_index);
   Node* GetExceptionTag(Node* except_obj);
-  Node** GetExceptionValues(Node* except_obj,
-                            const wasm::WasmException* exception);
+  Vector<Node*> GetExceptionValues(Node* except_obj,
+                                   const wasm::WasmException* exception);
   bool IsPhiWithMerge(Node* phi, Node* merge);
   bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
   void AppendToMerge(Node* merge, Node* from);
@@ -267,13 +267,12 @@ class WasmGraphBuilder {
   Node* Switch(unsigned count, Node* key);
   Node* IfValue(int32_t value, Node* sw);
   Node* IfDefault(Node* sw);
-  Node* Return(unsigned count, Node** nodes);
+  Node* Return(Vector<Node*> nodes);
   template <typename... Nodes>
   Node* Return(Node* fst, Nodes*... more) {
     Node* arr[] = {fst, more...};
-    return Return(arraysize(arr), arr);
+    return Return(ArrayVector(arr));
   }
-  Node* ReturnVoid();
   Node* Unreachable(wasm::WasmCodePosition position);
 
   Node* CallDirect(uint32_t index, Node** args, Node*** rets,
@@ -364,7 +363,9 @@ class WasmGraphBuilder {
 
   wasm::FunctionSig* GetFunctionSignature() { return sig_; }
 
-  V8_EXPORT_PRIVATE void LowerInt64();
+  enum CallOrigin { kCalledFromWasm, kCalledFromJS };
+
+  V8_EXPORT_PRIVATE void LowerInt64(CallOrigin origin);
 
   V8_EXPORT_PRIVATE void SimdScalarLoweringForTesting();
 
@@ -379,9 +380,6 @@ class WasmGraphBuilder {
 
   Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane, Node* const* inputs);
 
-  Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
-                    Node* const* inputs);
-
   Node* Simd8x16ShuffleOp(const uint8_t shuffle[16], Node* const* inputs);
 
   Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
@@ -443,6 +441,7 @@ class WasmGraphBuilder {
   SetOncePointer<Node> globals_start_;
   SetOncePointer<Node> imported_mutable_globals_;
   SetOncePointer<Node> stack_check_code_node_;
+  SetOncePointer<Node> isolate_root_node_;
   SetOncePointer<const Operator> stack_check_call_operator_;
 
   Node** cur_buffer_;
@@ -458,8 +457,12 @@ class WasmGraphBuilder {
 
   compiler::SourcePositionTable* const source_position_table_ = nullptr;
 
+  std::unique_ptr<Int64LoweringSpecialCase> lowering_special_case_;
+
   Node* NoContextConstant();
 
+  Node* BuildLoadIsolateRoot();
+
   Node* MemBuffer(uint32_t offset);
   // BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
   Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
@@ -596,9 +599,13 @@ class WasmGraphBuilder {
   Node* BuildDecodeException32BitValue(Node* values_array, uint32_t* index);
   Node* BuildDecodeException64BitValue(Node* values_array, uint32_t* index);
 
-  Node** Realloc(Node* const* buffer, size_t old_count, size_t new_count) {
-    Node** buf = Buffer(new_count);
-    if (buf != buffer) memcpy(buf, buffer, old_count * sizeof(Node*));
+  Vector<Node*> Realloc(Node* const* buffer, size_t old_count,
+                        size_t new_count) {
+    DCHECK_GE(new_count, old_count);  // Only support growing.
+    Vector<Node*> buf = Buffer(new_count);
+    if (buf.begin() != buffer) {
+      memcpy(buf.begin(), buffer, old_count * sizeof(Node*));
+    }
     return buf;
   }
 
@@ -624,7 +631,7 @@ V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
     WasmCallKind kind = kWasmFunction);
 
 V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
-    Zone* zone, CallDescriptor* call_descriptor);
+    Zone* zone, const CallDescriptor* call_descriptor);
 
 V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
     Zone* zone, CallDescriptor* call_descriptor);
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 6656ab608dc56a..13a35b0cd349c2 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -36,6 +36,7 @@
 #include "src/init/v8.h"
 #include "src/interpreter/interpreter.h"
 #include "src/logging/counters.h"
+#include "src/objects/managed.h"
 #include "src/objects/objects-inl.h"
 #include "src/objects/objects.h"
 #include "src/parsing/parse-info.h"
@@ -76,7 +77,6 @@ namespace {
 
 const int kMB = 1024 * 1024;
 
-const int kMaxWorkers = 100;
 const int kMaxSerializerMemoryUsage =
     1 * kMB;  // Arbitrary maximum for testing.
 
@@ -227,14 +227,13 @@ Worker* GetWorkerFromInternalField(Isolate* isolate, Local<Object> object) {
     return nullptr;
   }
 
-  Worker* worker =
-      static_cast<Worker*>(object->GetAlignedPointerFromInternalField(0));
-  if (worker == nullptr) {
+  i::Handle<i::Object> handle = Utils::OpenHandle(*object->GetInternalField(0));
+  if (handle->IsSmi()) {
     Throw(isolate, "Worker is defunct because main thread is terminating");
     return nullptr;
   }
-
-  return worker;
+  auto managed = i::Handle<i::Managed<Worker>>::cast(handle);
+  return managed->raw();
 }
 
 base::Thread::Options GetThreadOptions(const char* name) {
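The Worker pointer now lives behind a Managed<Worker> heap object instead of a raw aligned pointer, with Smi 0 serving as the "defunct" sentinel. The read side of that protocol, condensed from the code above:

```cpp
// A Smi in slot 0 always means "no live worker behind this object";
// anything else is the Managed<Worker> installed by WorkerNew.
i::Handle<i::Object> field =
    Utils::OpenHandle(*object->GetInternalField(0));
if (field->IsSmi()) return nullptr;  // defunct or never started
return i::Handle<i::Managed<Worker>>::cast(field)->raw();
```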
@@ -333,7 +332,7 @@ const base::TimeTicks Shell::kInitialTicks =
 Global<Function> Shell::stringify_function_;
 base::LazyMutex Shell::workers_mutex_;
 bool Shell::allow_new_workers_ = true;
-std::vector<Worker*> Shell::workers_;
+std::unordered_set<std::shared_ptr<Worker>> Shell::running_workers_;
 std::vector<ExternalizedContents> Shell::externalized_contents_;
 std::atomic<bool> Shell::script_executed_{false};
 base::LazyMutex Shell::isolate_status_lock_;
@@ -485,7 +484,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
     } else if (options.stress_background_compile) {
       // Start a background thread compiling the script.
       BackgroundCompileThread background_compile_thread(isolate, source);
-      background_compile_thread.Start();
+      CHECK(background_compile_thread.Start());
 
       // In parallel, compile on the main thread to flush out any data races.
       {
@@ -762,6 +761,16 @@ MaybeLocal<Promise> Shell::HostImportModuleDynamically(
   return MaybeLocal<Promise>();
 }
 
+void Shell::HostCleanupFinalizationGroup(Local<Context> context,
+                                         Local<FinalizationGroup> fg) {
+  Isolate* isolate = context->GetIsolate();
+  PerIsolateData::Get(isolate)->HostCleanupFinalizationGroup(fg);
+}
+
+void PerIsolateData::HostCleanupFinalizationGroup(Local<FinalizationGroup> fg) {
+  cleanup_finalization_groups_.emplace(isolate_, fg);
+}
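The host hook only queues the group; the finalizers themselves run later from the message loop via FinalizationGroup::Cleanup (see the ProcessMessages changes further down). Wiring sketch assembled from calls elsewhere in this patch:

```cpp
// Register the hook at isolate creation ...
isolate->SetHostCleanupFinalizationGroupCallback(
    Shell::HostCleanupFinalizationGroup);
// ... and drain the queue from the event loop, never re-entrantly:
Local<FinalizationGroup> fg;
if (data->GetCleanupFinalizationGroup().ToLocal(&fg) &&
    FinalizationGroup::Cleanup(fg).IsNothing()) {
  // An exception escaped a finalizer; report it as d8 does.
}
```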
+
 void Shell::HostInitializeImportMetaObject(Local<Context> context,
                                            Local<Module> module,
                                            Local<Object> meta) {
@@ -908,6 +917,15 @@ MaybeLocal<Context> PerIsolateData::GetTimeoutContext() {
   return result;
 }
 
+MaybeLocal<FinalizationGroup> PerIsolateData::GetCleanupFinalizationGroup() {
+  if (cleanup_finalization_groups_.empty())
+    return MaybeLocal<FinalizationGroup>();
+  Local<FinalizationGroup> result =
+      cleanup_finalization_groups_.front().Get(isolate_);
+  cleanup_finalization_groups_.pop();
+  return result;
+}
+
 PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
   data_->realm_count_ = 1;
   data_->realm_current_ = 0;
@@ -1392,30 +1410,36 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
     return;
   }
 
+  // Initialize the internal field to 0 (a Smi); if we return early without
+  // creating a new Worker (because the main thread is terminating), the
+  // instance methods can detect this and bail out early.
+  args.Holder()->SetInternalField(0, v8::Integer::New(isolate, 0));
+
   {
+    // Don't allow workers to create more workers if the main thread
+    // is waiting for existing running workers to terminate.
     base::MutexGuard lock_guard(workers_mutex_.Pointer());
-    if (workers_.size() >= kMaxWorkers) {
-      Throw(args.GetIsolate(), "Too many workers, I won't let you create more");
-      return;
-    }
-
-    // Initialize the embedder field to nullptr; if we return early without
-    // creating a new Worker (because the main thread is terminating) we can
-    // early-out from the instance calls.
-    args.Holder()->SetAlignedPointerInInternalField(0, nullptr);
-
     if (!allow_new_workers_) return;
 
-    Worker* worker = new Worker;
-    args.Holder()->SetAlignedPointerInInternalField(0, worker);
-    workers_.push_back(worker);
-
     String::Utf8Value script(args.GetIsolate(), source);
     if (!*script) {
       Throw(args.GetIsolate(), "Can't get worker script");
       return;
     }
-    worker->StartExecuteInThread(*script);
+
+    // The C++ worker object's lifetime is shared between the Managed<Worker>
+    // object on the heap, which the JavaScript object points to, and an
+    // internal std::shared_ptr in the worker thread itself.
+    auto worker = std::make_shared<Worker>(*script);
+    i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+    const size_t kWorkerSizeEstimate = 4 * 1024 * 1024;  // stack + heap.
+    i::Handle<i::Object> managed = i::Managed<Worker>::FromSharedPtr(
+        i_isolate, kWorkerSizeEstimate, worker);
+    args.Holder()->SetInternalField(0, Utils::ToLocal(managed));
+    if (!Worker::StartWorkerThread(std::move(worker))) {
+      Throw(args.GetIsolate(), "Can't start thread");
+      return;
+    }
   }
 }
 
@@ -1475,7 +1499,7 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
   int exit_code = (*args)[0]
                       ->Int32Value(args->GetIsolate()->GetCurrentContext())
                       .FromMaybe(0);
-  CleanupWorkers();
+  WaitForRunningWorkers();
   args->GetIsolate()->Exit();
   OnExit(args->GetIsolate());
   base::OS::ExitProcess(exit_code);
@@ -1920,6 +1944,9 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
   EscapableHandleScope handle_scope(isolate);
   Local<Context> context = Context::New(isolate, nullptr, global_template);
   DCHECK(!context.IsEmpty());
+  if (i::FLAG_perf_prof_annotate_wasm) {
+    isolate->SetWasmLoadSourceMapCallback(ReadFile);
+  }
   InitializeModuleEmbedderData(context);
   if (options.include_arguments) {
     Context::Scope scope(context);
@@ -2468,6 +2495,8 @@ void SourceGroup::ExecuteInThread() {
   Isolate::CreateParams create_params;
   create_params.array_buffer_allocator = Shell::array_buffer_allocator;
   Isolate* isolate = Isolate::New(create_params);
+  isolate->SetHostCleanupFinalizationGroupCallback(
+      Shell::HostCleanupFinalizationGroup);
   isolate->SetHostImportModuleDynamicallyCallback(
       Shell::HostImportModuleDynamically);
   isolate->SetHostInitializeImportMetaObjectCallback(
@@ -2504,7 +2533,7 @@ void SourceGroup::ExecuteInThread() {
 void SourceGroup::StartExecuteInThread() {
   if (thread_ == nullptr) {
     thread_ = new IsolateThread(this);
-    thread_->Start();
+    CHECK(thread_->Start());
   }
   next_semaphore_.Signal();
 }
@@ -2550,11 +2579,11 @@ void SerializationDataQueue::Clear() {
   data_.clear();
 }
 
-Worker::Worker()
+Worker::Worker(const char* script)
     : in_semaphore_(0),
       out_semaphore_(0),
       thread_(nullptr),
-      script_(nullptr),
+      script_(i::StrDup(script)),
       running_(false) {}
 
 Worker::~Worker() {
@@ -2562,15 +2591,29 @@ Worker::~Worker() {
   thread_ = nullptr;
   delete[] script_;
   script_ = nullptr;
-  in_queue_.Clear();
-  out_queue_.Clear();
 }
 
-void Worker::StartExecuteInThread(const char* script) {
-  running_ = true;
-  script_ = i::StrDup(script);
-  thread_ = new WorkerThread(this);
-  thread_->Start();
+bool Worker::StartWorkerThread(std::shared_ptr<Worker> worker) {
+  worker->running_ = true;
+  auto thread = new WorkerThread(worker);
+  worker->thread_ = thread;
+  if (thread->Start()) {
+    Shell::AddRunningWorker(std::move(worker));
+    return true;
+  }
+  return false;
+}
+
+void Worker::WorkerThread::Run() {
+  // Prevent a lifetime cycle from Worker -> WorkerThread -> Worker.
+  // We must clear the worker_ field of the thread, but we keep the worker
+  // alive via a local shared_ptr on this stack frame until the thread
+  // finishes execution and removes itself from the running set. Thereafter
+  // the only remaining reference can be from a JavaScript object via a
+  // Managed.
+  auto worker = std::move(worker_);
+  worker_ = nullptr;
+  worker->ExecuteInThread();
+  Shell::RemoveRunningWorker(worker);
 }
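A self-contained illustration of the lifetime problem being avoided, with hypothetical types (the real code differs in that Worker holds the thread through a raw pointer):

```cpp
#include <memory>
#include <utility>

struct WorkerSketch;
struct ThreadSketch {
  std::shared_ptr<WorkerSketch> worker;  // Thread -> Worker
  void Run() {
    // Take over the reference: the thread no longer keeps the worker
    // alive, but this frame does until Run() returns.
    std::shared_ptr<WorkerSketch> keep_alive = std::move(worker);
    worker = nullptr;  // moved-from is already empty; reset for clarity
    // ... execute; 'keep_alive' is released when we fall off the end ...
  }
};
struct WorkerSketch {
  ThreadSketch* thread = nullptr;  // Worker -> Thread, deleted in its dtor
};
```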
 
 void Worker::PostMessage(std::unique_ptr<SerializationData> data) {
@@ -2605,6 +2648,8 @@ void Worker::ExecuteInThread() {
   Isolate::CreateParams create_params;
   create_params.array_buffer_allocator = Shell::array_buffer_allocator;
   Isolate* isolate = Isolate::New(create_params);
+  isolate->SetHostCleanupFinalizationGroupCallback(
+      Shell::HostCleanupFinalizationGroup);
   isolate->SetHostImportModuleDynamicallyCallback(
       Shell::HostImportModuleDynamically);
   isolate->SetHostInitializeImportMetaObjectCallback(
@@ -2657,6 +2702,7 @@ void Worker::ExecuteInThread() {
                   .ToLocalChecked();
           if (onmessage->IsFunction()) {
             Local<Function> onmessage_fun = Local<Function>::Cast(onmessage);
+            SealHandleScope shs(isolate);
             // Now wait for messages
             while (true) {
               in_semaphore_.Wait();
@@ -2666,6 +2712,7 @@ void Worker::ExecuteInThread() {
                 break;
               }
               v8::TryCatch try_catch(isolate);
+              HandleScope scope(isolate);
               Local<Value> value;
               if (Shell::DeserializeValue(isolate, std::move(data))
                       .ToLocal(&value)) {
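The two scopes added here implement a common discipline, sketched below in general form (the loop condition and helpers are placeholders): the sealed outer scope turns any stray handle creation into a DCHECK failure, while the per-message scope guarantees handles from one message die before the next arrives.

```cpp
SealHandleScope shs(isolate);   // outside the loop: no stray handles
while (WaitForNextMessage()) {  // placeholder for the semaphore wait
  HandleScope scope(isolate);   // per-iteration handles die right here
  Local<Value> message;
  if (DeserializeNextMessage(isolate).ToLocal(&message)) {  // placeholder
    DispatchOnMessage(message);  // placeholder
  }
}
```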
@@ -2936,7 +2983,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
       options.isolate_sources[i].WaitForThread();
     }
   }
-  CleanupWorkers();
+  WaitForRunningWorkers();
   // In order to finish successfully, success must be != expected_to_throw.
   return success == Shell::options.expected_to_throw ? 1 : 0;
 }
@@ -2966,6 +3013,40 @@ void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
 }
 
 namespace {
+bool RunSetTimeoutCallback(Isolate* isolate, bool* did_run) {
+  PerIsolateData* data = PerIsolateData::Get(isolate);
+  HandleScope handle_scope(isolate);
+  Local<Function> callback;
+  if (!data->GetTimeoutCallback().ToLocal(&callback)) return true;
+  Local<Context> context;
+  if (!data->GetTimeoutContext().ToLocal(&context)) return true;
+  TryCatch try_catch(isolate);
+  try_catch.SetVerbose(true);
+  Context::Scope context_scope(context);
+  if (callback->Call(context, Undefined(isolate), 0, nullptr).IsEmpty()) {
+    Shell::ReportException(isolate, &try_catch);
+    return false;
+  }
+  *did_run = true;
+  return true;
+}
+
+bool RunCleanupFinalizationGroupCallback(Isolate* isolate, bool* did_run) {
+  PerIsolateData* data = PerIsolateData::Get(isolate);
+  HandleScope handle_scope(isolate);
+  while (true) {
+    Local<FinalizationGroup> fg;
+    if (!data->GetCleanupFinalizationGroup().ToLocal(&fg)) return true;
+    *did_run = true;
+    TryCatch try_catch(isolate);
+    try_catch.SetVerbose(true);
+    if (FinalizationGroup::Cleanup(fg).IsNothing()) {
+      Shell::ReportException(isolate, &try_catch);
+      return false;
+    }
+  }
+}
+
 bool ProcessMessages(
     Isolate* isolate,
     const std::function<platform::MessageLoopBehavior()>& behavior) {
@@ -2976,24 +3057,23 @@ bool ProcessMessages(
     while (v8::platform::PumpMessageLoop(g_default_platform, isolate,
                                          behavior())) {
       MicrotasksScope::PerformCheckpoint(isolate);
+      isolate->ClearKeptObjects();
     }
     if (g_default_platform->IdleTasksEnabled(isolate)) {
       v8::platform::RunIdleTasks(g_default_platform, isolate,
                                  50.0 / base::Time::kMillisecondsPerSecond);
     }
-    HandleScope handle_scope(isolate);
-    PerIsolateData* data = PerIsolateData::Get(isolate);
-    Local<Function> callback;
-    if (!data->GetTimeoutCallback().ToLocal(&callback)) break;
-    Local<Context> context;
-    if (!data->GetTimeoutContext().ToLocal(&context)) break;
-    TryCatch try_catch(isolate);
-    try_catch.SetVerbose(true);
-    Context::Scope context_scope(context);
-    if (callback->Call(context, Undefined(isolate), 0, nullptr).IsEmpty()) {
-      Shell::ReportException(isolate, &try_catch);
+    bool ran_finalization_callback = false;
+    if (!RunCleanupFinalizationGroupCallback(isolate,
+                                             &ran_finalization_callback)) {
       return false;
     }
+    bool ran_set_timeout = false;
+    if (!RunSetTimeoutCallback(isolate, &ran_set_timeout)) {
+      return false;
+    }
+
+    if (!ran_set_timeout && !ran_finalization_callback) return true;
   }
   return true;
 }
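Condensed, the rewritten loop looks like this (PumpLoopUntilIdle stands in for the PumpMessageLoop/RunIdleTasks sequence): it keeps pumping as long as either callback class made progress and stops after the first fully idle pass.

```cpp
for (;;) {
  PumpLoopUntilIdle(isolate);  // also runs isolate->ClearKeptObjects()
  bool ran_cleanup = false;
  if (!RunCleanupFinalizationGroupCallback(isolate, &ran_cleanup)) {
    return false;  // a finalizer threw
  }
  bool ran_timeout = false;
  if (!RunSetTimeoutCallback(isolate, &ran_timeout)) {
    return false;  // a setTimeout callback threw
  }
  if (!ran_cleanup && !ran_timeout) return true;  // nothing left to do
}
```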
@@ -3267,24 +3347,35 @@ MaybeLocal<Value> Shell::DeserializeValue(
   return deserializer.ReadValue(context);
 }
 
-void Shell::CleanupWorkers() {
-  // Make a copy of workers_, because we don't want to call Worker::Terminate
-  // while holding the workers_mutex_ lock. Otherwise, if a worker is about to
-  // create a new Worker, it would deadlock.
-  std::vector<Worker*> workers_copy;
+void Shell::AddRunningWorker(std::shared_ptr<Worker> worker) {
+  workers_mutex_.Pointer()->AssertHeld();  // caller should hold the mutex.
+  running_workers_.insert(worker);
+}
+
+void Shell::RemoveRunningWorker(const std::shared_ptr<Worker>& worker) {
+  base::MutexGuard lock_guard(workers_mutex_.Pointer());
+  auto it = running_workers_.find(worker);
+  if (it != running_workers_.end()) running_workers_.erase(it);
+}
+
+void Shell::WaitForRunningWorkers() {
+  // Make a copy of running_workers_, because we don't want to call
+  // Worker::Terminate while holding the workers_mutex_ lock. Otherwise, if a
+  // worker is about to create a new Worker, it would deadlock.
+  std::unordered_set<std::shared_ptr<Worker>> workers_copy;
   {
     base::MutexGuard lock_guard(workers_mutex_.Pointer());
     allow_new_workers_ = false;
-    workers_copy.swap(workers_);
+    workers_copy.swap(running_workers_);
   }
 
-  for (Worker* worker : workers_copy) {
+  for (auto& worker : workers_copy) {
     worker->WaitForThread();
-    delete worker;
   }
 
   // Now that all workers are terminated, we can re-enable Worker creation.
   base::MutexGuard lock_guard(workers_mutex_.Pointer());
+  DCHECK(running_workers_.empty());
   allow_new_workers_ = true;
   externalized_contents_.clear();
 }
@@ -3405,6 +3496,8 @@ int Shell::Main(int argc, char* argv[]) {
   }
 
   Isolate* isolate = Isolate::New(create_params);
+  isolate->SetHostCleanupFinalizationGroupCallback(
+      Shell::HostCleanupFinalizationGroup);
   isolate->SetHostImportModuleDynamicallyCallback(
       Shell::HostImportModuleDynamically);
   isolate->SetHostInitializeImportMetaObjectCallback(
@@ -3462,6 +3555,8 @@ int Shell::Main(int argc, char* argv[]) {
       i::FLAG_hash_seed ^= 1337;  // Use a different hash seed.
       Isolate* isolate2 = Isolate::New(create_params);
       i::FLAG_hash_seed ^= 1337;  // Restore old hash seed.
+      isolate2->SetHostCleanupFinalizationGroupCallback(
+          Shell::HostCleanupFinalizationGroup);
       isolate2->SetHostImportModuleDynamicallyCallback(
           Shell::HostImportModuleDynamically);
       isolate2->SetHostInitializeImportMetaObjectCallback(
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 1e0dd43c2d0364..04fc5f5d341919 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -11,6 +11,7 @@
 #include <queue>
 #include <string>
 #include <unordered_map>
+#include <unordered_set>
 #include <vector>
 
 #include "src/base/once.h"
@@ -207,12 +208,9 @@ class SerializationDataQueue {
 
 class Worker {
  public:
-  Worker();
+  explicit Worker(const char* script);
   ~Worker();
 
-  // Run the given script on this Worker. This function should only be called
-  // once, and should only be called by the thread that created the Worker.
-  void StartExecuteInThread(const char* script);
   // Post a message to the worker's incoming message queue. The worker will
   // take ownership of the SerializationData.
   // This function should only be called by the thread that created the Worker.
@@ -231,17 +229,20 @@ class Worker {
   // This function can be called by any thread.
   void WaitForThread();
 
+  // Start running the given worker in another thread.
+  static bool StartWorkerThread(std::shared_ptr<Worker> worker);
+
  private:
   class WorkerThread : public base::Thread {
    public:
-    explicit WorkerThread(Worker* worker)
+    explicit WorkerThread(std::shared_ptr<Worker> worker)
         : base::Thread(base::Thread::Options("WorkerThread")),
-          worker_(worker) {}
+          worker_(std::move(worker)) {}
 
-    void Run() override { worker_->ExecuteInThread(); }
+    void Run() override;
 
    private:
-    Worker* worker_;
+    std::shared_ptr<Worker> worker_;
   };
 
   void ExecuteInThread();
@@ -275,6 +276,8 @@ class PerIsolateData {
     PerIsolateData* data_;
   };
 
+  inline void HostCleanupFinalizationGroup(Local<FinalizationGroup> fg);
+  inline MaybeLocal<FinalizationGroup> GetCleanupFinalizationGroup();
   inline void SetTimeout(Local<Function> callback, Local<Context> context);
   inline MaybeLocal<Function> GetTimeoutCallback();
   inline MaybeLocal<Context> GetTimeoutContext();
@@ -292,6 +295,7 @@ class PerIsolateData {
   Global<Value> realm_shared_;
   std::queue<Global<Function>> set_timeout_callbacks_;
   std::queue<Global<Context>> set_timeout_contexts_;
+  std::queue<Global<FinalizationGroup>> cleanup_finalization_groups_;
   AsyncHooks* async_hooks_wrapper_;
 
   int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
@@ -378,7 +382,6 @@ class Shell : public i::AllStatic {
       Isolate* isolate, Local<Value> value, Local<Value> transfer);
   static MaybeLocal<Value> DeserializeValue(
       Isolate* isolate, std::unique_ptr<SerializationData> data);
-  static void CleanupWorkers();
   static int* LookupCounter(const char* name);
   static void* CreateHistogram(const char* name, int min, int max,
                                size_t buckets);
@@ -465,6 +468,8 @@ class Shell : public i::AllStatic {
   static void SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void HostCleanupFinalizationGroup(Local<Context> context,
+                                           Local<FinalizationGroup> fg);
   static MaybeLocal<Promise> HostImportModuleDynamically(
       Local<Context> context, Local<ScriptOrModule> referrer,
       Local<String> specifier);
@@ -493,6 +498,10 @@ class Shell : public i::AllStatic {
            !options.test_shell;
   }
 
+  static void WaitForRunningWorkers();
+  static void AddRunningWorker(std::shared_ptr<Worker> worker);
+  static void RemoveRunningWorker(const std::shared_ptr<Worker>& worker);
+
  private:
   static Global<Context> evaluation_context_;
   static base::OnceType quit_once_;
@@ -509,7 +518,7 @@ class Shell : public i::AllStatic {
 
   static base::LazyMutex workers_mutex_;  // Guards the following members.
   static bool allow_new_workers_;
-  static std::vector<Worker*> workers_;
+  static std::unordered_set<std::shared_ptr<Worker>> running_workers_;
   static std::vector<ExternalizedContents> externalized_contents_;
 
   // Multiple isolates may update this flag concurrently.
diff --git a/deps/v8/src/date/dateparser-inl.h b/deps/v8/src/date/dateparser-inl.h
index b2099ca88d72ce..436b144478d15c 100644
--- a/deps/v8/src/date/dateparser-inl.h
+++ b/deps/v8/src/date/dateparser-inl.h
@@ -13,8 +13,7 @@ namespace v8 {
 namespace internal {
 
 template <typename Char>
-bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray out) {
-  DCHECK(out.length() >= OUTPUT_SIZE);
+bool DateParser::Parse(Isolate* isolate, Vector<Char> str, double* out) {
   InputReader<Char> in(str);
   DateStringTokenizer<Char> scanner(&in);
   TimeZoneComposer tz;
diff --git a/deps/v8/src/date/dateparser.cc b/deps/v8/src/date/dateparser.cc
index 252fe54e5bffa2..f7ea4c726ca9d4 100644
--- a/deps/v8/src/date/dateparser.cc
+++ b/deps/v8/src/date/dateparser.cc
@@ -10,7 +10,7 @@
 namespace v8 {
 namespace internal {
 
-bool DateParser::DayComposer::Write(FixedArray output) {
+bool DateParser::DayComposer::Write(double* output) {
   if (index_ < 1) return false;
   // Day and month defaults to 1.
   while (index_ < kSize) {
@@ -58,13 +58,13 @@ bool DateParser::DayComposer::Write(FixedArray output) {
 
   if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
 
-  output.set(YEAR, Smi::FromInt(year));
-  output.set(MONTH, Smi::FromInt(month - 1));  // 0-based
-  output.set(DAY, Smi::FromInt(day));
+  output[YEAR] = year;
+  output[MONTH] = month - 1;  // 0-based
+  output[DAY] = day;
   return true;
 }
 
-bool DateParser::TimeComposer::Write(FixedArray output) {
+bool DateParser::TimeComposer::Write(double* output) {
   // All time slots default to 0
   while (index_ < kSize) {
     comp_[index_++] = 0;
@@ -89,14 +89,14 @@ bool DateParser::TimeComposer::Write(FixedArray output) {
     }
   }
 
-  output.set(HOUR, Smi::FromInt(hour));
-  output.set(MINUTE, Smi::FromInt(minute));
-  output.set(SECOND, Smi::FromInt(second));
-  output.set(MILLISECOND, Smi::FromInt(millisecond));
+  output[HOUR] = hour;
+  output[MINUTE] = minute;
+  output[SECOND] = second;
+  output[MILLISECOND] = millisecond;
   return true;
 }
 
-bool DateParser::TimeZoneComposer::Write(FixedArray output) {
+bool DateParser::TimeZoneComposer::Write(double* output) {
   if (sign_ != kNone) {
     if (hour_ == kNone) hour_ = 0;
     if (minute_ == kNone) minute_ = 0;
@@ -109,9 +109,9 @@ bool DateParser::TimeZoneComposer::Write(FixedArray output) {
       total_seconds = -total_seconds;
     }
     DCHECK(Smi::IsValid(total_seconds));
-    output.set(UTC_OFFSET, Smi::FromInt(total_seconds));
+    output[UTC_OFFSET] = total_seconds;
   } else {
-    output.set_null(UTC_OFFSET);
+    output[UTC_OFFSET] = std::numeric_limits<double>::quiet_NaN();
   }
   return true;
 }
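For callers, the new contract replaces the heap-allocated FixedArray with a plain stack array of doubles, and the "no timezone" case becomes NaN instead of a null slot. A hedged caller-side sketch (<cmath> assumed for std::isnan):

```cpp
double out[DateParser::OUTPUT_SIZE];
if (DateParser::Parse(isolate, str, out)) {
  double year = out[DateParser::YEAR];
  bool has_timezone = !std::isnan(out[DateParser::UTC_OFFSET]);
  // ... combine the components into a time value ...
}
```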
diff --git a/deps/v8/src/date/dateparser.h b/deps/v8/src/date/dateparser.h
index ac6be476924be1..a32db8f9c8cea2 100644
--- a/deps/v8/src/date/dateparser.h
+++ b/deps/v8/src/date/dateparser.h
@@ -13,6 +13,18 @@ namespace internal {
 
 class DateParser : public AllStatic {
  public:
+  enum {
+    YEAR,
+    MONTH,
+    DAY,
+    HOUR,
+    MINUTE,
+    SECOND,
+    MILLISECOND,
+    UTC_OFFSET,
+    OUTPUT_SIZE
+  };
+
   // Parse the string as a date. If parsing succeeds, return true after
   // filling out the output array as follows (all slots hold doubles):
   // [0]: year
@@ -25,19 +37,7 @@ class DateParser : public AllStatic {
   // [7]: UTC offset in seconds, or NaN if no timezone is specified
   // If parsing fails, return false (content of output array is not defined).
   template <typename Char>
-  static bool Parse(Isolate* isolate, Vector<Char> str, FixedArray output);
-
-  enum {
-    YEAR,
-    MONTH,
-    DAY,
-    HOUR,
-    MINUTE,
-    SECOND,
-    MILLISECOND,
-    UTC_OFFSET,
-    OUTPUT_SIZE
-  };
+  static bool Parse(Isolate* isolate, Vector<Char> str, double* output);
 
  private:
   // Range testing
@@ -274,7 +274,7 @@ class DateParser : public AllStatic {
       return hour_ != kNone && minute_ == kNone && TimeComposer::IsMinute(n);
     }
     bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
-    bool Write(FixedArray output);
+    bool Write(double* output);
     bool IsEmpty() { return hour_ == kNone; }
 
    private:
@@ -300,7 +300,7 @@ class DateParser : public AllStatic {
       return true;
     }
     void SetHourOffset(int n) { hour_offset_ = n; }
-    bool Write(FixedArray output);
+    bool Write(double* output);
 
     static bool IsMinute(int x) { return Between(x, 0, 59); }
     static bool IsHour(int x) { return Between(x, 0, 23); }
@@ -329,7 +329,7 @@ class DateParser : public AllStatic {
       return false;
     }
     void SetNamedMonth(int n) { named_month_ = n; }
-    bool Write(FixedArray output);
+    bool Write(double* output);
     void set_iso_date() { is_iso_date_ = true; }
     static bool IsMonth(int x) { return Between(x, 1, 12); }
     static bool IsDay(int x) { return Between(x, 1, 31); }
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 15aad1fcc25556..cb466ab6ab7596 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -476,6 +476,25 @@ void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo info,
   ResetAllBlockCounts(info);
 }
 
+void PrintBlockCoverage(const CoverageFunction* function,
+                        SharedFunctionInfo info, bool has_nonempty_source_range,
+                        bool function_is_relevant) {
+  DCHECK(FLAG_trace_block_coverage);
+  std::unique_ptr<char[]> function_name =
+      function->name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  i::PrintF(
+      "Coverage for function='%s', SFI=%p, has_nonempty_source_range=%d, "
+      "function_is_relevant=%d\n",
+      function_name.get(), reinterpret_cast<void*>(info.ptr()),
+      has_nonempty_source_range, function_is_relevant);
+  i::PrintF("{start: %d, end: %d, count: %d}\n", function->start, function->end,
+            function->count);
+  for (const auto& block : function->blocks) {
+    i::PrintF("{start: %d, end: %d, count: %d}\n", block.start, block.end,
+              block.count);
+  }
+}
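With --trace-block-coverage, each function prints one header line followed by a {start, end, count} line for the function range and one per block. Illustrative output only; the values are invented, the format follows the PrintF calls above:

```
Coverage for function='fib', SFI=0x3f081d82a629, has_nonempty_source_range=1, function_is_relevant=1
{start: 0, end: 73, count: 1}
{start: 31, end: 54, count: 12}
```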
+
 void CollectAndMaybeResetCounts(Isolate* isolate,
                                 SharedToCounterMap* counter_map,
                                 v8::debug::CoverageMode coverage_mode) {
@@ -668,9 +687,7 @@ std::unique_ptr<Coverage> Coverage::Collect(
       }
 
       // Only include a function range if itself or its parent function is
-      // covered, or if it contains non-trivial block coverage. It must also
-      // have a non-empty source range (otherwise it is not interesting to
-      // report).
+      // covered, or if it contains non-trivial block coverage.
       bool is_covered = (count != 0);
       bool parent_is_covered =
           (!nesting.empty() && functions->at(nesting.back()).count != 0);
@@ -678,10 +695,19 @@ std::unique_ptr<Coverage> Coverage::Collect(
       bool function_is_relevant =
           (is_covered || parent_is_covered || has_block_coverage);
 
-      if (function.HasNonEmptySourceRange() && function_is_relevant) {
+      // It must also have a non-empty source range (otherwise it is not
+      // interesting to report).
+      bool has_nonempty_source_range = function.HasNonEmptySourceRange();
+
+      if (has_nonempty_source_range && function_is_relevant) {
         nesting.push_back(functions->size());
         functions->emplace_back(function);
       }
+
+      if (FLAG_trace_block_coverage) {
+        PrintBlockCoverage(&function, info, has_nonempty_source_range,
+                           function_is_relevant);
+      }
     }
 
     // Remove entries for scripts that have no coverage.
@@ -691,6 +717,13 @@ std::unique_ptr<Coverage> Coverage::Collect(
 }
 
 void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
+  if (mode != isolate->code_coverage_mode()) {
+    // Changing the coverage mode can change the bytecode that would be
+    // generated for a function, which can interfere with lazy source positions,
+    // so just force source position collection whenever there's such a change.
+    isolate->CollectSourcePositionsForAllBytecodeArrays();
+  }
+
   switch (mode) {
     case debug::CoverageMode::kBestEffort:
       // Note that DevTools switches back to best-effort coverage once the
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 0d8a7b2c7e67d4..203885143fa1c8 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -101,11 +101,14 @@ MaybeHandle<Object> DebugEvaluate::WithTopmostArguments(Isolate* isolate,
       .Check();
 
   // Materialize receiver.
-  Handle<String> this_str = factory->this_string();
-  JSObject::SetOwnPropertyIgnoreAttributes(
-      materialized, this_str, Handle<Object>(it.frame()->receiver(), isolate),
-      NONE)
-      .Check();
+  Handle<Object> this_value(it.frame()->receiver(), isolate);
+  DCHECK_EQ(it.frame()->IsConstructor(), this_value->IsTheHole(isolate));
+  if (!this_value->IsTheHole(isolate)) {
+    Handle<String> this_str = factory->this_string();
+    JSObject::SetOwnPropertyIgnoreAttributes(materialized, this_str, this_value,
+                                             NONE)
+        .Check();
+  }
 
   // Use extension object in a debug-evaluate scope.
   Handle<ScopeInfo> scope_info =
@@ -383,6 +386,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
     case Bytecode::kLdaKeyedProperty:
     case Bytecode::kLdaGlobalInsideTypeof:
     case Bytecode::kLdaLookupSlotInsideTypeof:
+    case Bytecode::kGetIterator:
     // Arithmetics.
     case Bytecode::kAdd:
     case Bytecode::kAddSmi:
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 4fe062b277a382..78c4c323fcdfdb 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -93,10 +93,8 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
   VariableMode mode;
   InitializationFlag init_flag;
   MaybeAssignedFlag maybe_assigned_flag;
-  RequiresBrandCheckFlag requires_brand_check;
   return ScopeInfo::ContextSlotIndex(*info, *parameter_name, &mode, &init_flag,
-                                     &maybe_assigned_flag,
-                                     &requires_brand_check) != -1;
+                                     &maybe_assigned_flag) != -1;
 }
 
 RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared,
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 1091e3a8196c0d..4569780d00105f 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -774,7 +774,7 @@ void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
     DCHECK(!context_->IsScriptContext());
     DCHECK(!context_->IsNativeContext());
     DCHECK(!context_->IsWithContext());
-    if (!context_->scope_info().CallsSloppyEval()) return;
+    if (!context_->scope_info().SloppyEvalCanExtendVars()) return;
     if (context_->extension_object().is_null()) return;
     Handle<JSObject> extension(context_->extension_object(), isolate_);
     Handle<FixedArray> keys =
@@ -884,10 +884,9 @@ bool ScopeIterator::SetContextVariableValue(Handle<String> variable_name,
   VariableMode mode;
   InitializationFlag flag;
   MaybeAssignedFlag maybe_assigned_flag;
-  RequiresBrandCheckFlag requires_brand_check;
-  int slot_index = ScopeInfo::ContextSlotIndex(
-      context_->scope_info(), *variable_name, &mode, &flag,
-      &maybe_assigned_flag, &requires_brand_check);
+  int slot_index =
+      ScopeInfo::ContextSlotIndex(context_->scope_info(), *variable_name, &mode,
+                                  &flag, &maybe_assigned_flag);
   if (slot_index < 0) return false;
 
   context_->set(slot_index, *new_value);
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index a0c6fa967c8470..4f691e63a22666 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -98,10 +98,9 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
     VariableMode mode;
     InitializationFlag flag;
     MaybeAssignedFlag maybe_assigned_flag;
-    RequiresBrandCheckFlag requires_brand_check;
     int slot_index = ScopeInfo::ContextSlotIndex(
         context->scope_info(), ReadOnlyRoots(isolate_->heap()).this_string(),
-        &mode, &flag, &maybe_assigned_flag, &requires_brand_check);
+        &mode, &flag, &maybe_assigned_flag);
     if (slot_index < 0) return v8::MaybeLocal<v8::Value>();
     Handle<Object> value = handle(context->get(slot_index), isolate_);
     if (value->IsTheHole(isolate_)) return v8::MaybeLocal<v8::Value>();
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index 5ed2dfb116fb31..c0ba96c2484bc6 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -71,6 +71,13 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
 }
 
 void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfileMode mode) {
+  if (mode != isolate->type_profile_mode()) {
+    // Changing the type profile mode can change the bytecode that would be
+    // generated for a function, which can interfere with lazy source positions,
+    // so just force source position collection whenever there's such a change.
+    isolate->CollectSourcePositionsForAllBytecodeArrays();
+  }
+
   HandleScope handle_scope(isolate);
 
   if (mode == debug::TypeProfileMode::kNone) {
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 9b5200e3430948..aa308150acb48c 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -1224,8 +1224,12 @@ void Debug::InstallDebugBreakTrampoline() {
 
   Handle<Code> trampoline = BUILTIN_CODE(isolate_, DebugBreakTrampoline);
   std::vector<Handle<JSFunction>> needs_compile;
-  std::vector<Handle<AccessorPair>> needs_instantiate;
+  using AccessorPairWithContext =
+      std::pair<Handle<AccessorPair>, Handle<NativeContext>>;
+  std::vector<AccessorPairWithContext> needs_instantiate;
   {
+    // Deduplicate {needs_instantiate} by recording all collected AccessorPairs.
+    std::set<AccessorPair> recorded;
     HeapObjectIterator iterator(isolate_->heap());
     for (HeapObject obj = iterator.Next(); !obj.is_null();
          obj = iterator.Next()) {
@@ -1242,11 +1246,26 @@ void Debug::InstallDebugBreakTrampoline() {
         } else {
           fun.set_code(*trampoline);
         }
-      } else if (obj.IsAccessorPair()) {
-        AccessorPair accessor_pair = AccessorPair::cast(obj);
-        if (accessor_pair.getter().IsFunctionTemplateInfo() ||
-            accessor_pair.setter().IsFunctionTemplateInfo()) {
-          needs_instantiate.push_back(handle(accessor_pair, isolate_));
+      } else if (obj.IsJSObject()) {
+        JSObject object = JSObject::cast(obj);
+        DescriptorArray descriptors = object.map().instance_descriptors();
+
+        for (int i = 0; i < object.map().NumberOfOwnDescriptors(); ++i) {
+          if (descriptors.GetDetails(i).kind() == PropertyKind::kAccessor) {
+            Object value = descriptors.GetStrongValue(i);
+            if (!value.IsAccessorPair()) continue;
+
+            AccessorPair accessor_pair = AccessorPair::cast(value);
+            if (!accessor_pair.getter().IsFunctionTemplateInfo() &&
+                !accessor_pair.setter().IsFunctionTemplateInfo()) {
+              continue;
+            }
+            if (recorded.find(accessor_pair) != recorded.end()) continue;
+
+            needs_instantiate.emplace_back(handle(accessor_pair, isolate_),
+                                           object.GetCreationContext());
+            recorded.insert(accessor_pair);
+          }
         }
       }
     }
@@ -1254,10 +1273,13 @@ void Debug::InstallDebugBreakTrampoline() {
 
   // Forcibly instantiate all lazy accessor pairs to make sure that they
   // properly hit the debug break trampoline.
-  for (Handle<AccessorPair> accessor_pair : needs_instantiate) {
+  for (AccessorPairWithContext tuple : needs_instantiate) {
+    Handle<AccessorPair> accessor_pair = tuple.first;
+    Handle<NativeContext> native_context = tuple.second;
     if (accessor_pair->getter().IsFunctionTemplateInfo()) {
       Handle<JSFunction> fun =
           ApiNatives::InstantiateFunction(
+              isolate_, native_context,
               handle(FunctionTemplateInfo::cast(accessor_pair->getter()),
                      isolate_))
               .ToHandleChecked();
@@ -1266,6 +1288,7 @@ void Debug::InstallDebugBreakTrampoline() {
     if (accessor_pair->setter().IsFunctionTemplateInfo()) {
       Handle<JSFunction> fun =
           ApiNatives::InstantiateFunction(
+              isolate_, native_context,
               handle(FunctionTemplateInfo::cast(accessor_pair->setter()),
                      isolate_))
               .ToHandleChecked();
@@ -1734,9 +1757,6 @@ bool Debug::IsFrameBlackboxed(JavaScriptFrame* frame) {
 
 void Debug::OnException(Handle<Object> exception, Handle<Object> promise,
                         v8::debug::ExceptionType exception_type) {
-  // TODO(kozyatinskiy): regress-662674.js test fails on arm without this.
-  if (!AllowJavascriptExecution::IsAllowed(isolate_)) return;
-
   Isolate::CatchType catch_type = isolate_->PredictExceptionCatcher();
 
   // Don't notify listener of exceptions that are internal to a desugaring.
@@ -1775,6 +1795,11 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise,
     if (it.done()) return;  // Do not trigger an event with an empty stack.
   }
 
+  // Do not trigger an exception event on stack overflow. We cannot do
+  // anything useful for debugging in that situation.
+  StackLimitCheck stack_limit_check(isolate_);
+  if (stack_limit_check.JsHasOverflowed()) return;
+
   DebugScope debug_scope(this);
   HandleScope scope(isolate_);
   DisableBreak no_recursive_break(this);
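The hunk above pairs each AccessorPair with the creation context of the object
it was found on and deduplicates via a std::set, so each pair is instantiated
only once; the recorded context is what ApiNatives::InstantiateFunction later
instantiates the function templates in. A minimal standalone sketch of that
pattern, using hypothetical stand-in types rather than V8's handle machinery:

#include <set>
#include <utility>
#include <vector>

struct AccessorPair { int id; };
bool operator<(AccessorPair a, AccessorPair b) { return a.id < b.id; }
struct NativeContext { int id; };

using AccessorPairWithContext = std::pair<AccessorPair, NativeContext>;

std::vector<AccessorPairWithContext> CollectUnique(
    const std::vector<AccessorPairWithContext>& found) {
  std::set<AccessorPair> recorded;  // dedup key: the accessor pair itself
  std::vector<AccessorPairWithContext> needs_instantiate;
  for (const AccessorPairWithContext& entry : found) {
    if (recorded.find(entry.first) != recorded.end()) continue;  // seen it
    needs_instantiate.push_back(entry);
    recorded.insert(entry.first);
  }
  return needs_instantiate;
}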
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 684397400ac015..eef89f93725aa6 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -375,6 +375,8 @@ class V8_EXPORT_PRIVATE Debug {
     return thread_local_.break_on_next_function_call_;
   }
 
+  inline bool break_disabled() const { return break_disabled_; }
+
   DebugFeatureTracker* feature_tracker() { return &feature_tracker_; }
 
   // For functions in which we cannot set a break point, use a canonical
@@ -399,7 +401,6 @@ class V8_EXPORT_PRIVATE Debug {
     return is_suppressed_ || !is_active_ ||
            isolate_->debug_execution_mode() == DebugInfo::kSideEffects;
   }
-  inline bool break_disabled() const { return break_disabled_; }
 
   void clear_suspended_generator() {
     thread_local_.suspended_generator_ = Smi::kZero;
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index 89e9988f9eee41..2befb70264abc8 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -12,6 +12,9 @@
 namespace v8 {
 namespace internal {
 
+const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
+const int Deoptimizer::kDeoptExitSize = 0;
+
 #define __ masm->
 
 // This code tries to be close to ia32 code so that any changes can be
@@ -28,7 +31,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
 
   const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kNumRegisters;
-  const int kFloatRegsSize = kFloatSize * SwVfpRegister::kNumRegisters;
 
   // Save all allocatable VFP registers before messing with them.
   {
@@ -48,9 +50,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     // small number and we need to use condition codes.
     __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
     __ vstm(db_w, sp, d0, d15);
-
-    // Push registers s0-s31 on the stack.
-    __ vstm(db_w, sp, s0, s31);
   }
 
   // Push all 16 registers (needed to populate FrameDescription::registers_).
@@ -67,7 +66,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   }
 
   const int kSavedRegistersAreaSize =
-      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
 
   // The bailout id is passed in r10 by the caller.
   __ mov(r2, r10);
@@ -119,23 +118,11 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     int dst_offset = code * kDoubleSize + double_regs_offset;
-    int src_offset =
-        code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
+    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
     __ vldr(d0, sp, src_offset);
     __ vstr(d0, r1, dst_offset);
   }
 
-  // Copy VFP registers to
-  // float_registers_[FloatRegister::kNumAllocatableRegisters]
-  int float_regs_offset = FrameDescription::float_registers_offset();
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    int dst_offset = code * kFloatSize + float_regs_offset;
-    int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
-    __ ldr(r2, MemOperand(sp, src_offset));
-    __ str(r2, MemOperand(r1, dst_offset));
-  }
-
   // Remove the saved registers from the stack.
   __ add(sp, sp, Operand(kSavedRegistersAreaSize));
 
@@ -234,7 +221,12 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   __ stop();
 }
 
-bool Deoptimizer::PadTopOfStackRegister() { return false; }
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+  const int kShift = n % 2 == 0 ? 0 : 32;
+
+  return Float32::FromBits(
+      static_cast<uint32_t>(double_registers_[n / 2].get_bits() >> kShift));
+}
 
 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
   SetFrameSlot(offset, value);
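The new RegisterValues::GetFloatRegister above exploits ARM's VFP register
aliasing: s(2k) occupies the low 32 bits of d(k) and s(2k+1) the high 32 bits,
which is why the generated deopt entry no longer needs to save s0-s31
separately. The same arithmetic in isolation, as plain C++ over made-up
register contents:

#include <cassert>
#include <cstdint>

uint32_t FloatBitsFromDoubles(const uint64_t* double_regs, unsigned n) {
  const int kShift = (n % 2 == 0) ? 0 : 32;  // even: low half, odd: high half
  return static_cast<uint32_t>(double_regs[n / 2] >> kShift);
}

int main() {
  // d0 holds the bits of s1 (high half, ~3.14f) and s0 (low half, 1.0f).
  const uint64_t d[1] = {0x4048F5C33F800000ull};
  assert(FloatBitsFromDoubles(d, 0) == 0x3F800000u);  // s0
  assert(FloatBitsFromDoubles(d, 1) == 0x4048F5C3u);  // s1
  return 0;
}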
diff --git a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index a96b1263abb681..82ae764e506602 100644
--- a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -13,6 +13,9 @@
 namespace v8 {
 namespace internal {
 
+const bool Deoptimizer::kSupportsFixedDeoptExitSize = true;
+const int Deoptimizer::kDeoptExitSize = kInstrSize;
+
 #define __ masm->
 
 namespace {
@@ -111,12 +114,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   DCHECK_EQ(saved_double_registers.Count() % 2, 0);
   __ PushCPURegList(saved_double_registers);
 
-  CPURegList saved_float_registers(
-      CPURegister::kVRegister, kSRegSizeInBits,
-      RegisterConfiguration::Default()->allocatable_float_codes_mask());
-  DCHECK_EQ(saved_float_registers.Count() % 4, 0);
-  __ PushCPURegList(saved_float_registers);
-
   // We save all the registers except sp, lr, platform register (x18) and the
   // masm scratches.
   CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
@@ -134,17 +131,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
 
   const int kSavedRegistersAreaSize =
       (saved_registers.Count() * kXRegSize) +
-      (saved_double_registers.Count() * kDRegSize) +
-      (saved_float_registers.Count() * kSRegSize);
+      (saved_double_registers.Count() * kDRegSize);
 
   // Floating point registers are saved on the stack above core registers.
-  const int kFloatRegistersOffset = saved_registers.Count() * kXRegSize;
-  const int kDoubleRegistersOffset =
-      kFloatRegistersOffset + saved_float_registers.Count() * kSRegSize;
+  const int kDoubleRegistersOffset = saved_registers.Count() * kXRegSize;
 
-  // The bailout id was passed by the caller in x26.
+  // We don't use a bailout id for arm64, because we can compute the id from the
+  // address. Pass kMaxUInt32 instead to signify this.
   Register bailout_id = x2;
-  __ Mov(bailout_id, x26);
+  __ Mov(bailout_id, kMaxUInt32);
 
   Register code_object = x3;
   Register fp_to_sp = x4;
@@ -194,12 +189,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
                      saved_double_registers, x2, x3, kDoubleRegistersOffset);
 
-  // Copy float registers to the input frame.
-  // TODO(arm): these are the lower 32-bits of the double registers stored
-  // above, so we shouldn't need to store them again.
-  CopyRegListToFrame(masm, x1, FrameDescription::float_registers_offset(),
-                     saved_float_registers, w2, w3, kFloatRegistersOffset);
-
   // Remove the saved registers from the stack.
   DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
   __ Drop(kSavedRegistersAreaSize / kXRegSize);
@@ -285,7 +274,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   __ Br(continuation);
 }
 
-bool Deoptimizer::PadTopOfStackRegister() { return true; }
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+  return Float32::FromBits(
+      static_cast<uint32_t>(double_registers_[n].get_bits()));
+}
 
 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
   SetFrameSlot(offset, value);
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 47c40d373eb179..64551c68996f8c 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -452,6 +452,15 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
   return nullptr;
 }
 
+namespace {
+
+uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
+  static constexpr int kTheReceiver = 1;
+  return sfi.internal_formal_parameter_count() + kTheReceiver;
+}
+
+}  // namespace
+
 Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
                          DeoptimizeKind kind, unsigned bailout_id, Address from,
                          int fp_to_sp_delta)
@@ -503,8 +512,22 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
             CodeDeoptEvent(compiled_code_, kind, from_, fp_to_sp_delta_));
   }
   unsigned size = ComputeInputFrameSize();
-  int parameter_count = function.shared().internal_formal_parameter_count() + 1;
+  const int parameter_count =
+      InternalFormalParameterCountWithReceiver(function.shared());
   input_ = new (size) FrameDescription(size, parameter_count);
+
+  if (kSupportsFixedDeoptExitSize) {
+    DCHECK_EQ(bailout_id_, kMaxUInt32);
+    // Calculate bailout id from return address.
+    DCHECK_GT(kDeoptExitSize, 0);
+    DeoptimizationData deopt_data =
+        DeoptimizationData::cast(compiled_code_.deoptimization_data());
+    Address deopt_start = compiled_code_.raw_instruction_start() +
+                          deopt_data.DeoptExitStart().value();
+    int offset = static_cast<int>(from_ - kDeoptExitSize - deopt_start);
+    DCHECK_EQ(0, offset % kDeoptExitSize);
+    bailout_id_ = offset / kDeoptExitSize;
+  }
 }
 
 Code Deoptimizer::FindOptimizedCode() {
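With kSupportsFixedDeoptExitSize the constructor above recovers the bailout id
from the return address instead of receiving it in a register: exits are
kDeoptExitSize bytes each and emitted contiguously from DeoptExitStart, so the
return address of exit i is deopt_start + (i + 1) * kDeoptExitSize. The
arithmetic restated standalone, with made-up addresses:

#include <cassert>
#include <cstdint>

int BailoutIdFromReturnAddress(uintptr_t from, uintptr_t deopt_start,
                               int deopt_exit_size) {
  const int offset = static_cast<int>(from - deopt_exit_size - deopt_start);
  assert(offset % deopt_exit_size == 0);
  return offset / deopt_exit_size;
}

int main() {
  // arm64: kDeoptExitSize == kInstrSize == 4, so the third exit (id 2)
  // returns to deopt_start + 3 * 4.
  assert(BailoutIdFromReturnAddress(0x1000 + 12, 0x1000, 4) == 2);
  return 0;
}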
@@ -624,10 +647,6 @@ int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
   return -1;
 }
 
-bool ShouldPadArguments(int arg_count) {
-  return kPadArguments && (arg_count % 2 != 0);
-}
-
 }  // namespace
 
 // We rely on this function not causing a GC.  It is called from generated code
@@ -787,45 +806,33 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
   SharedFunctionInfo shared = translated_frame->raw_shared_info();
 
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
-  bool is_bottommost = (0 == frame_index);
-  bool is_topmost = (output_count_ - 1 == frame_index);
+  const bool is_bottommost = (0 == frame_index);
+  const bool is_topmost = (output_count_ - 1 == frame_index);
 
-  int bytecode_offset = translated_frame->node_id().ToInt();
-  int height = translated_frame->height();
-  int register_count = height - 1;  // Exclude accumulator.
-  int register_stack_slot_count =
-      InterpreterFrameConstants::RegisterStackSlotCount(register_count);
-  int height_in_bytes = register_stack_slot_count * kSystemPointerSize;
+  const int real_bytecode_offset = translated_frame->node_id().ToInt();
+  const int bytecode_offset =
+      goto_catch_handler ? catch_handler_pc_offset_ : real_bytecode_offset;
 
-  // The topmost frame will contain the accumulator.
-  if (is_topmost) {
-    height_in_bytes += kSystemPointerSize;
-    if (PadTopOfStackRegister()) height_in_bytes += kSystemPointerSize;
-  }
+  const int parameters_count = InternalFormalParameterCountWithReceiver(shared);
+  const int locals_count = translated_frame->height();
+  InterpretedFrameInfo frame_info =
+      InterpretedFrameInfo::Precise(parameters_count, locals_count, is_topmost);
+  const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
 
   TranslatedFrame::iterator function_iterator = value_iterator++;
   if (trace_scope_ != nullptr) {
     PrintF(trace_scope_->file(), "  translating interpreted frame ");
     std::unique_ptr<char[]> name = shared.DebugName().ToCString();
     PrintF(trace_scope_->file(), "%s", name.get());
-    PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d%s\n",
-           bytecode_offset, height_in_bytes,
-           goto_catch_handler ? " (throw)" : "");
-  }
-  if (goto_catch_handler) {
-    bytecode_offset = catch_handler_pc_offset_;
+    PrintF(trace_scope_->file(),
+           " => bytecode_offset=%d, variable_frame_size=%d, frame_size=%d%s\n",
+           real_bytecode_offset, frame_info.frame_size_in_bytes_without_fixed(),
+           output_frame_size, goto_catch_handler ? " (throw)" : "");
   }
 
-  // The 'fixed' part of the frame consists of the incoming parameters and
-  // the part described by InterpreterFrameConstants. This will include
-  // argument padding, when needed.
-  unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
   // Allocate and store the output frame description.
-  int parameter_count = shared.internal_formal_parameter_count() + 1;
   FrameDescription* output_frame = new (output_frame_size)
-      FrameDescription(output_frame_size, parameter_count);
+      FrameDescription(output_frame_size, parameters_count);
   FrameWriter frame_writer(this, output_frame, trace_scope_);
 
   CHECK(frame_index >= 0 && frame_index < output_count_);
@@ -834,22 +841,19 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
 
   // The top address of the frame is computed from the previous frame's top and
   // this frame's size.
-  intptr_t top_address;
-  if (is_bottommost) {
-    top_address = caller_frame_top_ - output_frame_size;
-  } else {
-    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  }
+  const intptr_t top_address =
+      is_bottommost ? caller_frame_top_ - output_frame_size
+                    : output_[frame_index - 1]->GetTop() - output_frame_size;
   output_frame->SetTop(top_address);
 
   // Compute the incoming parameter translation.
 
   ReadOnlyRoots roots(isolate());
-  if (ShouldPadArguments(parameter_count)) {
+  if (ShouldPadArguments(parameters_count)) {
     frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
   }
 
-  for (int i = 0; i < parameter_count; ++i, ++value_iterator) {
+  for (int i = 0; i < parameters_count; ++i, ++value_iterator) {
     frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
   }
 
@@ -880,7 +884,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
       is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp();
   frame_writer.PushCallerFp(caller_fp);
 
-  intptr_t fp_value = top_address + frame_writer.top_offset();
+  const intptr_t fp_value = top_address + frame_writer.top_offset();
   output_frame->SetFp(fp_value);
   if (is_topmost) {
     Register fp_reg = InterpretedFrame::fp_register();
@@ -926,7 +930,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
   frame_writer.PushRawObject(bytecode_array, "bytecode array\n");
 
   // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
-  int raw_bytecode_offset =
+  const int raw_bytecode_offset =
       BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
   Smi smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
   frame_writer.PushRawObject(smi_bytecode_offset, "bytecode offset\n");
@@ -938,16 +942,16 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
   // Translate the rest of the interpreter registers in the frame.
   // The return_value_offset is counted from the top. Here, we compute the
   // register index (counted from the start).
-  int return_value_first_reg =
-      register_count - translated_frame->return_value_offset();
-  int return_value_count = translated_frame->return_value_count();
-  for (int i = 0; i < register_count; ++i, ++value_iterator) {
+  const int return_value_first_reg =
+      locals_count - translated_frame->return_value_offset();
+  const int return_value_count = translated_frame->return_value_count();
+  for (int i = 0; i < locals_count; ++i, ++value_iterator) {
     // Ensure we write the return value if we have one and we are returning
     // normally to a lazy deopt point.
     if (is_topmost && !goto_catch_handler &&
         deopt_kind_ == DeoptimizeKind::kLazy && i >= return_value_first_reg &&
         i < return_value_first_reg + return_value_count) {
-      int return_index = i - return_value_first_reg;
+      const int return_index = i - return_value_first_reg;
       if (return_index == 0) {
         frame_writer.PushRawValue(input_->GetRegister(kReturnRegister0.code()),
                                   "return value 0\n");
@@ -955,7 +959,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
         // the accumulator and another one into an ordinary register. Since
         // the interpreter should never create such situation, just assert
         // this does not happen.
-        CHECK_LE(return_value_first_reg + return_value_count, register_count);
+        CHECK_LE(return_value_first_reg + return_value_count, locals_count);
       } else {
         CHECK_EQ(return_index, 1);
         frame_writer.PushRawValue(input_->GetRegister(kReturnRegister1.code()),
@@ -967,18 +971,18 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
     }
   }
 
-  int register_slots_written = register_count;
-  DCHECK_LE(register_slots_written, register_stack_slot_count);
+  uint32_t register_slots_written = static_cast<uint32_t>(locals_count);
+  DCHECK_LE(register_slots_written, frame_info.register_stack_slot_count());
   // Some architectures must pad the stack frame with extra stack slots
   // to ensure the stack frame is aligned. Do this now.
-  while (register_slots_written < register_stack_slot_count) {
+  while (register_slots_written < frame_info.register_stack_slot_count()) {
     register_slots_written++;
     frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
   }
 
   // Translate the accumulator register (depending on frame position).
   if (is_topmost) {
-    if (PadTopOfStackRegister()) {
+    if (kPadArguments) {
       frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
     }
     // For topmost frame, put the accumulator on the stack. The
@@ -1054,26 +1058,24 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
 void Deoptimizer::DoComputeArgumentsAdaptorFrame(
     TranslatedFrame* translated_frame, int frame_index) {
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
-  bool is_bottommost = (0 == frame_index);
+  const bool is_bottommost = (0 == frame_index);
 
-  unsigned height = translated_frame->height();
-  unsigned height_in_bytes = height * kSystemPointerSize;
-  int parameter_count = height;
-  if (ShouldPadArguments(parameter_count))
-    height_in_bytes += kSystemPointerSize;
+  const int parameters_count = translated_frame->height();
+  ArgumentsAdaptorFrameInfo frame_info =
+      ArgumentsAdaptorFrameInfo::Precise(parameters_count);
+  const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
 
   TranslatedFrame::iterator function_iterator = value_iterator++;
   if (trace_scope_ != nullptr) {
     PrintF(trace_scope_->file(),
-           "  translating arguments adaptor => height=%d\n", height_in_bytes);
+           "  translating arguments adaptor => variable_frame_size=%d, "
+           "frame_size=%d\n",
+           frame_info.frame_size_in_bytes_without_fixed(), output_frame_size);
   }
 
-  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFixedFrameSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
   // Allocate and store the output frame description.
   FrameDescription* output_frame = new (output_frame_size)
-      FrameDescription(output_frame_size, parameter_count);
+      FrameDescription(output_frame_size, parameters_count);
   FrameWriter frame_writer(this, output_frame, trace_scope_);
 
   // Arguments adaptor can not be topmost.
@@ -1083,21 +1085,18 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
 
   // The top address of the frame is computed from the previous frame's top and
   // this frame's size.
-  intptr_t top_address;
-  if (is_bottommost) {
-    top_address = caller_frame_top_ - output_frame_size;
-  } else {
-    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  }
+  const intptr_t top_address =
+      is_bottommost ? caller_frame_top_ - output_frame_size
+                    : output_[frame_index - 1]->GetTop() - output_frame_size;
   output_frame->SetTop(top_address);
 
   ReadOnlyRoots roots(isolate());
-  if (ShouldPadArguments(parameter_count)) {
+  if (ShouldPadArguments(parameters_count)) {
     frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
   }
 
   // Compute the incoming parameter translation.
-  for (int i = 0; i < parameter_count; ++i, ++value_iterator) {
+  for (int i = 0; i < parameters_count; ++i, ++value_iterator) {
     frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
   }
 
@@ -1133,7 +1132,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
   frame_writer.PushTranslatedValue(function_iterator, "function\n");
 
   // Number of incoming arguments.
-  frame_writer.PushRawObject(Smi::FromInt(height - 1), "argc\n");
+  const uint32_t parameters_count_without_receiver = parameters_count - 1;
+  frame_writer.PushRawObject(Smi::FromInt(parameters_count_without_receiver),
+                             "argc\n");
 
   frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
 
@@ -1157,7 +1158,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
 void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
                                               int frame_index) {
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
-  bool is_topmost = (output_count_ - 1 == frame_index);
+  const bool is_topmost = (output_count_ - 1 == frame_index);
   // The construct frame could become topmost only if we inlined a constructor
   // call which does a tail call (otherwise the tail callee's frame would be
   // the topmost one). So it could only be the DeoptimizeKind::kLazy case.
@@ -1166,38 +1167,25 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
   Builtins* builtins = isolate_->builtins();
   Code construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   BailoutId bailout_id = translated_frame->node_id();
-  unsigned height = translated_frame->height();
-  unsigned parameter_count = height - 1;  // Exclude the context.
-  unsigned height_in_bytes = parameter_count * kSystemPointerSize;
-
-  // If the construct frame appears to be topmost we should ensure that the
-  // value of result register is preserved during continuation execution.
-  // We do this here by "pushing" the result of the constructor function to the
-  // top of the reconstructed stack and popping it in
-  // {Builtins::kNotifyDeoptimized}.
-  if (is_topmost) {
-    height_in_bytes += kSystemPointerSize;
-    if (PadTopOfStackRegister()) height_in_bytes += kSystemPointerSize;
-  }
 
-  if (ShouldPadArguments(parameter_count))
-    height_in_bytes += kSystemPointerSize;
+  const int parameters_count = translated_frame->height();
+  ConstructStubFrameInfo frame_info =
+      ConstructStubFrameInfo::Precise(parameters_count, is_topmost);
+  const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
 
   TranslatedFrame::iterator function_iterator = value_iterator++;
   if (trace_scope_ != nullptr) {
     PrintF(trace_scope_->file(),
-           "  translating construct stub => bailout_id=%d (%s), height=%d\n",
+           "  translating construct stub => bailout_id=%d (%s), "
+           "variable_frame_size=%d, frame_size=%d\n",
            bailout_id.ToInt(),
            bailout_id == BailoutId::ConstructStubCreate() ? "create" : "invoke",
-           height_in_bytes);
+           frame_info.frame_size_in_bytes_without_fixed(), output_frame_size);
   }
 
-  unsigned fixed_frame_size = ConstructFrameConstants::kFixedFrameSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
   // Allocate and store the output frame description.
   FrameDescription* output_frame = new (output_frame_size)
-      FrameDescription(output_frame_size, parameter_count);
+      FrameDescription(output_frame_size, parameters_count);
   FrameWriter frame_writer(this, output_frame, trace_scope_);
 
   // Construct stub can not be topmost.
@@ -1207,12 +1195,12 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
 
   // The top address of the frame is computed from the previous frame's top and
   // this frame's size.
-  intptr_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  const intptr_t top_address =
+      output_[frame_index - 1]->GetTop() - output_frame_size;
   output_frame->SetTop(top_address);
 
   ReadOnlyRoots roots(isolate());
-  if (ShouldPadArguments(parameter_count)) {
+  if (ShouldPadArguments(parameters_count)) {
     frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
   }
 
@@ -1222,7 +1210,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
   TranslatedFrame::iterator receiver_iterator = value_iterator;
 
   // Compute the incoming parameter translation.
-  for (unsigned i = 0; i < parameter_count; ++i, ++value_iterator) {
+  for (int i = 0; i < parameters_count; ++i, ++value_iterator) {
     frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
   }
 
@@ -1237,7 +1225,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
   const intptr_t caller_fp = output_[frame_index - 1]->GetFp();
   frame_writer.PushCallerFp(caller_fp);
 
-  intptr_t fp_value = top_address + frame_writer.top_offset();
+  const intptr_t fp_value = top_address + frame_writer.top_offset();
   output_frame->SetFp(fp_value);
   if (is_topmost) {
     Register fp_reg = JavaScriptFrame::fp_register();
@@ -1257,7 +1245,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
   frame_writer.PushTranslatedValue(value_iterator++, "context");
 
   // Number of incoming arguments.
-  frame_writer.PushRawObject(Smi::FromInt(parameter_count - 1), "argc\n");
+  const uint32_t parameters_count_without_receiver = parameters_count - 1;
+  frame_writer.PushRawObject(Smi::FromInt(parameters_count_without_receiver),
+                             "argc\n");
 
   // The constructor function was mentioned explicitly in the
   // CONSTRUCT_STUB_FRAME.
@@ -1277,7 +1267,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
   frame_writer.PushTranslatedValue(receiver_iterator, debug_hint);
 
   if (is_topmost) {
-    if (PadTopOfStackRegister()) {
+    if (kPadArguments) {
       frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
     }
     // Ensure the result is restored back when we return to the stub.
@@ -1292,7 +1282,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
   // Compute this frame's PC.
   DCHECK(bailout_id.IsValidForConstructStub());
   Address start = construct_stub.InstructionStart();
-  int pc_offset =
+  const int pc_offset =
       bailout_id == BailoutId::ConstructStubCreate()
           ? isolate_->heap()->construct_stub_create_deopt_pc_offset().value()
           : isolate_->heap()->construct_stub_invoke_deopt_pc_offset().value();
@@ -1330,8 +1320,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
   }
 }
 
-bool Deoptimizer::BuiltinContinuationModeIsJavaScript(
-    BuiltinContinuationMode mode) {
+namespace {
+
+bool BuiltinContinuationModeIsJavaScript(BuiltinContinuationMode mode) {
   switch (mode) {
     case BuiltinContinuationMode::STUB:
       return false;
@@ -1343,31 +1334,16 @@ bool Deoptimizer::BuiltinContinuationModeIsJavaScript(
   UNREACHABLE();
 }
 
-bool Deoptimizer::BuiltinContinuationModeIsWithCatch(
+StackFrame::Type BuiltinContinuationModeToFrameType(
     BuiltinContinuationMode mode) {
   switch (mode) {
     case BuiltinContinuationMode::STUB:
-    case BuiltinContinuationMode::JAVASCRIPT:
-      return false;
-    case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
-    case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
-      return true;
-  }
-  UNREACHABLE();
-}
-
-namespace {
-
-StackFrame::Type BuiltinContinuationModeToFrameType(
-    Deoptimizer::BuiltinContinuationMode mode) {
-  switch (mode) {
-    case Deoptimizer::BuiltinContinuationMode::STUB:
       return StackFrame::BUILTIN_CONTINUATION;
-    case Deoptimizer::BuiltinContinuationMode::JAVASCRIPT:
+    case BuiltinContinuationMode::JAVASCRIPT:
       return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION;
-    case Deoptimizer::BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
+    case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
       return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
-    case Deoptimizer::BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
+    case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
       return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
   }
   UNREACHABLE();
@@ -1454,65 +1430,31 @@ void Deoptimizer::DoComputeBuiltinContinuation(
     BuiltinContinuationMode mode) {
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
 
-  // The output frame must have room for all of the parameters that need to be
-  // passed to the builtin continuation.
-  const int height_in_words = translated_frame->height();
-
-  BailoutId bailout_id = translated_frame->node_id();
+  const BailoutId bailout_id = translated_frame->node_id();
   Builtins::Name builtin_name = Builtins::GetBuiltinFromBailoutId(bailout_id);
-  Code builtin = isolate()->builtins()->builtin(builtin_name);
-  Callable continuation_callable =
-      Builtins::CallableFor(isolate(), builtin_name);
   CallInterfaceDescriptor continuation_descriptor =
-      continuation_callable.descriptor();
+      Builtins::CallInterfaceDescriptorFor(builtin_name);
+
+  const RegisterConfiguration* config = RegisterConfiguration::Default();
 
   const bool is_bottommost = (0 == frame_index);
   const bool is_topmost = (output_count_ - 1 == frame_index);
-  const bool must_handle_result =
-      !is_topmost || deopt_kind_ == DeoptimizeKind::kLazy;
 
-  const RegisterConfiguration* config(RegisterConfiguration::Default());
-  const int allocatable_register_count =
-      config->num_allocatable_general_registers();
-  const int padding_slot_count =
-      BuiltinContinuationFrameConstants::PaddingSlotCount(
-          allocatable_register_count);
-
-  const int register_parameter_count =
-      continuation_descriptor.GetRegisterParameterCount();
-  // Make sure to account for the context by removing it from the register
-  // parameter count.
-  const int translated_stack_parameters =
-      height_in_words - register_parameter_count - 1;
-  const int stack_param_count =
-      translated_stack_parameters + (must_handle_result ? 1 : 0) +
-      (BuiltinContinuationModeIsWithCatch(mode) ? 1 : 0);
-  const int stack_param_pad_count =
-      ShouldPadArguments(stack_param_count) ? 1 : 0;
-
-  // If the builtins frame appears to be topmost we should ensure that the
-  // value of result register is preserved during continuation execution.
-  // We do this here by "pushing" the result of callback function to the
-  // top of the reconstructed stack and popping it in
-  // {Builtins::kNotifyDeoptimized}.
-  const int push_result_count =
-      is_topmost ? (PadTopOfStackRegister() ? 2 : 1) : 0;
-
-  const unsigned output_frame_size =
-      kSystemPointerSize * (stack_param_count + stack_param_pad_count +
-                            allocatable_register_count + padding_slot_count +
-                            push_result_count) +
-      BuiltinContinuationFrameConstants::kFixedFrameSize;
+  const int parameters_count = translated_frame->height();
+  BuiltinContinuationFrameInfo frame_info =
+      BuiltinContinuationFrameInfo::Precise(parameters_count,
+                                            continuation_descriptor, config,
+                                            is_topmost, deopt_kind_, mode);
 
+  const unsigned output_frame_size = frame_info.frame_size_in_bytes();
   const unsigned output_frame_size_above_fp =
-      kSystemPointerSize * (allocatable_register_count + padding_slot_count +
-                            push_result_count) +
-      (BuiltinContinuationFrameConstants::kFixedFrameSize -
-       BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp);
+      frame_info.frame_size_in_bytes_above_fp();
 
   // Validate types of parameters. They must all be tagged except for argc for
   // JS builtins.
   bool has_argc = false;
+  const int register_parameter_count =
+      continuation_descriptor.GetRegisterParameterCount();
   for (int i = 0; i < register_parameter_count; ++i) {
     MachineType type = continuation_descriptor.GetParameterType(i);
     int code = continuation_descriptor.GetRegisterParameter(i).code();
@@ -1531,25 +1473,22 @@ void Deoptimizer::DoComputeBuiltinContinuation(
   if (trace_scope_ != nullptr) {
     PrintF(trace_scope_->file(),
            "  translating BuiltinContinuation to %s,"
-           " register param count %d,"
-           " stack param count %d\n",
+           " => register_param_count=%d,"
+           " stack_param_count=%d, frame_size=%d\n",
            Builtins::name(builtin_name), register_parameter_count,
-           stack_param_count);
+           frame_info.stack_parameter_count(), output_frame_size);
   }
 
   FrameDescription* output_frame = new (output_frame_size)
-      FrameDescription(output_frame_size, stack_param_count);
+      FrameDescription(output_frame_size, frame_info.stack_parameter_count());
   output_[frame_index] = output_frame;
   FrameWriter frame_writer(this, output_frame, trace_scope_);
 
   // The top address of the frame is computed from the previous frame's top and
   // this frame's size.
-  intptr_t top_address;
-  if (is_bottommost) {
-    top_address = caller_frame_top_ - output_frame_size;
-  } else {
-    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  }
+  const intptr_t top_address =
+      is_bottommost ? caller_frame_top_ - output_frame_size
+                    : output_[frame_index - 1]->GetTop() - output_frame_size;
   output_frame->SetTop(top_address);
 
   // Get the possible JSFunction for the case that this is a
@@ -1559,11 +1498,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
   ++value_iterator;
 
   ReadOnlyRoots roots(isolate());
-  if (ShouldPadArguments(stack_param_count)) {
+  if (ShouldPadArguments(frame_info.stack_parameter_count())) {
     frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
   }
 
-  for (int i = 0; i < translated_stack_parameters; ++i, ++value_iterator) {
+  for (uint32_t i = 0; i < frame_info.translated_stack_parameter_count();
+       ++i, ++value_iterator) {
     frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
   }
 
@@ -1584,7 +1524,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
     } break;
   }
 
-  if (must_handle_result) {
+  if (frame_info.frame_has_result_stack_slot()) {
     frame_writer.PushRawObject(roots.the_hole_value(),
                                "placeholder for return result on lazy deopt\n");
   }
@@ -1659,9 +1599,10 @@ void Deoptimizer::DoComputeBuiltinContinuation(
                                    "builtin JavaScript context\n");
 
   // The builtin to continue to.
-  frame_writer.PushRawObject(Smi::FromInt(builtin.builtin_index()),
-                             "builtin index\n");
+  frame_writer.PushRawObject(Smi::FromInt(builtin_name), "builtin index\n");
 
+  const int allocatable_register_count =
+      config->num_allocatable_general_registers();
   for (int i = 0; i < allocatable_register_count; ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     ScopedVector<char> str(128);
@@ -1683,17 +1624,20 @@ void Deoptimizer::DoComputeBuiltinContinuation(
 
   // Some architectures must pad the stack frame with extra stack slots
   // to ensure the stack frame is aligned.
+  const int padding_slot_count =
+      BuiltinContinuationFrameConstants::PaddingSlotCount(
+          allocatable_register_count);
   for (int i = 0; i < padding_slot_count; ++i) {
     frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
   }
 
   if (is_topmost) {
-    if (PadTopOfStackRegister()) {
+    if (kPadArguments) {
       frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
     }
-    // Ensure the result is restored back when we return to the stub.
 
-    if (must_handle_result) {
+    // Ensure the result is restored back when we return to the stub.
+    if (frame_info.frame_has_result_stack_slot()) {
       Register result_reg = kReturnRegister0;
       frame_writer.PushRawValue(input_->GetRegister(result_reg.code()),
                                 "callback result\n");
@@ -1719,8 +1663,9 @@ void Deoptimizer::DoComputeBuiltinContinuation(
   Register fp_reg = JavaScriptFrame::fp_register();
   output_frame->SetRegister(fp_reg.code(), fp_value);
 
-  Code continue_to_builtin = isolate()->builtins()->builtin(
-      TrampolineForBuiltinContinuation(mode, must_handle_result));
+  Code continue_to_builtin =
+      isolate()->builtins()->builtin(TrampolineForBuiltinContinuation(
+          mode, frame_info.frame_has_result_stack_slot()));
   output_frame->SetPc(
       static_cast<intptr_t>(continue_to_builtin.InstructionStart()));
 
@@ -1800,18 +1745,10 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
   return result;
 }
 
-// static
-unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo shared) {
-  // The fixed part of the frame consists of the return address, frame
-  // pointer, function, context, bytecode offset and all the incoming arguments.
-  return ComputeIncomingArgumentSize(shared) +
-         InterpreterFrameConstants::kFixedFrameSize;
-}
-
 // static
 unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
-  int parameter_slots = shared.internal_formal_parameter_count() + 1;
-  if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
+  int parameter_slots = InternalFormalParameterCountWithReceiver(shared);
+  if (ShouldPadArguments(parameter_slots)) parameter_slots++;
   return parameter_slots * kSystemPointerSize;
 }
 
@@ -2254,12 +2191,9 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
   stack_it++;
 
   // Get the expression stack.
-  int stack_height = frame_it->height();
-  if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
-    // For interpreter frames, we should not count the accumulator.
-    // TODO(jarin): Clean up the indexing in translated frames.
-    stack_height--;
-  }
+  DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind());
+  const int stack_height = frame_it->height();  // Accumulator *not* included.
+
   expression_stack_.resize(static_cast<size_t>(stack_height));
   for (int i = 0; i < stack_height; i++) {
     Handle<Object> expression = GetValueForDebugger(stack_it, isolate);
@@ -2267,10 +2201,9 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
     stack_it++;
   }
 
-  // For interpreter frame, skip the accumulator.
-  if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
-    stack_it++;
-  }
+  DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind());
+  stack_it++;  // Skip the accumulator.
+
   CHECK(stack_it == frame_it->end());
 }
 
@@ -2688,20 +2621,30 @@ TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
 }
 
 int TranslatedFrame::GetValueCount() {
+  // The function is added to all frame state descriptors in
+  // InstructionSelector::AddInputsToFrameStateDescriptor.
+  static constexpr int kTheFunction = 1;
+
   switch (kind()) {
     case kInterpretedFunction: {
       int parameter_count =
-          raw_shared_info_.internal_formal_parameter_count() + 1;
-      // + 2 for function and context.
-      return height_ + parameter_count + 2;
+          InternalFormalParameterCountWithReceiver(raw_shared_info_);
+      static constexpr int kTheContext = 1;
+      static constexpr int kTheAccumulator = 1;
+      return height() + parameter_count + kTheContext + kTheFunction +
+             kTheAccumulator;
     }
 
     case kArgumentsAdaptor:
+      return height() + kTheFunction;
+
     case kConstructStub:
     case kBuiltinContinuation:
     case kJavaScriptBuiltinContinuation:
-    case kJavaScriptBuiltinContinuationWithCatch:
-      return 1 + height_;
+    case kJavaScriptBuiltinContinuationWithCatch: {
+      static constexpr int kTheContext = 1;
+      return height() + kTheContext + kTheFunction;
+    }
 
     case kInvalid:
       UNREACHABLE();
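GetValueCount above replaces bare "+ 1"/"+ 2" offsets with named constants for
each implicit slot. The interpreted-frame case as standalone arithmetic, with
hypothetical counts (not V8 API):

constexpr int kTheFunction = 1;
constexpr int kTheContext = 1;
constexpr int kTheAccumulator = 1;

constexpr int InterpretedValueCount(int height, int params_with_receiver) {
  return height + params_with_receiver + kTheContext + kTheFunction +
         kTheAccumulator;
}

// E.g. three interpreter registers plus fn(a) => 2 params incl. receiver:
static_assert(InterpretedValueCount(3, 2) == 8, "3 + 2 + 1 + 1 + 1");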
@@ -2736,7 +2679,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
       if (trace_file != nullptr) {
         std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
         PrintF(trace_file, "  reading input frame %s", name.get());
-        int arg_count = shared_info.internal_formal_parameter_count() + 1;
+        int arg_count = InternalFormalParameterCountWithReceiver(shared_info);
         PrintF(trace_file,
                " => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); "
                "inputs:\n",
@@ -2787,11 +2730,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
         PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
                bailout_id.ToInt(), height);
       }
-      // Add one to the height to account for the context which was implicitly
-      // added to the translation during code generation.
-      int height_with_context = height + 1;
       return TranslatedFrame::BuiltinContinuationFrame(bailout_id, shared_info,
-                                                       height_with_context);
+                                                       height);
     }
 
     case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
@@ -2806,11 +2746,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
         PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
                bailout_id.ToInt(), height);
       }
-      // Add one to the height to account for the context which was implicitly
-      // added to the translation during code generation.
-      int height_with_context = height + 1;
       return TranslatedFrame::JavaScriptBuiltinContinuationFrame(
-          bailout_id, shared_info, height_with_context);
+          bailout_id, shared_info, height);
     }
     case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
       BailoutId bailout_id = BailoutId(iterator->Next());
@@ -2825,11 +2762,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
         PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
                bailout_id.ToInt(), height);
       }
-      // Add one to the height to account for the context which was implicitly
-      // added to the translation during code generation.
-      int height_with_context = height + 1;
       return TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
-          bailout_id, shared_info, height_with_context);
+          bailout_id, shared_info, height);
     }
     case Translation::UPDATE_FEEDBACK:
     case Translation::BEGIN:
@@ -3450,7 +3384,7 @@ void TranslatedState::InitializeCapturedObjectAt(
 
   // Handle the special cases.
   switch (map->instance_type()) {
-    case MUTABLE_HEAP_NUMBER_TYPE:
+    case HEAP_NUMBER_TYPE:
     case FIXED_DOUBLE_ARRAY_TYPE:
       return;
 
@@ -3528,15 +3462,14 @@ void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame,
   slot->set_storage(array);
 }
 
-void TranslatedState::MaterializeMutableHeapNumber(TranslatedFrame* frame,
-                                                   int* value_index,
-                                                   TranslatedValue* slot) {
+void TranslatedState::MaterializeHeapNumber(TranslatedFrame* frame,
+                                            int* value_index,
+                                            TranslatedValue* slot) {
   CHECK_NE(TranslatedValue::kCapturedObject,
            frame->values_[*value_index].kind());
   Handle<Object> value = frame->values_[*value_index].GetValue();
   CHECK(value->IsNumber());
-  Handle<MutableHeapNumber> box =
-      isolate()->factory()->NewMutableHeapNumber(value->Number());
+  Handle<HeapNumber> box = isolate()->factory()->NewHeapNumber(value->Number());
   (*value_index)++;
   slot->set_storage(box);
 }
@@ -3592,10 +3525,10 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
       // there is no need to process the children.
       return MaterializeFixedDoubleArray(frame, &value_index, slot, map);
 
-    case MUTABLE_HEAP_NUMBER_TYPE:
+    case HEAP_NUMBER_TYPE:
       // Materialize (i.e. allocate&initialize) the heap number and return.
       // There is no need to process the children.
-      return MaterializeMutableHeapNumber(frame, &value_index, slot);
+      return MaterializeHeapNumber(frame, &value_index, slot);
 
     case FIXED_ARRAY_TYPE:
     case SCRIPT_CONTEXT_TABLE_TYPE:
@@ -3813,7 +3746,7 @@ void TranslatedState::InitializeJSObjectAt(
       }
       object_storage->WriteField<double>(offset, double_field_value);
     } else if (marker == kStoreMutableHeapNumber) {
-      CHECK(field_value->IsMutableHeapNumber());
+      CHECK(field_value->IsHeapNumber());
       WRITE_FIELD(*object_storage, offset, *field_value);
       WRITE_BARRIER(*object_storage, offset, *field_value);
     } else {
@@ -3848,10 +3781,9 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
     int offset = i * kTaggedSize;
     uint8_t marker = object_storage->ReadField<uint8_t>(offset);
     if (i > 1 && marker == kStoreMutableHeapNumber) {
-      CHECK(field_value->IsMutableHeapNumber());
+      CHECK(field_value->IsHeapNumber());
     } else {
       CHECK(marker == kStoreTagged || i == 1);
-      CHECK(!field_value->IsMutableHeapNumber());
     }
 
     WRITE_FIELD(*object_storage, offset, *field_value);
@@ -3918,15 +3850,16 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
           // to last value in the TranslatedFrame. It should also always be
           // {1}, as the GenericLazyDeoptContinuation builtin only has one
           // argument (the receiver).
-          const int height = frames_[i].height();
+          static constexpr int kTheContext = 1;
+          const int height = frames_[i].height() + kTheContext;
           Object argc_object = frames_[i].ValueAt(height - 1)->GetRawValue();
           CHECK(argc_object.IsSmi());
           *args_count = Smi::ToInt(argc_object);
 
           DCHECK_EQ(*args_count, 1);
         } else {
-          *args_count =
-              frames_[i].shared_info()->internal_formal_parameter_count() + 1;
+          *args_count = InternalFormalParameterCountWithReceiver(
+              *frames_[i].shared_info());
         }
         return &(frames_[i]);
       }
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index a2471247ef2032..6d0a350aaceb59 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -35,6 +35,8 @@ class TranslatedState;
 class RegisterValues;
 class MacroAssembler;
 
+enum class BuiltinContinuationMode;
+
 class TranslatedValue {
  public:
   // Allocation-less getter of the value.
@@ -172,7 +174,14 @@ class TranslatedFrame {
   Kind kind() const { return kind_; }
   BailoutId node_id() const { return node_id_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+
+  // TODO(jgruber): Simplify/clarify the semantics of this field. The name
+  // `height` is slightly misleading. Yes, this value is related to stack frame
+  // height, but must undergo additional mutations to arrive at the real stack
+  // frame height (e.g.: addition/subtraction of context, accumulator, fixed
+  // frame sizes, padding).
   int height() const { return height_; }
+
   int return_value_offset() const { return return_value_offset_; }
   int return_value_count() const { return return_value_count_; }
 
@@ -352,8 +361,8 @@ class TranslatedState {
   void UpdateFromPreviouslyMaterializedObjects();
   void MaterializeFixedDoubleArray(TranslatedFrame* frame, int* value_index,
                                    TranslatedValue* slot, Handle<Map> map);
-  void MaterializeMutableHeapNumber(TranslatedFrame* frame, int* value_index,
-                                    TranslatedValue* slot);
+  void MaterializeHeapNumber(TranslatedFrame* frame, int* value_index,
+                             TranslatedValue* slot);
 
   void EnsureObjectAllocatedAt(TranslatedValue* slot);
 
@@ -501,12 +510,14 @@ class Deoptimizer : public Malloced {
 
   static const int kMaxNumberOfEntries = 16384;
 
-  enum class BuiltinContinuationMode {
-    STUB,
-    JAVASCRIPT,
-    JAVASCRIPT_WITH_CATCH,
-    JAVASCRIPT_HANDLE_EXCEPTION
-  };
+  // Set to true when the architecture supports deoptimization exit sequences
+  // of a fixed size, which can be sorted so that the deoptimization index
+  // can be deduced from the address of the deoptimization exit.
+  static const bool kSupportsFixedDeoptExitSize;
+
+  // Size of deoptimization exit sequence. This is only meaningful when
+  // kSupportsFixedDeoptExitSize is true.
+  static const int kDeoptExitSize;
 
  private:
   friend class FrameWriter;
@@ -530,8 +541,6 @@ class Deoptimizer : public Malloced {
   void DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
                                    int frame_index);
 
-  static bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode);
-  static bool BuiltinContinuationModeIsJavaScript(BuiltinContinuationMode mode);
   static Builtins::Name TrampolineForBuiltinContinuation(
       BuiltinContinuationMode mode, bool must_handle_result);
 
@@ -541,7 +550,6 @@ class Deoptimizer : public Malloced {
 
   unsigned ComputeInputFrameAboveFpFixedSize() const;
   unsigned ComputeInputFrameSize() const;
-  static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo shared);
 
   static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo shared);
   static unsigned ComputeOutgoingArgumentSize(Code code, unsigned bailout_id);
@@ -552,11 +560,6 @@ class Deoptimizer : public Malloced {
 
   static void MarkAllCodeForContext(NativeContext native_context);
   static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
-
-  // Some architectures need to push padding together with the TOS register
-  // in order to maintain stack alignment.
-  static bool PadTopOfStackRegister();
-
   // Searches the list of known deoptimizing code for a Code object
   // containing the given address (which is supposedly faster than
   // searching all code objects).
@@ -626,10 +629,7 @@ class RegisterValues {
     return registers_[n];
   }
 
-  Float32 GetFloatRegister(unsigned n) const {
-    DCHECK(n < arraysize(float_registers_));
-    return float_registers_[n];
-  }
+  Float32 GetFloatRegister(unsigned n) const;
 
   Float64 GetDoubleRegister(unsigned n) const {
     DCHECK(n < arraysize(double_registers_));
@@ -641,23 +641,10 @@ class RegisterValues {
     registers_[n] = value;
   }
 
-  void SetFloatRegister(unsigned n, Float32 value) {
-    DCHECK(n < arraysize(float_registers_));
-    float_registers_[n] = value;
-  }
-
-  void SetDoubleRegister(unsigned n, Float64 value) {
-    DCHECK(n < arraysize(double_registers_));
-    double_registers_[n] = value;
-  }
-
-  // Generated code is writing directly into the below arrays, make sure their
-  // element sizes fit what the machine instructions expect.
-  static_assert(sizeof(Float32) == kFloatSize, "size mismatch");
-  static_assert(sizeof(Float64) == kDoubleSize, "size mismatch");
-
   intptr_t registers_[Register::kNumRegisters];
-  Float32 float_registers_[FloatRegister::kNumRegisters];
+  // Generated code writes directly into the following array; make sure the
+  // element size matches what the machine instructions expect.
+  static_assert(sizeof(Float64) == kDoubleSize, "size mismatch");
   Float64 double_registers_[DoubleRegister::kNumRegisters];
 };
 
@@ -687,7 +674,7 @@ class FrameDescription {
 
   unsigned GetLastArgumentSlotOffset() {
     int parameter_slots = parameter_count();
-    if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
+    if (ShouldPadArguments(parameter_slots)) parameter_slots++;
     return GetFrameSize() - parameter_slots * kSystemPointerSize;
   }
 
@@ -721,10 +708,6 @@ class FrameDescription {
     register_values_.SetRegister(n, value);
   }
 
-  void SetDoubleRegister(unsigned n, Float64 value) {
-    register_values_.SetDoubleRegister(n, value);
-  }
-
   intptr_t GetTop() const { return top_; }
   void SetTop(intptr_t top) { top_ = top; }
 
@@ -755,10 +738,6 @@ class FrameDescription {
     return OFFSET_OF(FrameDescription, register_values_.double_registers_);
   }
 
-  static int float_registers_offset() {
-    return OFFSET_OF(FrameDescription, register_values_.float_registers_);
-  }
-
   static int frame_size_offset() {
     return offsetof(FrameDescription, frame_size_);
   }
diff --git a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
index f40ff562bed71b..6c82512d40db44 100644
--- a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
@@ -13,6 +13,9 @@
 namespace v8 {
 namespace internal {
 
+const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
+const int Deoptimizer::kDeoptExitSize = 0;
+
 #define __ masm->
 
 void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
@@ -33,24 +36,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     __ movsd(Operand(esp, offset), xmm_reg);
   }
 
-  STATIC_ASSERT(kFloatSize == kSystemPointerSize);
-  const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
-  __ AllocateStackSpace(kFloatRegsSize);
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    XMMRegister xmm_reg = XMMRegister::from_code(code);
-    int offset = code * kFloatSize;
-    __ movss(Operand(esp, offset), xmm_reg);
-  }
-
   __ pushad();
 
   ExternalReference c_entry_fp_address =
       ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
   __ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
 
-  const int kSavedRegistersAreaSize = kNumberOfRegisters * kSystemPointerSize +
-                                      kDoubleRegsSize + kFloatRegsSize;
+  const int kSavedRegistersAreaSize =
+      kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
 
   // The bailout id is passed in ebx by the caller.
 
@@ -94,13 +87,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     __ pop(Operand(esi, offset));
   }
 
-  int float_regs_offset = FrameDescription::float_registers_offset();
-  // Fill in the float input registers.
-  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
-    int dst_offset = i * kFloatSize + float_regs_offset;
-    __ pop(Operand(esi, dst_offset));
-  }
-
   int double_regs_offset = FrameDescription::double_registers_offset();
   // Fill in the double input registers.
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -213,7 +199,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   __ ret(0);
 }
 
-bool Deoptimizer::PadTopOfStackRegister() { return false; }
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+  return Float32::FromBits(
+      static_cast<uint32_t>(double_registers_[n].get_bits()));
+}
 
 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
   SetFrameSlot(offset, value);
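With the dedicated float_registers_ array gone, a Float32 is now recovered on demand from the corresponding double-register slot. On ia32 (as on x64 and MIPS below), the single-precision bits sit in the low 32 bits of the 64-bit slot. A self-contained sketch of that extraction, using plain integer types as stand-ins for V8's Float32/Float64 wrappers:

    #include <cstdint>
    #include <cstring>

    // Reinterpret the low 32 bits of a 64-bit register slot as a float,
    // mirroring Float32::FromBits(static_cast<uint32_t>(bits)).
    float FloatFromLowBits(uint64_t slot_bits) {
      uint32_t low = static_cast<uint32_t>(slot_bits);  // keeps the low word
      float result;
      std::memcpy(&result, &low, sizeof(result));  // bit-preserving reinterpret
      return result;
    }
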
diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index 07bc9a511baa4d..6f4edee46c7077 100644
--- a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -10,6 +10,9 @@
 namespace v8 {
 namespace internal {
 
+const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
+const int Deoptimizer::kDeoptExitSize = 0;
+
 #define __ masm->
 
 // This code tries to be close to ia32 code so that any changes can be
@@ -27,7 +30,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   RegList saved_regs = restored_regs | sp.bit() | ra.bit();
 
   const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-  const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;
 
   // Save all FPU registers before messing with them.
   __ Subu(sp, sp, Operand(kDoubleRegsSize));
@@ -39,14 +41,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     __ Sdc1(fpu_reg, MemOperand(sp, offset));
   }
 
-  __ Subu(sp, sp, Operand(kFloatRegsSize));
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    const FloatRegister fpu_reg = FloatRegister::from_code(code);
-    int offset = code * kFloatSize;
-    __ swc1(fpu_reg, MemOperand(sp, offset));
-  }
-
   // Push saved_regs (needed to populate FrameDescription::registers_).
   // Leave gaps for other registers.
   __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
@@ -61,7 +55,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   __ sw(fp, MemOperand(a2));
 
   const int kSavedRegistersAreaSize =
-      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
 
   // The bailout id is passed in kRootRegister by the caller.
   __ mov(a2, kRootRegister);
@@ -120,22 +114,11 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     int code = config->GetAllocatableDoubleCode(i);
     int dst_offset = code * kDoubleSize + double_regs_offset;
     int src_offset =
-        code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
+        code * kDoubleSize + kNumberOfRegisters * kPointerSize;
     __ Ldc1(f0, MemOperand(sp, src_offset));
     __ Sdc1(f0, MemOperand(a1, dst_offset));
   }
 
-  // Copy FPU registers to
-  // float_registers_[FloatRegister::kNumAllocatableRegisters]
-  int float_regs_offset = FrameDescription::float_registers_offset();
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    int dst_offset = code * kFloatSize + float_regs_offset;
-    int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
-    __ lwc1(f0, MemOperand(sp, src_offset));
-    __ swc1(f0, MemOperand(a1, dst_offset));
-  }
-
   // Remove the saved registers from the stack.
   __ Addu(sp, sp, Operand(kSavedRegistersAreaSize));
 
@@ -235,7 +218,10 @@ const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
 const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
 #endif
 
-bool Deoptimizer::PadTopOfStackRegister() { return false; }
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+  return Float32::FromBits(
+      static_cast<uint32_t>(double_registers_[n].get_bits()));
+}
 
 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
   SetFrameSlot(offset, value);
diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index f85659c4ab6797..8f8183b4923a82 100644
--- a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -10,6 +10,9 @@
 namespace v8 {
 namespace internal {
 
+const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
+const int Deoptimizer::kDeoptExitSize = 0;
+
 #define __ masm->
 
 // This code tries to be close to ia32 code so that any changes can be
@@ -27,7 +30,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   RegList saved_regs = restored_regs | sp.bit() | ra.bit();
 
   const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-  const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;
 
   // Save all double FPU registers before messing with them.
   __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
@@ -39,15 +41,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     __ Sdc1(fpu_reg, MemOperand(sp, offset));
   }
 
-  // Save all float FPU registers before messing with them.
-  __ Dsubu(sp, sp, Operand(kFloatRegsSize));
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    const FloatRegister fpu_reg = FloatRegister::from_code(code);
-    int offset = code * kFloatSize;
-    __ Swc1(fpu_reg, MemOperand(sp, offset));
-  }
-
   // Push saved_regs (needed to populate FrameDescription::registers_).
   // Leave gaps for other registers.
   __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
@@ -62,7 +55,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   __ Sd(fp, MemOperand(a2));
 
   const int kSavedRegistersAreaSize =
-      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
 
   // The bailout id is passed in kRootRegister by the caller.
   __ mov(a2, kRootRegister);
@@ -122,22 +115,11 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     int code = config->GetAllocatableDoubleCode(i);
     int dst_offset = code * kDoubleSize + double_regs_offset;
     int src_offset =
-        code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
+        code * kDoubleSize + kNumberOfRegisters * kPointerSize;
     __ Ldc1(f0, MemOperand(sp, src_offset));
     __ Sdc1(f0, MemOperand(a1, dst_offset));
   }
 
-  int float_regs_offset = FrameDescription::float_registers_offset();
-  // Copy FPU registers to
-  // float_registers_[FloatRegister::kNumAllocatableRegisters]
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    int dst_offset = code * kFloatSize + float_regs_offset;
-    int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
-    __ Lwc1(f0, MemOperand(sp, src_offset));
-    __ Swc1(f0, MemOperand(a1, dst_offset));
-  }
-
   // Remove the saved registers from the stack.
   __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize));
 
@@ -236,7 +218,10 @@ const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
 const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
 #endif
 
-bool Deoptimizer::PadTopOfStackRegister() { return false; }
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+  return Float32::FromBits(
+      static_cast<uint32_t>(double_registers_[n].get_bits()));
+}
 
 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
   SetFrameSlot(offset, value);
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index 41616a5af2926d..864e9dbe368b63 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -11,6 +11,9 @@
 namespace v8 {
 namespace internal {
 
+const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
+const int Deoptimizer::kDeoptExitSize = 0;
+
 #define __ masm->
 
 // This code tries to be close to ia32 code so that any changes can be
@@ -28,7 +31,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   RegList saved_regs = restored_regs | sp.bit();
 
   const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-  const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;
 
   // Save all double registers before messing with them.
   __ subi(sp, sp, Operand(kDoubleRegsSize));
@@ -39,14 +41,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     int offset = code * kDoubleSize;
     __ stfd(dreg, MemOperand(sp, offset));
   }
-  // Save all float registers before messing with them.
-  __ subi(sp, sp, Operand(kFloatRegsSize));
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    const FloatRegister freg = FloatRegister::from_code(code);
-    int offset = code * kFloatSize;
-    __ stfs(freg, MemOperand(sp, offset));
-  }
 
   // Push saved_regs (needed to populate FrameDescription::registers_).
   // Leave gaps for other registers.
@@ -64,7 +58,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     __ StoreP(fp, MemOperand(scratch));
   }
   const int kSavedRegistersAreaSize =
-      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
 
   // The bailout id is passed in r29 by the caller.
   __ mr(r5, r29);
@@ -114,21 +108,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     int dst_offset = code * kDoubleSize + double_regs_offset;
-    int src_offset =
-        code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
+    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
     __ lfd(d0, MemOperand(sp, src_offset));
     __ stfd(d0, MemOperand(r4, dst_offset));
   }
-  int float_regs_offset = FrameDescription::float_registers_offset();
-  // Copy float registers to
-  // float_registers_[FloatRegister::kNumRegisters]
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    int dst_offset = code * kFloatSize + float_regs_offset;
-    int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
-    __ lfs(d0, MemOperand(sp, src_offset));
-    __ stfs(d0, MemOperand(r4, dst_offset));
-  }
 
   // Remove the saved registers from the stack.
   __ addi(sp, sp, Operand(kSavedRegistersAreaSize));
@@ -236,7 +219,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   __ stop();
 }
 
-bool Deoptimizer::PadTopOfStackRegister() { return false; }
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+  float float_val = static_cast<float>(double_registers_[n].get_scalar());
+  return Float32::FromBits(bit_cast<uint32_t>(float_val));
+}
 
 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
   SetFrameSlot(offset, value);
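PPC is the outlier: its floating-point registers always hold a double-precision value, so GetFloatRegister narrows the scalar and re-encodes its bits instead of truncating the 64-bit pattern. The distinction in isolation, with the same stand-in types:

    #include <cstdint>
    #include <cstring>

    // Narrow the double *value* to float, then take that float's bit pattern.
    // Truncating the double's bit pattern instead would yield garbage here.
    uint32_t FloatBitsByNarrowing(uint64_t slot_bits) {
      double d;
      std::memcpy(&d, &slot_bits, sizeof(d));
      float f = static_cast<float>(d);  // value conversion, may round
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits;
    }
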
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index 6da740b0e5f5e6..616a57ba0e420c 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -10,6 +10,9 @@
 namespace v8 {
 namespace internal {
 
+const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
+const int Deoptimizer::kDeoptExitSize = 0;
+
 #define __ masm->
 
 // This code tries to be close to ia32 code so that any changes can be
@@ -25,7 +28,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   RegList restored_regs = kJSCallerSaved | kCalleeSaved;
 
   const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-  const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;
 
   // Save all double registers before messing with them.
   __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
@@ -36,14 +38,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     int offset = code * kDoubleSize;
     __ StoreDouble(dreg, MemOperand(sp, offset));
   }
-  // Save all float registers before messing with them.
-  __ lay(sp, MemOperand(sp, -kFloatRegsSize));
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    const FloatRegister dreg = FloatRegister::from_code(code);
-    int offset = code * kFloatSize;
-    __ StoreFloat32(dreg, MemOperand(sp, offset));
-  }
 
   // Push all GPRs onto the stack
   __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
@@ -54,7 +48,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   __ StoreP(fp, MemOperand(r1));
 
   const int kSavedRegistersAreaSize =
-      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
 
   // The bailout id is passed in r10 by the caller.
   __ LoadRR(r4, r10);
@@ -116,25 +110,12 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     int dst_offset = code * kDoubleSize + double_regs_offset;
-    int src_offset =
-        code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
+    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
     // TODO(joransiu): MVC opportunity
     __ LoadDouble(d0, MemOperand(sp, src_offset));
     __ StoreDouble(d0, MemOperand(r3, dst_offset));
   }
 
-  int float_regs_offset = FrameDescription::float_registers_offset();
-  // Copy float registers to
-  // float_registers_[FloatRegister::kNumRegisters]
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    int dst_offset = code * kFloatSize + float_regs_offset;
-    int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
-    // TODO(joransiu): MVC opportunity
-    __ LoadFloat32(d0, MemOperand(sp, src_offset));
-    __ StoreFloat32(d0, MemOperand(r3, dst_offset));
-  }
-
   // Remove the saved registers from the stack.
   __ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
 
@@ -231,7 +212,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   __ stop();
 }
 
-bool Deoptimizer::PadTopOfStackRegister() { return false; }
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+  return Float32::FromBits(
+      static_cast<uint32_t>(double_registers_[n].get_bits() >> 32));
+}
 
 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
   SetFrameSlot(offset, value);
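On s390 the single-precision value occupies the high half of the 64-bit register slot, hence the shift by 32. The same sketch, adjusted for that layout:

    #include <cstdint>
    #include <cstring>

    // s390 keeps the float in the upper 32 bits of the register slot.
    float FloatFromHighBits(uint64_t slot_bits) {
      uint32_t high = static_cast<uint32_t>(slot_bits >> 32);
      float result;
      std::memcpy(&result, &high, sizeof(result));
      return result;
    }
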
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index cfdd6c9ef1151d..29c81f195c1679 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -13,6 +13,9 @@
 namespace v8 {
 namespace internal {
 
+const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
+const int Deoptimizer::kDeoptExitSize = 0;
+
 #define __ masm->
 
 void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
@@ -34,16 +37,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     __ Movsd(Operand(rsp, offset), xmm_reg);
   }
 
-  const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
-  __ AllocateStackSpace(kFloatRegsSize);
-
-  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
-    int code = config->GetAllocatableFloatCode(i);
-    XMMRegister xmm_reg = XMMRegister::from_code(code);
-    int offset = code * kFloatSize;
-    __ Movss(Operand(rsp, offset), xmm_reg);
-  }
-
   // We push all registers onto the stack, even though we do not need
   // to restore all later.
   for (int i = 0; i < kNumberOfRegisters; i++) {
@@ -51,8 +44,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     __ pushq(r);
   }
 
-  const int kSavedRegistersAreaSize = kNumberOfRegisters * kSystemPointerSize +
-                                      kDoubleRegsSize + kFloatRegsSize;
+  const int kSavedRegistersAreaSize =
+      kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
 
   __ Store(
       ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
@@ -112,16 +105,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
     __ PopQuad(Operand(rbx, offset));
   }
 
-  // Fill in the float input registers.
-  int float_regs_offset = FrameDescription::float_registers_offset();
-  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
-    int src_offset = i * kFloatSize;
-    int dst_offset = i * kFloatSize + float_regs_offset;
-    __ movl(rcx, Operand(rsp, src_offset));
-    __ movl(Operand(rbx, dst_offset), rcx);
-  }
-  __ addq(rsp, Immediate(kFloatRegsSize));
-
   // Fill in the double input registers.
   int double_regs_offset = FrameDescription::double_registers_offset();
   for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
@@ -232,7 +215,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
   __ ret(0);
 }
 
-bool Deoptimizer::PadTopOfStackRegister() { return false; }
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+  return Float32::FromBits(
+      static_cast<uint32_t>(double_registers_[n].get_bits()));
+}
 
 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
   if (kPCOnStackSize == 2 * kSystemPointerSize) {
diff --git a/deps/v8/src/diagnostics/OWNERS b/deps/v8/src/diagnostics/OWNERS
index 852d438bb0a884..48d72aea5eec22 100644
--- a/deps/v8/src/diagnostics/OWNERS
+++ b/deps/v8/src/diagnostics/OWNERS
@@ -1 +1 @@
-file://COMMON_OWNERS
+file:../../COMMON_OWNERS
diff --git a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
index e51986ee4c3456..7141cdf2837dbd 100644
--- a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -1417,14 +1417,33 @@ void DisassemblingDecoder::VisitFPFixedPointConvert(Instruction* instr) {
   Format(instr, mnemonic, form);
 }
 
+// clang-format off
+#define PAUTH_SYSTEM_MNEMONICS(V) \
+  V(PACIA1716, "pacia1716")       \
+  V(AUTIA1716, "autia1716")       \
+  V(PACIASP,   "paciasp")         \
+  V(AUTIASP,   "autiasp")
+// clang-format on
+
 void DisassemblingDecoder::VisitSystem(Instruction* instr) {
   // Some system instructions hijack their Op and Cp fields to represent a
   // range of immediates instead of indicating a different instruction. This
   // makes the decoding tricky.
   const char* mnemonic = "unimplemented";
   const char* form = "(System)";
+  if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) {
+    switch (instr->Mask(SystemPAuthMask)) {
+#define PAUTH_CASE(NAME, MN) \
+  case NAME:                 \
+    mnemonic = MN;           \
+    form = NULL;             \
+    break;
 
-  if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+      PAUTH_SYSTEM_MNEMONICS(PAUTH_CASE)
+#undef PAUTH_CASE
+#undef PAUTH_SYSTEM_MNEMONICS
+    }
+  } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
     switch (instr->Mask(SystemSysRegMask)) {
       case MRS: {
         mnemonic = "mrs";
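The PAUTH_SYSTEM_MNEMONICS table is a classic X-macro: the list is defined once and expanded into switch cases via PAUTH_CASE. A tiny standalone illustration of the pattern (names and values below are made up purely for demonstration):

    #include <cstdio>

    // Define the table once...
    #define COLOR_LIST(V) \
      V(kRed, "red")      \
      V(kGreen, "green")

    enum Color { kRed = 1, kGreen = 2 };

    const char* ColorName(Color c) {
      switch (c) {
    // ...then expand it wherever one case per entry is needed.
    #define COLOR_CASE(NAME, STR) \
      case NAME:                  \
        return STR;
        COLOR_LIST(COLOR_CASE)
    #undef COLOR_CASE
      }
      return "unknown";
    }

    int main() { std::printf("%s\n", ColorName(kGreen)); }
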
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index dc3b3b8091c303..6860ead0223dec 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -164,9 +164,6 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
     case HEAP_NUMBER_TYPE:
       CHECK(IsHeapNumber());
       break;
-    case MUTABLE_HEAP_NUMBER_TYPE:
-      CHECK(IsMutableHeapNumber());
-      break;
     case BIGINT_TYPE:
       BigInt::cast(*this).BigIntVerify(isolate);
       break;
@@ -582,7 +579,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
       // There are two reasons why this can happen:
       // - in the middle of StoreTransitionStub when the new extended backing
       //   store is already set into the object and the allocation of the
-      //   MutableHeapNumber triggers GC while the map isn't updated yet.
+      //   HeapNumber triggers GC while the map isn't updated yet.
       // - deletion of the last property can leave additional backing store
       //   capacity behind.
       CHECK_GT(actual_unused_property_fields, map().UnusedPropertyFields());
@@ -607,7 +604,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
           VerifyObjectField(isolate, index.offset());
         }
         Object value = RawFastPropertyAt(index);
-        if (r.IsDouble()) DCHECK(value.IsMutableHeapNumber());
+        if (r.IsDouble()) DCHECK(value.IsHeapNumber());
         if (value.IsUninitialized(isolate)) continue;
         if (r.IsSmi()) DCHECK(value.IsSmi());
         if (r.IsHeapObject()) DCHECK(value.IsHeapObject());
@@ -638,7 +635,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
   // pointer may point to a one pointer filler map.
   if (ElementsAreSafeToExamine(isolate)) {
     CHECK_EQ((map().has_fast_smi_or_object_elements() ||
-              map().has_frozen_or_sealed_elements() ||
+              map().has_any_nonextensible_elements() ||
               (elements() == GetReadOnlyRoots().empty_fixed_array()) ||
               HasFastStringWrapperElements()),
              (elements().map() == GetReadOnlyRoots().fixed_array_map() ||
@@ -681,7 +678,7 @@ void Map::MapVerify(Isolate* isolate) {
   CHECK_IMPLIES(IsJSObjectMap() && !CanHaveFastTransitionableElementsKind(),
                 IsDictionaryElementsKind(elements_kind()) ||
                     IsTerminalElementsKind(elements_kind()) ||
-                    IsHoleyFrozenOrSealedElementsKind(elements_kind()));
+                    IsAnyHoleyNonextensibleElementsKind(elements_kind()));
   CHECK_IMPLIES(is_deprecated(), !is_stable());
   if (is_prototype_map()) {
     DCHECK(prototype_info() == Smi::kZero ||
@@ -699,8 +696,6 @@ void Map::DictionaryMapVerify(Isolate* isolate) {
   CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
 }
 
-USE_TORQUE_VERIFIER(AliasedArgumentsEntry)
-
 void EmbedderDataArray::EmbedderDataArrayVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::EmbedderDataArrayVerify(*this, isolate);
   EmbedderDataSlot start(*this, 0);
@@ -770,7 +765,7 @@ void Context::ContextVerify(Isolate* isolate) {
 void NativeContext::NativeContextVerify(Isolate* isolate) {
   ContextVerify(isolate);
   CHECK_EQ(length(), NativeContext::NATIVE_CONTEXT_SLOTS);
-  CHECK_EQ(kSize, map().instance_size());
+  CHECK_EQ(kVariableSizeSentinel, map().instance_size());
 }
 
 void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
@@ -914,8 +909,6 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
   CHECK_LE(maxMappedIndex, arg_elements.length());
 }
 
-USE_TORQUE_VERIFIER(JSGeneratorObject)
-
 void JSAsyncFunctionObject::JSAsyncFunctionObjectVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::JSAsyncFunctionObjectVerify(*this, isolate);
   promise().HeapObjectVerify(isolate);
@@ -1140,8 +1133,6 @@ void Oddball::OddballVerify(Isolate* isolate) {
   }
 }
 
-USE_TORQUE_VERIFIER(Cell)
-
 USE_TORQUE_VERIFIER(PropertyCell)
 
 void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
@@ -1185,10 +1176,11 @@ void JSArray::JSArrayVerify(Isolate* isolate) {
     CHECK_EQ(elements(), ReadOnlyRoots(isolate).empty_fixed_array());
   }
   // Verify that the length and the elements backing store are in sync.
-  if (length().IsSmi() && (HasFastElements() || HasFrozenOrSealedElements())) {
+  if (length().IsSmi() &&
+      (HasFastElements() || HasAnyNonextensibleElements())) {
     if (elements().length() > 0) {
       CHECK_IMPLIES(HasDoubleElements(), elements().IsFixedDoubleArray());
-      CHECK_IMPLIES(HasSmiOrObjectElements() || HasFrozenOrSealedElements(),
+      CHECK_IMPLIES(HasSmiOrObjectElements() || HasAnyNonextensibleElements(),
                     elements().IsFixedArray());
     }
     int size = Smi::ToInt(length());
@@ -1215,8 +1207,6 @@ void JSArray::JSArrayVerify(Isolate* isolate) {
   }
 }
 
-USE_TORQUE_VERIFIER(JSCollection)
-
 void JSSet::JSSetVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::JSSetVerify(*this, isolate);
   CHECK(table().IsOrderedHashSet() || table().IsUndefined(isolate));
@@ -1229,8 +1219,6 @@ void JSMap::JSMapVerify(Isolate* isolate) {
   // TODO(arv): Verify OrderedHashTable too.
 }
 
-USE_TORQUE_VERIFIER(JSCollectionIterator)
-
 void JSSetIterator::JSSetIteratorVerify(Isolate* isolate) {
   CHECK(IsJSSetIterator());
   JSCollectionIteratorVerify(isolate);
@@ -1301,12 +1289,6 @@ void JSFinalizationGroupCleanupIterator::
   VerifyHeapPointer(isolate, finalization_group());
 }
 
-void FinalizationGroupCleanupJobTask::FinalizationGroupCleanupJobTaskVerify(
-    Isolate* isolate) {
-  CHECK(IsFinalizationGroupCleanupJobTask());
-  CHECK(finalization_group().IsJSFinalizationGroup());
-}
-
 void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::JSWeakMapVerify(*this, isolate);
   CHECK(table().IsEphemeronHashTable() || table().IsUndefined(isolate));
@@ -1334,36 +1316,16 @@ void JSStringIterator::JSStringIteratorVerify(Isolate* isolate) {
   CHECK_LE(index(), String::kMaxLength);
 }
 
-USE_TORQUE_VERIFIER(JSAsyncFromSyncIterator)
-
-USE_TORQUE_VERIFIER(JSWeakCollection)
-
 void JSWeakSet::JSWeakSetVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::JSWeakSetVerify(*this, isolate);
   CHECK(table().IsEphemeronHashTable() || table().IsUndefined(isolate));
 }
 
-USE_TORQUE_VERIFIER(Microtask)
-
 void CallableTask::CallableTaskVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::CallableTaskVerify(*this, isolate);
   CHECK(callable().IsCallable());
 }
 
-USE_TORQUE_VERIFIER(CallbackTask)
-
-USE_TORQUE_VERIFIER(PromiseReactionJobTask)
-
-USE_TORQUE_VERIFIER(PromiseFulfillReactionJobTask)
-
-USE_TORQUE_VERIFIER(PromiseRejectReactionJobTask)
-
-USE_TORQUE_VERIFIER(PromiseResolveThenableJobTask)
-
-USE_TORQUE_VERIFIER(PromiseCapability)
-
-USE_TORQUE_VERIFIER(PromiseReaction)
-
 void JSPromise::JSPromiseVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::JSPromiseVerify(*this, isolate);
   if (status() == Promise::kPending) {
@@ -1456,22 +1418,38 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
       break;
     }
     case JSRegExp::IRREGEXP: {
-      bool is_native = RegExp::GeneratesNativeCode();
+      bool can_be_interpreted = RegExp::CanGenerateBytecode();
 
       FixedArray arr = FixedArray::cast(data());
       Object one_byte_data = arr.get(JSRegExp::kIrregexpLatin1CodeIndex);
       // Smi : Not compiled yet (-1).
-      // Code/ByteArray: Compiled code.
+      // Code: Compiled irregexp code or trampoline to the interpreter.
       CHECK((one_byte_data.IsSmi() &&
              Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
-            (is_native ? one_byte_data.IsCode() : one_byte_data.IsByteArray()));
+            one_byte_data.IsCode());
       Object uc16_data = arr.get(JSRegExp::kIrregexpUC16CodeIndex);
       CHECK((uc16_data.IsSmi() &&
              Smi::ToInt(uc16_data) == JSRegExp::kUninitializedValue) ||
-            (is_native ? uc16_data.IsCode() : uc16_data.IsByteArray()));
+            uc16_data.IsCode());
+
+      Object one_byte_bytecode =
+          arr.get(JSRegExp::kIrregexpLatin1BytecodeIndex);
+      // Smi : Not compiled yet (-1).
+      // ByteArray: Bytecode to interpret regexp.
+      CHECK((one_byte_bytecode.IsSmi() &&
+             Smi::ToInt(one_byte_bytecode) == JSRegExp::kUninitializedValue) ||
+            (can_be_interpreted && one_byte_bytecode.IsByteArray()));
+      Object uc16_bytecode = arr.get(JSRegExp::kIrregexpUC16BytecodeIndex);
+      CHECK((uc16_bytecode.IsSmi() &&
+             Smi::ToInt(uc16_bytecode) == JSRegExp::kUninitializedValue) ||
+            (can_be_interpreted && uc16_bytecode.IsByteArray()));
+
+      CHECK_IMPLIES(one_byte_data.IsSmi(), one_byte_bytecode.IsSmi());
+      CHECK_IMPLIES(uc16_data.IsSmi(), uc16_bytecode.IsSmi());
 
       CHECK(arr.get(JSRegExp::kIrregexpCaptureCountIndex).IsSmi());
       CHECK(arr.get(JSRegExp::kIrregexpMaxRegisterCountIndex).IsSmi());
+      CHECK(arr.get(JSRegExp::kIrregexpTierUpTicksIndex).IsSmi());
       break;
     }
     default:
@@ -1481,8 +1459,6 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
   }
 }
 
-USE_TORQUE_VERIFIER(JSRegExpStringIterator)
-
 void JSProxy::JSProxyVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::JSProxyVerify(*this, isolate);
   CHECK(map().GetConstructor().IsJSFunction());
@@ -1540,8 +1516,6 @@ void BigInt::BigIntVerify(Isolate* isolate) {
   CHECK_IMPLIES(is_zero(), !sign());  // There is no -0n.
 }
 
-USE_TORQUE_VERIFIER(JSModuleNamespace)
-
 void SourceTextModuleInfoEntry::SourceTextModuleInfoEntryVerify(
     Isolate* isolate) {
   TorqueGeneratedClassVerifiers::SourceTextModuleInfoEntryVerify(*this,
@@ -1626,8 +1600,6 @@ void PrototypeUsers::Verify(WeakArrayList array) {
   CHECK_EQ(weak_maps_count + empty_slots_count + 1, array.length());
 }
 
-USE_TORQUE_VERIFIER(TemplateObjectDescription)
-
 void EnumCache::EnumCacheVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::EnumCacheVerify(*this, isolate);
   Heap* heap = isolate->heap();
@@ -1637,8 +1609,6 @@ void EnumCache::EnumCacheVerify(Isolate* isolate) {
   }
 }
 
-USE_TORQUE_VERIFIER(ClassPositions)
-
 void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
     Isolate* isolate) {
   CHECK(IsObjectBoilerplateDescription());
@@ -1647,14 +1617,10 @@ void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
   this->FixedArrayVerify(isolate);
 }
 
-USE_TORQUE_VERIFIER(ArrayBoilerplateDescription)
-
 USE_TORQUE_VERIFIER(AsmWasmData)
 
 USE_TORQUE_VERIFIER(WasmDebugInfo)
 
-USE_TORQUE_VERIFIER(WasmExceptionTag)
-
 void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
   JSObjectVerify(isolate);
   CHECK(IsWasmInstanceObject());
@@ -1715,8 +1681,6 @@ USE_TORQUE_VERIFIER(AccessorInfo)
 
 USE_TORQUE_VERIFIER(AccessorPair)
 
-USE_TORQUE_VERIFIER(AccessCheckInfo)
-
 void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
   TorqueGeneratedClassVerifiers::CallHandlerInfoVerify(*this, isolate);
   CHECK(map() == ReadOnlyRoots(isolate).side_effect_call_handler_info_map() ||
@@ -1726,22 +1690,12 @@ void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
                      .next_call_side_effect_free_call_handler_info_map());
 }
 
-USE_TORQUE_VERIFIER(InterceptorInfo)
-
-USE_TORQUE_VERIFIER(TemplateInfo)
-
-USE_TORQUE_VERIFIER(FunctionTemplateInfo)
-
-USE_TORQUE_VERIFIER(FunctionTemplateRareData)
-
 USE_TORQUE_VERIFIER(WasmCapiFunctionData)
 
 USE_TORQUE_VERIFIER(WasmJSFunctionData)
 
 USE_TORQUE_VERIFIER(WasmIndirectFunctionTable)
 
-USE_TORQUE_VERIFIER(ObjectTemplateInfo)
-
 void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
   CHECK(IsAllocationSite());
   CHECK(dependent_code().IsDependentCode());
@@ -1781,8 +1735,6 @@ void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
 
 USE_TORQUE_VERIFIER(DebugInfo)
 
-USE_TORQUE_VERIFIER(StackTraceFrame)
-
 USE_TORQUE_VERIFIER(StackFrameInfo)
 
 void PreparseData::PreparseDataVerify(Isolate* isolate) {
@@ -1868,9 +1820,11 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
     case HOLEY_ELEMENTS:
     case HOLEY_FROZEN_ELEMENTS:
     case HOLEY_SEALED_ELEMENTS:
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
     case PACKED_ELEMENTS:
     case PACKED_FROZEN_ELEMENTS:
     case PACKED_SEALED_ELEMENTS:
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
     case FAST_STRING_WRAPPER_ELEMENTS: {
       info->number_of_objects_with_fast_elements_++;
       int holes = 0;
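The deleted USE_TORQUE_VERIFIER lines are classes whose hand-written verifier stub merely forwarded to a Torque-generated one and is no longer needed. The forwarding shape of such a macro, demonstrated with a standalone stand-in (the names below are hypothetical; V8's actual macro definition is not shown in this diff):

    #include <iostream>

    struct GeneratedVerifiers {
      static void WidgetVerify() { std::cout << "generated checks ran\n"; }
    };

    // A hand-written entry point that merely forwards to generated code,
    // the same shape as V8's USE_TORQUE_VERIFIER.
    #define USE_GENERATED_VERIFIER(Class) \
      void Class##Verify() { GeneratedVerifiers::Class##Verify(); }

    USE_GENERATED_VERIFIER(Widget)

    int main() { WidgetVerify(); }
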
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 5284208285acd9..39614091c74b94 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -130,11 +130,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {  // NOLINT
       HeapNumber::cast(*this).HeapNumberPrint(os);
       os << "\n";
       break;
-    case MUTABLE_HEAP_NUMBER_TYPE:
-      os << "<mutable ";
-      MutableHeapNumber::cast(*this).MutableHeapNumberPrint(os);
-      os << ">\n";
-      break;
     case BIGINT_TYPE:
       BigInt::cast(*this).BigIntPrint(os);
       os << "\n";
@@ -683,9 +678,11 @@ void JSObject::PrintElements(std::ostream& os) {  // NOLINT
     case HOLEY_ELEMENTS:
     case HOLEY_FROZEN_ELEMENTS:
     case HOLEY_SEALED_ELEMENTS:
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
     case PACKED_ELEMENTS:
     case PACKED_FROZEN_ELEMENTS:
     case PACKED_SEALED_ELEMENTS:
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
     case FAST_STRING_WRAPPER_ELEMENTS: {
       PrintFixedArrayElements(os, FixedArray::cast(elements()));
       break;
@@ -868,8 +865,8 @@ void JSRegExp::JSRegExpPrint(std::ostream& os) {  // NOLINT
 void JSRegExpStringIterator::JSRegExpStringIteratorPrint(
     std::ostream& os) {  // NOLINT
   JSObjectPrintHeader(os, *this, "JSRegExpStringIterator");
-  os << "\n - regex: " << Brief(iterating_regexp());
-  os << "\n - string: " << Brief(iterating_string());
+  os << "\n - regex: " << Brief(iterating_reg_exp());
+  os << "\n - string: " << Brief(iterated_string());
   os << "\n - done: " << done();
   os << "\n - global: " << global();
   os << "\n - unicode: " << unicode();
@@ -1340,7 +1337,17 @@ void JSFinalizationGroup::JSFinalizationGroupPrint(std::ostream& os) {
   os << "\n - native_context: " << Brief(native_context());
   os << "\n - cleanup: " << Brief(cleanup());
   os << "\n - active_cells: " << Brief(active_cells());
+  Object active_cell = active_cells();
+  while (active_cell.IsWeakCell()) {
+    os << "\n   - " << Brief(active_cell);
+    active_cell = WeakCell::cast(active_cell).next();
+  }
   os << "\n - cleared_cells: " << Brief(cleared_cells());
+  Object cleared_cell = cleared_cells();
+  while (cleared_cell.IsWeakCell()) {
+    os << "\n   - " << Brief(cleared_cell);
+    cleared_cell = WeakCell::cast(cleared_cell).next();
+  }
   os << "\n - key_map: " << Brief(key_map());
   JSObjectPrintBody(os, *this);
 }
@@ -1352,12 +1359,6 @@ void JSFinalizationGroupCleanupIterator::
   JSObjectPrintBody(os, *this);
 }
 
-void FinalizationGroupCleanupJobTask::FinalizationGroupCleanupJobTaskPrint(
-    std::ostream& os) {
-  PrintHeader(os, "FinalizationGroupCleanupJobTask");
-  os << "\n - finalization_group: " << Brief(finalization_group());
-}
-
 void JSWeakMap::JSWeakMapPrint(std::ostream& os) {  // NOLINT
   JSObjectPrintHeader(os, *this, "JSWeakMap");
   os << "\n - table: " << Brief(table());
@@ -1466,13 +1467,16 @@ void JSFunction::JSFunctionPrint(std::ostream& os) {  // NOLINT
   }
   if (WasmExportedFunction::IsWasmExportedFunction(*this)) {
     WasmExportedFunction function = WasmExportedFunction::cast(*this);
-    os << "\n - WASM instance "
-       << reinterpret_cast<void*>(function.instance().ptr());
-    os << "\n - WASM function index " << function.function_index();
+    os << "\n - WASM instance: " << Brief(function.instance());
+    os << "\n - WASM function index: " << function.function_index();
+  }
+  if (WasmJSFunction::IsWasmJSFunction(*this)) {
+    WasmJSFunction function = WasmJSFunction::cast(*this);
+    os << "\n - WASM wrapper around: " << Brief(function.GetCallable());
   }
   shared().PrintSourceCode(os);
   JSObjectPrintBody(os, *this);
-  os << "\n - feedback vector: ";
+  os << " - feedback vector: ";
   if (!shared().HasFeedbackMetadata()) {
     os << "feedback metadata is not available in SFI\n";
   } else if (has_feedback_vector()) {
@@ -1506,6 +1510,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {  // NOLINT
     os << "\n - inferred name: " << Brief(inferred_name());
   }
   os << "\n - kind: " << kind();
+  os << "\n - syntax kind: " << syntax_kind();
   if (needs_home_object()) {
     os << "\n - needs_home_object";
   }
@@ -1522,13 +1527,6 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {  // NOLINT
   // Script files are often large, hard to read.
   // os << "\n - script =";
   // script()->Print(os);
-  if (is_named_expression()) {
-    os << "\n - named expression";
-  } else if (is_anonymous_expression()) {
-    os << "\n - anonymous expression";
-  } else if (is_declaration()) {
-    os << "\n - declaration";
-  }
   os << "\n - function token position: " << function_token_position();
   os << "\n - start position: " << StartPosition();
   os << "\n - end position: " << EndPosition();
@@ -2065,7 +2063,7 @@ void WasmCapiFunctionData::WasmCapiFunctionDataPrint(
     std::ostream& os) {  // NOLINT
   PrintHeader(os, "WasmCapiFunctionData");
   os << "\n - call_target: " << call_target();
-  os << "\n - embedder_data: " << embedder_data();
+  os << "\n - embedder_data: " << Brief(embedder_data());
   os << "\n - wrapper_code: " << Brief(wrapper_code());
   os << "\n - serialized_signature: " << Brief(serialized_signature());
   os << "\n";
@@ -2275,7 +2273,7 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) {  // NOLINT
   os << "\n - context locals : " << ContextLocalCount();
 
   os << "\n - scope type: " << scope_type();
-  if (CallsSloppyEval()) os << "\n - sloppy eval";
+  if (SloppyEvalCanExtendVars()) os << "\n - sloppy eval";
   os << "\n - language mode: " << language_mode();
   if (is_declaration_scope()) os << "\n - declaration scope";
   if (HasReceiver()) {
@@ -2460,10 +2458,6 @@ void TaggedImpl<kRefType, StorageType>::Print(std::ostream& os) {
 
 void HeapNumber::HeapNumberPrint(std::ostream& os) { os << value(); }
 
-void MutableHeapNumber::MutableHeapNumberPrint(std::ostream& os) {
-  os << value();
-}
-
 // TODO(cbruni): remove once the new maptracer is in place.
 void Name::NameShortPrint() {
   if (this->IsString()) {
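The finalization-group printer now also walks active_cells and cleared_cells, which are intrusive singly linked lists threaded through each WeakCell's next field. The traversal pattern in isolation, with a hypothetical minimal node type:

    #include <iostream>

    struct WeakCellLike {
      int id;
      const WeakCellLike* next;  // intrusive link, like WeakCell::next()
    };

    // Print every node until the chain ends; V8 stops once the slot is no
    // longer a WeakCell (e.g. undefined terminates the list).
    void PrintChain(const WeakCellLike* head) {
      for (const WeakCellLike* cell = head; cell != nullptr; cell = cell->next) {
        std::cout << "  - cell " << cell->id << "\n";
      }
    }

    int main() {
      WeakCellLike c = {2, nullptr};
      WeakCellLike b = {1, &c};
      PrintChain(&b);
    }
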
diff --git a/deps/v8/src/diagnostics/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index 57133964b0429a..d84c2e4f53406a 100644
--- a/deps/v8/src/diagnostics/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -243,6 +243,10 @@ void PerfJitLogger::LogRecordedBuffer(const wasm::WasmCode* code,
 
   if (perf_output_handle_ == nullptr) return;
 
+  if (FLAG_perf_prof_annotate_wasm) {
+    LogWriteDebugInfo(code);
+  }
+
   WriteJitCodeLoadEntry(code->instructions().begin(),
                         code->instructions().length(), name, length);
 }
@@ -387,6 +391,73 @@ void PerfJitLogger::LogWriteDebugInfo(Code code, SharedFunctionInfo shared) {
   LogWriteBytes(padding_bytes, padding);
 }
 
+void PerfJitLogger::LogWriteDebugInfo(const wasm::WasmCode* code) {
+  wasm::WasmModuleSourceMap* source_map =
+      code->native_module()->GetWasmSourceMap();
+  wasm::WireBytesRef code_ref =
+      code->native_module()->module()->functions[code->index()].code;
+  uint32_t code_offset = code_ref.offset();
+  uint32_t code_end_offset = code_ref.end_offset();
+
+  uint32_t entry_count = 0;
+  uint32_t size = 0;
+
+  if (!source_map || !source_map->IsValid() ||
+      !source_map->HasSource(code_offset, code_end_offset)) {
+    return;
+  }
+
+  for (SourcePositionTableIterator iterator(code->source_positions());
+       !iterator.done(); iterator.Advance()) {
+    uint32_t offset = iterator.source_position().ScriptOffset() + code_offset;
+    if (!source_map->HasValidEntry(code_offset, offset)) continue;
+    entry_count++;
+    size += source_map->GetFilename(offset).size() + 1;
+  }
+
+  if (entry_count == 0) return;
+
+  PerfJitCodeDebugInfo debug_info;
+
+  debug_info.event_ = PerfJitCodeLoad::kDebugInfo;
+  debug_info.time_stamp_ = GetTimestamp();
+  debug_info.address_ =
+      reinterpret_cast<uintptr_t>(code->instructions().begin());
+  debug_info.entry_count_ = entry_count;
+
+  size += sizeof(debug_info);
+  // Add the sizes of fixed parts of entries.
+  size += entry_count * sizeof(PerfJitDebugEntry);
+
+  int padding = ((size + 7) & (~7)) - size;
+  debug_info.size_ = size + padding;
+  LogWriteBytes(reinterpret_cast<const char*>(&debug_info), sizeof(debug_info));
+
+  uintptr_t code_begin =
+      reinterpret_cast<uintptr_t>(code->instructions().begin());
+
+  for (SourcePositionTableIterator iterator(code->source_positions());
+       !iterator.done(); iterator.Advance()) {
+    uint32_t offset = iterator.source_position().ScriptOffset() + code_offset;
+    if (!source_map->HasValidEntry(code_offset, offset)) continue;
+    PerfJitDebugEntry entry;
+    // The entry point of the function will be placed straight after the ELF
+    // header when processed by "perf inject". Adjust the position addresses
+    // accordingly.
+    entry.address_ = code_begin + iterator.code_offset() + kElfHeaderSize;
+    entry.line_number_ =
+        static_cast<int>(source_map->GetSourceLine(offset)) + 1;
+    entry.column_ = 1;
+    LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
+    std::string name_string = source_map->GetFilename(offset);
+    LogWriteBytes(name_string.c_str(),
+                  static_cast<int>(name_string.size() + 1));
+  }
+
+  char padding_bytes[8] = {0};
+  LogWriteBytes(padding_bytes, padding);
+}
+
 void PerfJitLogger::LogWriteUnwindingInfo(Code code) {
   PerfJitCodeUnwindingInfo unwinding_info_header;
   unwinding_info_header.event_ = PerfJitCodeLoad::kUnwindingInfo;
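Before the debug-info record is written, its size is padded to an 8-byte boundary via ((size + 7) & ~7) - size. The same computation as a reusable helper, with compile-time checks of the edge cases:

    #include <cstdint>

    // Round size up to the next multiple of 8 and report the filler needed.
    constexpr uint32_t AlignUp8(uint32_t size) { return (size + 7) & ~7u; }
    constexpr uint32_t Padding8(uint32_t size) { return AlignUp8(size) - size; }

    static_assert(Padding8(16) == 0, "already aligned");
    static_assert(Padding8(17) == 7, "one byte over needs seven fillers");
    static_assert(Padding8(23) == 1, "seven bytes over needs one filler");
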
diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
index 492a5509760dcd..36ab844110038d 100644
--- a/deps/v8/src/diagnostics/perf-jit.h
+++ b/deps/v8/src/diagnostics/perf-jit.h
@@ -71,6 +71,7 @@ class PerfJitLogger : public CodeEventLogger {
   void LogWriteBytes(const char* bytes, int size);
   void LogWriteHeader();
   void LogWriteDebugInfo(Code code, SharedFunctionInfo shared);
+  void LogWriteDebugInfo(const wasm::WasmCode* code);
   void LogWriteUnwindingInfo(Code code);
 
   static const uint32_t kElfMachIA32 = 3;
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 096ffa2d48dd7e..6cc53da51f16a4 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -4,42 +4,17 @@
 
 #include "src/diagnostics/unwinding-info-win64.h"
 
-#if defined(V8_OS_WIN_X64)
-
 #include "src/codegen/macro-assembler.h"
-#include "src/codegen/x64/assembler-x64.h"
 #include "src/utils/allocation.h"
 
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
-    _Out_ PVOID* DynamicTable,
-    _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
-    _In_ DWORD EntryCount,
-    _In_ DWORD MaximumEntryCount,
-    _In_ ULONG_PTR RangeBase,
-    _In_ ULONG_PTR RangeEnd
-    );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
-    _Inout_ PVOID DynamicTable,
-    _In_ DWORD NewEntryCount
-    );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
-    _In_ PVOID DynamicTable
-    );
-
+#if defined(V8_OS_WIN_X64)
+#include "src/codegen/x64/assembler-x64.h"
+#elif defined(V8_OS_WIN_ARM64)
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#else
+#error "Unsupported OS"
+#endif  // V8_OS_WIN_X64
 
 namespace v8 {
 namespace internal {
@@ -53,9 +28,36 @@ bool CanRegisterUnwindInfoForNonABICompliantCodeRange() {
 
 bool RegisterUnwindInfoForExceptionHandlingOnly() {
   DCHECK(CanRegisterUnwindInfoForNonABICompliantCodeRange());
+#if defined(V8_OS_WIN_ARM64)
+  return !FLAG_win64_unwinding_info;
+#else
   return !IsWindows8OrGreater() || !FLAG_win64_unwinding_info;
+#endif
+}
+
+v8::UnhandledExceptionCallback unhandled_exception_callback_g = nullptr;
+
+void SetUnhandledExceptionCallback(
+    v8::UnhandledExceptionCallback unhandled_exception_callback) {
+  unhandled_exception_callback_g = unhandled_exception_callback;
+}
+
+// This function is registered as exception handler for V8-generated code as
+// part of the registration of unwinding info. It is referenced by
+// RegisterNonABICompliantCodeRange(), below, and by the unwinding info for
+// builtins declared in the embedded blob.
+extern "C" __declspec(dllexport) int CRASH_HANDLER_FUNCTION_NAME(
+    PEXCEPTION_RECORD ExceptionRecord, ULONG64 EstablisherFrame,
+    PCONTEXT ContextRecord, PDISPATCHER_CONTEXT DispatcherContext) {
+  if (unhandled_exception_callback_g != nullptr) {
+    EXCEPTION_POINTERS info = {ExceptionRecord, ContextRecord};
+    return unhandled_exception_callback_g(&info);
+  }
+  return ExceptionContinueSearch;
 }
 
+#if defined(V8_OS_WIN_X64)
+
 #pragma pack(push, 1)
 
 /*
@@ -80,9 +82,12 @@ struct UNWIND_INFO {
   unsigned char FrameOffset : 4;
 };
 
+static constexpr int kNumberOfUnwindCodes = 2;
+static constexpr int kMaxExceptionThunkSize = 12;
+
 struct V8UnwindData {
   UNWIND_INFO unwind_info;
-  UNWIND_CODE unwind_codes[2];
+  UNWIND_CODE unwind_codes[kNumberOfUnwindCodes];
 
   V8UnwindData() {
     static constexpr int kOpPushNonvol = 0;
@@ -118,46 +123,244 @@ struct ExceptionHandlerUnwindData {
   }
 };
 
+struct CodeRangeUnwindingRecord {
+  void* dynamic_table;
+  uint32_t runtime_function_count;
+  V8UnwindData unwind_info;
+  uint32_t exception_handler;
+  uint8_t exception_thunk[kMaxExceptionThunkSize];
+  RUNTIME_FUNCTION runtime_function[kDefaultRuntimeFunctionCount];
+};
+
+struct ExceptionHandlerRecord {
+  uint32_t runtime_function_count;
+  RUNTIME_FUNCTION runtime_function[kDefaultRuntimeFunctionCount];
+  ExceptionHandlerUnwindData unwind_info;
+  uint32_t exception_handler;
+  uint8_t exception_thunk[kMaxExceptionThunkSize];
+};
+
 #pragma pack(pop)
 
-v8::UnhandledExceptionCallback unhandled_exception_callback_g = nullptr;
+std::vector<uint8_t> GetUnwindInfoForBuiltinFunctions() {
+  V8UnwindData xdata;
+  return std::vector<uint8_t>(
+      reinterpret_cast<uint8_t*>(&xdata),
+      reinterpret_cast<uint8_t*>(&xdata) + sizeof(xdata));
+}
 
-void SetUnhandledExceptionCallback(
-    v8::UnhandledExceptionCallback unhandled_exception_callback) {
-  unhandled_exception_callback_g = unhandled_exception_callback;
+template <typename Record>
+void InitUnwindingRecord(Record* record, size_t code_size_in_bytes) {
+  // We assume that the first page of the code range is executable and
+  // committed and reserved to contain PDATA/XDATA.
+
+  // All addresses are 32-bit offsets relative to the start.
+  record->runtime_function[0].BeginAddress = 0;
+  record->runtime_function[0].EndAddress =
+      static_cast<DWORD>(code_size_in_bytes);
+  record->runtime_function[0].UnwindData = offsetof(Record, unwind_info);
+  record->runtime_function_count = 1;
+  record->exception_handler = offsetof(Record, exception_thunk);
+
+  // Hardcoded thunk.
+  AssemblerOptions options;
+  options.record_reloc_info_for_serialization = false;
+  MacroAssembler masm(nullptr, options, CodeObjectRequired::kNo,
+                      NewAssemblerBuffer(64));
+  masm.movq(rax, reinterpret_cast<uint64_t>(&CRASH_HANDLER_FUNCTION_NAME));
+  masm.jmp(rax);
+  DCHECK_LE(masm.instruction_size(), sizeof(record->exception_thunk));
+  memcpy(&record->exception_thunk[0], masm.buffer_start(),
+         masm.instruction_size());
 }
 
-// This function is registered as exception handler for V8-generated code as
-// part of the registration of unwinding info. It is referenced by
-// RegisterNonABICompliantCodeRange(), below, and by the unwinding info for
-// builtins declared in the embedded blob.
-extern "C" __declspec(dllexport) int CRASH_HANDLER_FUNCTION_NAME(
-    PEXCEPTION_RECORD ExceptionRecord, ULONG64 EstablisherFrame,
-    PCONTEXT ContextRecord, PDISPATCHER_CONTEXT DispatcherContext) {
-  if (unhandled_exception_callback_g != nullptr) {
-    EXCEPTION_POINTERS info = {ExceptionRecord, ContextRecord};
-    return unhandled_exception_callback_g(&info);
-  }
-  return ExceptionContinueSearch;
+#elif defined(V8_OS_WIN_ARM64)
+
+#pragma pack(push, 1)
+
+// ARM64 unwind codes are defined in the documentation below:
+// https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling#unwind-codes
+enum UnwindOp8Bit {
+  OpNop = 0xE3,
+  OpSaveFpLr = 0x40,
+  OpSaveFpLrX = 0x80,
+  OpSetFp = 0xE1,
+  OpEnd = 0xE4,
+};
+
+typedef uint32_t UNWIND_CODE;
+
+constexpr UNWIND_CODE Combine8BitUnwindCodes(uint8_t code0,
+                                             uint8_t code1 = OpNop,
+                                             uint8_t code2 = OpNop,
+                                             uint8_t code3 = OpNop) {
+  return static_cast<uint32_t>(code0) | (static_cast<uint32_t>(code1) << 8) |
+         (static_cast<uint32_t>(code2) << 16) |
+         (static_cast<uint32_t>(code3) << 24);
 }
 
-static constexpr int kMaxExceptionThunkSize = 12;
+// UNWIND_INFO defines the static part (the first 32 bits) of the .xdata
+// record, described in the documentation below:
+// https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling#xdata-records
+struct UNWIND_INFO {
+  uint32_t FunctionLength : 18;
+  uint32_t Version : 2;
+  uint32_t X : 1;
+  uint32_t E : 1;
+  uint32_t EpilogCount : 5;
+  uint32_t CodeWords : 5;
+};
+
+static constexpr int kNumberOfUnwindCodes = 1;
+static constexpr int kMaxExceptionThunkSize = 16;
+static constexpr int kFunctionLengthShiftSize = 2;
+static constexpr int kFunctionLengthMask = (1 << kFunctionLengthShiftSize) - 1;
+static constexpr int kFramePointerAdjustmentShiftSize = 3;
+static constexpr int kFramePointerAdjustmentShiftMask =
+    (1 << kFramePointerAdjustmentShiftSize) - 1;
+
+struct V8UnwindData {
+  UNWIND_INFO unwind_info;
+  UNWIND_CODE unwind_codes[kNumberOfUnwindCodes];
+
+  V8UnwindData() {
+    memset(&unwind_info, 0, sizeof(UNWIND_INFO));
+    unwind_info.X = 1;  // has exception handler after unwind-codes.
+    unwind_info.CodeWords = 1;
+
+    // stp fp, lr, [sp, #offset]!
+    unwind_codes[0] = Combine8BitUnwindCodes(OpSetFp, OpSaveFpLrX, OpEnd);
+  }
+};
 
 struct CodeRangeUnwindingRecord {
-  RUNTIME_FUNCTION runtime_function;
+  void* dynamic_table;
+  uint32_t runtime_function_count;
   V8UnwindData unwind_info;
   uint32_t exception_handler;
-  uint8_t exception_thunk[kMaxExceptionThunkSize];
-  void* dynamic_table;
-};
 
-struct ExceptionHandlerRecord {
-  RUNTIME_FUNCTION runtime_function;
-  ExceptionHandlerUnwindData unwind_info;
-  uint32_t exception_handler;
+  // For Windows ARM64 unwinding, register two unwind_info structs for each
+  // code range: unwind_info covers all full-size chunks (1MB - 4 bytes each)
+  // and unwind_info1 covers the remaining chunk that is smaller than full
+  // size. There is at most one such chunk.
+  V8UnwindData unwind_info1;
+  uint32_t exception_handler1;
   uint8_t exception_thunk[kMaxExceptionThunkSize];
+
+  // More RUNTIME_FUNCTION structs may follow this array, because the number
+  // of RUNTIME_FUNCTION entries needed to cover a given code range is
+  // computed at runtime.
+  RUNTIME_FUNCTION runtime_function[kDefaultRuntimeFunctionCount];
 };
 
+#pragma pack(pop)
+
+std::vector<uint8_t> GetUnwindInfoForBuiltinFunction(uint32_t func_len,
+                                                     int32_t fp_adjustment) {
+  DCHECK_LE(func_len, kMaxFunctionLength);
+  DCHECK_EQ((func_len & kFunctionLengthMask), 0);
+  USE(kFunctionLengthMask);
+
+  // Unwind code save_fplr requires the offset to be within range [0, 504].
+  // The range is defined for the save_fplr unwind code in the doc below:
+  // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling#unwind-codes
+  DCHECK_GE(fp_adjustment, 0);
+  DCHECK_LE(fp_adjustment, 504);
+  DCHECK_EQ((fp_adjustment & kFramePointerAdjustmentShiftMask), 0);
+  USE(kFramePointerAdjustmentShiftMask);
+
+  V8UnwindData xdata;
+  // FunctionLength is guaranteed to be aligned to the instruction size, and
+  // Windows ARM64 doesn't encode its 2 least significant bits.
+  xdata.unwind_info.FunctionLength = func_len >> kFunctionLengthShiftSize;
+  xdata.unwind_info.CodeWords = 1;
+  xdata.unwind_codes[0] = Combine8BitUnwindCodes(
+      OpSetFp,
+      (OpSaveFpLr | (fp_adjustment >> kFramePointerAdjustmentShiftSize)),
+      OpEnd);
+
+  return std::vector<uint8_t>(
+      reinterpret_cast<uint8_t*>(&xdata),
+      reinterpret_cast<uint8_t*>(&xdata) + sizeof(xdata));
+}
+
+template <typename Record>
+void InitUnwindingRecord(Record* record, size_t code_size_in_bytes) {
+  // We assume that the first page of the code range is executable and
+  // committed and reserved to contain multiple PDATA/XDATA to cover the whole
+  // range. All addresses are 32-bit offsets relative to the start.
+
+  // Maximum RUNTIME_FUNCTION count available in reserved memory, this includes
+  // static part in Record as kDefaultRuntimeFunctionCount plus dynamic part in
+  // the remaining reserved memory.
+  constexpr uint32_t max_runtime_function_count = static_cast<uint32_t>(
+      (kOSPageSize - sizeof(Record)) / sizeof(RUNTIME_FUNCTION) +
+      kDefaultRuntimeFunctionCount);
+
+  uint32_t runtime_function_index = 0;
+  uint32_t current_unwind_start_address = 0;
+  int64_t remaining_size_in_bytes = static_cast<int64_t>(code_size_in_bytes);
+
+  // Divide the code range into chunks of kMaxFunctionLength and create a
+  // RUNTIME_FUNCTION for each of them. All full-size chunks can share one
+  // unwind_info struct, but a separate unwind_info is needed for the last
+  // chunk if it is smaller than kMaxFunctionLength, because unlike x64,
+  // unwind_info encodes the function/chunk length.
+  while (remaining_size_in_bytes >= kMaxFunctionLength &&
+         runtime_function_index < max_runtime_function_count) {
+    record->runtime_function[runtime_function_index].BeginAddress =
+        current_unwind_start_address;
+    record->runtime_function[runtime_function_index].UnwindData =
+        static_cast<DWORD>(offsetof(Record, unwind_info));
+
+    runtime_function_index++;
+    current_unwind_start_address += kMaxFunctionLength;
+    remaining_size_in_bytes -= kMaxFunctionLength;
+  }
+  // FunctionLength is guaranteed to be aligned to the instruction size, and
+  // Windows ARM64 doesn't encode its 2 least significant bits.
+  record->unwind_info.unwind_info.FunctionLength = kMaxFunctionLength >> 2;
+
+  if (remaining_size_in_bytes > 0 &&
+      runtime_function_index < max_runtime_function_count) {
+    DCHECK_EQ(remaining_size_in_bytes % kInstrSize, 0);
+
+    record->unwind_info1.unwind_info.FunctionLength = static_cast<uint32_t>(
+        remaining_size_in_bytes >> kFunctionLengthShiftSize);
+    record->runtime_function[runtime_function_index].BeginAddress =
+        current_unwind_start_address;
+    record->runtime_function[runtime_function_index].UnwindData =
+        static_cast<DWORD>(offsetof(Record, unwind_info1));
+
+    remaining_size_in_bytes -= kMaxFunctionLength;
+    record->exception_handler1 = offsetof(Record, exception_thunk);
+    record->runtime_function_count = runtime_function_index + 1;
+  } else {
+    record->runtime_function_count = runtime_function_index;
+  }
+
+  // One page can cover kMaximalCodeRangeSize for ARM64 (128MB). If
+  // kMaximalCodeRangeSize is ever changed for ARM64 so that one page is no
+  // longer sufficient, more pages will need to be reserved for unwind data.
+  DCHECK_LE(remaining_size_in_bytes, 0);
+
+  record->exception_handler = offsetof(Record, exception_thunk);
+
+  // Hardcoded thunk.
+  AssemblerOptions options;
+  options.record_reloc_info_for_serialization = false;
+  TurboAssembler masm(nullptr, options, CodeObjectRequired::kNo,
+                      NewAssemblerBuffer(64));
+  masm.Mov(x16,
+           Operand(reinterpret_cast<uint64_t>(&CRASH_HANDLER_FUNCTION_NAME)));
+  masm.Br(x16);
+  DCHECK_LE(masm.instruction_size(), sizeof(record->exception_thunk));
+  memcpy(&record->exception_thunk[0], masm.buffer_start(),
+         masm.instruction_size());
+}
+
+#endif  // V8_OS_WIN_X64
+
 namespace {
 
 V8_DECLARE_ONCE(load_ntdll_unwinding_functions_once);
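Combine8BitUnwindCodes packs up to four 8-bit ARM64 unwind opcodes into one little-endian 32-bit code word, filling unused slots with nops. A standalone check of the packing, reusing the opcode values from the enum above:

    #include <cstdint>

    constexpr uint8_t kOpNop = 0xE3, kOpSaveFpLrX = 0x80, kOpSetFp = 0xE1,
                      kOpEnd = 0xE4;

    constexpr uint32_t Combine(uint8_t c0, uint8_t c1 = kOpNop,
                               uint8_t c2 = kOpNop, uint8_t c3 = kOpNop) {
      return uint32_t{c0} | (uint32_t{c1} << 8) | (uint32_t{c2} << 16) |
             (uint32_t{c3} << 24);
    }

    // The prolog pattern used above: set_fp, save_fplr_x, end, and an
    // implicit nop in the top byte.
    static_assert(Combine(kOpSetFp, kOpSaveFpLrX, kOpEnd) == 0xE3E480E1,
                  "byte 0 is the first opcode; unused slots are nops");
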
@@ -216,37 +419,6 @@ void DeleteGrowableFunctionTable(PVOID dynamic_table) {
 
 }  // namespace
 
-std::vector<uint8_t> GetUnwindInfoForBuiltinFunctions() {
-  V8UnwindData xdata;
-  return std::vector<uint8_t>(
-      reinterpret_cast<uint8_t*>(&xdata),
-      reinterpret_cast<uint8_t*>(&xdata) + sizeof(xdata));
-}
-
-template <typename Record>
-void InitUnwindingRecord(Record* record, size_t code_size_in_bytes) {
-  // We assume that the first page of the code range is executable and
-  // committed and reserved to contain PDATA/XDATA.
-
-  // All addresses are 32bit relative offsets to start.
-  record->runtime_function.BeginAddress = 0;
-  record->runtime_function.EndAddress = static_cast<DWORD>(code_size_in_bytes);
-  record->runtime_function.UnwindData = offsetof(Record, unwind_info);
-
-  record->exception_handler = offsetof(Record, exception_thunk);
-
-  // Hardcoded thunk.
-  AssemblerOptions options;
-  options.record_reloc_info_for_serialization = false;
-  MacroAssembler masm(nullptr, options, CodeObjectRequired::kNo,
-                      NewAssemblerBuffer(64));
-  masm.movq(rax, reinterpret_cast<uint64_t>(&CRASH_HANDLER_FUNCTION_NAME));
-  masm.jmp(rax);
-  DCHECK_LE(masm.instruction_size(), sizeof(record->exception_thunk));
-  memcpy(&record->exception_thunk[0], masm.buffer_start(),
-         masm.instruction_size());
-}
-
 void RegisterNonABICompliantCodeRange(void* start, size_t size_in_bytes) {
   DCHECK(CanRegisterUnwindInfoForNonABICompliantCodeRange());
 
@@ -262,24 +434,30 @@ void RegisterNonABICompliantCodeRange(void* start, size_t size_in_bytes) {
   // by the embedder (like Crashpad).
 
   if (RegisterUnwindInfoForExceptionHandlingOnly()) {
+#if defined(V8_OS_WIN_X64)
+    // Windows ARM64 is only supported from Windows build 1709 onwards, so
+    // exception-handling-only unwind info is not needed for compatibility.
     if (unhandled_exception_callback_g) {
       ExceptionHandlerRecord* record = new (start) ExceptionHandlerRecord();
       InitUnwindingRecord(record, size_in_bytes);
 
-      CHECK(::RtlAddFunctionTable(&record->runtime_function, 1,
+      CHECK(::RtlAddFunctionTable(record->runtime_function,
+                                  kDefaultRuntimeFunctionCount,
                                   reinterpret_cast<DWORD64>(start)));
 
       // Protect reserved page against modifications.
       DWORD old_protect;
-      CHECK(VirtualProtect(start, sizeof(CodeRangeUnwindingRecord),
+      CHECK(VirtualProtect(start, sizeof(ExceptionHandlerRecord),
                            PAGE_EXECUTE_READ, &old_protect));
     }
+#endif  // V8_OS_WIN_X64
   } else {
     CodeRangeUnwindingRecord* record = new (start) CodeRangeUnwindingRecord();
     InitUnwindingRecord(record, size_in_bytes);
 
     CHECK(AddGrowableFunctionTable(
-        &record->dynamic_table, &record->runtime_function, 1, 1,
+        &record->dynamic_table, record->runtime_function,
+        record->runtime_function_count, record->runtime_function_count,
         reinterpret_cast<DWORD64>(start),
         reinterpret_cast<DWORD64>(reinterpret_cast<uint8_t*>(start) +
                                   size_in_bytes)));
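For context on the growable-table API used in the hunk above: these functions are resolved from ntdll at runtime (see load_ntdll_unwinding_functions_once). A hedged sketch of that resolution, using only documented Windows SDK symbols; the error handling is illustrative:

```cpp
#include <windows.h>

// Signature per the Windows SDK (winnt.h); returns 0 on success. Resolved at
// runtime because the export may be missing on older OS builds.
using AddGrowableFunctionTableFn = DWORD(WINAPI*)(
    PVOID* DynamicTable, PRUNTIME_FUNCTION FunctionTable, DWORD EntryCount,
    DWORD MaximumEntryCount, ULONG_PTR RangeBase, ULONG_PTR RangeEnd);

AddGrowableFunctionTableFn LoadAddGrowableFunctionTable() {
  HMODULE ntdll = ::GetModuleHandleW(L"ntdll.dll");
  if (ntdll == nullptr) return nullptr;
  return reinterpret_cast<AddGrowableFunctionTableFn>(
      ::GetProcAddress(ntdll, "RtlAddGrowableFunctionTable"));
}
```
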
@@ -295,11 +473,15 @@ void UnregisterNonABICompliantCodeRange(void* start) {
   DCHECK(CanRegisterUnwindInfoForNonABICompliantCodeRange());
 
   if (RegisterUnwindInfoForExceptionHandlingOnly()) {
+#if defined(V8_OS_WIN_X64)
+    // Windows ARM64 is only supported from Windows build 1709 onwards, so
+    // exception-handling-only unwind info is not needed for compatibility.
     if (unhandled_exception_callback_g) {
       ExceptionHandlerRecord* record =
           reinterpret_cast<ExceptionHandlerRecord*>(start);
-      CHECK(::RtlDeleteFunctionTable(&record->runtime_function));
+      CHECK(::RtlDeleteFunctionTable(record->runtime_function));
     }
+#endif  // V8_OS_WIN_X64
   } else {
     CodeRangeUnwindingRecord* record =
         reinterpret_cast<CodeRangeUnwindingRecord*>(start);
@@ -309,19 +491,41 @@ void UnregisterNonABICompliantCodeRange(void* start) {
   }
 }
 
+#if defined(V8_OS_WIN_X64)
+
 void XdataEncoder::onPushRbp() {
-  current_push_rbp_offset_ = assembler_.pc_offset() - kPushRbpInstructionLength;
+  current_frame_code_offset_ =
+      assembler_.pc_offset() - kPushRbpInstructionLength;
 }
 
 void XdataEncoder::onMovRbpRsp() {
-  if (current_push_rbp_offset_ >= 0 &&
-      current_push_rbp_offset_ == assembler_.pc_offset() - kRbpPrefixLength) {
-    fp_offsets_.push_back(current_push_rbp_offset_);
+  if (current_frame_code_offset_ >= 0 &&
+      current_frame_code_offset_ == assembler_.pc_offset() - kRbpPrefixLength) {
+    fp_offsets_.push_back(current_frame_code_offset_);
+  }
+}
+
+#elif defined(V8_OS_WIN_ARM64)
+
+void XdataEncoder::onSaveFpLr() {
+  current_frame_code_offset_ = assembler_.pc_offset() - 4;
+  fp_offsets_.push_back(current_frame_code_offset_);
+  fp_adjustments_.push_back(current_frame_adjustment_);
+  if (current_frame_adjustment_ != 0) {
+    current_frame_adjustment_ = 0;
   }
 }
 
+void XdataEncoder::onFramePointerAdjustment(int bytes) {
+  // Per the doc below, the offset for save_fplr is aligned to pointer size.
+  // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling#unwind-codes
+  DCHECK_EQ((bytes & kPointerAlignmentMask), 0);
+
+  current_frame_adjustment_ = bytes;
+}
+
+#endif  // V8_OS_WIN_X64
+
 }  // namespace win64_unwindinfo
 }  // namespace internal
 }  // namespace v8
-
-#endif  // defined(V8_OS_WIN_X64)
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.h b/deps/v8/src/diagnostics/unwinding-info-win64.h
index f6611e7e2ec5a8..8f8c9469ebf92c 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.h
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.h
@@ -9,7 +9,7 @@
 #include "include/v8config.h"
 #include "src/common/globals.h"
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
 #include "src/base/win32-headers.h"
 
 namespace v8 {
@@ -21,11 +21,7 @@ namespace win64_unwindinfo {
 #define CRASH_HANDLER_FUNCTION_NAME_STRING \
   "CrashForExceptionInNonABICompliantCodeRange"
 
-static const int kPushRbpInstructionLength = 1;
-static const int kMovRbpRspInstructionLength = 3;
-static const int kRbpPrefixCodes = 2;
-static const int kRbpPrefixLength =
-    kPushRbpInstructionLength + kMovRbpRspInstructionLength;
+static const int kOSPageSize = 4096;
 
 /**
  * Returns true if V8 is configured to emit unwinding data for embedded in the
@@ -50,15 +46,33 @@ bool CanRegisterUnwindInfoForNonABICompliantCodeRange();
 void SetUnhandledExceptionCallback(
     v8::UnhandledExceptionCallback unhandled_exception_callback);
 
+void RegisterNonABICompliantCodeRange(void* start, size_t size_in_bytes);
+void UnregisterNonABICompliantCodeRange(void* start);
+
 /**
- * Returns a vector of bytes that contains the Win64 unwind data used for all
+ * Default count of RUNTIME_FUNCTION entries needed. On Windows X64, one
+ * RUNTIME_FUNCTION covers a 4GB range, which is sufficient for the whole code
+ * range of an isolate or WASM module. On Windows ARM64, one RUNTIME_FUNCTION
+ * covers only kMaxFunctionLength bytes, so multiple RUNTIME_FUNCTION structs
+ * may be needed to cover the whole code range of an isolate or WASM module.
+ * The extra RUNTIME_FUNCTIONs are assumed to follow the first in the page.
+ */
+static const uint32_t kDefaultRuntimeFunctionCount = 1;
+
+#if defined(V8_OS_WIN_X64)
+
+static const int kPushRbpInstructionLength = 1;
+static const int kMovRbpRspInstructionLength = 3;
+static const int kRbpPrefixCodes = 2;
+static const int kRbpPrefixLength =
+    kPushRbpInstructionLength + kMovRbpRspInstructionLength;
+
+/**
+ * Returns a vector of bytes that contains the Win X64 unwind data used for all
  * V8 builtin functions.
  */
 std::vector<uint8_t> GetUnwindInfoForBuiltinFunctions();
 
-void RegisterNonABICompliantCodeRange(void* start, size_t size_in_bytes);
-void UnregisterNonABICompliantCodeRange(void* start);
-
 class BuiltinUnwindInfo {
  public:
   BuiltinUnwindInfo() : is_leaf_function_(true) {}
@@ -76,7 +90,7 @@ class BuiltinUnwindInfo {
 class XdataEncoder {
  public:
   explicit XdataEncoder(const Assembler& assembler)
-      : assembler_(assembler), current_push_rbp_offset_(-1) {}
+      : assembler_(assembler), current_frame_code_offset_(-1) {}
 
   void onPushRbp();
   void onMovRbpRsp();
@@ -88,14 +102,77 @@ class XdataEncoder {
  private:
   const Assembler& assembler_;
   std::vector<int> fp_offsets_;
-  int current_push_rbp_offset_;
+  int current_frame_code_offset_;
 };
 
-}  // namespace win64_unwindinfo
+#elif defined(V8_OS_WIN_ARM64)
 
+/**
+ * Per the doc below, an unwind record has 18 bits (unsigned) to encode the
+ * function length, excluding the 2 LSB which are always 0.
+ * https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling#xdata-records
+ */
+static const int kMaxFunctionLength = ((1 << 18) - 1) << 2;
+
+/**
+ * Returns a vector of bytes that contains the Win ARM64 unwind data used for
+ * all V8 builtin functions.
+ *
+ * func_len: length in bytes of the current function/region to unwind.
+ * fp_adjustment: offset of the saved caller's fp relative to the fp of the
+ *                current frame. This is needed for the Windows stack unwinder
+ *                to find the correct caller's fp from the encoded unwind data.
+ */
+std::vector<uint8_t> GetUnwindInfoForBuiltinFunction(uint32_t func_len,
+                                                     int32_t fp_adjustment);
+class BuiltinUnwindInfo {
+ public:
+  BuiltinUnwindInfo() : is_leaf_function_(true) {}
+  explicit BuiltinUnwindInfo(const std::vector<int>& fp_offsets,
+                             const std::vector<int>& fp_adjustments)
+      : is_leaf_function_(false),
+        fp_offsets_(fp_offsets),
+        fp_adjustments_(fp_adjustments) {}
+
+  const std::vector<int>& fp_adjustments() const { return fp_adjustments_; }
+
+  bool is_leaf_function() const { return is_leaf_function_; }
+  const std::vector<int>& fp_offsets() const { return fp_offsets_; }
+
+ private:
+  bool is_leaf_function_;
+  std::vector<int> fp_offsets_;
+  std::vector<int> fp_adjustments_;
+};
+
+class XdataEncoder {
+ public:
+  explicit XdataEncoder(const Assembler& assembler)
+      : assembler_(assembler),
+        current_frame_code_offset_(-1),
+        current_frame_adjustment_(0) {}
+
+  void onSaveFpLr();
+  void onFramePointerAdjustment(int bytes);
+
+  BuiltinUnwindInfo unwinding_info() const {
+    return BuiltinUnwindInfo(fp_offsets_, fp_adjustments_);
+  }
+
+ private:
+  const Assembler& assembler_;
+  std::vector<int> fp_offsets_;
+  int current_frame_code_offset_;
+  int current_frame_adjustment_;
+  std::vector<int> fp_adjustments_;
+};
+
+#endif
+
+}  // namespace win64_unwindinfo
 }  // namespace internal
 }  // namespace v8
 
-#endif  // defined(V8_OS_WIN_X64)
+#endif  // V8_OS_WIN64
 
 #endif  // V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 493c56996ba5fe..aada6a43813daa 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -1847,12 +1847,26 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
         const char* mnemonic;
         if (opcode == 0x54) {
           mnemonic = "andpd";
+        } else if (opcode == 0x55) {
+          mnemonic = "andnpd";
         } else if (opcode == 0x56) {
           mnemonic = "orpd";
         } else if (opcode == 0x57) {
           mnemonic = "xorpd";
+        } else if (opcode == 0x58) {
+          mnemonic = "addpd";
+        } else if (opcode == 0x59) {
+          mnemonic = "mulpd";
         } else if (opcode == 0x5B) {
           mnemonic = "cvtps2dq";
+        } else if (opcode == 0x5C) {
+          mnemonic = "subpd";
+        } else if (opcode == 0x5D) {
+          mnemonic = "minpd";
+        } else if (opcode == 0x5E) {
+          mnemonic = "divpd";
+        } else if (opcode == 0x5F) {
+          mnemonic = "maxpd";
         } else if (opcode == 0x60) {
           mnemonic = "punpcklbw";
         } else if (opcode == 0x61) {
@@ -1895,6 +1909,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
           mnemonic = "psrlw";
         } else if (opcode == 0xD2) {
           mnemonic = "psrld";
+        } else if (opcode == 0xD3) {
+          mnemonic = "psrld";
         } else if (opcode == 0xD4) {
           mnemonic = "paddq";
         } else if (opcode == 0xD5) {
@@ -1939,6 +1955,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
           mnemonic = "psllw";
         } else if (opcode == 0xF2) {
           mnemonic = "pslld";
+        } else if (opcode == 0xF3) {
+          mnemonic = "psllq";
         } else if (opcode == 0xF4) {
           mnemonic = "pmuludq";
         } else if (opcode == 0xF8) {
@@ -1985,6 +2003,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
         AppendToBuffer("%s,", NameOfXMMRegister(regop));
         current += PrintRightXMMOperand(current);
       }
+    } else if (opcode == 0x12) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("movddup %s,", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
     } else if (opcode == 0x2A) {
       // CVTSI2SD: integer to XMM double conversion.
       int mod, regop, rm;
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index 0b3ebcf879eef8..26771350961f7d 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -1562,7 +1562,7 @@ using SimulatorRuntimeCall = int64_t (*)(int32_t arg0, int32_t arg1,
                                          int32_t arg2, int32_t arg3,
                                          int32_t arg4, int32_t arg5,
                                          int32_t arg6, int32_t arg7,
-                                         int32_t arg8);
+                                         int32_t arg8, int32_t arg9);
 
 // These prototypes handle the four types of FP calls.
 using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
@@ -1602,7 +1602,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
       int32_t arg6 = stack_pointer[2];
       int32_t arg7 = stack_pointer[3];
       int32_t arg8 = stack_pointer[4];
-      STATIC_ASSERT(kMaxCParameters == 9);
+      int32_t arg9 = stack_pointer[5];
+      STATIC_ASSERT(kMaxCParameters == 10);
 
       bool fp_call =
           (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -1761,9 +1762,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
           PrintF(
               "Call to host function at %p "
-              "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x",
+              "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x",
               reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
-              arg3, arg4, arg5, arg6, arg7, arg8);
+              arg3, arg4, arg5, arg6, arg7, arg8, arg9);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
@@ -1771,7 +1772,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
         }
         CHECK(stack_aligned);
         int64_t result =
-            target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+            target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
         int32_t lo_res = static_cast<int32_t>(result);
         int32_t hi_res = static_cast<int32_t>(result >> 32);
         if (::v8::internal::FLAG_trace_sim) {
@@ -4070,6 +4071,39 @@ void ShiftRightAndInsert(Simulator* simulator, int Vd, int Vm, int shift) {
   simulator->set_neon_register<T, SIZE>(Vd, dst);
 }
 
+template <typename T, typename S_T, int SIZE>
+void ShiftByRegister(Simulator* simulator, int Vd, int Vm, int Vn) {
+  static const int kElems = SIZE / sizeof(T);
+  T src[kElems];
+  S_T shift[kElems];
+  simulator->get_neon_register<T, SIZE>(Vm, src);
+  simulator->get_neon_register<S_T, SIZE>(Vn, shift);
+  for (int i = 0; i < kElems; i++) {
+    // Take the lowest 8 bits of the shift value (see F6.1.217 of the ARM
+    // Architecture Reference Manual ARMv8) as a signed 8-bit value.
+    int8_t shift_value = static_cast<int8_t>(shift[i]);
+    int size = static_cast<int>(sizeof(T) * 8);
+    // A C++ shift by an amount greater than or equal to the element size is
+    // undefined behavior; handle that case and emulate what the hardware does.
+    if (shift_value >= 0) {
+      // Zero out the result if the shift value reaches the element size.
+      if (shift_value >= size) {
+        src[i] = 0;
+      } else {
+        src[i] <<= shift_value;
+      }
+    } else {
+      // Right shifts that reach the element size always end up with -1.
+      if (-shift_value >= size) {
+        src[i] = -1;
+      } else {
+        src[i] = ArithmeticShiftRight(src[i], -shift_value);
+      }
+    }
+  }
+  simulator->set_neon_register<T, SIZE>(Vd, src);
+}
+
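As a sanity check on the per-lane rule emulated above — positive counts shift left, negative counts shift right, out-of-range counts saturate — here is a standalone sketch for a single signed lane (not simulator code; it assumes arithmetic `>>` for negative ints, as on mainstream compilers):

```cpp
#include <cstdint>
#include <cstdio>

// Same per-lane rule as ShiftByRegister for one int8_t lane: positive counts
// shift left (zeroing when out of range), negative counts shift right
// arithmetically (saturating to -1 when out of range).
int8_t ShiftLane(int8_t value, int8_t count) {
  const int size = 8;
  if (count >= 0) {
    return count >= size ? int8_t{0} : static_cast<int8_t>(value << count);
  }
  int amount = -count;
  // Assumes arithmetic right shift of negative ints (true on GCC/Clang/MSVC).
  return amount >= size ? int8_t{-1} : static_cast<int8_t>(value >> amount);
}

int main() {
  std::printf("%d %d %d\n",
              ShiftLane(1, 3),      // 8: shift left by 3.
              ShiftLane(-64, -2),   // -16: arithmetic shift right by 2.
              ShiftLane(-64, -9));  // -1: out-of-range right shift saturates.
  return 0;
}
```
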
 template <typename T, int SIZE>
 void CompareEqual(Simulator* simulator, int Vd, int Vm, int Vn) {
   static const int kElems = SIZE / sizeof(T);
@@ -4255,6 +4289,25 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
           }
           break;
         }
+        case 0x4: {
+          // vshl s<size> Qd, Qm, Qn.
+          NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+          switch (size) {
+            case Neon8:
+              ShiftByRegister<int8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
+              break;
+            case Neon16:
+              ShiftByRegister<int16_t, int16_t, kSimd128Size>(this, Vd, Vm, Vn);
+              break;
+            case Neon32:
+              ShiftByRegister<int32_t, int32_t, kSimd128Size>(this, Vd, Vm, Vn);
+              break;
+            default:
+              UNREACHABLE();
+              break;
+          }
+          break;
+        }
         case 0x6: {
           // vmin/vmax.s<size> Qd, Qm, Qn.
           NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
@@ -4644,6 +4697,27 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
           }
           break;
         }
+        case 0x4: {
+          // vshl s<size> Qd, Qm, Qn.
+          NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+          switch (size) {
+            case Neon8:
+              ShiftByRegister<uint8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
+              break;
+            case Neon16:
+              ShiftByRegister<uint16_t, int16_t, kSimd128Size>(this, Vd, Vm,
+                                                               Vn);
+              break;
+            case Neon32:
+              ShiftByRegister<uint32_t, int32_t, kSimd128Size>(this, Vd, Vm,
+                                                               Vn);
+              break;
+            default:
+              UNREACHABLE();
+              break;
+          }
+          break;
+        }
         case 0x6: {
           // vmin/vmax.u<size> Qd, Qm, Qn.
           NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
diff --git a/deps/v8/src/execution/arm64/pointer-auth-arm64.cc b/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
new file mode 100644
index 00000000000000..cb8ff2c7401955
--- /dev/null
+++ b/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
@@ -0,0 +1,269 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/arm64/simulator-arm64.h"
+
+#if defined(USE_SIMULATOR)
+
+namespace v8 {
+namespace internal {
+
+// Randomly generated example key for simulating only.
+const Simulator::PACKey Simulator::kPACKeyIA = {0xc31718727de20f71,
+                                                0xab9fd4e14b2fec51, 0};
+
+namespace {
+
+uint64_t GetNibble(uint64_t in_data, int position) {
+  return (in_data >> position) & 0xf;
+}
+
+uint64_t PACCellShuffle(uint64_t in_data) {
+  static int in_positions[16] = {52, 24, 44, 0,  28, 48, 4,  40,
+                                 32, 12, 56, 20, 8,  36, 16, 60};
+  uint64_t out_data = 0;
+  for (int i = 0; i < 16; ++i) {
+    out_data |= GetNibble(in_data, in_positions[i]) << (4 * i);
+  }
+  return out_data;
+}
+
+uint64_t PACCellInvShuffle(uint64_t in_data) {
+  static int in_positions[16] = {12, 24, 48, 36, 56, 44, 4,  16,
+                                 32, 52, 28, 8,  20, 0,  40, 60};
+  uint64_t out_data = 0;
+  for (int i = 0; i < 16; ++i) {
+    out_data |= GetNibble(in_data, in_positions[i]) << (4 * i);
+  }
+  return out_data;
+}
+
+uint64_t RotCell(uint64_t in_cell, int amount) {
+  DCHECK((amount >= 1) && (amount <= 3));
+
+  in_cell &= 0xf;
+  uint8_t temp = in_cell << 4 | in_cell;
+  return static_cast<uint64_t>((temp >> (4 - amount)) & 0xf);
+}
+
+uint64_t PACMult(uint64_t s_input) {
+  uint8_t t0;
+  uint8_t t1;
+  uint8_t t2;
+  uint8_t t3;
+  uint64_t s_output = 0;
+
+  for (int i = 0; i < 4; ++i) {
+    uint8_t s12 = (s_input >> (4 * (i + 12))) & 0xf;
+    uint8_t s8 = (s_input >> (4 * (i + 8))) & 0xf;
+    uint8_t s4 = (s_input >> (4 * (i + 4))) & 0xf;
+    uint8_t s0 = (s_input >> (4 * (i + 0))) & 0xf;
+
+    t0 = RotCell(s8, 1) ^ RotCell(s4, 2) ^ RotCell(s0, 1);
+    t1 = RotCell(s12, 1) ^ RotCell(s4, 1) ^ RotCell(s0, 2);
+    t2 = RotCell(s12, 2) ^ RotCell(s8, 1) ^ RotCell(s0, 1);
+    t3 = RotCell(s12, 1) ^ RotCell(s8, 2) ^ RotCell(s4, 1);
+
+    s_output |= static_cast<uint64_t>(t3) << (4 * (i + 0));
+    s_output |= static_cast<uint64_t>(t2) << (4 * (i + 4));
+    s_output |= static_cast<uint64_t>(t1) << (4 * (i + 8));
+    s_output |= static_cast<uint64_t>(t0) << (4 * (i + 12));
+  }
+  return s_output;
+}
+
+uint64_t PACSub(uint64_t t_input) {
+  uint64_t t_output = 0;
+  uint8_t substitutions[16] = {0xb, 0x6, 0x8, 0xf, 0xc, 0x0, 0x9, 0xe,
+                               0x3, 0x7, 0x4, 0x5, 0xd, 0x2, 0x1, 0xa};
+  for (int i = 0; i < 16; ++i) {
+    unsigned index = ((t_input >> (4 * i)) & 0xf);
+    t_output |= static_cast<uint64_t>(substitutions[index]) << (4 * i);
+  }
+  return t_output;
+}
+
+uint64_t PACInvSub(uint64_t t_input) {
+  uint64_t t_output = 0;
+  uint8_t substitutions[16] = {0x5, 0xe, 0xd, 0x8, 0xa, 0xb, 0x1, 0x9,
+                               0x2, 0x6, 0xf, 0x0, 0x4, 0xc, 0x7, 0x3};
+  for (int i = 0; i < 16; ++i) {
+    unsigned index = ((t_input >> (4 * i)) & 0xf);
+    t_output |= static_cast<uint64_t>(substitutions[index]) << (4 * i);
+  }
+  return t_output;
+}
+
+uint64_t TweakCellInvRot(uint64_t in_cell) {
+  uint64_t out_cell = 0;
+  out_cell |= (in_cell & 0x7) << 1;
+  out_cell |= (in_cell & 0x1) ^ ((in_cell >> 3) & 0x1);
+  return out_cell;
+}
+
+uint64_t TweakInvShuffle(uint64_t in_data) {
+  uint64_t out_data = 0;
+  out_data |= TweakCellInvRot(in_data >> 48) << 0;
+  out_data |= ((in_data >> 52) & 0xf) << 4;
+  out_data |= ((in_data >> 20) & 0xff) << 8;
+  out_data |= ((in_data >> 0) & 0xff) << 16;
+  out_data |= TweakCellInvRot(in_data >> 8) << 24;
+  out_data |= ((in_data >> 12) & 0xf) << 28;
+  out_data |= TweakCellInvRot(in_data >> 28) << 32;
+  out_data |= TweakCellInvRot(in_data >> 60) << 36;
+  out_data |= TweakCellInvRot(in_data >> 56) << 40;
+  out_data |= TweakCellInvRot(in_data >> 16) << 44;
+  out_data |= ((in_data >> 32) & 0xfff) << 48;
+  out_data |= TweakCellInvRot(in_data >> 44) << 60;
+  return out_data;
+}
+
+uint64_t TweakCellRot(uint64_t in_cell) {
+  uint64_t out_cell = 0;
+  out_cell |= ((in_cell & 0x1) ^ ((in_cell >> 1) & 0x1)) << 3;
+  out_cell |= (in_cell >> 0x1) & 0x7;
+  return out_cell;
+}
+
+uint64_t TweakShuffle(uint64_t in_data) {
+  uint64_t out_data = 0;
+  out_data |= ((in_data >> 16) & 0xff) << 0;
+  out_data |= TweakCellRot(in_data >> 24) << 8;
+  out_data |= ((in_data >> 28) & 0xf) << 12;
+  out_data |= TweakCellRot(in_data >> 44) << 16;
+  out_data |= ((in_data >> 8) & 0xff) << 20;
+  out_data |= TweakCellRot(in_data >> 32) << 28;
+  out_data |= ((in_data >> 48) & 0xfff) << 32;
+  out_data |= TweakCellRot(in_data >> 60) << 44;
+  out_data |= TweakCellRot(in_data >> 0) << 48;
+  out_data |= ((in_data >> 4) & 0xf) << 52;
+  out_data |= TweakCellRot(in_data >> 40) << 56;
+  out_data |= TweakCellRot(in_data >> 36) << 60;
+  return out_data;
+}
+
+}  // namespace
+
+// For a description of QARMA see:
+// The QARMA Block Cipher Family, Roberto Avanzi, Qualcomm Product Security
+// Initiative.
+// The pseudocode is available in ARM DDI 0487D.b, J1-6946.
+uint64_t Simulator::ComputePAC(uint64_t data, uint64_t context, PACKey key) {
+  uint64_t key0 = key.high;
+  uint64_t key1 = key.low;
+  const uint64_t RC[5] = {0x0000000000000000, 0x13198a2e03707344,
+                          0xa4093822299f31d0, 0x082efa98ec4e6c89,
+                          0x452821e638d01377};
+  const uint64_t Alpha = 0xc0ac29B7c97c50dd;
+
+  uint64_t modk0 = ((key0 & 0x1) << 63) | ((key0 >> 2) << 1) |
+                   ((key0 >> 63) ^ ((key0 >> 1) & 0x1));
+  uint64_t running_mod = context;
+  uint64_t working_val = data ^ key0;
+  uint64_t round_key;
+  for (int i = 0; i < 5; ++i) {
+    round_key = key1 ^ running_mod;
+    working_val ^= round_key;
+    working_val ^= RC[i];
+    if (i > 0) {
+      working_val = PACCellShuffle(working_val);
+      working_val = PACMult(working_val);
+    }
+    working_val = PACSub(working_val);
+    running_mod = TweakShuffle(running_mod);
+  }
+
+  round_key = modk0 ^ running_mod;
+  working_val ^= round_key;
+  working_val = PACCellShuffle(working_val);
+  working_val = PACMult(working_val);
+  working_val = PACSub(working_val);
+  working_val = PACCellShuffle(working_val);
+  working_val = PACMult(working_val);
+  working_val ^= key1;
+  working_val = PACCellInvShuffle(working_val);
+  working_val = PACInvSub(working_val);
+  working_val = PACMult(working_val);
+  working_val = PACCellInvShuffle(working_val);
+  working_val ^= key0;
+  working_val ^= running_mod;
+
+  for (int i = 0; i < 5; ++i) {
+    working_val = PACInvSub(working_val);
+    if (i < 4) {
+      working_val = PACMult(working_val);
+      working_val = PACCellInvShuffle(working_val);
+    }
+    running_mod = TweakInvShuffle(running_mod);
+    round_key = key1 ^ running_mod;
+    working_val ^= RC[4 - i];
+    working_val ^= round_key;
+    working_val ^= Alpha;
+  }
+
+  return working_val ^ modk0;
+}
+
+// The TTBR is selected by bit 63 or 55 depending on TBI for pointers without
+// codes, but is always 55 once a PAC code is added to a pointer. For this
+// reason, it must be calculated at the call site.
+uint64_t Simulator::CalculatePACMask(uint64_t ptr, PointerType type, int ttbr) {
+  int bottom_pac_bit = GetBottomPACBit(ptr, ttbr);
+  int top_pac_bit = GetTopPACBit(ptr, type);
+  return unsigned_bitextract_64(top_pac_bit, bottom_pac_bit,
+                                0xffffffffffffffff & ~kTTBRMask)
+         << bottom_pac_bit;
+}
+
+uint64_t Simulator::AuthPAC(uint64_t ptr, uint64_t context, PACKey key,
+                            PointerType type) {
+  DCHECK((key.number == 0) || (key.number == 1));
+
+  uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
+  uint64_t original_ptr =
+      ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
+
+  uint64_t pac = ComputePAC(original_ptr, context, key);
+
+  uint64_t error_code = 1 << key.number;
+  if ((pac & pac_mask) == (ptr & pac_mask)) {
+    return original_ptr;
+  } else {
+    int error_lsb = GetTopPACBit(ptr, type) - 2;
+    uint64_t error_mask = UINT64_C(0x3) << error_lsb;
+    return (original_ptr & ~error_mask) | (error_code << error_lsb);
+  }
+}
+
+uint64_t Simulator::AddPAC(uint64_t ptr, uint64_t context, PACKey key,
+                           PointerType type) {
+  int top_pac_bit = GetTopPACBit(ptr, type);
+
+  DCHECK(HasTBI(ptr, type));
+  int ttbr = (ptr >> 55) & 1;
+  uint64_t pac_mask = CalculatePACMask(ptr, type, ttbr);
+  uint64_t ext_ptr = (ttbr == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
+
+  uint64_t pac = ComputePAC(ext_ptr, context, key);
+
+  // If the pointer isn't all zeroes or all ones in the PAC bitfield, corrupt
+  // the resulting code.
+  if (((ptr & (pac_mask | kTTBRMask)) != 0x0) &&
+      ((~ptr & (pac_mask | kTTBRMask)) != 0x0)) {
+    pac ^= UINT64_C(1) << (top_pac_bit - 1);
+  }
+
+  uint64_t ttbr_shifted = static_cast<uint64_t>(ttbr) << 55;
+  return (pac & pac_mask) | ttbr_shifted | (ptr & ~pac_mask);
+}
+
+uint64_t Simulator::StripPAC(uint64_t ptr, PointerType type) {
+  uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
+  return ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // USE_SIMULATOR
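The helpers above are meant to round-trip: AddPAC embeds an authentication code in the pointer's unused top bits, AuthPAC removes it when key and context match, and a mismatch yields a pointer poisoned with an error code. A toy model of just that bit layout (a stand-in hash instead of QARMA, 48-bit addresses, PAC in bits 48..54):

```cpp
#include <cstdint>
#include <cstdio>

// Toy model only: the "PAC" lives in bits 48..54 of a 48-bit-address pointer.
const uint64_t kPacMask =
    ((uint64_t{1} << 55) - 1) & ~((uint64_t{1} << 48) - 1);

uint64_t ToyPac(uint64_t ptr, uint64_t context) {
  // Stand-in mixing function; the real thing is the QARMA-based ComputePAC.
  return ((ptr ^ (context * 0x9e3779b97f4a7c15ull)) >> 13) & kPacMask;
}

uint64_t ToyAddPAC(uint64_t ptr, uint64_t context) {
  return (ptr & ~kPacMask) | ToyPac(ptr & ~kPacMask, context);
}

uint64_t ToyAuthPAC(uint64_t ptr, uint64_t context) {
  uint64_t stripped = ptr & ~kPacMask;
  if ((ptr & kPacMask) == ToyPac(stripped, context)) return stripped;
  return stripped | (uint64_t{1} << 53);  // Poison with an "error code" bit.
}

int main() {
  uint64_t ptr = 0x00007f1234567890ull, ctx = 42;
  uint64_t signed_ptr = ToyAddPAC(ptr, ctx);
  std::printf("auth ok:  %d\n", ToyAuthPAC(signed_ptr, ctx) == ptr);
  std::printf("auth bad: %d\n", ToyAuthPAC(signed_ptr, ctx + 1) == ptr);
  return 0;
}
```
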
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 8618dd85517c43..71fedd5b2fff5c 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -390,14 +390,14 @@ using SimulatorRuntimeCall_ReturnPtr = int64_t (*)(int64_t arg0, int64_t arg1,
                                                    int64_t arg2, int64_t arg3,
                                                    int64_t arg4, int64_t arg5,
                                                    int64_t arg6, int64_t arg7,
-                                                   int64_t arg8);
+                                                   int64_t arg8, int64_t arg9);
 #endif
 
 using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
                                             int64_t arg2, int64_t arg3,
                                             int64_t arg4, int64_t arg5,
                                             int64_t arg6, int64_t arg7,
-                                            int64_t arg8);
+                                            int64_t arg8, int64_t arg9);
 
 using SimulatorRuntimeCompareCall = int64_t (*)(double arg1, double arg2);
 using SimulatorRuntimeFPFPCall = double (*)(double arg1, double arg2);
@@ -445,7 +445,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
   const int64_t arg6 = xreg(6);
   const int64_t arg7 = xreg(7);
   const int64_t arg8 = stack_pointer[0];
-  STATIC_ASSERT(kMaxCParameters == 9);
+  const int64_t arg9 = stack_pointer[1];
+  STATIC_ASSERT(kMaxCParameters == 10);
 
   switch (redirection->type()) {
     default:
@@ -477,14 +478,14 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
           ", "
           "0x%016" PRIx64 ", 0x%016" PRIx64
           ", "
-          "0x%016" PRIx64,
-          arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+          "0x%016" PRIx64 ", 0x%016" PRIx64,
+          arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
 
       SimulatorRuntimeCall_ReturnPtr target =
           reinterpret_cast<SimulatorRuntimeCall_ReturnPtr>(external);
 
       int64_t result =
-          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
       TraceSim("Returned: 0x%16\n", result);
 #ifdef DEBUG
       CorruptAllCallerSavedCPURegisters();
@@ -512,12 +513,12 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
           ", "
           "0x%016" PRIx64 ", 0x%016" PRIx64
           ", "
-          "0x%016" PRIx64,
-          arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+          "0x%016" PRIx64 ", 0x%016" PRIx64,
+          arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
       SimulatorRuntimeCall target =
           reinterpret_cast<SimulatorRuntimeCall>(external);
       ObjectPair result =
-          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
       TraceSim("Returned: {%p, %p}\n", reinterpret_cast<void*>(result.x),
                reinterpret_cast<void*>(result.y));
 #ifdef DEBUG
@@ -3037,11 +3038,31 @@ bool Simulator::FPProcessNaNs(Instruction* instr) {
   return done;
 }
 
+// clang-format off
+#define PAUTH_SYSTEM_MODES(V)                            \
+  V(A1716, 17, xreg(16),                      kPACKeyIA) \
+  V(ASP,   30, xreg(31, Reg31IsStackPointer), kPACKeyIA)
+// clang-format on
+
 void Simulator::VisitSystem(Instruction* instr) {
   // Some system instructions hijack their Op and Cp fields to represent a
   // range of immediates instead of indicating a different instruction. This
   // makes the decoding tricky.
-  if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+  if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) {
+    switch (instr->Mask(SystemPAuthMask)) {
+#define DEFINE_PAUTH_FUNCS(SUFFIX, DST, MOD, KEY)                     \
+  case PACI##SUFFIX:                                                  \
+    set_xreg(DST, AddPAC(xreg(DST), MOD, KEY, kInstructionPointer));  \
+    break;                                                            \
+  case AUTI##SUFFIX:                                                  \
+    set_xreg(DST, AuthPAC(xreg(DST), MOD, KEY, kInstructionPointer)); \
+    break;
+
+      PAUTH_SYSTEM_MODES(DEFINE_PAUTH_FUNCS)
+#undef DEFINE_PAUTH_FUNCS
+#undef PAUTH_SYSTEM_MODES
+    }
+  } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
     switch (instr->Mask(SystemSysRegMask)) {
       case MRS: {
         switch (instr->ImmSystemRegister()) {
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.h b/deps/v8/src/execution/arm64/simulator-arm64.h
index ca1cef61aef9c5..4a493ec6963335 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.h
+++ b/deps/v8/src/execution/arm64/simulator-arm64.h
@@ -1273,6 +1273,45 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
   static inline const char* VRegNameForCode(unsigned code);
   static inline int CodeFromName(const char* name);
 
+  enum PointerType { kDataPointer, kInstructionPointer };
+
+  struct PACKey {
+    uint64_t high;
+    uint64_t low;
+    int number;
+  };
+
+  static const PACKey kPACKeyIA;
+
+  // Current implementation is that all pointers are tagged.
+  static bool HasTBI(uint64_t ptr, PointerType type) {
+    USE(ptr, type);
+    return true;
+  }
+
+  // Current implementation uses 48-bit virtual addresses.
+  static int GetBottomPACBit(uint64_t ptr, int ttbr) {
+    USE(ptr, ttbr);
+    DCHECK((ttbr == 0) || (ttbr == 1));
+    return 48;
+  }
+
+  // The top PAC bit is 55 for the purposes of relative bit fields with TBI;
+  // however, bit 55 is the TTBR bit regardless of TBI, so it isn't part of
+  // the PAC codes in pointers.
+  static int GetTopPACBit(uint64_t ptr, PointerType type) {
+    return HasTBI(ptr, type) ? 55 : 63;
+  }
+
+  // Armv8.3 Pointer authentication helpers.
+  static uint64_t CalculatePACMask(uint64_t ptr, PointerType type, int ext_bit);
+  static uint64_t ComputePAC(uint64_t data, uint64_t context, PACKey key);
+  static uint64_t AuthPAC(uint64_t ptr, uint64_t context, PACKey key,
+                          PointerType type);
+  static uint64_t AddPAC(uint64_t ptr, uint64_t context, PACKey key,
+                         PointerType type);
+  static uint64_t StripPAC(uint64_t ptr, PointerType type);
+
  protected:
   // Simulation helpers ------------------------------------
   bool ConditionPassed(Condition cond) {
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 126cb9530ee7b9..3b334739da389c 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -8,6 +8,7 @@
 #include <sstream>
 
 #include "src/base/bits.h"
+#include "src/codegen/interface-descriptors.h"
 #include "src/codegen/macro-assembler.h"
 #include "src/codegen/register-configuration.h"
 #include "src/codegen/safepoint-table.h"
@@ -270,6 +271,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
       low_bound_(sp),
       high_bound_(js_entry_sp),
       top_frame_type_(StackFrame::NONE),
+      top_context_address_(kNullAddress),
       external_callback_scope_(isolate->external_callback_scope()),
       top_link_register_(lr) {
   StackFrame::State state;
@@ -342,6 +344,13 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
       if (type != StackFrame::INTERPRETED) {
         advance_frame = true;
       }
+      MSAN_MEMORY_IS_INITIALIZED(
+          fp + CommonFrameConstants::kContextOrFrameTypeOffset,
+          kSystemPointerSize);
+      Address type_or_context_address =
+          Memory<Address>(fp + CommonFrameConstants::kContextOrFrameTypeOffset);
+      if (!StackFrame::IsTypeMarker(type_or_context_address))
+        top_context_address_ = type_or_context_address;
     } else {
       // Mark the frame as OPTIMIZED if we cannot determine its type.
       // We chose OPTIMIZED rather than INTERPRETED because it's closer to
@@ -579,6 +588,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
             return OPTIMIZED;
           case Code::JS_TO_WASM_FUNCTION:
             return JS_TO_WASM;
+          case Code::JS_TO_JS_FUNCTION:
+            return STUB;
           case Code::C_WASM_ENTRY:
             return C_WASM_ENTRY;
           case Code::WASM_FUNCTION:
@@ -1136,11 +1147,11 @@ void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
   DCHECK(functions->empty());
   Code code = LookupCode();
   int offset = static_cast<int>(pc() - code.InstructionStart());
-  AbstractCode abstract_code = AbstractCode::cast(code);
+  Handle<AbstractCode> abstract_code(AbstractCode::cast(code), isolate());
   Handle<FixedArray> params = GetParameters();
   FrameSummary::JavaScriptFrameSummary summary(
-      isolate(), receiver(), function(), abstract_code, offset, IsConstructor(),
-      *params);
+      isolate(), receiver(), function(), *abstract_code, offset,
+      IsConstructor(), *params);
   functions->push_back(summary);
 }
 
@@ -1813,10 +1824,11 @@ void InterpretedFrame::WriteInterpreterRegister(int register_index,
 
 void InterpretedFrame::Summarize(std::vector<FrameSummary>* functions) const {
   DCHECK(functions->empty());
-  AbstractCode abstract_code = AbstractCode::cast(GetBytecodeArray());
+  Handle<AbstractCode> abstract_code(AbstractCode::cast(GetBytecodeArray()),
+                                     isolate());
   Handle<FixedArray> params = GetParameters();
   FrameSummary::JavaScriptFrameSummary summary(
-      isolate(), receiver(), function(), abstract_code, GetBytecodeOffset(),
+      isolate(), receiver(), function(), *abstract_code, GetBytecodeOffset(),
       IsConstructor(), *params);
   functions->push_back(summary);
 }
@@ -2258,5 +2270,161 @@ InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
   }
   return entry;
 }
+
+// Frame layout helper class implementation.
+// -------------------------------------------------------------------------
+
+namespace {
+
+int ArgumentPaddingSlots(int arg_count) {
+  return ShouldPadArguments(arg_count) ? 1 : 0;
+}
+
+// Some architectures need to push padding together with the TOS register
+// in order to maintain stack alignment.
+constexpr int TopOfStackRegisterPaddingSlots() { return kPadArguments ? 1 : 0; }
+
+bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode) {
+  switch (mode) {
+    case BuiltinContinuationMode::STUB:
+    case BuiltinContinuationMode::JAVASCRIPT:
+      return false;
+    case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
+    case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
+      return true;
+  }
+  UNREACHABLE();
+}
+
+}  // namespace
+
+InterpretedFrameInfo::InterpretedFrameInfo(int parameters_count_with_receiver,
+                                           int translation_height,
+                                           bool is_topmost,
+                                           FrameInfoKind frame_info_kind) {
+  const int locals_count = translation_height;
+
+  register_stack_slot_count_ =
+      InterpreterFrameConstants::RegisterStackSlotCount(locals_count);
+
+  static constexpr int kTheAccumulator = 1;
+  static constexpr int kTopOfStackPadding = TopOfStackRegisterPaddingSlots();
+  int maybe_additional_slots =
+      (is_topmost || frame_info_kind == FrameInfoKind::kConservative)
+          ? (kTheAccumulator + kTopOfStackPadding)
+          : 0;
+  frame_size_in_bytes_without_fixed_ =
+      (register_stack_slot_count_ + maybe_additional_slots) *
+      kSystemPointerSize;
+
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by InterpreterFrameConstants. This will include
+  // argument padding, when needed.
+  const int parameter_padding_slots =
+      ArgumentPaddingSlots(parameters_count_with_receiver);
+  const int fixed_frame_size =
+      InterpreterFrameConstants::kFixedFrameSize +
+      (parameters_count_with_receiver + parameter_padding_slots) *
+          kSystemPointerSize;
+  frame_size_in_bytes_ = frame_size_in_bytes_without_fixed_ + fixed_frame_size;
+}
+
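To make the arithmetic above concrete, a back-of-the-envelope sketch with illustrative stand-in constants (the real values live in InterpreterFrameConstants and are architecture-dependent; register-file rounding and argument padding are omitted here):

```cpp
#include <cstdio>

int main() {
  const int kSystemPointerSize = 8;  // Stand-in for a 64-bit target.
  const int kFixedFrameSize = 6 * kSystemPointerSize;  // Illustrative only.

  const int locals_count = 10;  // Interpreter register slots.
  const int params_with_receiver = 3;
  const bool is_topmost = true;

  // Topmost frames reserve one extra slot for the accumulator.
  const int accumulator_slots = is_topmost ? 1 : 0;
  const int without_fixed =
      (locals_count + accumulator_slots) * kSystemPointerSize;
  const int fixed =
      kFixedFrameSize + params_with_receiver * kSystemPointerSize;

  std::printf("frame size = %d bytes\n", without_fixed + fixed);
  return 0;
}
```
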
+ArgumentsAdaptorFrameInfo::ArgumentsAdaptorFrameInfo(int translation_height) {
+  // Note: This is according to the Translation's notion of 'parameters',
+  // which differs from the SharedFunctionInfo's, e.g. by including the receiver.
+  const int parameters_count = translation_height;
+  frame_size_in_bytes_without_fixed_ =
+      (parameters_count + ArgumentPaddingSlots(parameters_count)) *
+      kSystemPointerSize;
+  frame_size_in_bytes_ = frame_size_in_bytes_without_fixed_ +
+                         ArgumentsAdaptorFrameConstants::kFixedFrameSize;
+}
+
+ConstructStubFrameInfo::ConstructStubFrameInfo(int translation_height,
+                                               bool is_topmost,
+                                               FrameInfoKind frame_info_kind) {
+  // Note: This is according to the Translation's notion of 'parameters',
+  // which differs from the SharedFunctionInfo's, e.g. by including the receiver.
+  const int parameters_count = translation_height;
+
+  // If the construct frame appears to be topmost, we must ensure that the
+  // value of the result register is preserved during continuation execution.
+  // We do this here by "pushing" the result of the constructor function onto
+  // the top of the reconstructed stack and popping it in
+  // {Builtins::kNotifyDeoptimized}.
+
+  static constexpr int kTopOfStackPadding = TopOfStackRegisterPaddingSlots();
+  static constexpr int kTheResult = 1;
+  const int argument_padding = ArgumentPaddingSlots(parameters_count);
+
+  const int adjusted_height =
+      (is_topmost || frame_info_kind == FrameInfoKind::kConservative)
+          ? parameters_count + argument_padding + kTheResult +
+                kTopOfStackPadding
+          : parameters_count + argument_padding;
+  frame_size_in_bytes_without_fixed_ = adjusted_height * kSystemPointerSize;
+  frame_size_in_bytes_ = frame_size_in_bytes_without_fixed_ +
+                         ConstructFrameConstants::kFixedFrameSize;
+}
+
+BuiltinContinuationFrameInfo::BuiltinContinuationFrameInfo(
+    int translation_height,
+    const CallInterfaceDescriptor& continuation_descriptor,
+    const RegisterConfiguration* register_config, bool is_topmost,
+    DeoptimizeKind deopt_kind, BuiltinContinuationMode continuation_mode,
+    FrameInfoKind frame_info_kind) {
+  const bool is_conservative = frame_info_kind == FrameInfoKind::kConservative;
+
+  // Note: This is according to the Translation's notion of 'parameters',
+  // which differs from the SharedFunctionInfo's, e.g. by including the receiver.
+  const int parameters_count = translation_height;
+  frame_has_result_stack_slot_ =
+      !is_topmost || deopt_kind == DeoptimizeKind::kLazy;
+  const int result_slot_count =
+      (frame_has_result_stack_slot_ || is_conservative) ? 1 : 0;
+
+  const int exception_slot_count =
+      (BuiltinContinuationModeIsWithCatch(continuation_mode) || is_conservative)
+          ? 1
+          : 0;
+
+  const int allocatable_register_count =
+      register_config->num_allocatable_general_registers();
+  const int padding_slot_count =
+      BuiltinContinuationFrameConstants::PaddingSlotCount(
+          allocatable_register_count);
+
+  const int register_parameter_count =
+      continuation_descriptor.GetRegisterParameterCount();
+  translated_stack_parameter_count_ =
+      parameters_count - register_parameter_count;
+  stack_parameter_count_ = translated_stack_parameter_count_ +
+                           result_slot_count + exception_slot_count;
+  const int stack_param_pad_count =
+      ArgumentPaddingSlots(stack_parameter_count_);
+
+  // If the builtins frame appears to be topmost, we must ensure that the
+  // value of the result register is preserved during continuation execution.
+  // We do this here by "pushing" the result of the callback function onto
+  // the top of the reconstructed stack and popping it in
+  // {Builtins::kNotifyDeoptimized}.
+  static constexpr int kTopOfStackPadding = TopOfStackRegisterPaddingSlots();
+  static constexpr int kTheResult = 1;
+  const int push_result_count =
+      (is_topmost || is_conservative) ? kTheResult + kTopOfStackPadding : 0;
+
+  frame_size_in_bytes_ =
+      kSystemPointerSize * (stack_parameter_count_ + stack_param_pad_count +
+                            allocatable_register_count + padding_slot_count +
+                            push_result_count) +
+      BuiltinContinuationFrameConstants::kFixedFrameSize;
+
+  frame_size_in_bytes_above_fp_ =
+      kSystemPointerSize * (allocatable_register_count + padding_slot_count +
+                            push_result_count) +
+      (BuiltinContinuationFrameConstants::kFixedFrameSize -
+       BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 1f83984f974e04..d1e7a7890d65ec 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -1285,6 +1285,7 @@ class SafeStackFrameIterator : public StackFrameIteratorBase {
   void Advance();
 
   StackFrame::Type top_frame_type() const { return top_frame_type_; }
+  Address top_context_address() const { return top_context_address_; }
 
  private:
   void AdvanceOneFrame();
@@ -1308,9 +1309,178 @@ class SafeStackFrameIterator : public StackFrameIteratorBase {
   const Address low_bound_;
   const Address high_bound_;
   StackFrame::Type top_frame_type_;
+  Address top_context_address_;
   ExternalCallbackScope* external_callback_scope_;
   Address top_link_register_;
 };
+
+// Frame layout helper classes. Used by the deoptimizer and instruction
+// selector.
+// -------------------------------------------------------------------------
+
+// How to calculate the frame layout information. Precise, when all information
+// is available during deoptimization. Conservative, when an overapproximation
+// is fine.
+// TODO(jgruber): Investigate whether the conservative kind can be removed. It
+// seems possible: 1. is_topmost should be known through the outer_state chain
+// of FrameStateDescriptor; 2. the deopt_kind may be a property of the bailout
+// id; 3. for continuation_mode, we only care whether it is a mode with catch,
+// and that is likewise known at compile-time.
+// There is nothing specific blocking this; the investigation just requires
+// time, and the exact frame height at compile-time is not that important.
+enum class FrameInfoKind {
+  kPrecise,
+  kConservative,
+};
+
+// Used by the deoptimizer. Corresponds to frame kinds:
+enum class BuiltinContinuationMode {
+  STUB,                        // BuiltinContinuationFrame
+  JAVASCRIPT,                  // JavaScriptBuiltinContinuationFrame
+  JAVASCRIPT_WITH_CATCH,       // JavaScriptBuiltinContinuationWithCatchFrame
+  JAVASCRIPT_HANDLE_EXCEPTION  // JavaScriptBuiltinContinuationWithCatchFrame
+};
+
+class InterpretedFrameInfo {
+ public:
+  static InterpretedFrameInfo Precise(int parameters_count_with_receiver,
+                                      int translation_height, bool is_topmost) {
+    return {parameters_count_with_receiver, translation_height, is_topmost,
+            FrameInfoKind::kPrecise};
+  }
+
+  static InterpretedFrameInfo Conservative(int parameters_count_with_receiver,
+                                           int locals_count) {
+    return {parameters_count_with_receiver, locals_count, false,
+            FrameInfoKind::kConservative};
+  }
+
+  uint32_t register_stack_slot_count() const {
+    return register_stack_slot_count_;
+  }
+  uint32_t frame_size_in_bytes_without_fixed() const {
+    return frame_size_in_bytes_without_fixed_;
+  }
+  uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
+
+ private:
+  InterpretedFrameInfo(int parameters_count_with_receiver,
+                       int translation_height, bool is_topmost,
+                       FrameInfoKind frame_info_kind);
+
+  uint32_t register_stack_slot_count_;
+  uint32_t frame_size_in_bytes_without_fixed_;
+  uint32_t frame_size_in_bytes_;
+};
+
+class ArgumentsAdaptorFrameInfo {
+ public:
+  static ArgumentsAdaptorFrameInfo Precise(int translation_height) {
+    return ArgumentsAdaptorFrameInfo{translation_height};
+  }
+
+  static ArgumentsAdaptorFrameInfo Conservative(int parameters_count) {
+    return ArgumentsAdaptorFrameInfo{parameters_count};
+  }
+
+  uint32_t frame_size_in_bytes_without_fixed() const {
+    return frame_size_in_bytes_without_fixed_;
+  }
+  uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
+
+ private:
+  explicit ArgumentsAdaptorFrameInfo(int translation_height);
+
+  uint32_t frame_size_in_bytes_without_fixed_;
+  uint32_t frame_size_in_bytes_;
+};
+
+class ConstructStubFrameInfo {
+ public:
+  static ConstructStubFrameInfo Precise(int translation_height,
+                                        bool is_topmost) {
+    return {translation_height, is_topmost, FrameInfoKind::kPrecise};
+  }
+
+  static ConstructStubFrameInfo Conservative(int parameters_count) {
+    return {parameters_count, false, FrameInfoKind::kConservative};
+  }
+
+  uint32_t frame_size_in_bytes_without_fixed() const {
+    return frame_size_in_bytes_without_fixed_;
+  }
+  uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
+
+ private:
+  ConstructStubFrameInfo(int translation_height, bool is_topmost,
+                         FrameInfoKind frame_info_kind);
+
+  uint32_t frame_size_in_bytes_without_fixed_;
+  uint32_t frame_size_in_bytes_;
+};
+
+// Used by BuiltinContinuationFrameInfo.
+class CallInterfaceDescriptor;
+class RegisterConfiguration;
+
+class BuiltinContinuationFrameInfo {
+ public:
+  static BuiltinContinuationFrameInfo Precise(
+      int translation_height,
+      const CallInterfaceDescriptor& continuation_descriptor,
+      const RegisterConfiguration* register_config, bool is_topmost,
+      DeoptimizeKind deopt_kind, BuiltinContinuationMode continuation_mode) {
+    return {translation_height,
+            continuation_descriptor,
+            register_config,
+            is_topmost,
+            deopt_kind,
+            continuation_mode,
+            FrameInfoKind::kPrecise};
+  }
+
+  static BuiltinContinuationFrameInfo Conservative(
+      int parameters_count,
+      const CallInterfaceDescriptor& continuation_descriptor,
+      const RegisterConfiguration* register_config) {
+    // It doesn't matter what we pass as is_topmost, deopt_kind and
+    // continuation_mode; these values are ignored in conservative mode.
+    return {parameters_count,
+            continuation_descriptor,
+            register_config,
+            false,
+            DeoptimizeKind::kEager,
+            BuiltinContinuationMode::STUB,
+            FrameInfoKind::kConservative};
+  }
+
+  bool frame_has_result_stack_slot() const {
+    return frame_has_result_stack_slot_;
+  }
+  uint32_t translated_stack_parameter_count() const {
+    return translated_stack_parameter_count_;
+  }
+  uint32_t stack_parameter_count() const { return stack_parameter_count_; }
+  uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
+  uint32_t frame_size_in_bytes_above_fp() const {
+    return frame_size_in_bytes_above_fp_;
+  }
+
+ private:
+  BuiltinContinuationFrameInfo(
+      int translation_height,
+      const CallInterfaceDescriptor& continuation_descriptor,
+      const RegisterConfiguration* register_config, bool is_topmost,
+      DeoptimizeKind deopt_kind, BuiltinContinuationMode continuation_mode,
+      FrameInfoKind frame_info_kind);
+
+  bool frame_has_result_stack_slot_;
+  uint32_t translated_stack_parameter_count_;
+  uint32_t stack_parameter_count_;
+  uint32_t frame_size_in_bytes_;
+  uint32_t frame_size_in_bytes_above_fp_;
+};
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/deps/v8/src/execution/futex-emulation.cc b/deps/v8/src/execution/futex-emulation.cc
index 74828079219bf3..8c3b54c2a7e6af 100644
--- a/deps/v8/src/execution/futex-emulation.cc
+++ b/deps/v8/src/execution/futex-emulation.cc
@@ -11,6 +11,7 @@
 #include "src/execution/isolate.h"
 #include "src/handles/handles-inl.h"
 #include "src/numbers/conversions.h"
+#include "src/objects/bigint.h"
 #include "src/objects/js-array-buffer-inl.h"
 #include "src/objects/objects-inl.h"
 
@@ -80,10 +81,9 @@ void AtomicsWaitWakeHandle::Wake() {
 
 enum WaitReturnValue : int { kOk = 0, kNotEqual = 1, kTimedOut = 2 };
 
-Object FutexEmulation::WaitJs(Isolate* isolate,
-                              Handle<JSArrayBuffer> array_buffer, size_t addr,
-                              int32_t value, double rel_timeout_ms) {
-  Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
+namespace {
+
+Object WaitJsTranslateReturn(Isolate* isolate, Object res) {
   if (res.IsSmi()) {
     int val = Smi::ToInt(res);
     switch (val) {
@@ -100,6 +100,22 @@ Object FutexEmulation::WaitJs(Isolate* isolate,
   return res;
 }
 
+}  // namespace
+
+Object FutexEmulation::WaitJs32(Isolate* isolate,
+                                Handle<JSArrayBuffer> array_buffer, size_t addr,
+                                int32_t value, double rel_timeout_ms) {
+  Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
+  return WaitJsTranslateReturn(isolate, res);
+}
+
+Object FutexEmulation::WaitJs64(Isolate* isolate,
+                                Handle<JSArrayBuffer> array_buffer, size_t addr,
+                                int64_t value, double rel_timeout_ms) {
+  Object res = Wait64(isolate, array_buffer, addr, value, rel_timeout_ms);
+  return WaitJsTranslateReturn(isolate, res);
+}
+
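The translation performed by WaitJsTranslateReturn follows the Atomics.wait contract: the integer results used by Wasm map onto the strings JavaScript callers receive. A minimal sketch, assuming the WaitReturnValue numbering above:

```cpp
// Raw integer wait results (WaitReturnValue) map onto the strings that
// Atomics.wait returns to JavaScript.
const char* TranslateWaitResult(int raw) {
  switch (raw) {
    case 0: return "ok";         // kOk
    case 1: return "not-equal";  // kNotEqual
    case 2: return "timed-out";  // kTimedOut
    default: return "unexpected";
  }
}
```
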
 Object FutexEmulation::Wait32(Isolate* isolate,
                               Handle<JSArrayBuffer> array_buffer, size_t addr,
                               int32_t value, double rel_timeout_ms) {
diff --git a/deps/v8/src/execution/futex-emulation.h b/deps/v8/src/execution/futex-emulation.h
index c6fee5c3f7c31a..052b3c9c17c029 100644
--- a/deps/v8/src/execution/futex-emulation.h
+++ b/deps/v8/src/execution/futex-emulation.h
@@ -117,8 +117,12 @@ class FutexEmulation : public AllStatic {
   // |rel_timeout_ms| can be Infinity.
   // If woken, return "ok", otherwise return "timed-out". The initial check and
   // the decision to wait happen atomically.
-  static Object WaitJs(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
-                       size_t addr, int32_t value, double rel_timeout_ms);
+  static Object WaitJs32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+                         size_t addr, int32_t value, double rel_timeout_ms);
+
+  // A version of WaitJs32 for int64_t values.
+  static Object WaitJs64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+                         size_t addr, int64_t value, double rel_timeout_ms);
 
   // Same as WaitJs above except it returns 0 (ok), 1 (not equal) and 2 (timed
   // out) as expected by Wasm.
diff --git a/deps/v8/src/execution/interrupts-scope.cc b/deps/v8/src/execution/interrupts-scope.cc
index cf8611f8d6878f..7bf98216853d77 100644
--- a/deps/v8/src/execution/interrupts-scope.cc
+++ b/deps/v8/src/execution/interrupts-scope.cc
@@ -9,7 +9,7 @@
 namespace v8 {
 namespace internal {
 
-InterruptsScope::InterruptsScope(Isolate* isolate, int intercept_mask,
+InterruptsScope::InterruptsScope(Isolate* isolate, intptr_t intercept_mask,
                                  Mode mode)
     : stack_guard_(isolate->stack_guard()),
       intercept_mask_(intercept_mask),
diff --git a/deps/v8/src/execution/interrupts-scope.h b/deps/v8/src/execution/interrupts-scope.h
index 3d74850a846254..6419ee2d99ffa9 100644
--- a/deps/v8/src/execution/interrupts-scope.h
+++ b/deps/v8/src/execution/interrupts-scope.h
@@ -18,7 +18,7 @@ class InterruptsScope {
  public:
   enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
 
-  V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, int intercept_mask,
+  V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, intptr_t intercept_mask,
                                     Mode mode);
 
   virtual ~InterruptsScope() {
@@ -33,8 +33,8 @@ class InterruptsScope {
 
  private:
   StackGuard* stack_guard_;
-  int intercept_mask_;
-  int intercepted_flags_;
+  intptr_t intercept_mask_;
+  intptr_t intercepted_flags_;
   Mode mode_;
   InterruptsScope* prev_;
 
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index adeb7f54d3363b..6eb23db2a22adb 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -8,6 +8,7 @@
 #include "src/builtins/builtins.h"
 #include "src/codegen/constants-arch.h"
 #include "src/codegen/external-reference-table.h"
+#include "src/execution/stack-guard.h"
 #include "src/execution/thread-local-top.h"
 #include "src/roots/roots.h"
 #include "src/utils/utils.h"
@@ -27,7 +28,7 @@ class Isolate;
 // register.
 class IsolateData final {
  public:
-  IsolateData() = default;
+  explicit IsolateData(Isolate* isolate) : stack_guard_(isolate) {}
 
   static constexpr intptr_t kIsolateRootBias = kRootRegisterBias;
 
@@ -81,6 +82,7 @@ class IsolateData final {
   // The FP and PC that are saved right before TurboAssembler::CallCFunction.
   Address* fast_c_call_caller_fp_address() { return &fast_c_call_caller_fp_; }
   Address* fast_c_call_caller_pc_address() { return &fast_c_call_caller_pc_; }
+  StackGuard* stack_guard() { return &stack_guard_; }
   uint8_t* stack_is_iterable_address() { return &stack_is_iterable_; }
   Address fast_c_call_caller_fp() { return fast_c_call_caller_fp_; }
   Address fast_c_call_caller_pc() { return fast_c_call_caller_pc_; }
@@ -109,20 +111,27 @@ class IsolateData final {
   Address* builtins() { return builtins_; }
 
  private:
-// Static layout definition.
+  // Static layout definition.
+  //
+  // Note: The location of fields within IsolateData is significant. The
+  // closer they are to the value of kRootRegister (i.e.: isolate_root()), the
+  // cheaper it is to access them. See also: https://crbug.com/993264.
+  // The recommended guideline is to put frequently-accessed fields close to
+  // the beginning of IsolateData.
 #define FIELDS(V)                                                             \
   V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kSystemPointerSize) \
   V(kExternalMemoryOffset, kInt64Size)                                        \
   V(kExternalMemoryLlimitOffset, kInt64Size)                                  \
   V(kExternalMemoryAtLastMarkCompactOffset, kInt64Size)                       \
+  V(kFastCCallCallerFPOffset, kSystemPointerSize)                             \
+  V(kFastCCallCallerPCOffset, kSystemPointerSize)                             \
+  V(kStackGuardOffset, StackGuard::kSizeInBytes)                              \
   V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize)         \
   V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes)      \
   V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes)                      \
   V(kBuiltinEntryTableOffset, Builtins::builtin_count* kSystemPointerSize)    \
   V(kBuiltinsTableOffset, Builtins::builtin_count* kSystemPointerSize)        \
   V(kVirtualCallTargetRegisterOffset, kSystemPointerSize)                     \
-  V(kFastCCallCallerFPOffset, kSystemPointerSize)                             \
-  V(kFastCCallCallerPCOffset, kSystemPointerSize)                             \
   V(kStackIsIterableOffset, kUInt8Size)                                       \
   /* This padding aligns IsolateData size by 8 bytes. */                      \
   V(kPaddingOffset,                                                           \
@@ -150,6 +159,17 @@ class IsolateData final {
   // Caches the amount of external memory registered at the last MC.
   int64_t external_memory_at_last_mark_compact_ = 0;
 
+  // Stores the state of the caller for TurboAssembler::CallCFunction so that
+  // the sampling CPU profiler can iterate the stack during such calls. These
+  // are stored on IsolateData so that they can be stored to with only one move
+  // instruction in compiled code.
+  Address fast_c_call_caller_fp_ = kNullAddress;
+  Address fast_c_call_caller_pc_ = kNullAddress;
+
+  // Fields related to the system and JS stack. In particular, this contains
+  // the stack limit used by stack checks in generated code.
+  StackGuard stack_guard_;
+
   RootsTable roots_;
 
   ExternalReferenceTable external_reference_table_;
@@ -169,12 +189,6 @@ class IsolateData final {
   // ia32 (otherwise the arguments adaptor call runs out of registers).
   void* virtual_call_target_register_ = nullptr;
 
-  // Stores the state of the caller for TurboAssembler::CallCFunction so that
-  // the sampling CPU profiler can iterate the stack during such calls. These
-  // are stored on IsolateData so that they can be stored to with only one move
-  // instruction in compiled code.
-  Address fast_c_call_caller_fp_ = kNullAddress;
-  Address fast_c_call_caller_pc_ = kNullAddress;
   // Whether the SafeStackFrameIterator can successfully iterate the current
   // stack. Only valid values are 0 or 1.
   uint8_t stack_is_iterable_ = 1;
@@ -225,6 +239,7 @@ void IsolateData::AssertPredictableLayout() {
                 kFastCCallCallerFPOffset);
   STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
                 kFastCCallCallerPCOffset);
+  STATIC_ASSERT(offsetof(IsolateData, stack_guard_) == kStackGuardOffset);
   STATIC_ASSERT(offsetof(IsolateData, stack_is_iterable_) ==
                 kStackIsIterableOffset);
   STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
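
Per the layout note above, moving the StackGuard and the fast-C-call slots toward the start of IsolateData keeps their displacements from kRootRegister small. A rough sketch in V8's x64 assembler DSL, assuming the usual root-register convention (illustrative, not code emitted by this patch):

    // One register-plus-displacement load reaches the embedded StackGuard;
    // smaller offsets encode as shorter instructions on x64.
    __ movq(rax, Operand(kRootRegister, IsolateData::kStackGuardOffset));
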
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index 7e037fb410cbd4..e1b021b921d2d4 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -119,25 +119,6 @@ bool Isolate::IsArrayConstructorIntact() {
   return array_constructor_cell.value() == Smi::FromInt(kProtectorValid);
 }
 
-bool Isolate::IsArraySpeciesLookupChainIntact() {
-  // Note: It would be nice to have debug checks to make sure that the
-  // species protector is accurate, but this would be hard to do for most of
-  // what the protector stands for:
-  // - You'd need to traverse the heap to check that no Array instance has
-  //   a constructor property
-  // - To check that Array[Symbol.species] == Array, JS code has to execute,
-  //   but JS cannot be invoked in callstack overflow situations
-  // All that could be checked reliably is that
-  // Array.prototype.constructor == Array. Given that limitation, no check is
-  // done here. In place, there are mjsunit tests harmony/array-species* which
-  // ensure that behavior is correct in various invalid protector cases.
-
-  PropertyCell species_cell =
-      PropertyCell::cast(root(RootIndex::kArraySpeciesProtector));
-  return species_cell.value().IsSmi() &&
-         Smi::ToInt(species_cell.value()) == kProtectorValid;
-}
-
 bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
   PropertyCell species_cell =
       PropertyCell::cast(root(RootIndex::kTypedArraySpeciesProtector));
@@ -145,14 +126,6 @@ bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
          Smi::ToInt(species_cell.value()) == kProtectorValid;
 }
 
-bool Isolate::IsRegExpSpeciesLookupChainIntact(
-    Handle<NativeContext> native_context) {
-  DCHECK_EQ(*native_context, this->raw_native_context());
-  PropertyCell species_cell = native_context->regexp_species_protector();
-  return species_cell.value().IsSmi() &&
-         Smi::ToInt(species_cell.value()) == kProtectorValid;
-}
-
 bool Isolate::IsPromiseSpeciesLookupChainIntact() {
   PropertyCell species_cell =
       PropertyCell::cast(root(RootIndex::kPromiseSpeciesProtector));
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 2b3551cdfb3ec1..d090ed5260806d 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -56,6 +56,7 @@
 #include "src/objects/hash-table-inl.h"
 #include "src/objects/js-array-inl.h"
 #include "src/objects/js-generator-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
 #include "src/objects/module-inl.h"
 #include "src/objects/promise-inl.h"
 #include "src/objects/prototype.h"
@@ -85,9 +86,9 @@
 #include "unicode/uobject.h"
 #endif  // V8_INTL_SUPPORT
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
 #include "src/diagnostics/unwinding-info-win64.h"
-#endif
+#endif  // V8_OS_WIN64
 
 extern "C" const uint8_t* v8_Default_embedded_blob_;
 extern "C" uint32_t v8_Default_embedded_blob_size_;
@@ -2022,7 +2023,7 @@ void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
     DCHECK_EQ(scheduled_exception(),
               ReadOnlyRoots(heap()).termination_exception());
     // Clear termination once we returned from all V8 frames.
-    if (handle_scope_implementer()->CallDepthIsZero()) {
+    if (thread_local_top()->CallDepthIsZero()) {
       thread_local_top()->external_caught_exception_ = false;
       clear_scheduled_exception();
     }
@@ -2648,21 +2649,12 @@ Handle<Context> Isolate::GetIncumbentContext() {
 char* Isolate::ArchiveThread(char* to) {
   MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
           sizeof(ThreadLocalTop));
-  InitializeThreadLocal();
-  clear_pending_exception();
-  clear_pending_message();
-  clear_scheduled_exception();
   return to + sizeof(ThreadLocalTop);
 }
 
 char* Isolate::RestoreThread(char* from) {
   MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
           sizeof(ThreadLocalTop));
-// This might be just paranoia, but it seems to be needed in case a
-// thread_local_top_ is restored on a separate OS thread.
-#ifdef USE_SIMULATOR
-  thread_local_top()->simulator_ = Simulator::current(this);
-#endif
   DCHECK(context().is_null() || context().IsContext());
   return from + sizeof(ThreadLocalTop);
 }
@@ -2884,9 +2876,9 @@ v8::PageAllocator* Isolate::page_allocator() {
 }
 
 Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
-    : isolate_allocator_(std::move(isolate_allocator)),
+    : isolate_data_(this),
+      isolate_allocator_(std::move(isolate_allocator)),
       id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
-      stack_guard_(this),
       allocator_(FLAG_trace_zone_stats
                      ? new VerboseAccountingAllocator(&heap_, 256 * KB)
                      : new AccountingAllocator()),
@@ -2925,6 +2917,14 @@ void Isolate::CheckIsolateLayout() {
   CHECK_EQ(OFFSET_OF(Isolate, isolate_data_), 0);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.embedder_data_)),
            Internals::kIsolateEmbedderDataOffset);
+  CHECK_EQ(static_cast<int>(
+               OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_fp_)),
+           Internals::kIsolateFastCCallCallerFpOffset);
+  CHECK_EQ(static_cast<int>(
+               OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_pc_)),
+           Internals::kIsolateFastCCallCallerPcOffset);
+  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.stack_guard_)),
+           Internals::kIsolateStackGuardOffset);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
            Internals::kIsolateRootsOffset);
   CHECK_EQ(Internals::kExternalMemoryOffset % 8, 0);
@@ -2961,7 +2961,7 @@ void Isolate::Deinit() {
     heap_profiler()->StopSamplingHeapProfiler();
   }
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
       heap()->memory_allocator()) {
     const base::AddressRegion& code_range =
@@ -2969,7 +2969,7 @@ void Isolate::Deinit() {
     void* start = reinterpret_cast<void*>(code_range.begin());
     win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
   }
-#endif
+#endif  // V8_OS_WIN64
 
   debug()->Unload();
 
@@ -3139,7 +3139,12 @@ Isolate::~Isolate() {
   default_microtask_queue_ = nullptr;
 }
 
-void Isolate::InitializeThreadLocal() { thread_local_top()->Initialize(this); }
+void Isolate::InitializeThreadLocal() {
+  thread_local_top()->Initialize(this);
+  clear_pending_exception();
+  clear_pending_message();
+  clear_scheduled_exception();
+}
 
 void Isolate::SetTerminationOnExternalTryCatch() {
   if (try_catch_handler() == nullptr) return;
@@ -3308,19 +3313,31 @@ bool Isolate::InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
   return Init(read_only_deserializer, startup_deserializer);
 }
 
-static void AddCrashKeysForIsolateAndHeapPointers(Isolate* isolate) {
-  v8::Platform* platform = V8::GetCurrentPlatform();
+static std::string AddressToString(uintptr_t address) {
+  std::stringstream stream_address;
+  stream_address << "0x" << std::hex << address;
+  return stream_address.str();
+}
+
+void Isolate::AddCrashKeysForIsolateAndHeapPointers() {
+  DCHECK_NOT_NULL(add_crash_key_callback_);
 
-  const int id = isolate->id();
-  platform->AddCrashKey(id, "isolate", reinterpret_cast<uintptr_t>(isolate));
+  const uintptr_t isolate_address = reinterpret_cast<uintptr_t>(this);
+  add_crash_key_callback_(v8::CrashKeyId::kIsolateAddress,
+                          AddressToString(isolate_address));
 
-  auto heap = isolate->heap();
-  platform->AddCrashKey(id, "ro_space",
-    reinterpret_cast<uintptr_t>(heap->read_only_space()->first_page()));
-  platform->AddCrashKey(id, "map_space",
-    reinterpret_cast<uintptr_t>(heap->map_space()->first_page()));
-  platform->AddCrashKey(id, "code_space",
-    reinterpret_cast<uintptr_t>(heap->code_space()->first_page()));
+  const uintptr_t ro_space_firstpage_address =
+      reinterpret_cast<uintptr_t>(heap()->read_only_space()->first_page());
+  add_crash_key_callback_(v8::CrashKeyId::kReadonlySpaceFirstPageAddress,
+                          AddressToString(ro_space_firstpage_address));
+  const uintptr_t map_space_firstpage_address =
+      reinterpret_cast<uintptr_t>(heap()->map_space()->first_page());
+  add_crash_key_callback_(v8::CrashKeyId::kMapSpaceFirstPageAddress,
+                          AddressToString(map_space_firstpage_address));
+  const uintptr_t code_space_firstpage_address =
+      reinterpret_cast<uintptr_t>(heap()->code_space()->first_page());
+  add_crash_key_callback_(v8::CrashKeyId::kCodeSpaceFirstPageAddress,
+                          AddressToString(code_space_firstpage_address));
 }
 
 bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
@@ -3343,9 +3360,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
   // The initialization process does not handle memory exhaustion.
   AlwaysAllocateScope always_allocate(this);
 
-  // Safe after setting Heap::isolate_, and initializing StackGuard
-  heap_.SetStackLimits();
-
 #define ASSIGN_ELEMENT(CamelName, hacker_name)                  \
   isolate_addresses_[IsolateAddressId::k##CamelName##Address] = \
       reinterpret_cast<Address>(hacker_name##_address());
@@ -3379,7 +3393,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
     // will ensure this too, but we don't have to use lockers if we are only
     // using one thread.
     ExecutionAccess lock(this);
-    stack_guard_.InitThread(lock);
+    stack_guard()->InitThread(lock);
   }
 
   // SetUp the object heap.
@@ -3524,10 +3538,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
   clear_pending_message();
   clear_scheduled_exception();
 
-  // Deserializing may put strange things in the root array's copy of the
-  // stack guard.
-  heap_.SetStackLimits();
-
   // Quiet the heap NaN if needed on target platform.
   if (!create_heap_objects)
     Assembler::QuietNaN(ReadOnlyRoots(this).nan_value());
@@ -3553,7 +3563,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
                                                sampling_flags);
   }
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
     const base::AddressRegion& code_range =
         heap()->memory_allocator()->code_range();
@@ -3561,14 +3571,13 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
     size_t size_in_bytes = code_range.size();
     win64_unwindinfo::RegisterNonABICompliantCodeRange(start, size_in_bytes);
   }
-#endif
+#endif  // V8_OS_WIN64
 
   if (create_heap_objects && FLAG_profile_deserialization) {
     double ms = timer.Elapsed().InMillisecondsF();
     PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
   }
 
-  AddCrashKeysForIsolateAndHeapPointers(this);
   return true;
 }
 
@@ -3990,15 +3999,6 @@ void Isolate::InvalidateArrayConstructorProtector() {
   DCHECK(!IsArrayConstructorIntact());
 }
 
-void Isolate::InvalidateArraySpeciesProtector() {
-  DCHECK(factory()->array_species_protector()->value().IsSmi());
-  DCHECK(IsArraySpeciesLookupChainIntact());
-  PropertyCell::SetValueWithInvalidation(
-      this, "array_species_protector", factory()->array_species_protector(),
-      handle(Smi::FromInt(kProtectorInvalid), this));
-  DCHECK(!IsArraySpeciesLookupChainIntact());
-}
-
 void Isolate::InvalidateTypedArraySpeciesProtector() {
   DCHECK(factory()->typed_array_species_protector()->value().IsSmi());
   DCHECK(IsTypedArraySpeciesLookupChainIntact());
@@ -4009,19 +4009,6 @@ void Isolate::InvalidateTypedArraySpeciesProtector() {
   DCHECK(!IsTypedArraySpeciesLookupChainIntact());
 }
 
-void Isolate::InvalidateRegExpSpeciesProtector(
-    Handle<NativeContext> native_context) {
-  DCHECK_EQ(*native_context, this->raw_native_context());
-  DCHECK(native_context->regexp_species_protector().value().IsSmi());
-  DCHECK(IsRegExpSpeciesLookupChainIntact(native_context));
-  Handle<PropertyCell> species_cell(native_context->regexp_species_protector(),
-                                    this);
-  PropertyCell::SetValueWithInvalidation(
-      this, "regexp_species_protector", species_cell,
-      handle(Smi::FromInt(kProtectorInvalid), this));
-  DCHECK(!IsRegExpSpeciesLookupChainIntact(native_context));
-}
-
 void Isolate::InvalidatePromiseSpeciesProtector() {
   DCHECK(factory()->promise_species_protector()->value().IsSmi());
   DCHECK(IsPromiseSpeciesLookupChainIntact());
@@ -4189,7 +4176,7 @@ Handle<Symbol> Isolate::SymbolFor(RootIndex dictionary_index,
                                      PropertyDetails::Empty(), &entry);
     switch (dictionary_index) {
       case RootIndex::kPublicSymbolTable:
-        symbol->set_is_public(true);
+        symbol->set_is_in_public_symbol_table(true);
         heap()->set_public_symbol_table(*dictionary);
         break;
       case RootIndex::kApiSymbolTable:
@@ -4237,7 +4224,7 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
 }
 
 void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
-  if (!handle_scope_implementer()->CallDepthIsZero()) return;
+  if (!thread_local_top()->CallDepthIsZero()) return;
 
   bool run_microtasks =
       microtask_queue && microtask_queue->size() &&
@@ -4246,12 +4233,6 @@ void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
 
   if (run_microtasks) {
     microtask_queue->RunMicrotasks(this);
-  } else {
-    // TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
-    // set is still open (whether to clear it after every microtask or once
-    // during a microtask checkpoint). See also
-    // https://github.com/tc39/proposal-weakrefs/issues/39 .
-    heap()->ClearKeptObjects();
   }
 
   if (call_completed_callbacks_.empty()) return;
@@ -4330,6 +4311,23 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
   return v8::Utils::OpenHandle(*promise);
 }
 
+void Isolate::ClearKeptObjects() { heap()->ClearKeptObjects(); }
+
+void Isolate::SetHostCleanupFinalizationGroupCallback(
+    HostCleanupFinalizationGroupCallback callback) {
+  host_cleanup_finalization_group_callback_ = callback;
+}
+
+void Isolate::RunHostCleanupFinalizationGroupCallback(
+    Handle<JSFinalizationGroup> fg) {
+  if (host_cleanup_finalization_group_callback_ != nullptr) {
+    v8::Local<v8::Context> api_context =
+        v8::Utils::ToLocal(handle(Context::cast(fg->native_context()), this));
+    host_cleanup_finalization_group_callback_(api_context,
+                                              v8::Utils::ToLocal(fg));
+  }
+}
+
 void Isolate::SetHostImportModuleDynamicallyCallback(
     HostImportModuleDynamicallyCallback callback) {
   host_import_module_dynamically_callback_ = callback;
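
Together with ClearKeptObjects above, this hands control over FinalizationGroup cleanup timing to the embedder. A hedged sketch of a host-side callback (the scheduling body is illustrative; only the callback shape comes from the API):

    void HostCleanupFinalizationGroup(v8::Local<v8::Context> context,
                                      v8::Local<v8::FinalizationGroup> fg) {
      // Post a task on the context's foreground task runner that later runs
      // v8::FinalizationGroup::Cleanup(fg) instead of cleaning up inline.
    }
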
@@ -4337,7 +4335,7 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
 
 Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
     Handle<SourceTextModule> module) {
-  Handle<Object> host_meta(module->import_meta(), this);
+  Handle<HeapObject> host_meta(module->import_meta(), this);
   if (host_meta->IsTheHole(this)) {
     host_meta = factory()->NewJSObjectWithNullProto();
     if (host_initialize_import_meta_object_callback_ != nullptr) {
@@ -4399,7 +4397,7 @@ void Isolate::PrepareBuiltinSourcePositionMap() {
   }
 }
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
 void Isolate::SetBuiltinUnwindData(
     int builtin_index,
     const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) {
@@ -4407,7 +4405,7 @@ void Isolate::SetBuiltinUnwindData(
     embedded_file_writer_->SetBuiltinUnwindData(builtin_index, unwinding_info);
   }
 }
-#endif
+#endif  // V8_OS_WIN64
 
 void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
   prepare_stack_trace_callback_ = callback;
@@ -4417,6 +4415,13 @@ bool Isolate::HasPrepareStackTraceCallback() const {
   return prepare_stack_trace_callback_ != nullptr;
 }
 
+void Isolate::SetAddCrashKeyCallback(AddCrashKeyCallback callback) {
+  add_crash_key_callback_ = callback;
+
+  // Log the initial set of data.
+  AddCrashKeysForIsolateAndHeapPointers();
+}
+
 void Isolate::SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
                                      void* data) {
   atomics_wait_callback_ = callback;
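
An embedder-side sketch of the new hook; the crash-reporter call inside is hypothetical, while the callback shape matches the add_crash_key_callback_ invocations above:

    void AddV8CrashKey(v8::CrashKeyId id, const std::string& value) {
      if (id == v8::CrashKeyId::kIsolateAddress) {
        crash_reporter::SetAnnotation("v8_isolate", value);  // hypothetical API
      }
    }

    // Registering logs the initial isolate/heap pointers immediately.
    isolate->SetAddCrashKeyCallback(AddV8CrashKey);
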
@@ -4663,6 +4668,27 @@ void Isolate::SetIdle(bool is_idle) {
   }
 }
 
+void Isolate::CollectSourcePositionsForAllBytecodeArrays() {
+  HandleScope scope(this);
+  std::vector<Handle<SharedFunctionInfo>> sfis;
+  {
+    DisallowHeapAllocation no_gc;
+    HeapObjectIterator iterator(heap());
+    for (HeapObject obj = iterator.Next(); !obj.is_null();
+         obj = iterator.Next()) {
+      if (obj.IsSharedFunctionInfo()) {
+        SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
+        if (sfi.HasBytecodeArray()) {
+          sfis.push_back(Handle<SharedFunctionInfo>(sfi, this));
+        }
+      }
+    }
+  }
+  for (auto sfi : sfis) {
+    SharedFunctionInfo::EnsureSourcePositionsAvailable(this, sfi);
+  }
+}
+
 #ifdef V8_INTL_SUPPORT
 icu::UMemory* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type) {
   return icu_object_cache_[cache_type].get();
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 2ead7bf8443685..4eadb42438f9c4 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -404,6 +404,7 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
   V(ExtensionCallback, wasm_instance_callback, &NoExtension)                   \
   V(WasmStreamingCallback, wasm_streaming_callback, nullptr)                   \
   V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr)        \
+  V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr)         \
   /* State for Relocatable. */                                                 \
   V(Relocatable*, relocatable_top, nullptr)                                    \
   V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)              \
@@ -902,7 +903,7 @@ class Isolate final : private HiddenFactory {
     DCHECK_NOT_NULL(logger_);
     return logger_;
   }
-  StackGuard* stack_guard() { return &stack_guard_; }
+  StackGuard* stack_guard() { return isolate_data()->stack_guard(); }
   Heap* heap() { return &heap_; }
   ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
   static Isolate* FromHeap(Heap* heap) {
@@ -959,6 +960,7 @@ class Isolate final : private HiddenFactory {
   void set_deoptimizer_lazy_throw(bool value) {
     deoptimizer_lazy_throw_ = value;
   }
+  void InitializeThreadLocal();
   ThreadLocalTop* thread_local_top() {
     return &isolate_data_.thread_local_top_;
   }
@@ -1174,10 +1176,7 @@ class Isolate final : private HiddenFactory {
 
   bool IsArrayOrObjectOrStringPrototype(Object object);
 
-  inline bool IsArraySpeciesLookupChainIntact();
   inline bool IsTypedArraySpeciesLookupChainIntact();
-  inline bool IsRegExpSpeciesLookupChainIntact(
-      Handle<NativeContext> native_context);
 
   // Check that the @@species protector is intact, which guards the lookup of
   // "constructor" on JSPromise instances, whose [[Prototype]] is the initial
@@ -1264,7 +1263,6 @@ class Isolate final : private HiddenFactory {
   void TraceProtectorInvalidation(const char* protector_name);
 
   void InvalidateArrayConstructorProtector();
-  void InvalidateArraySpeciesProtector();
   void InvalidateTypedArraySpeciesProtector();
   void InvalidateRegExpSpeciesProtector(Handle<NativeContext> native_context);
   void InvalidatePromiseSpeciesProtector();
@@ -1473,6 +1471,11 @@ class Isolate final : private HiddenFactory {
 
   bool IsInAnyContext(Object object, uint32_t index);
 
+  void ClearKeptObjects();
+  void SetHostCleanupFinalizationGroupCallback(
+      HostCleanupFinalizationGroupCallback callback);
+  void RunHostCleanupFinalizationGroupCallback(Handle<JSFinalizationGroup> fg);
+
   void SetHostImportModuleDynamicallyCallback(
       HostImportModuleDynamicallyCallback callback);
   V8_EXPORT_PRIVATE MaybeHandle<JSPromise>
@@ -1497,11 +1500,11 @@ class Isolate final : private HiddenFactory {
   // annotate the builtin blob with debugging information.
   void PrepareBuiltinSourcePositionMap();
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   void SetBuiltinUnwindData(
       int builtin_index,
       const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info);
-#endif
+#endif  // V8_OS_WIN64
 
   void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
   MaybeHandle<Object> RunPrepareStackTraceCallback(Handle<Context>,
@@ -1509,6 +1512,8 @@ class Isolate final : private HiddenFactory {
                                                    Handle<JSArray> sites);
   bool HasPrepareStackTraceCallback() const;
 
+  void SetAddCrashKeyCallback(AddCrashKeyCallback callback);
+
   void SetRAILMode(RAILMode rail_mode);
 
   RAILMode rail_mode() { return rail_mode_.load(); }
@@ -1558,6 +1563,11 @@ class Isolate final : private HiddenFactory {
 
   V8_EXPORT_PRIVATE void SetIdle(bool is_idle);
 
+  // Changing various modes can cause differences in generated bytecode which
+  // interfere with lazy source positions, so this should be called immediately
+  // before such a mode change to ensure that this cannot happen.
+  V8_EXPORT_PRIVATE void CollectSourcePositionsForAllBytecodeArrays();
+
  private:
   explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
   ~Isolate();
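
A minimal call-site sketch for the comment above (the mode flip itself is hypothetical):

    // Flush source positions for all existing bytecode first...
    isolate->CollectSourcePositionsForAllBytecodeArrays();
    // ...then change the mode that would alter generated bytecode.
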
@@ -1622,8 +1632,6 @@ class Isolate final : private HiddenFactory {
   static void SetIsolateThreadLocals(Isolate* isolate,
                                      PerIsolateThreadData* data);
 
-  void InitializeThreadLocal();
-
   void MarkCompactPrologue(bool is_compacting,
                            ThreadLocalTop* archived_thread_data);
   void MarkCompactEpilogue(bool is_compacting,
@@ -1653,6 +1661,8 @@ class Isolate final : private HiddenFactory {
     return "";
   }
 
+  void AddCrashKeysForIsolateAndHeapPointers();
+
   // This class contains a collection of data accessible from both C++ runtime
   // and compiled code (including assembly stubs, builtins, interpreter bytecode
   // handlers and optimized code).
@@ -1673,7 +1683,6 @@ class Isolate final : private HiddenFactory {
   std::shared_ptr<Counters> async_counters_;
   base::RecursiveMutex break_access_;
   Logger* logger_ = nullptr;
-  StackGuard stack_guard_;
   StubCache* load_stub_cache_ = nullptr;
   StubCache* store_stub_cache_ = nullptr;
   DeoptimizerData* deoptimizer_data_ = nullptr;
@@ -1710,6 +1719,8 @@ class Isolate final : private HiddenFactory {
   v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
   void* atomics_wait_callback_data_ = nullptr;
   PromiseHook promise_hook_ = nullptr;
+  HostCleanupFinalizationGroupCallback
+      host_cleanup_finalization_group_callback_ = nullptr;
   HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
       nullptr;
   HostInitializeImportMetaObjectCallback
@@ -1770,6 +1781,8 @@ class Isolate final : private HiddenFactory {
   interpreter::Interpreter* interpreter_ = nullptr;
 
   compiler::PerIsolateCompilerCache* compiler_cache_ = nullptr;
+  // The following zone is for compiler-related objects that should live
+  // through all compilations (and thus all JSHeapBroker instances).
   Zone* compiler_zone_ = nullptr;
 
   CompilerDispatcher* compiler_dispatcher_ = nullptr;
@@ -1877,6 +1890,11 @@ class Isolate final : private HiddenFactory {
   base::Mutex thread_data_table_mutex_;
   ThreadDataTable thread_data_table_;
 
+  // Enables the host application to provide a mechanism for recording a
+  // predefined set of data as crash keys to be used in postmortem debugging
+  // in case of a crash.
+  AddCrashKeyCallback add_crash_key_callback_ = nullptr;
+
   // Delete new/delete operators to ensure that Isolate::New() and
   // Isolate::Delete() are used for Isolate creation and deletion.
   void* operator new(size_t, void* ptr) { return ptr; }
@@ -1930,6 +1948,14 @@ class V8_EXPORT_PRIVATE SaveAndSwitchContext : public SaveContext {
   SaveAndSwitchContext(Isolate* isolate, Context new_context);
 };
 
+// A scope which sets the given isolate's context to null for its lifetime to
+// ensure that code does not make assumptions about a context being available.
+class NullContextScope : public SaveAndSwitchContext {
+ public:
+  explicit NullContextScope(Isolate* isolate)
+      : SaveAndSwitchContext(isolate, Context()) {}
+};
+
 class AssertNoContextChange {
 #ifdef DEBUG
  public:
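
A minimal usage sketch of the new scope:

    {
      NullContextScope null_context_scope(isolate);
      // Code here must not rely on isolate->context(); the previous
      // context is restored when the scope is destroyed.
    }
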
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index d216d3bc396325..63d1e2be1ff7af 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -7,8 +7,11 @@
 #include <memory>
 
 #include "src/api/api-inl.h"
+#include "src/ast/ast.h"
+#include "src/ast/prettyprinter.h"
 #include "src/base/v8-fallthrough.h"
 #include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
 #include "src/execution/frames.h"
 #include "src/execution/isolate-inl.h"
 #include "src/logging/counters.h"
@@ -18,6 +21,9 @@
 #include "src/objects/keys.h"
 #include "src/objects/stack-frame-info-inl.h"
 #include "src/objects/struct-inl.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parsing.h"
+#include "src/roots/roots.h"
 #include "src/strings/string-builder-inl.h"
 #include "src/wasm/wasm-code-manager.h"
 #include "src/wasm/wasm-objects.h"
@@ -314,6 +320,10 @@ Handle<Object> StackFrameBase::GetWasmModuleName() {
   return isolate_->factory()->undefined_value();
 }
 
+Handle<Object> StackFrameBase::GetWasmInstance() {
+  return isolate_->factory()->undefined_value();
+}
+
 int StackFrameBase::GetScriptId() const {
   if (!HasScript()) return kNone;
   return GetScript()->id();
@@ -332,6 +342,7 @@ void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
   function_ = handle(array->Function(frame_ix), isolate);
   code_ = handle(array->Code(frame_ix), isolate);
   offset_ = array->Offset(frame_ix).value();
+  cached_position_ = base::nullopt;
 
   const int flags = array->Flags(frame_ix).value();
   is_constructor_ = (flags & FrameArray::kIsConstructor) != 0;
@@ -348,6 +359,7 @@ JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
       function_(function),
       code_(code),
       offset_(offset),
+      cached_position_(base::nullopt),
       is_async_(false),
       is_constructor_(false),
       is_strict_(false) {}
@@ -512,9 +524,12 @@ bool JSStackFrame::IsToplevel() {
 }
 
 int JSStackFrame::GetPosition() const {
+  if (cached_position_) return *cached_position_;
+
   Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
   SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
-  return code_->SourcePosition(offset_);
+  cached_position_ = code_->SourcePosition(offset_);
+  return *cached_position_;
 }
 
 bool JSStackFrame::HasScript() const {
@@ -575,6 +590,8 @@ Handle<Object> WasmStackFrame::GetWasmModuleName() {
   return module_name;
 }
 
+Handle<Object> WasmStackFrame::GetWasmInstance() { return wasm_instance_; }
+
 int WasmStackFrame::GetPosition() const {
   return IsInterpreted()
              ? offset_
@@ -1155,5 +1172,232 @@ MaybeHandle<Object> ErrorUtils::MakeGenericError(
                                no_caller, StackTraceCollection::kDetailed);
 }
 
+namespace {
+
+bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
+  JavaScriptFrameIterator it(isolate);
+  if (!it.done()) {
+    // Compute the location from the function and the relocation info of the
+    // baseline code. For optimized code this will use the deoptimization
+    // information to get canonical location information.
+    std::vector<FrameSummary> frames;
+    it.frame()->Summarize(&frames);
+    auto& summary = frames.back().AsJavaScript();
+    Handle<SharedFunctionInfo> shared(summary.function()->shared(), isolate);
+    Handle<Object> script(shared->script(), isolate);
+    SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared);
+    int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
+    if (script->IsScript() &&
+        !(Handle<Script>::cast(script)->source().IsUndefined(isolate))) {
+      Handle<Script> casted_script = Handle<Script>::cast(script);
+      *target = MessageLocation(casted_script, pos, pos + 1, shared);
+      return true;
+    }
+  }
+  return false;
+}
+
+Handle<String> BuildDefaultCallSite(Isolate* isolate, Handle<Object> object) {
+  IncrementalStringBuilder builder(isolate);
+
+  builder.AppendString(Object::TypeOf(isolate, object));
+  if (object->IsString()) {
+    builder.AppendCString(" \"");
+    builder.AppendString(Handle<String>::cast(object));
+    builder.AppendCString("\"");
+  } else if (object->IsNull(isolate)) {
+    builder.AppendCString(" ");
+    builder.AppendString(isolate->factory()->null_string());
+  } else if (object->IsTrue(isolate)) {
+    builder.AppendCString(" ");
+    builder.AppendString(isolate->factory()->true_string());
+  } else if (object->IsFalse(isolate)) {
+    builder.AppendCString(" ");
+    builder.AppendString(isolate->factory()->false_string());
+  } else if (object->IsNumber()) {
+    builder.AppendCString(" ");
+    builder.AppendString(isolate->factory()->NumberToString(object));
+  }
+
+  return builder.Finish().ToHandleChecked();
+}
+
+Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
+                              MessageLocation* location,
+                              CallPrinter::ErrorHint* hint) {
+  if (ComputeLocation(isolate, location)) {
+    ParseInfo info(isolate, location->shared());
+    if (parsing::ParseAny(&info, location->shared(), isolate)) {
+      info.ast_value_factory()->Internalize(isolate);
+      CallPrinter printer(isolate, location->shared()->IsUserJavaScript());
+      Handle<String> str = printer.Print(info.literal(), location->start_pos());
+      *hint = printer.GetErrorHint();
+      if (str->length() > 0) return str;
+    } else {
+      isolate->clear_pending_exception();
+    }
+  }
+  return BuildDefaultCallSite(isolate, object);
+}
+
+MessageTemplate UpdateErrorTemplate(CallPrinter::ErrorHint hint,
+                                    MessageTemplate default_id) {
+  switch (hint) {
+    case CallPrinter::ErrorHint::kNormalIterator:
+      return MessageTemplate::kNotIterable;
+
+    case CallPrinter::ErrorHint::kCallAndNormalIterator:
+      return MessageTemplate::kNotCallableOrIterable;
+
+    case CallPrinter::ErrorHint::kAsyncIterator:
+      return MessageTemplate::kNotAsyncIterable;
+
+    case CallPrinter::ErrorHint::kCallAndAsyncIterator:
+      return MessageTemplate::kNotCallableOrAsyncIterable;
+
+    case CallPrinter::ErrorHint::kNone:
+      return default_id;
+  }
+  return default_id;
+}
+
+}  // namespace
+
+Handle<Object> ErrorUtils::NewIteratorError(Isolate* isolate,
+                                            Handle<Object> source) {
+  MessageLocation location;
+  CallPrinter::ErrorHint hint = CallPrinter::kNone;
+  Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
+  MessageTemplate id = MessageTemplate::kNotIterableNoSymbolLoad;
+
+  if (hint == CallPrinter::kNone) {
+    Handle<Symbol> iterator_symbol = isolate->factory()->iterator_symbol();
+    return isolate->factory()->NewTypeError(id, callsite, iterator_symbol);
+  }
+
+  id = UpdateErrorTemplate(hint, id);
+  return isolate->factory()->NewTypeError(id, callsite);
+}
+
+Handle<Object> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
+                                                     Handle<Object> source) {
+  MessageLocation location;
+  CallPrinter::ErrorHint hint = CallPrinter::kNone;
+  Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
+  MessageTemplate id = MessageTemplate::kCalledNonCallable;
+  id = UpdateErrorTemplate(hint, id);
+  return isolate->factory()->NewTypeError(id, callsite);
+}
+
+Handle<Object> ErrorUtils::NewConstructedNonConstructable(
+    Isolate* isolate, Handle<Object> source) {
+  MessageLocation location;
+  CallPrinter::ErrorHint hint = CallPrinter::kNone;
+  Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
+  MessageTemplate id = MessageTemplate::kNotConstructor;
+  return isolate->factory()->NewTypeError(id, callsite);
+}
+
+Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
+                                                Handle<Object> object) {
+  return ThrowLoadFromNullOrUndefined(isolate, object, MaybeHandle<Object>());
+}
+Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
+                                                Handle<Object> object,
+                                                MaybeHandle<Object> key) {
+  DCHECK(object->IsNullOrUndefined());
+
+  MaybeHandle<String> maybe_property_name;
+
+  // Try to extract the property name from the given key, if any.
+  Handle<Object> key_handle;
+  if (key.ToHandle(&key_handle)) {
+    if (key_handle->IsString()) {
+      maybe_property_name = Handle<String>::cast(key_handle);
+    }
+  }
+
+  Handle<String> callsite;
+
+  // Inline the RenderCallSite logic here so that we can additionally access
+  // the destructuring property.
+  bool location_computed = false;
+  bool is_destructuring = false;
+  MessageLocation location;
+  if (ComputeLocation(isolate, &location)) {
+    location_computed = true;
+
+    ParseInfo info(isolate, location.shared());
+    if (parsing::ParseAny(&info, location.shared(), isolate)) {
+      info.ast_value_factory()->Internalize(isolate);
+      CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
+      Handle<String> str = printer.Print(info.literal(), location.start_pos());
+
+      int pos = -1;
+      is_destructuring = printer.destructuring_assignment() != nullptr;
+
+      if (is_destructuring) {
+        // If we don't have one yet, try to extract the property name from the
+        // destructuring property in the AST.
+        ObjectLiteralProperty* destructuring_prop =
+            printer.destructuring_prop();
+        if (maybe_property_name.is_null() && destructuring_prop != nullptr &&
+            destructuring_prop->key()->IsPropertyName()) {
+          maybe_property_name = destructuring_prop->key()
+                                    ->AsLiteral()
+                                    ->AsRawPropertyName()
+                                    ->string();
+          // Change the message location to point at the property name.
+          pos = destructuring_prop->key()->position();
+        }
+        if (maybe_property_name.is_null()) {
+          // Change the message location to point at the destructured value.
+          pos = printer.destructuring_assignment()->value()->position();
+        }
+
+        // If we updated the pos to a valid pos, rewrite the location.
+        if (pos != -1) {
+          location = MessageLocation(location.script(), pos, pos + 1,
+                                     location.shared());
+        }
+      }
+
+      if (str->length() > 0) callsite = str;
+    } else {
+      isolate->clear_pending_exception();
+    }
+  }
+
+  if (callsite.is_null()) {
+    callsite = BuildDefaultCallSite(isolate, object);
+  }
+
+  Handle<Object> error;
+  Handle<String> property_name;
+  if (is_destructuring) {
+    if (maybe_property_name.ToHandle(&property_name)) {
+      error = isolate->factory()->NewTypeError(
+          MessageTemplate::kNonCoercibleWithProperty, property_name, callsite,
+          object);
+    } else {
+      error = isolate->factory()->NewTypeError(MessageTemplate::kNonCoercible,
+                                               callsite, object);
+    }
+  } else {
+    Handle<Object> key_handle;
+    if (!key.ToHandle(&key_handle)) {
+      key_handle = ReadOnlyRoots(isolate).undefined_value_handle();
+    }
+    if (*key_handle == ReadOnlyRoots(isolate).iterator_symbol()) {
+      error = NewIteratorError(isolate, object);
+    } else {
+      error = isolate->factory()->NewTypeError(
+          MessageTemplate::kNonObjectPropertyLoad, key_handle, object);
+    }
+  }
+
+  return isolate->Throw(*error, location_computed ? &location : nullptr);
+}
+
 }  // namespace internal
 }  // namespace v8
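
The destructuring branch above is what turns the generic non-coercible TypeError into an actionable one: for source like const { x } = undefined; the thrown message now names the property, roughly "Cannot destructure property 'x' of 'undefined' as it is undefined.", and the message location is rewritten to point at the property name (or at the destructured value when no name can be recovered).
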
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index 23f32c2fe1984c..5da2d3a9ebda3c 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -12,6 +12,7 @@
 
 #include <memory>
 
+#include "src/base/optional.h"
 #include "src/common/message-template.h"
 #include "src/handles/handles.h"
 
@@ -72,6 +73,7 @@ class StackFrameBase {
   virtual Handle<Object> GetTypeName() = 0;
   virtual Handle<Object> GetEvalOrigin();
   virtual Handle<Object> GetWasmModuleName();
+  virtual Handle<Object> GetWasmInstance();
 
   // Returns the script ID if one is attached, -1 otherwise.
   int GetScriptId() const;
@@ -146,6 +148,7 @@ class JSStackFrame : public StackFrameBase {
   Handle<JSFunction> function_;
   Handle<AbstractCode> code_;
   int offset_;
+  mutable base::Optional<int> cached_position_;
 
   bool is_async_ : 1;
   bool is_constructor_ : 1;
@@ -168,12 +171,13 @@ class WasmStackFrame : public StackFrameBase {
   Handle<Object> GetMethodName() override { return Null(); }
   Handle<Object> GetTypeName() override { return Null(); }
   Handle<Object> GetWasmModuleName() override;
+  Handle<Object> GetWasmInstance() override;
 
   int GetPosition() const override;
   int GetLineNumber() override { return wasm_func_index_; }
   int GetColumnNumber() override;
 
-  int GetPromiseIndex() const override { return kNone; }
+  int GetPromiseIndex() const override { return GetPosition(); }
 
   bool IsNative() override { return false; }
   bool IsToplevel() override { return false; }
@@ -279,6 +283,18 @@ class ErrorUtils : public AllStatic {
   static MaybeHandle<Object> FormatStackTrace(Isolate* isolate,
                                               Handle<JSObject> error,
                                               Handle<Object> stack_trace);
+
+  static Handle<Object> NewIteratorError(Isolate* isolate,
+                                         Handle<Object> source);
+  static Handle<Object> NewCalledNonCallableError(Isolate* isolate,
+                                                  Handle<Object> source);
+  static Handle<Object> NewConstructedNonConstructable(Isolate* isolate,
+                                                       Handle<Object> source);
+  static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
+                                             Handle<Object> object);
+  static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
+                                             Handle<Object> object,
+                                             MaybeHandle<Object> key);
 };
 
 class MessageFormatter {
diff --git a/deps/v8/src/execution/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index 3cc95205fa5199..ed76e9d79c50c6 100644
--- a/deps/v8/src/execution/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -159,10 +159,13 @@ int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
     HandleScopeImplementer::EnteredContextRewindScope rewind_scope(
         isolate->handle_scope_implementer());
     TRACE_EVENT_BEGIN0("v8.execute", "RunMicrotasks");
-    TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunMicrotasks");
-    maybe_result = Execution::TryRunMicrotasks(isolate, this, &maybe_exception);
-    processed_microtask_count =
-        static_cast<int>(finished_microtask_count_ - base_count);
+    {
+      TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunMicrotasks");
+      maybe_result = Execution::TryRunMicrotasks(isolate, this,
+                                                 &maybe_exception);
+      processed_microtask_count =
+          static_cast<int>(finished_microtask_count_ - base_count);
+    }
     TRACE_EVENT_END1("v8.execute", "RunMicrotasks", "microtask_count",
                      processed_microtask_count);
   }
@@ -249,12 +252,6 @@ Microtask MicrotaskQueue::get(intptr_t index) const {
 }
 
 void MicrotaskQueue::OnCompleted(Isolate* isolate) {
-  // TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
-  // set is still open (whether to clear it after every microtask or once
-  // during a microtask checkpoint). See also
-  // https://github.com/tc39/proposal-weakrefs/issues/39 .
-  isolate->heap()->ClearKeptObjects();
-
   FireMicrotasksCompletedCallback(isolate);
 }
 
diff --git a/deps/v8/src/execution/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index 6a3a160ec37b1c..2d9a924c14f238 100644
--- a/deps/v8/src/execution/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -2152,7 +2152,7 @@ using SimulatorRuntimeCall = int64_t (*)(int32_t arg0, int32_t arg1,
                                          int32_t arg2, int32_t arg3,
                                          int32_t arg4, int32_t arg5,
                                          int32_t arg6, int32_t arg7,
-                                         int32_t arg8);
+                                         int32_t arg8, int32_t arg9);
 
 // These prototypes handle the four types of FP calls.
 using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
@@ -2194,7 +2194,8 @@ void Simulator::SoftwareInterrupt() {
     int32_t arg6 = stack_pointer[6];
     int32_t arg7 = stack_pointer[7];
     int32_t arg8 = stack_pointer[8];
-    STATIC_ASSERT(kMaxCParameters == 9);
+    int32_t arg9 = stack_pointer[9];
+    STATIC_ASSERT(kMaxCParameters == 10);
 
     bool fp_call =
         (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -2378,12 +2379,12 @@ void Simulator::SoftwareInterrupt() {
       if (::v8::internal::FLAG_trace_sim) {
         PrintF(
             "Call to host function at %p "
-            "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x\n",
+            "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x\n",
             reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
-            arg3, arg4, arg5, arg6, arg7, arg8);
+            arg3, arg4, arg5, arg6, arg7, arg8, arg9);
       }
       int64_t result =
-          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
       set_register(v0, static_cast<int32_t>(result));
       set_register(v1, static_cast<int32_t>(result >> 32));
     }
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index 3fbf1961a8f032..78dbc29a0b53d2 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -2159,7 +2159,7 @@ using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
                                             int64_t arg2, int64_t arg3,
                                             int64_t arg4, int64_t arg5,
                                             int64_t arg6, int64_t arg7,
-                                            int64_t arg8);
+                                            int64_t arg8, int64_t arg9);
 
 // These prototypes handle the four types of FP calls.
 using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
@@ -2200,7 +2200,8 @@ void Simulator::SoftwareInterrupt() {
     int64_t arg6 = get_register(a6);
     int64_t arg7 = get_register(a7);
     int64_t arg8 = stack_pointer[0];
-    STATIC_ASSERT(kMaxCParameters == 9);
+    int64_t arg9 = stack_pointer[1];
+    STATIC_ASSERT(kMaxCParameters == 10);
 
     bool fp_call =
         (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -2372,12 +2373,12 @@ void Simulator::SoftwareInterrupt() {
             "Call to host function at %p "
             "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
             " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
-            " , %08" PRIx64 " \n",
+            " , %08" PRIx64 " , %08" PRIx64 " \n",
             reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
-            arg3, arg4, arg5, arg6, arg7, arg8);
+            arg3, arg4, arg5, arg6, arg7, arg8, arg9);
       }
       ObjectPair result =
-          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
       set_register(v0, (int64_t)(result.x));
       set_register(v1, (int64_t)(result.y));
     }
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 96308f7f5b0a73..ab8786713b196d 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -931,10 +931,12 @@ using SimulatorRuntimeCall = intptr_t (*)(intptr_t arg0, intptr_t arg1,
                                           intptr_t arg2, intptr_t arg3,
                                           intptr_t arg4, intptr_t arg5,
                                           intptr_t arg6, intptr_t arg7,
-                                          intptr_t arg8);
+                                          intptr_t arg8, intptr_t arg9);
 using SimulatorRuntimePairCall = ObjectPair (*)(intptr_t arg0, intptr_t arg1,
                                                 intptr_t arg2, intptr_t arg3,
-                                                intptr_t arg4, intptr_t arg5);
+                                                intptr_t arg4, intptr_t arg5,
+                                                intptr_t arg6, intptr_t arg7,
+                                                intptr_t arg8, intptr_t arg9);
 
 // These prototypes handle the four types of FP calls.
 using SimulatorRuntimeCompareCall = int (*)(double darg0, double darg1);
@@ -964,7 +966,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
           (get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
           0;
       Redirection* redirection = Redirection::FromInstruction(instr);
-      const int kArgCount = 9;
+      const int kArgCount = 10;
       const int kRegisterArgCount = 8;
       int arg0_regnum = 3;
       intptr_t result_buffer = 0;
@@ -982,9 +984,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
       }
       intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
       // Remaining argument on stack
-      arg[kRegisterArgCount] = stack_pointer[kStackFrameExtraParamSlot];
-      STATIC_ASSERT(kArgCount == kRegisterArgCount + 1);
-      STATIC_ASSERT(kMaxCParameters == 9);
+      for (int i = kRegisterArgCount, j = 0; i < kArgCount; i++, j++) {
+        arg[i] = stack_pointer[kStackFrameExtraParamSlot + j];
+      }
+      STATIC_ASSERT(kArgCount == kRegisterArgCount + 2);
+      STATIC_ASSERT(kMaxCParameters == kArgCount);
       bool fp_call =
           (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
           (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -1161,9 +1165,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
               "Call to host function at %p,\n"
               "\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
               ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
-              ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
+              ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+              ", %08" V8PRIxPTR,
               reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1],
-              arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8]);
+              arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8], arg[9]);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
                    get_register(sp));
@@ -1174,8 +1179,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
         if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
           SimulatorRuntimePairCall target =
               reinterpret_cast<SimulatorRuntimePairCall>(external);
-          ObjectPair result =
-              target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+          ObjectPair result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
+                                     arg[5], arg[6], arg[7], arg[8], arg[9]);
           intptr_t x;
           intptr_t y;
           decodeObjectPair(&result, &x, &y);
@@ -1195,7 +1200,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
           SimulatorRuntimeCall target =
               reinterpret_cast<SimulatorRuntimeCall>(external);
           intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
-                                   arg[5], arg[6], arg[7], arg[8]);
+                                   arg[5], arg[6], arg[7], arg[8], arg[9]);
           if (::v8::internal::FLAG_trace_sim) {
             PrintF("Returned %08" V8PRIxPTR "\n", result);
           }
diff --git a/deps/v8/src/execution/protectors-inl.h b/deps/v8/src/execution/protectors-inl.h
new file mode 100644
index 00000000000000..b2428063e1ec2a
--- /dev/null
+++ b/deps/v8/src/execution/protectors-inl.h
@@ -0,0 +1,36 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_PROTECTORS_INL_H_
+#define V8_EXECUTION_PROTECTORS_INL_H_
+
+#include "src/execution/protectors.h"
+#include "src/objects/contexts-inl.h"
+#include "src/objects/property-cell-inl.h"
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_PROTECTOR_ON_NATIVE_CONTEXT_CHECK(name, cell)                \
+  bool Protectors::Is##name##Intact(Handle<NativeContext> native_context) { \
+    PropertyCell species_cell = native_context->cell();                     \
+    return species_cell.value().IsSmi() &&                                  \
+           Smi::ToInt(species_cell.value()) == kProtectorValid;             \
+  }
+DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DEFINE_PROTECTOR_ON_NATIVE_CONTEXT_CHECK)
+
+#define DEFINE_PROTECTOR_ON_ISOLATE_CHECK(name, root_index, unused_cell) \
+  bool Protectors::Is##name##Intact(Isolate* isolate) {                  \
+    PropertyCell cell =                                                  \
+        PropertyCell::cast(isolate->root(RootIndex::k##root_index));     \
+    return cell.value().IsSmi() &&                                       \
+           Smi::ToInt(cell.value()) == kProtectorValid;                  \
+  }
+DECLARED_PROTECTORS_ON_ISOLATE(DEFINE_PROTECTOR_ON_ISOLATE_CHECK)
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_EXECUTION_PROTECTORS_INL_H_
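
For reference, the native-context macro expands, for the single protector declared in protectors.h below, to roughly:

    bool Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
        Handle<NativeContext> native_context) {
      PropertyCell species_cell = native_context->regexp_species_protector();
      return species_cell.value().IsSmi() &&
             Smi::ToInt(species_cell.value()) == kProtectorValid;
    }
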
diff --git a/deps/v8/src/execution/protectors.cc b/deps/v8/src/execution/protectors.cc
new file mode 100644
index 00000000000000..3ac07eede38326
--- /dev/null
+++ b/deps/v8/src/execution/protectors.cc
@@ -0,0 +1,48 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/protectors.h"
+
+#include "src/execution/isolate-inl.h"
+#include "src/execution/protectors-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/contexts.h"
+#include "src/objects/property-cell.h"
+#include "src/objects/smi.h"
+#include "src/tracing/trace-event.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+#define INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION(name, cell)       \
+  void Protectors::Invalidate##name(Isolate* isolate,                       \
+                                    Handle<NativeContext> native_context) { \
+    DCHECK_EQ(*native_context, isolate->raw_native_context());              \
+    DCHECK(native_context->cell().value().IsSmi());                         \
+    DCHECK(Is##name##Intact(native_context));                               \
+    Handle<PropertyCell> species_cell(native_context->cell(), isolate);     \
+    PropertyCell::SetValueWithInvalidation(                                 \
+        isolate, #cell, species_cell,                                       \
+        handle(Smi::FromInt(kProtectorInvalid), isolate));                  \
+    DCHECK(!Is##name##Intact(native_context));                              \
+  }
+DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(
+    INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION)
+#undef INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION
+
+#define INVALIDATE_PROTECTOR_ON_ISOLATE_DEFINITION(name, unused_index, cell) \
+  void Protectors::Invalidate##name(Isolate* isolate) {                      \
+    DCHECK(isolate->factory()->cell()->value().IsSmi());                     \
+    DCHECK(Is##name##Intact(isolate));                                       \
+    PropertyCell::SetValueWithInvalidation(                                  \
+        isolate, #cell, isolate->factory()->cell(),                          \
+        handle(Smi::FromInt(kProtectorInvalid), isolate));                   \
+    DCHECK(!Is##name##Intact(isolate));                                      \
+  }
+DECLARED_PROTECTORS_ON_ISOLATE(INVALIDATE_PROTECTOR_ON_ISOLATE_DEFINITION)
+#undef INVALIDATE_PROTECTOR_ON_ISOLATE_DEFINITION
+
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/execution/protectors.h b/deps/v8/src/execution/protectors.h
new file mode 100644
index 00000000000000..5c54613bb19633
--- /dev/null
+++ b/deps/v8/src/execution/protectors.h
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_PROTECTORS_H_
+#define V8_EXECUTION_PROTECTORS_H_
+
+#include "src/handles/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Protectors : public AllStatic {
+ public:
+  static const int kProtectorValid = 1;
+  static const int kProtectorInvalid = 0;
+
+#define DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(V) \
+  V(RegExpSpeciesLookupChainProtector, regexp_species_protector)
+
+#define DECLARED_PROTECTORS_ON_ISOLATE(V) \
+  V(ArraySpeciesLookupChain, ArraySpeciesProtector, array_species_protector)
+
+#define DECLARE_PROTECTOR_ON_NATIVE_CONTEXT(name, unused_cell)               \
+  static inline bool Is##name##Intact(Handle<NativeContext> native_context); \
+  static void Invalidate##name(Isolate* isolate,                             \
+                               Handle<NativeContext> native_context);
+  DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DECLARE_PROTECTOR_ON_NATIVE_CONTEXT)
+#undef DECLARE_PROTECTOR_ON_NATIVE_CONTEXT
+
+#define DECLARE_PROTECTOR_ON_ISOLATE(name, unused_root_index, unused_cell) \
+  static inline bool Is##name##Intact(Isolate* isolate);                   \
+  static void Invalidate##name(Isolate* isolate);
+
+  DECLARED_PROTECTORS_ON_ISOLATE(DECLARE_PROTECTOR_ON_ISOLATE)
+#undef DECLARE_PROTECTOR_ON_ISOLATE
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_EXECUTION_PROTECTORS_H_
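
Taken together, the three new protectors files generate one predicate/invalidation pair per declared protector. As a reading aid, this is what the isolate-level macros expand to for the single entry declared above (derived mechanically from DECLARE_PROTECTOR_ON_ISOLATE and DEFINE_PROTECTOR_ON_ISOLATE_CHECK; a sketch of the expansion, not literal compiler output):

    // In protectors.h:
    static inline bool IsArraySpeciesLookupChainIntact(Isolate* isolate);
    static void InvalidateArraySpeciesLookupChain(Isolate* isolate);

    // In protectors-inl.h, reading the protector cell off the root list:
    bool Protectors::IsArraySpeciesLookupChainIntact(Isolate* isolate) {
      PropertyCell cell =
          PropertyCell::cast(isolate->root(RootIndex::kArraySpeciesProtector));
      return cell.value().IsSmi() &&
             Smi::ToInt(cell.value()) == kProtectorValid;
    }

Fast paths test the predicate before assuming, for example, that Array[Symbol.species] is untouched; invalidation flips the cell to kProtectorInvalid exactly once, as the DCHECKs in protectors.cc enforce.
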
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index 0ed36cbe10bda2..65476e346f25c7 100644
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -8,6 +8,7 @@
 #include "src/codegen/assembler.h"
 #include "src/codegen/compilation-cache.h"
 #include "src/codegen/compiler.h"
+#include "src/codegen/pending-optimization-table.h"
 #include "src/execution/execution.h"
 #include "src/execution/frames-inl.h"
 #include "src/handles/global-handles.h"
@@ -119,6 +120,17 @@ void RuntimeProfiler::MaybeOptimize(JSFunction function,
     }
     return;
   }
+  if (FLAG_testing_d8_test_runner) {
+    if (!PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
+                                                                  function)) {
+      if (FLAG_trace_opt_verbose) {
+        PrintF("[function ");
+        function.PrintName();
+        PrintF(" has been marked manually for optimization]\n");
+      }
+      return;
+    }
+  }
 
   if (FLAG_always_osr) {
     AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 8a82e3224398e9..985a9418746230 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -1858,10 +1858,12 @@ using SimulatorRuntimeCall = intptr_t (*)(intptr_t arg0, intptr_t arg1,
                                           intptr_t arg2, intptr_t arg3,
                                           intptr_t arg4, intptr_t arg5,
                                           intptr_t arg6, intptr_t arg7,
-                                          intptr_t arg8);
+                                          intptr_t arg8, intptr_t arg9);
 using SimulatorRuntimePairCall = ObjectPair (*)(intptr_t arg0, intptr_t arg1,
                                                 intptr_t arg2, intptr_t arg3,
-                                                intptr_t arg4, intptr_t arg5);
+                                                intptr_t arg4, intptr_t arg5,
+                                                intptr_t arg6, intptr_t arg7,
+                                                intptr_t arg8, intptr_t arg9);
 
 // These prototypes handle the four types of FP calls.
 using SimulatorRuntimeCompareCall = int (*)(double darg0, double darg1);
@@ -1891,7 +1893,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
           (get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
           0;
       Redirection* redirection = Redirection::FromInstruction(instr);
-      const int kArgCount = 9;
+      const int kArgCount = 10;
       const int kRegisterArgCount = 5;
       int arg0_regnum = 2;
       intptr_t result_buffer = 0;
@@ -1913,8 +1915,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
         arg[i] = stack_pointer[(kCalleeRegisterSaveAreaSize / kPointerSize) +
                                (i - kRegisterArgCount)];
       }
-      STATIC_ASSERT(kArgCount == kRegisterArgCount + 4);
-      STATIC_ASSERT(kMaxCParameters == 9);
+      STATIC_ASSERT(kArgCount == kRegisterArgCount + 5);
+      STATIC_ASSERT(kMaxCParameters == kArgCount);
       bool fp_call =
           (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
           (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -2094,9 +2096,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
               "Call to host function at %p,\n"
               "\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
               ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
-              ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
+              ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+              ", %08" V8PRIxPTR,
               reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1],
-              arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8]);
+              arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8], arg[9]);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
                    static_cast<intptr_t>(get_register(sp)));
@@ -2107,8 +2110,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
         if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
           SimulatorRuntimePairCall target =
               reinterpret_cast<SimulatorRuntimePairCall>(external);
-          ObjectPair result =
-              target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+          ObjectPair result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
+                                     arg[5], arg[6], arg[7], arg[8], arg[9]);
           intptr_t x;
           intptr_t y;
           decodeObjectPair(&result, &x, &y);
@@ -2128,7 +2131,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
           SimulatorRuntimeCall target =
               reinterpret_cast<SimulatorRuntimeCall>(external);
           intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
-                                   arg[5], arg[6], arg[7], arg[8]);
+                                   arg[5], arg[6], arg[7], arg[8], arg[9]);
           if (::v8::internal::FLAG_trace_sim) {
             PrintF("Returned %08" V8PRIxPTR "\n", result);
           }
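
The arithmetic behind the updated asserts: on s390, the first five integer arguments travel in registers (kRegisterArgCount is 5, starting at r2 per arg0_regnum), so growing kMaxCParameters from 9 to 10 means five rather than four arguments are now read back from the caller's stack save area. A compile-time restatement of that bookkeeping (a sketch, not patch code):

    constexpr int kRegisterArgCount = 5;  // args 0..4 arrive in r2..r6
    constexpr int kArgCount = 10;         // == kMaxCParameters after this change
    static_assert(kArgCount == kRegisterArgCount + 5,
                  "five arguments spill to the stack save area");
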
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
index e5c24cef1e680d..1cf4c4605a6d7d 100644
--- a/deps/v8/src/execution/stack-guard.cc
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -21,14 +21,12 @@ void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
   DCHECK_NOT_NULL(isolate_);
   thread_local_.set_jslimit(kInterruptLimit);
   thread_local_.set_climit(kInterruptLimit);
-  isolate_->heap()->SetStackLimits();
 }
 
 void StackGuard::reset_limits(const ExecutionAccess& lock) {
   DCHECK_NOT_NULL(isolate_);
   thread_local_.set_jslimit(thread_local_.real_jslimit_);
   thread_local_.set_climit(thread_local_.real_climit_);
-  isolate_->heap()->SetStackLimits();
 }
 
 void StackGuard::SetStackLimit(uintptr_t limit) {
@@ -54,7 +52,6 @@ void StackGuard::AdjustStackLimitForSimulator() {
   uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
   if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
     thread_local_.set_jslimit(jslimit);
-    isolate_->heap()->SetStackLimits();
   }
 }
 
@@ -75,7 +72,8 @@ void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
   DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
   if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
     // Intercept already requested interrupts.
-    int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
+    intptr_t intercepted =
+        thread_local_.interrupt_flags_ & scope->intercept_mask_;
     scope->intercepted_flags_ = intercepted;
     thread_local_.interrupt_flags_ &= ~intercepted;
   } else {
@@ -124,7 +122,7 @@ void StackGuard::PopInterruptsScope() {
 
 bool StackGuard::CheckInterrupt(InterruptFlag flag) {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & flag;
+  return (thread_local_.interrupt_flags_ & flag) != 0;
 }
 
 void StackGuard::RequestInterrupt(InterruptFlag flag) {
@@ -160,7 +158,7 @@ int StackGuard::FetchAndClearInterrupts() {
   ExecutionAccess access(isolate_);
 
   int result = 0;
-  if (thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) {
+  if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) {
     // The TERMINATE_EXECUTION interrupt is special, since it terminates
     // execution but should leave V8 in a resumable state. If it exists, we only
     // fetch and clear that bit. On resume, V8 can continue processing other
@@ -169,7 +167,7 @@ int StackGuard::FetchAndClearInterrupts() {
     thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
     if (!has_pending_interrupts(access)) reset_limits(access);
   } else {
-    result = thread_local_.interrupt_flags_;
+    result = static_cast<int>(thread_local_.interrupt_flags_);
     thread_local_.interrupt_flags_ = 0;
     reset_limits(access);
   }
@@ -180,23 +178,13 @@ int StackGuard::FetchAndClearInterrupts() {
 char* StackGuard::ArchiveStackGuard(char* to) {
   ExecutionAccess access(isolate_);
   MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
-  ThreadLocal blank;
-
-  // Set the stack limits using the old thread_local_.
-  // TODO(isolates): This was the old semantics of constructing a ThreadLocal
-  //                 (as the ctor called SetStackLimits, which looked at the
-  //                 current thread_local_ from StackGuard)-- but is this
-  //                 really what was intended?
-  isolate_->heap()->SetStackLimits();
-  thread_local_ = blank;
-
+  thread_local_ = {};
   return to + sizeof(ThreadLocal);
 }
 
 char* StackGuard::RestoreStackGuard(char* from) {
   ExecutionAccess access(isolate_);
   MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
-  isolate_->heap()->SetStackLimits();
   return from + sizeof(ThreadLocal);
 }
 
@@ -206,39 +194,21 @@ void StackGuard::FreeThreadResources() {
   per_thread->set_stack_limit(thread_local_.real_climit_);
 }
 
-void StackGuard::ThreadLocal::Clear() {
-  real_jslimit_ = kIllegalLimit;
-  set_jslimit(kIllegalLimit);
-  real_climit_ = kIllegalLimit;
-  set_climit(kIllegalLimit);
+void StackGuard::ThreadLocal::Initialize(Isolate* isolate,
+                                         const ExecutionAccess& lock) {
+  const uintptr_t kLimitSize = FLAG_stack_size * KB;
+  DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
+  uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
+  real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
+  set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
+  real_climit_ = limit;
+  set_climit(limit);
   interrupt_scopes_ = nullptr;
   interrupt_flags_ = 0;
 }
 
-bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
-  bool should_set_stack_limits = false;
-  if (real_climit_ == kIllegalLimit) {
-    const uintptr_t kLimitSize = FLAG_stack_size * KB;
-    DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
-    uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
-    real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
-    set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
-    real_climit_ = limit;
-    set_climit(limit);
-    should_set_stack_limits = true;
-  }
-  interrupt_scopes_ = nullptr;
-  interrupt_flags_ = 0;
-  return should_set_stack_limits;
-}
-
-void StackGuard::ClearThread(const ExecutionAccess& lock) {
-  thread_local_.Clear();
-  isolate_->heap()->SetStackLimits();
-}
-
 void StackGuard::InitThread(const ExecutionAccess& lock) {
-  if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
+  thread_local_.Initialize(isolate_, lock);
   Isolate::PerIsolateThreadData* per_thread =
       isolate_->FindOrAllocatePerThreadDataForThisThread();
   uintptr_t stored_limit = per_thread->stack_limit();
diff --git a/deps/v8/src/execution/stack-guard.h b/deps/v8/src/execution/stack-guard.h
index d7477f1623f56b..febd1ecb0ac0be 100644
--- a/deps/v8/src/execution/stack-guard.h
+++ b/deps/v8/src/execution/stack-guard.h
@@ -7,6 +7,7 @@
 
 #include "include/v8-internal.h"
 #include "src/base/atomicops.h"
+#include "src/common/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -37,12 +38,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
   char* RestoreStackGuard(char* from);
   static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
   void FreeThreadResources();
-  // Sets up the default stack guard for this thread if it has not
-  // already been set up.
+  // Sets up the default stack guard for this thread.
   void InitThread(const ExecutionAccess& lock);
-  // Clears the stack guard for this thread so it does not look as if
-  // it has been set up.
-  void ClearThread(const ExecutionAccess& lock);
 
 #define INTERRUPT_LIST(V)                                         \
   V(TERMINATE_EXECUTION, TerminateExecution, 0)                   \
@@ -89,6 +86,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
   // stack overflow, then handle the interruption accordingly.
   Object HandleInterrupts();
 
+  static constexpr int kSizeInBytes = 7 * kSystemPointerSize;
+
  private:
   bool CheckInterrupt(InterruptFlag flag);
   void RequestInterrupt(InterruptFlag flag);
@@ -124,13 +123,9 @@ class V8_EXPORT_PRIVATE StackGuard final {
 
   class ThreadLocal final {
    public:
-    ThreadLocal() { Clear(); }
-    // You should hold the ExecutionAccess lock when you call Initialize or
-    // Clear.
-    void Clear();
+    ThreadLocal() {}
 
-    // Returns true if the heap's stack limits should be set, false if not.
-    bool Initialize(Isolate* isolate);
+    void Initialize(Isolate* isolate, const ExecutionAccess& lock);
 
     // The stack limit is split into a JavaScript and a C++ stack limit. These
     // two are the same except when running on a simulator where the C++ and
@@ -141,13 +136,16 @@ class V8_EXPORT_PRIVATE StackGuard final {
     // break or preemption) in which case it is lowered to make stack checks
     // fail. Both the generated code and the runtime system check against the
     // one without the real_ prefix.
-    uintptr_t real_jslimit_;  // Actual JavaScript stack limit set for the VM.
-    uintptr_t real_climit_;   // Actual C++ stack limit set for the VM.
+
+    // Actual JavaScript stack limit set for the VM.
+    uintptr_t real_jslimit_ = kIllegalLimit;
+    // Actual C++ stack limit set for the VM.
+    uintptr_t real_climit_ = kIllegalLimit;
 
     // jslimit_ and climit_ can be read without any lock.
     // Writing requires the ExecutionAccess lock.
-    base::AtomicWord jslimit_;
-    base::AtomicWord climit_;
+    base::AtomicWord jslimit_ = kIllegalLimit;
+    base::AtomicWord climit_ = kIllegalLimit;
 
     uintptr_t jslimit() {
       return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
@@ -164,8 +162,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
                                  static_cast<base::AtomicWord>(limit));
     }
 
-    InterruptsScope* interrupt_scopes_;
-    int interrupt_flags_;
+    InterruptsScope* interrupt_scopes_ = nullptr;
+    intptr_t interrupt_flags_ = 0;
   };
 
   // TODO(isolates): Technically this could be calculated directly from a
@@ -180,6 +178,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
   DISALLOW_COPY_AND_ASSIGN(StackGuard);
 };
 
+STATIC_ASSERT(StackGuard::kSizeInBytes == sizeof(StackGuard));
+
 }  // namespace internal
 }  // namespace v8
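
The new kSizeInBytes constant plus the STATIC_ASSERT below it pin the object layout: a StackGuard is exactly the Isolate* pointer followed by the six pointer-sized ThreadLocal fields. A standalone sketch of that accounting (hypothetical mirror struct; the assert in the header is what actually rules out padding):

    struct ThreadLocalMirror {      // mirrors StackGuard::ThreadLocal
      uintptr_t real_jslimit_;      // 1 word
      uintptr_t real_climit_;       // 1 word
      intptr_t jslimit_;            // base::AtomicWord is pointer-sized
      intptr_t climit_;             // 1 word
      void* interrupt_scopes_;      // 1 word
      intptr_t interrupt_flags_;    // 1 word (widened from int)
    };
    static_assert(sizeof(ThreadLocalMirror) == 6 * sizeof(void*),
                  "six words here; the isolate_ pointer makes seven");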
 
diff --git a/deps/v8/src/execution/thread-local-top.cc b/deps/v8/src/execution/thread-local-top.cc
index 569333f27619f4..cb69fb56efb38d 100644
--- a/deps/v8/src/execution/thread-local-top.cc
+++ b/deps/v8/src/execution/thread-local-top.cc
@@ -26,5 +26,15 @@ void ThreadLocalTop::Free() {
   while (promise_on_stack_) isolate_->PopPromise();
 }
 
+#if defined(USE_SIMULATOR)
+void ThreadLocalTop::StoreCurrentStackPosition() {
+  last_api_entry_ = simulator_->get_sp();
+}
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+void ThreadLocalTop::StoreCurrentStackPosition() {
+  last_api_entry_ = reinterpret_cast<Address>(GetCurrentStackPosition());
+}
+#endif
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/execution/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index 625fcc41ddcb95..57166299c566fd 100644
--- a/deps/v8/src/execution/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -8,6 +8,7 @@
 #include "src/common/globals.h"
 #include "src/execution/thread-id.h"
 #include "src/objects/contexts.h"
+#include "src/utils/utils.h"
 
 namespace v8 {
 
@@ -25,7 +26,7 @@ class ThreadLocalTop {
   // TODO(all): This is not particularly beautiful. We should probably
   // refactor this to really consist of just Addresses and 32-bit
   // integer fields.
-  static constexpr uint32_t kSizeInBytes = 23 * kSystemPointerSize;
+  static constexpr uint32_t kSizeInBytes = 24 * kSystemPointerSize;
 
   // Does early low-level initialization that does not depend on the
   // isolate being present.
@@ -56,6 +57,31 @@ class ThreadLocalTop {
         v8::TryCatch::JSStackComparableAddress(try_catch_handler_));
   }
 
+  // Call depth represents nested V8 API calls. Instead of storing the nesting
+  // level as an integer, we store the stack height of the last API entry. This
+  // additional information is used when we decide whether to trigger a debug
+  // break at a function entry.
+  template <typename Scope>
+  void IncrementCallDepth(Scope* stack_allocated_scope) {
+    stack_allocated_scope->previous_stack_height_ = last_api_entry_;
+#if defined(USE_SIMULATOR) || defined(V8_USE_ADDRESS_SANITIZER)
+    StoreCurrentStackPosition();
+#else
+    last_api_entry_ = reinterpret_cast<i::Address>(stack_allocated_scope);
+#endif
+  }
+
+#if defined(USE_SIMULATOR) || defined(V8_USE_ADDRESS_SANITIZER)
+  void StoreCurrentStackPosition();
+#endif
+
+  template <typename Scope>
+  void DecrementCallDepth(Scope* stack_allocated_scope) {
+    last_api_entry_ = stack_allocated_scope->previous_stack_height_;
+  }
+
+  bool CallDepthIsZero() const { return last_api_entry_ == kNullAddress; }
+
   void Free();
 
   Isolate* isolate_ = nullptr;
@@ -77,6 +103,8 @@ class ThreadLocalTop {
   Address pending_handler_fp_ = kNullAddress;
   Address pending_handler_sp_ = kNullAddress;
 
+  Address last_api_entry_ = kNullAddress;
+
   // Communication channel between Isolate::Throw and message consumers.
   Object pending_message_obj_;
   bool rethrowing_message_ = false;
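
IncrementCallDepth is a template so that any stack-allocated scope exposing a previous_stack_height_ field can participate. A minimal compatible scope, sketched under that one assumption (the name is hypothetical; V8's real scope of this kind lives in the API layer):

    class CallDepthScopeSketch {
     public:
      explicit CallDepthScopeSketch(ThreadLocalTop* top) : top_(top) {
        top_->IncrementCallDepth(this);  // saves last_api_entry_ here first
      }
      ~CallDepthScopeSketch() { top_->DecrementCallDepth(this); }

      // Written by IncrementCallDepth, read back by DecrementCallDepth.
      Address previous_stack_height_ = kNullAddress;

     private:
      ThreadLocalTop* top_;
    };

Because the outermost scope saves and later restores the initial kNullAddress, CallDepthIsZero() becomes true again exactly when the last nested API call unwinds.
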
diff --git a/deps/v8/src/execution/v8threads.cc b/deps/v8/src/execution/v8threads.cc
index 6b99b81ef7a8b2..e16988b27529c7 100644
--- a/deps/v8/src/execution/v8threads.cc
+++ b/deps/v8/src/execution/v8threads.cc
@@ -40,10 +40,6 @@ void Locker::Initialize(v8::Isolate* isolate) {
     // get the saved state for this thread and restore it.
     if (isolate_->thread_manager()->RestoreThread()) {
       top_level_ = false;
-    } else {
-      internal::ExecutionAccess access(isolate_);
-      isolate_->stack_guard()->ClearThread(access);
-      isolate_->thread_manager()->InitThread(access);
     }
   }
   DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread());
@@ -88,6 +84,7 @@ Unlocker::~Unlocker() {
 namespace internal {
 
 void ThreadManager::InitThread(const ExecutionAccess& lock) {
+  isolate_->InitializeThreadLocal();
   isolate_->stack_guard()->InitThread(lock);
   isolate_->debug()->InitThread(lock);
 }
diff --git a/deps/v8/src/extensions/OWNERS b/deps/v8/src/extensions/OWNERS
index 852d438bb0a884..48d72aea5eec22 100644
--- a/deps/v8/src/extensions/OWNERS
+++ b/deps/v8/src/extensions/OWNERS
@@ -1 +1 @@
-file://COMMON_OWNERS
+file:../../COMMON_OWNERS
diff --git a/deps/v8/src/extensions/cputracemark-extension.cc b/deps/v8/src/extensions/cputracemark-extension.cc
index af85130ee83646..6162afad5fe3ea 100644
--- a/deps/v8/src/extensions/cputracemark-extension.cc
+++ b/deps/v8/src/extensions/cputracemark-extension.cc
@@ -26,7 +26,7 @@ void CpuTraceMarkExtension::Mark(
 
 #if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
 
-#if !V8_LIBC_MSVCRT
+#if defined(__clang__)
   // for clang builds
   uint32_t param =
       args[0]->Uint32Value(args.GetIsolate()->GetCurrentContext()).ToChecked();
diff --git a/deps/v8/src/flags/OWNERS b/deps/v8/src/flags/OWNERS
index 852d438bb0a884..48d72aea5eec22 100644
--- a/deps/v8/src/flags/OWNERS
+++ b/deps/v8/src/flags/OWNERS
@@ -1 +1 @@
-file://COMMON_OWNERS
+file:../../COMMON_OWNERS
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index c32bb034078828..c7c07e6dc654e8 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -206,7 +206,9 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
 #define HARMONY_INPROGRESS_BASE(V)                                        \
   V(harmony_private_methods, "harmony private methods in class literals") \
   V(harmony_regexp_sequence, "RegExp Unicode sequence properties")        \
-  V(harmony_weak_refs, "harmony weak references")
+  V(harmony_weak_refs, "harmony weak references")                         \
+  V(harmony_optional_chaining, "harmony optional chaining syntax")        \
+  V(harmony_nullish, "harmony nullish operator")
 
 #ifdef V8_INTL_SUPPORT
 #define HARMONY_INPROGRESS(V)                              \
@@ -240,7 +242,6 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
   V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")                \
   V(harmony_import_meta, "harmony import.meta property")                   \
   V(harmony_dynamic_import, "harmony dynamic import")                      \
-  V(harmony_numeric_separator, "harmony numeric separator between digits") \
   V(harmony_promise_all_settled, "harmony Promise.allSettled")
 
 #ifdef V8_INTL_SUPPORT
@@ -298,15 +299,47 @@ DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
 #define V8_LITE_BOOL false
 #endif
 
+#ifdef V8_ENABLE_LAZY_SOURCE_POSITIONS
+#define V8_LAZY_SOURCE_POSITIONS_BOOL true
+#else
+#define V8_LAZY_SOURCE_POSITIONS_BOOL false
+#endif
+
 DEFINE_BOOL(lite_mode, V8_LITE_BOOL,
             "enables trade-off of performance for memory savings")
 
 // Lite mode implies other flags to trade-off performance for memory.
 DEFINE_IMPLICATION(lite_mode, jitless)
 DEFINE_IMPLICATION(lite_mode, lazy_feedback_allocation)
-DEFINE_IMPLICATION(lite_mode, enable_lazy_source_positions)
 DEFINE_IMPLICATION(lite_mode, optimize_for_size)
 
+#ifdef V8_DISABLE_WRITE_BARRIERS
+#define V8_DISABLE_WRITE_BARRIERS_BOOL true
+#else
+#define V8_DISABLE_WRITE_BARRIERS_BOOL false
+#endif
+
+DEFINE_BOOL_READONLY(disable_write_barriers, V8_DISABLE_WRITE_BARRIERS_BOOL,
+                     "disable write barriers when GC is non-incremental "
+                     "and heap contains single generation.")
+
+// Disable incremental marking barriers
+DEFINE_NEG_IMPLICATION(disable_write_barriers, incremental_marking)
+
+#ifdef V8_ENABLE_SINGLE_GENERATION
+#define V8_GENERATION_BOOL true
+#else
+#define V8_GENERATION_BOOL false
+#endif
+
+DEFINE_BOOL_READONLY(
+    single_generation, V8_GENERATION_BOOL,
+    "allocate all objects from young generation to old generation")
+
+// Prevent inline allocation into new space
+DEFINE_NEG_IMPLICATION(single_generation, inline_new)
+DEFINE_NEG_IMPLICATION(single_generation, turbo_allocation_folding)
+
 #ifdef V8_ENABLE_FUTURE
 #define FUTURE_BOOL true
 #else
@@ -351,10 +384,6 @@ DEFINE_BOOL(feedback_normalization, false,
 DEFINE_BOOL_READONLY(internalize_on_the_fly, true,
                      "internalize string keys for generic keyed ICs on the fly")
 
-// Flag to faster calls with arguments mismatches (https://crbug.com/v8/8895)
-DEFINE_BOOL(fast_calls_with_arguments_mismatches, true,
-            "skip arguments adaptor frames when it's provably safe")
-
 // Flag for one-shot optimizations.
 DEFINE_BOOL(enable_one_shot_optimization, true,
             "Enable size optimizations for the code that will "
@@ -409,7 +438,7 @@ DEFINE_BOOL(ignition_share_named_property_feedback, true,
             "the same object")
 DEFINE_BOOL(print_bytecode, false,
             "print bytecode generated by ignition interpreter")
-DEFINE_BOOL(enable_lazy_source_positions, false,
+DEFINE_BOOL(enable_lazy_source_positions, V8_LAZY_SOURCE_POSITIONS_BOOL,
             "skip generating source positions during initial compile but "
             "regenerate when actually required")
 DEFINE_BOOL(stress_lazy_source_positions, false,
@@ -677,13 +706,18 @@ DEFINE_STRING(dump_wasm_module_path, nullptr,
 // for configurability.
 #include "src/wasm/wasm-feature-flags.h"
 
-#define SPACE
 #define DECL_WASM_FLAG(feat, desc, val)      \
   DEFINE_BOOL(experimental_wasm_##feat, val, \
               "enable prototype " desc " for wasm")
-FOREACH_WASM_FEATURE_FLAG(DECL_WASM_FLAG, SPACE)
+FOREACH_WASM_FEATURE_FLAG(DECL_WASM_FLAG)
 #undef DECL_WASM_FLAG
-#undef SPACE
+
+DEFINE_BOOL(wasm_staging, false, "enable staged wasm features")
+
+#define WASM_STAGING_IMPLICATION(feat, desc, val) \
+  DEFINE_IMPLICATION(wasm_staging, experimental_wasm_##feat)
+FOREACH_WASM_STAGING_FEATURE_FLAG(WASM_STAGING_IMPLICATION)
+#undef WASM_STAGING_IMPLICATION
 
 DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
 DEFINE_BOOL(wasm_no_bounds_checks, false,
@@ -752,6 +786,7 @@ DEFINE_SIZE_T(
     "max size of the heap (in Mbytes) "
     "both max_semi_space_size and max_old_space_size take precedence. "
     "All three flags cannot be specified at the same time.")
+DEFINE_SIZE_T(initial_heap_size, 0, "initial size of the heap (in Mbytes)")
 DEFINE_BOOL(huge_max_old_generation_size, false,
             "Increase max size of the old space to 4 GB for x64 systems with"
             "the physical memory bigger than 16 GB")
@@ -788,9 +823,18 @@ DEFINE_BOOL(trace_gc_freelists_verbose, false,
 DEFINE_IMPLICATION(trace_gc_freelists_verbose, trace_gc_freelists)
 DEFINE_BOOL(trace_evacuation_candidates, false,
             "Show statistics about the pages evacuation by the compaction")
-DEFINE_INT(gc_freelist_strategy, 0,
+DEFINE_BOOL(
+    trace_allocations_origins, false,
+    "Show statistics about the origins of allocations. "
+    "Combine with --no-inline-new to track allocations from generated code")
+DEFINE_INT(gc_freelist_strategy, 5,
            "Freelist strategy to use: "
-           "1=FreeListFastAlloc. 2=FreeListMany. Anything else=FreeListLegacy")
+           "0:FreeListLegacy. "
+           "1:FreeListFastAlloc. "
+           "2:FreeListMany. "
+           "3:FreeListManyCached. "
+           "4:FreeListManyCachedFastPath. "
+           "5:FreeListManyCachedOrigin. ")
 
 DEFINE_INT(trace_allocation_stack_interval, -1,
            "print stack trace after <n> free-list allocations")
@@ -1210,6 +1254,9 @@ DEFINE_UINT(serialization_chunk_size, 4096,
 DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
 DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.")
 DEFINE_BOOL(regexp_interpret_all, false, "interpret all regexp code")
+DEFINE_BOOL(regexp_tier_up, false,
+            "enable regexp interpreter and tier up to the compiler")
+DEFINE_NEG_IMPLICATION(regexp_interpret_all, regexp_tier_up)
 
 // Testing flags test/cctest/test-{flags,api,serialization}.cc
 DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag")
@@ -1348,6 +1395,7 @@ DEFINE_BOOL(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
 DEFINE_BOOL(trace_regexp_assembler, false,
             "trace regexp macro assembler calls.")
 DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
+DEFINE_BOOL(trace_regexp_tier_up, false, "trace regexp execution tier-up")
 
 // Debugger
 DEFINE_BOOL(print_break_location, false, "print source location on debug break")
@@ -1407,6 +1455,9 @@ DEFINE_BOOL(perf_basic_prof_only_functions, false,
 DEFINE_IMPLICATION(perf_basic_prof_only_functions, perf_basic_prof)
 DEFINE_BOOL(perf_prof, false,
             "Enable perf linux profiler (experimental annotate support).")
+DEFINE_BOOL(perf_prof_annotate_wasm, false,
+            "Used with --perf-prof, load wasm source map and provide annotate "
+            "support (experimental).")
 DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
 // TODO(v8:8462) Remove implication once perf supports remapping.
 DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory)
@@ -1463,7 +1514,6 @@ DEFINE_BOOL(trace_elements_transitions, false, "trace elements transitions")
 DEFINE_BOOL(trace_creation_allocation_sites, false,
             "trace the creation of allocation sites")
 
-// codegen-ia32.cc / codegen-arm.cc
 DEFINE_BOOL(print_code, false, "print generated code")
 DEFINE_BOOL(print_opt_code, false, "print optimized code")
 DEFINE_STRING(print_opt_code_filter, "*", "filter for printing optimized code")
@@ -1471,6 +1521,8 @@ DEFINE_BOOL(print_code_verbose, false, "print more information for code")
 DEFINE_BOOL(print_builtin_code, false, "print generated code for builtins")
 DEFINE_STRING(print_builtin_code_filter, "*",
               "filter for printing builtin code")
+DEFINE_BOOL(print_regexp_code, false, "print generated regexp code")
+DEFINE_BOOL(print_regexp_bytecode, false, "print generated regexp bytecode")
 DEFINE_BOOL(print_builtin_size, false, "print code size for builtins")
 
 #ifdef ENABLE_DISASSEMBLER
@@ -1487,6 +1539,7 @@ DEFINE_IMPLICATION(print_all_code, print_code)
 DEFINE_IMPLICATION(print_all_code, print_opt_code)
 DEFINE_IMPLICATION(print_all_code, print_code_verbose)
 DEFINE_IMPLICATION(print_all_code, print_builtin_code)
+DEFINE_IMPLICATION(print_all_code, print_regexp_code)
 DEFINE_IMPLICATION(print_all_code, code_comments)
 #endif
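
As a reading aid for the staging machinery added above: FOREACH_WASM_STAGING_FEATURE_FLAG stamps WASM_STAGING_IMPLICATION once per staged feature, so for a hypothetical staged feature named foo (the real list lives in wasm-feature-flags.h) the preprocessor emits:

    DEFINE_IMPLICATION(wasm_staging, experimental_wasm_foo)
    // i.e. passing --wasm-staging also enables --experimental-wasm-foo,
    // while each experimental flag remains individually controllable.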
 
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index db4f806e58151d..aed5b3fa834e84 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -22,6 +22,10 @@ namespace internal {
 
 namespace {
 
+// Specifies whether V8 expects the holder memory of a global handle to be live
+// or dead.
+enum class HandleHolder { kLive, kDead };
+
 constexpr size_t kBlockSize = 256;
 
 }  // namespace
@@ -32,6 +36,7 @@ class GlobalHandles::NodeBlock final {
   using BlockType = NodeBlock<_NodeType>;
   using NodeType = _NodeType;
 
+  V8_INLINE static const NodeBlock* From(const NodeType* node);
   V8_INLINE static NodeBlock* From(NodeType* node);
 
   NodeBlock(GlobalHandles* global_handles,
@@ -66,6 +71,16 @@ class GlobalHandles::NodeBlock final {
   DISALLOW_COPY_AND_ASSIGN(NodeBlock);
 };
 
+template <class NodeType>
+const GlobalHandles::NodeBlock<NodeType>*
+GlobalHandles::NodeBlock<NodeType>::From(const NodeType* node) {
+  uintptr_t ptr = reinterpret_cast<const uintptr_t>(node) -
+                  sizeof(NodeType) * node->index();
+  const BlockType* block = reinterpret_cast<const BlockType*>(ptr);
+  DCHECK_EQ(node, block->at(node->index()));
+  return block;
+}
+
 template <class NodeType>
 GlobalHandles::NodeBlock<NodeType>* GlobalHandles::NodeBlock<NodeType>::From(
     NodeType* node) {
@@ -239,6 +254,10 @@ void GlobalHandles::NodeSpace<NodeType>::Free(NodeType* node) {
 template <class Child>
 class NodeBase {
  public:
+  static const Child* FromLocation(const Address* location) {
+    return reinterpret_cast<const Child*>(location);
+  }
+
   static Child* FromLocation(Address* location) {
     return reinterpret_cast<Child*>(location);
   }
@@ -532,7 +551,8 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
     set_state(NEAR_DEATH);
   }
 
-  void ResetPhantomHandle() {
+  void ResetPhantomHandle(HandleHolder handle_holder) {
+    DCHECK_EQ(HandleHolder::kLive, handle_holder);
     DCHECK_EQ(PHANTOM_WEAK_RESET_HANDLE, weakness_type());
     DCHECK_EQ(PENDING, state());
     DCHECK_NULL(weak_callback_);
@@ -580,10 +600,9 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
 
   // This stores the node state (3 bits), the in-young-list flag (1 bit) and
   // the node's weakness type (2 bits).
-  class NodeState : public BitField8<State, 0, 3> {};
-  class IsInYoungList : public BitField8<bool, NodeState::kNext, 1> {};
-  class NodeWeaknessType
-      : public BitField8<WeaknessType, IsInYoungList::kNext, 2> {};
+  using NodeState = BitField8<State, 0, 3>;
+  using IsInYoungList = NodeState::Next<bool, 1>;
+  using NodeWeaknessType = IsInYoungList::Next<WeaknessType, 2>;
 
   // Handle specific callback - might be a weak reference in disguise.
   WeakCallbackInfo<void>::Callback weak_callback_;
@@ -615,6 +634,9 @@ class GlobalHandles::TracedNode final
   bool is_root() const { return IsRoot::decode(flags_); }
   void set_root(bool v) { flags_ = IsRoot::update(flags_, v); }
 
+  bool has_destructor() const { return HasDestructor::decode(flags_); }
+  void set_has_destructor(bool v) { flags_ = HasDestructor::update(flags_, v); }
+
   void SetFinalizationCallback(void* parameter,
                                WeakCallbackInfo<void>::Callback callback) {
     set_parameter(parameter);
@@ -641,18 +663,21 @@ class GlobalHandles::TracedNode final
     set_state(NEAR_DEATH);
   }
 
-  void ResetPhantomHandle() {
+  void ResetPhantomHandle(HandleHolder handle_holder) {
     DCHECK(IsInUse());
-    Address** handle = reinterpret_cast<Address**>(data_.parameter);
-    *handle = nullptr;
+    if (handle_holder == HandleHolder::kLive) {
+      Address** handle = reinterpret_cast<Address**>(data_.parameter);
+      *handle = nullptr;
+    }
     NodeSpace<TracedNode>::Release(this);
     DCHECK(!IsInUse());
   }
 
  protected:
-  class NodeState : public BitField8<State, 0, 2> {};
-  class IsInYoungList : public BitField8<bool, NodeState::kNext, 1> {};
-  class IsRoot : public BitField8<bool, IsInYoungList::kNext, 1> {};
+  using NodeState = BitField8<State, 0, 2>;
+  using IsInYoungList = NodeState::Next<bool, 1>;
+  using IsRoot = IsInYoungList::Next<bool, 1>;
+  using HasDestructor = IsRoot::Next<bool, 1>;
 
   void ClearImplFields() {
     set_root(true);
@@ -691,18 +716,21 @@ Handle<Object> GlobalHandles::Create(Address value) {
   return Create(Object(value));
 }
 
-Handle<Object> GlobalHandles::CreateTraced(Object value, Address* slot) {
+Handle<Object> GlobalHandles::CreateTraced(Object value, Address* slot,
+                                           bool has_destructor) {
   GlobalHandles::TracedNode* result = traced_nodes_->Acquire(value);
   if (ObjectInYoungGeneration(value) && !result->is_in_young_list()) {
     traced_young_nodes_.push_back(result);
     result->set_in_young_list(true);
   }
   result->set_parameter(slot);
+  result->set_has_destructor(has_destructor);
   return result->handle();
 }
 
-Handle<Object> GlobalHandles::CreateTraced(Address value, Address* slot) {
-  return CreateTraced(Object(value), slot);
+Handle<Object> GlobalHandles::CreateTraced(Address value, Address* slot,
+                                           bool has_destructor) {
+  return CreateTraced(Object(value), slot, has_destructor);
 }
 
 Handle<Object> GlobalHandles::CopyGlobal(Address* location) {
@@ -717,6 +745,27 @@ Handle<Object> GlobalHandles::CopyGlobal(Address* location) {
   return global_handles->Create(*location);
 }
 
+// static
+void GlobalHandles::CopyTracedGlobal(const Address* const* from, Address** to) {
+  DCHECK_NOT_NULL(*from);
+  DCHECK_NULL(*to);
+  const TracedNode* node = TracedNode::FromLocation(*from);
+  // Copying a traced handle with a finalization callback is prohibited because
+  // the callback may require knowing about multiple copies of the traced
+  // handle.
+  CHECK(!node->HasFinalizationCallback());
+  GlobalHandles* global_handles =
+      NodeBlock<TracedNode>::From(node)->global_handles();
+  Handle<Object> o = global_handles->CreateTraced(
+      node->object(), reinterpret_cast<Address*>(to), node->has_destructor());
+  *to = o.location();
+#ifdef VERIFY_HEAP
+  if (i::FLAG_verify_heap) {
+    Object(**to).ObjectVerify(global_handles->isolate());
+  }
+#endif  // VERIFY_HEAP
+}
+
 void GlobalHandles::MoveGlobal(Address** from, Address** to) {
   DCHECK_NOT_NULL(*from);
   DCHECK_NOT_NULL(*to);
@@ -809,7 +858,7 @@ void GlobalHandles::IterateWeakRootsForPhantomHandles(
         should_reset_handle(isolate()->heap(), node->location())) {
       if (node->IsPhantomResetHandle()) {
         node->MarkPending();
-        node->ResetPhantomHandle();
+        node->ResetPhantomHandle(HandleHolder::kLive);
         ++number_of_phantom_handle_resets_;
       } else if (node->IsPhantomCallback()) {
         node->MarkPending();
@@ -821,7 +870,8 @@ void GlobalHandles::IterateWeakRootsForPhantomHandles(
     if (node->IsInUse() &&
         should_reset_handle(isolate()->heap(), node->location())) {
       if (node->IsPhantomResetHandle()) {
-        node->ResetPhantomHandle();
+        node->ResetPhantomHandle(node->has_destructor() ? HandleHolder::kLive
+                                                        : HandleHolder::kDead);
         ++number_of_phantom_handle_resets_;
       } else {
         node->CollectPhantomCallbackData(&traced_pending_phantom_callbacks_);
@@ -907,7 +957,7 @@ void GlobalHandles::IterateYoungWeakUnmodifiedRootsForPhantomHandles(
         DCHECK(node->IsPhantomResetHandle() || node->IsPhantomCallback());
         if (node->IsPhantomResetHandle()) {
           node->MarkPending();
-          node->ResetPhantomHandle();
+          node->ResetPhantomHandle(HandleHolder::kLive);
           ++number_of_phantom_handle_resets_;
         } else if (node->IsPhantomCallback()) {
           node->MarkPending();
@@ -922,6 +972,9 @@ void GlobalHandles::IterateYoungWeakUnmodifiedRootsForPhantomHandles(
       }
     }
   }
+
+  LocalEmbedderHeapTracer* const tracer =
+      isolate()->heap()->local_embedder_heap_tracer();
   for (TracedNode* node : traced_young_nodes_) {
     if (!node->IsInUse()) continue;
 
@@ -929,7 +982,18 @@ void GlobalHandles::IterateYoungWeakUnmodifiedRootsForPhantomHandles(
                    !should_reset_handle(isolate_->heap(), node->location()));
     if (should_reset_handle(isolate_->heap(), node->location())) {
       if (node->IsPhantomResetHandle()) {
-        node->ResetPhantomHandle();
+        if (node->has_destructor()) {
+          // For handles with a destructor, the embedder memory is guaranteed
+          // to still be alive, as the destructor would otherwise have
+          // removed the memory.
+          node->ResetPhantomHandle(HandleHolder::kLive);
+        } else {
+          v8::Value* value = ToApi<v8::Value>(node->handle());
+          tracer->ResetHandleInNonTracingGC(
+              *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value));
+          DCHECK(!node->IsInUse());
+        }
+
         ++number_of_phantom_handle_resets_;
       } else {
         node->CollectPhantomCallbackData(&traced_pending_phantom_callbacks_);
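
The new const overload of NodeBlock::From relies on the same layout trick as the existing one: nodes are stored in a flat array at the front of their block, so the block base can be recovered by pure pointer arithmetic from a node's address and index. A self-contained sketch of the idea, with hypothetical stand-in types:

    struct Node { uint8_t index; };
    struct Block { Node nodes[256]; };  // kBlockSize in this patch

    const Block* BlockFromNode(const Node* node) {
      uintptr_t base =
          reinterpret_cast<uintptr_t>(node) - sizeof(Node) * node->index;
      return reinterpret_cast<const Block*>(base);  // nodes[] is member #0
    }

The DCHECK_EQ(node, block->at(node->index())) in the patch validates exactly this round trip.
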
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index a08bc1fd13c78d..a07f7a772a9878 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -81,6 +81,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
   //
 
   static void MoveTracedGlobal(Address** from, Address** to);
+  static void CopyTracedGlobal(const Address* const* from, Address** to);
   static void DestroyTraced(Address* location);
   static void SetFinalizationCallbackForTraced(
       Address* location, void* parameter,
@@ -101,8 +102,9 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
     return Handle<T>::cast(Create(Object(value)));
   }
 
-  Handle<Object> CreateTraced(Object value, Address* slot);
-  Handle<Object> CreateTraced(Address value, Address* slot);
+  Handle<Object> CreateTraced(Object value, Address* slot, bool has_destructor);
+  Handle<Object> CreateTraced(Address value, Address* slot,
+                              bool has_destructor);
 
   void RecordStats(HeapStats* stats);
 
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 65d3f4a732a6f9..763300cffe2cd9 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -80,8 +80,7 @@ void LocalArrayBufferTracker::Free(Callback should_free) {
         ExternalBackingStoreType::kArrayBuffer, freed_memory);
 
     // TODO(wez): Remove backing-store from external memory accounting.
-    page_->heap()->update_external_memory_concurrently_freed(
-        static_cast<intptr_t>(freed_memory));
+    page_->heap()->update_external_memory_concurrently_freed(freed_memory);
   }
 }
 
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 0c04d7b6ae2595..fdca6e8df27b56 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -68,8 +68,7 @@ void LocalArrayBufferTracker::Process(Callback callback) {
     page_->DecrementExternalBackingStoreBytes(
         ExternalBackingStoreType::kArrayBuffer, freed_memory);
     // TODO(wez): Remove backing-store from external memory accounting.
-    page_->heap()->update_external_memory_concurrently_freed(
-        static_cast<intptr_t>(freed_memory));
+    page_->heap()->update_external_memory_concurrently_freed(freed_memory);
   }
 
   array_buffers_.swap(kept_array_buffers);
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index eae29cbf5ce277..7c67ccfab71ea6 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -57,6 +57,12 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
   bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
     return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
   }
+  void ResetHandleInNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
+    // Resetting is only called when IsRootForNonTracingGC returns false,
+    // which can only happen when an EmbedderHeapTracer is set via the API.
+    DCHECK(InUse());
+    remote_tracer_->ResetHandleInNonTracingGC(handle);
+  }
 
   void NotifyV8MarkingWorklistWasEmpty() {
     num_v8_marking_worklist_was_empty_++;
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 9aa705047c8094..bcad5d271410cf 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -71,13 +71,6 @@ Handle<HeapNumber> Factory::NewHeapNumber(double value,
   return heap_number;
 }
 
-Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
-    double value, AllocationType allocation) {
-  Handle<MutableHeapNumber> number = NewMutableHeapNumber(allocation);
-  number->set_value(value);
-  return number;
-}
-
 Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
                                                   AllocationType allocation) {
   Handle<HeapNumber> heap_number = NewHeapNumber(allocation);
@@ -85,16 +78,9 @@ Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
   return heap_number;
 }
 
-Handle<MutableHeapNumber> Factory::NewMutableHeapNumberFromBits(
-    uint64_t bits, AllocationType allocation) {
-  Handle<MutableHeapNumber> number = NewMutableHeapNumber(allocation);
-  number->set_value_as_bits(bits);
-  return number;
-}
-
-Handle<MutableHeapNumber> Factory::NewMutableHeapNumberWithHoleNaN(
+Handle<HeapNumber> Factory::NewHeapNumberWithHoleNaN(
     AllocationType allocation) {
-  return NewMutableHeapNumberFromBits(kHoleNanInt64, allocation);
+  return NewHeapNumberFromBits(kHoleNanInt64, allocation);
 }
 
 Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
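
NewHeapNumberWithHoleNaN (the renamed mutable variant) keeps the long-standing convention that holes in holey double arrays are one specific NaN bit pattern, kHoleNanInt64. Only a raw-bit comparison can detect it, since as a double any NaN compares unequal to everything, including itself. A sketch of the check under that assumption:

    bool IsTheHoleNan(uint64_t bits) { return bits == kHoleNanInt64; }
    // Note: comparing hole_nan against itself as doubles is always false,
    // so hole detection must compare the uint64_t bit patterns instead.
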
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 19c36656225b46..9bf46be6e815a1 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -285,11 +285,12 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
 }
 
 Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
-                                            AllocationType allocation) {
+                                            AllocationType allocation,
+                                            AllocationOrigin origin) {
   AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
   Heap* heap = isolate()->heap();
   HeapObject result =
-      heap->AllocateRawWithRetryOrFail(size, allocation, alignment);
+      heap->AllocateRawWithRetryOrFail(size, allocation, origin, alignment);
   heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo);
   return Handle<HeapObject>(result, isolate());
 }
@@ -685,16 +686,19 @@ Handle<SmallOrderedNameDictionary> Factory::NewSmallOrderedNameDictionary(
 }
 
 Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
-  return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity);
+  return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity)
+      .ToHandleChecked();
 }
 
 Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
-  return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity);
+  return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity)
+      .ToHandleChecked();
 }
 
 Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary() {
   return OrderedNameDictionary::Allocate(isolate(),
-                                         OrderedNameDictionary::kMinCapacity);
+                                         OrderedNameDictionary::kMinCapacity)
+      .ToHandleChecked();
 }
 
 Handle<AccessorPair> Factory::NewAccessorPair() {
@@ -1744,16 +1748,6 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
   return microtask;
 }
 
-Handle<FinalizationGroupCleanupJobTask>
-Factory::NewFinalizationGroupCleanupJobTask(
-    Handle<JSFinalizationGroup> finalization_group) {
-  Handle<FinalizationGroupCleanupJobTask> microtask =
-      Handle<FinalizationGroupCleanupJobTask>::cast(
-          NewStruct(FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE));
-  microtask->set_finalization_group(*finalization_group);
-  return microtask;
-}
-
 Handle<Foreign> Factory::NewForeign(Address addr, AllocationType allocation) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
   STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
@@ -2010,7 +2004,8 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
   HeapObject raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail(
       adjusted_object_size, AllocationType::kYoung);
 
-  DCHECK(Heap::InYoungGeneration(raw_clone));
+  DCHECK(Heap::InYoungGeneration(raw_clone) || FLAG_single_generation);
+
   // Since we know the clone is allocated in new space, we can copy
   // the contents without worrying about updating the write barrier.
   Heap::CopyBlock(raw_clone.address(), source->address(), object_size);
@@ -2234,13 +2229,10 @@ Handle<HeapNumber> Factory::NewHeapNumber(AllocationType allocation) {
   return handle(HeapNumber::cast(result), isolate());
 }
 
-Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
-    AllocationType allocation) {
-  STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
-  Map map = *mutable_heap_number_map();
-  HeapObject result = AllocateRawWithImmortalMap(
-      MutableHeapNumber::kSize, allocation, map, kDoubleUnaligned);
-  return handle(MutableHeapNumber::cast(result), isolate());
+Handle<HeapNumber> Factory::NewHeapNumberForCodeAssembler(double value) {
+  return NewHeapNumber(value, isolate()->heap()->CanAllocateInReadOnlySpace()
+                                  ? AllocationType::kReadOnly
+                                  : AllocationType::kOld);
 }
 
 Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
@@ -2518,7 +2510,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
       NewFunction(initial_map, info, context, allocation);
 
   // Give compiler a chance to pre-initialize.
-  Compiler::PostInstantiation(result, allocation);
+  Compiler::PostInstantiation(result);
 
   return result;
 }
@@ -2550,14 +2542,15 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
   result->set_raw_feedback_cell(*feedback_cell);
 
   // Give compiler a chance to pre-initialize.
-  Compiler::PostInstantiation(result, allocation);
+  Compiler::PostInstantiation(result);
 
   return result;
 }
 
-Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
+Handle<ScopeInfo> Factory::NewScopeInfo(int length, AllocationType type) {
+  DCHECK(type == AllocationType::kOld || type == AllocationType::kReadOnly);
   return NewFixedArrayWithMap<ScopeInfo>(RootIndex::kScopeInfoMap, length,
-                                         AllocationType::kOld);
+                                         type);
 }
 
 Handle<SourceTextModuleInfo> Factory::NewSourceTextModuleInfo() {
@@ -3716,6 +3709,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
   Handle<Object> type_name = undefined_value();
   Handle<Object> eval_origin = frame->GetEvalOrigin();
   Handle<Object> wasm_module_name = frame->GetWasmModuleName();
+  Handle<Object> wasm_instance = frame->GetWasmInstance();
 
   // MethodName and TypeName are expensive to look up, so they are only
   // included when they are strictly needed by the stack trace
@@ -3751,6 +3745,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
   info->set_type_name(*type_name);
   info->set_eval_origin(*eval_origin);
   info->set_wasm_module_name(*wasm_module_name);
+  info->set_wasm_instance(*wasm_instance);
 
   info->set_is_eval(frame->IsEval());
   info->set_is_constructor(is_constructor);
@@ -3904,9 +3899,12 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
   store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
   store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
   store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpLatin1BytecodeIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpUC16BytecodeIndex, uninitialized);
   store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero);
   store->set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
   store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpTierUpTicksIndex, Smi::kZero);
   regexp->set_data(*store);
 }
 
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 3ccbe6856f8d89..1e47926e8e4166 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -37,7 +37,6 @@ class ArrayBoilerplateDescription;
 class CoverageInfo;
 class DebugInfo;
 class EnumCache;
-class FinalizationGroupCleanupJobTask;
 class FreshlyAllocatedBigInt;
 class Isolate;
 class JSArrayBufferView;
@@ -478,8 +477,6 @@ class V8_EXPORT_PRIVATE Factory {
   Handle<PromiseResolveThenableJobTask> NewPromiseResolveThenableJobTask(
       Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
       Handle<JSReceiver> thenable, Handle<Context> context);
-  Handle<FinalizationGroupCleanupJobTask> NewFinalizationGroupCleanupJobTask(
-      Handle<JSFinalizationGroup> finalization_group);
 
   // Foreign objects are pretenured when allocated by the bootstrapper.
   Handle<Foreign> NewForeign(
@@ -521,8 +518,9 @@ class V8_EXPORT_PRIVATE Factory {
 
   // Allocate a block of memory of the given AllocationType (filled with a
   // filler). Used as a fall-back for generated code when the space is full.
-  Handle<HeapObject> NewFillerObject(int size, bool double_align,
-                                     AllocationType allocation);
+  Handle<HeapObject> NewFillerObject(
+      int size, bool double_align, AllocationType allocation,
+      AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
 
@@ -589,13 +587,11 @@ class V8_EXPORT_PRIVATE Factory {
   Handle<HeapNumber> NewHeapNumber(
       AllocationType allocation = AllocationType::kYoung);
 
-  Handle<MutableHeapNumber> NewMutableHeapNumber(
-      AllocationType allocation = AllocationType::kYoung);
-  inline Handle<MutableHeapNumber> NewMutableHeapNumber(
-      double value, AllocationType allocation = AllocationType::kYoung);
-  inline Handle<MutableHeapNumber> NewMutableHeapNumberFromBits(
-      uint64_t bits, AllocationType allocation = AllocationType::kYoung);
-  inline Handle<MutableHeapNumber> NewMutableHeapNumberWithHoleNaN(
+  // Creates a new HeapNumber in read-only space if possible, otherwise in
+  // old space.
+  Handle<HeapNumber> NewHeapNumberForCodeAssembler(double value);
+
+  inline Handle<HeapNumber> NewHeapNumberWithHoleNaN(
       AllocationType allocation = AllocationType::kYoung);
 
   // Allocates a new BigInt with {length} digits. Only to be used by
@@ -771,7 +767,8 @@ class V8_EXPORT_PRIVATE Factory {
       AllocationType allocation = AllocationType::kOld);
 
   // Create a serialized scope info.
-  Handle<ScopeInfo> NewScopeInfo(int length);
+  Handle<ScopeInfo> NewScopeInfo(int length,
+                                 AllocationType type = AllocationType::kOld);
 
   Handle<SourceTextModuleInfo> NewSourceTextModuleInfo();
 
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 77e6b999970c30..85152c7bfe2975 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -26,6 +26,9 @@ static size_t CountTotalHolesSize(Heap* heap) {
   }
   return holes_size;
 }
+WorkerThreadRuntimeCallStats* GCTracer::worker_thread_runtime_call_stats() {
+  return heap_->isolate()->counters()->worker_thread_runtime_call_stats();
+}
 
 RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
   STATIC_ASSERT(Scope::FIRST_SCOPE == Scope::MC_INCREMENTAL);
@@ -34,10 +37,20 @@ RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
       static_cast<int>(id));
 }
 
+RuntimeCallCounterId GCTracer::RCSCounterFromBackgroundScope(
+    BackgroundScope::ScopeId id) {
+  STATIC_ASSERT(Scope::FIRST_BACKGROUND_SCOPE ==
+                Scope::BACKGROUND_ARRAY_BUFFER_FREE);
+  STATIC_ASSERT(
+      0 == static_cast<int>(BackgroundScope::BACKGROUND_ARRAY_BUFFER_FREE));
+  return static_cast<RuntimeCallCounterId>(
+      static_cast<int>(RCSCounterFromScope(Scope::FIRST_BACKGROUND_SCOPE)) +
+      static_cast<int>(id));
+}
+
 GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
     : tracer_(tracer), scope_(scope) {
   start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
-  // TODO(cbruni): remove once we fully moved to a trace-based system.
   if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
   runtime_stats_ = tracer_->heap_->isolate()->counters()->runtime_call_stats();
   runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
@@ -46,30 +59,25 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
 GCTracer::Scope::~Scope() {
   tracer_->AddScopeSample(
       scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
-  // TODO(cbruni): remove once we fully moved to a trace-based system.
   if (V8_LIKELY(runtime_stats_ == nullptr)) return;
   runtime_stats_->Leave(&timer_);
 }
 
-GCTracer::BackgroundScope::BackgroundScope(GCTracer* tracer, ScopeId scope)
-    : tracer_(tracer), scope_(scope), runtime_stats_enabled_(false) {
+GCTracer::BackgroundScope::BackgroundScope(GCTracer* tracer, ScopeId scope,
+                                           RuntimeCallStats* runtime_stats)
+    : tracer_(tracer), scope_(scope), runtime_stats_(runtime_stats) {
   start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
-  // TODO(cbruni): remove once we fully moved to a trace-based system.
   if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
-  timer_.Start(&counter_, nullptr);
-  runtime_stats_enabled_ = true;
+  runtime_stats_->Enter(&timer_,
+                        GCTracer::RCSCounterFromBackgroundScope(scope));
 }
 
 GCTracer::BackgroundScope::~BackgroundScope() {
   double duration_ms =
       tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
-  // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (V8_LIKELY(!runtime_stats_enabled_)) {
-    tracer_->AddBackgroundScopeSample(scope_, duration_ms, nullptr);
-  } else {
-    timer_.Stop();
-    tracer_->AddBackgroundScopeSample(scope_, duration_ms, &counter_);
-  }
+  tracer_->AddBackgroundScopeSample(scope_, duration_ms);
+  if (V8_LIKELY(runtime_stats_ == nullptr)) return;
+  runtime_stats_->Leave(&timer_);
 }
 
 const char* GCTracer::Scope::Name(ScopeId id) {
@@ -170,7 +178,6 @@ GCTracer::GCTracer(Heap* heap)
   current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
   for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
     background_counter_[i].total_duration_ms = 0;
-    background_counter_[i].runtime_call_counter = RuntimeCallCounter(nullptr);
   }
 }
 
@@ -204,7 +211,6 @@ void GCTracer::ResetForTesting() {
   base::MutexGuard guard(&background_counter_mutex_);
   for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
     background_counter_[i].total_duration_ms = 0;
-    background_counter_[i].runtime_call_counter.Reset();
   }
 }
 
@@ -391,6 +397,12 @@ void GCTracer::NotifySweepingCompleted() {
                  "FreeLists statistics after sweeping completed:\n");
     heap_->PrintFreeListsStats();
   }
+  if (FLAG_trace_allocations_origins) {
+    heap_->new_space()->PrintAllocationsOrigins();
+    heap_->old_space()->PrintAllocationsOrigins();
+    heap_->code_space()->PrintAllocationsOrigins();
+    heap_->map_space()->PrintAllocationsOrigins();
+  }
 }
 
 void GCTracer::SampleAllocation(double current_ms,
@@ -1138,30 +1150,13 @@ void GCTracer::FetchBackgroundCounters(int first_global_scope,
         background_counter_[first_background_scope + i].total_duration_ms;
     background_counter_[first_background_scope + i].total_duration_ms = 0;
   }
-  if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
-  RuntimeCallStats* runtime_stats =
-      heap_->isolate()->counters()->runtime_call_stats();
-  if (!runtime_stats) return;
-  for (int i = 0; i < background_mc_scopes; i++) {
-    runtime_stats
-        ->GetCounter(GCTracer::RCSCounterFromScope(
-            static_cast<Scope::ScopeId>(first_global_scope + i)))
-        ->Add(&background_counter_[first_background_scope + i]
-                   .runtime_call_counter);
-    background_counter_[first_background_scope + i]
-        .runtime_call_counter.Reset();
-  }
 }
 
-void GCTracer::AddBackgroundScopeSample(
-    BackgroundScope::ScopeId scope, double duration,
-    RuntimeCallCounter* runtime_call_counter) {
+void GCTracer::AddBackgroundScopeSample(BackgroundScope::ScopeId scope,
+                                        double duration) {
   base::MutexGuard guard(&background_counter_mutex_);
   BackgroundCounter& counter = background_counter_[scope];
   counter.total_duration_ms += duration;
-  if (runtime_call_counter) {
-    counter.runtime_call_counter.Add(runtime_call_counter);
-  }
 }
 
 void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
@@ -1197,10 +1192,7 @@ void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
       DCHECK_GT(overall_marking_time, 0.0);
       const double overall_v8_marking_time =
           overall_marking_time -
-          current_.scopes[Scope::MC_MARK_EMBEDDER_PROLOGUE] -
-          current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING] -
-          current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE] -
-          current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING];
+          current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING];
       DCHECK_GT(overall_v8_marking_time, 0.0);
       const int main_thread_marking_throughput_mb_per_s =
           static_cast<int>(static_cast<double>(heap_->SizeOfObjects()) /
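For orientation, a minimal sketch (not part of the patch) of the offset arithmetic that the two STATIC_ASSERTs in RCSCounterFromBackgroundScope pin down:

// Background scope ids start at 0 and their counters are laid out
// contiguously starting at Scope::FIRST_BACKGROUND_SCOPE, so the mapping
// is a plain offset:
//   RCSCounterFromBackgroundScope(id)
//       == RCSCounterFromScope(Scope::FIRST_BACKGROUND_SCOPE)
//          + static_cast<int>(id)
// e.g. BackgroundScope::BACKGROUND_ARRAY_BUFFER_FREE (== 0) lands on the
// counter for Scope::BACKGROUND_ARRAY_BUFFER_FREE itself.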
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index ec54b6c1ab6897..454bb9ff17971c 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -31,9 +31,12 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),             \
                GCTracer::Scope::Name(gc_tracer_scope_id))
 
-#define TRACE_BACKGROUND_GC(tracer, scope_id)                   \
-  GCTracer::BackgroundScope background_scope(tracer, scope_id); \
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),              \
+#define TRACE_BACKGROUND_GC(tracer, scope_id)                                 \
+  WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(                 \
+      tracer->worker_thread_runtime_call_stats());                            \
+  GCTracer::BackgroundScope background_scope(tracer, scope_id,                \
+                                             runtime_call_stats_scope.Get()); \
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),                            \
                GCTracer::BackgroundScope::Name(scope_id))
 
 // GCTracer collects and prints ONE line after each garbage collector
@@ -82,7 +85,8 @@ class V8_EXPORT_PRIVATE GCTracer {
       FIRST_TOP_MC_SCOPE = MC_CLEAR,
       LAST_TOP_MC_SCOPE = MC_SWEEP,
       FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
-      LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL
+      LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL,
+      FIRST_BACKGROUND_SCOPE = FIRST_GENERAL_BACKGROUND_SCOPE
     };
 
     Scope(GCTracer* tracer, ScopeId scope);
@@ -113,7 +117,8 @@ class V8_EXPORT_PRIVATE GCTracer {
       FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
       LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL
     };
-    BackgroundScope(GCTracer* tracer, ScopeId scope);
+    BackgroundScope(GCTracer* tracer, ScopeId scope,
+                    RuntimeCallStats* runtime_stats);
     ~BackgroundScope();
 
     static const char* Name(ScopeId id);
@@ -123,8 +128,7 @@ class V8_EXPORT_PRIVATE GCTracer {
     ScopeId scope_;
     double start_time_;
     RuntimeCallTimer timer_;
-    RuntimeCallCounter counter_;
-    bool runtime_stats_enabled_;
+    RuntimeCallStats* runtime_stats_;
     DISALLOW_COPY_AND_ASSIGN(BackgroundScope);
   };
 
@@ -206,6 +210,8 @@ class V8_EXPORT_PRIVATE GCTracer {
                                                    double optional_speed);
 
   static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
+  static RuntimeCallCounterId RCSCounterFromBackgroundScope(
+      BackgroundScope::ScopeId id);
 
   explicit GCTracer(Heap* heap);
 
@@ -340,13 +346,15 @@ class V8_EXPORT_PRIVATE GCTracer {
     }
   }
 
-  void AddBackgroundScopeSample(BackgroundScope::ScopeId scope, double duration,
-                                RuntimeCallCounter* runtime_call_counter);
+  void AddBackgroundScopeSample(BackgroundScope::ScopeId scope,
+                                double duration);
 
   void RecordGCPhasesHistograms(TimedHistogram* gc_timer);
 
   void RecordEmbedderSpeed(size_t bytes, double duration);
 
+  WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
+
  private:
   FRIEND_TEST(GCTracer, AverageSpeed);
   FRIEND_TEST(GCTracerTest, AllocationThroughput);
@@ -369,7 +377,6 @@ class V8_EXPORT_PRIVATE GCTracer {
 
   struct BackgroundCounter {
     double total_duration_ms;
-    RuntimeCallCounter runtime_call_counter;
   };
 
   // Returns the average speed of the events in the buffer.
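A minimal usage sketch for the reworked TRACE_BACKGROUND_GC macro; the task class and its body are assumptions, only the macro and the scope id come from the patch:

void ScavengeParallelTask::Run() {  // hypothetical background task
  TRACE_BACKGROUND_GC(
      tracer_,
      GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
  // ... parallel scavenging work ...
  // On scope exit, BackgroundScope reports its duration through
  // AddBackgroundScopeSample and, when runtime call stats are enabled,
  // leaves the timer on the worker thread's own RuntimeCallStats table
  // handed in by the enclosing WorkerThreadRuntimeCallStatsScope.
}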
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index f2f7a7f6920f91..da803f33395364 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -67,7 +67,7 @@ void Heap::update_external_memory(int64_t delta) {
   isolate()->isolate_data()->external_memory_ += delta;
 }
 
-void Heap::update_external_memory_concurrently_freed(intptr_t freed) {
+void Heap::update_external_memory_concurrently_freed(uintptr_t freed) {
   external_memory_concurrently_freed_ += freed;
 }
 
@@ -159,6 +159,7 @@ size_t Heap::NewSpaceAllocationCounter() {
 }
 
 AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
+                                   AllocationOrigin origin,
                                    AllocationAlignment alignment) {
   DCHECK(AllowHandleAllocation::IsAllowed());
   DCHECK(AllowHeapAllocation::IsAllowed());
@@ -179,6 +180,9 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
   HeapObject object;
   AllocationResult allocation;
 
+  if (FLAG_single_generation && type == AllocationType::kYoung)
+    type = AllocationType::kOld;
+
   if (AllocationType::kYoung == type) {
     if (large_object) {
       if (FLAG_young_generation_large_objects) {
@@ -191,13 +195,13 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
         allocation = lo_space_->AllocateRaw(size_in_bytes);
       }
     } else {
-      allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
+      allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
     }
   } else if (AllocationType::kOld == type) {
     if (large_object) {
       allocation = lo_space_->AllocateRaw(size_in_bytes);
     } else {
-      allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
+      allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
     }
   } else if (AllocationType::kCode == type) {
     if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
@@ -213,7 +217,9 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
 #endif
     DCHECK(!large_object);
     DCHECK(CanAllocateInReadOnlySpace());
-    allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
+    DCHECK_EQ(AllocationOrigin::kRuntime, origin);
+    allocation =
+        read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
   } else {
     UNREACHABLE();
   }
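A sketch of what the widened AllocateRaw signature looks like at a call site (illustrative only; both calls assume code running inside Heap):

// Runtime callers compile unchanged thanks to the kRuntime default:
AllocationResult r1 = AllocateRaw(size_in_bytes, AllocationType::kOld);
// GC and generated-code paths now tag their requests explicitly:
AllocationResult r2 = AllocateRaw(size_in_bytes, AllocationType::kYoung,
                                  AllocationOrigin::kGC, kWordAligned);
// Note that under FLAG_single_generation the kYoung request above is
// retargeted to old space before any new-space allocation is attempted.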
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 7feb1c11ba90cb..51f300b577fc50 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -270,10 +270,11 @@ size_t Heap::MinOldGenerationSize() {
 size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
   size_t max_size = V8HeapTrait::kMaxSize;
   // Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
-  // systems with physical memory bigger than 16GB.
+  // systems with physical memory bigger than 16GB. The physical memory
+  // is rounded to the nearest GB.
   constexpr bool x64_bit = Heap::kPointerMultiplier >= 2;
   if (FLAG_huge_max_old_generation_size && x64_bit &&
-      physical_memory / GB > 16) {
+      (physical_memory + 512 * MB) / GB >= 16) {
     DCHECK_EQ(max_size / GB, 2);
     max_size *= 2;
   }
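To make the new rounding concrete, a standalone sketch (constants local to the example, not patch code):

constexpr uint64_t kMB = uint64_t{1} << 20;
constexpr uint64_t kGB = uint64_t{1} << 30;
// A machine reporting 15.75GB failed the old `physical_memory / GB > 16`
// check; with round-to-nearest it now qualifies for the 4GB heap:
static_assert((15 * kGB + 768 * kMB + 512 * kMB) / kGB >= 16,
              "15.75GB counts as 16GB");
// Anything below 15.5GB still rounds down:
static_assert((15 * kGB + 256 * kMB + 512 * kMB) / kGB < 16,
              "15.25GB rounds to 15GB");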
@@ -1107,6 +1108,15 @@ void Heap::GarbageCollectionEpilogue() {
   AllowHeapAllocation for_the_rest_of_the_epilogue;
 
 #ifdef DEBUG
+  // Old-to-new slot sets must be empty after each collection.
+  for (SpaceIterator it(this); it.HasNext();) {
+    Space* space = it.Next();
+
+    for (MemoryChunk* chunk = space->first_page(); chunk != space->last_page();
+         chunk = chunk->list_node().next())
+      DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
+  }
+
   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
   if (FLAG_print_handles) PrintHandles();
   if (FLAG_gc_verbose) Print();
@@ -1179,16 +1189,9 @@ void Heap::GarbageCollectionEpilogue() {
   }
 
   if (FLAG_harmony_weak_refs) {
-    // TODO(marja): (spec): The exact condition on when to schedule the cleanup
-    // task is unclear. This version schedules the cleanup task for a
-    // JSFinalizationGroup whenever the GC has discovered new dirty WeakCells
-    // for it (at that point it might have leftover dirty WeakCells since an
-    // earlier invocation of the cleanup function didn't iterate through
-    // them). See https://github.com/tc39/proposal-weakrefs/issues/34
     HandleScope handle_scope(isolate());
     while (!isolate()->heap()->dirty_js_finalization_groups().IsUndefined(
         isolate())) {
-      // Enqueue one microtask per JSFinalizationGroup.
       Handle<JSFinalizationGroup> finalization_group(
           JSFinalizationGroup::cast(
               isolate()->heap()->dirty_js_finalization_groups()),
@@ -1196,22 +1199,7 @@ void Heap::GarbageCollectionEpilogue() {
       isolate()->heap()->set_dirty_js_finalization_groups(
           finalization_group->next());
       finalization_group->set_next(ReadOnlyRoots(isolate()).undefined_value());
-      Handle<NativeContext> context(finalization_group->native_context(),
-                                    isolate());
-      // GC has no native context, but we use the creation context of the
-      // JSFinalizationGroup for the EnqueueTask operation. This is consitent
-      // with the Promise implementation, assuming the JSFinalizationGroup's
-      // creation context is the "caller's context" in promise functions. An
-      // alternative would be to use the native context of the cleanup
-      // function. This difference shouldn't be observable from JavaScript,
-      // since we enter the native context of the cleanup function before
-      // calling it. TODO(marja): Revisit when the spec clarifies this. See also
-      // https://github.com/tc39/proposal-weakrefs/issues/38 .
-      Handle<FinalizationGroupCleanupJobTask> task =
-          isolate()->factory()->NewFinalizationGroupCleanupJobTask(
-              finalization_group);
-      MicrotaskQueue* microtask_queue = context->microtask_queue();
-      if (microtask_queue) microtask_queue->EnqueueMicrotask(*task);
+      isolate()->RunHostCleanupFinalizationGroupCallback(finalization_group);
     }
   }
 }
@@ -2841,6 +2829,9 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
                                       ClearFreedMemoryMode clear_memory_mode) {
   if (size == 0) return HeapObject();
   HeapObject filler = HeapObject::FromAddress(addr);
+  bool clear_memory =
+      (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory ||
+       clear_slots_mode == ClearRecordedSlots::kYes);
   if (size == kTaggedSize) {
     filler.set_map_after_allocation(
         Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
@@ -2849,9 +2840,9 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
     filler.set_map_after_allocation(
         Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
         SKIP_WRITE_BARRIER);
-    if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
-      Memory<Tagged_t>(addr + kTaggedSize) =
-          static_cast<Tagged_t>(kClearedFreeMemoryValue);
+    if (clear_memory) {
+      AtomicSlot slot(ObjectSlot(addr) + 1);
+      *slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
     }
   } else {
     DCHECK_GT(size, 2 * kTaggedSize);
@@ -2859,7 +2850,7 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
         Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
         SKIP_WRITE_BARRIER);
     FreeSpace::cast(filler).relaxed_write_size(size);
-    if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
+    if (clear_memory) {
       MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
                    (size / kTaggedSize) - 2);
     }
@@ -2944,6 +2935,9 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
   if (target.IsSharedFunctionInfo()) {
     LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
                                                          target.address()));
+  } else if (target.IsNativeContext()) {
+    PROFILE(isolate_,
+            NativeContextMoveEvent(source.address(), target.address()));
   }
 
   if (FLAG_verify_predictable) {
@@ -3000,11 +2994,21 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
         object, HeapObject::FromAddress(new_start));
   }
 
+#ifdef DEBUG
+  if (MayContainRecordedSlots(object)) {
+    MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+    DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
+    DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
+  }
+#endif
+
   // Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play safer
   // we still do it.
-  HeapObject filler =
-      CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
+  CreateFillerObjectAt(old_start, bytes_to_trim,
+                       MayContainRecordedSlots(object)
+                           ? ClearRecordedSlots::kYes
+                           : ClearRecordedSlots::kNo);
 
   // Initialize header of the trimmed array. Since left trimming is only
   // performed on pages which are not concurrently swept creating a filler
@@ -3016,28 +3020,6 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
   FixedArrayBase new_object =
       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
 
-  // Remove recorded slots for the new map and length offset.
-  ClearRecordedSlot(new_object, new_object.RawField(0));
-  ClearRecordedSlot(new_object,
-                    new_object.RawField(FixedArrayBase::kLengthOffset));
-
-  // Handle invalidated old-to-old slots.
-  if (incremental_marking()->IsCompacting() &&
-      MayContainRecordedSlots(new_object)) {
-    // If the array was right-trimmed before, then it is registered in
-    // the invalidated_slots.
-    MemoryChunk::FromHeapObject(new_object)
-        ->MoveObjectWithInvalidatedSlots(filler, new_object);
-    // We have to clear slots in the free space to avoid stale old-to-old slots.
-    // Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt because
-    // we need pointer granularity writes to avoid race with the concurrent
-    // marking.
-    if (filler.Size() > FreeSpace::kSize) {
-      MemsetTagged(filler.RawField(FreeSpace::kSize),
-                   ReadOnlyRoots(this).undefined_value(),
-                   (filler.Size() - FreeSpace::kSize) / kTaggedSize);
-    }
-  }
   // Notify the heap profiler of change in object layout.
   OnMoveEvent(new_object, object, new_object.Size());
 
@@ -3106,26 +3088,24 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
   Address old_end = object.address() + old_size;
   Address new_end = old_end - bytes_to_trim;
 
-  // Register the array as an object with invalidated old-to-old slots. We
-  // cannot use NotifyObjectLayoutChange as it would mark the array black,
-  // which is not safe for left-trimming because left-trimming re-pushes
-  // only grey arrays onto the marking worklist.
-  if (incremental_marking()->IsCompacting() &&
-      MayContainRecordedSlots(object)) {
-    // Ensure that the object survives because the InvalidatedSlotsFilter will
-    // compute its size from its map during pointers updating phase.
-    incremental_marking()->WhiteToGreyAndPush(object);
-    MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
-        object, old_size);
+#ifdef DEBUG
+  if (MayContainRecordedSlots(object)) {
+    MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+    DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
+    DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
   }
+#endif
+
+  bool clear_slots = MayContainRecordedSlots(object);
 
   // Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play safer
   // we still do it.
   // We do not create a filler for objects in a large object space.
   if (!IsLargeObject(object)) {
-    HeapObject filler =
-        CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kNo);
+    HeapObject filler = CreateFillerObjectAt(
+        new_end, bytes_to_trim,
+        clear_slots ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
     DCHECK(!filler.is_null());
     // Clear the mark bits of the black area that belongs now to the filler.
     // This is an optimization. The sweeper will release black fillers anyway.
@@ -3136,6 +3116,11 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
           page->AddressToMarkbitIndex(new_end),
           page->AddressToMarkbitIndex(new_end + bytes_to_trim));
     }
+  } else if (clear_slots) {
+    // Large objects are not swept, so it is not necessary to clear the
+    // recorded slot.
+    MemsetTagged(ObjectSlot(new_end), Object(kClearedFreeMemoryValue),
+                 (old_end - new_end) / kTaggedSize);
   }
 
   // Initialize header of the trimmed array. We are storing the new length
@@ -3408,10 +3393,14 @@ void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
         MayContainRecordedSlots(object)) {
-      MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
-          object, size);
+      MemoryChunk::FromHeapObject(object)
+          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
     }
   }
+  if (MayContainRecordedSlots(object)) {
+    MemoryChunk::FromHeapObject(object)
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
+  }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     DCHECK(pending_layout_change_object_.is_null());
@@ -4451,6 +4440,7 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
         static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
             static_cast<uint64_t>(max_semi_space_size_)));
     max_semi_space_size_ = Max(max_semi_space_size_, kMinSemiSpaceSize);
+    max_semi_space_size_ = Min(max_semi_space_size_, kMaxSemiSpaceSize);
     max_semi_space_size_ = RoundDown<Page::kPageSize>(max_semi_space_size_);
   }
 
@@ -4495,6 +4485,14 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
       initial_semispace_size_ = SemiSpaceSizeFromYoungGenerationSize(
           constraints.initial_young_generation_size_in_bytes());
     }
+    if (FLAG_initial_heap_size > 0) {
+      size_t young_generation, old_generation;
+      Heap::GenerationSizesFromHeapSize(
+          static_cast<size_t>(FLAG_initial_heap_size) * MB, &young_generation,
+          &old_generation);
+      initial_semispace_size_ =
+          SemiSpaceSizeFromYoungGenerationSize(young_generation);
+    }
     if (FLAG_min_semi_space_size > 0) {
       initial_semispace_size_ =
           static_cast<size_t>(FLAG_min_semi_space_size) * MB;
@@ -4513,6 +4511,17 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
           constraints.initial_old_generation_size_in_bytes();
       old_generation_size_configured_ = true;
     }
+    if (FLAG_initial_heap_size > 0) {
+      size_t initial_heap_size =
+          static_cast<size_t>(FLAG_initial_heap_size) * MB;
+      size_t young_generation_size =
+          YoungGenerationSizeFromSemiSpaceSize(initial_semispace_size_);
+      initial_old_generation_size_ =
+          initial_heap_size > young_generation_size
+              ? initial_heap_size - young_generation_size
+              : 0;
+      old_generation_size_configured_ = true;
+    }
     if (FLAG_initial_old_space_size > 0) {
       initial_old_generation_size_ =
           static_cast<size_t>(FLAG_initial_old_space_size) * MB;
@@ -4875,9 +4884,10 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
 }
 
 HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
+                                           AllocationOrigin origin,
                                            AllocationAlignment alignment) {
   HeapObject result;
-  AllocationResult alloc = AllocateRaw(size, allocation, alignment);
+  AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
   if (alloc.To(&result)) {
     DCHECK(result != ReadOnlyRoots(this).exception());
     return result;
@@ -4886,7 +4896,7 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
   for (int i = 0; i < 2; i++) {
     CollectGarbage(alloc.RetrySpace(),
                    GarbageCollectionReason::kAllocationFailure);
-    alloc = AllocateRaw(size, allocation, alignment);
+    alloc = AllocateRaw(size, allocation, origin, alignment);
     if (alloc.To(&result)) {
       DCHECK(result != ReadOnlyRoots(this).exception());
       return result;
@@ -4896,16 +4906,18 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
 }
 
 HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation,
+                                            AllocationOrigin origin,
                                             AllocationAlignment alignment) {
   AllocationResult alloc;
-  HeapObject result = AllocateRawWithLightRetry(size, allocation, alignment);
+  HeapObject result =
+      AllocateRawWithLightRetry(size, allocation, origin, alignment);
   if (!result.is_null()) return result;
 
   isolate()->counters()->gc_last_resort_from_handles()->Increment();
   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
   {
     AlwaysAllocateScope scope(isolate());
-    alloc = AllocateRaw(size, allocation, alignment);
+    alloc = AllocateRaw(size, allocation, origin, alignment);
   }
   if (alloc.To(&result)) {
     DCHECK(result != ReadOnlyRoots(this).exception());
@@ -5087,25 +5099,6 @@ void Heap::InitializeHashSeed() {
       0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
 }
 
-void Heap::SetStackLimits() {
-  DCHECK_NOT_NULL(isolate_);
-  DCHECK(isolate_ == isolate());
-  // On 64 bit machines, pointers are generally out of range of Smis.  We write
-  // something that looks like an out of range Smi to the GC.
-
-  // Set up the special root array entries containing the stack limits.
-  // These are actually addresses, but the tag makes the GC ignore it.
-  roots_table()[RootIndex::kStackLimit] =
-      (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag;
-  roots_table()[RootIndex::kRealStackLimit] =
-      (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag;
-}
-
-void Heap::ClearStackLimits() {
-  roots_table()[RootIndex::kStackLimit] = kNullAddress;
-  roots_table()[RootIndex::kRealStackLimit] = kNullAddress;
-}
-
 int Heap::NextAllocationTimeout(int current_timeout) {
   if (FLAG_random_gc_interval > 0) {
     // If current timeout hasn't reached 0 the GC was caused by something
@@ -5541,7 +5534,8 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
   Page* page = Page::FromAddress(slot.address());
   if (!page->InYoungGeneration()) {
     DCHECK_EQ(page->owner_identity(), OLD_SPACE);
-    store_buffer()->DeleteEntry(slot.address());
+    store_buffer()->MoveAllEntriesToRememberedSet();
+    RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
   }
 }
 
@@ -5555,7 +5549,7 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
   CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
   // Old to old slots are filtered with invalidated slots.
   CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
-                page->RegisteredObjectWithInvalidatedSlots(object));
+                page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
 }
 #endif
 
@@ -5564,7 +5558,9 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
   DCHECK(!page->IsLargePage());
   if (!page->InYoungGeneration()) {
     DCHECK_EQ(page->owner_identity(), OLD_SPACE);
-    store_buffer()->DeleteEntry(start, end);
+    store_buffer()->MoveAllEntriesToRememberedSet();
+    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+                                           SlotSet::KEEP_EMPTY_BUCKETS);
   }
 }
 
@@ -5925,7 +5921,7 @@ void Heap::KeepDuringJob(Handle<JSReceiver> target) {
     table =
         handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
   }
-  table = OrderedHashSet::Add(isolate(), table, target);
+  table = OrderedHashSet::Add(isolate(), table, target).ToHandleChecked();
   set_weak_refs_keep_during_job(*table);
 }
 
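A worked sketch of how the two --initial-heap-size blocks above cooperate (the 128MB figure is an assumption for illustration):

// With --initial-heap-size=128:
size_t young, old;
Heap::GenerationSizesFromHeapSize(128 * MB, &young, &old);
// First block: derive the semi-space size from the young-generation share.
// (old is unused here; the second block recomputes the remainder instead.)
initial_semispace_size_ = SemiSpaceSizeFromYoungGenerationSize(young);
// Second block: whatever the (possibly clamped) young generation does not
// use out of the 128MB budget goes to old space, floored at zero:
size_t young_total =
    YoungGenerationSizeFromSemiSpaceSize(initial_semispace_size_);
initial_old_generation_size_ =
    128 * MB > young_total ? 128 * MB - young_total : 0;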
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 81f2b0dd8c30d5..2b8b963a798b2f 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -96,6 +96,15 @@ enum class TraceRetainingPathMode { kEnabled, kDisabled };
 
 enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
 
+enum class AllocationOrigin {
+  kGeneratedCode = 0,
+  kRuntime = 1,
+  kGC = 2,
+  kFirstAllocationOrigin = kGeneratedCode,
+  kLastAllocationOrigin = kGC,
+  kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
+};
+
 enum class GarbageCollectionReason {
   kUnknown = 0,
   kAllocationFailure = 1,
@@ -576,7 +585,7 @@ class Heap {
 
   V8_INLINE int64_t external_memory();
   V8_INLINE void update_external_memory(int64_t delta);
-  V8_INLINE void update_external_memory_concurrently_freed(intptr_t freed);
+  V8_INLINE void update_external_memory_concurrently_freed(uintptr_t freed);
   V8_INLINE void account_external_memory_concurrently_freed();
 
   size_t backing_store_bytes() const { return backing_store_bytes_; }
@@ -713,15 +722,6 @@ class Heap {
   V8_INLINE void SetMessageListeners(TemplateList value);
   V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);
 
-  // Set the stack limit in the roots table.  Some architectures generate
-  // code that looks here, because it is faster than loading from the static
-  // jslimit_/real_jslimit_ variable in the StackGuard.
-  void SetStackLimits();
-
-  // The stack limit is thread-dependent. To be able to reproduce the same
-  // snapshot blob, we need to reset it before serializing.
-  void ClearStackLimits();
-
   void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end);
   void UnregisterStrongRoots(FullObjectSlot start);
 
@@ -1729,7 +1729,8 @@ class Heap {
   // inlined allocations, use the Heap::DisableInlineAllocation() support).
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
       int size_in_bytes, AllocationType allocation,
-      AllocationAlignment aligment = kWordAligned);
+      AllocationOrigin origin = AllocationOrigin::kRuntime,
+      AllocationAlignment alignment = kWordAligned);
 
   // This method will try to perform an allocation of a given size of a given
   // AllocationType. If the allocation fails, a regular full garbage collection
@@ -1737,8 +1738,14 @@ class Heap {
   // times. If after that retry procedure the allocation still fails nullptr is
   // returned.
   HeapObject AllocateRawWithLightRetry(
-      int size, AllocationType allocation,
+      int size, AllocationType allocation, AllocationOrigin origin,
       AllocationAlignment alignment = kWordAligned);
+  HeapObject AllocateRawWithLightRetry(
+      int size, AllocationType allocation,
+      AllocationAlignment alignment = kWordAligned) {
+    return AllocateRawWithLightRetry(size, allocation,
+                                     AllocationOrigin::kRuntime, alignment);
+  }
 
   // This method will try to perform an allocation of a given size of a given
   // AllocationType. If the allocation fails, a regular full garbage collection
@@ -1747,8 +1754,15 @@ class Heap {
   // garbage collection is triggered which tries to significantly reduce memory.
   // If the allocation still fails after that a fatal error is thrown.
   HeapObject AllocateRawWithRetryOrFail(
-      int size, AllocationType allocation,
+      int size, AllocationType allocation, AllocationOrigin origin,
       AllocationAlignment alignment = kWordAligned);
+  HeapObject AllocateRawWithRetryOrFail(
+      int size, AllocationType allocation,
+      AllocationAlignment alignment = kWordAligned) {
+    return AllocateRawWithRetryOrFail(size, allocation,
+                                      AllocationOrigin::kRuntime, alignment);
+  }
+
   HeapObject AllocateRawCodeInLargeObjectSpace(int size);
 
   // Allocates a heap object based on the map.
@@ -1789,7 +1803,7 @@ class Heap {
 #endif  // DEBUG
 
   // The amount of memory that has been freed concurrently.
-  std::atomic<intptr_t> external_memory_concurrently_freed_{0};
+  std::atomic<uintptr_t> external_memory_concurrently_freed_{0};
 
   // This can be calculated directly from a pointer to the heap; however, it is
   // more expedient to get at the isolate directly from within Heap methods.
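The kFirst/kLast/kNumberOf trio in AllocationOrigin exists so per-origin statistics can live in a plain array; a minimal sketch of that pattern (names here are illustrative, loosely mirroring the PrintAllocationsOrigins counters):

size_t allocations_origins
    [static_cast<int>(AllocationOrigin::kNumberOfAllocationOrigins)] = {};

void CountAllocation(AllocationOrigin origin) {
  DCHECK_GE(static_cast<int>(origin),
            static_cast<int>(AllocationOrigin::kFirstAllocationOrigin));
  DCHECK_LE(static_cast<int>(origin),
            static_cast<int>(AllocationOrigin::kLastAllocationOrigin));
  ++allocations_origins[static_cast<int>(origin)];
}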
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 58f6ac9bc83357..35a08108f63e93 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -62,6 +62,48 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
   return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
 }
 
+void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
+#ifdef DEBUG
+  DCHECK_LT(free_start, free_end);
+  // Free regions must come in increasing order and must not overlap.
+  DCHECK_LE(last_free_, free_start);
+  last_free_ = free_start;
+#endif
+
+  if (iterator_ == iterator_end_) return;
+
+  // Ignore invalidated objects before free region
+  while (free_start >= invalidated_end_) {
+    ++iterator_;
+    NextInvalidatedObject();
+  }
+
+  // Loop here: Free region might contain multiple invalidated objects
+  while (free_end > invalidated_start_) {
+    // Case: Free region starts before current invalidated object
+    if (free_start <= invalidated_start_) {
+      iterator_ = invalidated_slots_->erase(iterator_);
+
+    } else {
+      // Case: Free region starts within current invalidated object
+      // (Can happen for right-trimmed objects)
+      iterator_++;
+    }
+
+    NextInvalidatedObject();
+  }
+}
+
+void InvalidatedSlotsCleanup::NextInvalidatedObject() {
+  if (iterator_ != iterator_end_) {
+    invalidated_start_ = iterator_->first.address();
+    invalidated_end_ = invalidated_start_ + iterator_->second;
+  } else {
+    invalidated_start_ = sentinel_;
+    invalidated_end_ = sentinel_;
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
 
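A short sketch of how a sweeper would drive InvalidatedSlotsCleanup::Free (addresses A and B are assumptions, with A + 64 <= B and both objects registered as invalidated on the chunk):

InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(chunk);
cleanup.Free(A, A + 64);       // The free region covers invalidated object
                               // [A, A+64) from its start: entry erased.
cleanup.Free(B + 16, B + 32);  // The region starts inside [B, B+32), the
                               // right-trim case: entry kept, just skipped.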
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index 368d189c556e61..8fa1518d683895 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -8,18 +8,35 @@
 namespace v8 {
 namespace internal {
 
-InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
-  // Adjust slots_in_free_space_are_valid_ if more spaces are added.
-  DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
-                 chunk->InOldSpace() || chunk->InLargeObjectSpace());
+InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
   // The sweeper removes invalid slots and makes free space available for
   // allocation. Slots for new objects can be recorded in the free space.
   // Note that we cannot simply check for SweepingDone because pages in large
   // object space are not swept but have SweepingDone() == true.
-  slots_in_free_space_are_valid_ = chunk->SweepingDone() && chunk->InOldSpace();
+  bool slots_in_free_space_are_valid =
+      chunk->SweepingDone() && chunk->InOldSpace();
+  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
+                                slots_in_free_space_are_valid);
+}
+
+InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
+  // Always treat these slots as valid for old-to-new for now. Invalid
+  // old-to-new slots are always cleared.
+  bool slots_in_free_space_are_valid = true;
+  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
+                                slots_in_free_space_are_valid);
+}
+
+InvalidatedSlotsFilter::InvalidatedSlotsFilter(
+    MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
+    bool slots_in_free_space_are_valid) {
+  // Adjust slots_in_free_space_are_valid_ if more spaces are added.
+  DCHECK_IMPLIES(invalidated_slots != nullptr,
+                 chunk->InOldSpace() || chunk->InLargeObjectSpace());
+
+  slots_in_free_space_are_valid_ = slots_in_free_space_are_valid;
+  invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
 
-  InvalidatedSlots* invalidated_slots =
-      chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
   iterator_ = invalidated_slots->begin();
   iterator_end_ = invalidated_slots->end();
   sentinel_ = chunk->area_end();
@@ -37,5 +54,33 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
 #endif
 }
 
+InvalidatedSlotsCleanup InvalidatedSlotsCleanup::OldToNew(MemoryChunk* chunk) {
+  return InvalidatedSlotsCleanup(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
+}
+
+InvalidatedSlotsCleanup InvalidatedSlotsCleanup::NoCleanup(MemoryChunk* chunk) {
+  return InvalidatedSlotsCleanup(chunk, nullptr);
+}
+
+InvalidatedSlotsCleanup::InvalidatedSlotsCleanup(
+    MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
+  invalidated_slots_ = invalidated_slots ? invalidated_slots : &empty_;
+  iterator_ = invalidated_slots_->begin();
+  iterator_end_ = invalidated_slots_->end();
+  sentinel_ = chunk->area_end();
+
+  if (iterator_ != iterator_end_) {
+    invalidated_start_ = iterator_->first.address();
+    invalidated_end_ = invalidated_start_ + iterator_->second;
+  } else {
+    invalidated_start_ = sentinel_;
+    invalidated_end_ = sentinel_;
+  }
+
+#ifdef DEBUG
+  last_free_ = chunk->area_start();
+#endif
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 4098595fe468c2..4a722719106fb8 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -30,7 +30,12 @@ using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
 // n is the number of IsValid queries.
 class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
  public:
-  explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
+  static InvalidatedSlotsFilter OldToOld(MemoryChunk* chunk);
+  static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
+
+  explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
+                                  InvalidatedSlots* invalidated_slots,
+                                  bool slots_in_free_space_are_valid);
   inline bool IsValid(Address slot);
 
  private:
@@ -48,6 +53,32 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
 #endif
 };
 
+class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
+ public:
+  static InvalidatedSlotsCleanup OldToNew(MemoryChunk* chunk);
+  static InvalidatedSlotsCleanup NoCleanup(MemoryChunk* chunk);
+
+  explicit InvalidatedSlotsCleanup(MemoryChunk* chunk,
+                                   InvalidatedSlots* invalidated_slots);
+
+  inline void Free(Address free_start, Address free_end);
+
+ private:
+  InvalidatedSlots::iterator iterator_;
+  InvalidatedSlots::iterator iterator_end_;
+  InvalidatedSlots* invalidated_slots_;
+  InvalidatedSlots empty_;
+
+  Address sentinel_;
+  Address invalidated_start_;
+  Address invalidated_end_;
+
+  inline void NextInvalidatedObject();
+#ifdef DEBUG
+  Address last_free_;
+#endif
+};
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/local-allocator-inl.h
index 71dcd98778103f..10d6ce7370f4cb 100644
--- a/deps/v8/src/heap/local-allocator-inl.h
+++ b/deps/v8/src/heap/local-allocator-inl.h
@@ -14,16 +14,17 @@ namespace internal {
 
 AllocationResult LocalAllocator::Allocate(AllocationSpace space,
                                           int object_size,
+                                          AllocationOrigin origin,
                                           AllocationAlignment alignment) {
   switch (space) {
     case NEW_SPACE:
-      return AllocateInNewSpace(object_size, alignment);
+      return AllocateInNewSpace(object_size, origin, alignment);
     case OLD_SPACE:
       return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
-                                                            alignment);
+                                                            alignment, origin);
     case CODE_SPACE:
       return compaction_spaces_.Get(CODE_SPACE)
-          ->AllocateRaw(object_size, alignment);
+          ->AllocateRaw(object_size, alignment, origin);
     default:
       UNREACHABLE();
   }
@@ -94,9 +95,9 @@ bool LocalAllocator::NewLocalAllocationBuffer() {
 }
 
 AllocationResult LocalAllocator::AllocateInNewSpace(
-    int object_size, AllocationAlignment alignment) {
+    int object_size, AllocationOrigin origin, AllocationAlignment alignment) {
   if (object_size > kMaxLabObjectSize) {
-    return new_space_->AllocateRawSynchronized(object_size, alignment);
+    return new_space_->AllocateRawSynchronized(object_size, alignment, origin);
   }
   return AllocateInLAB(object_size, alignment);
 }
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
index 7019a79f21e4d5..56da76a18dab0a 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/local-allocator.h
@@ -42,12 +42,14 @@ class LocalAllocator {
   }
 
   inline AllocationResult Allocate(AllocationSpace space, int object_size,
+                                   AllocationOrigin origin,
                                    AllocationAlignment alignment);
   inline void FreeLast(AllocationSpace space, HeapObject object,
                        int object_size);
 
  private:
   inline AllocationResult AllocateInNewSpace(int object_size,
+                                             AllocationOrigin origin,
                                              AllocationAlignment alignment);
   inline bool NewLocalAllocationBuffer();
   inline AllocationResult AllocateInLAB(int object_size,
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 3cd6620083b6f3..e763d02e9f7c01 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -577,6 +577,7 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
   heap()->old_space()->RefillFreeList();
   heap()->code_space()->RefillFreeList();
   heap()->map_space()->RefillFreeList();
+  heap()->map_space()->SortFreeList();
 
   heap()->tracer()->NotifySweepingCompleted();
 
@@ -1291,8 +1292,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
     if (AbortCompactionForTesting(object)) return false;
 #endif  // VERIFY_HEAP
     AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
-    AllocationResult allocation =
-        local_allocator_->Allocate(target_space, size, alignment);
+    AllocationResult allocation = local_allocator_->Allocate(
+        target_space, size, AllocationOrigin::kGC, alignment);
     if (allocation.To(target_object)) {
       MigrateObject(*target_object, object, size, target_space);
       if (target_space == CODE_SPACE)
@@ -1398,8 +1399,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
     AllocationAlignment alignment =
         HeapObject::RequiredAlignment(old_object.map());
     AllocationSpace space_allocated_in = NEW_SPACE;
-    AllocationResult allocation =
-        local_allocator_->Allocate(NEW_SPACE, size, alignment);
+    AllocationResult allocation = local_allocator_->Allocate(
+        NEW_SPACE, size, AllocationOrigin::kGC, alignment);
     if (allocation.IsRetry()) {
       allocation = AllocateInOldSpace(size, alignment);
       space_allocated_in = OLD_SPACE;
@@ -1412,8 +1413,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
 
   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
                                              AllocationAlignment alignment) {
-    AllocationResult allocation =
-        local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
+    AllocationResult allocation = local_allocator_->Allocate(
+        OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
     if (allocation.IsRetry()) {
       heap_->FatalProcessOutOfMemory(
           "MarkCompactCollector: semi-space copy, fallback in old gen");
@@ -2688,7 +2689,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
   for (Page* p : *heap()->old_space()) {
     DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
     DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
-    DCHECK_NULL(p->invalidated_slots());
+    DCHECK_NULL(p->invalidated_slots<OLD_TO_OLD>());
+    DCHECK_NULL(p->invalidated_slots<OLD_TO_NEW>());
   }
 #endif
 }
@@ -3416,9 +3418,23 @@ class RememberedSetUpdatingItem : public UpdatingItem {
           },
           SlotSet::PREFREE_EMPTY_BUCKETS);
     }
+
+    if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
+#ifdef DEBUG
+      for (auto object_size : *chunk_->invalidated_slots<OLD_TO_NEW>()) {
+        HeapObject object = object_size.first;
+        int size = object_size.second;
+        DCHECK_LE(object.SizeFromMap(object.map()), size);
+      }
+#endif
+      // The invalidated slots are not needed after old-to-new slots were
+      // processed.
+      chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
+    }
+
     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
         (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
-      InvalidatedSlotsFilter filter(chunk_);
+      InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
       RememberedSet<OLD_TO_OLD>::Iterate(
           chunk_,
           [&filter](MaybeObjectSlot slot) {
@@ -3428,9 +3444,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
           SlotSet::PREFREE_EMPTY_BUCKETS);
     }
     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
-        chunk_->invalidated_slots() != nullptr) {
+        chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
 #ifdef DEBUG
-      for (auto object_size : *chunk_->invalidated_slots()) {
+      for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) {
         HeapObject object = object_size.first;
         int size = object_size.second;
         DCHECK_LE(object.SizeFromMap(object.map()), size);
@@ -3438,7 +3454,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
 #endif
       // The invalidated slots are not needed after old-to-old slots were
       // processed.
-      chunk_->ReleaseInvalidatedSlots();
+      chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
     }
   }
 
@@ -3552,13 +3568,17 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
     const bool contains_old_to_new_slots =
         chunk->slot_set<OLD_TO_NEW>() != nullptr ||
         chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
-    const bool contains_invalidated_slots =
-        chunk->invalidated_slots() != nullptr;
+    const bool contains_old_to_old_invalidated_slots =
+        chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
+    const bool contains_old_to_new_invalidated_slots =
+        chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
     if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
-        !contains_invalidated_slots)
+        !contains_old_to_old_invalidated_slots &&
+        !contains_old_to_new_invalidated_slots)
       continue;
     if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
-        contains_invalidated_slots) {
+        contains_old_to_old_invalidated_slots ||
+        contains_old_to_new_invalidated_slots) {
       job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
       pages++;
     }
@@ -4635,11 +4655,14 @@ class PageMarkingItem : public MarkingItem {
   inline Heap* heap() { return chunk_->heap(); }
 
   void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
-    RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
-                                       [this, task](MaybeObjectSlot slot) {
-                                         return CheckAndMarkObject(task, slot);
-                                       },
-                                       SlotSet::PREFREE_EMPTY_BUCKETS);
+    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
+    RememberedSet<OLD_TO_NEW>::Iterate(
+        chunk_,
+        [this, task, &filter](MaybeObjectSlot slot) {
+          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+          return CheckAndMarkObject(task, slot);
+        },
+        SlotSet::PREFREE_EMPTY_BUCKETS);
   }
 
   void MarkTypedPointers(YoungGenerationMarkingTask* task) {
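For context, the scenario the new old-to-new filter guards against during young-generation marking (object layout assumed for illustration):

// An old-space FixedArray is right-trimmed while recorded old-to-new slots
// remain in the trimmed tail. The trim registered the object in
// invalidated_slots<OLD_TO_NEW>(), so IsValid() rejects those stale slot
// addresses and the Iterate() callback above answers REMOVE_SLOT instead
// of marking through freed memory.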
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 2a63896242a68f..2ee88361c965f3 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -35,11 +35,16 @@ class FieldStatsCollector : public ObjectVisitor {
  public:
   FieldStatsCollector(size_t* tagged_fields_count,
                       size_t* embedder_fields_count,
+                      size_t* inobject_smi_fields_count,
                       size_t* unboxed_double_fields_count,
-                      size_t* raw_fields_count)
+                      size_t* boxed_double_fields_count,
+                      size_t* string_data_count, size_t* raw_fields_count)
       : tagged_fields_count_(tagged_fields_count),
         embedder_fields_count_(embedder_fields_count),
+        inobject_smi_fields_count_(inobject_smi_fields_count),
         unboxed_double_fields_count_(unboxed_double_fields_count),
+        boxed_double_fields_count_(boxed_double_fields_count),
+        string_data_count_(string_data_count),
         raw_fields_count_(raw_fields_count) {}
 
   void RecordStats(HeapObject host) {
@@ -62,11 +67,32 @@ class FieldStatsCollector : public ObjectVisitor {
       *tagged_fields_count_ -= field_stats.embedded_fields_count_;
       *embedder_fields_count_ += field_stats.embedded_fields_count_;
 
+      // Smi fields are also included into pointer words.
+      DCHECK_LE(
+          field_stats.unboxed_double_fields_count_ * kDoubleSize / kTaggedSize,
+          raw_fields_count_in_object);
+      tagged_fields_count_in_object -= field_stats.smi_fields_count_;
+      *tagged_fields_count_ -= field_stats.smi_fields_count_;
+      *inobject_smi_fields_count_ += field_stats.smi_fields_count_;
+
       // The rest are data words.
-      DCHECK_LE(field_stats.unboxed_double_fields_count_,
-                raw_fields_count_in_object);
-      raw_fields_count_in_object -= field_stats.unboxed_double_fields_count_;
+      DCHECK_LE(
+          field_stats.unboxed_double_fields_count_ * kDoubleSize / kTaggedSize,
+          raw_fields_count_in_object);
+      raw_fields_count_in_object -=
+          field_stats.unboxed_double_fields_count_ * kDoubleSize / kTaggedSize;
       *unboxed_double_fields_count_ += field_stats.unboxed_double_fields_count_;
+    } else if (host.IsHeapNumber()) {
+      DCHECK_LE(kDoubleSize / kTaggedSize, raw_fields_count_in_object);
+      raw_fields_count_in_object -= kDoubleSize / kTaggedSize;
+      *boxed_double_fields_count_ += 1;
+    } else if (host.IsSeqString()) {
+      int string_data = SeqString::cast(host).synchronized_length() *
+                        (String::cast(host).IsOneByteRepresentation() ? 1 : 2) /
+                        kTaggedSize;
+      DCHECK_LE(string_data, raw_fields_count_in_object);
+      raw_fields_count_in_object -= string_data;
+      *string_data_count_ += string_data;
     }
     *raw_fields_count_ += raw_fields_count_in_object;
   }
@@ -92,9 +118,12 @@ class FieldStatsCollector : public ObjectVisitor {
  private:
   struct JSObjectFieldStats {
     JSObjectFieldStats()
-        : embedded_fields_count_(0), unboxed_double_fields_count_(0) {}
+        : embedded_fields_count_(0),
+          smi_fields_count_(0),
+          unboxed_double_fields_count_(0) {}
 
     unsigned embedded_fields_count_ : kDescriptorIndexBitCount;
+    unsigned smi_fields_count_ : kDescriptorIndexBitCount;
     unsigned unboxed_double_fields_count_ : kDescriptorIndexBitCount;
   };
   std::unordered_map<Map, JSObjectFieldStats, Object::Hasher>
@@ -104,7 +133,10 @@ class FieldStatsCollector : public ObjectVisitor {
 
   size_t* const tagged_fields_count_;
   size_t* const embedder_fields_count_;
+  size_t* const inobject_smi_fields_count_;
   size_t* const unboxed_double_fields_count_;
+  size_t* const boxed_double_fields_count_;
+  size_t* const string_data_count_;
   size_t* const raw_fields_count_;
 };
 
@@ -130,6 +162,9 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
             map.IsUnboxedDoubleField(index)) {
           ++stats.unboxed_double_fields_count_;
         }
+        if (details.representation().IsSmi()) {
+          ++stats.smi_fields_count_;
+        }
       }
     }
   }
@@ -149,7 +184,10 @@ void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
   }
   tagged_fields_count_ = 0;
   embedder_fields_count_ = 0;
+  inobject_smi_fields_count_ = 0;
   unboxed_double_fields_count_ = 0;
+  boxed_double_fields_count_ = 0;
+  string_data_count_ = 0;
   raw_fields_count_ = 0;
 }
 
@@ -208,8 +246,13 @@ void ObjectStats::PrintJSON(const char* key) {
   PrintF(", \"tagged_fields\": %zu", tagged_fields_count_ * kTaggedSize);
   PrintF(", \"embedder_fields\": %zu",
          embedder_fields_count_ * kEmbedderDataSlotSize);
+  PrintF(", \"inobject_smi_fields\": %zu",
+         inobject_smi_fields_count_ * kTaggedSize);
   PrintF(", \"unboxed_double_fields\": %zu",
          unboxed_double_fields_count_ * kDoubleSize);
+  PrintF(", \"boxed_double_fields\": %zu",
+         boxed_double_fields_count_ * kDoubleSize);
+  PrintF(", \"string_data\": %zu", string_data_count_ * kTaggedSize);
   PrintF(", \"other_raw_fields\": %zu", raw_fields_count_ * kSystemPointerSize);
   PrintF(" }\n");
   // bucket_sizes
@@ -263,8 +306,13 @@ void ObjectStats::Dump(std::stringstream& stream) {
   stream << "\"tagged_fields\":" << (tagged_fields_count_ * kTaggedSize);
   stream << ",\"embedder_fields\":"
          << (embedder_fields_count_ * kEmbedderDataSlotSize);
+  stream << ",\"inobject_smi_fields\": "
+         << (inobject_smi_fields_count_ * kTaggedSize);
   stream << ",\"unboxed_double_fields\": "
          << (unboxed_double_fields_count_ * kDoubleSize);
+  stream << ",\"boxed_double_fields\": "
+         << (boxed_double_fields_count_ * kDoubleSize);
+  stream << ",\"string_data\": " << (string_data_count_ * kTaggedSize);
   stream << ",\"other_raw_fields\":"
          << (raw_fields_count_ * kSystemPointerSize);
   stream << "}, ";
@@ -427,7 +475,10 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
           heap->mark_compact_collector()->non_atomic_marking_state()),
       field_stats_collector_(
           &stats->tagged_fields_count_, &stats->embedder_fields_count_,
-          &stats->unboxed_double_fields_count_, &stats->raw_fields_count_) {}
+          &stats->inobject_smi_fields_count_,
+          &stats->unboxed_double_fields_count_,
+          &stats->boxed_double_fields_count_, &stats->string_data_count_,
+          &stats->raw_fields_count_) {}
 
 bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject obj,
                                                   CowMode check_cow_array) {
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 0bd2a1e3e4fe3c..2a9b9675ef2145 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -167,7 +167,10 @@ class ObjectStats {
 
   size_t tagged_fields_count_;
   size_t embedder_fields_count_;
+  size_t inobject_smi_fields_count_;
   size_t unboxed_double_fields_count_;
+  size_t boxed_double_fields_count_;
+  size_t string_data_count_;
   size_t raw_fields_count_;
 
   friend class ObjectStatsCollectorImpl;
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index ea7fe0149bac87..eefc565e0083dc 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -122,7 +122,7 @@ class RememberedSet : public AllStatic {
       SlotSet* slots = chunk->slot_set<type>();
       TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
       if (slots != nullptr || typed_slots != nullptr ||
-          chunk->invalidated_slots() != nullptr) {
+          chunk->invalidated_slots<type>() != nullptr) {
         callback(chunk);
       }
     }
@@ -256,7 +256,7 @@ class RememberedSet : public AllStatic {
     while ((chunk = it.next()) != nullptr) {
       chunk->ReleaseSlotSet<OLD_TO_OLD>();
       chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
-      chunk->ReleaseInvalidatedSlots();
+      chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
     }
   }
 
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 9c605f70893652..7729807a8a43b8 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -135,8 +135,8 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
                 "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
   DCHECK(heap()->AllowedToBeMigrated(map, object, NEW_SPACE));
   AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
-  AllocationResult allocation =
-      allocator_.Allocate(NEW_SPACE, object_size, alignment);
+  AllocationResult allocation = allocator_.Allocate(
+      NEW_SPACE, object_size, AllocationOrigin::kGC, alignment);
 
   HeapObject target;
   if (allocation.To(&target)) {
@@ -171,8 +171,8 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
                     std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                 "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
   AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
-  AllocationResult allocation =
-      allocator_.Allocate(OLD_SPACE, object_size, alignment);
+  AllocationResult allocation = allocator_.Allocate(
+      OLD_SPACE, object_size, AllocationOrigin::kGC, alignment);
 
   HeapObject target;
   if (allocation.To(&target)) {
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 70b514142fecdb..e08717ac279de7 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -8,6 +8,7 @@
 #include "src/heap/barrier.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/heap-inl.h"
+#include "src/heap/invalidated-slots-inl.h"
 #include "src/heap/item-parallel-job.h"
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/objects-visiting-inl.h"
@@ -371,7 +372,7 @@ void ScavengerCollector::MergeSurvivingNewLargeObjects(
 int ScavengerCollector::NumberOfScavengeTasks() {
   if (!FLAG_parallel_scavenge) return 1;
   const int num_scavenge_tasks =
-      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB;
+      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
   int tasks =
       Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
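
The `+ 1` rounds the capacity-based estimate up instead of down, so small semi-spaces can still fan out to a second task. A back-of-the-envelope model (the clamp values here are assumptions, not V8's constants):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>

int NumberOfScavengeTasks(size_t capacity_bytes, int max_tasks, int num_cores) {
  const size_t MB = size_t{1} << 20;
  const int by_capacity = static_cast<int>(capacity_bytes / MB) + 1;
  return std::max(1, std::min({by_capacity, max_tasks, num_cores}));
}

int main() {
  // A 1 MB new space previously allowed one task; the +1 admits a second,
  // still clamped by the task limit and the core count.
  std::printf("%d\n", NumberOfScavengeTasks(size_t{1} << 20, 8, 5));  // 2
}
```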
@@ -431,12 +432,26 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
 
 void Scavenger::ScavengePage(MemoryChunk* page) {
   CodePageMemoryModificationScope memory_modification_scope(page);
-  RememberedSet<OLD_TO_NEW>::Iterate(page,
-                                     [this](MaybeObjectSlot addr) {
-                                       return CheckAndScavengeObject(heap_,
-                                                                     addr);
-                                     },
-                                     SlotSet::KEEP_EMPTY_BUCKETS);
+  RememberedSet<OLD_TO_NEW>::Iterate(
+      page,
+      [this](MaybeObjectSlot addr) {
+        return CheckAndScavengeObject(heap_, addr);
+      },
+      SlotSet::KEEP_EMPTY_BUCKETS);
+
+  if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
+#ifdef DEBUG
+    for (auto object_size : *page->invalidated_slots<OLD_TO_NEW>()) {
+      HeapObject object = object_size.first;
+      int size = object_size.second;
+      DCHECK_LE(object.SizeFromMap(object.map()), size);
+    }
+#endif
+    // The invalidated slots are not needed after the old-to-new slots have
+    // been processed.
+    page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
+  }
+
   RememberedSet<OLD_TO_NEW>::IterateTyped(
       page, [=](SlotType type, Address addr) {
         return UpdateTypedSlotHelper::UpdateTypedSlot(
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index a936521a7e42b4..15ca6d7930383a 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -358,8 +358,6 @@ bool Heap::CreateInitialMaps() {
     ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
     ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
                            Context::NUMBER_FUNCTION_INDEX)
-    ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, MutableHeapNumber::kSize,
-                 mutable_heap_number)
     ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
                            Context::SYMBOL_FUNCTION_INDEX)
     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
@@ -463,6 +461,7 @@ bool Heap::CreateInitialMaps() {
     ALLOCATE_VARSIZE_MAP(AWAIT_CONTEXT_TYPE, await_context)
     ALLOCATE_VARSIZE_MAP(BLOCK_CONTEXT_TYPE, block_context)
     ALLOCATE_VARSIZE_MAP(MODULE_CONTEXT_TYPE, module_context)
+    ALLOCATE_VARSIZE_MAP(NATIVE_CONTEXT_TYPE, native_context)
     ALLOCATE_VARSIZE_MAP(EVAL_CONTEXT_TYPE, eval_context)
     ALLOCATE_VARSIZE_MAP(SCRIPT_CONTEXT_TYPE, script_context)
     ALLOCATE_VARSIZE_MAP(SCRIPT_CONTEXT_TABLE_TYPE, script_context_table)
@@ -470,8 +469,6 @@ bool Heap::CreateInitialMaps() {
     ALLOCATE_VARSIZE_MAP(OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
                          object_boilerplate_description)
 
-    ALLOCATE_MAP(NATIVE_CONTEXT_TYPE, NativeContext::kSize, native_context)
-
     ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
                  side_effect_call_handler_info)
     ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
@@ -825,6 +822,15 @@ void Heap::CreateInitialObjects() {
       factory->NewFeedbackMetadata(0, 0, AllocationType::kReadOnly);
   set_empty_feedback_metadata(*empty_feedback_metadata);
 
+  // Canonical scope arrays.
+  Handle<ScopeInfo> global_this_binding =
+      ScopeInfo::CreateGlobalThisBinding(isolate());
+  set_global_this_binding_scope_info(*global_this_binding);
+
+  Handle<ScopeInfo> empty_function =
+      ScopeInfo::CreateForEmptyFunction(isolate());
+  set_empty_function_scope_info(*empty_function);
+
   // Allocate the empty script.
   Handle<Script> script = factory->NewScript(factory->empty_string());
   script->set_type(Script::TYPE_NATIVE);
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index f7efc64247370b..c71192bfdceec1 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -52,9 +52,6 @@ class SlotSet : public Malloced {
   void SetPageStart(Address page_start) { page_start_ = page_start; }
 
   // The slot offset specifies a slot at address page_start_ + slot_offset.
-  // This method should only be called on the main thread because concurrent
-  // allocation of the bucket is not thread-safe.
-  //
   // AccessMode defines whether there can be concurrent access on the buckets
   // or not.
   template <AccessMode access_mode = AccessMode::ATOMIC>
@@ -181,7 +178,10 @@ class SlotSet : public Malloced {
   // Iterate over all slots in the set and for each slot invoke the callback.
   // If the callback returns REMOVE_SLOT then the slot is removed from the set.
   // Returns the new number of slots.
-  // This method should only be called on the main thread.
+  //
+  // Iteration can be performed concurrently with other operations that use
+  // the atomic access mode, such as insertion and removal. However, there are
+  // no guarantees about ordering or linearizability.
   //
   // Sample usage:
   // Iterate([](MaybeObjectSlot slot) {
@@ -411,8 +411,8 @@ class V8_EXPORT_PRIVATE TypedSlots {
   void Merge(TypedSlots* other);
 
  protected:
-  class OffsetField : public BitField<int, 0, 29> {};
-  class TypeField : public BitField<SlotType, 29, 3> {};
+  using OffsetField = BitField<int, 0, 29>;
+  using TypeField = BitField<SlotType, 29, 3>;
   struct TypedSlot {
     uint32_t type_and_offset;
   };
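
The `using` aliases are behavior-preserving; for reference, a reduced stand-in for the 29/3-bit packing used by TypedSlot (V8's real BitField offers more than encode/decode, and the enum values here are illustrative):

```cpp
#include <cassert>
#include <cstdint>

// Reduced stand-in for V8's BitField; only encode/decode are modeled.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

enum SlotType { kSlotA = 0, kSlotB = 1 };  // illustrative values

using OffsetField = BitField<int, 0, 29>;
using TypeField = BitField<SlotType, 29, 3>;

int main() {
  // Pack a slot type and a page offset into one 32-bit word, as TypedSlot does.
  uint32_t packed = TypeField::encode(kSlotB) | OffsetField::encode(0x1234);
  assert(TypeField::decode(packed) == kSlotB);
  assert(OffsetField::decode(packed) == 0x1234);
}
```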
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 3b4ed8d30ad888..2feb47bec1bbf2 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -172,8 +172,6 @@ bool PagedSpace::Contains(Object o) {
 void PagedSpace::UnlinkFreeListCategories(Page* page) {
   DCHECK_EQ(this, page->owner());
   page->ForAllFreeListCategories([this](FreeListCategory* category) {
-    DCHECK_EQ(free_list(), category->owner());
-    category->set_free_list(nullptr);
     free_list()->RemoveCategory(category);
   });
 }
@@ -182,9 +180,8 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
   DCHECK_EQ(this, page->owner());
   size_t added = 0;
   page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
-    category->set_free_list(free_list());
     added += category->available();
-    category->Relink();
+    category->Relink(free_list());
   });
 
   DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
@@ -315,10 +312,51 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
   UNREACHABLE();
 }
 
-FreeList* FreeListCategory::owner() { return free_list_; }
+bool FreeListCategory::is_linked(FreeList* owner) const {
+  return prev_ != nullptr || next_ != nullptr ||
+         owner->categories_[type_] == this;
+}
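
Side note on the third clause above: a category that is the sole element of its list has null prev_ and next_, so the link pointers alone would misclassify the list head as unlinked; comparing against owner->categories_[type_] covers that case.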
+
+void FreeListCategory::UpdateCountersAfterAllocation(size_t allocation_size) {
+  available_ -= allocation_size;
+}
+
+Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
+  FreeListCategory* category_top = top(type);
+  if (category_top != nullptr) {
+    DCHECK(!category_top->top().is_null());
+    return Page::FromHeapObject(category_top->top());
+  } else {
+    return nullptr;
+  }
+}
 
-bool FreeListCategory::is_linked() {
-  return prev_ != nullptr || next_ != nullptr;
+Page* FreeListLegacy::GetPageForSize(size_t size_in_bytes) {
+  const int minimum_category =
+      static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+  Page* page = GetPageForCategoryType(kHuge);
+  if (!page && static_cast<int>(kLarge) >= minimum_category)
+    page = GetPageForCategoryType(kLarge);
+  if (!page && static_cast<int>(kMedium) >= minimum_category)
+    page = GetPageForCategoryType(kMedium);
+  if (!page && static_cast<int>(kSmall) >= minimum_category)
+    page = GetPageForCategoryType(kSmall);
+  if (!page && static_cast<int>(kTiny) >= minimum_category)
+    page = GetPageForCategoryType(kTiny);
+  if (!page && static_cast<int>(kTiniest) >= minimum_category)
+    page = GetPageForCategoryType(kTiniest);
+  return page;
+}
+
+Page* FreeListFastAlloc::GetPageForSize(size_t size_in_bytes) {
+  const int minimum_category =
+      static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+  Page* page = GetPageForCategoryType(kHuge);
+  if (!page && static_cast<int>(kLarge) >= minimum_category)
+    page = GetPageForCategoryType(kLarge);
+  if (!page && static_cast<int>(kMedium) >= minimum_category)
+    page = GetPageForCategoryType(kMedium);
+  return page;
 }
 
 AllocationResult LocalAllocationBuffer::AllocateRawAligned(
@@ -338,11 +376,12 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
   return AllocationResult(HeapObject::FromAddress(current_top));
 }
 
-bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
+bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
+                                            AllocationOrigin origin) {
   if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
     return true;
   }
-  return SlowRefillLinearAllocationArea(size_in_bytes);
+  return SlowRefillLinearAllocationArea(size_in_bytes, origin);
 }
 
 HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
@@ -371,19 +410,26 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
   return HeapObject::FromAddress(current_top);
 }
 
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
+                                                  AllocationOrigin origin) {
   DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
-  if (!EnsureLinearAllocationArea(size_in_bytes)) {
+  if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
     return AllocationResult::Retry(identity());
   }
   HeapObject object = AllocateLinearly(size_in_bytes);
   DCHECK(!object.is_null());
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
   return object;
 }
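
Note that the origin bookkeeping is gated on FLAG_trace_allocations_origins, so the default allocation fast path pays no extra cost; assuming V8's usual flag plumbing, this would surface as a --trace-allocations-origins command-line flag.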
 
 AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
-                                                AllocationAlignment alignment) {
+                                                AllocationAlignment alignment,
+                                                AllocationOrigin origin) {
   DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
   DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
   int allocation_size = size_in_bytes;
@@ -393,7 +439,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
     // allocated, so assume the worst case.
     int filler_size = Heap::GetMaximumFillToAlign(alignment);
     allocation_size += filler_size;
-    if (!EnsureLinearAllocationArea(allocation_size)) {
+    if (!EnsureLinearAllocationArea(allocation_size, origin)) {
       return AllocationResult::Retry(identity());
     }
     allocation_size = size_in_bytes;
@@ -401,12 +447,17 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
     DCHECK(!object.is_null());
   }
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
   return object;
 }
 
-
 AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
-                                         AllocationAlignment alignment) {
+                                         AllocationAlignment alignment,
+                                         AllocationOrigin origin) {
   if (top_on_previous_step_ && top() < top_on_previous_step_ &&
       SupportsInlineAllocation()) {
     // Generated code decreased the top() pointer to do folded allocations.
@@ -421,11 +472,12 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
 
   DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
 #ifdef V8_HOST_ARCH_32_BIT
-  AllocationResult result = alignment != kWordAligned
-                                ? AllocateRawAligned(size_in_bytes, alignment)
-                                : AllocateRawUnaligned(size_in_bytes);
+  AllocationResult result =
+      alignment != kWordAligned
+          ? AllocateRawAligned(size_in_bytes, alignment, origin)
+          : AllocateRawUnaligned(size_in_bytes, origin);
 #else
-  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
+  AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
 #endif
   HeapObject heap_obj;
   if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
@@ -439,13 +491,12 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   return result;
 }
 
-
 // -----------------------------------------------------------------------------
 // NewSpace
 
-
 AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
-                                              AllocationAlignment alignment) {
+                                              AllocationAlignment alignment,
+                                              AllocationOrigin origin) {
   Address top = allocation_info_.top();
   int filler_size = Heap::GetFillToAlign(top, alignment);
   int aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -472,11 +523,15 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
 
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
 
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
   return obj;
 }
 
-
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
+                                                AllocationOrigin origin) {
   Address top = allocation_info_.top();
   if (allocation_info_.limit() < top + size_in_bytes) {
     // See if we can create room.
@@ -493,12 +548,16 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
 
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
 
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
   return obj;
 }
 
-
 AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
-                                       AllocationAlignment alignment) {
+                                       AllocationAlignment alignment,
+                                       AllocationOrigin origin) {
   if (top() < top_on_previous_step_) {
     // Generated code decreased the top() pointer to do folded allocations
     DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
@@ -507,8 +566,8 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
   }
 #ifdef V8_HOST_ARCH_32_BIT
   return alignment != kWordAligned
-             ? AllocateRawAligned(size_in_bytes, alignment)
-             : AllocateRawUnaligned(size_in_bytes);
+             ? AllocateRawAligned(size_in_bytes, alignment, origin)
+             : AllocateRawUnaligned(size_in_bytes, origin);
 #else
 #ifdef V8_COMPRESS_POINTERS
   // TODO(ishell, v8:8875): Consider using aligned allocations once the
@@ -516,14 +575,14 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
   // unaligned access since both x64 and arm64 architectures (where pointer
   // compression is supported) allow unaligned access to doubles and full words.
 #endif  // V8_COMPRESS_POINTERS
-  return AllocateRawUnaligned(size_in_bytes);
+  return AllocateRawUnaligned(size_in_bytes, origin);
 #endif
 }
 
 V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
-    int size_in_bytes, AllocationAlignment alignment) {
+    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
   base::MutexGuard guard(&mutex_);
-  return AllocateRaw(size_in_bytes, alignment);
+  return AllocateRaw(size_in_bytes, alignment, origin);
 }
 
 LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 438308a346d3f7..dd8ba301018c5b 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -703,7 +703,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                        nullptr);
   base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
                                        nullptr);
-  chunk->invalidated_slots_ = nullptr;
+  chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
+  chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
   chunk->set_concurrent_sweeping_state(kSweepingDone);
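
`invalidated_slots_` is now indexed by remembered-set type. A minimal standalone model of the shape this gives a chunk (in V8 the map goes from HeapObject to size; a raw address stands in for the object here, and everything else is simplified):

```cpp
#include <cstdint>
#include <map>

enum RememberedSetType {
  OLD_TO_NEW,
  OLD_TO_OLD,
  NUMBER_OF_REMEMBERED_SET_TYPES
};

// Key: object start address; value: its size in bytes.
using InvalidatedSlots = std::map<uintptr_t, int>;

struct ChunkModel {
  InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES] = {};

  template <RememberedSetType type>
  InvalidatedSlots* AllocateInvalidatedSlots() {
    return invalidated_slots_[type] = new InvalidatedSlots();
  }

  template <RememberedSetType type>
  void ReleaseInvalidatedSlots() {
    delete invalidated_slots_[type];
    invalidated_slots_[type] = nullptr;
  }
};

int main() {
  ChunkModel chunk;
  chunk.AllocateInvalidatedSlots<OLD_TO_NEW>();
  chunk.ReleaseInvalidatedSlots<OLD_TO_NEW>();  // each type is released separately
}
```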
@@ -821,8 +822,7 @@ void Page::AllocateFreeListCategories() {
   categories_ = new FreeListCategory*[free_list()->number_of_categories()]();
   for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
     DCHECK_NULL(categories_[i]);
-    categories_[i] = new FreeListCategory(
-        reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
+    categories_[i] = new FreeListCategory();
   }
 }
 
@@ -1379,7 +1379,8 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
   ReleaseSlotSet<OLD_TO_OLD>();
   ReleaseTypedSlotSet<OLD_TO_NEW>();
   ReleaseTypedSlotSet<OLD_TO_OLD>();
-  ReleaseInvalidatedSlots();
+  ReleaseInvalidatedSlots<OLD_TO_NEW>();
+  ReleaseInvalidatedSlots<OLD_TO_OLD>();
 
   if (local_tracker_ != nullptr) ReleaseLocalTracker();
   if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
@@ -1461,53 +1462,107 @@ void MemoryChunk::ReleaseTypedSlotSet() {
   }
 }
 
+template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
+template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();
+
+template <RememberedSetType type>
 InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
-  DCHECK_NULL(invalidated_slots_);
-  invalidated_slots_ = new InvalidatedSlots();
-  return invalidated_slots_;
+  DCHECK_NULL(invalidated_slots_[type]);
+  invalidated_slots_[type] = new InvalidatedSlots();
+  return invalidated_slots_[type];
 }
 
+template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
+template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();
+
+template <RememberedSetType type>
 void MemoryChunk::ReleaseInvalidatedSlots() {
-  if (invalidated_slots_) {
-    delete invalidated_slots_;
-    invalidated_slots_ = nullptr;
+  if (invalidated_slots_[type]) {
+    delete invalidated_slots_[type];
+    invalidated_slots_[type] = nullptr;
   }
 }
 
+template V8_EXPORT_PRIVATE void
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
+                                                            int size);
+template V8_EXPORT_PRIVATE void
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
+                                                            int size);
+
+template <RememberedSetType type>
 void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
                                                      int size) {
-  if (!ShouldSkipEvacuationSlotRecording()) {
-    if (invalidated_slots() == nullptr) {
-      AllocateInvalidatedSlots();
+  bool skip_slot_recording;
+
+  if (type == OLD_TO_NEW) {
+    skip_slot_recording = InYoungGeneration();
+  } else {
+    skip_slot_recording = ShouldSkipEvacuationSlotRecording();
+  }
+
+  if (skip_slot_recording) {
+    return;
+  }
+
+  if (invalidated_slots<type>() == nullptr) {
+    AllocateInvalidatedSlots<type>();
+  }
+
+  InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
+  InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
+
+  if (it != invalidated_slots->end() && it->first == object) {
+    // The object was already inserted.
+    CHECK_LE(size, it->second);
+    return;
+  }
+
+  it = invalidated_slots->insert(it, std::make_pair(object, size));
+
+  // Prevent overlapping invalidated objects for old-to-new.
+  if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
+    HeapObject pred = (--it)->first;
+    int pred_size = it->second;
+    DCHECK_LT(pred.address(), object.address());
+
+    if (pred.address() + pred_size > object.address()) {
+      it->second = static_cast<int>(object.address() - pred.address());
     }
-    int old_size = (*invalidated_slots())[object];
-    (*invalidated_slots())[object] = std::max(old_size, size);
   }
 }
 
+template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
+    HeapObject object);
+template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
+    HeapObject object);
+
+template <RememberedSetType type>
 bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
-  if (ShouldSkipEvacuationSlotRecording()) {
-    // Invalidated slots do not matter if we are not recording slots.
-    return true;
-  }
-  if (invalidated_slots() == nullptr) {
+  if (invalidated_slots<type>() == nullptr) {
     return false;
   }
-  return invalidated_slots()->find(object) != invalidated_slots()->end();
+  return invalidated_slots<type>()->find(object) !=
+         invalidated_slots<type>()->end();
 }
 
+template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
+    HeapObject old_start, HeapObject new_start);
+
+template <RememberedSetType type>
 void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
                                                  HeapObject new_start) {
   DCHECK_LT(old_start, new_start);
   DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
             MemoryChunk::FromHeapObject(new_start));
-  if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
-    auto it = invalidated_slots()->find(old_start);
-    if (it != invalidated_slots()->end()) {
+  static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
+  if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
+    auto it = invalidated_slots<type>()->find(old_start);
+    if (it != invalidated_slots<type>()->end()) {
       int old_size = it->second;
       int delta = static_cast<int>(new_start.address() - old_start.address());
-      invalidated_slots()->erase(it);
-      (*invalidated_slots())[new_start] = old_size - delta;
+      invalidated_slots<type>()->erase(it);
+      (*invalidated_slots<type>())[new_start] = old_size - delta;
     }
   }
 }
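
The old-to-new clipping step in RegisterObjectWithInvalidatedSlots above keeps the set free of overlapping ranges. A standalone sketch with raw addresses in place of HeapObjects (all names here are illustrative):

```cpp
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>

using Slots = std::map<uintptr_t, int>;  // object start address -> size in bytes

void Register(Slots& slots, uintptr_t object, int size) {
  auto it = slots.lower_bound(object);
  if (it != slots.end() && it->first == object) return;  // already registered
  it = slots.insert(it, {object, size});
  if (it != slots.begin()) {
    auto pred = std::prev(it);
    // Clip the predecessor so the two recorded ranges no longer overlap.
    if (pred->first + pred->second > object) {
      pred->second = static_cast<int>(object - pred->first);
    }
  }
}

int main() {
  Slots slots;
  Register(slots, 0x100, 0x40);  // covers [0x100, 0x140)
  Register(slots, 0x120, 0x20);  // starts inside the first range
  assert(slots[0x100] == 0x20);  // first range clipped to [0x100, 0x120)
}
```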
@@ -1532,10 +1587,6 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
-void Space::CheckOffsetsAreConsistent() const {
-  DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
-}
-
 void Space::AddAllocationObserver(AllocationObserver* observer) {
   allocation_observers_.push_back(observer);
   StartNextInlineAllocationStep();
@@ -1612,8 +1663,9 @@ void PagedSpace::RefillFreeList() {
       // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
       // entries here to make them unavailable for allocations.
       if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
-        p->ForAllFreeListCategories(
-            [](FreeListCategory* category) { category->Reset(); });
+        p->ForAllFreeListCategories([this](FreeListCategory* category) {
+          category->Reset(free_list());
+        });
       }
       // Only during compaction pages can actually change ownership. This is
       // safe because there exists no other competing action on the page links
@@ -1645,6 +1697,11 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   //   area_size_
   other->FreeLinearAllocationArea();
 
+  for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
+       i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
+    allocations_origins_[i] += other->allocations_origins_[i];
+  }
+
   // The linear allocation area of {other} should be destroyed now.
   DCHECK_EQ(kNullAddress, other->top());
   DCHECK_EQ(kNullAddress, other->limit());
@@ -1846,6 +1903,20 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
   }
 }
 
+void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
+  DCHECK(!((origin != AllocationOrigin::kGC) &&
+           (heap()->isolate()->current_vm_state() == GC)));
+  allocations_origins_[static_cast<int>(origin)]++;
+}
+
+void SpaceWithLinearArea::PrintAllocationsOrigins() {
+  PrintIsolate(
+      heap()->isolate(),
+      "Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
+      name(), allocations_origins_[0], allocations_origins_[1],
+      allocations_origins_[2]);
+}
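
A reduced model of the counters behind PrintAllocationsOrigins (the enum spelling is inferred from the GeneratedCode/Runtime/GC labels in the format string):

```cpp
#include <cstddef>
#include <cstdio>

enum class AllocationOrigin { kGeneratedCode = 0, kRuntime = 1, kGC = 2 };

struct SpaceModel {
  size_t allocations_origins_[3] = {0, 0, 0};

  void UpdateAllocationOrigins(AllocationOrigin origin) {
    allocations_origins_[static_cast<int>(origin)]++;
  }
};

int main() {
  SpaceModel space;
  space.UpdateAllocationOrigins(AllocationOrigin::kRuntime);
  space.UpdateAllocationOrigins(AllocationOrigin::kGC);
  std::printf("GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
              space.allocations_origins_[0], space.allocations_origins_[1],
              space.allocations_origins_[2]);
  // Prints: GeneratedCode:0 - Runtime:1 - GC:1
}
```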
+
 void PagedSpace::MarkLinearAllocationAreaBlack() {
   DCHECK(heap()->incremental_marking()->black_allocation());
   Address current_top = top();
@@ -1911,7 +1982,6 @@ void PagedSpace::ReleasePage(Page* page) {
   DCHECK_EQ(page->owner(), this);
 
   free_list_->EvictFreeListItems(page);
-  DCHECK(!free_list_->ContainsPageFreeListItems(page));
 
   if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
     DCHECK(!top_on_previous_step_);
@@ -1951,7 +2021,8 @@ std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
   return std::unique_ptr<ObjectIterator>(new PagedSpaceObjectIterator(this));
 }
 
-bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
+bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
+    size_t size_in_bytes, AllocationOrigin origin) {
   DCHECK(IsAligned(size_in_bytes, kTaggedSize));
   DCHECK_LE(top(), limit());
 #ifdef DEBUG
@@ -1974,9 +2045,9 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
   }
 
   size_t new_node_size = 0;
-  FreeSpace new_node = free_list_->Allocate(size_in_bytes, &new_node_size);
+  FreeSpace new_node =
+      free_list_->Allocate(size_in_bytes, &new_node_size, origin);
   if (new_node.is_null()) return false;
-
   DCHECK_GE(new_node_size, size_in_bytes);
 
   // The old-space-step might have finished sweeping and restarted marking.
@@ -2895,42 +2966,41 @@ size_t NewSpace::CommittedPhysicalMemory() {
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
-
-void FreeListCategory::Reset() {
+void FreeListCategory::Reset(FreeList* owner) {
+  if (is_linked(owner) && !top().is_null()) {
+    owner->DecreaseAvailableBytes(available_);
+  }
   set_top(FreeSpace());
   set_prev(nullptr);
   set_next(nullptr);
   available_ = 0;
-  length_ = 0;
 }
 
 FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
                                              size_t* node_size) {
-  DCHECK(page()->CanAllocate());
   FreeSpace node = top();
   DCHECK(!node.is_null());
+  DCHECK(Page::FromHeapObject(node)->CanAllocate());
   if (static_cast<size_t>(node.Size()) < minimum_size) {
     *node_size = 0;
     return FreeSpace();
   }
   set_top(node.next());
   *node_size = node.Size();
-  available_ -= *node_size;
-  length_--;
+  UpdateCountersAfterAllocation(*node_size);
   return node;
 }
 
 FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
                                                 size_t* node_size) {
-  DCHECK(page()->CanAllocate());
   FreeSpace prev_non_evac_node;
   for (FreeSpace cur_node = top(); !cur_node.is_null();
        cur_node = cur_node.next()) {
+    DCHECK(Page::FromHeapObject(cur_node)->CanAllocate());
     size_t size = cur_node.size();
     if (size >= minimum_size) {
       DCHECK_GE(available_, size);
-      available_ -= size;
-      length_--;
+      UpdateCountersAfterAllocation(size);
       if (cur_node == top()) {
         set_top(cur_node.next());
       }
@@ -2950,19 +3020,21 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
   return FreeSpace();
 }
 
-void FreeListCategory::Free(Address start, size_t size_in_bytes,
-                            FreeMode mode) {
+void FreeListCategory::Free(Address start, size_t size_in_bytes, FreeMode mode,
+                            FreeList* owner) {
   FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
   free_space.set_next(top());
   set_top(free_space);
   available_ += size_in_bytes;
-  length_++;
-  if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
-    owner()->AddCategory(this);
+  if (mode == kLinkCategory) {
+    if (is_linked(owner)) {
+      owner->IncreaseAvailableBytes(size_in_bytes);
+    } else {
+      owner->AddCategory(this);
+    }
   }
 }
 
-
 void FreeListCategory::RepairFreeList(Heap* heap) {
   Map free_space_map = ReadOnlyRoots(heap).free_space_map();
   FreeSpace n = top();
@@ -2977,21 +3049,30 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
   }
 }
 
-void FreeListCategory::Relink() {
-  DCHECK(!is_linked());
-  owner()->AddCategory(this);
+void FreeListCategory::Relink(FreeList* owner) {
+  DCHECK(!is_linked(owner));
+  owner->AddCategory(this);
 }
 
 // ------------------------------------------------
 // Generic FreeList methods (alloc/free related)
 
 FreeList* FreeList::CreateFreeList() {
-  if (FLAG_gc_freelist_strategy == 1) {
-    return new FreeListFastAlloc();
-  } else if (FLAG_gc_freelist_strategy == 2) {
-    return new FreeListMany();
-  } else {
-    return new FreeListLegacy();
+  switch (FLAG_gc_freelist_strategy) {
+    case 0:
+      return new FreeListLegacy();
+    case 1:
+      return new FreeListFastAlloc();
+    case 2:
+      return new FreeListMany();
+    case 3:
+      return new FreeListManyCached();
+    case 4:
+      return new FreeListManyCachedFastPath();
+    case 5:
+      return new FreeListManyCachedOrigin();
+    default:
+      FATAL("Invalid FreeList strategy");
   }
 }
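
With the switch, strategies 0 through 5 select the six freelist implementations explicitly, and an out-of-range value now fails fast via FATAL instead of silently falling back to FreeListLegacy; assuming V8's usual flag naming, the strategy would be chosen at startup via --gc-freelist-strategy=N.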
 
@@ -3001,6 +3082,7 @@ FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
   if (category == nullptr) return FreeSpace();
   FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
   if (!node.is_null()) {
+    DecreaseAvailableBytes(*node_size);
     DCHECK(IsVeryLong() || Available() == SumFreeLists());
   }
   if (category->is_empty()) {
@@ -3018,6 +3100,7 @@ FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
     FreeListCategory* current = it.Next();
     node = current->SearchForNodeInList(minimum_size, node_size);
     if (!node.is_null()) {
+      DecreaseAvailableBytes(*node_size);
       DCHECK(IsVeryLong() || Available() == SumFreeLists());
       if (current->is_empty()) {
         RemoveCategory(current);
@@ -3042,7 +3125,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
   // Insert other blocks at the head of a free list of the appropriate
   // magnitude.
   FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
-  page->free_list_category(type)->Free(start, size_in_bytes, mode);
+  page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
   DCHECK_EQ(page->AvailableInFreeList(),
             page->AvailableInFreeListFromAllocatedBytes());
   return 0;
@@ -3063,7 +3146,8 @@ FreeListLegacy::FreeListLegacy() {
 
 FreeListLegacy::~FreeListLegacy() { delete[] categories_; }
 
-FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size,
+                                   AllocationOrigin origin) {
   DCHECK_GE(kMaxBlockSize, size_in_bytes);
   FreeSpace node;
   // First try the allocation fast path: try to allocate the minimum element
@@ -3121,7 +3205,8 @@ FreeListFastAlloc::FreeListFastAlloc() {
 
 FreeListFastAlloc::~FreeListFastAlloc() { delete[] categories_; }
 
-FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size,
+                                      AllocationOrigin origin) {
   DCHECK_GE(kMaxBlockSize, size_in_bytes);
   FreeSpace node;
   // Try to allocate the biggest element possible (to make the most of later
@@ -3143,16 +3228,7 @@ FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size) {
 // ------------------------------------------------
 // FreeListMany implementation
 
-// Cf. the declaration of |categories_max| in |spaces.h| to see how this is
-// computed.
-const size_t FreeListMany::categories_max[kNumberOfCategories] = {
-    24,    32,    40,    48,    56,    64,    72,
-    80,    88,    96,    104,   112,   120,   128,
-    136,   144,   152,   160,   168,   176,   184,
-    192,   200,   208,   216,   224,   232,   240,
-    248,   256,   384,   512,   768,   1024,  1536,
-    2048,  3072,  4080,  4088,  4096,  6144,  8192,
-    12288, 16384, 24576, 32768, 49152, 65536, Page::kPageSize};
+constexpr unsigned int FreeListMany::categories_min[kNumberOfCategories];
 
 FreeListMany::FreeListMany() {
   // Initializing base (FreeList) fields
@@ -3164,31 +3240,36 @@ FreeListMany::FreeListMany() {
   Reset();
 }
 
+FreeListMany::~FreeListMany() { delete[] categories_; }
+
 size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
-  if (maximum_freed < categories_max[0]) {
+  if (maximum_freed < categories_min[0]) {
     return 0;
   }
-  for (int cat = kFirstCategory + 1; cat < last_category_; cat++) {
-    if (maximum_freed <= categories_max[cat]) {
-      return categories_max[cat - 1];
+  for (int cat = kFirstCategory + 1; cat <= last_category_; cat++) {
+    if (maximum_freed < categories_min[cat]) {
+      return categories_min[cat - 1];
     }
   }
   return maximum_freed;
 }
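
With categories_min, GuaranteedAllocatable rounds maximum_freed down to the largest category minimum not exceeding it. A standalone check (boundary values are samples, not V8's table):

```cpp
#include <cassert>
#include <cstddef>

constexpr size_t kMins[] = {24, 32, 40, 48};  // sample category minimums
constexpr int kLastCategory = 3;

size_t GuaranteedAllocatable(size_t maximum_freed) {
  if (maximum_freed < kMins[0]) return 0;
  for (int cat = 1; cat <= kLastCategory; cat++) {
    if (maximum_freed < kMins[cat]) return kMins[cat - 1];
  }
  return maximum_freed;
}

int main() {
  assert(GuaranteedAllocatable(20) == 0);   // below the smallest category
  assert(GuaranteedAllocatable(35) == 32);  // rounds down to the 32-byte class
  assert(GuaranteedAllocatable(96) == 96);  // at or past the last minimum
}
```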
 
 Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
-  const int minimum_category =
-      static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
-  Page* page = GetPageForCategoryType(last_category_);
-  for (int cat = last_category_ - 1; !page && cat >= minimum_category; cat--) {
+  FreeListCategoryType minimum_category =
+      SelectFreeListCategoryType(size_in_bytes);
+  Page* page = nullptr;
+  for (int cat = minimum_category + 1; !page && cat <= last_category_; cat++) {
     page = GetPageForCategoryType(cat);
   }
+  if (!page) {
+    // Might return a page in which |size_in_bytes| will not fit.
+    page = GetPageForCategoryType(minimum_category);
+  }
   return page;
 }
 
-FreeListMany::~FreeListMany() { delete[] categories_; }
-
-FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size,
+                                 AllocationOrigin origin) {
   DCHECK_GE(kMaxBlockSize, size_in_bytes);
   FreeSpace node;
   FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
@@ -3210,40 +3291,259 @@ FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size) {
   return node;
 }
 
+// ------------------------------------------------
+// FreeListManyCached implementation
+
+FreeListManyCached::FreeListManyCached() { ResetCache(); }
+
+void FreeListManyCached::Reset() {
+  ResetCache();
+  FreeListMany::Reset();
+}
+
+bool FreeListManyCached::AddCategory(FreeListCategory* category) {
+  bool was_added = FreeList::AddCategory(category);
+
+  // Updating cache
+  if (was_added) {
+    UpdateCacheAfterAddition(category->type_);
+  }
+
+#ifdef DEBUG
+  CheckCacheIntegrity();
+#endif
+
+  return was_added;
+}
+
+void FreeListManyCached::RemoveCategory(FreeListCategory* category) {
+  FreeList::RemoveCategory(category);
+
+  // Updating cache
+  int type = category->type_;
+  if (categories_[type] == nullptr) {
+    UpdateCacheAfterRemoval(type);
+  }
+
+#ifdef DEBUG
+  CheckCacheIntegrity();
+#endif
+}
+
+size_t FreeListManyCached::Free(Address start, size_t size_in_bytes,
+                                FreeMode mode) {
+  Page* page = Page::FromAddress(start);
+  page->DecreaseAllocatedBytes(size_in_bytes);
+
+  // Blocks have to be a minimum size to hold free list items.
+  if (size_in_bytes < min_block_size_) {
+    page->add_wasted_memory(size_in_bytes);
+    wasted_bytes_ += size_in_bytes;
+    return size_in_bytes;
+  }
+
+  // Insert other blocks at the head of a free list of the appropriate
+  // magnitude.
+  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+  page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
+
+  // Updating cache
+  if (mode == kLinkCategory) {
+    UpdateCacheAfterAddition(type);
+
+#ifdef DEBUG
+    CheckCacheIntegrity();
+#endif
+  }
+
+  DCHECK_EQ(page->AvailableInFreeList(),
+            page->AvailableInFreeListFromAllocatedBytes());
+  return 0;
+}
+
+FreeSpace FreeListManyCached::Allocate(size_t size_in_bytes, size_t* node_size,
+                                       AllocationOrigin origin) {
+  USE(origin);
+  DCHECK_GE(kMaxBlockSize, size_in_bytes);
+
+  FreeSpace node;
+  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+  type = next_nonempty_category[type];
+  for (; type < last_category_; type = next_nonempty_category[type + 1]) {
+    node = TryFindNodeIn(type, size_in_bytes, node_size);
+    if (!node.is_null()) break;
+  }
+
+  if (node.is_null()) {
+    // Searching each element of the last category.
+    type = last_category_;
+    node = SearchForNodeInList(type, size_in_bytes, node_size);
+  }
+
+  // Updating cache
+  if (!node.is_null() && categories_[type] == nullptr) {
+    UpdateCacheAfterRemoval(type);
+  }
+
+#ifdef DEBUG
+  CheckCacheIntegrity();
+#endif
+
+  if (!node.is_null()) {
+    Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+  }
+
+  DCHECK(IsVeryLong() || Available() == SumFreeLists());
+  return node;
+}
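
The next_nonempty_category cache is what lets the loop above skip empty categories in O(1): entry i holds the smallest non-empty category >= i. A reduced model of the update rules (function names follow the Update... calls above; category count and sentinel handling are simplified):

```cpp
#include <cassert>

constexpr int kCats = 6;       // sample category count
int next_nonempty[kCats + 1];  // extra entry acts as a sentinel

void ResetCache() {
  for (int i = 0; i <= kCats; i++) next_nonempty[i] = kCats;  // all empty
}

void UpdateCacheAfterAddition(int cat) {
  for (int i = cat; i >= 0 && next_nonempty[i] > cat; i--) next_nonempty[i] = cat;
}

void UpdateCacheAfterRemoval(int cat) {
  for (int i = cat; i >= 0 && next_nonempty[i] == cat; i--)
    next_nonempty[i] = next_nonempty[cat + 1];
}

int main() {
  ResetCache();
  UpdateCacheAfterAddition(4);
  UpdateCacheAfterAddition(2);
  assert(next_nonempty[0] == 2);  // smallest non-empty category from 0 is 2
  assert(next_nonempty[3] == 4);  // from 3 it is 4
  UpdateCacheAfterRemoval(2);
  assert(next_nonempty[0] == 4);  // category 2 emptied; fall through to 4
}
```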
+
+// ------------------------------------------------
+// FreeListManyCachedFastPath implementation
+
+FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
+                                               size_t* node_size,
+                                               AllocationOrigin origin) {
+  USE(origin);
+  DCHECK_GE(kMaxBlockSize, size_in_bytes);
+  FreeSpace node;
+
+  // Fast path part 1: searching the last categories
+  FreeListCategoryType first_category =
+      SelectFastAllocationFreeListCategoryType(size_in_bytes);
+  FreeListCategoryType type = first_category;
+  for (type = next_nonempty_category[type]; type <= last_category_;
+       type = next_nonempty_category[type + 1]) {
+    node = TryFindNodeIn(type, size_in_bytes, node_size);
+    if (!node.is_null()) break;
+  }
+
+  // Fast path part 2: searching the medium categories for tiny objects
+  if (node.is_null()) {
+    if (size_in_bytes <= kTinyObjectMaxSize) {
+      for (type = next_nonempty_category[kFastPathFallBackTiny];
+           type < kFastPathFirstCategory;
+           type = next_nonempty_category[type + 1]) {
+        node = TryFindNodeIn(type, size_in_bytes, node_size);
+        if (!node.is_null()) break;
+      }
+    }
+  }
+
+  // Searching the last category
+  if (node.is_null()) {
+    // Searching each element of the last category.
+    type = last_category_;
+    node = SearchForNodeInList(type, size_in_bytes, node_size);
+  }
+
+  // Finally, search the most precise category
+  if (node.is_null()) {
+    type = SelectFreeListCategoryType(size_in_bytes);
+    for (type = next_nonempty_category[type]; type < first_category;
+         type = next_nonempty_category[type + 1]) {
+      node = TryFindNodeIn(type, size_in_bytes, node_size);
+      if (!node.is_null()) break;
+    }
+  }
+
+  // Updating cache
+  if (!node.is_null() && categories_[type] == nullptr) {
+    UpdateCacheAfterRemoval(type);
+  }
+
+#ifdef DEBUG
+  CheckCacheIntegrity();
+#endif
+
+  if (!node.is_null()) {
+    Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+  }
+
+  DCHECK(IsVeryLong() || Available() == SumFreeLists());
+  return node;
+}
+
+// ------------------------------------------------
+// FreeListManyCachedOrigin implementation
+
+FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
+                                             size_t* node_size,
+                                             AllocationOrigin origin) {
+  if (origin == AllocationOrigin::kGC) {
+    return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
+  } else {
+    return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
+                                                origin);
+  }
+}
+
+// ------------------------------------------------
+// FreeListMap implementation
+
+FreeListMap::FreeListMap() {
+  // Initializing base (FreeList) fields
+  number_of_categories_ = 1;
+  last_category_ = kOnlyCategory;
+  min_block_size_ = kMinBlockSize;
+  categories_ = new FreeListCategory*[number_of_categories_]();
+
+  Reset();
+}
+
+size_t FreeListMap::GuaranteedAllocatable(size_t maximum_freed) {
+  return maximum_freed;
+}
+
+Page* FreeListMap::GetPageForSize(size_t size_in_bytes) {
+  return GetPageForCategoryType(kOnlyCategory);
+}
+
+FreeListMap::~FreeListMap() { delete[] categories_; }
+
+FreeSpace FreeListMap::Allocate(size_t size_in_bytes, size_t* node_size,
+                                AllocationOrigin origin) {
+  DCHECK_GE(kMaxBlockSize, size_in_bytes);
+
+  // The following DCHECK ensures that maps are allocated one by one (i.e.,
+  // without folding). This assumption currently holds. However, if it were to
+  // become untrue in the future, you'll get an error here. To fix it, I would
+  // suggest removing the DCHECK, and replacing TryFindNodeIn by
+  // SearchForNodeInList below.
+  DCHECK_EQ(size_in_bytes, Map::kSize);
+
+  FreeSpace node = TryFindNodeIn(kOnlyCategory, size_in_bytes, node_size);
+
+  if (!node.is_null()) {
+    Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+  }
+
+  DCHECK_IMPLIES(node.is_null(), IsEmpty());
+  return node;
+}
+
 // ------------------------------------------------
 // Generic FreeList methods (non alloc/free related)
 
 void FreeList::Reset() {
   ForAllFreeListCategories(
-      [](FreeListCategory* category) { category->Reset(); });
+      [this](FreeListCategory* category) { category->Reset(this); });
   for (int i = kFirstCategory; i < number_of_categories_; i++) {
     categories_[i] = nullptr;
   }
   wasted_bytes_ = 0;
+  available_ = 0;
 }
 
 size_t FreeList::EvictFreeListItems(Page* page) {
   size_t sum = 0;
   page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
-    DCHECK_EQ(this, category->owner());
     sum += category->available();
     RemoveCategory(category);
-    category->Reset();
+    category->Reset(this);
   });
   return sum;
 }
 
-bool FreeList::ContainsPageFreeListItems(Page* page) {
-  bool contained = false;
-  page->ForAllFreeListCategories(
-      [this, &contained](FreeListCategory* category) {
-        if (category->owner() == this && category->is_linked()) {
-          contained = true;
-        }
-      });
-  return contained;
-}
-
 void FreeList::RepairLists(Heap* heap) {
   ForAllFreeListCategories(
       [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
@@ -3255,7 +3555,7 @@ bool FreeList::AddCategory(FreeListCategory* category) {
   FreeListCategory* top = categories_[type];
 
   if (category->is_empty()) return false;
-  if (top == category) return false;
+  DCHECK_NE(top, category);
 
   // Common double-linked list insertion.
   if (top != nullptr) {
@@ -3263,6 +3563,8 @@ bool FreeList::AddCategory(FreeListCategory* category) {
   }
   category->set_next(top);
   categories_[type] = category;
+
+  IncreaseAvailableBytes(category->available());
   return true;
 }
 
@@ -3271,6 +3573,10 @@ void FreeList::RemoveCategory(FreeListCategory* category) {
   DCHECK_LT(type, number_of_categories_);
   FreeListCategory* top = categories_[type];
 
+  if (category->is_linked(this)) {
+    DecreaseAvailableBytes(category->available());
+  }
+
   // Common double-linked list removal.
   if (top == category) {
     categories_[type] = category->next();
@@ -3312,13 +3618,25 @@ size_t FreeListCategory::SumFreeList() {
   while (!cur.is_null()) {
     // We can't use "cur->map()" here because both cur's map and the
     // root can be null during bootstrapping.
-    DCHECK(cur.map_slot().contains_value(
-        page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr()));
+    DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
+                                             ->heap()
+                                             ->isolate()
+                                             ->root(RootIndex::kFreeSpaceMap)
+                                             .ptr()));
     sum += cur.relaxed_read_size();
     cur = cur.next();
   }
   return sum;
 }
+int FreeListCategory::FreeListLength() {
+  int length = 0;
+  FreeSpace cur = top();
+  while (!cur.is_null()) {
+    length++;
+    cur = cur.next();
+  }
+  return length;
+}
 
 #ifdef DEBUG
 bool FreeList::IsVeryLong() {
@@ -3364,7 +3682,8 @@ size_t PagedSpace::SizeOfObjects() {
   return Size() - (limit() - top());
 }
 
-bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes,
+                                         AllocationOrigin origin) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->sweeping_in_progress()) {
     // Wait for the sweeper threads here and complete the sweeping phase.
@@ -3372,38 +3691,43 @@ bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
 
     // After waiting for the sweeper threads, there may be new free-list
     // entries.
-    return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
+    return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
   }
   return false;
 }
 
-bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes,
+                                              AllocationOrigin origin) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
     collector->sweeper()->ParallelSweepSpace(identity(), 0);
     RefillFreeList();
-    return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
+    return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
   }
   return false;
 }
 
-bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
+bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+                                                AllocationOrigin origin) {
   VMState<GC> state(heap()->isolate());
   RuntimeCallTimerScope runtime_timer(
       heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
-  return RawSlowRefillLinearAllocationArea(size_in_bytes);
+  return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
 }
 
-bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
-  return RawSlowRefillLinearAllocationArea(size_in_bytes);
+bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+                                                     AllocationOrigin origin) {
+  return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
 }
 
-bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
+bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
+                                                   AllocationOrigin origin) {
   // Allocation in this space has failed.
   DCHECK_GE(size_in_bytes, 0);
   const int kMaxPagesToSweep = 1;
 
-  if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
+  if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
+    return true;
 
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
@@ -3419,16 +3743,24 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
 
     // Retry the free list allocation.
     if (RefillLinearAllocationAreaFromFreeList(
-            static_cast<size_t>(size_in_bytes)))
+            static_cast<size_t>(size_in_bytes), origin))
       return true;
 
+    // Clean up invalidated old-to-new refs for compaction space in the
+    // final atomic pause.
+    Sweeper::FreeSpaceMayContainInvalidatedSlots
+        invalidated_slots_in_free_space =
+            is_local() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
+                       : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+
     // If sweeping is still in progress try to sweep pages.
     int max_freed = collector->sweeper()->ParallelSweepSpace(
-        identity(), size_in_bytes, kMaxPagesToSweep);
+        identity(), size_in_bytes, kMaxPagesToSweep,
+        invalidated_slots_in_free_space);
     RefillFreeList();
     if (max_freed >= size_in_bytes) {
       if (RefillLinearAllocationAreaFromFreeList(
-              static_cast<size_t>(size_in_bytes)))
+              static_cast<size_t>(size_in_bytes), origin))
         return true;
     }
   }
@@ -3441,7 +3773,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
     if (page != nullptr) {
       AddPage(page);
       if (RefillLinearAllocationAreaFromFreeList(
-              static_cast<size_t>(size_in_bytes)))
+              static_cast<size_t>(size_in_bytes), origin))
         return true;
     }
   }
@@ -3450,22 +3782,57 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
     DCHECK((CountTotalPages() > 1) ||
            (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
     return RefillLinearAllocationAreaFromFreeList(
-        static_cast<size_t>(size_in_bytes));
+        static_cast<size_t>(size_in_bytes), origin);
   }
 
   // If sweeper threads are active, wait for them at that point and steal
   // elements from their free-lists. Allocation may still fail, which
   // would indicate that there is not enough memory for the given allocation.
-  return SweepAndRetryAllocation(size_in_bytes);
+  return SweepAndRetryAllocation(size_in_bytes, origin);
 }
 
 // -----------------------------------------------------------------------------
 // MapSpace implementation
 
+// TODO(dmercadier): use a heap instead of sorting like this.
+// Using a heap will have multiple benefits:
+//   - for now, SortFreeList is only called after sweeping, which is somewhat
+//   late. Using a heap, sorting could be done online: FreeListCategories would
+//   be inserted in a heap (i.e., in a sorted manner).
+//   - SortFreeList is a bit fragile: any change to FreeListMap (or to
+//   MapSpace::free_list_) could break it.
+void MapSpace::SortFreeList() {
+  using LiveBytesPagePair = std::pair<size_t, Page*>;
+  std::vector<LiveBytesPagePair> pages;
+  pages.reserve(CountTotalPages());
+
+  for (Page* p : *this) {
+    free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
+    pages.push_back(std::make_pair(p->allocated_bytes(), p));
+  }
+
+  // Sorting by least-allocated-bytes first.
+  std::sort(pages.begin(), pages.end(),
+            [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
+              return a.first < b.first;
+            });
+
+  for (LiveBytesPagePair const& p : pages) {
+    // Since AddCategory inserts at the head, it reverses the order produced
+    // by the sort above: least-allocated-bytes will be added first, and will
+    // therefore be the last element (and the first one will be
+    // most-allocated-bytes).
+    free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
+  }
+}
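
A worked illustration of the resulting order, under the head-insertion behavior described in the comment above (byte counts invented):

```cpp
#include <algorithm>
#include <cassert>
#include <list>
#include <vector>

int main() {
  std::vector<int> allocated_bytes = {90, 10, 50};
  std::sort(allocated_bytes.begin(), allocated_bytes.end());  // 10, 50, 90
  std::list<int> free_list;
  for (int bytes : allocated_bytes) free_list.push_front(bytes);  // head insert
  assert(free_list.front() == 90);  // most-allocated page ends up first,
  assert(free_list.back() == 10);   // so new maps land on crowded pages
}
```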
+
 #ifdef VERIFY_HEAP
 void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
 #endif
 
+// -----------------------------------------------------------------------------
+// ReadOnlySpace implementation
+
 ReadOnlySpace::ReadOnlySpace(Heap* heap)
     : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
       is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 384c731f3761d9..ebb6876cbe1781 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -31,6 +31,7 @@
 #include "src/tasks/cancelable-task.h"
 #include "src/utils/allocation.h"
 #include "src/utils/utils.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck
 
 namespace v8 {
 namespace internal {
@@ -120,7 +121,7 @@ class Space;
 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
   DCHECK((0 < size) && (size <= code_space->AreaSize()))
 
-using FreeListCategoryType = int;
+using FreeListCategoryType = int32_t;
 
 static const FreeListCategoryType kFirstCategory = 0;
 static const FreeListCategoryType kInvalidCategory = -1;
@@ -138,32 +139,23 @@ enum RememberedSetType {
 // A free list category maintains a linked list of free memory blocks.
 class FreeListCategory {
  public:
-  FreeListCategory(FreeList* free_list, Page* page)
-      : free_list_(free_list),
-        page_(page),
-        type_(kInvalidCategory),
-        available_(0),
-        length_(0),
-        prev_(nullptr),
-        next_(nullptr) {}
-
   void Initialize(FreeListCategoryType type) {
     type_ = type;
     available_ = 0;
-    length_ = 0;
     prev_ = nullptr;
     next_ = nullptr;
   }
 
-  void Reset();
+  void Reset(FreeList* owner);
 
   void RepairFreeList(Heap* heap);
 
   // Relinks the category into the currently owning free list. Requires that the
   // category is currently unlinked.
-  void Relink();
+  void Relink(FreeList* owner);
 
-  void Free(Address address, size_t size_in_bytes, FreeMode mode);
+  void Free(Address address, size_t size_in_bytes, FreeMode mode,
+            FreeList* owner);
 
   // Performs a single try to pick a node of at least |minimum_size| from the
   // category. Stores the actual size in |node_size|. Returns nullptr if no
@@ -174,22 +166,22 @@ class FreeListCategory {
   // actual size in |node_size|. Returns nullptr if no node is found.
   FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
 
-  inline FreeList* owner();
-  inline Page* page() const { return page_; }
-  inline bool is_linked();
+  inline bool is_linked(FreeList* owner) const;
   bool is_empty() { return top().is_null(); }
-  size_t available() const { return available_; }
-
-  void set_free_list(FreeList* free_list) { free_list_ = free_list; }
+  uint32_t available() const { return available_; }
 
   size_t SumFreeList();
-  int FreeListLength() { return length_; }
+  int FreeListLength();
 
  private:
   // For debug builds we accurately compute free lists lengths up until
   // {kVeryLongFreeList} by manually walking the list.
   static const int kVeryLongFreeList = 500;
 
+  // Updates |available_|, |length_| and free_list_->Available() after an
+  // allocation of size |allocation_size|.
+  inline void UpdateCountersAfterAllocation(size_t allocation_size);
+
   FreeSpace top() { return top_; }
   void set_top(FreeSpace top) { top_ = top; }
   FreeListCategory* prev() { return prev_; }
@@ -197,32 +189,23 @@ class FreeListCategory {
   FreeListCategory* next() { return next_; }
   void set_next(FreeListCategory* next) { next_ = next; }
 
-  // This FreeListCategory is owned by the given free_list_.
-  FreeList* free_list_;
-
-  // This FreeListCategory holds free list entries of the given page_.
-  Page* const page_;
-
   // |type_|: The type of this free list category.
-  FreeListCategoryType type_;
+  FreeListCategoryType type_ = kInvalidCategory;
 
   // |available_|: Total available bytes in all blocks of this free list
   // category.
-  size_t available_;
-
-  // |length_|: Total blocks in this free list category.
-  int length_;
+  uint32_t available_ = 0;
 
   // |top_|: Points to the top FreeSpace in the free list category.
   FreeSpace top_;
 
-  FreeListCategory* prev_;
-  FreeListCategory* next_;
+  FreeListCategory* prev_ = nullptr;
+  FreeListCategory* next_ = nullptr;
 
   friend class FreeList;
+  friend class FreeListManyCached;
   friend class PagedSpace;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
+  friend class MapSpace;
 };
 
 // A free list maintains free blocks of memory. The free list is organized in
@@ -256,22 +239,24 @@ class FreeList {
   // size_in_bytes. This method returns null if the allocation request cannot be
   // handled by the free list.
   virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
-                                                   size_t* node_size) = 0;
+                                                   size_t* node_size,
+                                                   AllocationOrigin origin) = 0;
 
   // Returns a page containing an entry for a given type, or nullptr otherwise.
   V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
 
-  void Reset();
+  virtual void Reset();
 
   // Return the number of bytes available on the free list.
   size_t Available() {
-    size_t available = 0;
-    ForAllFreeListCategories([&available](FreeListCategory* category) {
-      available += category->available();
-    });
-    return available;
+    DCHECK(available_ == SumFreeLists());
+    return available_;
   }
 
+  // Updates the number of available bytes on the freelists.
+  void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
+  void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }
+
   bool IsEmpty() {
     bool empty = true;
     ForAllFreeListCategories([&empty](FreeListCategory* category) {
@@ -284,7 +269,6 @@ class FreeList {
   void RepairLists(Heap* heap);
 
   V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
-  bool ContainsPageFreeListItems(Page* page);
 
   int number_of_categories() { return number_of_categories_; }
   FreeListCategoryType last_category() { return last_category_; }
@@ -308,15 +292,10 @@ class FreeList {
     }
   }
 
-  bool AddCategory(FreeListCategory* category);
-  V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
+  virtual bool AddCategory(FreeListCategory* category);
+  virtual V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
   void PrintCategories(FreeListCategoryType type);
 
-#ifdef DEBUG
-  size_t SumFreeLists();
-  bool IsVeryLong();
-#endif
-
  protected:
   class FreeListCategoryIterator final {
    public:
@@ -336,6 +315,11 @@ class FreeList {
     FreeListCategory* current_;
   };
 
+#ifdef DEBUG
+  V8_EXPORT_PRIVATE size_t SumFreeLists();
+  bool IsVeryLong();
+#endif
+
   // Tries to retrieve a node from the first category in a given |type|.
   // Returns nullptr if the category is empty or the top entry is smaller
   // than minimum_size.
@@ -355,9 +339,7 @@ class FreeList {
     return categories_[type];
   }
 
-  Page* GetPageForCategoryType(FreeListCategoryType type) {
-    return top(type) ? top(type)->page() : nullptr;
-  }
+  inline Page* GetPageForCategoryType(FreeListCategoryType type);
 
   int number_of_categories_ = 0;
   FreeListCategoryType last_category_ = 0;
@@ -366,10 +348,14 @@ class FreeList {
   std::atomic<size_t> wasted_bytes_{0};
   FreeListCategory** categories_ = nullptr;
 
+  // |available_|: The total number of free bytes on this freelist.
+  size_t available_ = 0;
+
   friend class FreeListCategory;
   friend class Page;
   friend class MemoryChunk;
   friend class ReadOnlyPage;
+  friend class MapSpace;
 };
 
 // FreeList used for spaces that don't have freelists
@@ -383,7 +369,8 @@ class NoFreeList final : public FreeList {
     FATAL("NoFreeList can't be used as a standard FreeList.");
   }
   V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
-                                           size_t* node_size) final {
+                                           size_t* node_size,
+                                           AllocationOrigin origin) final {
     FATAL("NoFreeList can't be used as a standard FreeList.");
   }
   Page* GetPageForSize(size_t size_in_bytes) final {
@@ -412,11 +399,8 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
     external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
     external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
         0;
-    CheckOffsetsAreConsistent();
   }
 
-  void CheckOffsetsAreConsistent() const;
-
   static inline void MoveExternalBackingStoreBytes(
       ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
 
@@ -531,8 +515,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
   // Tracks off-heap memory used by this space.
   std::atomic<size_t>* external_backing_store_bytes_;
 
-  static const intptr_t kIdOffset = 9 * kSystemPointerSize;
-
   bool allocation_observers_paused_;
   Heap* heap_;
   AllocationSpace id_;
@@ -627,7 +609,8 @@ class MemoryChunk : public BasicMemoryChunk {
       + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
       + kSystemPointerSize *
             NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
-      + kSystemPointerSize  // InvalidatedSlots* invalidated_slots_
+      + kSystemPointerSize *
+            NUMBER_OF_REMEMBERED_SET_TYPES  // InvalidatedSlots* array
       + kSystemPointerSize  // std::atomic<intptr_t> high_water_mark_
       + kSystemPointerSize  // base::Mutex* mutex_
       + kSystemPointerSize  // std::atomic<ConcurrentSweepingState>
@@ -713,7 +696,7 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   bool ContainsSlots() {
     return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
-           invalidated_slots() != nullptr;
+           invalidated_slots<type>() != nullptr;
   }
 
   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
@@ -741,15 +724,23 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void ReleaseTypedSlotSet();
 
+  template <RememberedSetType type>
   InvalidatedSlots* AllocateInvalidatedSlots();
+  template <RememberedSetType type>
   void ReleaseInvalidatedSlots();
+  template <RememberedSetType type>
   V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
                                                             int size);
   // Updates invalidated_slots after array left-trimming.
+  template <RememberedSetType type>
   void MoveObjectWithInvalidatedSlots(HeapObject old_start,
                                       HeapObject new_start);
+  template <RememberedSetType type>
   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
-  InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
+  template <RememberedSetType type>
+  InvalidatedSlots* invalidated_slots() {
+    return invalidated_slots_[type];
+  }
 
   void ReleaseLocalTracker();
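
A hedged usage sketch of the per-remembered-set-type API above; `chunk`,
`object`, and `object_size` are hypothetical placeholders, and these call
sites are illustrative rather than copied from this patch:

    // Registration and lookup are now parameterized by RememberedSetType:
    chunk->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, object_size);
    InvalidatedSlots* old_to_new = chunk->invalidated_slots<OLD_TO_NEW>();
    InvalidatedSlots* old_to_old = chunk->invalidated_slots<OLD_TO_OLD>();
    // Releasing is likewise per type:
    chunk->ReleaseInvalidatedSlots<OLD_TO_NEW>();
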
 
@@ -925,7 +916,7 @@ class MemoryChunk : public BasicMemoryChunk {
   // is ceil(size() / kPageSize).
   SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
   TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
-  InvalidatedSlots* invalidated_slots_;
+  InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
 
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
@@ -1811,28 +1802,14 @@ class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
     return maximum_freed;
   }
 
-  Page* GetPageForSize(size_t size_in_bytes) override {
-    const int minimum_category =
-        static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
-    Page* page = GetPageForCategoryType(kHuge);
-    if (!page && static_cast<int>(kLarge) >= minimum_category)
-      page = GetPageForCategoryType(kLarge);
-    if (!page && static_cast<int>(kMedium) >= minimum_category)
-      page = GetPageForCategoryType(kMedium);
-    if (!page && static_cast<int>(kSmall) >= minimum_category)
-      page = GetPageForCategoryType(kSmall);
-    if (!page && static_cast<int>(kTiny) >= minimum_category)
-      page = GetPageForCategoryType(kTiny);
-    if (!page && static_cast<int>(kTiniest) >= minimum_category)
-      page = GetPageForCategoryType(kTiniest);
-    return page;
-  }
+  inline Page* GetPageForSize(size_t size_in_bytes) override;
 
   FreeListLegacy();
   ~FreeListLegacy();
 
   V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
-                                           size_t* node_size) override;
+                                           size_t* node_size,
+                                           AllocationOrigin origin) override;
 
  private:
   enum { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };
@@ -1909,22 +1886,14 @@ class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
     return kHugeAllocationMax;
   }
 
-  Page* GetPageForSize(size_t size_in_bytes) override {
-    const int minimum_category =
-        static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
-    Page* page = GetPageForCategoryType(kHuge);
-    if (!page && static_cast<int>(kLarge) >= minimum_category)
-      page = GetPageForCategoryType(kLarge);
-    if (!page && static_cast<int>(kMedium) >= minimum_category)
-      page = GetPageForCategoryType(kMedium);
-    return page;
-  }
+  inline Page* GetPageForSize(size_t size_in_bytes) override;
 
   FreeListFastAlloc();
   ~FreeListFastAlloc();
 
   V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
-                                           size_t* node_size) override;
+                                           size_t* node_size,
+                                           AllocationOrigin origin) override;
 
  private:
   enum { kMedium, kLarge, kHuge };
@@ -1951,14 +1920,10 @@ class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
     }
     return kHuge;
   }
-
-  Page* GetPageForCategoryType(FreeListCategoryType type) {
-    return top(type) ? top(type)->page() : nullptr;
-  }
 };
 
-// Use 49 Freelists: on per size between 24 and 256, and then a few ones for
-// larger sizes. See the variable |categories_max| for the size of each
+// Use 24 Freelists: one per 16 bytes between 24 and 256, and then a few for
+// larger sizes. See the variable |categories_min| for the size of each
 // Freelist.  Allocation is done using a best-fit strategy (considering only the
 // first element of each category though).
 // Performance is expected to be worse than with FreeListLegacy, but memory
@@ -1973,41 +1938,214 @@ class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
   ~FreeListMany();
 
   V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
-                                           size_t* node_size) override;
+                                           size_t* node_size,
+                                           AllocationOrigin origin) override;
 
- private:
+ protected:
   static const size_t kMinBlockSize = 3 * kTaggedSize;
 
   // This is a conservative upper bound. The actual maximum block size takes
   // padding and alignment of data and code pages into account.
   static const size_t kMaxBlockSize = Page::kPageSize;
+  // Largest size for which categories are still precise, and for which we can
+  // therefore compute the category in constant time.
+  static const size_t kPreciseCategoryMaxSize = 256;
 
   // Categories boundaries generated with:
   // perl -E '
-  //   @cat = map {$_*8} 3..32, 48, 64;
-  //   while ($cat[-1] <= 32768) {
-  //     push @cat, $cat[-1]+$cat[-3], $cat[-1]*2
-  //   }
-  //   push @cat, 4080, 4088;
-  //   @cat = sort { $a <=> $b } @cat;
-  //   push @cat, "Page::kPageSize";
-  //   say join ", ", @cat;
-  //   say "\n", scalar @cat'
-  // Note the special case for 4080 and 4088 bytes: experiments have shown that
-  // this category classes are more used than others of similar sizes
-  static const int kNumberOfCategories = 49;
-  static const size_t categories_max[kNumberOfCategories];
+  //      @cat = (24, map {$_*16} 2..16, 48, 64);
+  //      while ($cat[-1] <= 32768) {
+  //        push @cat, $cat[-1]*2
+  //      }
+  //      say join ", ", @cat;
+  //      say "\n", scalar @cat'
+  static const int kNumberOfCategories = 24;
+  static constexpr unsigned int categories_min[kNumberOfCategories] = {
+      24,  32,  48,  64,  80,  96,   112,  128,  144,  160,   176,   192,
+      208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
 
   // Return the smallest category that could hold |size_in_bytes| bytes.
   FreeListCategoryType SelectFreeListCategoryType(
       size_t size_in_bytes) override {
-    for (int cat = kFirstCategory; cat < last_category_; cat++) {
-      if (size_in_bytes <= categories_max[cat]) {
+    if (size_in_bytes <= kPreciseCategoryMaxSize) {
+      if (size_in_bytes < categories_min[1]) return 0;
+      return static_cast<FreeListCategoryType>(size_in_bytes >> 4) - 1;
+    }
+    for (int cat = (kPreciseCategoryMaxSize >> 4) - 1; cat < last_category_;
+         cat++) {
+      if (size_in_bytes < categories_min[cat + 1]) {
+        return cat;
+      }
+    }
+    return last_category_;
+  }
+
+  FRIEND_TEST(SpacesTest, FreeListManySelectFreeListCategoryType);
+  FRIEND_TEST(SpacesTest, FreeListManyGuaranteedAllocatable);
+};
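
A worked example of the constant-time branch above, assuming the
categories_min table shown (a sketch, not V8 code):

    #include <cassert>
    #include <cstddef>

    // Precise categories only (sizes <= 256); values copied from
    // categories_min above.
    static const unsigned kMin[16] = {24,  32,  48,  64,  80,  96,  112, 128,
                                      144, 160, 176, 192, 208, 224, 240, 256};

    int PreciseCategory(size_t size) {
      if (size < 32) return 0;  // below categories_min[1]
      return static_cast<int>(size >> 4) - 1;
    }

    int main() {
      assert(PreciseCategory(24) == 0 && kMin[0] == 24);
      assert(PreciseCategory(48) == 2 && kMin[2] == 48);
      assert(PreciseCategory(255) == 14 && kMin[14] == 240);  // holds 240..255
      assert(PreciseCategory(256) == 15 && kMin[15] == 256);
      return 0;
    }
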
+
+// Same as FreeListMany but uses a cache to know which categories are empty.
+// The cache (|next_nonempty_category|) is maintained in a way such that for
+// each category c, next_nonempty_category[c] contains the first non-empty
+// category greater than or equal to c that may hold an object of size c.
+// Allocation is done using the same strategy as FreeListMany (i.e., best fit).
+class V8_EXPORT_PRIVATE FreeListManyCached : public FreeListMany {
+ public:
+  FreeListManyCached();
+
+  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+                                           size_t* node_size,
+                                           AllocationOrigin origin) override;
+
+  size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;
+
+  void Reset() override;
+
+  bool AddCategory(FreeListCategory* category) override;
+  void RemoveCategory(FreeListCategory* category) override;
+
+ protected:
+  // Updates the cache after adding something in the category |cat|.
+  void UpdateCacheAfterAddition(FreeListCategoryType cat) {
+    for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] > cat;
+         i--) {
+      next_nonempty_category[i] = cat;
+    }
+  }
+
+  // Updates the cache after emptying category |cat|.
+  void UpdateCacheAfterRemoval(FreeListCategoryType cat) {
+    for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] == cat;
+         i--) {
+      next_nonempty_category[i] = next_nonempty_category[cat + 1];
+    }
+  }
+
+#ifdef DEBUG
+  void CheckCacheIntegrity() {
+    for (int i = 0; i <= last_category_; i++) {
+      DCHECK(next_nonempty_category[i] == last_category_ + 1 ||
+             categories_[next_nonempty_category[i]] != nullptr);
+      for (int j = i; j < next_nonempty_category[i]; j++) {
+        DCHECK(categories_[j] == nullptr);
+      }
+    }
+  }
+#endif
+
+  // The cache is overallocated by one so that the last element is always
+  // defined, and when updating the cache, we can always use cache[i+1] as long
+  // as i < kNumberOfCategories.
+  int next_nonempty_category[kNumberOfCategories + 1];
+
+ private:
+  void ResetCache() {
+    for (int i = 0; i < kNumberOfCategories; i++) {
+      next_nonempty_category[i] = kNumberOfCategories;
+    }
+    // Setting the after-last element as well, as explained in the cache's
+    // declaration.
+    next_nonempty_category[kNumberOfCategories] = kNumberOfCategories;
+  }
+};
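
A self-contained toy model of the next_nonempty_category invariant above
(N is a hypothetical stand-in for kNumberOfCategories): after every update,
cache[c] is the first non-empty category greater than or equal to c.

    #include <cstdio>

    const int N = 6;
    int cache[N + 1];

    void Reset() {
      for (int i = 0; i <= N; i++) cache[i] = N;  // N means "none"
    }

    void AfterAddition(int cat) {  // mirrors UpdateCacheAfterAddition
      for (int i = cat; i >= 0 && cache[i] > cat; i--) cache[i] = cat;
    }

    void AfterRemoval(int cat) {  // mirrors UpdateCacheAfterRemoval
      for (int i = cat; i >= 0 && cache[i] == cat; i--)
        cache[i] = cache[cat + 1];
    }

    int main() {
      Reset();
      AfterAddition(4);  // cache: 4 4 4 4 4 6 6
      AfterAddition(2);  // cache: 2 2 2 4 4 6 6
      AfterRemoval(2);   // cache: 4 4 4 4 4 6 6 again
      for (int i = 0; i <= N; i++) std::printf("%d ", cache[i]);
      return 0;
    }
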
+
+// Same as FreeListManyCached but uses a fast path.
+// The fast path overallocates by at least 1.85k bytes. The idea of this 1.85k
+// is: we want the fast path to always overallocate, even for larger
+// categories. Therefore, we have two choices: either overallocate by
+// "size_in_bytes * something" or overallocate by "size_in_bytes +
+// something". We choose the later, as the former will tend to overallocate too
+// much for larger objects. The 1.85k (= 2048 - 128) has been chosen such that
+// for tiny objects (size <= 128 bytes), the first category considered is the
+// 36th (which holds objects of 2k to 3k), while for larger objects, the first
+// category considered will be one that guarantees a 1.85k+ bytes
+// overallocation. Using 2k rather than 1.85k would have resulted in either a
+// more complex logic for SelectFastAllocationFreeListCategoryType, or the 36th
+// category (2k to 3k) not being used; both of which are undesirable.
+// A secondary fast path is used for tiny objects (size <= 128), in order to
+// consider categories from 256 to 2048 bytes for them.
+// Note that this class uses a precise GetPageForSize (inherited from
+// FreeListMany), which makes its fast path less fast in the Scavenger. This is
+// deliberate, since this class's only purpose is to be used by
+// FreeListManyCachedOrigin, which is precise for the scavenger.
+class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
+ public:
+  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+                                           size_t* node_size,
+                                           AllocationOrigin origin) override;
+
+ protected:
+  // Objects in the 18th category are at least 2048 bytes
+  static const FreeListCategoryType kFastPathFirstCategory = 18;
+  static const size_t kFastPathStart = 2048;
+  static const size_t kTinyObjectMaxSize = 128;
+  static const size_t kFastPathOffset = kFastPathStart - kTinyObjectMaxSize;
+  // Objects in the 15th category are at least 256 bytes
+  static const FreeListCategoryType kFastPathFallBackTiny = 15;
+
+  STATIC_ASSERT(categories_min[kFastPathFirstCategory] == kFastPathStart);
+  STATIC_ASSERT(categories_min[kFastPathFallBackTiny] ==
+                kTinyObjectMaxSize * 2);
+
+  FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+      size_t size_in_bytes) {
+    DCHECK(size_in_bytes < kMaxBlockSize);
+
+    if (size_in_bytes >= categories_min[last_category_]) return last_category_;
+
+    size_in_bytes += kFastPathOffset;
+    for (int cat = kFastPathFirstCategory; cat < last_category_; cat++) {
+      if (size_in_bytes <= categories_min[cat]) {
         return cat;
       }
     }
     return last_category_;
   }
+
+  FRIEND_TEST(
+      SpacesTest,
+      FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType);
+};
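
Worked instances of the offset arithmetic above, using kFastPathOffset =
2048 - 128 = 1920 and the categories_min table from FreeListMany:

    // size_in_bytes = 128 (tiny):  128 + 1920 = 2048 <= categories_min[18]
    //   -> category 18 (blocks >= 2048): overallocation >= 1920 bytes.
    // size_in_bytes = 300:         300 + 1920 = 2220 <= categories_min[19]
    //   -> category 19 (blocks >= 4096): overallocation >= 3796 bytes.
    // size_in_bytes >= 65536:      already >= categories_min[last_category_]
    //   -> last_category_ is returned directly, before the offset is added.
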
+
+// Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise.
+// The reasoning behind this FreeList is the following: the GC runs in
+// parallel, and therefore, more expensive allocations there are less
+// noticeable. On the other hand, the generated code and runtime need to be very
+// fast. Therefore, the strategy for the former is one that is not very
+// efficient, but reduces fragmentation (FreeListManyCached), while the strategy
+// for the latter is one that is very efficient, but introduces some
+// fragmentation (FreeListManyCachedFastPath).
+class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
+    : public FreeListManyCachedFastPath {
+ public:
+  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+                                           size_t* node_size,
+                                           AllocationOrigin origin) override;
+};
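
A hedged sketch of the dispatch this class performs (the real body lives in
spaces.cc and may differ in detail):

    // Sketch, not the verbatim implementation: GC-originated allocations
    // take the slower, fragmentation-reducing path; runtime and
    // generated-code allocations take the fast path.
    FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
                                                 size_t* node_size,
                                                 AllocationOrigin origin) {
      if (origin == AllocationOrigin::kGC) {
        return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
      }
      return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
                                                  origin);
    }
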
+
+// FreeList for maps: since maps are all the same size, uses a single freelist.
+class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
+ public:
+  size_t GuaranteedAllocatable(size_t maximum_freed) override;
+
+  Page* GetPageForSize(size_t size_in_bytes) override;
+
+  FreeListMap();
+  ~FreeListMap();
+
+  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+                                           size_t* node_size,
+                                           AllocationOrigin origin) override;
+
+ private:
+  static const size_t kMinBlockSize = Map::kSize;
+  static const size_t kMaxBlockSize = Page::kPageSize;
+  static const FreeListCategoryType kOnlyCategory = 0;
+
+  FreeListCategoryType SelectFreeListCategoryType(
+      size_t size_in_bytes) override {
+    return kOnlyCategory;
+  }
 };
 
 // LocalAllocationBuffer represents a linear allocation area that is created
@@ -2108,6 +2246,10 @@ class SpaceWithLinearArea : public Space {
   V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
       size_t min_size) = 0;
 
+  V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+
+  void PrintAllocationsOrigins();
+
  protected:
   // If we are doing inline allocation in steps, this method performs the 'step'
   // operation. top is the memory address of the bump pointer at the last
@@ -2125,6 +2267,9 @@ class SpaceWithLinearArea : public Space {
   // TODO(ofrobots): make these private after refactoring is complete.
   LinearAllocationArea allocation_info_;
   Address top_on_previous_step_;
+
+  size_t allocations_origins_[static_cast<int>(
+      AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
 };
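
A plausible shape for the new origin bookkeeping (hedged; the actual bodies
are defined in spaces.cc, not shown in this hunk):

    // Sketch: bump the per-origin counter; PrintAllocationsOrigins would
    // then just dump this array.
    void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
      DCHECK_LT(static_cast<int>(origin),
                static_cast<int>(AllocationOrigin::kNumberOfAllocationOrigins));
      allocations_origins_[static_cast<int>(origin)]++;
    }
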
 
 class V8_EXPORT_PRIVATE PagedSpace
@@ -2190,17 +2335,19 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
-      int size_in_bytes);
+      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   // Allocate the requested number of bytes in the space double aligned if
   // possible, return a failure object if not.
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
-      int size_in_bytes, AllocationAlignment alignment);
+      int size_in_bytes, AllocationAlignment alignment,
+      AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   // Allocate the requested number of bytes in the space and consider allocation
   // alignment if needed.
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
-      int size_in_bytes, AllocationAlignment alignment);
+      int size_in_bytes, AllocationAlignment alignment,
+      AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
     if (size_in_bytes == 0) return 0;
@@ -2373,7 +2520,8 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Sets up a linear allocation area that fits the given number of bytes.
   // Returns false if there is not enough space and the caller has to retry
   // after collecting garbage.
-  inline bool EnsureLinearAllocationArea(int size_in_bytes);
+  inline bool EnsureLinearAllocationArea(int size_in_bytes,
+                                         AllocationOrigin origin);
   // Allocates an object from the linear allocation area. Assumes that the
   // linear allocation area is large enough to fit the object.
   inline HeapObject AllocateLinearly(int size_in_bytes);
@@ -2385,24 +2533,25 @@ class V8_EXPORT_PRIVATE PagedSpace
                                                AllocationAlignment alignment);
 
   V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
-      size_t size_in_bytes);
+      size_t size_in_bytes, AllocationOrigin origin);
 
   // If sweeping is still in progress try to sweep unswept pages. If that is
   // not successful, wait for the sweeper threads and retry free-list
   // allocation. Returns false if there is not enough space and the caller
   // has to retry after collecting garbage.
-  V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
+  V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(
+      int size_in_bytes, AllocationOrigin origin);
 
   // Slow path of AllocateRaw. This function is space-dependent. Returns false
   // if there is not enough space and the caller has to retry after
   // collecting garbage.
   V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
-      int size_in_bytes);
+      int size_in_bytes, AllocationOrigin origin);
 
   // Implementation of SlowAllocateRaw. Returns false if there is not enough
   // space and the caller has to retry after collecting garbage.
   V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
-      int size_in_bytes);
+      int size_in_bytes, AllocationOrigin origin);
 
   Executability executable_;
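
A hedged usage sketch of the widened entry points; `space` and `size` are
hypothetical, and the call sites are illustrative only:

    // Runtime callers stay unchanged thanks to the kRuntime default:
    AllocationResult r1 = space->AllocateRawUnaligned(size);
    // GC-internal callers tag themselves explicitly, so the per-origin
    // counters (and FreeListManyCachedOrigin) can tell them apart:
    AllocationResult r2 =
        space->AllocateRaw(size, kWordAligned, AllocationOrigin::kGC);
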
 
@@ -2773,16 +2922,19 @@ class V8_EXPORT_PRIVATE NewSpace
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
-  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
+  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+                     AllocationOrigin origin = AllocationOrigin::kRuntime);
 
-  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
-  AllocateRawUnaligned(int size_in_bytes);
+  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
+      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
-  AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
+  AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
+              AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
-      int size_in_bytes, AllocationAlignment alignment);
+      int size_in_bytes, AllocationAlignment alignment,
+      AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetLinearAllocationArea();
@@ -2888,10 +3040,10 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
   bool snapshotable() override { return false; }
 
   V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
-      int size_in_bytes) override;
+      int size_in_bytes, AllocationOrigin origin) override;
 
   V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
-      int size_in_bytes) override;
+      int size_in_bytes, AllocationOrigin origin) override;
 };
 
 // A collection of |CompactionSpace|s used by a single compaction task.
@@ -2961,8 +3113,7 @@ class MapSpace : public PagedSpace {
  public:
   // Creates a map space object.
   explicit MapSpace(Heap* heap)
-      : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
-                   FreeList::CreateFreeList()) {}
+      : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
 
   int RoundSizeDownToObjectAlignment(int size) override {
     if (base::bits::IsPowerOfTwo(Map::kSize)) {
@@ -2972,6 +3123,8 @@ class MapSpace : public PagedSpace {
     }
   }
 
+  void SortFreeList();
+
 #ifdef VERIFY_HEAP
   void VerifyObject(HeapObject obj) override;
 #endif
@@ -3006,6 +3159,8 @@ class ReadOnlySpace : public PagedSpace {
   // to write it into the free list nodes that were already created.
   void RepairFreeListsAfterDeserialization();
 
+  size_t Available() override { return 0; }
+
  private:
   // Unseal the space after is has been sealed, by making it writable.
   // TODO(v8:7464): Only possible if the space hasn't been detached.
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
index 4609c83ca036b0..b43098bf57d350 100644
--- a/deps/v8/src/heap/store-buffer-inl.h
+++ b/deps/v8/src/heap/store-buffer-inl.h
@@ -12,16 +12,6 @@
 namespace v8 {
 namespace internal {
 
-void StoreBuffer::InsertDeletionIntoStoreBuffer(Address start, Address end) {
-  if (top_ + sizeof(Address) * 2 > limit_[current_]) {
-    StoreBufferOverflow(heap_->isolate());
-  }
-  *top_ = MarkDeletionAddress(start);
-  top_++;
-  *top_ = end;
-  top_++;
-}
-
 void StoreBuffer::InsertIntoStoreBuffer(Address slot) {
   if (top_ + sizeof(Address) > limit_[current_]) {
     StoreBufferOverflow(heap_->isolate());
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 7d0dcfc37070e5..349e7877409c80 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -28,7 +28,6 @@ StoreBuffer::StoreBuffer(Heap* heap)
   }
   task_running_ = false;
   insertion_callback = &InsertDuringRuntime;
-  deletion_callback = &DeleteDuringRuntime;
 }
 
 void StoreBuffer::SetUp() {
@@ -91,22 +90,11 @@ void StoreBuffer::TearDown() {
   }
 }
 
-void StoreBuffer::DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
-                                      Address end) {
-  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
-  store_buffer->InsertDeletionIntoStoreBuffer(start, end);
-}
-
 void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
   DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
   store_buffer->InsertIntoStoreBuffer(slot);
 }
 
-void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
-                                                Address start, Address end) {
-  UNREACHABLE();
-}
-
 void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
                                                 Address slot) {
   DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
@@ -117,10 +105,8 @@ void StoreBuffer::SetMode(StoreBufferMode mode) {
   mode_ = mode;
   if (mode == NOT_IN_GC) {
     insertion_callback = &InsertDuringRuntime;
-    deletion_callback = &DeleteDuringRuntime;
   } else {
     insertion_callback = &InsertDuringGarbageCollection;
-    deletion_callback = &DeleteDuringGarbageCollection;
   }
 }
 
@@ -160,24 +146,9 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
         MemoryChunk::BaseAddress(addr) != chunk->address()) {
       chunk = MemoryChunk::FromAnyPointerAddress(addr);
     }
-    if (IsDeletionAddress(addr)) {
-      last_inserted_addr = kNullAddress;
-      current++;
-      Address end = *current;
-      DCHECK(!IsDeletionAddress(end));
-      addr = UnmarkDeletionAddress(addr);
-      if (end) {
-        RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, addr, end,
-                                               SlotSet::PREFREE_EMPTY_BUCKETS);
-      } else {
-        RememberedSet<OLD_TO_NEW>::Remove(chunk, addr);
-      }
-    } else {
-      DCHECK(!IsDeletionAddress(addr));
-      if (addr != last_inserted_addr) {
-        RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
-        last_inserted_addr = addr;
-      }
+    if (addr != last_inserted_addr) {
+      RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
+      last_inserted_addr = addr;
     }
   }
   lazy_top_[index] = nullptr;
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 62b10b90714397..025bb6a060b272 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -33,17 +33,11 @@ class StoreBuffer {
       Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
           1 << (11 + kSystemPointerSizeLog2));
   static const int kStoreBufferMask = kStoreBufferSize - 1;
-  static const intptr_t kDeletionTag = 1;
 
   V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
 
-  static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
-                                            Address start, Address end);
   static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
                                             Address slot);
-
-  static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
-                                  Address end);
   static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot);
 
   explicit StoreBuffer(Heap* heap);
@@ -61,19 +55,6 @@ class StoreBuffer {
   // the remembered set.
   void MoveAllEntriesToRememberedSet();
 
-  inline bool IsDeletionAddress(Address address) const {
-    return address & kDeletionTag;
-  }
-
-  inline Address MarkDeletionAddress(Address address) {
-    return address | kDeletionTag;
-  }
-
-  inline Address UnmarkDeletionAddress(Address address) {
-    return address & ~kDeletionTag;
-  }
-
-  inline void InsertDeletionIntoStoreBuffer(Address start, Address end);
   inline void InsertIntoStoreBuffer(Address slot);
 
   void InsertEntry(Address slot) {
@@ -83,16 +64,6 @@ class StoreBuffer {
     insertion_callback(this, slot);
   }
 
-  // If we only want to delete a single slot, end should be set to null which
-  // will be written into the second field. When processing the store buffer
-  // the more efficient Remove method will be called in this case.
-  void DeleteEntry(Address start, Address end = kNullAddress) {
-    // Deletions coming from the GC are directly deleted from the remembered
-    // set. Deletions coming from the runtime are added to the store buffer
-    // to allow concurrent processing.
-    deletion_callback(this, start, end);
-  }
-
   void SetMode(StoreBufferMode mode);
 
   // Used by the concurrent processing thread to transfer entries from the
@@ -174,7 +145,6 @@ class StoreBuffer {
   // Callbacks are more efficient than reading out the gc state for every
   // store buffer operation.
   void (*insertion_callback)(StoreBuffer*, Address);
-  void (*deletion_callback)(StoreBuffer*, Address, Address);
 };
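
With the deletion path gone, the store buffer records insertions only; a
short restatement of the retained dispatch above:

    // SetMode(NOT_IN_GC)   -> insertion_callback = &InsertDuringRuntime
    // SetMode(<GC mode>)   -> insertion_callback = &InsertDuringGarbageCollection
    // InsertEntry(slot)    -> insertion_callback(this, slot): one indirect
    //                         call, no per-slot branch on the GC state.
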
 
 }  // namespace internal
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index cbb7d717b071da..c3c6b58835ca5e 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -8,6 +8,7 @@
 #include "src/execution/vm-state-inl.h"
 #include "src/heap/array-buffer-tracker-inl.h"
 #include "src/heap/gc-tracer.h"
+#include "src/heap/invalidated-slots-inl.h"
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/remembered-set.h"
 #include "src/objects/objects-inl.h"
@@ -154,12 +155,21 @@ void Sweeper::StartSweeping() {
   MajorNonAtomicMarkingState* marking_state =
       heap_->mark_compact_collector()->non_atomic_marking_state();
   ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
-    int space_index = GetSweepSpaceIndex(space);
-    std::sort(
-        sweeping_list_[space_index].begin(), sweeping_list_[space_index].end(),
-        [marking_state](Page* a, Page* b) {
-          return marking_state->live_bytes(a) > marking_state->live_bytes(b);
-        });
+    // Sorting is done in order to make compaction more efficient: by sweeping
+    // pages with the most free bytes first, we make it more likely that when
+    // evacuating a page, already swept pages will have enough free bytes to
+    // hold the objects to move (and therefore, we won't need to wait for more
+    // pages to be swept in order to move those objects).
+    // Since maps don't move, there is no need to sort the pages from MAP_SPACE
+    // before sweeping them.
+    if (space != MAP_SPACE) {
+      int space_index = GetSweepSpaceIndex(space);
+      std::sort(
+          sweeping_list_[space_index].begin(),
+          sweeping_list_[space_index].end(), [marking_state](Page* a, Page* b) {
+            return marking_state->live_bytes(a) > marking_state->live_bytes(b);
+          });
+    }
   });
 }
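
A toy illustration of the ordering (hedged: it relies on pages later being
popped from the back of sweeping_list_, which is what GetSweepingPageSafe
does):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> live = {700, 100, 400};  // live_bytes per page
      std::sort(live.begin(), live.end(),
                [](int a, int b) { return a > b; });  // same comparator
      // Consuming from the back yields the page with 100 live bytes --
      // i.e. the most free space -- first.
      std::printf("%d\n", live.back());  // prints 100
      return 0;
    }
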
 
@@ -241,8 +251,10 @@ void Sweeper::EnsureCompleted() {
 
 bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
 
-int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
-                      FreeSpaceTreatmentMode free_space_mode) {
+int Sweeper::RawSweep(
+    Page* p, FreeListRebuildingMode free_list_mode,
+    FreeSpaceTreatmentMode free_space_mode,
+    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
   Space* space = p->owner();
   DCHECK_NOT_NULL(space);
   DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
@@ -265,6 +277,15 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
   ArrayBufferTracker::FreeDead(p, marking_state_);
 
   Address free_start = p->area_start();
+  InvalidatedSlotsCleanup old_to_new_cleanup =
+      InvalidatedSlotsCleanup::NoCleanup(p);
+
+  // Clean invalidated slots during the final atomic pause. After resuming
+  // execution this isn't necessary, since invalid old-to-new refs were
+  // already removed by mark-compact's update-pointers phase.
+  if (invalidated_slots_in_free_space ==
+      FreeSpaceMayContainInvalidatedSlots::kYes)
+    old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
 
   intptr_t live_bytes = 0;
   intptr_t freed_bytes = 0;
@@ -309,6 +330,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
             static_cast<uint32_t>(free_start - p->address()),
             static_cast<uint32_t>(free_end - p->address())));
       }
+
+      old_to_new_cleanup.Free(free_start, free_end);
     }
     Map map = object.synchronized_map();
     int size = object.SizeFromMap(map);
@@ -341,6 +364,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
           static_cast<uint32_t>(free_start - p->address()),
           static_cast<uint32_t>(p->area_end() - p->address())));
     }
+
+    old_to_new_cleanup.Free(free_start, p->area_end());
   }
 
   // Clear invalid typed slots after collecting all free ranges.
@@ -390,13 +415,15 @@ bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
   return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
 }
 
-int Sweeper::ParallelSweepSpace(AllocationSpace identity,
-                                int required_freed_bytes, int max_pages) {
+int Sweeper::ParallelSweepSpace(
+    AllocationSpace identity, int required_freed_bytes, int max_pages,
+    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
   int max_freed = 0;
   int pages_freed = 0;
   Page* page = nullptr;
   while ((page = GetSweepingPageSafe(identity)) != nullptr) {
-    int freed = ParallelSweepPage(page, identity);
+    int freed =
+        ParallelSweepPage(page, identity, invalidated_slots_in_free_space);
     if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
       // Free list of a never-allocate page will be dropped later on.
       continue;
@@ -410,7 +437,9 @@ int Sweeper::ParallelSweepSpace(AllocationSpace identity,
   return max_freed;
 }
 
-int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
+int Sweeper::ParallelSweepPage(
+    Page* page, AllocationSpace identity,
+    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
   // Early bailout for pages that are swept outside of the regular sweeping
   // path. This check here avoids taking the lock first, avoiding deadlocks.
   if (page->SweepingDone()) return 0;
@@ -430,7 +459,8 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
     page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
     const FreeSpaceTreatmentMode free_space_mode =
         Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
-    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
+    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
+                         invalidated_slots_in_free_space);
     DCHECK(page->SweepingDone());
 
     // After finishing sweeping of a page we clean up its remembered set.
@@ -479,11 +509,14 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
 }
 
 void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
+#ifdef DEBUG
   DCHECK_GE(page->area_size(),
             static_cast<size_t>(marking_state_->live_bytes(page)));
   DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
-  page->ForAllFreeListCategories(
-      [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+  page->ForAllFreeListCategories([page](FreeListCategory* category) {
+    DCHECK(!category->is_linked(page->owner()->free_list()));
+  });
+#endif  // DEBUG
   page->set_concurrent_sweeping_state(Page::kSweepingPending);
   heap_->paged_space(space)->IncreaseAllocatedBytes(
       marking_state_->live_bytes(page), page);
@@ -586,7 +619,8 @@ void Sweeper::MakeIterable(Page* page) {
   DCHECK(IsValidIterabilitySpace(page->owner_identity()));
   const FreeSpaceTreatmentMode free_space_mode =
       Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
-  RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
+  RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
+           FreeSpaceMayContainInvalidatedSlots::kNo);
 }
 
 }  // namespace internal
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 97de7a028d191f..f6ecba8450ce60 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -70,12 +70,8 @@ class Sweeper {
   };
 
   enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
-  enum ClearOldToNewSlotsMode {
-    DO_NOT_CLEAR,
-    CLEAR_REGULAR_SLOTS,
-    CLEAR_TYPED_SLOTS
-  };
   enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
+  enum class FreeSpaceMayContainInvalidatedSlots { kYes, kNo };
 
   Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
 
@@ -83,14 +79,21 @@ class Sweeper {
 
   void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
 
-  int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
-                         int max_pages = 0);
-  int ParallelSweepPage(Page* page, AllocationSpace identity);
+  int ParallelSweepSpace(
+      AllocationSpace identity, int required_freed_bytes, int max_pages = 0,
+      FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
+          FreeSpaceMayContainInvalidatedSlots::kNo);
+  int ParallelSweepPage(
+      Page* page, AllocationSpace identity,
+      FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
+          FreeSpaceMayContainInvalidatedSlots::kNo);
 
   void ScheduleIncrementalSweepingTask();
 
-  int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
-               FreeSpaceTreatmentMode free_space_mode);
+  int RawSweep(
+      Page* p, FreeListRebuildingMode free_list_mode,
+      FreeSpaceTreatmentMode free_space_mode,
+      FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space);
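
A hedged example of a call site for the new parameter (`sweeper` and
`required_freed_bytes` are illustrative placeholders):

    // During the final atomic pause, free space may still carry stale
    // old-to-new entries, so the caller opts into cleanup:
    sweeper->ParallelSweepSpace(
        OLD_SPACE, required_freed_bytes, 0,
        Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes);
    // Outside the pause, the kNo default applies and no cleanup is done.
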
 
   // After calling this function sweeping is considered to be in progress
   // and the main thread can sweep lazily, but the background sweeper tasks
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 51788b41e4a98c..816ddb52c53dd0 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -3,5 +3,6 @@ ishell@chromium.org
 jkummerow@chromium.org
 mvstanton@chromium.org
 verwaest@chromium.org
+mythria@chromium.org
 
 # COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 7aebf857a281cc..f9efcba05f72aa 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -35,7 +35,7 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
     SloppyTNode<DataHandler> handler, int data_index) {
 #ifdef DEBUG
   TNode<Map> handler_map = LoadMap(handler);
-  TNode<Int32T> instance_type = LoadMapInstanceType(handler_map);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(handler_map);
 #endif
   CSA_ASSERT(this,
              Word32Or(InstanceTypeEqual(instance_type, LOAD_HANDLER_TYPE),
@@ -78,7 +78,8 @@ TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase(
   // Adding |header_size| with a separate IntPtrAdd rather than passing it
   // into ElementOffsetFromIndex() allows it to be folded into a single
   // [base, index, offset] indirect memory access on x64.
-  Node* offset = ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, SMI_PARAMETERS);
+  TNode<IntPtrT> offset =
+      ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, SMI_PARAMETERS);
   TNode<MaybeObject> feedback = ReinterpretCast<MaybeObject>(
       Load(MachineType::AnyTagged(), vector,
            IntPtrAdd(offset, IntPtrConstant(header_size))));
@@ -207,7 +208,7 @@ void AccessorAssembler::HandleLoadAccessor(
 
   CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
   CSA_CHECK(this, IsNotCleared(maybe_context));
-  TNode<Object> context = GetHeapObjectAssumeWeak(maybe_context);
+  TNode<HeapObject> context = GetHeapObjectAssumeWeak(maybe_context);
 
   TNode<Foreign> foreign = CAST(
       LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset));
@@ -241,8 +242,9 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
                                         Label* rebox_double,
                                         ExitPoint* exit_point) {
   Comment("field_load");
-  Node* index = DecodeWord<LoadHandler::FieldIndexBits>(handler_word);
-  Node* offset = IntPtrMul(index, IntPtrConstant(kTaggedSize));
+  TNode<IntPtrT> index =
+      Signed(DecodeWord<LoadHandler::FieldIndexBits>(handler_word));
+  TNode<IntPtrT> offset = IntPtrMul(index, IntPtrConstant(kTaggedSize));
 
   Label inobject(this), out_of_object(this);
   Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
@@ -259,8 +261,8 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
       var_double_value->Bind(
           LoadObjectField(holder, offset, MachineType::Float64()));
     } else {
-      Node* mutable_heap_number = LoadObjectField(holder, offset);
-      var_double_value->Bind(LoadHeapNumberValue(mutable_heap_number));
+      TNode<HeapNumber> heap_number = CAST(LoadObjectField(holder, offset));
+      var_double_value->Bind(LoadHeapNumberValue(heap_number));
     }
     Goto(rebox_double);
   }
@@ -268,13 +270,13 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
   BIND(&out_of_object);
   {
     Label is_double(this);
-    Node* properties = LoadFastProperties(holder);
-    Node* value = LoadObjectField(properties, offset);
+    TNode<HeapObject> properties = LoadFastProperties(holder);
+    TNode<Object> value = LoadObjectField(properties, offset);
     GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
     exit_point->Return(value);
 
     BIND(&is_double);
-    var_double_value->Bind(LoadHeapNumberValue(value));
+    var_double_value->Bind(LoadHeapNumberValue(CAST(value)));
     Goto(rebox_double);
   }
 }
@@ -298,9 +300,10 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
   VARIABLE(var_double_value, MachineRepresentation::kFloat64);
   Label rebox_double(this, &var_double_value);
 
-  TNode<WordT> handler_word = SmiUntag(smi_handler);
+  TNode<IntPtrT> handler_word = SmiUntag(smi_handler);
   TNode<IntPtrT> handler_kind =
       Signed(DecodeWord<LoadHandler::KindBits>(handler_word));
+
   if (support_elements == kSupportElements) {
     Label if_element(this), if_indexed_string(this), if_property(this);
     GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kElement)),
@@ -319,10 +322,10 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
 
     BIND(&if_element);
     Comment("element_load");
-    Node* intptr_index = TryToIntptr(p->name(), miss);
-    Node* is_jsarray_condition =
+    TNode<IntPtrT> intptr_index = TryToIntptr(p->name(), miss);
+    TNode<BoolT> is_jsarray_condition =
         IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
-    Node* elements_kind =
+    TNode<Uint32T> elements_kind =
         DecodeWord32FromWord<LoadHandler::ElementsKindBits>(handler_word);
     Label if_hole(this), unimplemented_elements_kind(this),
         if_oob(this, Label::kDeferred);
@@ -345,7 +348,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
       Label return_undefined(this);
 
       // Check if we're allowed to handle OOB accesses.
-      Node* allow_out_of_bounds =
+      TNode<BoolT> allow_out_of_bounds =
           IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
       GotoIfNot(allow_out_of_bounds, miss);
 
@@ -385,15 +388,15 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
         Label if_oob(this, Label::kDeferred);
 
         Comment("indexed string");
-        Node* intptr_index = TryToIntptr(p->name(), miss);
-        Node* length = LoadStringLengthAsWord(holder);
+        TNode<IntPtrT> intptr_index = TryToIntptr(p->name(), miss);
+        TNode<IntPtrT> length = LoadStringLengthAsWord(holder);
         GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
         TNode<Int32T> code = StringCharCodeAt(holder, intptr_index);
         TNode<String> result = StringFromSingleCharCode(code);
         Return(result);
 
         BIND(&if_oob);
-        Node* allow_out_of_bounds =
+        TNode<BoolT> allow_out_of_bounds =
             IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
         GotoIfNot(allow_out_of_bounds, miss);
         GotoIf(IsNoElementsProtectorCellInvalid(), miss);
@@ -426,9 +429,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
       module_export(this, Label::kDeferred), proxy(this, Label::kDeferred),
       native_data_property(this, Label::kDeferred),
       api_getter(this, Label::kDeferred);
+
   GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)), &field);
 
-  GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kConstant)),
+  GotoIf(WordEqual(handler_kind,
+                   IntPtrConstant(LoadHandler::kConstantFromPrototype)),
          &constant);
 
   GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNonExistent)),
@@ -476,11 +481,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
   BIND(&constant);
   {
     Comment("constant_load");
-    TNode<IntPtrT> descriptor =
-        Signed(DecodeWord<LoadHandler::DescriptorBits>(handler_word));
-    Node* value = LoadDescriptorValue(LoadMap(holder), descriptor);
-
-    exit_point->Return(value);
+    exit_point->Return(holder);
   }
 
   BIND(&normal);
@@ -497,8 +498,9 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
       VARIABLE(var_value, MachineRepresentation::kTagged);
       LoadPropertyFromNameDictionary(properties, var_name_index.value(),
                                      &var_details, &var_value);
-      Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
-                                         p->context(), p->receiver(), miss);
+      TNode<Object> value =
+          CallGetterIfAccessor(var_value.value(), var_details.value(),
+                               p->context(), p->receiver(), miss);
       exit_point->Return(value);
     }
   }
@@ -508,9 +510,10 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
     Comment("accessor_load");
     TNode<IntPtrT> descriptor =
         Signed(DecodeWord<LoadHandler::DescriptorBits>(handler_word));
-    Node* accessor_pair = LoadDescriptorValue(LoadMap(holder), descriptor);
-    CSA_ASSERT(this, IsAccessorPair(accessor_pair));
-    Node* getter = LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
+    TNode<AccessorPair> accessor_pair =
+        CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
+    TNode<Object> getter =
+        LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
     CSA_ASSERT(this, Word32BinaryNot(IsTheHole(getter)));
 
     Callable callable = CodeFactory::Call(isolate());
@@ -567,8 +570,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
   {
     CSA_ASSERT(this, IsPropertyCell(holder));
     // Ensure the property cell doesn't contain the hole.
-    Node* value = LoadObjectField(holder, PropertyCell::kValueOffset);
-    Node* details = LoadAndUntagToWord32ObjectField(
+    TNode<Object> value = LoadObjectField(holder, PropertyCell::kValueOffset);
+    TNode<Int32T> details = LoadAndUntagToWord32ObjectField(
         holder, PropertyCell::kPropertyDetailsRawOffset);
     GotoIf(IsTheHole(value), miss);
 
@@ -587,15 +590,15 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
   BIND(&module_export);
   {
     Comment("module export");
-    Node* index = DecodeWord<LoadHandler::ExportsIndexBits>(handler_word);
+    TNode<UintPtrT> index =
+        DecodeWord<LoadHandler::ExportsIndexBits>(handler_word);
     Node* module =
         LoadObjectField(p->receiver(), JSModuleNamespace::kModuleOffset,
                         MachineType::TaggedPointer());
     TNode<ObjectHashTable> exports = CAST(LoadObjectField(
         module, Module::kExportsOffset, MachineType::TaggedPointer()));
-    Node* cell = LoadFixedArrayElement(exports, index);
+    TNode<Cell> cell = CAST(LoadFixedArrayElement(exports, index));
     // The handler is only installed for exports that exist.
-    CSA_ASSERT(this, IsCell(cell));
     Node* value = LoadCellValue(cell);
     Label is_the_hole(this, Label::kDeferred);
     GotoIf(IsTheHole(value), &is_the_hole);
@@ -603,7 +606,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
 
     BIND(&is_the_hole);
     {
-      Node* message = SmiConstant(MessageTemplate::kNotDefined);
+      TNode<Smi> message = SmiConstant(MessageTemplate::kNotDefined);
       exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context(),
                                     message, p->name());
     }
@@ -622,7 +625,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
   GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)),
          &return_true);
 
-  GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kConstant)),
+  GotoIf(WordEqual(handler_kind,
+                   IntPtrConstant(LoadHandler::kConstantFromPrototype)),
          &return_true);
 
   GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNonExistent)),
@@ -686,7 +690,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
   {
     CSA_ASSERT(this, IsPropertyCell(holder));
     // Ensure the property cell doesn't contain the hole.
-    Node* value = LoadObjectField(holder, PropertyCell::kValueOffset);
+    TNode<Object> value = LoadObjectField(holder, PropertyCell::kValueOffset);
     GotoIf(IsTheHole(value), miss);
 
     exit_point->Return(TrueConstant());
@@ -719,7 +723,7 @@ Node* AccessorAssembler::HandleProtoHandler(
   // Check prototype validity cell.
   //
   {
-    Node* maybe_validity_cell =
+    TNode<Object> maybe_validity_cell =
         LoadObjectField(handler, ICHandler::kValidityCellOffset);
     CheckPrototypeValidityCell(maybe_validity_cell, miss);
   }
@@ -728,20 +732,18 @@ Node* AccessorAssembler::HandleProtoHandler(
   // Check smi handler bits.
   //
   {
-    Node* smi_or_code_handler =
+    TNode<Object> smi_or_code_handler =
         LoadObjectField(handler, ICHandler::kSmiHandlerOffset);
     if (on_code_handler) {
       Label if_smi_handler(this);
       GotoIf(TaggedIsSmi(smi_or_code_handler), &if_smi_handler);
 
-      CSA_ASSERT(this, IsCodeMap(LoadMap(smi_or_code_handler)));
+      CSA_ASSERT(this, IsCodeMap(LoadMap(CAST(smi_or_code_handler))));
       on_code_handler(smi_or_code_handler);
 
       BIND(&if_smi_handler);
-    } else {
-      CSA_ASSERT(this, TaggedIsSmi(smi_or_code_handler));
     }
-    Node* handler_flags = SmiUntag(smi_or_code_handler);
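+    // At this point the handler must be a Smi; CAST checks this in debug
+    // builds before it is untagged.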
+    TNode<IntPtrT> handler_flags = SmiUntag(CAST(smi_or_code_handler));
 
     // Lookup on receiver and access checks are not necessary for global ICs
     // because in the former case the validity cell check guards modifications
@@ -767,8 +769,8 @@ Node* AccessorAssembler::HandleProtoHandler(
       {
         TNode<MaybeObject> data2 = LoadHandlerDataField(handler, 2);
         CSA_ASSERT(this, IsWeakOrCleared(data2));
-        TNode<Object> expected_native_context =
-            GetHeapObjectAssumeWeak(data2, miss);
+        TNode<Context> expected_native_context =
+            CAST(GetHeapObjectAssumeWeak(data2, miss));
         EmitAccessCheck(expected_native_context, p->context(), p->receiver(),
                         &done, miss);
       }
@@ -824,7 +826,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
           VARIABLE(var_value, MachineRepresentation::kTagged);
           LoadPropertyFromNameDictionary(properties, name_index, &var_details,
                                          &var_value);
-          Node* value =
+          TNode<Object> value =
               CallGetterIfAccessor(var_value.value(), var_details.value(),
                                    p->context(), p->receiver(), miss);
           exit_point->Return(value);
@@ -832,21 +834,38 @@ void AccessorAssembler::HandleLoadICProtoHandler(
       },
       miss, ic_mode);
 
-  TNode<MaybeObject> maybe_holder = LoadHandlerDataField(handler, 1);
+  TNode<MaybeObject> maybe_holder_or_constant =
+      LoadHandlerDataField(handler, 1);
 
-  Label load_from_cached_holder(this), done(this);
+  Label load_from_cached_holder(this), is_smi(this), done(this);
 
-  Branch(IsStrongReferenceTo(maybe_holder, NullConstant()), &done,
+  GotoIf(TaggedIsSmi(maybe_holder_or_constant), &is_smi);
+  Branch(IsStrongReferenceTo(maybe_holder_or_constant, NullConstant()), &done,
          &load_from_cached_holder);
 
-  BIND(&load_from_cached_holder);
+  BIND(&is_smi);
   {
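+    // A Smi in the handler's data1 slot is the constant value itself
+    // (kConstantFromPrototype): return it directly, or just |true| for a
+    // has-lookup.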
-    // For regular holders, having passed the receiver map check and the
-    // validity cell check implies that |holder| is alive. However, for global
-    // object receivers, |maybe_holder| may be cleared.
-    CSA_ASSERT(this, IsWeakOrCleared(maybe_holder));
-    Node* holder = GetHeapObjectAssumeWeak(maybe_holder, miss);
+    CSA_ASSERT(
+        this,
+        WordEqual(
+            Signed(DecodeWord<LoadHandler::KindBits>(SmiUntag(smi_handler))),
+            IntPtrConstant(LoadHandler::kConstantFromPrototype)));
+    if (access_mode == LoadAccessMode::kHas) {
+      exit_point->Return(TrueConstant());
+    } else {
+      exit_point->Return(maybe_holder_or_constant);
+    }
+  }
 
+  BIND(&load_from_cached_holder);
+  {
+    // For regular holders, having passed the receiver map check and the
+    // validity cell check implies that |holder| is alive. However, for
+    // global object receivers, |maybe_holder_or_constant| may be cleared.
+    CSA_ASSERT(this, IsWeakOrCleared(maybe_holder_or_constant));
+    TNode<HeapObject> holder =
+        GetHeapObjectAssumeWeak(maybe_holder_or_constant, miss);
     var_holder->Bind(holder);
     Goto(&done);
   }
@@ -858,22 +877,22 @@ void AccessorAssembler::HandleLoadICProtoHandler(
   }
 }
 
-void AccessorAssembler::EmitAccessCheck(Node* expected_native_context,
-                                        Node* context, Node* receiver,
+void AccessorAssembler::EmitAccessCheck(TNode<Context> expected_native_context,
+                                        TNode<Context> context, Node* receiver,
                                         Label* can_access, Label* miss) {
   CSA_ASSERT(this, IsNativeContext(expected_native_context));
 
-  Node* native_context = LoadNativeContext(context);
-  GotoIf(WordEqual(expected_native_context, native_context), can_access);
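+  // TaggedEqual compares tagged references and remains correct under
+  // pointer compression, unlike a raw WordEqual on full machine words.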
+  TNode<Context> native_context = LoadNativeContext(context);
+  GotoIf(TaggedEqual(expected_native_context, native_context), can_access);
   // If the receiver is not a JSGlobalProxy then we miss.
   GotoIfNot(IsJSGlobalProxy(receiver), miss);
-  // For JSGlobalProxy receiver try to compare security tokens of current
-  // and expected native contexts.
+  // For a JSGlobalProxy receiver, compare the security tokens of the
+  // current and expected native contexts.
-  Node* expected_token = LoadContextElement(expected_native_context,
-                                            Context::SECURITY_TOKEN_INDEX);
-  Node* current_token =
+  TNode<Object> expected_token = LoadContextElement(
+      expected_native_context, Context::SECURITY_TOKEN_INDEX);
+  TNode<Object> current_token =
       LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
-  Branch(WordEqual(expected_token, current_token), can_access, miss);
+  Branch(TaggedEqual(expected_token, current_token), can_access, miss);
 }
 
 void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
@@ -886,7 +905,7 @@ void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
     CSA_ASSERT(this, IsNotSetWord32(details,
                                     PropertyDetails::kAttributesReadOnlyMask));
   }
-  Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
+  TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
   GotoIf(Word32Equal(kind, Int32Constant(kData)), writable);
   // Fall through if it's an accessor property.
 }
@@ -896,8 +915,8 @@ void AccessorAssembler::HandleStoreICNativeDataProperty(
   Comment("native_data_property_store");
   TNode<IntPtrT> descriptor =
       Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word));
-  Node* accessor_info = LoadDescriptorValue(LoadMap(holder), descriptor);
-  CSA_CHECK(this, IsAccessorInfo(accessor_info));
+  TNode<AccessorInfo> accessor_info =
+      CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
 
   TailCallRuntime(Runtime::kStoreCallbackProperty, p->context(), p->receiver(),
                   holder, accessor_info, p->name(), p->value());
@@ -917,7 +936,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
   BIND(&if_smi_handler);
   {
     Node* holder = p->receiver();
-    Node* handler_word = SmiUntag(CAST(handler));
+    TNode<IntPtrT> handler_word = SmiUntag(CAST(handler));
 
     Label if_fast_smi(this), if_proxy(this);
 
@@ -925,7 +944,8 @@ void AccessorAssembler::HandleStoreICHandlerCase(
     STATIC_ASSERT(StoreHandler::kNormal + 1 == StoreHandler::kProxy);
     STATIC_ASSERT(StoreHandler::kProxy + 1 == StoreHandler::kKindsNumber);
 
-    Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+    TNode<UintPtrT> handler_kind =
+        DecodeWord<StoreHandler::KindBits>(handler_word);
     GotoIf(IntPtrLessThan(handler_kind,
                           IntPtrConstant(StoreHandler::kGlobalProxy)),
            &if_fast_smi);
@@ -941,7 +961,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
         properties, CAST(p->name()), &dictionary_found, &var_name_index, miss);
     BIND(&dictionary_found);
     {
-      Node* details = LoadDetailsByKeyIndex<NameDictionary>(
+      TNode<Uint32T> details = LoadDetailsByKeyIndex<NameDictionary>(
           properties, var_name_index.value());
       // Check that the property is a writable data property (no accessor).
       const int kTypeAndReadOnlyMask = PropertyDetails::KindField::kMask |
@@ -956,7 +976,8 @@ void AccessorAssembler::HandleStoreICHandlerCase(
 
     BIND(&if_fast_smi);
     {
-      Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+      TNode<UintPtrT> handler_kind =
+          DecodeWord<StoreHandler::KindBits>(handler_word);
 
       Label data(this), accessor(this), native_data_property(this);
       GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kAccessor)),
@@ -1034,7 +1055,7 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
     StoreTransitionMapFlags flags) {
   DCHECK_EQ(0, flags & ~kStoreTransitionMapFlagsMask);
   if (flags & kCheckPrototypeValidity) {
-    Node* maybe_validity_cell =
+    TNode<Object> maybe_validity_cell =
         LoadObjectField(transition_map, Map::kPrototypeValidityCellOffset);
     CheckPrototypeValidityCell(maybe_validity_cell, miss);
   }
@@ -1044,21 +1065,22 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
   GotoIf(IsSetWord32<Map::IsDeprecatedBit>(bitfield3), miss);
 
   // Load last descriptor details.
-  Node* nof = DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+  TNode<UintPtrT> nof =
+      DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
   CSA_ASSERT(this, WordNotEqual(nof, IntPtrConstant(0)));
   TNode<DescriptorArray> descriptors = LoadMapDescriptors(transition_map);
 
-  Node* factor = IntPtrConstant(DescriptorArray::kEntrySize);
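+  // The most recently added descriptor holds the transitioned property; its
+  // key lives at ToKeyIndex(-1) + nof * kEntrySize, i.e. ToKeyIndex(nof - 1).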
+  TNode<IntPtrT> factor = IntPtrConstant(DescriptorArray::kEntrySize);
   TNode<IntPtrT> last_key_index = UncheckedCast<IntPtrT>(IntPtrAdd(
       IntPtrConstant(DescriptorArray::ToKeyIndex(-1)), IntPtrMul(nof, factor)));
   if (flags & kValidateTransitionHandler) {
     TNode<Name> key = LoadKeyByKeyIndex(descriptors, last_key_index);
-    GotoIf(WordNotEqual(key, p->name()), miss);
+    GotoIf(TaggedNotEqual(key, p->name()), miss);
   } else {
-    CSA_ASSERT(this, WordEqual(LoadKeyByKeyIndex(descriptors, last_key_index),
-                               p->name()));
+    CSA_ASSERT(this, TaggedEqual(LoadKeyByKeyIndex(descriptors, last_key_index),
+                                 p->name()));
   }
-  Node* details = LoadDetailsByKeyIndex(descriptors, last_key_index);
+  TNode<Uint32T> details = LoadDetailsByKeyIndex(descriptors, last_key_index);
   if (flags & kValidateTransitionHandler) {
     // Follow transitions only in the following cases:
     // 1) name is a non-private symbol and attributes equal to NONE,
@@ -1077,7 +1099,7 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
     // DontEnum attribute is allowed only for private symbols and vice versa.
     Branch(Word32Equal(
                IsSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
-               IsPrivateSymbol(p->name())),
+               IsPrivateSymbol(CAST(p->name()))),
            &attributes_ok, miss);
 
     BIND(&attributes_ok);
@@ -1089,7 +1111,8 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
 }
 
 void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
-                                       Node* name_index, Node* representation,
+                                       Node* name_index,
+                                       TNode<Word32T> representation,
                                        Node* value, Label* bailout) {
   Label r_smi(this), r_double(this), r_heapobject(this), all_fine(this);
   // Ignore FLAG_track_fields etc. and always emit code for all checks,
@@ -1114,12 +1137,7 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
   BIND(&r_double);
   {
     GotoIf(TaggedIsSmi(value), &all_fine);
-    Node* value_map = LoadMap(value);
-    // While supporting mutable HeapNumbers would be straightforward, such
-    // objects should not end up here anyway.
-    CSA_ASSERT(this, WordNotEqual(value_map,
-                                  LoadRoot(RootIndex::kMutableHeapNumberMap)));
-    Branch(IsHeapNumberMap(value_map), &all_fine, bailout);
+    Branch(IsHeapNumber(value), &all_fine, bailout);
   }
 
   BIND(&r_heapobject);
@@ -1144,7 +1162,7 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
     TNode<Map> field_type_map =
         CAST(GetHeapObjectAssumeWeak(field_type, bailout));
     // FieldType::Class(...) performs a map check.
-    Branch(WordEqual(LoadMap(value), field_type_map), &all_fine, bailout);
+    Branch(TaggedEqual(LoadMap(value), field_type_map), &all_fine, bailout);
   }
 
   BIND(&all_fine);
@@ -1157,8 +1175,8 @@ TNode<BoolT> AccessorAssembler::IsPropertyDetailsConst(Node* details) {
 
 void AccessorAssembler::OverwriteExistingFastDataProperty(
     Node* object, Node* object_map, Node* descriptors,
-    Node* descriptor_name_index, Node* details, Node* value, Label* slow,
-    bool do_transitioning_store) {
+    Node* descriptor_name_index, Node* details, TNode<Object> value,
+    Label* slow, bool do_transitioning_store) {
   Label done(this), if_field(this), if_descriptor(this);
 
   CSA_ASSERT(this,
@@ -1171,17 +1189,19 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
 
   BIND(&if_field);
   {
-    Node* representation =
+    TNode<Uint32T> representation =
         DecodeWord32<PropertyDetails::RepresentationField>(details);
 
     CheckFieldType(CAST(descriptors), descriptor_name_index, representation,
                    value, slow);
 
-    Node* field_index =
+    TNode<UintPtrT> field_index =
         DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
-    field_index = IntPtrAdd(field_index,
-                            LoadMapInobjectPropertiesStartInWords(object_map));
-    Node* instance_size_in_words = LoadMapInstanceSizeInWords(object_map);
+    field_index = Unsigned(
+        IntPtrAdd(field_index,
+                  Unsigned(LoadMapInobjectPropertiesStartInWords(object_map))));
+    TNode<IntPtrT> instance_size_in_words =
+        LoadMapInstanceSizeInWords(object_map);
 
     Label inobject(this), backing_store(this);
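+    // Field indices below the instance size are in-object; larger indices
+    // are relative to the out-of-object properties backing store.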
     Branch(UintPtrLessThan(field_index, instance_size_in_words), &inobject,
@@ -1212,19 +1232,19 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
                                          MachineRepresentation::kFloat64);
         } else {
           if (do_transitioning_store) {
-            Node* mutable_heap_number =
-                AllocateMutableHeapNumberWithValue(double_value);
+            TNode<HeapNumber> heap_number =
+                AllocateHeapNumberWithValue(double_value);
             StoreMap(object, object_map);
-            StoreObjectField(object, field_offset, mutable_heap_number);
+            StoreObjectField(object, field_offset, heap_number);
           } else {
-            Node* mutable_heap_number = LoadObjectField(object, field_offset);
+            TNode<HeapNumber> heap_number =
+                CAST(LoadObjectField(object, field_offset));
             Label if_mutable(this);
             GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
-            TNode<Float64T> current_value =
-                LoadHeapNumberValue(mutable_heap_number);
+            TNode<Float64T> current_value = LoadHeapNumberValue(heap_number);
             BranchIfSameNumberValue(current_value, double_value, &done, slow);
             BIND(&if_mutable);
-            StoreHeapNumberValue(mutable_heap_number, double_value);
+            StoreHeapNumberValue(heap_number, double_value);
           }
         }
         Goto(&done);
@@ -1250,8 +1270,8 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
 
     BIND(&backing_store);
     {
-      Node* backing_store_index =
-          IntPtrSub(field_index, instance_size_in_words);
+      TNode<IntPtrT> backing_store_index =
+          Signed(IntPtrSub(field_index, instance_size_in_words));
 
       if (do_transitioning_store) {
-        // Allocate mutable heap number before extending properties backing
+        // Allocate the heap number before extending the properties backing
@@ -1264,10 +1284,10 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
                                 Int32Constant(Representation::kDouble)),
                  &cont);
           {
-            Node* double_value = ChangeNumberToFloat64(CAST(value));
-            Node* mutable_heap_number =
-                AllocateMutableHeapNumberWithValue(double_value);
-            var_value.Bind(mutable_heap_number);
+            TNode<Float64T> double_value = ChangeNumberToFloat64(CAST(value));
+            TNode<HeapNumber> heap_number =
+                AllocateHeapNumberWithValue(double_value);
+            var_value.Bind(heap_number);
             Goto(&cont);
           }
           BIND(&cont);
@@ -1288,18 +1308,17 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
             &double_rep, &tagged_rep);
         BIND(&double_rep);
         {
-          Node* mutable_heap_number =
-              LoadPropertyArrayElement(properties, backing_store_index);
+          TNode<HeapNumber> heap_number =
+              CAST(LoadPropertyArrayElement(properties, backing_store_index));
           TNode<Float64T> double_value = ChangeNumberToFloat64(CAST(value));
 
           Label if_mutable(this);
           GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
-          TNode<Float64T> current_value =
-              LoadHeapNumberValue(mutable_heap_number);
+          TNode<Float64T> current_value = LoadHeapNumberValue(heap_number);
           BranchIfSameNumberValue(current_value, double_value, &done, slow);
 
           BIND(&if_mutable);
-          StoreHeapNumberValue(mutable_heap_number, double_value);
+          StoreHeapNumberValue(heap_number, double_value);
           Goto(&done);
         }
         BIND(&tagged_rep);
@@ -1322,9 +1341,9 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
   BIND(&if_descriptor);
   {
     // Check that constant matches value.
-    Node* constant = LoadValueByKeyIndex(
+    TNode<Object> constant = LoadValueByKeyIndex(
         CAST(descriptors), UncheckedCast<IntPtrT>(descriptor_name_index));
-    GotoIf(WordNotEqual(value, constant), slow);
+    GotoIf(TaggedNotEqual(value, constant), slow);
 
     if (do_transitioning_store) {
       StoreMap(object, object_map);
@@ -1334,15 +1353,17 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
   BIND(&done);
 }
 
-void AccessorAssembler::CheckPrototypeValidityCell(Node* maybe_validity_cell,
-                                                   Label* miss) {
+void AccessorAssembler::CheckPrototypeValidityCell(
+    TNode<Object> maybe_validity_cell, Label* miss) {
   Label done(this);
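+  // |maybe_validity_cell| is either the Smi Map::kPrototypeChainValid itself
+  // or a Cell whose value must still hold that sentinel; otherwise the
+  // prototype chain has been modified and we miss.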
-  GotoIf(WordEqual(maybe_validity_cell, SmiConstant(Map::kPrototypeChainValid)),
-         &done);
+  GotoIf(
+      TaggedEqual(maybe_validity_cell, SmiConstant(Map::kPrototypeChainValid)),
+      &done);
   CSA_ASSERT(this, TaggedIsNotSmi(maybe_validity_cell));
 
-  Node* cell_value = LoadObjectField(maybe_validity_cell, Cell::kValueOffset);
-  Branch(WordEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)), &done,
+  TNode<Object> cell_value =
+      LoadObjectField(CAST(maybe_validity_cell), Cell::kValueOffset);
+  Branch(TaggedEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)), &done,
          miss);
 
   BIND(&done);
@@ -1353,9 +1374,11 @@ void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
   Comment("accessor_store");
   TNode<IntPtrT> descriptor =
       Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word));
-  Node* accessor_pair = LoadDescriptorValue(LoadMap(holder), descriptor);
+  TNode<HeapObject> accessor_pair =
+      CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
   CSA_ASSERT(this, IsAccessorPair(accessor_pair));
-  Node* setter = LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
+  TNode<Object> setter =
+      LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
   CSA_ASSERT(this, Word32BinaryNot(IsTheHole(setter)));
 
   Callable callable = CodeFactory::Call(isolate());
@@ -1402,7 +1425,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
       p, handler, on_code_handler,
       // on_found_on_receiver
       [=](Node* properties, Node* name_index) {
-        Node* details =
+        TNode<Uint32T> details =
             LoadDetailsByKeyIndex<NameDictionary>(properties, name_index);
         // Check that the property is a writable data property (no accessor).
         const int kTypeAndReadOnlyMask =
@@ -1422,15 +1445,16 @@ void AccessorAssembler::HandleStoreICProtoHandler(
         if_accessor(this), if_native_data_property(this);
 
     CSA_ASSERT(this, TaggedIsSmi(smi_handler));
-    Node* handler_word = SmiUntag(smi_handler);
+    TNode<IntPtrT> handler_word = SmiUntag(smi_handler);
 
-    Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+    TNode<UintPtrT> handler_kind =
+        DecodeWord<StoreHandler::KindBits>(handler_word);
     GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal)),
            &if_add_normal);
 
     TNode<MaybeObject> maybe_holder = LoadHandlerDataField(handler, 1);
     CSA_ASSERT(this, IsWeakOrCleared(maybe_holder));
-    TNode<Object> holder = GetHeapObjectAssumeWeak(maybe_holder, miss);
+    TNode<HeapObject> holder = GetHeapObjectAssumeWeak(maybe_holder, miss);
 
     GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kGlobalProxy)),
            &if_store_global_proxy);
@@ -1497,11 +1521,11 @@ void AccessorAssembler::HandleStoreICProtoHandler(
           IsCleared(maybe_context), [=] { return SmiConstant(0); },
           [=] { return GetHeapObjectAssumeWeak(maybe_context); });
 
-      Node* foreign = LoadObjectField(call_handler_info,
-                                      CallHandlerInfo::kJsCallbackOffset);
+      TNode<Foreign> foreign = CAST(LoadObjectField(
+          call_handler_info, CallHandlerInfo::kJsCallbackOffset));
       Node* callback = LoadObjectField(foreign, Foreign::kForeignAddressOffset,
                                        MachineType::Pointer());
-      Node* data =
+      TNode<Object> data =
           LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
 
       VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver());
@@ -1560,7 +1584,8 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
     TailCallRuntime(Runtime::kSetPropertyWithReceiver, p->context(), proxy,
                     p->name(), p->value(), p->receiver());
   } else {
-    Node* name = CallBuiltin(Builtins::kToName, p->context(), p->name());
+    TNode<Object> name =
+        CallBuiltin(Builtins::kToName, p->context(), p->name());
     TailCallBuiltin(Builtins::kProxySetProperty, p->context(), proxy, name,
                     p->value(), p->receiver());
   }
@@ -1571,7 +1596,8 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
                                                     Label* miss) {
   Comment("field store");
 #ifdef DEBUG
-  Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+  TNode<UintPtrT> handler_kind =
+      DecodeWord<StoreHandler::KindBits>(handler_word);
   CSA_ASSERT(
       this,
       Word32Or(
@@ -1579,7 +1605,7 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
           WordEqual(handler_kind, IntPtrConstant(StoreHandler::kConstField))));
 #endif
 
-  Node* field_representation =
+  TNode<UintPtrT> field_representation =
       DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
 
   Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
@@ -1674,8 +1700,9 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
     GotoIf(TaggedIsSmi(maybe_field_type), &done);
     // Check that value type matches the field type.
     {
-      Node* field_type = GetHeapObjectAssumeWeak(maybe_field_type, bailout);
-      Branch(WordEqual(LoadMap(value), field_type), &done, bailout);
+      TNode<HeapObject> field_type =
+          GetHeapObjectAssumeWeak(maybe_field_type, bailout);
+      Branch(TaggedEqual(LoadMap(CAST(value)), field_type), &done, bailout);
     }
     BIND(&done);
 
@@ -1700,7 +1727,8 @@ Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
   VARIABLE(var_encoded_hash, MachineRepresentation::kWord32);
   VARIABLE(var_length, ParameterRepresentation(mode));
 
-  Node* properties = LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
+  TNode<Object> properties =
+      LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
   var_properties.Bind(properties);
 
   Label if_smi_hash(this), if_property_array(this), extend_store(this);
@@ -1708,8 +1736,8 @@ Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
 
   BIND(&if_smi_hash);
   {
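+    // A Smi here is the object's identity hash: the object has no
+    // out-of-object properties yet, so the current length is zero.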
-    Node* hash = SmiToInt32(properties);
-    Node* encoded_hash =
+    TNode<Int32T> hash = SmiToInt32(CAST(properties));
+    TNode<Word32T> encoded_hash =
         Word32Shl(hash, Int32Constant(PropertyArray::HashField::kShift));
     var_encoded_hash.Bind(encoded_hash);
     var_length.Bind(IntPtrOrSmiConstant(0, mode));
@@ -1719,11 +1747,11 @@ Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
 
   BIND(&if_property_array);
   {
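+    // A PropertyArray packs its length and the object's identity hash into
+    // a single field; split them using the HashField/LengthField masks.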
-    Node* length_and_hash_int32 = LoadAndUntagToWord32ObjectField(
+    TNode<Int32T> length_and_hash_int32 = LoadAndUntagToWord32ObjectField(
         var_properties.value(), PropertyArray::kLengthAndHashOffset);
     var_encoded_hash.Bind(Word32And(
         length_and_hash_int32, Int32Constant(PropertyArray::HashField::kMask)));
-    Node* length_intptr = ChangeInt32ToIntPtr(
+    TNode<IntPtrT> length_intptr = ChangeInt32ToIntPtr(
         Word32And(length_and_hash_int32,
                   Int32Constant(PropertyArray::LengthField::kMask)));
     Node* length = IntPtrToParameter(length_intptr, mode);
@@ -1771,10 +1799,10 @@ Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
 
     // TODO(gsathya): Clean up the type conversions by creating smarter
     // helpers that do the correct op based on the mode.
-    Node* new_capacity_int32 =
+    TNode<Int32T> new_capacity_int32 =
         TruncateIntPtrToInt32(ParameterToIntPtr(new_capacity, mode));
-    Node* new_length_and_hash_int32 =
-        Word32Or(var_encoded_hash.value(), new_capacity_int32);
+    TNode<Int32T> new_length_and_hash_int32 =
+        Signed(Word32Or(var_encoded_hash.value(), new_capacity_int32));
     StoreObjectField(new_properties, PropertyArray::kLengthAndHashOffset,
                      SmiFromInt32(new_length_and_hash_int32));
     StoreObjectField(object, JSObject::kPropertiesOrHashOffset, new_properties);
@@ -1795,7 +1823,8 @@ void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
     property_storage = LoadFastProperties(object);
   }
 
-  Node* index = DecodeWord<StoreHandler::FieldIndexBits>(handler_word);
+  TNode<UintPtrT> index =
+      DecodeWord<StoreHandler::FieldIndexBits>(handler_word);
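+  // The field index counts tagged-size slots; scale it to a byte offset.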
   TNode<IntPtrT> offset = Signed(TimesTaggedSize(index));
   if (representation.IsDouble()) {
     if (!FLAG_unbox_double_fields || !is_inobject) {
@@ -1818,8 +1847,9 @@ void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
       BranchIfSameNumberValue(current_value, UncheckedCast<Float64T>(value),
                               &const_checked, bailout);
     } else {
-      Node* current_value = LoadObjectField(property_storage, offset);
-      Branch(WordEqual(current_value, value), &const_checked, bailout);
+      TNode<Object> current_value = LoadObjectField(property_storage, offset);
+      Branch(TaggedEqual(current_value, UncheckedCast<Object>(value)),
+             &const_checked, bailout);
     }
   }
 
@@ -1859,42 +1889,44 @@ void AccessorAssembler::EmitFastElementsBoundsCheck(Node* object,
 }
 
 void AccessorAssembler::EmitElementLoad(
-    Node* object, Node* elements_kind, SloppyTNode<IntPtrT> intptr_index,
-    Node* is_jsarray_condition, Label* if_hole, Label* rebox_double,
-    Variable* var_double_value, Label* unimplemented_elements_kind,
-    Label* out_of_bounds, Label* miss, ExitPoint* exit_point,
-    LoadAccessMode access_mode) {
+    Node* object, TNode<Word32T> elements_kind,
+    SloppyTNode<IntPtrT> intptr_index, Node* is_jsarray_condition,
+    Label* if_hole, Label* rebox_double, Variable* var_double_value,
+    Label* unimplemented_elements_kind, Label* out_of_bounds, Label* miss,
+    ExitPoint* exit_point, LoadAccessMode access_mode) {
   Label if_typed_array(this), if_fast(this), if_fast_packed(this),
       if_fast_holey(this), if_fast_double(this), if_fast_holey_double(this),
       if_nonfast(this), if_dictionary(this);
-  Branch(
-      Int32GreaterThan(elements_kind, Int32Constant(LAST_FROZEN_ELEMENTS_KIND)),
-      &if_nonfast, &if_fast);
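+  // Kinds up to LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND use the fast,
+  // FixedArray-backed paths; higher kinds (dictionary, typed arrays, ...)
+  // take the non-fast path.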
+  Branch(Int32GreaterThan(elements_kind,
+                          Int32Constant(LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)),
+         &if_nonfast, &if_fast);
 
   BIND(&if_fast);
   {
     TNode<FixedArrayBase> elements = LoadJSObjectElements(CAST(object));
     EmitFastElementsBoundsCheck(object, elements, intptr_index,
                                 is_jsarray_condition, out_of_bounds);
-    int32_t kinds[] = {// Handled by if_fast_packed.
-                       PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
-                       PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
-                       // Handled by if_fast_holey.
-                       HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
-                       HOLEY_FROZEN_ELEMENTS, HOLEY_SEALED_ELEMENTS,
-                       // Handled by if_fast_double.
-                       PACKED_DOUBLE_ELEMENTS,
-                       // Handled by if_fast_holey_double.
-                       HOLEY_DOUBLE_ELEMENTS};
-    Label* labels[] = {
-        // FAST_{SMI,}_ELEMENTS
-        &if_fast_packed, &if_fast_packed, &if_fast_packed, &if_fast_packed,
-        // FAST_HOLEY_{SMI,}_ELEMENTS
-        &if_fast_holey, &if_fast_holey, &if_fast_holey, &if_fast_holey,
-        // PACKED_DOUBLE_ELEMENTS
-        &if_fast_double,
-        // HOLEY_DOUBLE_ELEMENTS
-        &if_fast_holey_double};
+    int32_t kinds[] = {
+        // Handled by if_fast_packed.
+        PACKED_SMI_ELEMENTS, PACKED_ELEMENTS, PACKED_NONEXTENSIBLE_ELEMENTS,
+        PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
+        // Handled by if_fast_holey.
+        HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS, HOLEY_NONEXTENSIBLE_ELEMENTS,
+        HOLEY_FROZEN_ELEMENTS, HOLEY_SEALED_ELEMENTS,
+        // Handled by if_fast_double.
+        PACKED_DOUBLE_ELEMENTS,
+        // Handled by if_fast_holey_double.
+        HOLEY_DOUBLE_ELEMENTS};
+    Label* labels[] = {// PACKED_{SMI,,NONEXTENSIBLE,SEALED,FROZEN}_ELEMENTS
+                       &if_fast_packed, &if_fast_packed, &if_fast_packed,
+                       &if_fast_packed, &if_fast_packed,
+                       // HOLEY_{SMI,,NONEXTENSIBLE,FROZEN,SEALED}_ELEMENTS
+                       &if_fast_holey, &if_fast_holey, &if_fast_holey,
+                       &if_fast_holey, &if_fast_holey,
+                       // PACKED_DOUBLE_ELEMENTS
+                       &if_fast_double,
+                       // HOLEY_DOUBLE_ELEMENTS
+                       &if_fast_holey_double};
     Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
            arraysize(kinds));
 
@@ -1910,8 +1942,9 @@ void AccessorAssembler::EmitElementLoad(
     BIND(&if_fast_holey);
     {
       Comment("fast holey elements");
-      Node* element = UnsafeLoadFixedArrayElement(CAST(elements), intptr_index);
-      GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
+      TNode<Object> element =
+          UnsafeLoadFixedArrayElement(CAST(elements), intptr_index);
+      GotoIf(TaggedEqual(element, TheHoleConstant()), if_hole);
       exit_point->Return(access_mode == LoadAccessMode::kHas ? TrueConstant()
                                                              : element);
     }
@@ -1931,9 +1964,9 @@ void AccessorAssembler::EmitElementLoad(
     BIND(&if_fast_holey_double);
     {
       Comment("holey double elements");
-      Node* value = LoadFixedDoubleArrayElement(CAST(elements), intptr_index,
-                                                MachineType::Float64(), 0,
-                                                INTPTR_PARAMETERS, if_hole);
+      TNode<Float64T> value = LoadFixedDoubleArrayElement(
+          CAST(elements), intptr_index, MachineType::Float64(), 0,
+          INTPTR_PARAMETERS, if_hole);
       if (access_mode == LoadAccessMode::kHas) {
         exit_point->Return(TrueConstant());
       } else {
@@ -2020,35 +2053,35 @@ void AccessorAssembler::EmitElementLoad(
         BIND(&uint16_elements);
         {
           Comment("UINT16_ELEMENTS");
-          Node* index = WordShl(intptr_index, IntPtrConstant(1));
+          TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(1));
           Node* element = Load(MachineType::Uint16(), backing_store, index);
           exit_point->Return(SmiFromInt32(element));
         }
         BIND(&int16_elements);
         {
           Comment("INT16_ELEMENTS");
-          Node* index = WordShl(intptr_index, IntPtrConstant(1));
+          TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(1));
           Node* element = Load(MachineType::Int16(), backing_store, index);
           exit_point->Return(SmiFromInt32(element));
         }
         BIND(&uint32_elements);
         {
           Comment("UINT32_ELEMENTS");
-          Node* index = WordShl(intptr_index, IntPtrConstant(2));
+          TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2));
           Node* element = Load(MachineType::Uint32(), backing_store, index);
           exit_point->Return(ChangeUint32ToTagged(element));
         }
         BIND(&int32_elements);
         {
           Comment("INT32_ELEMENTS");
-          Node* index = WordShl(intptr_index, IntPtrConstant(2));
+          TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2));
           Node* element = Load(MachineType::Int32(), backing_store, index);
           exit_point->Return(ChangeInt32ToTagged(element));
         }
         BIND(&float32_elements);
         {
           Comment("FLOAT32_ELEMENTS");
-          Node* index = WordShl(intptr_index, IntPtrConstant(2));
+          TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2));
           Node* element = Load(MachineType::Float32(), backing_store, index);
           var_double_value->Bind(ChangeFloat32ToFloat64(element));
           Goto(rebox_double);
@@ -2056,7 +2089,7 @@ void AccessorAssembler::EmitElementLoad(
         BIND(&float64_elements);
         {
           Comment("FLOAT64_ELEMENTS");
-          Node* index = WordShl(intptr_index, IntPtrConstant(3));
+          TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(3));
           Node* element = Load(MachineType::Float64(), backing_store, index);
           var_double_value->Bind(element);
           Goto(rebox_double);
@@ -2105,12 +2138,12 @@ void AccessorAssembler::InvalidateValidityCellIfPrototype(Node* map,
 
   BIND(&is_prototype);
   {
-    Node* maybe_prototype_info =
+    TNode<Object> maybe_prototype_info =
         LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
     // If there's no prototype info then there's nothing to invalidate.
     GotoIf(TaggedIsSmi(maybe_prototype_info), &cont);
 
-    Node* function = ExternalConstant(
+    TNode<ExternalReference> function = ExternalConstant(
         ExternalReference::invalidate_prototype_chains_function());
     CallCFunction(function, MachineType::AnyTagged(),
                   std::make_pair(MachineType::AnyTagged(), map));
@@ -2130,8 +2163,9 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
   // Receivers requiring non-standard element accesses (interceptors, access
   // checks, strings and string wrappers, proxies) are handled in the runtime.
   GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_custom);
-  Node* elements_kind = LoadMapElementsKind(receiver_map);
-  Node* is_jsarray_condition = InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
+  TNode<Int32T> elements_kind = LoadMapElementsKind(receiver_map);
+  TNode<BoolT> is_jsarray_condition =
+      InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
   VARIABLE(var_double_value, MachineRepresentation::kFloat64);
   Label rebox_double(this, &var_double_value);
 
@@ -2192,12 +2226,14 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
   VARIABLE(var_details, MachineRepresentation::kWord32);
   VARIABLE(var_value, MachineRepresentation::kTagged);
 
+  TNode<Name> name = CAST(p->name());
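+  // The key must be a Name here; CAST checks this in debug builds.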
+
   // Receivers requiring non-standard accesses (interceptors, access
   // checks, strings and string wrappers) are handled in the runtime.
   GotoIf(IsSpecialReceiverInstanceType(instance_type), &special_receiver);
 
   // Check if the receiver has fast or slow properties.
-  Node* bitfield3 = LoadMapBitField3(receiver_map);
+  TNode<Uint32T> bitfield3 = LoadMapBitField3(receiver_map);
   GotoIf(IsSetWord32<Map::IsDictionaryMapBit>(bitfield3),
          &if_property_dictionary);
 
@@ -2209,7 +2245,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
   TVARIABLE(IntPtrT, var_name_index);
   Label* notfound = use_stub_cache == kUseStubCache ? &try_stub_cache
                                                     : &lookup_prototype_chain;
-  DescriptorLookup(p->name(), descriptors, bitfield3, &if_descriptor_found,
+  DescriptorLookup(name, descriptors, bitfield3, &if_descriptor_found,
                    &var_name_index, notfound);
 
   BIND(&if_descriptor_found);
@@ -2226,13 +2262,13 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
     // When there is no feedback vector don't use stub cache.
     GotoIfNot(IsUndefined(p->vector()), &stub_cache);
     // Fall back to the slow path for private symbols.
-    Branch(IsPrivateSymbol(p->name()), slow, &lookup_prototype_chain);
+    Branch(IsPrivateSymbol(name), slow, &lookup_prototype_chain);
 
     BIND(&stub_cache);
     Comment("stub cache probe for fast property load");
     TVARIABLE(MaybeObject, var_handler);
     Label found_handler(this, &var_handler), stub_cache_miss(this);
-    TryProbeStubCache(isolate()->load_stub_cache(), receiver, p->name(),
+    TryProbeStubCache(isolate()->load_stub_cache(), receiver, name,
                       &found_handler, &var_handler, &stub_cache_miss);
     BIND(&found_handler);
     {
@@ -2247,7 +2283,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
       // chain. If it doesn't, then there's no point in missing.
       Comment("KeyedLoadGeneric_miss");
       TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context(), p->receiver(),
-                      p->name(), p->slot(), p->vector());
+                      name, p->slot(), p->vector());
     }
   }
 
@@ -2260,8 +2296,8 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
     TVARIABLE(IntPtrT, var_name_index);
     Label dictionary_found(this, &var_name_index);
     TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
-    NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()),
-                                         &dictionary_found, &var_name_index,
+    NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found,
+                                         &var_name_index,
                                          &lookup_prototype_chain);
     BIND(&dictionary_found);
     {
@@ -2273,8 +2309,8 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
 
   BIND(&if_found_on_receiver);
   {
-    Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
-                                       p->context(), receiver, slow);
+    TNode<Object> value = CallGetterIfAccessor(
+        var_value.value(), var_details.value(), p->context(), receiver, slow);
     IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
     Return(value);
   }
@@ -2289,7 +2325,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
 
     var_holder_map.Bind(receiver_map);
     var_holder_instance_type.Bind(instance_type);
-    GotoIf(IsPrivateSymbol(p->name()), &is_private_symbol);
+    GotoIf(IsPrivateSymbol(name), &is_private_symbol);
 
     Goto(&loop);
     BIND(&loop);
@@ -2298,16 +2334,16 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
       GotoIf(InstanceTypeEqual(var_holder_instance_type.value(),
                                JS_TYPED_ARRAY_TYPE),
              slow);
-      Node* proto = LoadMapPrototype(var_holder_map.value());
-      GotoIf(WordEqual(proto, NullConstant()), &return_undefined);
-      Node* proto_map = LoadMap(proto);
-      Node* proto_instance_type = LoadMapInstanceType(proto_map);
+      TNode<HeapObject> proto = LoadMapPrototype(var_holder_map.value());
+      GotoIf(TaggedEqual(proto, NullConstant()), &return_undefined);
+      TNode<Map> proto_map = LoadMap(proto);
+      TNode<Uint16T> proto_instance_type = LoadMapInstanceType(proto_map);
       var_holder_map.Bind(proto_map);
       var_holder_instance_type.Bind(proto_instance_type);
       Label next_proto(this), return_value(this, &var_value), goto_slow(this);
       TryGetOwnProperty(p->context(), receiver, proto, proto_map,
-                        proto_instance_type, p->name(), &return_value,
-                        &var_value, &next_proto, &goto_slow);
+                        proto_instance_type, name, &return_value, &var_value,
+                        &next_proto, &goto_slow);
 
       // This trampoline and the next are required to appease Turbofan's
       // variable merging.
@@ -2323,12 +2359,12 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
 
     BIND(&is_private_symbol);
     {
-      CSA_ASSERT(this, IsPrivateSymbol(p->name()));
+      CSA_ASSERT(this, IsPrivateSymbol(name));
 
       // For private names that don't exist on the receiver, we bail
       // to the runtime to throw. For private symbols, we just return
       // undefined.
-      Branch(IsPrivateName(p->name()), slow, &return_undefined);
+      Branch(IsPrivateName(CAST(name)), slow, &return_undefined);
     }
 
     BIND(&return_undefined);
@@ -2341,11 +2377,11 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
     GotoIfNot(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), slow);
 
     // Private field/symbol lookup is not supported.
-    GotoIf(IsPrivateSymbol(p->name()), slow);
+    GotoIf(IsPrivateSymbol(name), slow);
 
     direct_exit.ReturnCallStub(
         Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
-        p->context(), receiver /*holder is the same as receiver*/, p->name(),
+        p->context(), receiver /*holder is the same as receiver*/, name,
         receiver, SmiConstant(OnNonExistent::kReturnUndefined));
   }
 }
@@ -2361,7 +2397,7 @@ Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
   // See v8::internal::StubCache::PrimaryOffset().
   STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
   // Compute the hash of the name (use entire hash field).
-  Node* hash_field = LoadNameHashField(name);
+  TNode<Uint32T> hash_field = LoadNameHashField(name);
   CSA_ASSERT(this,
              Word32Equal(Word32And(hash_field,
                                    Int32Constant(Name::kHashNotComputedMask)),
@@ -2370,12 +2406,12 @@ Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
   // Using only the low bits in 64-bit mode is unlikely to increase the
   // risk of collision even if the heap is spread over an area larger than
   // 4Gb (and not at all if it isn't).
-  Node* map_word = BitcastTaggedToWord(map);
+  TNode<IntPtrT> map_word = BitcastTaggedToWord(map);
 
-  Node* map32 = TruncateIntPtrToInt32(UncheckedCast<IntPtrT>(
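+  // Mix higher map-word bits into the low bits (via a shifted XOR) before
+  // truncating to 32 bits.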
+  TNode<Int32T> map32 = TruncateIntPtrToInt32(UncheckedCast<IntPtrT>(
       WordXor(map_word, WordShr(map_word, StubCache::kMapKeyShift))));
   // Base the offset on a simple combination of name and map.
-  Node* hash = Int32Add(hash_field, map32);
+  TNode<Word32T> hash = Int32Add(hash_field, map32);
   uint32_t mask = (StubCache::kPrimaryTableSize - 1)
                   << StubCache::kCacheIndexShift;
   return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
@@ -2385,8 +2421,8 @@ Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) {
   // See v8::internal::StubCache::SecondaryOffset().
 
   // Use the seed from the primary cache in the secondary cache.
-  Node* name32 = TruncateIntPtrToInt32(BitcastTaggedToWord(name));
-  Node* hash = Int32Sub(TruncateIntPtrToInt32(seed), name32);
+  TNode<Int32T> name32 = TruncateIntPtrToInt32(BitcastTaggedToWord(name));
+  TNode<Word32T> hash = Int32Sub(TruncateIntPtrToInt32(seed), name32);
   hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
   int32_t mask = (StubCache::kSecondaryTableSize - 1)
                  << StubCache::kCacheIndexShift;
@@ -2395,7 +2431,7 @@ Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) {
 
 void AccessorAssembler::TryProbeStubCacheTable(
     StubCache* stub_cache, StubCacheTable table_id, Node* entry_offset,
-    Node* name, Node* map, Label* if_handler,
+    TNode<Object> name, TNode<Map> map, Label* if_handler,
     TVariable<MaybeObject>* var_handler, Label* if_miss) {
   StubCache::Table table = static_cast<StubCache::Table>(table_id);
   // The {table_offset} holds the entry offset times four (due to masking
@@ -2403,19 +2439,20 @@ void AccessorAssembler::TryProbeStubCacheTable(
   const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
   entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
 
-  Node* key_base = ExternalConstant(
+  TNode<ExternalReference> key_base = ExternalConstant(
       ExternalReference::Create(stub_cache->key_reference(table)));
 
   // Check that the key in the entry matches the name.
   DCHECK_EQ(0, offsetof(StubCache::Entry, key));
-  Node* cached_key = Load(MachineType::TaggedPointer(), key_base, entry_offset);
-  GotoIf(WordNotEqual(name, cached_key), if_miss);
+  TNode<HeapObject> cached_key =
+      CAST(Load(MachineType::TaggedPointer(), key_base, entry_offset));
+  GotoIf(TaggedNotEqual(name, cached_key), if_miss);
 
   // Check that the map in the entry matches.
-  Node* cached_map = Load(
-      MachineType::TaggedPointer(), key_base,
+  TNode<Object> cached_map = Load<Object>(
+      key_base,
       IntPtrAdd(entry_offset, IntPtrConstant(offsetof(StubCache::Entry, map))));
-  GotoIf(WordNotEqual(map, cached_map), if_miss);
+  GotoIf(TaggedNotEqual(map, cached_map), if_miss);
 
   TNode<MaybeObject> handler = ReinterpretCast<MaybeObject>(
       Load(MachineType::AnyTagged(), key_base,
@@ -2428,7 +2465,7 @@ void AccessorAssembler::TryProbeStubCacheTable(
 }
 
 void AccessorAssembler::TryProbeStubCache(StubCache* stub_cache, Node* receiver,
-                                          Node* name, Label* if_handler,
+                                          TNode<Object> name, Label* if_handler,
                                           TVariable<MaybeObject>* var_handler,
                                           Label* if_miss) {
   Label try_secondary(this), miss(this);
@@ -2439,7 +2476,7 @@ void AccessorAssembler::TryProbeStubCache(StubCache* stub_cache, Node* receiver,
   // Check that the {receiver} isn't a smi.
   GotoIf(TaggedIsSmi(receiver), &miss);
 
-  Node* receiver_map = LoadMap(receiver);
+  TNode<Map> receiver_map = LoadMap(receiver);
 
   // Probe the primary table.
   Node* primary_offset = StubCachePrimaryOffset(name, receiver_map);
@@ -2477,7 +2514,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
   Label stub_call(this, Label::kDeferred), miss(this, Label::kDeferred),
       no_feedback(this, Label::kDeferred);
 
-  Node* recv_map = LoadReceiverMap(p->receiver());
+  TNode<Map> recv_map = LoadReceiverMap(p->receiver());
   GotoIf(IsDeprecatedMap(recv_map), &miss);
 
   GotoIf(IsUndefined(p->vector()), &no_feedback);
@@ -2513,7 +2550,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
     // Call into the stub that implements the non-inlined parts of LoadIC.
     Callable ic =
         Builtins::CallableFor(isolate(), Builtins::kLoadIC_Noninlined);
-    Node* code_target = HeapConstant(ic.code());
+    TNode<Code> code_target = HeapConstant(ic.code());
     exit_point->ReturnCallStub(ic.descriptor(), code_target, p->context(),
                                p->receiver(), p->name(), p->slot(),
                                p->vector());
@@ -2524,8 +2561,8 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
     Comment("LoadIC_BytecodeHandler_nofeedback");
     // Call into the stub that implements the non-inlined parts of LoadIC.
     exit_point->ReturnCallStub(
-        Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
-        p->context(), p->receiver(), p->name(), p->slot(), p->vector());
+        Builtins::CallableFor(isolate(), Builtins::kLoadIC_NoFeedback),
+        p->context(), p->receiver(), p->name(), p->slot());
   }
 
   BIND(&miss);
@@ -2547,7 +2584,7 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
   Label if_handler(this, &var_handler), non_inlined(this, Label::kDeferred),
       try_polymorphic(this), miss(this, Label::kDeferred);
 
-  Node* receiver_map = LoadReceiverMap(p->receiver());
+  TNode<Map> receiver_map = LoadReceiverMap(p->receiver());
   GotoIf(IsDeprecatedMap(receiver_map), &miss);
 
   // Check monomorphic case.
@@ -2584,58 +2621,34 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
 }
 
 void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
-                                          Node* receiver_map,
+                                          TNode<Map> receiver_map,
                                           TNode<HeapObject> feedback,
                                           TVariable<MaybeObject>* var_handler,
                                           Label* if_handler, Label* miss,
                                           ExitPoint* exit_point) {
-  Label try_uninitialized(this, Label::kDeferred);
-
   // Neither deprecated map nor monomorphic. These cases are handled in the
   // bytecode handler.
   CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(receiver_map)));
-  CSA_ASSERT(this, WordNotEqual(receiver_map, feedback));
+  CSA_ASSERT(this, TaggedNotEqual(receiver_map, feedback));
   CSA_ASSERT(this, Word32BinaryNot(IsWeakFixedArrayMap(LoadMap(feedback))));
   DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
 
   {
     // Check megamorphic case.
-    GotoIfNot(WordEqual(feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
-              &try_uninitialized);
+    GotoIfNot(TaggedEqual(feedback, MegamorphicSymbolConstant()), miss);
 
     TryProbeStubCache(isolate()->load_stub_cache(), p->receiver(), p->name(),
                       if_handler, var_handler, miss);
   }
-
-  BIND(&try_uninitialized);
-  {
-    // Check uninitialized case.
-    GotoIfNot(WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol)),
-              miss);
-    exit_point->ReturnCallStub(
-        Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
-        p->context(), p->receiver(), p->name(), p->slot(), p->vector());
-  }
 }
 
-void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
-  Label miss(this, Label::kDeferred),
-      check_function_prototype(this);
+void AccessorAssembler::LoadIC_NoFeedback(const LoadICParameters* p) {
+  Label miss(this, Label::kDeferred);
   Node* receiver = p->receiver();
   GotoIf(TaggedIsSmi(receiver), &miss);
-  Node* receiver_map = LoadMap(receiver);
-  Node* instance_type = LoadMapInstanceType(receiver_map);
-
-  GotoIf(IsUndefined(p->vector()), &check_function_prototype);
-  // Optimistically write the state transition to the vector.
-  StoreFeedbackVectorSlot(p->vector(), p->slot(),
-                          LoadRoot(RootIndex::kpremonomorphic_symbol),
-                          SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
-  StoreWeakReferenceInFeedbackVector(p->vector(), p->slot(), receiver_map,
-                                     kTaggedSize, SMI_PARAMETERS);
-  Goto(&check_function_prototype);
+  TNode<Map> receiver_map = LoadMap(receiver);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
 
-  BIND(&check_function_prototype);
   {
     // Special case for Function.prototype load, because it's very common
     // for ICs that are only executed once (MyFunc.prototype.foo = ...).
@@ -2644,9 +2657,9 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
               &not_function_prototype);
     GotoIfNot(IsPrototypeString(p->name()), &not_function_prototype);
 
-    GotoIfPrototypeRequiresRuntimeLookup(CAST(receiver), CAST(receiver_map),
+    GotoIfPrototypeRequiresRuntimeLookup(CAST(receiver), receiver_map,
                                          &not_function_prototype);
-    Return(LoadJSFunctionPrototype(receiver, &miss));
+    Return(LoadJSFunctionPrototype(CAST(receiver), &miss));
     BIND(&not_function_prototype);
   }
 
@@ -2655,15 +2668,6 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
 
   BIND(&miss);
   {
-    Label call_runtime(this, Label::kDeferred);
-    GotoIf(IsUndefined(p->vector()), &call_runtime);
-    // Undo the optimistic state transition.
-    StoreFeedbackVectorSlot(p->vector(), p->slot(),
-                            LoadRoot(RootIndex::kuninitialized_symbol),
-                            SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
-    Goto(&call_runtime);
-
-    BIND(&call_runtime);
     TailCallRuntime(Runtime::kLoadIC_Miss, p->context(), p->receiver(),
                     p->name(), p->slot(), p->vector());
   }
@@ -2715,7 +2719,7 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
         CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, try_handler));
     TNode<Object> value =
         LoadObjectField(property_cell, PropertyCell::kValueOffset);
-    GotoIf(WordEqual(value, TheHoleConstant()), miss);
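+    // TheHole in the property cell means the cell has been invalidated;
+    // bail out to the miss handler.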
+    GotoIf(TaggedEqual(value, TheHoleConstant()), miss);
     exit_point->Return(value);
   }
 
@@ -2746,7 +2750,7 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
   TNode<MaybeObject> feedback_element =
       LoadFeedbackVectorSlot(vector, slot, kTaggedSize, slot_mode);
   TNode<Object> handler = CAST(feedback_element);
-  GotoIf(WordEqual(handler, LoadRoot(RootIndex::kuninitialized_symbol)), miss);
+  GotoIf(TaggedEqual(handler, UninitializedSymbolConstant()), miss);
 
   OnNonExistent on_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF
                                      ? OnNonExistent::kThrowReferenceError
@@ -2756,7 +2760,8 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
   TNode<Context> native_context = LoadNativeContext(context);
   TNode<JSGlobalProxy> receiver =
       CAST(LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX));
-  Node* holder = LoadContextElement(native_context, Context::EXTENSION_INDEX);
+  TNode<Object> holder =
+      LoadContextElement(native_context, Context::EXTENSION_INDEX);
 
   LazyLoadICParameters p([=] { return context; }, receiver, lazy_name,
                          ParameterToTagged(slot, slot_mode), vector, holder);
@@ -2772,10 +2777,11 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
   TVARIABLE(MaybeObject, var_handler);
   Label if_handler(this, &var_handler), try_polymorphic(this, Label::kDeferred),
       try_megamorphic(this, Label::kDeferred),
+      try_uninitialized(this, Label::kDeferred),
       try_polymorphic_name(this, Label::kDeferred),
       miss(this, Label::kDeferred), generic(this, Label::kDeferred);
 
-  Node* receiver_map = LoadReceiverMap(p->receiver());
+  TNode<Map> receiver_map = LoadReceiverMap(p->receiver());
   GotoIf(IsDeprecatedMap(receiver_map), &miss);
 
   GotoIf(IsUndefined(p->vector()), &generic);
@@ -2807,8 +2813,8 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
   {
     // Check megamorphic case.
     Comment("KeyedLoadIC_try_megamorphic");
-    Branch(WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
-           &generic, &try_polymorphic_name);
+    Branch(TaggedEqual(strong_feedback, MegamorphicSymbolConstant()), &generic,
+           &try_uninitialized);
   }
 
   BIND(&generic);
@@ -2821,42 +2827,49 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
                     p->vector());
   }
 
+  BIND(&try_uninitialized);
+  {
+    // Check uninitialized case.
+    Comment("KeyedLoadIC_try_uninitialized");
+    Branch(TaggedEqual(strong_feedback, UninitializedSymbolConstant()), &miss,
+           &try_polymorphic_name);
+  }
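The new try_uninitialized branch completes the sentinel-based dispatch: megamorphic feedback goes to the generic path, and uninitialized feedback now misses directly instead of falling through to the name probe. A toy model of the resulting order of checks (the enum and string sentinels are ours, not V8's):

    #include <string>

    // Toy dispatch, assumed sentinels: the feedback slot either holds the
    // megamorphic sentinel (use the generic path), the uninitialized sentinel
    // (miss so the runtime can record feedback), or possibly a cached name
    // that the polymorphic-name path can match against.
    enum class Path { kGeneric, kMiss, kTryPolymorphicName };

    Path Dispatch(const std::string& strong_feedback) {
      if (strong_feedback == "megamorphic") return Path::kGeneric;
      if (strong_feedback == "uninitialized") return Path::kMiss;
      return Path::kTryPolymorphicName;
    }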
+
   BIND(&try_polymorphic_name);
   {
     // We might have a name in feedback, and a weak fixed array in the next
     // slot.
-    Node* name = p->name();
     Comment("KeyedLoadIC_try_polymorphic_name");
-    VARIABLE(var_name, MachineRepresentation::kTagged, name);
-    VARIABLE(var_index, MachineType::PointerRepresentation());
+    TVARIABLE(Object, var_name, p->name());
+    TVARIABLE(IntPtrT, var_index);
     Label if_polymorphic_name(this, &var_name), if_internalized(this),
         if_notinternalized(this, Label::kDeferred);
 
     // Fast-case: The recorded {feedback} matches the {name}.
-    GotoIf(WordEqual(strong_feedback, name), &if_polymorphic_name);
+    GotoIf(TaggedEqual(strong_feedback, p->name()), &if_polymorphic_name);
 
     // Try to internalize the {name} if it isn't already.
-    TryToName(name, &miss, &var_index, &if_internalized, &var_name, &miss,
+    TryToName(p->name(), &miss, &var_index, &if_internalized, &var_name, &miss,
               &if_notinternalized);
 
     BIND(&if_internalized);
     {
       // The {var_name} now contains a unique name.
-      Branch(WordEqual(strong_feedback, var_name.value()), &if_polymorphic_name,
-             &miss);
+      Branch(TaggedEqual(strong_feedback, var_name.value()),
+             &if_polymorphic_name, &miss);
     }
 
     BIND(&if_notinternalized);
     {
       // Try to internalize the {name}.
-      Node* function = ExternalConstant(
+      TNode<ExternalReference> function = ExternalConstant(
           ExternalReference::try_internalize_string_function());
-      Node* const isolate_ptr =
+      TNode<ExternalReference> const isolate_ptr =
           ExternalConstant(ExternalReference::isolate_address(isolate()));
-      var_name.Bind(
+      var_name = CAST(
           CallCFunction(function, MachineType::AnyTagged(),
                         std::make_pair(MachineType::Pointer(), isolate_ptr),
-                        std::make_pair(MachineType::AnyTagged(), name)));
+                        std::make_pair(MachineType::AnyTagged(), p->name())));
       Goto(&if_internalized);
     }
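The C call above internalizes the key so that the subsequent feedback comparison can be a cheap tagged-identity check. As a rough standalone analogy (a string table, not V8's internalization machinery):

    #include <string>
    #include <unordered_set>

    // Rough analogy only: internalizing maps equal strings to one canonical
    // instance, after which name equality is identity -- which is why the IC
    // can compare the internalized name to the recorded feedback with
    // TaggedEqual.
    const std::string* Internalize(std::unordered_set<std::string>& table,
                                   const std::string& s) {
      return &*table.insert(s).first;  // pointer to the canonical copy
    }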
 
@@ -2864,11 +2877,10 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
     {
       // If the name comparison succeeded, we know we have a weak fixed array
       // with at least one map/handler pair.
-      Node* name = var_name.value();
       TailCallBuiltin(access_mode == LoadAccessMode::kLoad
                           ? Builtins::kKeyedLoadIC_PolymorphicName
                           : Builtins::kKeyedHasIC_PolymorphicName,
-                      p->context(), p->receiver(), name, p->slot(),
+                      p->context(), p->receiver(), var_name.value(), p->slot(),
                       p->vector());
     }
   }
@@ -2884,8 +2896,8 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
 }
 
 void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
-  VARIABLE(var_index, MachineType::PointerRepresentation());
-  VARIABLE(var_unique, MachineRepresentation::kTagged, p->name());
+  TVARIABLE(IntPtrT, var_index);
+  TVARIABLE(Object, var_unique, p->name());
   Label if_index(this), if_unique_name(this), if_notunique(this),
       if_other(this, Label::kDeferred), if_runtime(this, Label::kDeferred);
 
@@ -2898,16 +2910,17 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
 
   BIND(&if_other);
   {
-    Node* name = CallBuiltin(Builtins::kToName, p->context(), p->name());
-    var_unique.Bind(name);
+    TNode<Name> name =
+        CAST(CallBuiltin(Builtins::kToName, p->context(), p->name()));
+    var_unique = name;
     TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique,
               &if_runtime, &if_notunique);
   }
 
   BIND(&if_index);
   {
-    Node* receiver_map = LoadMap(receiver);
-    Node* instance_type = LoadMapInstanceType(receiver_map);
+    TNode<Map> receiver_map = LoadMap(receiver);
+    TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
     GenericElementLoad(receiver, receiver_map, instance_type, var_index.value(),
                        &if_runtime);
   }
@@ -2915,8 +2928,8 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
   BIND(&if_unique_name);
   {
     LoadICParameters pp(p, var_unique.value());
-    Node* receiver_map = LoadMap(receiver);
-    Node* instance_type = LoadMapInstanceType(receiver_map);
+    TNode<Map> receiver_map = LoadMap(receiver);
+    TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
     GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
                         &if_runtime);
   }
@@ -2941,8 +2954,8 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
         // with this have shown that it causes too much traffic on the stub
         // cache. We may want to re-evaluate that in the future.
         LoadICParameters pp(p, var_unique.value());
-        Node* receiver_map = LoadMap(receiver);
-        Node* instance_type = LoadMapInstanceType(receiver_map);
+        TNode<Map> receiver_map = LoadMap(receiver);
+        TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
         GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
                             &if_runtime, kDontUseStubCache);
       }
@@ -2967,8 +2980,8 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
   Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
 
   Node* receiver = p->receiver();
-  Node* receiver_map = LoadReceiverMap(receiver);
-  Node* name = p->name();
+  TNode<Map> receiver_map = LoadReceiverMap(receiver);
+  TNode<Name> name = CAST(p->name());
   Node* vector = p->vector();
   Node* slot = p->slot();
   TNode<Context> context = p->context();
@@ -2976,10 +2989,11 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
   // When we get here, we know that the {name} matches the recorded
   // feedback name in the {vector} and can safely be used for the
   // LoadIC handler logic below.
-  CSA_ASSERT(this, IsName(name));
   CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(receiver_map)));
-  CSA_ASSERT(this, WordEqual(name, CAST(LoadFeedbackVectorSlot(
-                                       vector, slot, 0, SMI_PARAMETERS))));
+  CSA_ASSERT(this,
+             TaggedEqual(
+                 name, LoadFeedbackVectorSlot(vector, slot, 0, SMI_PARAMETERS)),
+             name, vector);
 
   // Check if we have a matching handler for the {receiver_map}.
   TNode<MaybeObject> feedback_element =
@@ -3014,11 +3028,10 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
   Label if_handler(this, &var_handler),
       if_handler_from_stub_cache(this, &var_handler, Label::kDeferred),
       try_polymorphic(this, Label::kDeferred),
-      try_megamorphic(this, Label::kDeferred),
-      try_uninitialized(this, Label::kDeferred), miss(this, Label::kDeferred),
+      try_megamorphic(this, Label::kDeferred), miss(this, Label::kDeferred),
       no_feedback(this, Label::kDeferred);
 
-  Node* receiver_map = LoadReceiverMap(p->receiver());
+  TNode<Map> receiver_map = LoadReceiverMap(p->receiver());
   GotoIf(IsDeprecatedMap(receiver_map), &miss);
 
   GotoIf(IsUndefined(p->vector()), &no_feedback);
@@ -3047,26 +3060,16 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
   BIND(&try_megamorphic);
   {
     // Check megamorphic case.
-    GotoIfNot(
-        WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
-        &try_uninitialized);
+    GotoIfNot(TaggedEqual(strong_feedback, MegamorphicSymbolConstant()), &miss);
 
     TryProbeStubCache(isolate()->store_stub_cache(), p->receiver(), p->name(),
                       &if_handler, &var_handler, &miss);
   }
-  BIND(&try_uninitialized);
-  {
-    // Check uninitialized case.
-    Branch(
-        WordEqual(strong_feedback, LoadRoot(RootIndex::kuninitialized_symbol)),
-        &no_feedback, &miss);
-  }
 
   BIND(&no_feedback);
   {
-    TailCallBuiltin(Builtins::kStoreIC_Uninitialized, p->context(),
-                    p->receiver(), p->name(), p->value(), p->slot(),
-                    p->vector());
+    TailCallBuiltin(Builtins::kStoreIC_NoFeedback, p->context(), p->receiver(),
+                    p->name(), p->value(), p->slot());
   }
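With the uninitialized branch removed, the presence of a feedback vector alone selects the no-feedback path, and the new builtin no longer takes the (necessarily undefined) vector argument. A sketch of the simplified classification, under our own naming:

    // Sketch with invented names: a store without a feedback vector goes to
    // the dedicated no-feedback builtin; with a vector, megamorphic feedback
    // probes the stub cache and anything unrecognized misses.
    enum class StorePath { kNoFeedback, kProbeStubCache, kMiss };

    StorePath Classify(bool has_vector, bool feedback_is_megamorphic) {
      if (!has_vector) return StorePath::kNoFeedback;
      return feedback_is_megamorphic ? StorePath::kProbeStubCache
                                     : StorePath::kMiss;
    }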
 
   BIND(&miss);
@@ -3085,9 +3088,11 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
   BIND(&if_heapobject);
   {
     Label try_handler(this), miss(this, Label::kDeferred);
-    GotoIf(
-        WordEqual(maybe_weak_ref, LoadRoot(RootIndex::kpremonomorphic_symbol)),
-        &miss);
+    // We use pre-monomorphic state for global stores that run into
+    // interceptors because the property doesn't exist yet. Using
+    // pre-monomorphic state gives the IC a chance to find more information
+    // the second time it runs.
+    GotoIf(TaggedEqual(maybe_weak_ref, PremonomorphicSymbolConstant()), &miss);
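The comment describes a two-step protocol; here is a toy state machine of it, with our own names rather than V8's IC states:

    // Toy model: the first store that hits an interceptor leaves the slot
    // pre-monomorphic, so the next store misses again and can record real
    // map/handler feedback once the property actually exists.
    enum class IcState { kPremonomorphic, kMonomorphic };

    IcState OnStoreMiss(IcState state, bool property_exists_now) {
      if (state == IcState::kPremonomorphic && property_exists_now)
        return IcState::kMonomorphic;   // second pass: record feedback
      return IcState::kPremonomorphic;  // first pass: property not there yet
    }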
 
     CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref));
     TNode<PropertyCell> property_cell =
@@ -3103,11 +3108,10 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
       TNode<MaybeObject> handler = LoadFeedbackVectorSlot(
           pp->vector(), pp->slot(), kTaggedSize, SMI_PARAMETERS);
 
-      GotoIf(WordEqual(handler, LoadRoot(RootIndex::kuninitialized_symbol)),
-             &miss);
+      GotoIf(TaggedEqual(handler, UninitializedSymbolConstant()), &miss);
 
       DCHECK_NULL(pp->receiver());
-      Node* native_context = LoadNativeContext(pp->context());
+      TNode<Context> native_context = LoadNativeContext(pp->context());
       StoreICParameters p(
           pp->context(),
           LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX),
@@ -3139,7 +3143,7 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
 }
 
 void AccessorAssembler::StoreGlobalIC_PropertyCellCase(Node* property_cell,
-                                                       Node* value,
+                                                       TNode<Object> value,
                                                        ExitPoint* exit_point,
                                                        Label* miss) {
   Comment("StoreGlobalIC_TryPropertyCellCase");
@@ -3148,16 +3152,17 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(Node* property_cell,
   // Load the payload of the global parameter cell. A hole indicates that
   // the cell has been invalidated and that the store must be handled by the
   // runtime.
-  Node* cell_contents =
+  TNode<Object> cell_contents =
       LoadObjectField(property_cell, PropertyCell::kValueOffset);
-  Node* details = LoadAndUntagToWord32ObjectField(
+  TNode<Int32T> details = LoadAndUntagToWord32ObjectField(
       property_cell, PropertyCell::kPropertyDetailsRawOffset);
   GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), miss);
   CSA_ASSERT(this,
              Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
                          Int32Constant(kData)));
 
-  Node* type = DecodeWord32<PropertyDetails::PropertyCellTypeField>(details);
+  TNode<Uint32T> type =
+      DecodeWord32<PropertyDetails::PropertyCellTypeField>(details);
 
   Label constant(this), store(this), not_smi(this);
 
@@ -3183,9 +3188,9 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(Node* property_cell,
   BIND(&not_smi);
   {
     GotoIf(TaggedIsSmi(value), miss);
-    Node* expected_map = LoadMap(cell_contents);
-    Node* map = LoadMap(value);
-    GotoIfNot(WordEqual(expected_map, map), miss);
+    TNode<Map> expected_map = LoadMap(CAST(cell_contents));
+    TNode<Map> map = LoadMap(CAST(value));
+    GotoIfNot(TaggedEqual(expected_map, map), miss);
     Goto(&store);
   }
 
@@ -3197,7 +3202,7 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(Node* property_cell,
 
   BIND(&constant);
   {
-    GotoIfNot(WordEqual(cell_contents, value), miss);
+    GotoIfNot(TaggedEqual(cell_contents, value), miss);
     exit_point->Return(value);
   }
 }
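Taken together, the branches of StoreGlobalIC_PropertyCellCase implement the property-cell store protocol: read-only cells miss, constant cells accept only the value they already hold, and other non-Smi stores must preserve the cached value's map. A condensed standalone model, with ints standing in for tagged values and the map check elided:

    // Condensed model, not V8 code: the cell's state decides which stores
    // may proceed without falling back to the runtime.
    struct CellModel {
      bool read_only = false;
      bool is_constant = false;
      int value = 0;
    };

    bool TryStore(CellModel& cell, int new_value) {
      if (cell.read_only) return false;                       // miss
      if (cell.is_constant) return cell.value == new_value;   // identity only
      cell.value = new_value;                                 // ordinary store
      return true;
    }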
@@ -3213,7 +3218,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
         no_feedback(this, Label::kDeferred),
         try_polymorphic_name(this, Label::kDeferred);
 
-    Node* receiver_map = LoadReceiverMap(p->receiver());
+    TNode<Map> receiver_map = LoadReceiverMap(p->receiver());
     GotoIf(IsDeprecatedMap(receiver_map), &miss);
 
     GotoIf(IsUndefined(p->vector()), &no_feedback);
@@ -3244,9 +3249,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
     {
       // Check megamorphic case.
       Comment("KeyedStoreIC_try_megamorphic");
-      Branch(
-          WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
-          &no_feedback, &try_polymorphic_name);
+      Branch(TaggedEqual(strong_feedback, MegamorphicSymbolConstant()),
+             &no_feedback, &try_polymorphic_name);
     }
 
     BIND(&no_feedback);
@@ -3259,7 +3263,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
     {
       // We might have a name in feedback, and a fixed array in the next slot.
       Comment("KeyedStoreIC_try_polymorphic_name");
-      GotoIfNot(WordEqual(strong_feedback, p->name()), &miss);
+      GotoIfNot(TaggedEqual(strong_feedback, p->name()), &miss);
       // If the name comparison succeeded, we know we have a feedback vector
       // with at least one map/handler pair.
       TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot(
@@ -3286,7 +3290,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
         try_polymorphic(this, Label::kDeferred),
         try_megamorphic(this, Label::kDeferred);
 
-    Node* array_map = LoadReceiverMap(p->receiver());
+    TNode<Map> array_map = LoadReceiverMap(p->receiver());
     GotoIf(IsDeprecatedMap(array_map), &miss);
 
     GotoIf(IsUndefined(p->vector()), &miss);
@@ -3314,8 +3318,8 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
         TNode<Map> transition_map =
             CAST(GetHeapObjectAssumeWeak(maybe_transition_map, &miss));
         GotoIf(IsDeprecatedMap(transition_map), &miss);
-        Node* code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
-        CSA_ASSERT(this, IsCode(code));
+        TNode<Code> code =
+            CAST(LoadObjectField(handler, StoreHandler::kSmiHandlerOffset));
         TailCallStub(StoreTransitionDescriptor{}, code, p->context(),
                      p->receiver(), p->name(), transition_map, p->value(),
                      p->slot(), p->vector());
@@ -3335,14 +3339,12 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
     BIND(&try_megamorphic);
     {
       Comment("StoreInArrayLiteralIC_try_megamorphic");
-      CSA_ASSERT(this,
-                 Word32Or(WordEqual(strong_feedback,
-                                    LoadRoot(RootIndex::kuninitialized_symbol)),
-                          WordEqual(strong_feedback,
-                                    LoadRoot(RootIndex::kmegamorphic_symbol))));
-      GotoIfNot(
-          WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
-          &miss);
+      CSA_ASSERT(
+          this,
+          Word32Or(TaggedEqual(strong_feedback, UninitializedSymbolConstant()),
+                   TaggedEqual(strong_feedback, MegamorphicSymbolConstant())));
+      GotoIfNot(TaggedEqual(strong_feedback, MegamorphicSymbolConstant()),
+                &miss);
       TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context(),
                       p->value(), p->receiver(), p->name());
     }
@@ -3363,7 +3365,7 @@ void AccessorAssembler::GenerateLoadIC() {
   using Descriptor = LoadWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3376,7 +3378,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
   using Descriptor = LoadWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3390,7 +3392,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
 
   BIND(&if_handler);
   LazyLoadICParameters p([=] { return context; }, receiver,
-                         [=] { return CAST(name); }, slot, vector);
+                         [=] { return name; }, slot, vector);
   HandleLoadICHandlerCase(&p, CAST(var_handler.value()), &miss, &direct_exit);
 
   BIND(&miss);
@@ -3402,7 +3404,7 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
   using Descriptor = LoadWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3411,7 +3413,7 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
   TVARIABLE(MaybeObject, var_handler);
   Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
 
-  Node* receiver_map = LoadReceiverMap(receiver);
+  TNode<Map> receiver_map = LoadReceiverMap(receiver);
   TNode<MaybeObject> feedback_element =
       LoadFeedbackVectorSlot(vector, slot, 0, SMI_PARAMETERS);
   TNode<HeapObject> feedback = CAST(feedback_element);
@@ -3432,27 +3434,26 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
                                 slot, vector);
 }
 
-void AccessorAssembler::GenerateLoadIC_Uninitialized() {
-  using Descriptor = LoadWithVectorDescriptor;
+void AccessorAssembler::GenerateLoadIC_NoFeedback() {
+  using Descriptor = LoadDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
-  Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
-  LoadICParameters p(context, receiver, name, slot, vector);
-  LoadIC_Uninitialized(&p);
+  LoadICParameters p(context, receiver, name, slot, UndefinedConstant());
+  LoadIC_NoFeedback(&p);
 }
 
 void AccessorAssembler::GenerateLoadICTrampoline() {
   using Descriptor = LoadDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  Node* vector = LoadFeedbackVectorForStub();
+  TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
 
   TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
 }
@@ -3461,10 +3462,10 @@ void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
   using Descriptor = LoadDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  Node* vector = LoadFeedbackVectorForStub();
+  TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
 
   TailCallBuiltin(Builtins::kLoadIC_Megamorphic, context, receiver, name, slot,
                   vector);
@@ -3473,7 +3474,7 @@ void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
 void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
   using Descriptor = LoadGlobalWithVectorDescriptor;
 
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Name> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3484,16 +3485,16 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
       // lazy_context
       [=] { return context; },
       // lazy_name
-      [=] { return CAST(name); }, typeof_mode, &direct_exit);
+      [=] { return name; }, typeof_mode, &direct_exit);
 }
 
 void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
   using Descriptor = LoadGlobalDescriptor;
 
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  Node* vector = LoadFeedbackVectorForStub();
+  TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
 
   Callable callable =
       CodeFactory::LoadGlobalICInOptimizedCode(isolate(), typeof_mode);
@@ -3504,7 +3505,7 @@ void AccessorAssembler::GenerateKeyedLoadIC() {
   using Descriptor = LoadWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3517,7 +3518,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
   using Descriptor = LoadWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3530,10 +3531,10 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
   using Descriptor = LoadDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  Node* vector = LoadFeedbackVectorForStub();
+  TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
 
   TailCallBuiltin(Builtins::kKeyedLoadIC, context, receiver, name, slot,
                   vector);
@@ -3543,10 +3544,10 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
   using Descriptor = LoadDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  Node* vector = LoadFeedbackVectorForStub();
+  TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
 
   TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, context, receiver, name,
                   slot, vector);
@@ -3556,7 +3557,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
   using Descriptor = LoadWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3568,7 +3569,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
 void AccessorAssembler::GenerateStoreGlobalIC() {
   using Descriptor = StoreGlobalWithVectorDescriptor;
 
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* value = Parameter(Descriptor::kValue);
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
@@ -3581,11 +3582,11 @@ void AccessorAssembler::GenerateStoreGlobalIC() {
 void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
   using Descriptor = StoreGlobalDescriptor;
 
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* value = Parameter(Descriptor::kValue);
   Node* slot = Parameter(Descriptor::kSlot);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  Node* vector = LoadFeedbackVectorForStub();
+  TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
 
   TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
 }
@@ -3594,7 +3595,7 @@ void AccessorAssembler::GenerateStoreIC() {
   using Descriptor = StoreWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* value = Parameter(Descriptor::kValue);
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
@@ -3608,11 +3609,11 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
   using Descriptor = StoreDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* value = Parameter(Descriptor::kValue);
   Node* slot = Parameter(Descriptor::kSlot);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  Node* vector = LoadFeedbackVectorForStub();
+  TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
 
   TailCallBuiltin(Builtins::kStoreIC, context, receiver, name, value, slot,
                   vector);
@@ -3622,7 +3623,7 @@ void AccessorAssembler::GenerateKeyedStoreIC() {
   using Descriptor = StoreWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* value = Parameter(Descriptor::kValue);
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
@@ -3636,11 +3637,11 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
   using Descriptor = StoreDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* value = Parameter(Descriptor::kValue);
   Node* slot = Parameter(Descriptor::kSlot);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-  Node* vector = LoadFeedbackVectorForStub();
+  TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
 
   TailCallBuiltin(Builtins::kKeyedStoreIC, context, receiver, name, value, slot,
                   vector);
@@ -3650,7 +3651,7 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
   using Descriptor = StoreWithVectorDescriptor;
 
   Node* array = Parameter(Descriptor::kReceiver);
-  Node* index = Parameter(Descriptor::kName);
+  TNode<Object> index = CAST(Parameter(Descriptor::kName));
   Node* value = Parameter(Descriptor::kValue);
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
@@ -3798,8 +3799,8 @@ void AccessorAssembler::GenerateCloneObjectIC() {
     TNode<IntPtrT> field_offset_difference =
         TimesTaggedSize(IntPtrSub(result_start, source_start));
 
-    // Just copy the fields as raw data (pretending that there are no
-    // MutableHeapNumbers). This doesn't need write barriers.
+    // Just copy the fields as raw data (pretending that there are no mutable
+    // HeapNumbers). This doesn't need write barriers.
     BuildFastLoop(
         source_start, source_size,
         [=](Node* field_index) {
@@ -3813,7 +3814,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
         },
         1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
 
-    // If MutableHeapNumbers can occur, we need to go through the {object}
+    // If mutable HeapNumbers can occur, we need to go through the {object}
     // again here and properly clone them. We use a second loop here to
     // ensure that the GC (and heap verifier) always sees properly initialized
     // objects, i.e. never hits undefined values in double fields.
@@ -3827,11 +3828,10 @@ void AccessorAssembler::GenerateCloneObjectIC() {
             TNode<Object> field = LoadObjectField(object, result_offset);
             Label if_done(this), if_mutableheapnumber(this, Label::kDeferred);
             GotoIf(TaggedIsSmi(field), &if_done);
-            Branch(IsMutableHeapNumber(CAST(field)), &if_mutableheapnumber,
-                   &if_done);
+            Branch(IsHeapNumber(CAST(field)), &if_mutableheapnumber, &if_done);
             BIND(&if_mutableheapnumber);
             {
-              TNode<Object> value = AllocateMutableHeapNumberWithValue(
+              TNode<HeapNumber> value = AllocateHeapNumberWithValue(
                   LoadHeapNumberValue(UncheckedCast<HeapNumber>(field)));
               StoreObjectField(object, result_offset, value);
               Goto(&if_done);
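The surrounding loop is the second pass of the clone: the first pass copied fields bitwise, and this pass re-boxes heap numbers so the clone never aliases the source's number storage and the GC never observes a half-built object. A compact sketch of the same two-pass shape, with shared_ptr standing in for a boxed number:

    #include <memory>
    #include <vector>

    struct Field {
      bool is_boxed_number = false;
      std::shared_ptr<double> box;  // stands in for a HeapNumber
    };

    std::vector<Field> CloneFields(const std::vector<Field>& source) {
      std::vector<Field> clone = source;  // pass 1: raw copy (boxes shared)
      for (Field& f : clone) {            // pass 2: re-box numbers
        if (f.is_boxed_number) f.box = std::make_shared<double>(*f.box);
      }
      return clone;
    }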
@@ -3856,14 +3856,11 @@ void AccessorAssembler::GenerateCloneObjectIC() {
   BIND(&try_megamorphic);
   {
     Comment("CloneObjectIC_try_megamorphic");
-    CSA_ASSERT(this,
-               Word32Or(WordEqual(strong_feedback,
-                                  LoadRoot(RootIndex::kuninitialized_symbol)),
-                        WordEqual(strong_feedback,
-                                  LoadRoot(RootIndex::kmegamorphic_symbol))));
-    GotoIfNot(
-        WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
-        &miss);
+    CSA_ASSERT(
+        this,
+        Word32Or(TaggedEqual(strong_feedback, UninitializedSymbolConstant()),
+                 TaggedEqual(strong_feedback, MegamorphicSymbolConstant())));
+    GotoIfNot(TaggedEqual(strong_feedback, MegamorphicSymbolConstant()), &miss);
     Goto(&slow);
   }
 
@@ -3876,8 +3873,8 @@ void AccessorAssembler::GenerateCloneObjectIC() {
   BIND(&miss);
   {
     Comment("CloneObjectIC_miss");
-    Node* map_or_result = CallRuntime(Runtime::kCloneObjectIC_Miss, context,
-                                      source, flags, slot, vector);
+    TNode<HeapObject> map_or_result = CAST(CallRuntime(
+        Runtime::kCloneObjectIC_Miss, context, source, flags, slot, vector));
     var_handler = UncheckedCast<MaybeObject>(map_or_result);
     GotoIf(IsMap(map_or_result), &if_handler);
     CSA_ASSERT(this, IsJSObject(map_or_result));
@@ -3889,7 +3886,7 @@ void AccessorAssembler::GenerateKeyedHasIC() {
   using Descriptor = LoadWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3902,7 +3899,7 @@ void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
   using Descriptor = LoadWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   // TODO(magardn): implement HasProperty handling in KeyedLoadICGeneric
   Return(HasProperty(context, receiver, name,
@@ -3913,7 +3910,7 @@ void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() {
   using Descriptor = LoadWithVectorDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 6127b244e3e63c..0de2292fd6d4a2 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -30,7 +30,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
   void GenerateLoadIC();
   void GenerateLoadIC_Megamorphic();
   void GenerateLoadIC_Noninlined();
-  void GenerateLoadIC_Uninitialized();
+  void GenerateLoadIC_NoFeedback();
   void GenerateLoadICTrampoline();
   void GenerateLoadICTrampoline_Megamorphic();
   void GenerateKeyedLoadIC();
@@ -56,9 +56,9 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
 
   void GenerateStoreInArrayLiteralIC();
 
-  void TryProbeStubCache(StubCache* stub_cache, Node* receiver, Node* name,
-                         Label* if_handler, TVariable<MaybeObject>* var_handler,
-                         Label* if_miss);
+  void TryProbeStubCache(StubCache* stub_cache, Node* receiver,
+                         TNode<Object> name, Label* if_handler,
+                         TVariable<MaybeObject>* var_handler, Label* if_miss);
 
   Node* StubCachePrimaryOffsetForTesting(Node* name, Node* map) {
     return StubCachePrimaryOffset(name, map);
@@ -68,7 +68,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
   }
 
   struct LoadICParameters {
-    LoadICParameters(TNode<Context> context, Node* receiver, Node* name,
+    LoadICParameters(TNode<Context> context, Node* receiver, TNode<Object> name,
                      Node* slot, Node* vector, Node* holder = nullptr)
         : context_(context),
           receiver_(receiver),
@@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
           vector_(vector),
           holder_(holder ? holder : receiver) {}
 
-    LoadICParameters(const LoadICParameters* p, Node* unique_name)
+    LoadICParameters(const LoadICParameters* p, TNode<Object> unique_name)
         : context_(p->context_),
           receiver_(p->receiver_),
           name_(unique_name),
@@ -87,7 +87,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
 
     TNode<Context> context() const { return context_; }
     Node* receiver() const { return receiver_; }
-    Node* name() const { return name_; }
+    TNode<Object> name() const { return name_; }
     Node* slot() const { return slot_; }
     Node* vector() const { return vector_; }
     Node* holder() const { return holder_; }
@@ -95,7 +95,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
    private:
     TNode<Context> context_;
     Node* receiver_;
-    Node* name_;
+    TNode<Object> name_;
     Node* slot_;
     Node* vector_;
     Node* holder_;
@@ -119,13 +119,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
           holder_(p->holder()) {
       TNode<Context> p_context = p->context();
       context_ = [=] { return p_context; };
-      TNode<Object> p_name = TNode<Object>::UncheckedCast(p->name());
+      TNode<Object> p_name = p->name();
       name_ = [=] { return p_name; };
     }
 
     TNode<Context> context() const { return context_(); }
     Node* receiver() const { return receiver_; }
-    Node* name() const { return name_(); }
+    TNode<Object> name() const { return name_(); }
     Node* slot() const { return slot_; }
     Node* vector() const { return vector_; }
     Node* holder() const { return holder_; }
@@ -156,8 +156,9 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
 
  protected:
   struct StoreICParameters : public LoadICParameters {
-    StoreICParameters(TNode<Context> context, Node* receiver, Node* name,
-                      SloppyTNode<Object> value, Node* slot, Node* vector)
+    StoreICParameters(TNode<Context> context, Node* receiver,
+                      TNode<Object> name, SloppyTNode<Object> value, Node* slot,
+                      Node* vector)
         : LoadICParameters(context, receiver, name, slot, vector),
           value_(value) {}
 
@@ -191,12 +192,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
   void OverwriteExistingFastDataProperty(Node* object, Node* object_map,
                                          Node* descriptors,
                                          Node* descriptor_name_index,
-                                         Node* details, Node* value,
+                                         Node* details, TNode<Object> value,
                                          Label* slow,
                                          bool do_transitioning_store);
 
   void CheckFieldType(TNode<DescriptorArray> descriptors, Node* name_index,
-                      Node* representation, Node* value, Label* bailout);
+                      TNode<Word32T> representation, Node* value,
+                      Label* bailout);
 
  private:
   // Stub generation entry points.
@@ -204,7 +206,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
   // LoadIC contains the full LoadIC logic, while LoadIC_Noninlined contains
   // logic not inlined into Ignition bytecode handlers.
   void LoadIC(const LoadICParameters* p);
-  void LoadIC_Noninlined(const LoadICParameters* p, Node* receiver_map,
+  void LoadIC_Noninlined(const LoadICParameters* p, TNode<Map> receiver_map,
                          TNode<HeapObject> feedback,
                          TVariable<MaybeObject>* var_handler, Label* if_handler,
                          Label* miss, ExitPoint* exit_point);
@@ -214,7 +216,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
   TNode<MaybeObject> LoadDescriptorValueOrFieldType(
       TNode<Map> map, TNode<IntPtrT> descriptor_entry);
 
-  void LoadIC_Uninitialized(const LoadICParameters* p);
+  void LoadIC_NoFeedback(const LoadICParameters* p);
 
   void KeyedLoadIC(const LoadICParameters* p, LoadAccessMode access_mode);
   void KeyedLoadICGeneric(const LoadICParameters* p);
@@ -222,7 +224,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
                                   LoadAccessMode access_mode);
   void StoreIC(const StoreICParameters* p);
   void StoreGlobalIC(const StoreICParameters* p);
-  void StoreGlobalIC_PropertyCellCase(Node* property_cell, Node* value,
+  void StoreGlobalIC_PropertyCellCase(Node* property_cell, TNode<Object> value,
                                       ExitPoint* exit_point, Label* miss);
   void KeyedStoreIC(const StoreICParameters* p);
   void StoreInArrayLiteralIC(const StoreICParameters* p);
@@ -275,8 +277,9 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
                        Variable* var_double_value, Label* rebox_double,
                        ExitPoint* exit_point);
 
-  void EmitAccessCheck(Node* expected_native_context, Node* context,
-                       Node* receiver, Label* can_access, Label* miss);
+  void EmitAccessCheck(TNode<Context> expected_native_context,
+                       TNode<Context> context, Node* receiver,
+                       Label* can_access, Label* miss);
 
   void HandleLoadICSmiHandlerLoadNamedCase(
       const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
@@ -317,7 +320,8 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
                                  Representation representation, Node* value,
                                  Label* miss);
 
-  void CheckPrototypeValidityCell(Node* maybe_validity_cell, Label* miss);
+  void CheckPrototypeValidityCell(TNode<Object> maybe_validity_cell,
+                                  Label* miss);
   void HandleStoreICNativeDataProperty(const StoreICParameters* p, Node* holder,
                                        Node* handler_word);
 
@@ -366,7 +370,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
   void EmitFastElementsBoundsCheck(Node* object, Node* elements,
                                    Node* intptr_index,
                                    Node* is_jsarray_condition, Label* miss);
-  void EmitElementLoad(Node* object, Node* elements_kind,
+  void EmitElementLoad(Node* object, TNode<Word32T> elements_kind,
                        SloppyTNode<IntPtrT> key, Node* is_jsarray_condition,
                        Label* if_hole, Label* rebox_double,
                        Variable* var_double_value,
@@ -387,8 +391,8 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
   Node* StubCacheSecondaryOffset(Node* name, Node* seed);
 
   void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
-                              Node* entry_offset, Node* name, Node* map,
-                              Label* if_handler,
+                              Node* entry_offset, TNode<Object> name,
+                              TNode<Map> map, Label* if_handler,
                               TVariable<MaybeObject>* var_handler,
                               Label* if_miss);
 };
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index 50b7cd1ebb0bae..f6bec6eab9fa0b 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -114,8 +114,9 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
   BIND(&do_fadd);
   {
     var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
-    Node* value = Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
-    Node* result = AllocateHeapNumberWithValue(value);
+    TNode<Float64T> value =
+        Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
+    TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
     var_result.Bind(result);
     Goto(&end);
   }
@@ -124,8 +125,9 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
   {
     // No checks on rhs are done yet. We just know lhs is not a number or Smi.
     Label if_lhsisoddball(this), if_lhsisnotoddball(this);
-    Node* lhs_instance_type = LoadInstanceType(lhs);
-    Node* lhs_is_oddball = InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
+    TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
+    TNode<BoolT> lhs_is_oddball =
+        InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
     Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);
 
     BIND(&if_lhsisoddball);
@@ -154,7 +156,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
       // Check if the {rhs} is a smi, and exit the string check early if it is.
       GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
 
-      Node* rhs_instance_type = LoadInstanceType(rhs);
+      TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
 
       // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
       // need an Oddball check.
@@ -173,8 +175,9 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
   {
     // Check if rhs is an oddball. At this point we know lhs is either a
     // Smi or number or oddball and rhs is not a number or Smi.
-    Node* rhs_instance_type = LoadInstanceType(rhs);
-    Node* rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
+    TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
+    TNode<BoolT> rhs_is_oddball =
+        InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
     GotoIf(rhs_is_oddball, &call_with_oddball_feedback);
     Goto(&call_with_any_feedback);
   }
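The oddball checks pick a rung on the binary-op feedback lattice before giving up and reporting Any. A toy version of that lattice; in V8 the feedback kinds are likewise bit sets whose union only widens the recorded type (the names and bit values here are abbreviated):

    #include <cstdint>

    // Toy lattice: combining feedback is a bitwise union, so repeated
    // updates can only widen, never narrow, the recorded type.
    enum Feedback : uint32_t {
      kSignedSmall = 1u << 0,
      kNumber = 1u << 1,
      kNumberOrOddball = 1u << 2,
      kAny = 0xFFu,
    };

    inline Feedback Combine(Feedback a, Feedback b) {
      return static_cast<Feedback>(a | b);
    }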
@@ -322,9 +325,10 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
   {
     // No checks on rhs are done yet. We just know lhs is not a number or Smi.
     Label if_left_bigint(this), if_left_oddball(this);
-    Node* lhs_instance_type = LoadInstanceType(lhs);
+    TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
     GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_left_bigint);
-    Node* lhs_is_oddball = InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
+    TNode<BoolT> lhs_is_oddball =
+        InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
     Branch(lhs_is_oddball, &if_left_oddball, &call_with_any_feedback);
 
     BIND(&if_left_oddball);
@@ -361,9 +365,10 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
   {
     // Check if rhs is an oddball. At this point we know lhs is either a
     // Smi or number or oddball and rhs is not a number or Smi.
-    Node* rhs_instance_type = LoadInstanceType(rhs);
+    TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
     GotoIf(IsBigIntInstanceType(rhs_instance_type), &if_bigint);
-    Node* rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
+    TNode<BoolT> rhs_is_oddball =
+        InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
     GotoIfNot(rhs_is_oddball, &call_with_any_feedback);
 
     var_type_feedback.Bind(
@@ -437,7 +442,7 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
     BIND(&if_overflow);
     {
       var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
-      Node* value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs));
+      TNode<Float64T> value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs));
       var_result = AllocateHeapNumberWithValue(value);
       Goto(&end);
     }
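On Smi overflow the subtraction is redone in double arithmetic, boxed in a freshly allocated HeapNumber, and the feedback widens to kNumber. The standalone arithmetic underneath, for reference (32-bit bounds used for simplicity; real Smis are narrower on some builds):

    #include <cstdint>
    #include <limits>
    #include <optional>

    // Reference arithmetic only: detect overflow of the small-integer range,
    // then fall back to the double computation the boxed result would hold.
    std::optional<int32_t> TrySmiSub(int32_t lhs, int32_t rhs) {
      int64_t wide = int64_t{lhs} - int64_t{rhs};
      if (wide < std::numeric_limits<int32_t>::min() ||
          wide > std::numeric_limits<int32_t>::max()) {
        return std::nullopt;  // overflow: box SubAsDouble(lhs, rhs) instead
      }
      return static_cast<int32_t>(wide);
    }

    double SubAsDouble(int32_t lhs, int32_t rhs) {
      return static_cast<double>(lhs) - static_cast<double>(rhs);
    }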
@@ -490,7 +495,7 @@ Node* BinaryOpAssembler::Generate_DivideWithFeedback(
     {
       var_type_feedback->Bind(
           SmiConstant(BinaryOperationFeedback::kSignedSmallInputs));
-      Node* value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs));
+      TNode<Float64T> value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs));
       var_result.Bind(AllocateHeapNumberWithValue(value));
       Goto(&end);
     }
@@ -528,7 +533,7 @@ Node* BinaryOpAssembler::Generate_ExponentiateWithFeedback(
     Node* context, Node* base, Node* exponent, Node* slot_id,
     Node* feedback_vector, bool rhs_is_smi) {
   // We currently don't optimize exponentiation based on feedback.
-  Node* dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
+  TNode<Smi> dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
   UpdateFeedback(dummy_feedback, feedback_vector, slot_id);
   return CallBuiltin(Builtins::kExponentiate, context, base, exponent);
 }
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index f5cd0c1de7536f..c0ff8a4c9b111e 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -51,8 +51,8 @@ Handle<Smi> LoadHandler::LoadField(Isolate* isolate, FieldIndex field_index) {
   return handle(Smi::FromInt(config), isolate);
 }
 
-Handle<Smi> LoadHandler::LoadConstant(Isolate* isolate, int descriptor) {
-  int config = KindBits::encode(kConstant) | DescriptorBits::encode(descriptor);
+Handle<Smi> LoadHandler::LoadConstantFromPrototype(Isolate* isolate) {
+  int config = KindBits::encode(kConstantFromPrototype);
   return handle(Smi::FromInt(config), isolate);
 }
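The new kind drops the descriptor payload: the Smi handler now encodes only the kind, since the constant is re-read from the prototype holder instead of being addressed through a descriptor index. Schematically (the shift and the kind value below are placeholders, not V8's actual encoding):

    #include <cstdint>

    // Placeholder encoding: only the kind bits remain in the Smi handler.
    constexpr uint32_t kKindShift = 0;
    constexpr uint32_t kConstantFromPrototypeKind = 3;  // hypothetical value

    constexpr uint32_t EncodeKind(uint32_t kind) { return kind << kKindShift; }
    constexpr uint32_t kSmiHandlerBits = EncodeKind(kConstantFromPrototypeKind);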
 
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 0b8ebd2bbe3d80..814935c6ebe996 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -30,7 +30,7 @@ int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
                             Handle<Smi>* smi_handler, Handle<Map> receiver_map,
                             Handle<JSReceiver> holder, MaybeObjectHandle data1,
                             MaybeObjectHandle maybe_data2) {
-  int checks_count = 0;
+  int data_size = 1;
   // The holder-is-receiver case itself does not add entries unless an
   // optional data2 value is provided.
 
@@ -51,7 +51,7 @@ int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
       using Bit = typename ICHandler::DoAccessCheckOnReceiverBits;
       *smi_handler = SetBitFieldValue<Bit>(isolate, *smi_handler, true);
     }
-    checks_count++;
+    data_size++;
   } else if (receiver_map->is_dictionary_map() &&
              !receiver_map->IsJSGlobalObjectMap()) {
     if (!fill_handler) {
@@ -67,16 +67,16 @@ int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
     if (fill_handler) {
       // This value will go to either the data2 or the data3 slot, depending
       // on whether the data2 slot is already occupied by the native context.
-      if (checks_count == 0) {
+      if (data_size == 1) {
         handler->set_data2(*maybe_data2);
       } else {
-        DCHECK_EQ(1, checks_count);
+        DCHECK_EQ(2, data_size);
         handler->set_data3(*maybe_data2);
       }
     }
-    checks_count++;
+    data_size++;
   }
-  return checks_count;
+  return data_size;
 }
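The rename is an off-by-one simplification: the counter now starts at 1 because data1 is always stored, so callers can size the handler with data_size directly instead of computing 1 + checks_count. The accounting in isolation:

    // Accounting sketch matching the function above: data1 is always
    // present; an access check claims data2; an optional extra payload
    // claims the next free slot (data2 or data3).
    int ComputeDataSize(bool needs_access_check, bool has_extra_payload) {
      int data_size = 1;                    // data1
      if (needs_access_check) ++data_size;  // native context -> data2
      if (has_extra_payload) ++data_size;   // payload -> data2 or data3
      return data_size;
    }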
 
 // Returns 0 if the validity cell check is enough to ensure that the
@@ -86,10 +86,10 @@ int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
 // Returns -1 if the handler has to be compiled or the number of prototype
 // checks otherwise.
 template <typename ICHandler>
-int GetPrototypeCheckCount(
-    Isolate* isolate, Handle<Smi>* smi_handler, Handle<Map> receiver_map,
-    Handle<JSReceiver> holder, MaybeObjectHandle data1,
-    MaybeObjectHandle maybe_data2 = MaybeObjectHandle()) {
+int GetHandlerDataSize(Isolate* isolate, Handle<Smi>* smi_handler,
+                       Handle<Map> receiver_map, Handle<JSReceiver> holder,
+                       MaybeObjectHandle data1,
+                       MaybeObjectHandle maybe_data2 = MaybeObjectHandle()) {
   DCHECK_NOT_NULL(smi_handler);
   return InitPrototypeChecksImpl<ICHandler, false>(isolate, Handle<ICHandler>(),
                                                    smi_handler, receiver_map,
@@ -121,14 +121,13 @@ Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
     data1 = maybe_data1;
   }
 
-  int checks_count = GetPrototypeCheckCount<LoadHandler>(
+  int data_size = GetHandlerDataSize<LoadHandler>(
       isolate, &smi_handler, receiver_map, holder, data1, maybe_data2);
 
   Handle<Object> validity_cell =
       Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
 
-  int data_count = 1 + checks_count;
-  Handle<LoadHandler> handler = isolate->factory()->NewLoadHandler(data_count);
+  Handle<LoadHandler> handler = isolate->factory()->NewLoadHandler(data_size);
 
   handler->set_smi_handler(*smi_handler);
   handler->set_validity_cell(*validity_cell);
@@ -144,19 +143,18 @@ Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
                                           Handle<Smi> smi_handler) {
   Handle<JSReceiver> end;  // null handle, means full prototype chain lookup.
   MaybeObjectHandle data1 = holder;
-  int checks_count = GetPrototypeCheckCount<LoadHandler>(
-      isolate, &smi_handler, receiver_map, end, data1);
+  int data_size = GetHandlerDataSize<LoadHandler>(isolate, &smi_handler,
+                                                  receiver_map, end, data1);
 
   Handle<Object> validity_cell =
       Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
   if (validity_cell->IsSmi()) {
-    DCHECK_EQ(0, checks_count);
+    DCHECK_EQ(1, data_size);
     // Lookup on receiver isn't supported in case of a simple smi handler.
     if (!LookupOnReceiverBits::decode(smi_handler->value())) return smi_handler;
   }
 
-  int data_count = 1 + checks_count;
-  Handle<LoadHandler> handler = isolate->factory()->NewLoadHandler(data_count);
+  Handle<LoadHandler> handler = isolate->factory()->NewLoadHandler(data_size);
 
   handler->set_smi_handler(*smi_handler);
   handler->set_validity_cell(*validity_cell);
@@ -251,16 +249,13 @@ Handle<Object> StoreHandler::StoreThroughPrototype(
     data1 = maybe_data1;
   }
 
-  int checks_count = GetPrototypeCheckCount<StoreHandler>(
+  int data_size = GetHandlerDataSize<StoreHandler>(
       isolate, &smi_handler, receiver_map, holder, data1, maybe_data2);
 
   Handle<Object> validity_cell =
       Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
-  DCHECK_IMPLIES(validity_cell->IsSmi(), checks_count == 0);
 
-  int data_count = 1 + checks_count;
-  Handle<StoreHandler> handler =
-      isolate->factory()->NewStoreHandler(data_count);
+  Handle<StoreHandler> handler = isolate->factory()->NewStoreHandler(data_size);
 
   handler->set_smi_handler(*smi_handler);
   handler->set_validity_cell(*validity_cell);
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index b8888868ec0b90..80d19d73ecfd5e 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -37,7 +37,7 @@ class LoadHandler final : public DataHandler {
     kNormal,
     kGlobal,
     kField,
-    kConstant,
+    kConstantFromPrototype,
     kAccessor,
     kNativeDataProperty,
     kApiGetter,
@@ -47,65 +47,58 @@ class LoadHandler final : public DataHandler {
     kNonExistent,
     kModuleExport
   };
-  class KindBits : public BitField<Kind, 0, 4> {};
+  using KindBits = BitField<Kind, 0, 4>;
 
   // Defines whether access rights check should be done on receiver object.
   // Applicable to named property kinds only when loading value from prototype
   // chain. Ignored when loading from holder.
-  class DoAccessCheckOnReceiverBits
-      : public BitField<bool, KindBits::kNext, 1> {};
+  using DoAccessCheckOnReceiverBits = KindBits::Next<bool, 1>;
 
   // Defines whether a lookup should be done on receiver object before
   // proceeding to the prototype chain. Applicable to named property kinds only
   // when loading value from prototype chain. Ignored when loading from holder.
-  class LookupOnReceiverBits
-      : public BitField<bool, DoAccessCheckOnReceiverBits::kNext, 1> {};
+  using LookupOnReceiverBits = DoAccessCheckOnReceiverBits::Next<bool, 1>;
 
   //
   // Encoding when KindBits contains kForConstants.
   //
 
   // Index of a value entry in the descriptor array.
-  class DescriptorBits : public BitField<unsigned, LookupOnReceiverBits::kNext,
-                                         kDescriptorIndexBitCount> {};
+  using DescriptorBits =
+      LookupOnReceiverBits::Next<unsigned, kDescriptorIndexBitCount>;
   // Make sure we don't overflow the smi.
-  STATIC_ASSERT(DescriptorBits::kNext <= kSmiValueSize);
+  STATIC_ASSERT(DescriptorBits::kLastUsedBit < kSmiValueSize);
 
   //
   // Encoding when KindBits contains kField.
   //
-  class IsInobjectBits : public BitField<bool, LookupOnReceiverBits::kNext, 1> {
-  };
-  class IsDoubleBits : public BitField<bool, IsInobjectBits::kNext, 1> {};
+  using IsInobjectBits = LookupOnReceiverBits::Next<bool, 1>;
+  using IsDoubleBits = IsInobjectBits::Next<bool, 1>;
   // +1 here is to cover all possible JSObject header sizes.
-  class FieldIndexBits : public BitField<unsigned, IsDoubleBits::kNext,
-                                         kDescriptorIndexBitCount + 1> {};
+  using FieldIndexBits =
+      IsDoubleBits::Next<unsigned, kDescriptorIndexBitCount + 1>;
   // Make sure we don't overflow the smi.
-  STATIC_ASSERT(FieldIndexBits::kNext <= kSmiValueSize);
+  STATIC_ASSERT(FieldIndexBits::kLastUsedBit < kSmiValueSize);
 
   //
   // Encoding when KindBits contains kElement or kIndexedString.
   //
-  class AllowOutOfBoundsBits
-      : public BitField<bool, LookupOnReceiverBits::kNext, 1> {};
+  using AllowOutOfBoundsBits = LookupOnReceiverBits::Next<bool, 1>;
 
   //
   // Encoding when KindBits contains kElement.
   //
-  class IsJsArrayBits : public BitField<bool, AllowOutOfBoundsBits::kNext, 1> {
-  };
-  class ConvertHoleBits : public BitField<bool, IsJsArrayBits::kNext, 1> {};
-  class ElementsKindBits
-      : public BitField<ElementsKind, ConvertHoleBits::kNext, 8> {};
+  using IsJsArrayBits = AllowOutOfBoundsBits::Next<bool, 1>;
+  using ConvertHoleBits = IsJsArrayBits::Next<bool, 1>;
+  using ElementsKindBits = ConvertHoleBits::Next<ElementsKind, 8>;
   // Make sure we don't overflow the smi.
-  STATIC_ASSERT(ElementsKindBits::kNext <= kSmiValueSize);
+  STATIC_ASSERT(ElementsKindBits::kLastUsedBit < kSmiValueSize);
 
   //
   // Encoding when KindBits contains kModuleExport.
   //
-  class ExportsIndexBits
-      : public BitField<unsigned, LookupOnReceiverBits::kNext,
-                        kSmiValueSize - LookupOnReceiverBits::kNext> {};
+  using ExportsIndexBits = LookupOnReceiverBits::Next<
+      unsigned, kSmiValueSize - LookupOnReceiverBits::kLastUsedBit - 1>;
 
   // Decodes kind from Smi-handler.
   static inline Kind GetHandlerKind(Smi smi_handler);
@@ -123,8 +116,9 @@ class LoadHandler final : public DataHandler {
   // Creates a Smi-handler for loading a field from fast object.
   static inline Handle<Smi> LoadField(Isolate* isolate, FieldIndex field_index);
 
-  // Creates a Smi-handler for loading a constant from fast object.
-  static inline Handle<Smi> LoadConstant(Isolate* isolate, int descriptor);
+  // Creates a Smi-handler for loading a cached constant from fast
+  // prototype object.
+  static inline Handle<Smi> LoadConstantFromPrototype(Isolate* isolate);
 
   // Creates a Smi-handler for calling a getter on a fast object.
   static inline Handle<Smi> LoadAccessor(Isolate* isolate, int descriptor);
@@ -206,47 +200,43 @@ class StoreHandler final : public DataHandler {
     kProxy,
     kKindsNumber  // Keep last
   };
-  class KindBits : public BitField<Kind, 0, 4> {};
+  using KindBits = BitField<Kind, 0, 4>;
 
   enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };
 
   // Applicable to kGlobalProxy, kProxy kinds.
 
   // Defines whether access rights check should be done on receiver object.
-  class DoAccessCheckOnReceiverBits
-      : public BitField<bool, KindBits::kNext, 1> {};
+  using DoAccessCheckOnReceiverBits = KindBits::Next<bool, 1>;
 
   // Defines whether a lookup should be done on receiver object before
   // proceeding to the prototype chain. Applicable to named property kinds only
   // when storing through prototype chain. Ignored when storing to holder.
-  class LookupOnReceiverBits
-      : public BitField<bool, DoAccessCheckOnReceiverBits::kNext, 1> {};
+  using LookupOnReceiverBits = DoAccessCheckOnReceiverBits::Next<bool, 1>;
 
   // Applicable to kField, kTransitionToField and kTransitionToConstant
   // kinds.
 
   // Index of a value entry in the descriptor array.
-  class DescriptorBits : public BitField<unsigned, LookupOnReceiverBits::kNext,
-                                         kDescriptorIndexBitCount> {};
+  using DescriptorBits =
+      LookupOnReceiverBits::Next<unsigned, kDescriptorIndexBitCount>;
   //
   // Encoding when KindBits contains kTransitionToConstant.
   //
 
   // Make sure we don't overflow the smi.
-  STATIC_ASSERT(DescriptorBits::kNext <= kSmiValueSize);
+  STATIC_ASSERT(DescriptorBits::kLastUsedBit < kSmiValueSize);
 
   //
   // Encoding when KindBits contains kField or kTransitionToField.
   //
-  class IsInobjectBits : public BitField<bool, DescriptorBits::kNext, 1> {};
-  class FieldRepresentationBits
-      : public BitField<FieldRepresentation, IsInobjectBits::kNext, 2> {};
+  using IsInobjectBits = DescriptorBits::Next<bool, 1>;
+  using FieldRepresentationBits = IsInobjectBits::Next<FieldRepresentation, 2>;
   // +1 here is to cover all possible JSObject header sizes.
-  class FieldIndexBits
-      : public BitField<unsigned, FieldRepresentationBits::kNext,
-                        kDescriptorIndexBitCount + 1> {};
+  using FieldIndexBits =
+      FieldRepresentationBits::Next<unsigned, kDescriptorIndexBitCount + 1>;
   // Make sure we don't overflow the smi.
-  STATIC_ASSERT(FieldIndexBits::kNext <= kSmiValueSize);
+  STATIC_ASSERT(FieldIndexBits::kLastUsedBit < kSmiValueSize);
 
   // Creates a Smi-handler for storing a field to fast object.
   static inline Handle<Smi> StoreField(Isolate* isolate, int descriptor,
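Note on the handler-configuration.h hunks above: each "class FooBits : public BitField<...>" subclass becomes a using-alias, a chained field is spelled PrevBits::Next<T, size> instead of restating PrevBits::kNext as an explicit shift, and the overflow checks move from kNext <= kSmiValueSize to the equivalent kLastUsedBit < kSmiValueSize (kNext is kLastUsedBit + 1). A minimal standalone sketch of the pattern, assuming a simplified BitField (the real template lives in V8's own headers):

#include <cstdint>

template <class T, int kShiftArg, int kSizeArg, class U = uint32_t>
struct BitField {
  static constexpr int kShift = kShiftArg;
  static constexpr int kSize = kSizeArg;
  static constexpr int kLastUsedBit = kShift + kSize - 1;
  static constexpr U kMask = ((U{1} << kSize) - 1) << kShift;

  // Chains the next field directly after this one, replacing the old
  // "class Foo : public BitField<T, Bar::kNext, n>" spelling.
  template <class T2, int kSize2>
  using Next = BitField<T2, kShift + kSize, kSize2, U>;

  static constexpr U encode(T value) {
    return static_cast<U>(value) << kShift;
  }
  static constexpr T decode(U packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

enum class Kind { kNormal, kField, kAccessor };
using KindBits = BitField<Kind, 0, 4>;
using DoAccessCheckOnReceiverBits = KindBits::Next<bool, 1>;
static_assert(DoAccessCheckOnReceiverBits::kLastUsedBit == 4,
              "fields pack with no gaps");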
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 377e3df6ae2797..3c8d1ea58280fb 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -14,6 +14,7 @@
 #include "src/execution/execution.h"
 #include "src/execution/frames-inl.h"
 #include "src/execution/isolate-inl.h"
+#include "src/execution/runtime-profiler.h"
 #include "src/handles/handles-inl.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/handler-configuration-inl.h"
@@ -28,14 +29,13 @@
 #include "src/objects/heap-number-inl.h"
 #include "src/objects/js-array-inl.h"
 #include "src/objects/module-inl.h"
-#include "src/objects/struct-inl.h"
-#include "src/utils/ostreams.h"
-#include "src/execution/runtime-profiler.h"
 #include "src/objects/prototype.h"
+#include "src/objects/struct-inl.h"
 #include "src/runtime/runtime-utils.h"
 #include "src/runtime/runtime.h"
 #include "src/tracing/trace-event.h"
 #include "src/tracing/tracing-category-observer.h"
+#include "src/utils/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -391,19 +391,23 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
     }
 
     if (*name == ReadOnlyRoots(isolate()).iterator_symbol()) {
-      return Runtime::ThrowIteratorError(isolate(), object);
+      return isolate()->Throw<Object>(
+          ErrorUtils::NewIteratorError(isolate(), object));
+    }
+
+    if (IsAnyHas()) {
+      return TypeError(MessageTemplate::kInvalidInOperatorUse, object, name);
+    } else {
+      DCHECK(object->IsNullOrUndefined(isolate()));
+      ErrorUtils::ThrowLoadFromNullOrUndefined(isolate(), object, name);
+      return MaybeHandle<Object>();
     }
-    return TypeError(IsAnyHas() ? MessageTemplate::kInvalidInOperatorUse
-                                : MessageTemplate::kNonObjectPropertyLoad,
-                     object, name);
   }
 
   if (MigrateDeprecated(isolate(), object)) use_ic = false;
 
-  if (state() != UNINITIALIZED) {
-    JSObject::MakePrototypesFast(object, kStartAtReceiver, isolate());
-    update_receiver_map(object);
-  }
+  JSObject::MakePrototypesFast(object, kStartAtReceiver, isolate());
+  update_receiver_map(object);
 
   LookupIterator it(isolate(), object, name);
 
@@ -414,7 +418,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
     if (name->IsPrivateName() && !it.IsFound()) {
       Handle<String> name_string(String::cast(Symbol::cast(*name).name()),
                                  isolate());
-      return TypeError(MessageTemplate::kInvalidPrivateFieldRead, object,
+      return TypeError(MessageTemplate::kInvalidPrivateMemberRead, object,
                        name_string);
     }
 
@@ -618,7 +622,7 @@ void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) {
   DCHECK(IsAnyLoad() || IsAnyStore() || IsAnyHas());
   switch (state()) {
     case NO_FEEDBACK:
-      break;
+      UNREACHABLE();
     case UNINITIALIZED:
     case PREMONOMORPHIC:
       UpdateMonomorphicIC(handler, name);
@@ -648,15 +652,6 @@ void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) {
 }
 
 void LoadIC::UpdateCaches(LookupIterator* lookup) {
-  if (state() == UNINITIALIZED && !IsLoadGlobalIC()) {
-    // This is the first time we execute this inline cache. Set the target to
-    // the pre monomorphic stub to delay setting the monomorphic state.
-    TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
-    ConfigureVectorState(receiver_map());
-    TraceIC("LoadIC", lookup->name());
-    return;
-  }
-
   Handle<Object> code;
   if (lookup->state() == LookupIterator::ACCESS_CHECK) {
     code = slow_stub();
@@ -908,6 +903,33 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
         if (receiver_is_holder) return smi_handler;
         TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
       }
+      if (lookup->constness() == PropertyConstness::kConst &&
+          !receiver_is_holder) {
+        DCHECK(!lookup->is_dictionary_holder());
+
+        Handle<Object> value = lookup->GetDataValue();
+
+        if (value->IsThinString()) {
+          value = handle(ThinString::cast(*value)->actual(), isolate());
+        }
+
+        // Non-internalized strings could turn into thin/cons strings
+        // when internalized. Weak references to thin/cons strings are
+        // not supported by the GC. If concurrent marking is running
+        // and the thin/cons string is marked but the actual string is
+        // not, then the weak reference could be missed.
+        if (!value->IsString() ||
+            value->IsInternalizedString()) {
+          MaybeObjectHandle weak_value =
+              value->IsSmi() ? MaybeObjectHandle(*value, isolate())
+                             : MaybeObjectHandle::Weak(*value, isolate());
+
+          smi_handler = LoadHandler::LoadConstantFromPrototype(isolate());
+          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
+          return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+                                                smi_handler, weak_value);
+        }
+      }
       return LoadHandler::LoadFromPrototype(isolate(), map, holder,
                                             smi_handler);
     }
@@ -1117,7 +1139,7 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
                                     is_js_array, load_mode);
   }
   DCHECK(IsFastElementsKind(elements_kind) ||
-         IsFrozenOrSealedElementsKind(elements_kind) ||
+         IsAnyNonextensibleElementsKind(elements_kind) ||
          IsTypedArrayElementsKind(elements_kind));
   bool convert_hole_to_undefined =
       (elements_kind == HOLEY_SMI_ELEMENTS ||
@@ -1415,16 +1437,14 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
     return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
   }
 
-  if (state() != UNINITIALIZED) {
-    JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
-  }
+  JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
   LookupIterator it(isolate(), object, name);
 
   if (name->IsPrivate()) {
     if (name->IsPrivateName() && !it.IsFound()) {
       Handle<String> name_string(String::cast(Symbol::cast(*name).name()),
                                  isolate());
-      return TypeError(MessageTemplate::kInvalidPrivateFieldWrite, object,
+      return TypeError(MessageTemplate::kInvalidPrivateMemberWrite, object,
                        name_string);
     }
 
@@ -1442,15 +1462,6 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
 
 void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
                            StoreOrigin store_origin) {
-  if (state() == UNINITIALIZED && !IsStoreGlobalIC()) {
-    // This is the first time we execute this inline cache. Transition
-    // to premonomorphic state to delay setting the monomorphic state.
-    TRACE_HANDLER_STATS(isolate(), StoreIC_Premonomorphic);
-    ConfigureVectorState(receiver_map());
-    TraceIC("StoreIC", lookup->name());
-    return;
-  }
-
   MaybeObjectHandle handler;
   if (LookupForWrite(lookup, value, store_origin)) {
     if (IsStoreGlobalIC()) {
@@ -1810,10 +1821,8 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
   handlers.reserve(target_receiver_maps.size());
   StoreElementPolymorphicHandlers(&target_receiver_maps, &handlers, store_mode);
   if (target_receiver_maps.size() == 0) {
-    // Transition to PREMONOMORPHIC state here and remember a weak-reference
-    // to the {receiver_map} in case TurboFan sees this function before the
-    // IC can transition further.
-    ConfigureVectorState(receiver_map);
+    Handle<Object> handler = StoreElementHandler(receiver_map, store_mode);
+    ConfigureVectorState(Handle<Name>(), receiver_map, handler);
   } else if (target_receiver_maps.size() == 1) {
     ConfigureVectorState(Handle<Name>(), target_receiver_maps[0], handlers[0]);
   } else {
@@ -1840,6 +1849,7 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
         CodeFactory::KeyedStoreIC_SloppyArguments(isolate(), store_mode).code();
   } else if (receiver_map->has_fast_elements() ||
              receiver_map->has_sealed_elements() ||
+             receiver_map->has_nonextensible_elements() ||
              receiver_map->has_typed_array_elements()) {
     TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
     code = CodeFactory::StoreFastElementIC(isolate(), store_mode).code();
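Note on the ic.cc hunks above: the PREMONOMORPHIC staging state is gone (LoadIC and StoreIC now go monomorphic on the first cache update, and NO_FEEDBACK is asserted unreachable in IC::PatchCache), and LoadIC::ComputeHandler gains a fast path that caches a PropertyConstness::kConst prototype property as a weak reference. Strings need the extra guard because a non-internalized string can later be turned into a thin/cons string, which the GC cannot track through a weak reference. A standalone toy restating that guard (the Value struct is a stand-in, not a V8 type):

#include <cstdio>

struct Value {
  bool is_smi;
  bool is_string;
  bool is_internalized;  // interned string: identity is stable
};

// Only Smis, non-string heap objects, and internalized strings may be
// cached; any other string could become a thin/cons string whose weak
// reference might be missed during concurrent marking.
bool CanCacheConstantWeakly(const Value& v) {
  if (v.is_smi) return true;      // stored as a plain Smi, no weak ref needed
  if (!v.is_string) return true;  // ordinary heap object: weak ref is safe
  return v.is_internalized;       // otherwise use the generic handler
}

int main() {
  Value cons_string{false, true, false};
  std::printf("cacheable: %d\n", CanCacheConstantWeakly(cons_string));  // 0
}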
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 7e87b015d4a06f..bb4e6cb4278972 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -30,7 +30,7 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
 
   void KeyedStoreGeneric();
 
-  void StoreIC_Uninitialized();
+  void StoreIC_NoFeedback();
 
   // Generates code for the [[Set]] operation; |unique_name| is supposed to
   // be unique, otherwise this code will always go to the runtime.
@@ -62,8 +62,8 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
                          TNode<Object> key, TNode<Object> value,
                          Maybe<LanguageMode> language_mode);
 
-  void EmitGenericElementStore(Node* receiver, Node* receiver_map,
-                               Node* instance_type, Node* intptr_index,
+  void EmitGenericElementStore(Node* receiver, TNode<Map> receiver_map,
+                               Node* instance_type, TNode<IntPtrT> index,
                                Node* value, Node* context, Label* slow);
 
   // If language mode is not provided it is deduced from the feedback slot's
@@ -82,35 +82,38 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
                              Nothing<LanguageMode>());
   }
 
-  void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
+  void BranchIfPrototypesHaveNonFastElements(TNode<Map> receiver_map,
                                              Label* non_fast_elements,
                                              Label* only_fast_elements);
 
-  void TryRewriteElements(Node* receiver, Node* receiver_map, Node* elements,
-                          Node* native_context, ElementsKind from_kind,
-                          ElementsKind to_kind, Label* bailout);
+  void TryRewriteElements(Node* receiver, TNode<Map> receiver_map,
+                          Node* elements, Node* native_context,
+                          ElementsKind from_kind, ElementsKind to_kind,
+                          Label* bailout);
 
-  void StoreElementWithCapacity(Node* receiver, Node* receiver_map,
-                                Node* elements, Node* elements_kind,
-                                Node* intptr_index, Node* value, Node* context,
-                                Label* slow, UpdateLength update_length);
+  void StoreElementWithCapacity(Node* receiver, TNode<Map> receiver_map,
+                                SloppyTNode<FixedArrayBase> elements,
+                                TNode<Word32T> elements_kind,
+                                TNode<IntPtrT> index, Node* value,
+                                Node* context, Label* slow,
+                                UpdateLength update_length);
 
   void MaybeUpdateLengthAndReturn(Node* receiver, Node* index, Node* value,
                                   UpdateLength update_length);
 
-  void TryChangeToHoleyMapHelper(Node* receiver, Node* receiver_map,
+  void TryChangeToHoleyMapHelper(Node* receiver, TNode<Map> receiver_map,
                                  Node* native_context, ElementsKind packed_kind,
                                  ElementsKind holey_kind, Label* done,
                                  Label* map_mismatch, Label* bailout);
-  void TryChangeToHoleyMap(Node* receiver, Node* receiver_map,
-                           Node* current_elements_kind, Node* context,
+  void TryChangeToHoleyMap(Node* receiver, TNode<Map> receiver_map,
+                           TNode<Word32T> current_elements_kind, Node* context,
                            ElementsKind packed_kind, Label* bailout);
-  void TryChangeToHoleyMapMulti(Node* receiver, Node* receiver_map,
-                                Node* current_elements_kind, Node* context,
-                                ElementsKind packed_kind,
+  void TryChangeToHoleyMapMulti(Node* receiver, TNode<Map> receiver_map,
+                                TNode<Word32T> current_elements_kind,
+                                Node* context, ElementsKind packed_kind,
                                 ElementsKind packed_kind_2, Label* bailout);
 
-  void LookupPropertyOnPrototypeChain(Node* receiver_map, Node* name,
+  void LookupPropertyOnPrototypeChain(TNode<Map> receiver_map, Node* name,
                                       Label* accessor,
                                       Variable* var_accessor_pair,
                                       Variable* var_accessor_holder,
@@ -138,10 +141,9 @@ void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state) {
   assembler.KeyedStoreGeneric();
 }
 
-void StoreICUninitializedGenerator::Generate(
-    compiler::CodeAssemblerState* state) {
+void StoreICNoFeedbackGenerator::Generate(compiler::CodeAssemblerState* state) {
   KeyedStoreGenericAssembler assembler(state, StoreMode::kOrdinary);
-  assembler.StoreIC_Uninitialized();
+  assembler.StoreIC_NoFeedback();
 }
 
 void KeyedStoreGenericGenerator::SetProperty(
@@ -169,7 +171,8 @@ void KeyedStoreGenericGenerator::SetPropertyInLiteral(
 }
 
 void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
-    Node* receiver_map, Label* non_fast_elements, Label* only_fast_elements) {
+    TNode<Map> receiver_map, Label* non_fast_elements,
+    Label* only_fast_elements) {
   VARIABLE(var_map, MachineRepresentation::kTagged);
   var_map.Bind(receiver_map);
   Label loop_body(this, &var_map);
@@ -178,11 +181,11 @@ void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
   BIND(&loop_body);
   {
     Node* map = var_map.value();
-    Node* prototype = LoadMapPrototype(map);
+    TNode<HeapObject> prototype = LoadMapPrototype(map);
     GotoIf(IsNull(prototype), only_fast_elements);
-    Node* prototype_map = LoadMap(prototype);
+    TNode<Map> prototype_map = LoadMap(prototype);
     var_map.Bind(prototype_map);
-    TNode<Int32T> instance_type = LoadMapInstanceType(prototype_map);
+    TNode<Uint16T> instance_type = LoadMapInstanceType(prototype_map);
     GotoIf(IsCustomElementsReceiverInstanceType(instance_type),
            non_fast_elements);
     TNode<Int32T> elements_kind = LoadMapElementsKind(prototype_map);
@@ -193,8 +196,9 @@ void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
 }
 
 void KeyedStoreGenericAssembler::TryRewriteElements(
-    Node* receiver, Node* receiver_map, Node* elements, Node* native_context,
-    ElementsKind from_kind, ElementsKind to_kind, Label* bailout) {
+    Node* receiver, TNode<Map> receiver_map, Node* elements,
+    Node* native_context, ElementsKind from_kind, ElementsKind to_kind,
+    Label* bailout) {
   DCHECK(IsFastPackedElementsKind(from_kind));
   ElementsKind holey_from_kind = GetHoleyElementsKind(from_kind);
   ElementsKind holey_to_kind = GetHoleyElementsKind(to_kind);
@@ -205,8 +209,8 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
   VARIABLE(var_target_map, MachineRepresentation::kTagged);
   // Check if the receiver has the default |from_kind| map.
   {
-    Node* packed_map = LoadJSArrayElementsMap(from_kind, native_context);
-    GotoIf(WordNotEqual(receiver_map, packed_map), &check_holey_map);
+    TNode<Map> packed_map = LoadJSArrayElementsMap(from_kind, native_context);
+    GotoIf(TaggedNotEqual(receiver_map, packed_map), &check_holey_map);
     var_target_map.Bind(
         LoadContextElement(native_context, Context::ArrayMapIndex(to_kind)));
     Goto(&perform_transition);
@@ -215,9 +219,9 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
   // Check if the receiver has the default |holey_from_kind| map.
   BIND(&check_holey_map);
   {
-    Node* holey_map = LoadContextElement(
+    TNode<Object> holey_map = LoadContextElement(
         native_context, Context::ArrayMapIndex(holey_from_kind));
-    GotoIf(WordNotEqual(receiver_map, holey_map), bailout);
+    GotoIf(TaggedNotEqual(receiver_map, holey_map), bailout);
     var_target_map.Bind(LoadContextElement(
         native_context, Context::ArrayMapIndex(holey_to_kind)));
     Goto(&perform_transition);
@@ -227,7 +231,7 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
   BIND(&perform_transition);
   {
     if (IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(to_kind)) {
-      Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+      TNode<IntPtrT> capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
       GrowElementsCapacity(receiver, elements, from_kind, to_kind, capacity,
                            capacity, INTPTR_PARAMETERS, bailout);
     }
@@ -236,38 +240,39 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
 }
 
 void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper(
-    Node* receiver, Node* receiver_map, Node* native_context,
+    Node* receiver, TNode<Map> receiver_map, Node* native_context,
     ElementsKind packed_kind, ElementsKind holey_kind, Label* done,
     Label* map_mismatch, Label* bailout) {
-  Node* packed_map = LoadJSArrayElementsMap(packed_kind, native_context);
-  GotoIf(WordNotEqual(receiver_map, packed_map), map_mismatch);
+  TNode<Map> packed_map = LoadJSArrayElementsMap(packed_kind, native_context);
+  GotoIf(TaggedNotEqual(receiver_map, packed_map), map_mismatch);
   if (AllocationSite::ShouldTrack(packed_kind, holey_kind)) {
     TrapAllocationMemento(receiver, bailout);
   }
-  Node* holey_map =
+  TNode<Object> holey_map =
       LoadContextElement(native_context, Context::ArrayMapIndex(holey_kind));
   StoreMap(receiver, holey_map);
   Goto(done);
 }
 
 void KeyedStoreGenericAssembler::TryChangeToHoleyMap(
-    Node* receiver, Node* receiver_map, Node* current_elements_kind,
-    Node* context, ElementsKind packed_kind, Label* bailout) {
+    Node* receiver, TNode<Map> receiver_map,
+    TNode<Word32T> current_elements_kind, Node* context,
+    ElementsKind packed_kind, Label* bailout) {
   ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
   Label already_holey(this);
 
   GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind)),
          &already_holey);
-  Node* native_context = LoadNativeContext(context);
+  TNode<Context> native_context = LoadNativeContext(context);
   TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
                             holey_kind, &already_holey, bailout, bailout);
   BIND(&already_holey);
 }
 
 void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti(
-    Node* receiver, Node* receiver_map, Node* current_elements_kind,
-    Node* context, ElementsKind packed_kind, ElementsKind packed_kind_2,
-    Label* bailout) {
+    Node* receiver, TNode<Map> receiver_map,
+    TNode<Word32T> current_elements_kind, Node* context,
+    ElementsKind packed_kind, ElementsKind packed_kind_2, Label* bailout) {
   ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
   ElementsKind holey_kind_2 = GetHoleyElementsKind(packed_kind_2);
   Label already_holey(this), check_other_kind(this);
@@ -277,7 +282,7 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti(
   GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind_2)),
          &already_holey);
 
-  Node* native_context = LoadNativeContext(context);
+  TNode<Context> native_context = LoadNativeContext(context);
   TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
                             holey_kind, &already_holey, &check_other_kind,
                             bailout);
@@ -291,7 +296,7 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti(
 void KeyedStoreGenericAssembler::MaybeUpdateLengthAndReturn(
     Node* receiver, Node* index, Node* value, UpdateLength update_length) {
   if (update_length != kDontChangeLength) {
-    Node* new_length = SmiTag(Signed(IntPtrAdd(index, IntPtrConstant(1))));
+    TNode<Smi> new_length = SmiTag(Signed(IntPtrAdd(index, IntPtrConstant(1))));
     StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset, new_length,
                                    MachineRepresentation::kTagged);
   }
@@ -299,8 +304,9 @@ void KeyedStoreGenericAssembler::MaybeUpdateLengthAndReturn(
 }
 
 void KeyedStoreGenericAssembler::StoreElementWithCapacity(
-    Node* receiver, Node* receiver_map, Node* elements, Node* elements_kind,
-    Node* intptr_index, Node* value, Node* context, Label* slow,
+    Node* receiver, TNode<Map> receiver_map,
+    SloppyTNode<FixedArrayBase> elements, TNode<Word32T> elements_kind,
+    TNode<IntPtrT> index, Node* value, Node* context, Label* slow,
     UpdateLength update_length) {
   if (update_length != kDontChangeLength) {
     CSA_ASSERT(this, InstanceTypeEqual(LoadMapInstanceType(receiver_map),
@@ -319,14 +325,14 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
   const int kHeaderSize = FixedArray::kHeaderSize - kHeapObjectTag;
 
   Label check_double_elements(this), check_cow_elements(this);
-  Node* elements_map = LoadMap(elements);
-  GotoIf(WordNotEqual(elements_map, LoadRoot(RootIndex::kFixedArrayMap)),
+  TNode<Map> elements_map = LoadMap(elements);
+  GotoIf(TaggedNotEqual(elements_map, FixedArrayMapConstant()),
          &check_double_elements);
 
   // FixedArray backing store -> Smi or object elements.
   {
-    Node* offset = ElementOffsetFromIndex(intptr_index, PACKED_ELEMENTS,
-                                          INTPTR_PARAMETERS, kHeaderSize);
+    TNode<IntPtrT> offset = ElementOffsetFromIndex(
+        index, PACKED_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
     // Check if we're about to overwrite the hole. We can safely do that
     // only if there can be no setters on the prototype chain.
     // If we know that we're storing beyond the previous array length, we
@@ -334,8 +340,9 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
     {
       Label hole_check_passed(this);
       if (update_length == kDontChangeLength) {
-        Node* element = Load(MachineType::AnyTagged(), elements, offset);
-        GotoIf(WordNotEqual(element, TheHoleConstant()), &hole_check_passed);
+        TNode<Object> element =
+            CAST(Load(MachineType::AnyTagged(), elements, offset));
+        GotoIf(TaggedNotEqual(element, TheHoleConstant()), &hole_check_passed);
       }
       BranchIfPrototypesHaveNonFastElements(receiver_map, slow,
                                             &hole_check_passed);
@@ -354,7 +361,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
       }
       StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, elements,
                           offset, value);
-      MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+      MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
 
       BIND(&non_smi_value);
     }
@@ -372,7 +379,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
                             PACKED_ELEMENTS, slow);
       }
       Store(elements, offset, value);
-      MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+      MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
 
       BIND(&must_transition);
     }
@@ -380,8 +387,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
     // Transition to the required ElementsKind.
     {
       Label transition_to_double(this), transition_to_object(this);
-      Node* native_context = LoadNativeContext(context);
-      Branch(WordEqual(LoadMap(value), LoadRoot(RootIndex::kHeapNumberMap)),
+      TNode<Context> native_context = LoadNativeContext(context);
+      Branch(TaggedEqual(LoadMap(value), HeapNumberMapConstant()),
              &transition_to_double, &transition_to_object);
       BIND(&transition_to_double);
       {
@@ -393,16 +400,15 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
         TryRewriteElements(receiver, receiver_map, elements, native_context,
                            PACKED_SMI_ELEMENTS, target_kind, slow);
         // Reload migrated elements.
-        Node* double_elements = LoadElements(receiver);
-        Node* double_offset =
-            ElementOffsetFromIndex(intptr_index, PACKED_DOUBLE_ELEMENTS,
-                                   INTPTR_PARAMETERS, kHeaderSize);
+        TNode<FixedArrayBase> double_elements = LoadElements(receiver);
+        TNode<IntPtrT> double_offset = ElementOffsetFromIndex(
+            index, PACKED_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
         // Make sure we do not store signalling NaNs into double arrays.
-        Node* double_value = Float64SilenceNaN(LoadHeapNumberValue(value));
+        TNode<Float64T> double_value =
+            Float64SilenceNaN(LoadHeapNumberValue(value));
         StoreNoWriteBarrier(MachineRepresentation::kFloat64, double_elements,
                             double_offset, double_value);
-        MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
-                                   update_length);
+        MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
       }
 
       BIND(&transition_to_object);
@@ -415,22 +421,21 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
         TryRewriteElements(receiver, receiver_map, elements, native_context,
                            PACKED_SMI_ELEMENTS, target_kind, slow);
         // The elements backing store didn't change, no reload necessary.
-        CSA_ASSERT(this, WordEqual(elements, LoadElements(receiver)));
+        CSA_ASSERT(this, TaggedEqual(elements, LoadElements(receiver)));
         Store(elements, offset, value);
-        MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
-                                   update_length);
+        MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
       }
     }
   }
 
   BIND(&check_double_elements);
-  Node* fixed_double_array_map = LoadRoot(RootIndex::kFixedDoubleArrayMap);
-  GotoIf(WordNotEqual(elements_map, fixed_double_array_map),
+  TNode<Map> fixed_double_array_map = FixedDoubleArrayMapConstant();
+  GotoIf(TaggedNotEqual(elements_map, fixed_double_array_map),
          &check_cow_elements);
   // FixedDoubleArray backing store -> double elements.
   {
-    Node* offset = ElementOffsetFromIndex(intptr_index, PACKED_DOUBLE_ELEMENTS,
-                                          INTPTR_PARAMETERS, kHeaderSize);
+    TNode<IntPtrT> offset = ElementOffsetFromIndex(
+        index, PACKED_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
     // Check if we're about to overwrite the hole. We can safely do that
     // only if there can be no setters on the prototype chain.
     {
@@ -463,25 +468,25 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
       }
       StoreNoWriteBarrier(MachineRepresentation::kFloat64, elements, offset,
                           double_value);
-      MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+      MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
 
       BIND(&non_number_value);
     }
 
     // Transition to object elements.
     {
-      Node* native_context = LoadNativeContext(context);
+      TNode<Context> native_context = LoadNativeContext(context);
       ElementsKind target_kind = update_length == kBumpLengthWithGap
                                      ? HOLEY_ELEMENTS
                                      : PACKED_ELEMENTS;
       TryRewriteElements(receiver, receiver_map, elements, native_context,
                          PACKED_DOUBLE_ELEMENTS, target_kind, slow);
       // Reload migrated elements.
-      Node* fast_elements = LoadElements(receiver);
-      Node* fast_offset = ElementOffsetFromIndex(
-          intptr_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+      TNode<FixedArrayBase> fast_elements = LoadElements(receiver);
+      TNode<IntPtrT> fast_offset = ElementOffsetFromIndex(
+          index, PACKED_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
       Store(fast_elements, fast_offset, value);
-      MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+      MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
     }
   }
 
@@ -493,13 +498,13 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
 }
 
 void KeyedStoreGenericAssembler::EmitGenericElementStore(
-    Node* receiver, Node* receiver_map, Node* instance_type, Node* intptr_index,
-    Node* value, Node* context, Label* slow) {
+    Node* receiver, TNode<Map> receiver_map, Node* instance_type,
+    TNode<IntPtrT> index, Node* value, Node* context, Label* slow) {
   Label if_fast(this), if_in_bounds(this), if_out_of_bounds(this),
       if_increment_length_by_one(this), if_bump_length_with_gap(this),
       if_grow(this), if_nonfast(this), if_typed_array(this),
       if_dictionary(this);
-  Node* elements = LoadElements(receiver);
+  TNode<FixedArrayBase> elements = LoadElements(receiver);
   TNode<Int32T> elements_kind = LoadMapElementsKind(receiver_map);
   Branch(IsFastElementsKind(elements_kind), &if_fast, &if_nonfast);
   BIND(&if_fast);
@@ -507,25 +512,23 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
   Label if_array(this);
   GotoIf(InstanceTypeEqual(instance_type, JS_ARRAY_TYPE), &if_array);
   {
-    Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
-    Branch(UintPtrLessThan(intptr_index, capacity), &if_in_bounds,
-           &if_out_of_bounds);
+    TNode<IntPtrT> capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+    Branch(UintPtrLessThan(index, capacity), &if_in_bounds, &if_out_of_bounds);
   }
   BIND(&if_array);
   {
-    Node* length = SmiUntag(LoadFastJSArrayLength(receiver));
-    GotoIf(UintPtrLessThan(intptr_index, length), &if_in_bounds);
-    Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
-    GotoIf(UintPtrGreaterThanOrEqual(intptr_index, capacity), &if_grow);
-    Branch(WordEqual(intptr_index, length), &if_increment_length_by_one,
+    TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(receiver));
+    GotoIf(UintPtrLessThan(index, length), &if_in_bounds);
+    TNode<IntPtrT> capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+    GotoIf(UintPtrGreaterThanOrEqual(index, capacity), &if_grow);
+    Branch(WordEqual(index, length), &if_increment_length_by_one,
            &if_bump_length_with_gap);
   }
 
   BIND(&if_in_bounds);
   {
     StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
-                             intptr_index, value, context, slow,
-                             kDontChangeLength);
+                             index, value, context, slow, kDontChangeLength);
   }
 
   BIND(&if_out_of_bounds);
@@ -541,15 +544,14 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
   BIND(&if_increment_length_by_one);
   {
     StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
-                             intptr_index, value, context, slow,
+                             index, value, context, slow,
                              kIncrementLengthByOne);
   }
 
   BIND(&if_bump_length_with_gap);
   {
     StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
-                             intptr_index, value, context, slow,
-                             kBumpLengthWithGap);
+                             index, value, context, slow, kBumpLengthWithGap);
   }
 
   // Out-of-capacity accesses (index >= capacity) jump here. Additionally,
@@ -593,7 +595,7 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
 }
 
 void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
-    Node* receiver_map, Node* name, Label* accessor,
+    TNode<Map> receiver_map, Node* name, Label* accessor,
     Variable* var_accessor_pair, Variable* var_accessor_holder, Label* readonly,
     Label* bailout) {
   Label ok_to_write(this);
@@ -610,7 +612,7 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
     Node* holder = var_holder.value();
     GotoIf(IsNull(holder), &ok_to_write);
     Node* holder_map = var_holder_map.value();
-    Node* instance_type = LoadMapInstanceType(holder_map);
+    TNode<Uint16T> instance_type = LoadMapInstanceType(holder_map);
     Label next_proto(this);
     {
       Label found(this), found_fast(this), found_dict(this), found_global(this);
@@ -623,7 +625,7 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
       {
         TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
         TNode<IntPtrT> name_index = var_entry.value();
-        Node* details = LoadDetailsByKeyIndex(descriptors, name_index);
+        TNode<Uint32T> details = LoadDetailsByKeyIndex(descriptors, name_index);
         JumpIfDataProperty(details, &ok_to_write, readonly);
 
         // Accessor case.
@@ -638,9 +640,9 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
 
       BIND(&found_dict);
       {
-        Node* dictionary = var_meta_storage.value();
-        Node* entry = var_entry.value();
-        Node* details =
+        TNode<HeapObject> dictionary = var_meta_storage.value();
+        TNode<IntPtrT> entry = var_entry.value();
+        TNode<Uint32T> details =
             LoadDetailsByKeyIndex<NameDictionary>(dictionary, entry);
         JumpIfDataProperty(details, &ok_to_write, readonly);
 
@@ -657,14 +659,14 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
 
       BIND(&found_global);
       {
-        Node* dictionary = var_meta_storage.value();
-        Node* entry = var_entry.value();
-        Node* property_cell =
-            LoadValueByKeyIndex<GlobalDictionary>(dictionary, entry);
-        Node* value =
+        TNode<HeapObject> dictionary = var_meta_storage.value();
+        TNode<IntPtrT> entry = var_entry.value();
+        TNode<PropertyCell> property_cell =
+            CAST(LoadValueByKeyIndex<GlobalDictionary>(dictionary, entry));
+        TNode<Object> value =
             LoadObjectField(property_cell, PropertyCell::kValueOffset);
-        GotoIf(WordEqual(value, TheHoleConstant()), &next_proto);
-        Node* details = LoadAndUntagToWord32ObjectField(
+        GotoIf(TaggedEqual(value, TheHoleConstant()), &next_proto);
+        TNode<Int32T> details = LoadAndUntagToWord32ObjectField(
             property_cell, PropertyCell::kPropertyDetailsRawOffset);
         JumpIfDataProperty(details, &ok_to_write, readonly);
 
@@ -682,7 +684,7 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
     BIND(&next_proto);
     // Bailout if it can be an integer indexed exotic case.
     GotoIf(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE), bailout);
-    Node* proto = LoadMapPrototype(holder_map);
+    TNode<HeapObject> proto = LoadMapPrototype(holder_map);
     GotoIf(IsNull(proto), &ok_to_write);
     var_holder.Bind(proto);
     var_holder_map.Bind(LoadMap(proto));
@@ -765,7 +767,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
   VARIABLE(var_accessor_holder, MachineRepresentation::kTagged);
   Label fast_properties(this), dictionary_properties(this), accessor(this),
       readonly(this);
-  Node* bitfield3 = LoadMapBitField3(receiver_map);
+  TNode<Uint32T> bitfield3 = LoadMapBitField3(receiver_map);
+  TNode<Name> name = CAST(p->name());
   Branch(IsSetWord32<Map::IsDictionaryMapBit>(bitfield3),
          &dictionary_properties, &fast_properties);
 
@@ -775,13 +778,13 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
     TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
     Label descriptor_found(this), lookup_transition(this);
     TVARIABLE(IntPtrT, var_name_index);
-    DescriptorLookup(p->name(), descriptors, bitfield3, &descriptor_found,
+    DescriptorLookup(name, descriptors, bitfield3, &descriptor_found,
                      &var_name_index, &lookup_transition);
 
     BIND(&descriptor_found);
     {
       TNode<IntPtrT> name_index = var_name_index.value();
-      Node* details = LoadDetailsByKeyIndex(descriptors, name_index);
+      TNode<Uint32T> details = LoadDetailsByKeyIndex(descriptors, name_index);
       Label data_property(this);
       JumpIfDataProperty(details, &data_property,
                          ShouldReconfigureExisting() ? nullptr : &readonly);
@@ -796,12 +799,13 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
         var_accessor_holder.Bind(receiver);
         Goto(&accessor);
       } else {
-        Goto(&data_property);
+        // Handle accessor-to-data-property reconfiguration in the runtime.
+        Goto(slow);
       }
 
       BIND(&data_property);
       {
-        CheckForAssociatedProtector(p->name(), slow);
+        CheckForAssociatedProtector(name, slow);
         OverwriteExistingFastDataProperty(receiver, receiver_map, descriptors,
                                           name_index, details, p->value(), slow,
                                           false);
@@ -811,8 +815,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
     BIND(&lookup_transition);
     {
       Comment("lookup transition");
-      TNode<Map> transition_map = FindCandidateStoreICTransitionMapHandler(
-          receiver_map, CAST(p->name()), slow);
+      TNode<Map> transition_map =
+          FindCandidateStoreICTransitionMapHandler(receiver_map, name, slow);
 
       // Validate the transition handler candidate and apply the transition.
       StoreTransitionMapFlags flags = kValidateTransitionHandler;
@@ -833,9 +837,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
     TVARIABLE(IntPtrT, var_name_index);
     Label dictionary_found(this, &var_name_index), not_found(this);
     TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(receiver)));
-    NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()),
-                                         &dictionary_found, &var_name_index,
-                                         &not_found);
+    NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found,
+                                         &var_name_index, &not_found);
     BIND(&dictionary_found);
     {
       Label overwrite(this);
@@ -858,7 +861,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
 
       BIND(&overwrite);
       {
-        CheckForAssociatedProtector(p->name(), slow);
+        CheckForAssociatedProtector(name, slow);
         StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
                                              p->value());
         exit_point->Return(p->value());
@@ -867,37 +870,37 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
 
     BIND(&not_found);
     {
-      CheckForAssociatedProtector(p->name(), slow);
+      CheckForAssociatedProtector(name, slow);
       Label extensible(this), is_private_symbol(this);
-      Node* bitfield3 = LoadMapBitField3(receiver_map);
-      GotoIf(IsPrivateSymbol(p->name()), &is_private_symbol);
+      TNode<Uint32T> bitfield3 = LoadMapBitField3(receiver_map);
+      GotoIf(IsPrivateSymbol(name), &is_private_symbol);
       Branch(IsSetWord32<Map::IsExtensibleBit>(bitfield3), &extensible, slow);
 
       BIND(&is_private_symbol);
       {
-        CSA_ASSERT(this, IsPrivateSymbol(p->name()));
+        CSA_ASSERT(this, IsPrivateSymbol(name));
         // For private names, we miss to the runtime which will throw.
         // For private symbols, we extend and store an own property.
-        Branch(IsPrivateName(p->name()), slow, &extensible);
+        Branch(IsPrivateName(CAST(name)), slow, &extensible);
       }
 
       BIND(&extensible);
       if (ShouldCheckPrototype()) {
         DCHECK(ShouldCallSetter());
         LookupPropertyOnPrototypeChain(
-            receiver_map, p->name(), &accessor, &var_accessor_pair,
+            receiver_map, name, &accessor, &var_accessor_pair,
             &var_accessor_holder,
             ShouldReconfigureExisting() ? nullptr : &readonly, slow);
       }
       Label add_dictionary_property_slow(this);
       InvalidateValidityCellIfPrototype(receiver_map, bitfield3);
-      Add<NameDictionary>(properties, CAST(p->name()), p->value(),
+      Add<NameDictionary>(properties, name, p->value(),
                           &add_dictionary_property_slow);
       exit_point->Return(p->value());
 
       BIND(&add_dictionary_property_slow);
       exit_point->ReturnCallRuntime(Runtime::kAddDictionaryProperty,
-                                    p->context(), p->receiver(), p->name(),
+                                    p->context(), p->receiver(), name,
                                     p->value());
     }
   }
@@ -909,9 +912,9 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
       Node* accessor_pair = var_accessor_pair.value();
       GotoIf(IsAccessorInfoMap(LoadMap(accessor_pair)), slow);
       CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
-      Node* setter =
-          LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
-      Node* setter_map = LoadMap(setter);
+      TNode<HeapObject> setter =
+          CAST(LoadObjectField(accessor_pair, AccessorPair::kSetterOffset));
+      TNode<Map> setter_map = LoadMap(setter);
       // FunctionTemplateInfo setters are not supported yet.
       GotoIf(IsFunctionTemplateInfoMap(setter_map), slow);
       GotoIfNot(IsCallableMap(setter_map), &not_callable);
@@ -927,15 +930,15 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
           if (language_mode == LanguageMode::kStrict) {
             exit_point->ReturnCallRuntime(
                 Runtime::kThrowTypeError, p->context(),
-                SmiConstant(MessageTemplate::kNoSetterInCallback), p->name(),
+                SmiConstant(MessageTemplate::kNoSetterInCallback), name,
                 var_accessor_holder.value());
           } else {
             exit_point->Return(p->value());
           }
         } else {
           CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context(),
-                      SmiConstant(MessageTemplate::kNoSetterInCallback),
-                      p->name(), var_accessor_holder.value());
+                      SmiConstant(MessageTemplate::kNoSetterInCallback), name,
+                      var_accessor_holder.value());
           exit_point->Return(p->value());
         }
       }
@@ -950,14 +953,14 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
         if (language_mode == LanguageMode::kStrict) {
           Node* type = Typeof(p->receiver());
           ThrowTypeError(p->context(), MessageTemplate::kStrictReadOnlyProperty,
-                         p->name(), type, p->receiver());
+                         name, type, p->receiver());
         } else {
           exit_point->Return(p->value());
         }
       } else {
         CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context(),
-                    SmiConstant(MessageTemplate::kStrictReadOnlyProperty),
-                    p->name(), Typeof(p->receiver()), p->receiver());
+                    SmiConstant(MessageTemplate::kStrictReadOnlyProperty), name,
+                    Typeof(p->receiver()), p->receiver());
         exit_point->Return(p->value());
       }
     }
@@ -975,7 +978,7 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
 
   GotoIf(TaggedIsSmi(receiver), &slow);
   TNode<Map> receiver_map = LoadMap(CAST(receiver));
-  TNode<Int32T> instance_type = LoadMapInstanceType(receiver_map);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
   // Receivers requiring non-standard element accesses (interceptors, access
   // checks, strings and string wrappers, proxies) are handled in the runtime.
   GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &slow);
@@ -1043,51 +1046,33 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
   KeyedStoreGeneric(context, receiver, key, value, Just(language_mode));
 }
 
-void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
-  using Descriptor = StoreWithVectorDescriptor;
+void KeyedStoreGenericAssembler::StoreIC_NoFeedback() {
+  using Descriptor = StoreDescriptor;
 
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* name = Parameter(Descriptor::kName);
+  TNode<Object> name = CAST(Parameter(Descriptor::kName));
   Node* value = Parameter(Descriptor::kValue);
   Node* slot = Parameter(Descriptor::kSlot);
-  Node* vector = Parameter(Descriptor::kVector);
   Node* context = Parameter(Descriptor::kContext);
 
   Label miss(this, Label::kDeferred), store_property(this);
 
   GotoIf(TaggedIsSmi(receiver), &miss);
-  Node* receiver_map = LoadMap(receiver);
-  TNode<Int32T> instance_type = LoadMapInstanceType(receiver_map);
+  TNode<Map> receiver_map = LoadMap(receiver);
+  TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
   // Receivers requiring non-standard element accesses (interceptors, access
   // checks, strings and string wrappers, proxies) are handled in the runtime.
   GotoIf(IsSpecialReceiverInstanceType(instance_type), &miss);
-
-  // Optimistically write the state transition to the vector.
-  GotoIf(IsUndefined(vector), &store_property);
-  StoreFeedbackVectorSlot(vector, slot,
-                          LoadRoot(RootIndex::kpremonomorphic_symbol),
-                          SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
-  Goto(&store_property);
-
-  BIND(&store_property);
   {
-    StoreICParameters p(CAST(context), receiver, name, value, slot, vector);
+    StoreICParameters p(CAST(context), receiver, name, value, slot,
+                        UndefinedConstant());
     EmitGenericPropertyStore(receiver, receiver_map, &p, &miss);
   }
 
   BIND(&miss);
   {
-    Label call_runtime(this);
-    // Undo the optimistic state transition.
-    GotoIf(IsUndefined(vector), &call_runtime);
-    StoreFeedbackVectorSlot(vector, slot,
-                            LoadRoot(RootIndex::kuninitialized_symbol),
-                            SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
-    Goto(&call_runtime);
-
-    BIND(&call_runtime);
-    TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
-                    receiver, name);
+    TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+                    UndefinedConstant(), receiver, name);
   }
 }
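Note on StoreIC_NoFeedback above: with no feedback vector to update, the optimistic premonomorphic slot write (and its undo on miss) disappears, the builtin drops from StoreWithVectorDescriptor to StoreDescriptor, and a miss simply tail-calls Runtime::kStoreIC_Miss with an undefined vector. The effect on the IC lifecycle, as a simplified sketch (illustrative names, not the actual transition code):

enum class ICState { kUninitialized, kMonomorphic, kPolymorphic, kMegamorphic };

// Simplified transition on recording feedback for a receiver map; the real
// logic also deduplicates handlers and caps the polymorphic arity.
ICState OnFeedback(ICState state, bool same_map_as_before) {
  switch (state) {
    case ICState::kUninitialized:
      // Previously the IC parked in PREMONOMORPHIC for one extra miss;
      // after this patch it becomes monomorphic immediately.
      return ICState::kMonomorphic;
    case ICState::kMonomorphic:
      return same_map_as_before ? ICState::kMonomorphic
                                : ICState::kPolymorphic;
    case ICState::kPolymorphic:
      return same_map_as_before ? ICState::kPolymorphic
                                : ICState::kMegamorphic;
    case ICState::kMegamorphic:
      return ICState::kMegamorphic;
  }
  return state;  // unreachable; keeps some compilers happy
}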
 
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 322bb6332113e0..efee0da80e6c62 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -37,7 +37,7 @@ class KeyedStoreGenericGenerator {
                                    TNode<Object> value);
 };
 
-class StoreICUninitializedGenerator {
+class StoreICNoFeedbackGenerator {
  public:
   static void Generate(compiler::CodeAssemblerState* state);
 };
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 176749781cf7cb..f7e25ca0bbc99c 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -596,9 +596,6 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
   empty_function_map->set_is_prototype_map(true);
   DCHECK(!empty_function_map->is_dictionary_map());
 
-  // Allocate ScopeInfo for the empty function.
-  Handle<ScopeInfo> scope_info = ScopeInfo::CreateForEmptyFunction(isolate());
-
   // Allocate the empty function as the prototype for function according to
   // ES#sec-properties-of-the-function-prototype-object
   NewFunctionArgs args = NewFunctionArgs::ForBuiltin(
@@ -612,7 +609,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
   script->set_type(Script::TYPE_NATIVE);
   Handle<WeakFixedArray> infos = factory()->NewWeakFixedArray(2);
   script->set_shared_function_infos(*infos);
-  empty_function->shared().set_scope_info(*scope_info);
+  empty_function->shared().set_raw_scope_info(
+      ReadOnlyRoots(isolate()).empty_function_scope_info());
   empty_function->shared().DontAdaptArguments();
   SharedFunctionInfo::SetScript(handle(empty_function->shared(), isolate()),
                                 script, 1);
@@ -659,17 +657,21 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic() {
   Handle<JSFunction> function = factory()->NewFunction(args);
   function->shared().DontAdaptArguments();
 
-  // %ThrowTypeError% must not have a name property.
-  if (JSReceiver::DeleteProperty(function, factory()->name_string())
-          .IsNothing()) {
-    DCHECK(false);
-  }
+  PropertyAttributes ro_attribs =
+      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+  // %ThrowTypeError% must have a name property whose value is the empty
+  // string. Per spec, its name must also be non-configurable; otherwise we
+  // could skip explicitly setting the property attributes here and simply
+  // fall back to the function's default name attribute.
+  JSObject::SetOwnPropertyIgnoreAttributes(
+      function, factory()->name_string(), factory()->empty_string(), ro_attribs)
+      .Assert();
 
   // length needs to be non-configurable.
   Handle<Object> value(Smi::FromInt(function->length()), isolate());
-  JSObject::SetOwnPropertyIgnoreAttributes(
-      function, factory()->length_string(), value,
-      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY))
+  JSObject::SetOwnPropertyIgnoreAttributes(function, factory()->length_string(),
+                                           value, ro_attribs)
       .Assert();
 
   if (JSObject::PreventExtensions(function, kThrowOnError).IsNothing()) {
@@ -1154,7 +1156,8 @@ void Genesis::CreateRoots() {
 void Genesis::InstallGlobalThisBinding() {
   Handle<ScriptContextTable> script_contexts(
       native_context()->script_context_table(), isolate());
-  Handle<ScopeInfo> scope_info = ScopeInfo::CreateGlobalThisBinding(isolate());
+  Handle<ScopeInfo> scope_info =
+      ReadOnlyRoots(isolate()).global_this_binding_scope_info_handle();
   Handle<Context> context =
       factory()->NewScriptContext(native_context(), scope_info);
 
@@ -1222,8 +1225,8 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
         FunctionTemplateInfo::cast(js_global_object_template->constructor()),
         isolate());
     js_global_object_function = ApiNatives::CreateApiFunction(
-        isolate(), js_global_object_constructor, factory()->the_hole_value(),
-        JS_GLOBAL_OBJECT_TYPE);
+        isolate(), isolate()->native_context(), js_global_object_constructor,
+        factory()->the_hole_value(), JS_GLOBAL_OBJECT_TYPE);
   }
 
   js_global_object_function->initial_map().set_is_prototype_map(true);
@@ -1248,8 +1251,8 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
     Handle<FunctionTemplateInfo> global_constructor(
         FunctionTemplateInfo::cast(data->constructor()), isolate());
     global_proxy_function = ApiNatives::CreateApiFunction(
-        isolate(), global_constructor, factory()->the_hole_value(),
-        JS_GLOBAL_PROXY_TYPE);
+        isolate(), isolate()->native_context(), global_constructor,
+        factory()->the_hole_value(), JS_GLOBAL_PROXY_TYPE);
   }
   global_proxy_function->initial_map().set_is_access_check_needed(true);
   global_proxy_function->initial_map().set_may_have_interesting_symbols(true);
@@ -2450,11 +2453,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
         Handle<JSFunction> fun =
             SimpleInstallFunction(isolate_, prototype, "exec",
                                   Builtins::kRegExpPrototypeExec, 1, true);
-        // Check that index of "exec" function in JSRegExp is correct.
+        native_context()->set_regexp_exec_function(*fun);
         DCHECK_EQ(JSRegExp::kExecFunctionDescriptorIndex,
                   prototype->map().LastAdded());
-
-        native_context()->set_regexp_exec_function(*fun);
       }
 
       SimpleInstallGetter(isolate_, prototype, factory->dotAll_string(),
@@ -2481,35 +2482,50 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
       SimpleInstallFunction(isolate_, prototype, "test",
                             Builtins::kRegExpPrototypeTest, 1, true);
 
-      InstallFunctionAtSymbol(isolate_, prototype, factory->match_symbol(),
-                              "[Symbol.match]", Builtins::kRegExpPrototypeMatch,
-                              1, true);
-      DCHECK_EQ(JSRegExp::kSymbolMatchFunctionDescriptorIndex,
-                prototype->map().LastAdded());
-
-      InstallFunctionAtSymbol(isolate_, prototype, factory->match_all_symbol(),
-                              "[Symbol.matchAll]",
-                              Builtins::kRegExpPrototypeMatchAll, 1, true);
-      DCHECK_EQ(JSRegExp::kSymbolMatchAllFunctionDescriptorIndex,
-                prototype->map().LastAdded());
-
-      InstallFunctionAtSymbol(isolate_, prototype, factory->replace_symbol(),
-                              "[Symbol.replace]",
-                              Builtins::kRegExpPrototypeReplace, 2, false);
-      DCHECK_EQ(JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
-                prototype->map().LastAdded());
-
-      InstallFunctionAtSymbol(isolate_, prototype, factory->search_symbol(),
-                              "[Symbol.search]",
-                              Builtins::kRegExpPrototypeSearch, 1, true);
-      DCHECK_EQ(JSRegExp::kSymbolSearchFunctionDescriptorIndex,
-                prototype->map().LastAdded());
-
-      InstallFunctionAtSymbol(isolate_, prototype, factory->split_symbol(),
-                              "[Symbol.split]", Builtins::kRegExpPrototypeSplit,
-                              2, false);
-      DCHECK_EQ(JSRegExp::kSymbolSplitFunctionDescriptorIndex,
-                prototype->map().LastAdded());
+      {
+        Handle<JSFunction> fun = InstallFunctionAtSymbol(
+            isolate_, prototype, factory->match_symbol(), "[Symbol.match]",
+            Builtins::kRegExpPrototypeMatch, 1, true);
+        native_context()->set_regexp_match_function(*fun);
+        DCHECK_EQ(JSRegExp::kSymbolMatchFunctionDescriptorIndex,
+                  prototype->map().LastAdded());
+      }
+
+      {
+        Handle<JSFunction> fun = InstallFunctionAtSymbol(
+            isolate_, prototype, factory->match_all_symbol(),
+            "[Symbol.matchAll]", Builtins::kRegExpPrototypeMatchAll, 1, true);
+        native_context()->set_regexp_match_all_function(*fun);
+        DCHECK_EQ(JSRegExp::kSymbolMatchAllFunctionDescriptorIndex,
+                  prototype->map().LastAdded());
+      }
+
+      {
+        Handle<JSFunction> fun = InstallFunctionAtSymbol(
+            isolate_, prototype, factory->replace_symbol(), "[Symbol.replace]",
+            Builtins::kRegExpPrototypeReplace, 2, false);
+        native_context()->set_regexp_replace_function(*fun);
+        DCHECK_EQ(JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
+                  prototype->map().LastAdded());
+      }
+
+      {
+        Handle<JSFunction> fun = InstallFunctionAtSymbol(
+            isolate_, prototype, factory->search_symbol(), "[Symbol.search]",
+            Builtins::kRegExpPrototypeSearch, 1, true);
+        native_context()->set_regexp_search_function(*fun);
+        DCHECK_EQ(JSRegExp::kSymbolSearchFunctionDescriptorIndex,
+                  prototype->map().LastAdded());
+      }
+
+      {
+        Handle<JSFunction> fun = InstallFunctionAtSymbol(
+            isolate_, prototype, factory->split_symbol(), "[Symbol.split]",
+            Builtins::kRegExpPrototypeSplit, 2, false);
+        native_context()->set_regexp_split_function(*fun);
+        DCHECK_EQ(JSRegExp::kSymbolSplitFunctionDescriptorIndex,
+                  prototype->map().LastAdded());
+      }
 
       Handle<Map> prototype_map(prototype->map(), isolate());
       Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate_);
@@ -4246,8 +4262,9 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_namespace_exports)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_methods)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_chaining)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_nullish)
 
 #ifdef V8_INTL_SUPPORT
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_add_calendar_numbering_system)
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index 85ef1f4d836b01..6a9b4c33cdad0f 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -47,7 +47,7 @@ Address IsolateAllocator::InitReservation() {
   size_t reservation_size = kPtrComprHeapReservationSize;
   size_t base_alignment = kPtrComprIsolateRootAlignment;
 
-  const int kMaxAttempts = 3;
+  const int kMaxAttempts = 4;
   for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
     Address hint = RoundDown(reinterpret_cast<Address>(
                                  platform_page_allocator->GetRandomMmapAddr()),
@@ -72,35 +72,44 @@ Address IsolateAllocator::InitReservation() {
     // Fuchsia does not respect the given hints, so as a workaround we use the
     // overreserved address space region instead of trying to re-reserve a
     // subregion.
-    if (padded_reservation.InVM(address, reservation_size)) {
-      reservation_ = std::move(padded_reservation);
-      return address;
-    }
+    bool overreserve = true;
 #else
-    // Now free the padded reservation and immediately try to reserve an exact
-    // region at aligned address. We have to do this dancing because the
-    // reservation address requirement is more complex than just a certain
-    // alignment and not all operating systems support freeing parts of reserved
-    // address space regions.
-    padded_reservation.Free();
-
-    VirtualMemory reservation(platform_page_allocator, reservation_size,
-                              reinterpret_cast<void*>(address));
-    if (!reservation.IsReserved()) break;
-
-    // The reservation could still be somewhere else but we can accept it
-    // if the reservation has the required alignment.
-    Address aligned_address =
-        RoundUp(reservation.address() + kPtrComprIsolateRootBias,
-                base_alignment) -
-        kPtrComprIsolateRootBias;
+    // For the last attempt use the overreserved region to avoid an OOM crash.
+    // This case can happen if many isolates are created in parallel and race
+    // to reserve the regions.
+    bool overreserve = (attempt == kMaxAttempts - 1);
+#endif
 
-    if (reservation.address() == aligned_address) {
-      reservation_ = std::move(reservation);
-      CHECK_EQ(reservation_.size(), reservation_size);
-      return aligned_address;
+    if (overreserve) {
+      if (padded_reservation.InVM(address, reservation_size)) {
+        reservation_ = std::move(padded_reservation);
+        return address;
+      }
+    } else {
+      // Now free the padded reservation and immediately try to reserve an exact
+      // region at aligned address. We have to do this dancing because the
+      // reservation address requirement is more complex than just a certain
+      // alignment and not all operating systems support freeing parts of
+      // reserved address space regions.
+      padded_reservation.Free();
+
+      VirtualMemory reservation(platform_page_allocator, reservation_size,
+                                reinterpret_cast<void*>(address));
+      if (!reservation.IsReserved()) break;
+
+      // The reservation could still be somewhere else but we can accept it
+      // if the reservation has the required alignment.
+      Address aligned_address =
+          RoundUp(reservation.address() + kPtrComprIsolateRootBias,
+                  base_alignment) -
+          kPtrComprIsolateRootBias;
+
+      if (reservation.address() == aligned_address) {
+        reservation_ = std::move(reservation);
+        CHECK_EQ(reservation_.size(), reservation_size);
+        return aligned_address;
+      }
     }
-#endif
   }
   V8::FatalProcessOutOfMemory(nullptr,
                               "Failed to reserve memory for new V8 Isolate");
diff --git a/deps/v8/src/init/setup-isolate-deserialize.cc b/deps/v8/src/init/setup-isolate-deserialize.cc
index 8a73ff0c8ac9d5..ff0268d3c84f91 100644
--- a/deps/v8/src/init/setup-isolate-deserialize.cc
+++ b/deps/v8/src/init/setup-isolate-deserialize.cc
@@ -7,7 +7,6 @@
 #include "src/base/logging.h"
 #include "src/execution/isolate.h"
 #include "src/interpreter/interpreter.h"
-#include "src/objects/objects-inl.h"
 #include "src/utils/ostreams.h"
 
 namespace v8 {
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 19ad57038f10c9..15eb929332a85e 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -90,6 +90,13 @@ void V8::InitializeOncePerProcessImpl() {
     FLAG_expose_wasm = false;
   }
 
+  // The --jitless and --interpreted-frames-native-stack flags are
+  // incompatible: the latter requires code generation while the former
+  // prohibits it.
+  CHECK_WITH_MSG(!FLAG_interpreted_frames_native_stack || !FLAG_jitless,
+                 "The --jitless and --interpreted-frames-native-stack flags "
+                 "are incompatible.");
+
   base::OS::Initialize(FLAG_hard_abort, FLAG_gc_fake_mmap);
 
   if (FLAG_random_seed) SetRandomMmapSeed(FLAG_random_seed);
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index e5fa06fd54884b..f72dcce0a93c44 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -6,6 +6,7 @@ include_rules = [
   "+src/base/logging.h",
   "+src/base/macros.h",
   "+src/base/memory.h",
+  "+src/base/optional.h",
   "+src/base/platform/platform.h",
   "+src/base/platform/mutex.h",
   "+src/base/safe_conversions.h",
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index a979205084a2cd..92422c0a88270e 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -5,6 +5,6 @@ kozyatinskiy@chromium.org
 pfeldman@chromium.org
 yangguo@chromium.org
 
-per-file PRESUBMIT.py=file://INFRA_OWNERS
+per-file PRESUBMIT.py=file:../../INFRA_OWNERS
 
 # COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index ad91a8e65e97aa..18a10285dd031a 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -50,8 +50,8 @@
 namespace v8_inspector {
 
 namespace {
-static const char kGlobalHandleLabel[] = "DevTools console";
-static bool isResolvableNumberLike(String16 query) {
+const char kGlobalHandleLabel[] = "DevTools console";
+bool isResolvableNumberLike(String16 query) {
   return query == "Infinity" || query == "-Infinity" || query == "NaN";
 }
 }  // namespace
@@ -220,8 +220,13 @@ class InjectedScript::ProtocolPromiseHandler {
                                                       : 0)
             .setColumnNumber(
                 stack && !stack->isEmpty() ? stack->topColumnNumber() : 0)
-            .setException(wrappedValue->clone())
             .build();
+    response = scope.injectedScript()->addExceptionToDetails(
+        result, exceptionDetails.get(), m_objectGroup);
+    if (!response.isSuccess()) {
+      callback->sendFailure(response);
+      return;
+    }
     if (stack)
       exceptionDetails->setStackTrace(
           stack->buildInspectorObjectImpl(m_inspector->debugger()));
@@ -289,8 +294,7 @@ Response InjectedScript::getProperties(
   PropertyAccumulator accumulator(&mirrors);
   if (!ValueMirror::getProperties(context, object, ownProperties,
                                   accessorPropertiesOnly, &accumulator)) {
-    return createExceptionDetails(tryCatch, groupName, wrapMode,
-                                  exceptionDetails);
+    return createExceptionDetails(tryCatch, groupName, exceptionDetails);
   }
   for (const PropertyMirror& mirror : mirrors) {
     std::unique_ptr<PropertyDescriptor> descriptor =
@@ -489,14 +493,18 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
                              &limit, &limit, &preview);
   if (!preview) return nullptr;
 
-  std::unordered_set<String16> selectedColumns;
+  std::vector<String16> selectedColumns;
+  std::unordered_set<String16> columnSet;
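+  // Track the selection both as an ordered vector (columns render in the
+  // order they were first requested) and as a set for cheap de-duplication.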
   v8::Local<v8::Array> v8Columns;
   if (maybeColumns.ToLocal(&v8Columns)) {
     for (uint32_t i = 0; i < v8Columns->Length(); ++i) {
       v8::Local<v8::Value> column;
       if (v8Columns->Get(context, i).ToLocal(&column) && column->IsString()) {
-        selectedColumns.insert(
-            toProtocolString(isolate, column.As<v8::String>()));
+        String16 name = toProtocolString(isolate, column.As<v8::String>());
+        if (columnSet.find(name) == columnSet.end()) {
+          columnSet.insert(name);
+          selectedColumns.push_back(name);
+        }
       }
     }
   }
@@ -505,14 +513,18 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
          *preview->getProperties()) {
       ObjectPreview* columnPreview = column->getValuePreview(nullptr);
       if (!columnPreview) continue;
-
-      auto filtered = v8::base::make_unique<Array<PropertyPreview>>();
+      // Use raw pointers here, since the lifetime of each PropertyPreview is
+      // ensured by columnPreview; this saves an additional clone.
+      std::unordered_map<String16, PropertyPreview*> columnMap;
       for (const std::unique_ptr<PropertyPreview>& property :
            *columnPreview->getProperties()) {
-        if (selectedColumns.find(property->getName()) !=
-            selectedColumns.end()) {
-          filtered->emplace_back(property->clone());
-        }
+        if (columnSet.find(property->getName()) == columnSet.end()) continue;
+        columnMap[property->getName()] = property.get();
+      }
+      auto filtered = v8::base::make_unique<Array<PropertyPreview>>();
+      for (const String16& column : selectedColumns) {
+        if (columnMap.find(column) == columnMap.end()) continue;
+        filtered->push_back(columnMap[column]->clone());
       }
       columnPreview->setProperties(std::move(filtered));
     }
@@ -632,9 +644,25 @@ Response InjectedScript::resolveCallArgument(
   return Response::OK();
 }
 
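+// Wraps |exception| (with a preview unless it is a native error) and attaches
+// it to |exceptionDetails|; a no-op when the exception handle is empty, so
+// callers can pass v8::TryCatch::Exception() unconditionally.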
+Response InjectedScript::addExceptionToDetails(
+    v8::Local<v8::Value> exception,
+    protocol::Runtime::ExceptionDetails* exceptionDetails,
+    const String16& objectGroup) {
+  if (exception.IsEmpty()) return Response::OK();
+  std::unique_ptr<protocol::Runtime::RemoteObject> wrapped;
+  Response response =
+      wrapObject(exception, objectGroup,
+                 exception->IsNativeError() ? WrapMode::kNoPreview
+                                            : WrapMode::kWithPreview,
+                 &wrapped);
+  if (!response.isSuccess()) return response;
+  exceptionDetails->setException(std::move(wrapped));
+  return Response::OK();
+}
+
 Response InjectedScript::createExceptionDetails(
     const v8::TryCatch& tryCatch, const String16& objectGroup,
-    WrapMode wrapMode, Maybe<protocol::Runtime::ExceptionDetails>* result) {
+    Maybe<protocol::Runtime::ExceptionDetails>* result) {
   if (!tryCatch.HasCaught()) return Response::InternalError();
   v8::Local<v8::Message> message = tryCatch.Message();
   v8::Local<v8::Value> exception = tryCatch.Exception();
@@ -667,16 +695,9 @@ Response InjectedScript::createExceptionDetails(
               ->createStackTrace(stackTrace)
               ->buildInspectorObjectImpl(m_context->inspector()->debugger()));
   }
-  if (!exception.IsEmpty()) {
-    std::unique_ptr<protocol::Runtime::RemoteObject> wrapped;
-    Response response =
-        wrapObject(exception, objectGroup,
-                   exception->IsNativeError() ? WrapMode::kNoPreview
-                                              : WrapMode::kWithPreview,
-                   &wrapped);
-    if (!response.isSuccess()) return response;
-    exceptionDetails->setException(std::move(wrapped));
-  }
+  Response response =
+      addExceptionToDetails(exception, exceptionDetails.get(), objectGroup);
+  if (!response.isSuccess()) return response;
   *result = std::move(exceptionDetails);
   return Response::OK();
 }
@@ -709,8 +730,7 @@ Response InjectedScript::wrapEvaluateResult(
     if (!response.isSuccess()) return response;
     // We send exception in result for compatibility reasons, even though it's
     // accessible through exceptionDetails.exception.
-    response = createExceptionDetails(tryCatch, objectGroup, wrapMode,
-                                      exceptionDetails);
+    response = createExceptionDetails(tryCatch, objectGroup, exceptionDetails);
     if (!response.isSuccess()) return response;
   }
   return Response::OK();
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 03c743e1cd3428..d007e9121ece3b 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -117,7 +117,7 @@ class InjectedScript final {
                                v8::Local<v8::Value>* result);
 
   Response createExceptionDetails(
-      const v8::TryCatch&, const String16& groupName, WrapMode wrapMode,
+      const v8::TryCatch&, const String16& groupName,
       Maybe<protocol::Runtime::ExceptionDetails>* result);
   Response wrapEvaluateResult(
       v8::MaybeLocal<v8::Value> maybeResultValue, const v8::TryCatch&,
@@ -219,6 +219,10 @@ class InjectedScript final {
   void discardEvaluateCallbacks();
   std::unique_ptr<EvaluateCallback> takeEvaluateCallback(
       EvaluateCallback* callback);
+  Response addExceptionToDetails(
+      v8::Local<v8::Value> exception,
+      protocol::Runtime::ExceptionDetails* exceptionDetails,
+      const String16& objectGroup);
 
   InspectedContext* m_context;
   int m_sessionId;
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index fd2d35abd7ec5f..a8aee0b7f36a68 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -490,7 +490,7 @@ Response V8RuntimeAgentImpl::compileScript(
   if (!isOk) {
     if (scope.tryCatch().HasCaught()) {
       response = scope.injectedScript()->createExceptionDetails(
-          scope.tryCatch(), String16(), WrapMode::kNoPreview, exceptionDetails);
+          scope.tryCatch(), String16(), exceptionDetails);
       if (!response.isSuccess()) return response;
       return Response::OK();
     } else {
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 3ab9085c4491fb..9edfbc1a212a46 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -7,6 +7,7 @@
 #include <algorithm>
 #include <cmath>
 
+#include "src/base/optional.h"
 #include "src/debug/debug-interface.h"
 #include "src/inspector/v8-debugger.h"
 #include "src/inspector/v8-inspector-impl.h"
@@ -199,37 +200,57 @@ String16 descriptionForRegExp(v8::Isolate* isolate,
 
 enum class ErrorType { kNative, kClient };
 
+// Build a description from an exception using the following rules:
+//   * Usually, return the stack trace found in the {stack} property.
+//   * If the stack trace does not start with the class name of the passed
+//     exception, try to build a description from the class name, the
+//     {message} property, and the rest of the stack trace.
+//     (The rest of the stack trace is only appended if {message} was found
+//     within it.)
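+//
+// For example (illustrative): a client error subclass "MyError" whose {stack}
+// is "Error: boom\n    at foo" and whose {message} is "boom" is described as
+// "MyError: boom\n    at foo".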
 String16 descriptionForError(v8::Local<v8::Context> context,
                              v8::Local<v8::Object> object, ErrorType type) {
   v8::Isolate* isolate = context->GetIsolate();
   v8::TryCatch tryCatch(isolate);
   String16 className = toProtocolString(isolate, object->GetConstructorName());
-  v8::Local<v8::Value> stackValue;
-  if (!object->Get(context, toV8String(isolate, "stack"))
-           .ToLocal(&stackValue) ||
-      !stackValue->IsString()) {
-    return className;
-  }
-  String16 stack = toProtocolString(isolate, stackValue.As<v8::String>());
-  String16 description = stack;
-  if (type == ErrorType::kClient) {
-    if (stack.substring(0, className.length()) != className) {
-      v8::Local<v8::Value> messageValue;
-      if (!object->Get(context, toV8String(isolate, "message"))
-               .ToLocal(&messageValue) ||
-          !messageValue->IsString()) {
-        return stack;
-      }
-      String16 message = toProtocolStringWithTypeCheck(isolate, messageValue);
-      size_t index = stack.find(message);
-      String16 stackWithoutMessage =
-          index != String16::kNotFound
-              ? stack.substring(index + message.length())
-              : String16();
-      description = className + ": " + message + stackWithoutMessage;
+
+  v8::base::Optional<String16> stack;
+  {
+    v8::Local<v8::Value> stackValue;
+    if (object->Get(context, toV8String(isolate, "stack"))
+            .ToLocal(&stackValue) &&
+        stackValue->IsString()) {
+      stack = toProtocolString(isolate, stackValue.As<v8::String>());
     }
   }
-  return description;
+
+  if (type == ErrorType::kNative && stack) return *stack;
+
+  if (stack && stack->substring(0, className.length()) == className) {
+    return *stack;
+  }
+
+  v8::base::Optional<String16> message;
+  {
+    v8::Local<v8::Value> messageValue;
+    if (object->Get(context, toV8String(isolate, "message"))
+            .ToLocal(&messageValue) &&
+        messageValue->IsString()) {
+      String16 msg = toProtocolStringWithTypeCheck(isolate, messageValue);
+      if (!msg.isEmpty()) message = msg;
+    }
+  }
+
+  if (!message) return stack ? *stack : className;
+
+  String16 description = className + ": " + *message;
+  if (!stack) return description;
+
+  DCHECK(stack && message);
+  size_t index = stack->find(*message);
+  String16 stackWithoutMessage =
+      index != String16::kNotFound ? stack->substring(index + message->length())
+                                   : String16();
+  return description + stackWithoutMessage;
 }
 
 String16 descriptionForObject(v8::Isolate* isolate,
@@ -1593,13 +1614,13 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
         value, RemoteObject::SubtypeEnum::Regexp,
         descriptionForRegExp(isolate, value.As<v8::RegExp>()));
   }
-  if (value->IsFunction()) {
-    return v8::base::make_unique<FunctionMirror>(value);
-  }
   if (value->IsProxy()) {
     return v8::base::make_unique<ObjectMirror>(
         value, RemoteObject::SubtypeEnum::Proxy, "Proxy");
   }
+  if (value->IsFunction()) {
+    return v8::base::make_unique<FunctionMirror>(value);
+  }
   if (value->IsDate()) {
     return v8::base::make_unique<ObjectMirror>(
         value, RemoteObject::SubtypeEnum::Date,
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index b58fbd33095582..cfc3eb36c15d8e 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -23,7 +23,8 @@ class RegisterTransferWriter final
     : public NON_EXPORTED_BASE(BytecodeRegisterOptimizer::BytecodeWriter),
       public NON_EXPORTED_BASE(ZoneObject) {
  public:
-  RegisterTransferWriter(BytecodeArrayBuilder* builder) : builder_(builder) {}
+  explicit RegisterTransferWriter(BytecodeArrayBuilder* builder)
+      : builder_(builder) {}
   ~RegisterTransferWriter() override = default;
 
   void EmitLdar(Register input) override { builder_->OutputLdarRaw(input); }
@@ -98,6 +99,19 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
       isolate, register_count, parameter_count(), handler_table);
 }
 
+#ifdef DEBUG
+int BytecodeArrayBuilder::CheckBytecodeMatches(Handle<BytecodeArray> bytecode) {
+  return bytecode_array_writer_.CheckBytecodeMatches(bytecode);
+}
+#endif
+
+Handle<ByteArray> BytecodeArrayBuilder::ToSourcePositionTable(
+    Isolate* isolate) {
+  DCHECK(RemainderOfBlockIsDead());
+
+  return bytecode_array_writer_.ToSourcePositionTable(isolate);
+}
+
 BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
     Bytecode bytecode) {
   BytecodeSourceInfo source_position;
@@ -810,10 +824,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadIteratorProperty(
-    Register object, int feedback_slot) {
-  size_t name_index = IteratorSymbolConstantPoolEntry();
-  OutputLdaNamedProperty(object, name_index, feedback_slot);
+BytecodeArrayBuilder& BytecodeArrayBuilder::GetIterator(Register object,
+                                                        int feedback_slot) {
+  OutputGetIterator(object, feedback_slot);
   return *this;
 }
 
@@ -1159,6 +1172,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefinedOrNull(
+    BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
+  OutputJumpIfUndefinedOrNull(label, 0);
+  return *this;
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotUndefined(
     BytecodeLabel* label) {
   DCHECK(!label->is_bound());
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index c5fd3111c09517..06230f9270d3b8 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -21,6 +21,7 @@
 namespace v8 {
 namespace internal {
 
+class BytecodeArray;
 class FeedbackVectorSpec;
 class Isolate;
 
@@ -42,6 +43,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
           SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
 
   Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate);
+  Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
+
+#ifdef DEBUG
+  int CheckBytecodeMatches(Handle<BytecodeArray> bytecode);
+#endif
 
   // Get the number of parameters expected by function.
   int parameter_count() const {
@@ -127,9 +133,10 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
 
   // Keyed load property. The key should be in the accumulator.
   BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
+
   // Named load of the @@iterator symbol's property (the GetIterator
   // bytecode).
-  BytecodeArrayBuilder& LoadIteratorProperty(Register object,
-                                             int feedback_slot);
+  BytecodeArrayBuilder& GetIterator(Register object, int feedback_slot);
+
   // Named load property of the @@asyncIterator symbol.
   BytecodeArrayBuilder& LoadAsyncIteratorProperty(Register object,
                                                   int feedback_slot);
@@ -418,6 +425,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
   BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfNotNull(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
+  BytecodeArrayBuilder& JumpIfUndefinedOrNull(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfNotUndefined(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfNil(BytecodeLabel* label, Token::Value op,
                                   NilValue nil);
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 3ecc5e1a89243b..3a459b48332073 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -12,7 +12,6 @@
 #include "src/interpreter/bytecode-source-info.h"
 #include "src/interpreter/constant-array-builder.h"
 #include "src/interpreter/handler-table-builder.h"
-#include "src/logging/log.h"
 #include "src/objects/objects-inl.h"
 
 namespace v8 {
@@ -50,19 +49,43 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
       bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
       constant_pool);
   bytecode_array->set_handler_table(*handler_table);
-  if (!source_position_table_builder_.Lazy()) {
-    Handle<ByteArray> source_position_table =
-        source_position_table_builder_.Omit()
-            ? ReadOnlyRoots(isolate).empty_byte_array_handle()
-            : source_position_table_builder()->ToSourcePositionTable(isolate);
-    bytecode_array->set_source_position_table(*source_position_table);
-    LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(
-                                bytecode_array->GetFirstBytecodeAddress(),
-                                *source_position_table));
-  }
   return bytecode_array;
 }
 
+Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(Isolate* isolate) {
+  DCHECK(!source_position_table_builder_.Lazy());
+  Handle<ByteArray> source_position_table =
+      source_position_table_builder_.Omit()
+          ? ReadOnlyRoots(isolate).empty_byte_array_handle()
+          : source_position_table_builder_.ToSourcePositionTable(isolate);
+  return source_position_table;
+}
+
+#ifdef DEBUG
+int BytecodeArrayWriter::CheckBytecodeMatches(Handle<BytecodeArray> bytecode) {
+  bool mismatches = false;
+  int bytecode_size = static_cast<int>(bytecodes()->size());
+  const byte* bytecode_ptr = &bytecodes()->front();
+  if (bytecode_size != bytecode->length()) mismatches = true;
+
+  // If there's a mismatch only in the length of the bytecode (very unlikely)
+  // then the first mismatch will be the first extra bytecode.
+  int first_mismatch = std::min(bytecode_size, bytecode->length());
+  for (int i = 0; i < first_mismatch; ++i) {
+    if (bytecode_ptr[i] != bytecode->get(i)) {
+      mismatches = true;
+      first_mismatch = i;
+      break;
+    }
+  }
+
+  if (mismatches) {
+    return first_mismatch;
+  }
+  return -1;
+}
+#endif
+
 void BytecodeArrayWriter::Write(BytecodeNode* node) {
   DCHECK(!Bytecodes::IsJump(node->bytecode()));
 
@@ -286,6 +309,8 @@ Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
       return Bytecode::kJumpIfUndefinedConstant;
     case Bytecode::kJumpIfNotUndefined:
       return Bytecode::kJumpIfNotUndefinedConstant;
+    case Bytecode::kJumpIfUndefinedOrNull:
+      return Bytecode::kJumpIfUndefinedOrNullConstant;
     case Bytecode::kJumpIfJSReceiver:
       return Bytecode::kJumpIfJSReceiverConstant;
     default:
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 5dac1b41c3159c..22f0296affbf25 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -55,6 +55,13 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
                                         int parameter_count,
                                         Handle<ByteArray> handler_table);
 
+  Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
+
+#ifdef DEBUG
+  // Returns the offset of the first mismatching byte, or -1 if the bytecodes
+  // match.
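+  // Intended for debug-only verification, e.g. (illustrative):
+  //   DCHECK_EQ(-1, writer.CheckBytecodeMatches(bytecode));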
+  int CheckBytecodeMatches(Handle<BytecodeArray> bytecode);
+#endif
+
   bool RemainderOfBlockIsDead() const { return exit_seen_in_block_; }
 
  private:
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index 0f87c5bdfb46a2..40ee88d6ebceb7 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -18,8 +18,8 @@ namespace interpreter {
 
 class CreateArrayLiteralFlags {
  public:
-  class FlagsBits : public BitField8<int, 0, 5> {};
-  class FastCloneSupportedBit : public BitField8<bool, FlagsBits::kNext, 1> {};
+  using FlagsBits = BitField8<int, 0, 5>;
+  using FastCloneSupportedBit = FlagsBits::Next<bool, 1>;
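+  // Resulting layout: bits 0-4 hold the runtime flags, bit 5 the fast-clone
+  // bit; Next<> chains the ranges without hand-written shift offsets.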
 
   static uint8_t Encode(bool use_fast_shallow_clone, int runtime_flags);
 
@@ -29,8 +29,8 @@ class CreateArrayLiteralFlags {
 
 class CreateObjectLiteralFlags {
  public:
-  class FlagsBits : public BitField8<int, 0, 5> {};
-  class FastCloneSupportedBit : public BitField8<bool, FlagsBits::kNext, 1> {};
+  using FlagsBits = BitField8<int, 0, 5>;
+  using FastCloneSupportedBit = FlagsBits::Next<bool, 1>;
 
   static uint8_t Encode(int runtime_flags, bool fast_clone_supported);
 
@@ -40,8 +40,8 @@ class CreateObjectLiteralFlags {
 
 class CreateClosureFlags {
  public:
-  class PretenuredBit : public BitField8<bool, 0, 1> {};
-  class FastNewClosureBit : public BitField8<bool, PretenuredBit::kNext, 1> {};
+  using PretenuredBit = BitField8<bool, 0, 1>;
+  using FastNewClosureBit = PretenuredBit::Next<bool, 1>;
 
   static uint8_t Encode(bool pretenure, bool is_function_scope,
                         bool might_always_opt);
@@ -80,9 +80,8 @@ class TestTypeOfFlags {
 
 class StoreLookupSlotFlags {
  public:
-  class LanguageModeBit : public BitField8<LanguageMode, 0, 1> {};
-  class LookupHoistingModeBit
-      : public BitField8<bool, LanguageModeBit::kNext, 1> {};
+  using LanguageModeBit = BitField8<LanguageMode, 0, 1>;
+  using LookupHoistingModeBit = LanguageModeBit::Next<bool, 1>;
   STATIC_ASSERT(LanguageModeSize <= LanguageModeBit::kNumValues);
 
   static uint8_t Encode(LanguageMode language_mode,
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index d3b27b4375f5dd..6a0b02d8527ea8 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -14,7 +14,9 @@
 #include "src/interpreter/bytecode-jump-table.h"
 #include "src/interpreter/bytecode-label.h"
 #include "src/interpreter/bytecode-register-allocator.h"
+#include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/control-flow-builders.h"
+#include "src/logging/log.h"
 #include "src/objects/debug-objects.h"
 #include "src/objects/literal-objects-inl.h"
 #include "src/objects/objects-inl.h"
@@ -915,39 +917,67 @@ class BytecodeGenerator::IteratorRecord final {
   Register next_;
 };
 
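+// Scoped helper that collects the jump targets taken when an optional chain
+// short-circuits. While the scope is active, any link that sees an undefined
+// or null receiver (e.g. `a` in the illustrative JS `a?.b.c`) jumps to these
+// labels; the destructor restores the outer label set so nested chains
+// compose.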
+class BytecodeGenerator::OptionalChainNullLabelScope final {
+ public:
+  explicit OptionalChainNullLabelScope(BytecodeGenerator* bytecode_generator)
+      : bytecode_generator_(bytecode_generator),
+        labels_(bytecode_generator->zone()) {
+    prev_ = bytecode_generator_->optional_chaining_null_labels_;
+    bytecode_generator_->optional_chaining_null_labels_ = &labels_;
+  }
+
+  ~OptionalChainNullLabelScope() {
+    bytecode_generator_->optional_chaining_null_labels_ = prev_;
+  }
+
+  BytecodeLabels* labels() { return &labels_; }
+
+ private:
+  BytecodeGenerator* bytecode_generator_;
+  BytecodeLabels labels_;
+  BytecodeLabels* prev_;
+};
+
 namespace {
 
+template <typename PropertyT>
+struct Accessors : public ZoneObject {
+  Accessors() : getter(nullptr), setter(nullptr) {}
+  PropertyT* getter;
+  PropertyT* setter;
+};
+
 // A map from property names to getter/setter pairs allocated in the zone that
 // also provides a way of accessing the pairs in the order they were first
 // added so that the generated bytecode is always the same.
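+// Usage sketch (illustrative): LookupOrInsert(key) returns the zone-allocated
+// Accessors<PropertyT>* for |key|, creating the entry on first use, so callers
+// can write accessor_table.LookupOrInsert(key)->getter = property;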
+template <typename PropertyT>
 class AccessorTable
-    : public base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+    : public base::TemplateHashMap<Literal, Accessors<PropertyT>,
                                    bool (*)(void*, void*),
                                    ZoneAllocationPolicy> {
  public:
   explicit AccessorTable(Zone* zone)
-      : base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+      : base::TemplateHashMap<Literal, Accessors<PropertyT>,
                               bool (*)(void*, void*), ZoneAllocationPolicy>(
             Literal::Match, ZoneAllocationPolicy(zone)),
         zone_(zone) {}
 
-  Iterator lookup(Literal* literal) {
-    Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
+  Accessors<PropertyT>* LookupOrInsert(Literal* key) {
+    auto it = this->find(key, true, ZoneAllocationPolicy(zone_));
     if (it->second == nullptr) {
-      it->second = new (zone_) ObjectLiteral::Accessors();
-      ordered_accessors_.push_back({literal, it->second});
+      it->second = new (zone_) Accessors<PropertyT>();
+      ordered_accessors_.push_back({key, it->second});
     }
-    return it;
+    return it->second;
   }
 
-  const std::vector<std::pair<Literal*, ObjectLiteral::Accessors*>>&
+  const std::vector<std::pair<Literal*, Accessors<PropertyT>*>>&
   ordered_accessors() {
     return ordered_accessors_;
   }
 
  private:
-  std::vector<std::pair<Literal*, ObjectLiteral::Accessors*>>
-      ordered_accessors_;
+  std::vector<std::pair<Literal*, Accessors<PropertyT>*>> ordered_accessors_;
 
   Zone* zone_;
 };
@@ -994,6 +1024,7 @@ BytecodeGenerator::BytecodeGenerator(
       execution_context_(nullptr),
       execution_result_(nullptr),
       incoming_new_target_or_generator_(),
+      optional_chaining_null_labels_(nullptr),
       dummy_feedback_slot_(feedback_spec(), FeedbackSlotKind::kCompareOp),
       generator_jump_table_(nullptr),
       suspend_count_(0),
@@ -1012,7 +1043,7 @@ Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
 #ifdef DEBUG
   // Unoptimized compilation should be context-independent. Verify that we don't
   // access the native context by nulling it out during finalization.
-  SaveAndSwitchContext save(isolate, Context());
+  NullContextScope null_context_scope(isolate);
 #endif
 
   AllocateDeferredConstants(isolate, script);
@@ -1036,6 +1067,32 @@ Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
   return bytecode_array;
 }
 
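+// Finalizes the deferred source position table (used with lazy source
+// positions) and logs the line information against the already-finalized
+// bytecode array.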
+Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
+    Isolate* isolate) {
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
+#ifdef DEBUG
+  // Unoptimized compilation should be context-independent. Verify that we don't
+  // access the native context by nulling it out during finalization.
+  NullContextScope null_context_scope(isolate);
+#endif
+
+  Handle<ByteArray> source_position_table =
+      builder()->ToSourcePositionTable(isolate);
+
+  LOG_CODE_EVENT(isolate,
+                 CodeLinePosInfoRecordEvent(
+                     info_->bytecode_array()->GetFirstBytecodeAddress(),
+                     *source_position_table));
+
+  return source_position_table;
+}
+
+#ifdef DEBUG
+int BytecodeGenerator::CheckBytecodeMatches(Handle<BytecodeArray> bytecode) {
+  return builder()->CheckBytecodeMatches(bytecode);
+}
+#endif
+
 void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
                                                   Handle<Script> script) {
   // Build global declaration pair arrays.
@@ -1383,8 +1440,9 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
       BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
       break;
   }
-  DCHECK_IMPLIES(decl->fun()->ShouldEagerCompile(),
-                 IsInEagerLiterals(decl->fun(), *eager_inner_literals_));
+  DCHECK_IMPLIES(
+      eager_inner_literals_ != nullptr && decl->fun()->ShouldEagerCompile(),
+      IsInEagerLiterals(decl->fun(), *eager_inner_literals_));
 }
 
 void BytecodeGenerator::VisitModuleNamespaceImports() {
@@ -1755,14 +1813,13 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
     return;
   }
 
-  BytecodeLabel subject_null_label, subject_undefined_label;
+  BytecodeLabel subject_undefined_label;
   FeedbackSlot slot = feedback_spec()->AddForInSlot();
 
   // Prepare the state for executing ForIn.
   builder()->SetExpressionAsStatementPosition(stmt->subject());
   VisitForAccumulatorValue(stmt->subject());
-  builder()->JumpIfUndefined(&subject_undefined_label);
-  builder()->JumpIfNull(&subject_null_label);
+  builder()->JumpIfUndefinedOrNull(&subject_undefined_label);
   Register receiver = register_allocator()->NewRegister();
   builder()->ToObject(receiver);
 
@@ -1804,7 +1861,6 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
     builder()->StoreAccumulatorInRegister(index);
     loop_builder.JumpToHeader(loop_depth_);
   }
-  builder()->Bind(&subject_null_label);
   builder()->Bind(&subject_undefined_label);
 }
 
@@ -1978,39 +2034,6 @@ bool BytecodeGenerator::ShouldOptimizeAsOneShot() const {
          info()->literal()->is_oneshot_iife();
 }
 
-void BytecodeGenerator::BuildPrivateClassMemberNameAssignment(
-    ClassLiteral::Property* property) {
-  DCHECK(property->is_private());
-  switch (property->kind()) {
-    case ClassLiteral::Property::FIELD: {
-      // Create the private name symbols for fields during class
-      // evaluation and store them on the context. These will be
-      // used as keys later during instance or static initialization.
-      RegisterAllocationScope private_name_register_scope(this);
-      Register private_name = register_allocator()->NewRegister();
-      VisitForRegisterValue(property->key(), private_name);
-      builder()
-          ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
-          .StoreAccumulatorInRegister(private_name)
-          .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
-      DCHECK_NOT_NULL(property->private_name_var());
-      BuildVariableAssignment(property->private_name_var(), Token::INIT,
-                              HoleCheckMode::kElided);
-      break;
-    }
-    case ClassLiteral::Property::METHOD: {
-      // Create the closures for private methods.
-      VisitForAccumulatorValue(property->value());
-      BuildVariableAssignment(property->private_name_var(), Token::INIT,
-                              HoleCheckMode::kElided);
-      break;
-    }
-    default:
-      // TODO(joyee): Private accessors are not yet supported.
-      UNREACHABLE();
-  }
-}
-
 void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
   size_t class_boilerplate_entry =
       builder()->AllocateDeferredConstantPoolEntry();
@@ -2019,6 +2042,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
   VisitDeclarations(expr->scope()->declarations());
   Register class_constructor = register_allocator()->NewRegister();
 
+  AccessorTable<ClassLiteral::Property> private_accessors(zone());
   {
     RegisterAllocationScope register_scope(this);
     RegisterList args = register_allocator()->NewGrowableRegisterList();
@@ -2076,7 +2100,44 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
       }
 
       if (property->is_private()) {
-        BuildPrivateClassMemberNameAssignment(property);
+        // Assign private class member's name variables.
+        switch (property->kind()) {
+          case ClassLiteral::Property::FIELD: {
+            // Create the private name symbols for fields during class
+            // evaluation and store them on the context. These will be
+            // used as keys later during instance or static initialization.
+            RegisterAllocationScope private_name_register_scope(this);
+            Register private_name = register_allocator()->NewRegister();
+            VisitForRegisterValue(property->key(), private_name);
+            builder()
+                ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
+                .StoreAccumulatorInRegister(private_name)
+                .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
+            DCHECK_NOT_NULL(property->private_name_var());
+            BuildVariableAssignment(property->private_name_var(), Token::INIT,
+                                    HoleCheckMode::kElided);
+            break;
+          }
+          case ClassLiteral::Property::METHOD: {
+            // Create the closures for private methods.
+            VisitForAccumulatorValue(property->value());
+            BuildVariableAssignment(property->private_name_var(), Token::INIT,
+                                    HoleCheckMode::kElided);
+            break;
+          }
+          case ClassLiteral::Property::GETTER: {
+            Literal* key = property->key()->AsLiteral();
+            DCHECK_NULL(private_accessors.LookupOrInsert(key)->getter);
+            private_accessors.LookupOrInsert(key)->getter = property;
+            break;
+          }
+          case ClassLiteral::Property::SETTER: {
+            Literal* key = property->key()->AsLiteral();
+            DCHECK_NULL(private_accessors.LookupOrInsert(key)->setter);
+            private_accessors.LookupOrInsert(key)->setter = property;
+            break;
+          }
+        }
         // The private fields are initialized in the initializer function and
         // the private brand for the private methods are initialized in the
         // constructor instead.
@@ -2122,6 +2183,37 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
         .CallRuntime(Runtime::kCreatePrivateNameSymbol, brand);
     BuildVariableAssignment(expr->scope()->brand(), Token::INIT,
                             HoleCheckMode::kElided);
+
+    // Store the home object for any private methods that need it. We do this
+    // here once the prototype and brand symbol have been created. Private
+    // accessors have their home object set later, when they are defined.
+    for (int i = 0; i < expr->properties()->length(); i++) {
+      RegisterAllocationScope register_scope(this);
+      ClassLiteral::Property* property = expr->properties()->at(i);
+      if (property->NeedsHomeObjectOnClassPrototype()) {
+        Register func = register_allocator()->NewRegister();
+        BuildVariableLoad(property->private_name_var(), HoleCheckMode::kElided);
+        builder()->StoreAccumulatorInRegister(func);
+        VisitSetHomeObject(func, prototype, property);
+      }
+    }
+
+    // Define accessors, using only a single call to the runtime for each pair
+    // of corresponding getters and setters.
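+    // For example (illustrative JS), `class C { get #x() {} set #x(v) {} }`
+    // allocates one accessor pair shared by both declarations of #x.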
+    for (auto accessors : private_accessors.ordered_accessors()) {
+      RegisterAllocationScope inner_register_scope(this);
+      RegisterList accessors_reg = register_allocator()->NewRegisterList(2);
+      ClassLiteral::Property* getter = accessors.second->getter;
+      ClassLiteral::Property* setter = accessors.second->setter;
+      VisitLiteralAccessor(prototype, getter, accessors_reg[0]);
+      VisitLiteralAccessor(prototype, setter, accessors_reg[1]);
+      builder()->CallRuntime(Runtime::kCreatePrivateAccessors, accessors_reg);
+      Variable* var = getter != nullptr ? getter->private_name_var()
+                                        : setter->private_name_var();
+      DCHECK_NOT_NULL(var);
+      BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
+    }
   }
 
   if (expr->instance_members_initializer_function() != nullptr) {
@@ -2241,12 +2333,13 @@ void BytecodeGenerator::VisitInitializeClassMembersStatement(
   }
 }
 
-void BytecodeGenerator::BuildThrowPrivateMethodWriteError(
-    const AstRawString* name) {
+void BytecodeGenerator::BuildInvalidPropertyAccess(MessageTemplate tmpl,
+                                                   Property* property) {
   RegisterAllocationScope register_scope(this);
+  const AstRawString* name = property->key()->AsVariableProxy()->raw_name();
   RegisterList args = register_allocator()->NewRegisterList(2);
   builder()
-      ->LoadLiteral(Smi::FromEnum(MessageTemplate::kInvalidPrivateMethodWrite))
+      ->LoadLiteral(Smi::FromEnum(tmpl))
       .StoreAccumulatorInRegister(args[0])
       .LoadLiteral(name)
       .StoreAccumulatorInRegister(args[1])
@@ -2437,7 +2530,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   }
 
   // Store computed values into the literal.
-  AccessorTable accessor_table(zone());
+  AccessorTable<ObjectLiteral::Property> accessor_table(zone());
   for (; property_index < expr->properties()->length(); property_index++) {
     ObjectLiteral::Property* property = expr->properties()->at(property_index);
     if (property->is_computed_name()) break;
@@ -2506,12 +2599,12 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
       }
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          accessor_table.LookupOrInsert(key)->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          accessor_table.LookupOrInsert(key)->setter = property;
         }
         break;
     }
@@ -2524,8 +2617,8 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
     RegisterList args = register_allocator()->NewRegisterList(5);
     builder()->MoveRegister(literal, args[0]);
     VisitForRegisterValue(accessors.first, args[1]);
-    VisitObjectLiteralAccessor(literal, accessors.second->getter, args[2]);
-    VisitObjectLiteralAccessor(literal, accessors.second->setter, args[3]);
+    VisitLiteralAccessor(literal, accessors.second->getter, args[2]);
+    VisitLiteralAccessor(literal, accessors.second->setter, args[3]);
     builder()
         ->LoadLiteral(Smi::FromInt(NONE))
         .StoreAccumulatorInRegister(args[4])
@@ -3017,6 +3110,7 @@ void BytecodeGenerator::BuildThrowIfHole(Variable* variable) {
 
 void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
                                                             Token::Value op) {
+  DCHECK(!IsPrivateMethodOrAccessorVariableMode(variable->mode()));
   if (variable->is_this() && variable->mode() == VariableMode::kConst &&
       op == Token::INIT) {
     // Perform an initialization check for 'this'. 'this' variable is the
@@ -3201,10 +3295,10 @@ BytecodeGenerator::AssignmentLhsData::NamedSuperProperty(
 }
 // static
 BytecodeGenerator::AssignmentLhsData
-BytecodeGenerator::AssignmentLhsData::PrivateMethod(Register object,
-                                                    const AstRawString* name) {
-  return AssignmentLhsData(PRIVATE_METHOD, nullptr, RegisterList(), object,
-                           Register(), nullptr, name);
+BytecodeGenerator::AssignmentLhsData::PrivateMethodOrAccessor(
+    AssignType type, Property* property) {
+  return AssignmentLhsData(type, property, RegisterList(), Register(),
+                           Register(), nullptr, nullptr);
 }
 // static
 BytecodeGenerator::AssignmentLhsData
@@ -3237,12 +3331,12 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
       Register key = VisitForRegisterValue(property->key());
       return AssignmentLhsData::KeyedProperty(object, key);
     }
-    case PRIVATE_METHOD: {
+    case PRIVATE_METHOD:
+    case PRIVATE_GETTER_ONLY:
+    case PRIVATE_SETTER_ONLY:
+    case PRIVATE_GETTER_AND_SETTER: {
       DCHECK(!property->IsSuperAccess());
-      AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
-      Register object = VisitForRegisterValue(property->obj());
-      const AstRawString* name = property->key()->AsVariableProxy()->raw_name();
-      return AssignmentLhsData::PrivateMethod(object, name);
+      return AssignmentLhsData::PrivateMethodOrAccessor(assign_type, property);
     }
     case NAMED_SUPER_PROPERTY: {
       AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
@@ -3316,8 +3410,7 @@ void BytecodeGenerator::BuildFinalizeIteration(
                           ast_string_constants()->return_string(),
                           feedback_index(feedback_spec()->AddLoadICSlot()))
       .StoreAccumulatorInRegister(method)
-      .JumpIfUndefined(iterator_is_done.New())
-      .JumpIfNull(iterator_is_done.New());
+      .JumpIfUndefinedOrNull(iterator_is_done.New());
 
   {
     RegisterAllocationScope register_scope(this);
@@ -3625,22 +3718,6 @@ void BytecodeGenerator::BuildDestructuringObjectAssignment(
     LookupHoistingMode lookup_hoisting_mode) {
   RegisterAllocationScope scope(this);
 
-  // if (value === null || value === undefined)
-  //   throw new TypeError(kNonCoercible);
-  //
-  // TODO(leszeks): Eliminate check if value is known to be non-null (e.g.
-  // an object literal).
-  BytecodeLabel is_null_or_undefined, not_null_or_undefined;
-  builder()
-      ->JumpIfNull(&is_null_or_undefined)
-      .JumpIfNotUndefined(&not_null_or_undefined);
-
-  {
-    builder()->Bind(&is_null_or_undefined);
-    builder()->SetExpressionPosition(pattern);
-    builder()->CallRuntime(Runtime::kThrowPatternAssignmentNonCoercible);
-  }
-
   // Store the assignment value in a register.
   Register value;
   RegisterList rest_runtime_callargs;
@@ -3651,7 +3728,34 @@ void BytecodeGenerator::BuildDestructuringObjectAssignment(
   } else {
     value = register_allocator()->NewRegister();
   }
-  builder()->Bind(&not_null_or_undefined).StoreAccumulatorInRegister(value);
+  builder()->StoreAccumulatorInRegister(value);
+
+  // if (value === null || value === undefined)
+  //   throw new TypeError(kNonCoercible);
+  //
+  // Since the first property access on null/undefined will also trigger a
+  // TypeError, we can elide this check. The exceptions are when the pattern is
+  // empty (no properties and no rest property), or when the first property has
+  // a computed name whose evaluation can have side effects and therefore must
+  // not happen before the TypeError is thrown.
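+  //
+  // For example (illustrative JS): `({} = null)` and `({ [f()]: x } = null)`
+  // must throw here (for the latter, before f() runs), while `({ a } = null)`
+  // throws a TypeError on the first property load anyway.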
+  //
+  // TODO(leszeks): Also eliminate this check if the value is known to be
+  // non-null (e.g. an object literal).
+  if (pattern->properties()->is_empty() ||
+      (pattern->properties()->at(0)->is_computed_name() &&
+       pattern->properties()->at(0)->kind() != ObjectLiteralProperty::SPREAD)) {
+    BytecodeLabel is_null_or_undefined, not_null_or_undefined;
+    builder()
+        ->JumpIfUndefinedOrNull(&is_null_or_undefined)
+        .Jump(&not_null_or_undefined);
+
+    {
+      builder()->Bind(&is_null_or_undefined);
+      builder()->SetExpressionPosition(pattern);
+      builder()->CallRuntime(Runtime::kThrowPatternAssignmentNonCoercible,
+                             value);
+    }
+    builder()->Bind(&not_null_or_undefined);
+  }
 
   int i = 0;
   for (ObjectLiteralProperty* pattern_property : *pattern->properties()) {
@@ -3799,7 +3903,27 @@ void BytecodeGenerator::BuildAssignment(
       break;
     }
     case PRIVATE_METHOD: {
-      BuildThrowPrivateMethodWriteError(lhs_data.name());
+      BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateMethodWrite,
+                                 lhs_data.expr()->AsProperty());
+      break;
+    }
+    case PRIVATE_GETTER_ONLY: {
+      BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateSetterAccess,
+                                 lhs_data.expr()->AsProperty());
+      break;
+    }
+    case PRIVATE_SETTER_ONLY:
+    case PRIVATE_GETTER_AND_SETTER: {
+      Register value = register_allocator()->NewRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      Property* property = lhs_data.expr()->AsProperty();
+      Register object = VisitForRegisterValue(property->obj());
+      Register key = VisitForRegisterValue(property->key());
+      BuildPrivateBrandCheck(property, object);
+      BuildPrivateSetterAccess(object, key, value);
+      if (!execution_result()->IsEffect()) {
+        builder()->LoadAccumulatorWithRegister(value);
+      }
       break;
     }
   }
@@ -3847,11 +3971,16 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
                              lhs_data.super_property_args().Truncate(3));
       break;
     }
-    case PRIVATE_METHOD: {
-      BuildThrowPrivateMethodWriteError(lhs_data.name());
+    case PRIVATE_METHOD:
+    case PRIVATE_GETTER_ONLY:
+    case PRIVATE_SETTER_ONLY:
+    case PRIVATE_GETTER_AND_SETTER: {
+      // ({ #foo: name } = obj) is currently syntactically invalid.
+      UNREACHABLE();
       break;
     }
   }
+
   BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation();
   FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
   if (expr->value()->IsSmiLiteral()) {
@@ -4284,7 +4413,14 @@ void BytecodeGenerator::VisitThrow(Throw* expr) {
 }
 
 void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
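+  // An optional chain link (the `?.` in illustrative JS `a?.b`) bails out to
+  // the enclosing chain's null labels instead of performing the load when the
+  // receiver is undefined or null.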
+  if (property->is_optional_chain_link()) {
+    DCHECK_NOT_NULL(optional_chaining_null_labels_);
+    builder()->LoadAccumulatorWithRegister(obj).JumpIfUndefinedOrNull(
+        optional_chaining_null_labels_->New());
+  }
+
   AssignType property_kind = Property::GetAssignType(property);
+
   switch (property_kind) {
     case NON_PROPERTY:
       UNREACHABLE();
@@ -4308,18 +4444,20 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
     case KEYED_SUPER_PROPERTY:
       VisitKeyedSuperPropertyLoad(property, Register::invalid_value());
       break;
+    case PRIVATE_SETTER_ONLY: {
+      BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateGetterAccess,
+                                 property);
+      break;
+    }
+    case PRIVATE_GETTER_ONLY:
+    case PRIVATE_GETTER_AND_SETTER: {
+      Register key = VisitForRegisterValue(property->key());
+      BuildPrivateBrandCheck(property, obj);
+      BuildPrivateGetterAccess(obj, key);
+      break;
+    }
     case PRIVATE_METHOD: {
-      Variable* private_name = property->key()->AsVariableProxy()->var();
-
-      // Perform the brand check.
-      DCHECK(private_name->requires_brand_check());
-      ClassScope* scope = private_name->scope()->AsClassScope();
-      Variable* brand = scope->brand();
-      BuildVariableLoadForAccumulatorValue(brand, HoleCheckMode::kElided);
-      builder()->SetExpressionPosition(property);
-      builder()->LoadKeyedProperty(
-          obj, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
-
+      BuildPrivateBrandCheck(property, obj);
       // In the case of private methods, property->key() is the function to be
       // loaded (stored in a context slot), so load this directly.
       VisitForAccumulatorValue(property->key());
@@ -4328,6 +4466,48 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
   }
 }
 
+void BytecodeGenerator::BuildPrivateGetterAccess(Register object,
+                                                 Register accessor_pair) {
+  RegisterAllocationScope scope(this);
+  Register accessor = register_allocator()->NewRegister();
+  RegisterList args = register_allocator()->NewRegisterList(1);
+
+  builder()
+      ->CallRuntime(Runtime::kLoadPrivateGetter, accessor_pair)
+      .StoreAccumulatorInRegister(accessor)
+      .MoveRegister(object, args[0])
+      .CallProperty(accessor, args,
+                    feedback_index(feedback_spec()->AddCallICSlot()));
+}
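+// A sketch of the calling convention above, assuming the runtime function
+// returns the getter half of the stored AccessorPair: `obj.#p` loads the
+// pair, extracts the getter via Runtime::kLoadPrivateGetter, and invokes it
+// with `obj` as the receiver, much like `getter.call(obj)`.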
+
+void BytecodeGenerator::BuildPrivateSetterAccess(Register object,
+                                                 Register accessor_pair,
+                                                 Register value) {
+  RegisterAllocationScope scope(this);
+  Register accessor = register_allocator()->NewRegister();
+  RegisterList args = register_allocator()->NewRegisterList(2);
+
+  builder()
+      ->CallRuntime(Runtime::kLoadPrivateSetter, accessor_pair)
+      .StoreAccumulatorInRegister(accessor)
+      .MoveRegister(object, args[0])
+      .MoveRegister(value, args[1])
+      .CallProperty(accessor, args,
+                    feedback_index(feedback_spec()->AddCallICSlot()));
+}
+
+void BytecodeGenerator::BuildPrivateBrandCheck(Property* property,
+                                               Register object) {
+  Variable* private_name = property->key()->AsVariableProxy()->var();
+  DCHECK(private_name->requires_brand_check());
+  ClassScope* scope = private_name->scope()->AsClassScope();
+  Variable* brand = scope->brand();
+  BuildVariableLoadForAccumulatorValue(brand, HoleCheckMode::kElided);
+  builder()->SetExpressionPosition(property);
+  builder()->LoadKeyedProperty(
+      object, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
+}
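+// The brand is loaded into the accumulator and then looked up on |object|
+// as a keyed property; instances created by the class carry the brand,
+// while a miss on the private brand throws rather than yielding undefined.
+// For example:
+//
+//   class C { #m() {} static probe(o) { return o.#m(); } }
+//   C.probe(new C());  // brand check passes
+//   C.probe({});       // TypeError: #m is not declared on the object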
+
 void BytecodeGenerator::VisitPropertyLoadForRegister(Register obj,
                                                      Property* expr,
                                                      Register destination) {
@@ -4376,6 +4556,16 @@ void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
   }
 }
 
+void BytecodeGenerator::VisitOptionalChain(OptionalChain* expr) {
+  BytecodeLabel done;
+  OptionalChainNullLabelScope label_scope(this);
+  VisitForAccumulatorValue(expr->expression());
+  builder()->Jump(&done);
+  label_scope.labels()->Bind(builder());
+  builder()->LoadUndefined();
+  builder()->Bind(&done);
+}
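+// A sketch of the lowering: every `?.` link jumps to the scope's null
+// labels when its receiver is undefined or null, and binding those labels
+// here turns the jump into an `undefined` result for the whole chain.
+//
+//   const v = a?.b.c;
+//   // behaves like:
+//   const w = (a === undefined || a === null) ? undefined : a.b.c;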
+
 void BytecodeGenerator::VisitProperty(Property* expr) {
   AssignType property_kind = Property::GetAssignType(expr);
   if (property_kind != NAMED_SUPER_PROPERTY &&
@@ -4509,6 +4699,12 @@ void BytecodeGenerator::VisitCall(Call* expr) {
       UNREACHABLE();
   }
 
+  if (expr->is_optional_chain_link()) {
+    DCHECK_NOT_NULL(optional_chaining_null_labels_);
+    builder()->LoadAccumulatorWithRegister(callee).JumpIfUndefinedOrNull(
+        optional_chaining_null_labels_->New());
+  }
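+  // e.g. `f?.(arg)`: when `f` is undefined or null the jump above
+  // short-circuits the whole chain to undefined, and `arg` is never
+  // evaluated.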
+
   // Evaluate all arguments to the function call and store in sequential args
   // registers.
   VisitArguments(expr->arguments(), &args);
@@ -4770,6 +4966,29 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* unary) {
     Register object = VisitForRegisterValue(property->obj());
     VisitForAccumulatorValue(property->key());
     builder()->Delete(object, language_mode());
+  } else if (expr->IsOptionalChain()) {
+    Expression* expr_inner = expr->AsOptionalChain()->expression();
+    if (expr_inner->IsProperty()) {
+      Property* property = expr_inner->AsProperty();
+      DCHECK(!property->IsPrivateReference());
+      BytecodeLabel done;
+      OptionalChainNullLabelScope label_scope(this);
+      VisitForAccumulatorValue(property->obj());
+      if (property->is_optional_chain_link()) {
+        builder()->JumpIfUndefinedOrNull(label_scope.labels()->New());
+      }
+      Register object = register_allocator()->NewRegister();
+      builder()->StoreAccumulatorInRegister(object);
+      VisitForAccumulatorValue(property->key());
+      builder()->Delete(object, language_mode());
+      builder()->Jump(&done);
+      label_scope.labels()->Bind(builder());
+      builder()->LoadTrue();
+      builder()->Bind(&done);
+    } else {
+      VisitForEffect(expr);
+      builder()->LoadTrue();
+    }
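+    // e.g. `delete a?.b` evaluates to true without touching `b` when `a`
+    // is undefined or null; optional chains whose tail is not a plain
+    // property access take the else branch, are visited for effect, and
+    // also yield true.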
   } else if (expr->IsVariableProxy() &&
              !expr->AsVariableProxy()->is_new_target()) {
     // Delete of an unqualified identifier is allowed in sloppy mode but is
@@ -4875,8 +5094,25 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
       break;
     }
     case PRIVATE_METHOD: {
-      BuildThrowPrivateMethodWriteError(
-          property->key()->AsVariableProxy()->raw_name());
+      BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateMethodWrite,
+                                 property);
+      return;
+    }
+    case PRIVATE_GETTER_ONLY: {
+      BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateSetterAccess,
+                                 property);
+      return;
+    }
+    case PRIVATE_SETTER_ONLY: {
+      BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateGetterAccess,
+                                 property);
+      return;
+    }
+    case PRIVATE_GETTER_AND_SETTER: {
+      object = VisitForRegisterValue(property->obj());
+      key = VisitForRegisterValue(property->key());
+      BuildPrivateBrandCheck(property, object);
+      BuildPrivateGetterAccess(object, key);
       break;
     }
   }
@@ -4945,9 +5181,18 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
           .CallRuntime(Runtime::kStoreKeyedToSuper, super_property_args);
       break;
     }
+    case PRIVATE_SETTER_ONLY:
+    case PRIVATE_GETTER_ONLY:
     case PRIVATE_METHOD: {
-      BuildThrowPrivateMethodWriteError(
-          property->key()->AsVariableProxy()->raw_name());
+      UNREACHABLE();
+    }
+    case PRIVATE_GETTER_AND_SETTER: {
+      Register value = register_allocator()->NewRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      BuildPrivateSetterAccess(object, key, value);
+      if (!execution_result()->IsEffect()) {
+        builder()->LoadAccumulatorWithRegister(value);
+      }
       break;
     }
   }
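+  // e.g. `this.#p++` or `--this.#p` with a getter/setter pair: the getter
+  // (emitted above, before the modification) produced the current value,
+  // and the setter here writes the updated value back.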
@@ -4969,6 +5214,9 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
     case Token::AND:
       VisitLogicalAndExpression(binop);
       break;
+    case Token::NULLISH:
+      VisitNullishExpression(binop);
+      break;
     default:
       VisitArithmeticExpression(binop);
       break;
@@ -4986,6 +5234,9 @@ void BytecodeGenerator::VisitNaryOperation(NaryOperation* expr) {
     case Token::AND:
       VisitNaryLogicalAndExpression(expr);
       break;
+    case Token::NULLISH:
+      VisitNaryNullishExpression(expr);
+      break;
     default:
       VisitNaryArithmeticExpression(expr);
       break;
@@ -5128,39 +5379,37 @@ void BytecodeGenerator::VisitImportCallExpression(ImportCallExpression* expr) {
 }
 
 void BytecodeGenerator::BuildGetIterator(IteratorType hint) {
-  RegisterList args = register_allocator()->NewRegisterList(1);
-  Register method = register_allocator()->NewRegister();
-  Register obj = args[0];
-
   if (hint == IteratorType::kAsync) {
+    RegisterAllocationScope scope(this);
+
+    Register obj = register_allocator()->NewRegister();
+    Register method = register_allocator()->NewRegister();
+
     // Set method to GetMethod(obj, @@asyncIterator)
     builder()->StoreAccumulatorInRegister(obj).LoadAsyncIteratorProperty(
         obj, feedback_index(feedback_spec()->AddLoadICSlot()));
 
-    BytecodeLabel async_iterator_undefined, async_iterator_null, done;
-    // TODO(ignition): Add a single opcode for JumpIfNullOrUndefined
-    builder()->JumpIfUndefined(&async_iterator_undefined);
-    builder()->JumpIfNull(&async_iterator_null);
+    BytecodeLabel async_iterator_undefined, done;
+    builder()->JumpIfUndefinedOrNull(&async_iterator_undefined);
 
     // Let iterator be Call(method, obj)
     builder()->StoreAccumulatorInRegister(method).CallProperty(
-        method, args, feedback_index(feedback_spec()->AddCallICSlot()));
+        method, RegisterList(obj),
+        feedback_index(feedback_spec()->AddCallICSlot()));
 
     // If Type(iterator) is not Object, throw a TypeError exception.
     builder()->JumpIfJSReceiver(&done);
     builder()->CallRuntime(Runtime::kThrowSymbolAsyncIteratorInvalid);
 
     builder()->Bind(&async_iterator_undefined);
-    builder()->Bind(&async_iterator_null);
     // If method is undefined or null,
     //     Let syncMethod be GetMethod(obj, @@iterator)
     builder()
-        ->LoadIteratorProperty(obj,
-                               feedback_index(feedback_spec()->AddLoadICSlot()))
+        ->GetIterator(obj, feedback_index(feedback_spec()->AddLoadICSlot()))
         .StoreAccumulatorInRegister(method);
 
     //     Let syncIterator be Call(syncMethod, obj)
-    builder()->CallProperty(method, args,
+    builder()->CallProperty(method, RegisterList(obj),
                             feedback_index(feedback_spec()->AddCallICSlot()));
 
     // Return CreateAsyncFromSyncIterator(syncIterator)
@@ -5171,16 +5420,22 @@ void BytecodeGenerator::BuildGetIterator(IteratorType hint) {
 
     builder()->Bind(&done);
   } else {
-    // Let method be GetMethod(obj, @@iterator).
-    builder()
-        ->StoreAccumulatorInRegister(obj)
-        .LoadIteratorProperty(obj,
-                              feedback_index(feedback_spec()->AddLoadICSlot()))
-        .StoreAccumulatorInRegister(method);
+    {
+      RegisterAllocationScope scope(this);
 
-    // Let iterator be Call(method, obj).
-    builder()->CallProperty(method, args,
-                            feedback_index(feedback_spec()->AddCallICSlot()));
+      Register obj = register_allocator()->NewRegister();
+      Register method = register_allocator()->NewRegister();
+
+      // Let method be GetMethod(obj, @@iterator).
+      builder()
+          ->StoreAccumulatorInRegister(obj)
+          .GetIterator(obj, feedback_index(feedback_spec()->AddLoadICSlot()))
+          .StoreAccumulatorInRegister(method);
+
+      // Let iterator be Call(method, obj).
+      builder()->CallProperty(method, RegisterList(obj),
+                              feedback_index(feedback_spec()->AddCallICSlot()));
+    }
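+    // The new GetIterator bytecode above is intended to fetch
+    // obj[Symbol.iterator] through the load feedback slot as a dedicated
+    // bytecode, in place of the generic named load that
+    // LoadIteratorProperty emitted.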
 
     // If Type(iterator) is not Object, throw a TypeError exception.
     BytecodeLabel no_type_error;
@@ -5241,8 +5496,7 @@ void BytecodeGenerator::BuildCallIteratorMethod(Register iterator,
   FeedbackSlot slot = feedback_spec()->AddLoadICSlot();
   builder()
       ->LoadNamedProperty(iterator, method_name, feedback_index(slot))
-      .JumpIfUndefined(if_notcalled->New())
-      .JumpIfNull(if_notcalled->New())
+      .JumpIfUndefinedOrNull(if_notcalled->New())
       .StoreAccumulatorInRegister(method)
       .CallProperty(method, receiver_and_args,
                     feedback_index(feedback_spec()->AddCallICSlot()))
@@ -5375,14 +5629,16 @@ void BytecodeGenerator::VisitNaryCommaExpression(NaryOperation* expr) {
 void BytecodeGenerator::VisitLogicalTestSubExpression(
     Token::Value token, Expression* expr, BytecodeLabels* then_labels,
     BytecodeLabels* else_labels, int coverage_slot) {
-  DCHECK(token == Token::OR || token == Token::AND);
+  DCHECK(token == Token::OR || token == Token::AND || token == Token::NULLISH);
 
   BytecodeLabels test_next(zone());
   if (token == Token::OR) {
     VisitForTest(expr, then_labels, &test_next, TestFallthrough::kElse);
-  } else {
-    DCHECK_EQ(Token::AND, token);
+  } else if (token == Token::AND) {
     VisitForTest(expr, &test_next, else_labels, TestFallthrough::kThen);
+  } else {
+    DCHECK_EQ(Token::NULLISH, token);
+    VisitForNullishTest(expr, then_labels, &test_next, else_labels);
   }
   test_next.Bind(builder());
 
@@ -5392,7 +5648,7 @@ void BytecodeGenerator::VisitLogicalTestSubExpression(
 void BytecodeGenerator::VisitLogicalTest(Token::Value token, Expression* left,
                                          Expression* right,
                                          int right_coverage_slot) {
-  DCHECK(token == Token::OR || token == Token::AND);
+  DCHECK(token == Token::OR || token == Token::AND || token == Token::NULLISH);
   TestResultScope* test_result = execution_result()->AsTest();
   BytecodeLabels* then_labels = test_result->then_labels();
   BytecodeLabels* else_labels = test_result->else_labels();
@@ -5407,7 +5663,7 @@ void BytecodeGenerator::VisitLogicalTest(Token::Value token, Expression* left,
 void BytecodeGenerator::VisitNaryLogicalTest(
     Token::Value token, NaryOperation* expr,
     const NaryCodeCoverageSlots* coverage_slots) {
-  DCHECK(token == Token::OR || token == Token::AND);
+  DCHECK(token == Token::OR || token == Token::AND || token == Token::NULLISH);
   DCHECK_GT(expr->subsequent_length(), 0);
 
   TestResultScope* test_result = execution_result()->AsTest();
@@ -5463,6 +5719,27 @@ bool BytecodeGenerator::VisitLogicalAndSubExpression(Expression* expr,
   return false;
 }
 
+bool BytecodeGenerator::VisitNullishSubExpression(Expression* expr,
+                                                  BytecodeLabels* end_labels,
+                                                  int coverage_slot) {
+  if (expr->IsLiteralButNotNullOrUndefined()) {
+    VisitForAccumulatorValue(expr);
+    end_labels->Bind(builder());
+    return true;
+  } else if (!expr->IsNullOrUndefinedLiteral()) {
+    VisitForAccumulatorValue(expr);
+    BytecodeLabel is_null_or_undefined;
+    builder()
+        ->JumpIfUndefinedOrNull(&is_null_or_undefined)
+        .Jump(end_labels->New());
+    builder()->Bind(&is_null_or_undefined);
+  }
+
+  BuildIncrementBlockCoverageCounterIfEnabled(coverage_slot);
+
+  return false;
+}
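+// e.g. `0 ?? 42` evaluates to 0 and `null ?? 42` to 42: only undefined and
+// null fall through to the right operand, unlike `||`, which also discards
+// falsy values such as 0, "" and false.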
+
 void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
   Expression* left = binop->left();
   Expression* right = binop->right();
@@ -5585,6 +5862,68 @@ void BytecodeGenerator::VisitNaryLogicalAndExpression(NaryOperation* expr) {
   }
 }
 
+void BytecodeGenerator::VisitNullishExpression(BinaryOperation* binop) {
+  Expression* left = binop->left();
+  Expression* right = binop->right();
+
+  int right_coverage_slot =
+      AllocateBlockCoverageSlotIfEnabled(binop, SourceRangeKind::kRight);
+
+  if (execution_result()->IsTest()) {
+    TestResultScope* test_result = execution_result()->AsTest();
+    if (left->IsLiteralButNotNullOrUndefined() && left->ToBooleanIsTrue()) {
+      builder()->Jump(test_result->NewThenLabel());
+    } else if (left->IsNullOrUndefinedLiteral() &&
+               right->IsNullOrUndefinedLiteral()) {
+      BuildIncrementBlockCoverageCounterIfEnabled(right_coverage_slot);
+      builder()->Jump(test_result->NewElseLabel());
+    } else {
+      VisitLogicalTest(Token::NULLISH, left, right, right_coverage_slot);
+    }
+    test_result->SetResultConsumedByTest();
+  } else {
+    BytecodeLabels end_labels(zone());
+    if (VisitNullishSubExpression(left, &end_labels, right_coverage_slot)) {
+      return;
+    }
+    VisitForAccumulatorValue(right);
+    end_labels.Bind(builder());
+  }
+}
+
+void BytecodeGenerator::VisitNaryNullishExpression(NaryOperation* expr) {
+  Expression* first = expr->first();
+  DCHECK_GT(expr->subsequent_length(), 0);
+
+  NaryCodeCoverageSlots coverage_slots(this, expr);
+
+  if (execution_result()->IsTest()) {
+    TestResultScope* test_result = execution_result()->AsTest();
+    if (first->IsLiteralButNotNullOrUndefined() && first->ToBooleanIsTrue()) {
+      builder()->Jump(test_result->NewThenLabel());
+    } else {
+      VisitNaryLogicalTest(Token::NULLISH, expr, &coverage_slots);
+    }
+    test_result->SetResultConsumedByTest();
+  } else {
+    BytecodeLabels end_labels(zone());
+    if (VisitNullishSubExpression(first, &end_labels,
+                                  coverage_slots.GetSlotFor(0))) {
+      return;
+    }
+    for (size_t i = 0; i < expr->subsequent_length() - 1; ++i) {
+      if (VisitNullishSubExpression(expr->subsequent(i), &end_labels,
+                                    coverage_slots.GetSlotFor(i + 1))) {
+        return;
+      }
+    }
+    // We have to visit the last value even if it's nullish, because we need its
+    // actual value.
+    VisitForAccumulatorValue(expr->subsequent(expr->subsequent_length() - 1));
+    end_labels.Bind(builder());
+  }
+}
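+// e.g. `a ?? b ?? c` yields the first non-nullish of `a` and `b`, else the
+// value of `c` as-is; hence the unconditional visit of the last operand
+// above, since `null ?? undefined` must still evaluate to undefined.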
+
 void BytecodeGenerator::BuildNewLocalActivationContext() {
   ValueResultScope value_execution_result(this);
   Scope* scope = closure_scope();
@@ -5682,8 +6021,9 @@ void BytecodeGenerator::BuildNewLocalCatchContext(Scope* scope) {
   builder()->CreateCatchContext(exception, scope);
 }
 
-void BytecodeGenerator::VisitObjectLiteralAccessor(
-    Register home_object, ObjectLiteralProperty* property, Register value_out) {
+void BytecodeGenerator::VisitLiteralAccessor(Register home_object,
+                                             LiteralProperty* property,
+                                             Register value_out) {
   if (property == nullptr) {
     builder()->LoadNull().StoreAccumulatorInRegister(value_out);
   } else {
@@ -5929,6 +6269,25 @@ void BytecodeGenerator::VisitForTest(Expression* expr,
   }
 }
 
+// Visits the expression |expr| for testing its nullish value and jumping to
+// the |then_labels| or |else_labels| depending on its value and
+// short-circuit semantics; a nullish value falls through to
+// |test_next_labels|.
+void BytecodeGenerator::VisitForNullishTest(Expression* expr,
+                                            BytecodeLabels* then_labels,
+                                            BytecodeLabels* test_next_labels,
+                                            BytecodeLabels* else_labels) {
+  // Nullish short-circuits on undefined or null; otherwise we fall back to
+  // BuildTest with no fallthrough.
+  // TODO(joshualitt): We should do this in a TestResultScope.
+  TypeHint type_hint = VisitForAccumulatorValue(expr);
+  ToBooleanMode mode = ToBooleanModeFromTypeHint(type_hint);
+
+  // Skip the nullish short-circuit if we already have a boolean.
+  if (mode != ToBooleanMode::kAlreadyBoolean) {
+    builder()->JumpIfUndefinedOrNull(test_next_labels->New());
+  }
+  BuildTest(mode, then_labels, else_labels, TestFallthrough::kNone);
+}
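+// e.g. in `if (a ?? b) { ... }`: a non-nullish `a` is boolean-tested
+// directly via BuildTest, while a nullish `a` jumps to |test_next_labels|
+// so that `b` decides the branch.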
+
 void BytecodeGenerator::VisitInSameTestExecutionScope(Expression* expr) {
   DCHECK(execution_result()->IsTest());
   {
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index b754d2c296cd2c..134b1b463ab11e 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -18,6 +18,7 @@ namespace internal {
 
 class AstNodeSourceRanges;
 class AstStringConstants;
+class BytecodeArray;
 class UnoptimizedCompilationInfo;
 enum class SourceRangeKind;
 
@@ -38,6 +39,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
   void GenerateBytecode(uintptr_t stack_limit);
   Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate,
                                          Handle<Script> script);
+  Handle<ByteArray> FinalizeSourcePositionTable(Isolate* isolate);
+
+#ifdef DEBUG
+  int CheckBytecodeMatches(Handle<BytecodeArray> bytecode);
+#endif
 
 #define DECLARE_VISIT(type) void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
@@ -66,6 +72,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
   class AccumulatorPreservingScope;
   class TestResultScope;
   class ValueResultScope;
+  class OptionalChainNullLabelScope;
 
   using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
 
@@ -84,8 +91,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
                                            Register object,
                                            const AstRawString* name);
     static AssignmentLhsData KeyedProperty(Register object, Register key);
-    static AssignmentLhsData PrivateMethod(Register object,
-                                           const AstRawString* name);
+    static AssignmentLhsData PrivateMethodOrAccessor(AssignType type,
+                                                     Property* property);
     static AssignmentLhsData NamedSuperProperty(
         RegisterList super_property_args);
     static AssignmentLhsData KeyedSuperProperty(
@@ -93,7 +100,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
 
     AssignType assign_type() const { return assign_type_; }
     Expression* expr() const {
-      DCHECK_EQ(assign_type_, NON_PROPERTY);
+      DCHECK(assign_type_ == NON_PROPERTY || assign_type_ == PRIVATE_METHOD ||
+             assign_type_ == PRIVATE_GETTER_ONLY ||
+             assign_type_ == PRIVATE_SETTER_ONLY ||
+             assign_type_ == PRIVATE_GETTER_AND_SETTER);
       return expr_;
     }
     Expression* object_expr() const {
@@ -101,8 +111,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
       return object_expr_;
     }
     Register object() const {
-      DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY ||
-             assign_type_ == PRIVATE_METHOD);
+      DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY);
       return object_;
     }
     Register key() const {
@@ -110,7 +119,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
       return key_;
     }
     const AstRawString* name() const {
-      DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == PRIVATE_METHOD);
+      DCHECK(assign_type_ == NAMED_PROPERTY);
       return name_;
     }
     RegisterList super_property_args() const {
@@ -159,12 +168,14 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
   void VisitCommaExpression(BinaryOperation* binop);
   void VisitLogicalOrExpression(BinaryOperation* binop);
   void VisitLogicalAndExpression(BinaryOperation* binop);
+  void VisitNullishExpression(BinaryOperation* binop);
 
   // Dispatched from VisitNaryOperation.
   void VisitNaryArithmeticExpression(NaryOperation* expr);
   void VisitNaryCommaExpression(NaryOperation* expr);
   void VisitNaryLogicalOrExpression(NaryOperation* expr);
   void VisitNaryLogicalAndExpression(NaryOperation* expr);
+  void VisitNaryNullishExpression(NaryOperation* expr);
 
   // Dispatched from VisitUnaryOperation.
   void VisitVoid(UnaryOperation* expr);
@@ -295,8 +306,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
   void VisitArgumentsObject(Variable* variable);
   void VisitRestArgumentsArray(Variable* rest);
   void VisitCallSuper(Call* call);
-  void BuildThrowPrivateMethodWriteError(const AstRawString* name);
-  void BuildPrivateClassMemberNameAssignment(ClassLiteral::Property* property);
+  void BuildInvalidPropertyAccess(MessageTemplate tmpl, Property* property);
+  void BuildPrivateBrandCheck(Property* property, Register object);
+  void BuildPrivateGetterAccess(Register obj, Register access_pair);
+  void BuildPrivateSetterAccess(Register obj, Register access_pair,
+                                Register value);
   void BuildClassLiteral(ClassLiteral* expr, Register name);
   void VisitClassLiteral(ClassLiteral* expr, Register name);
   void VisitNewTargetVariable(Variable* variable);
@@ -308,9 +322,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
   void VisitBlockDeclarationsAndStatements(Block* stmt);
   void VisitSetHomeObject(Register value, Register home_object,
                           LiteralProperty* property);
-  void VisitObjectLiteralAccessor(Register home_object,
-                                  ObjectLiteralProperty* property,
-                                  Register value_out);
+  void VisitLiteralAccessor(Register home_object, LiteralProperty* property,
+                            Register value_out);
   void VisitForInAssignment(Expression* expr);
   void VisitModuleNamespaceImports();
 
@@ -320,6 +333,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
                         int right_coverage_slot);
   void VisitNaryLogicalTest(Token::Value token, NaryOperation* expr,
                             const NaryCodeCoverageSlots* coverage_slots);
+
   // Visit a (non-RHS) test for a logical op, which falls through if the test
   // fails or jumps to the appropriate labels if it succeeds.
   void VisitLogicalTestSubExpression(Token::Value token, Expression* expr,
@@ -334,6 +348,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
                                     BytecodeLabels* end_labels,
                                     int coverage_slot);
 
+  // Helper for binary and nary nullish op value expressions.
+  bool VisitNullishSubExpression(Expression* expr, BytecodeLabels* end_labels,
+                                 int coverage_slot);
+
   // Visit the body of a loop iteration.
   void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
 
@@ -375,6 +393,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
   void VisitForEffect(Expression* expr);
   void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
                     BytecodeLabels* else_labels, TestFallthrough fallthrough);
+  void VisitForNullishTest(Expression* expr, BytecodeLabels* then_labels,
+                           BytecodeLabels* test_next_labels,
+                           BytecodeLabels* else_labels);
 
   void VisitInSameTestExecutionScope(Expression* expr);
 
@@ -489,6 +510,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
 
   Register incoming_new_target_or_generator_;
 
+  BytecodeLabels* optional_chaining_null_labels_;
+
   // Dummy feedback slot for compare operations, where we don't care about
   // feedback
   SharedFeedbackSlot dummy_feedback_slot_;
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 591dfbe2b7404c..6802d53c955fbd 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -298,6 +298,7 @@ namespace interpreter {
   V(JumpIfNotNullConstant, AccumulatorUse::kRead, OperandType::kIdx)           \
   V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)         \
   V(JumpIfNotUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)      \
+  V(JumpIfUndefinedOrNullConstant, AccumulatorUse::kRead, OperandType::kIdx)   \
   V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)              \
   V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
   V(JumpIfJSReceiverConstant, AccumulatorUse::kRead, OperandType::kIdx)        \
@@ -315,6 +316,7 @@ namespace interpreter {
   V(JumpIfNotNull, AccumulatorUse::kRead, OperandType::kUImm)                  \
   V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kUImm)                \
   V(JumpIfNotUndefined, AccumulatorUse::kRead, OperandType::kUImm)             \
+  V(JumpIfUndefinedOrNull, AccumulatorUse::kRead, OperandType::kUImm)          \
   V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kUImm)               \
                                                                                \
   /* Smi-table lookup for switch statements */                                 \
@@ -353,6 +355,9 @@ namespace interpreter {
   V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg,                \
     OperandType::kRegOutList, OperandType::kRegCount)                          \
                                                                                \
+  /* Iterator protocol operations */                                           \
+  V(GetIterator, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx) \
+                                                                               \
   /* Debugger */                                                               \
   V(Debugger, AccumulatorUse::kNone)                                           \
                                                                                \
@@ -407,7 +412,8 @@ namespace interpreter {
   V(JumpIfNotNull)                                      \
   V(JumpIfUndefined)                                    \
   V(JumpIfNotUndefined)                                 \
-  V(JumpIfJSReceiver)                                   \
+  V(JumpIfUndefinedOrNull)                              \
+  V(JumpIfJSReceiver)
 
 #define JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)     \
   JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
@@ -415,9 +421,10 @@ namespace interpreter {
   V(JumpIfNotNullConstant)                             \
   V(JumpIfUndefinedConstant)                           \
   V(JumpIfNotUndefinedConstant)                        \
+  V(JumpIfUndefinedOrNullConstant)                     \
   V(JumpIfTrueConstant)                                \
   V(JumpIfFalseConstant)                               \
-  V(JumpIfJSReceiverConstant)                          \
+  V(JumpIfJSReceiverConstant)
 
 #define JUMP_CONSTANT_BYTECODE_LIST(V)         \
   JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 7291ea1c35d599..f01821b5651f4b 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -31,14 +31,14 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
     : CodeStubAssembler(state),
       bytecode_(bytecode),
       operand_scale_(operand_scale),
-      VARIABLE_CONSTRUCTOR(interpreted_frame_pointer_,
-                           MachineType::PointerRepresentation()),
+      TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
       VARIABLE_CONSTRUCTOR(
           bytecode_array_, MachineRepresentation::kTagged,
           Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
-      VARIABLE_CONSTRUCTOR(
-          bytecode_offset_, MachineType::PointerRepresentation(),
-          Parameter(InterpreterDispatchDescriptor::kBytecodeOffset)),
+      TVARIABLE_CONSTRUCTOR(
+          bytecode_offset_,
+          UncheckedCast<IntPtrT>(
+              Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))),
       VARIABLE_CONSTRUCTOR(
           dispatch_table_, MachineType::PointerRepresentation(),
           Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
@@ -48,17 +48,15 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
       accumulator_use_(AccumulatorUse::kNone),
       made_call_(false),
       reloaded_frame_ptr_(false),
-      bytecode_array_valid_(true),
-      disable_stack_check_across_call_(false),
-      stack_pointer_before_call_(nullptr) {
+      bytecode_array_valid_(true) {
 #ifdef V8_TRACE_IGNITION
   TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
 #endif
   RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                   [this] { CallEpilogue(); });
 
-  // Save the bytecode offset immediately if bytecode will make a call along the
-  // critical path, or it is a return bytecode.
+  // Save the bytecode offset immediately if the bytecode will make a call
+  // along the critical path or is a return bytecode.
   if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
       Bytecodes::Returns(bytecode)) {
     SaveBytecodeOffset();
@@ -73,28 +71,28 @@ InterpreterAssembler::~InterpreterAssembler() {
   UnregisterCallGenerationCallbacks();
 }
 
-Node* InterpreterAssembler::GetInterpretedFramePointer() {
+TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
   if (!interpreted_frame_pointer_.IsBound()) {
-    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+    interpreted_frame_pointer_ = LoadParentFramePointer();
   } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
              !reloaded_frame_ptr_) {
-    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+    interpreted_frame_pointer_ = LoadParentFramePointer();
     reloaded_frame_ptr_ = true;
   }
   return interpreted_frame_pointer_.value();
 }
 
-Node* InterpreterAssembler::BytecodeOffset() {
+TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
   if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
       (bytecode_offset_.value() ==
        Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
-    bytecode_offset_.Bind(ReloadBytecodeOffset());
+    bytecode_offset_ = ReloadBytecodeOffset();
   }
   return bytecode_offset_.value();
 }
 
-Node* InterpreterAssembler::ReloadBytecodeOffset() {
-  Node* offset = LoadAndUntagRegister(Register::bytecode_offset());
+TNode<IntPtrT> InterpreterAssembler::ReloadBytecodeOffset() {
+  TNode<IntPtrT> offset = LoadAndUntagRegister(Register::bytecode_offset());
   if (operand_scale() != OperandScale::kSingle) {
     // Add one to the offset such that it points to the actual bytecode rather
     // than the Wide / ExtraWide prefix bytecode.
@@ -104,13 +102,31 @@ Node* InterpreterAssembler::ReloadBytecodeOffset() {
 }
 
 void InterpreterAssembler::SaveBytecodeOffset() {
-  Node* offset = BytecodeOffset();
+  TNode<IntPtrT> bytecode_offset = BytecodeOffset();
   if (operand_scale() != OperandScale::kSingle) {
-    // Subtract one from the offset such that it points to the Wide / ExtraWide
-    // prefix bytecode.
-    offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
+    // Subtract one from the bytecode_offset such that it points to the Wide /
+    // ExtraWide prefix bytecode.
+    bytecode_offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
+  }
+  int store_offset =
+      Register::bytecode_offset().ToOperand() * kSystemPointerSize;
+  TNode<RawPtrT> base = GetInterpretedFramePointer();
+
+  if (SmiValuesAre32Bits()) {
+    int zero_offset = store_offset + 4;
+    int payload_offset = store_offset;
+#if V8_TARGET_LITTLE_ENDIAN
+    std::swap(zero_offset, payload_offset);
+#endif
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
+                        IntPtrConstant(zero_offset), Int32Constant(0));
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
+                        IntPtrConstant(payload_offset),
+                        TruncateIntPtrToInt32(bytecode_offset));
+  } else {
+    StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
+                        IntPtrConstant(store_offset), SmiTag(bytecode_offset));
   }
-  StoreAndTagRegister(offset, Register::bytecode_offset());
 }
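+// Layout note: with 32-bit Smis on a 64-bit target the tagged value is
+// `offset << 32`, so the payload lives in the high word; the swap above
+// makes little-endian targets write the zero half at the low address and
+// the truncated offset at +4.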
 
 Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
@@ -137,7 +153,7 @@ Node* InterpreterAssembler::GetAccumulatorUnchecked() {
   return accumulator_.value();
 }
 
-Node* InterpreterAssembler::GetAccumulator() {
+TNode<Object> InterpreterAssembler::GetAccumulator() {
   DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
   accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
   return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
@@ -149,20 +165,18 @@ void InterpreterAssembler::SetAccumulator(Node* value) {
   accumulator_.Bind(value);
 }
 
-Node* InterpreterAssembler::GetContext() {
-  return LoadRegister(Register::current_context());
+TNode<Context> InterpreterAssembler::GetContext() {
+  return CAST(LoadRegister(Register::current_context()));
 }
 
-void InterpreterAssembler::SetContext(Node* value) {
+void InterpreterAssembler::SetContext(TNode<Context> value) {
   StoreRegister(value, Register::current_context());
 }
 
-Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
-  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
-  cur_context.Bind(context);
-
-  Variable cur_depth(this, MachineRepresentation::kWord32);
-  cur_depth.Bind(depth);
+Node* InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
+                                              TNode<Uint32T> depth) {
+  TVARIABLE(Context, cur_context, context);
+  TVARIABLE(Uint32T, cur_depth, depth);
 
   Label context_found(this);
 
@@ -175,9 +189,9 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
   // Loop until the depth is 0.
   BIND(&context_search);
   {
-    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
-    cur_context.Bind(
-        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
+    cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
+    cur_context =
+        CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
 
     Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
            &context_search);
@@ -187,14 +201,10 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
   return cur_context.value();
 }
 
-void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
-                                                              Node* depth,
-                                                              Label* target) {
-  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
-  cur_context.Bind(context);
-
-  Variable cur_depth(this, MachineRepresentation::kWord32);
-  cur_depth.Bind(depth);
+void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(
+    TNode<Context> context, TNode<Uint32T> depth, Label* target) {
+  TVARIABLE(Context, cur_context, context);
+  TVARIABLE(Uint32T, cur_depth, depth);
 
   Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
   Label context_search(this, 2, context_search_loop_variables);
@@ -207,62 +217,73 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
     // eval, we could pass in a context chain bitmask to figure out which
     // contexts actually need to be checked.
 
-    Node* extension_slot =
+    TNode<Object> extension_slot =
         LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
 
     // Jump to the target if the extension slot is not a hole.
-    GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
+    GotoIf(TaggedNotEqual(extension_slot, TheHoleConstant()), target);
 
-    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
-    cur_context.Bind(
-        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
+    cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
+    cur_context =
+        CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
 
     GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
            &context_search);
   }
 }
 
-Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
-  return WordPoisonOnSpeculation(
-      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
+TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Node* reg_index) {
+  return Signed(WordPoisonOnSpeculation(
+      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))));
 }
 
-Node* InterpreterAssembler::RegisterLocation(Register reg) {
+TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
   return RegisterLocation(IntPtrConstant(reg.ToOperand()));
 }
 
-Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
-  return TimesSystemPointerSize(index);
+TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(Node* index) {
+  return Signed(TimesSystemPointerSize(index));
 }
 
-Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+TNode<Object> InterpreterAssembler::LoadRegister(Node* reg_index) {
   return LoadFullTagged(GetInterpretedFramePointer(),
                         RegisterFrameOffset(reg_index),
                         LoadSensitivity::kCritical);
 }
 
-Node* InterpreterAssembler::LoadRegister(Register reg) {
+TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
   return LoadFullTagged(GetInterpretedFramePointer(),
                         IntPtrConstant(reg.ToOperand() * kSystemPointerSize));
 }
 
-Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
-  return LoadAndUntagSmi(GetInterpretedFramePointer(),
-                         reg.ToOperand() * kSystemPointerSize);
+TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
+  TNode<RawPtrT> base = GetInterpretedFramePointer();
+  int index = reg.ToOperand() * kSystemPointerSize;
+  if (SmiValuesAre32Bits()) {
+#if V8_TARGET_LITTLE_ENDIAN
+    index += 4;
+#endif
+    return ChangeInt32ToIntPtr(
+        Load(MachineType::Int32(), base, IntPtrConstant(index)));
+  } else {
+    return SmiToIntPtr(
+        Load(MachineType::TaggedSigned(), base, IntPtrConstant(index)));
+  }
 }
 
-Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
+TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
+    int operand_index) {
   return LoadRegister(
       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
 }
 
-std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex(
-    int operand_index) {
+std::pair<TNode<Object>, TNode<Object>>
+InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
   DCHECK_EQ(OperandType::kRegPair,
             Bytecodes::GetOperandType(bytecode_, operand_index));
   Node* first_reg_index =
       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
-  Node* second_reg_index = NextRegister(first_reg_index);
+  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
   return std::make_pair(LoadRegister(first_reg_index),
                         LoadRegister(second_reg_index));
 }
@@ -273,27 +294,27 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
       Bytecodes::GetOperandType(bytecode_, operand_index)));
   DCHECK_EQ(OperandType::kRegCount,
             Bytecodes::GetOperandType(bytecode_, operand_index + 1));
-  Node* base_reg = RegisterLocation(
+  TNode<IntPtrT> base_reg = RegisterLocation(
       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
-  Node* reg_count = BytecodeOperandCount(operand_index + 1);
+  TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
   return RegListNodePair(base_reg, reg_count);
 }
 
 Node* InterpreterAssembler::LoadRegisterFromRegisterList(
     const RegListNodePair& reg_list, int index) {
-  Node* location = RegisterLocationInRegisterList(reg_list, index);
+  TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
   // Location is already poisoned on speculation, so no need to poison here.
   return LoadFullTagged(location);
 }
 
-Node* InterpreterAssembler::RegisterLocationInRegisterList(
+TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
     const RegListNodePair& reg_list, int index) {
   CSA_ASSERT(this,
              Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
-  Node* offset = RegisterFrameOffset(IntPtrConstant(index));
+  TNode<IntPtrT> offset = RegisterFrameOffset(IntPtrConstant(index));
   // Register indexes are negative, so subtract index from base location to get
   // location.
-  return IntPtrSub(reg_list.base_reg_location(), offset);
+  return Signed(IntPtrSub(reg_list.base_reg_location(), offset));
 }
 
 void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
@@ -307,11 +328,6 @@ void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
                                 RegisterFrameOffset(reg_index), value);
 }
 
-void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
-  int offset = reg.ToOperand() * kSystemPointerSize;
-  StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
-}
-
 void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
                                                        int operand_index) {
   StoreRegister(value,
@@ -326,7 +342,7 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
   Node* first_reg_index =
       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
   StoreRegister(value1, first_reg_index);
-  Node* second_reg_index = NextRegister(first_reg_index);
+  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
   StoreRegister(value2, second_reg_index);
 }
 
@@ -337,15 +353,15 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
   Node* first_reg_index =
       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
   StoreRegister(value1, first_reg_index);
-  Node* second_reg_index = NextRegister(first_reg_index);
+  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
   StoreRegister(value2, second_reg_index);
-  Node* third_reg_index = NextRegister(second_reg_index);
+  TNode<IntPtrT> third_reg_index = NextRegister(second_reg_index);
   StoreRegister(value3, third_reg_index);
 }
 
-Node* InterpreterAssembler::NextRegister(Node* reg_index) {
+TNode<IntPtrT> InterpreterAssembler::NextRegister(Node* reg_index) {
   // Register indexes are negative, so the next index is minus one.
-  return IntPtrAdd(reg_index, IntPtrConstant(-1));
+  return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1)));
 }
 
 Node* InterpreterAssembler::OperandOffset(int operand_index) {
@@ -353,27 +369,29 @@ Node* InterpreterAssembler::OperandOffset(int operand_index) {
       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
 }
 
-Node* InterpreterAssembler::BytecodeOperandUnsignedByte(
+TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
     int operand_index, LoadSensitivity needs_poisoning) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
   DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                     bytecode_, operand_index, operand_scale()));
   Node* operand_offset = OperandOffset(operand_index);
-  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-              IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
+  return Load<Uint8T>(BytecodeArrayTaggedPointer(),
+                      IntPtrAdd(BytecodeOffset(), operand_offset),
+                      needs_poisoning);
 }
 
-Node* InterpreterAssembler::BytecodeOperandSignedByte(
+TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
     int operand_index, LoadSensitivity needs_poisoning) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
   DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                     bytecode_, operand_index, operand_scale()));
   Node* operand_offset = OperandOffset(operand_index);
-  return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
-              IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
+  return Load<Int8T>(BytecodeArrayTaggedPointer(),
+                     IntPtrAdd(BytecodeOffset(), operand_offset),
+                     needs_poisoning);
 }
 
-Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
+TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
     int relative_offset, MachineType result_type,
     LoadSensitivity needs_poisoning) {
   static const int kMaxCount = 4;
@@ -406,26 +424,28 @@ Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
   // Read the most significant byte into bytes[0] and then in order
   // down to least significant in bytes[count - 1].
   DCHECK_LE(count, kMaxCount);
-  Node* bytes[kMaxCount];
+  TNode<Word32T> bytes[kMaxCount];
   for (int i = 0; i < count; i++) {
     MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
-    Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
-    Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
-    bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset,
-                    needs_poisoning);
+    TNode<IntPtrT> offset =
+        IntPtrConstant(relative_offset + msb_offset + i * kStep);
+    TNode<WordT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
+    bytes[i] =
+        UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(),
+                                    array_offset, needs_poisoning));
   }
 
   // Pack LSB to MSB.
-  Node* result = bytes[--count];
+  TNode<Word32T> result = bytes[--count];
   for (int i = 1; --count >= 0; i++) {
-    Node* shift = Int32Constant(i * kBitsPerByte);
-    Node* value = Word32Shl(bytes[count], shift);
+    TNode<Int32T> shift = Int32Constant(i * kBitsPerByte);
+    TNode<Word32T> value = Word32Shl(bytes[count], shift);
     result = Word32Or(value, result);
   }
   return result;
 }
 
-Node* InterpreterAssembler::BytecodeOperandUnsignedShort(
+TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
     int operand_index, LoadSensitivity needs_poisoning) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
   DCHECK_EQ(
@@ -434,16 +454,17 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedShort(
   int operand_offset =
       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
   if (TargetSupportsUnalignedAccess()) {
-    return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
-                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
-                needs_poisoning);
+    return Load<Uint16T>(
+        BytecodeArrayTaggedPointer(),
+        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
+        needs_poisoning);
   } else {
-    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16(),
-                                        needs_poisoning);
+    return UncheckedCast<Uint16T>(BytecodeOperandReadUnaligned(
+        operand_offset, MachineType::Uint16(), needs_poisoning));
   }
 }
 
-Node* InterpreterAssembler::BytecodeOperandSignedShort(
+TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
     int operand_index, LoadSensitivity needs_poisoning) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
   DCHECK_EQ(
@@ -452,16 +473,17 @@ Node* InterpreterAssembler::BytecodeOperandSignedShort(
   int operand_offset =
       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
   if (TargetSupportsUnalignedAccess()) {
-    return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
-                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
-                needs_poisoning);
+    return Load<Int16T>(
+        BytecodeArrayTaggedPointer(),
+        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
+        needs_poisoning);
   } else {
-    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16(),
-                                        needs_poisoning);
+    return UncheckedCast<Int16T>(BytecodeOperandReadUnaligned(
+        operand_offset, MachineType::Int16(), needs_poisoning));
   }
 }
 
-Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
     int operand_index, LoadSensitivity needs_poisoning) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
   DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
@@ -469,16 +491,17 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(
   int operand_offset =
       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
   if (TargetSupportsUnalignedAccess()) {
-    return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
-                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
-                needs_poisoning);
+    return Load<Uint32T>(
+        BytecodeArrayTaggedPointer(),
+        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
+        needs_poisoning);
   } else {
-    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32(),
-                                        needs_poisoning);
+    return UncheckedCast<Uint32T>(BytecodeOperandReadUnaligned(
+        operand_offset, MachineType::Uint32(), needs_poisoning));
   }
 }
 
-Node* InterpreterAssembler::BytecodeOperandSignedQuad(
+TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
     int operand_index, LoadSensitivity needs_poisoning) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
   DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
@@ -486,16 +509,17 @@ Node* InterpreterAssembler::BytecodeOperandSignedQuad(
   int operand_offset =
       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
   if (TargetSupportsUnalignedAccess()) {
-    return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
-                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
-                needs_poisoning);
+    return Load<Int32T>(
+        BytecodeArrayTaggedPointer(),
+        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
+        needs_poisoning);
   } else {
-    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32(),
-                                        needs_poisoning);
+    return UncheckedCast<Int32T>(BytecodeOperandReadUnaligned(
+        operand_offset, MachineType::Int32(), needs_poisoning));
   }
 }
 
-Node* InterpreterAssembler::BytecodeSignedOperand(
+TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
     int operand_index, OperandSize operand_size,
     LoadSensitivity needs_poisoning) {
   DCHECK(!Bytecodes::IsUnsignedOperandType(
@@ -510,10 +534,9 @@ Node* InterpreterAssembler::BytecodeSignedOperand(
     case OperandSize::kNone:
       UNREACHABLE();
   }
-  return nullptr;
 }
 
-Node* InterpreterAssembler::BytecodeUnsignedOperand(
+TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
     int operand_index, OperandSize operand_size,
     LoadSensitivity needs_poisoning) {
   DCHECK(Bytecodes::IsUnsignedOperandType(
@@ -528,10 +551,9 @@ Node* InterpreterAssembler::BytecodeUnsignedOperand(
     case OperandSize::kNone:
       UNREACHABLE();
   }
-  return nullptr;
 }
 
-Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) {
   DCHECK_EQ(OperandType::kRegCount,
             Bytecodes::GetOperandType(bytecode_, operand_index));
   OperandSize operand_size =
@@ -548,7 +570,7 @@ Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
   return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
-Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
   DCHECK_EQ(OperandType::kUImm,
             Bytecodes::GetOperandType(bytecode_, operand_index));
   OperandSize operand_size =
@@ -561,7 +583,7 @@ Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
 }
 
 Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
-  return SmiFromInt32(BytecodeOperandUImm(operand_index));
+  return SmiFromInt32(Signed(BytecodeOperandUImm(operand_index)));
 }
 
 Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
@@ -651,7 +673,8 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
       constant_pool, UncheckedCast<IntPtrT>(index), LoadSensitivity::kCritical);
 }
 
-Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
+TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
+    Node* index) {
   return SmiUntag(LoadConstantPoolEntry(index));
 }
 
@@ -662,7 +685,8 @@ Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
   return LoadConstantPoolEntry(index);
 }
 
-Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
+TNode<IntPtrT>
+InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
     int operand_index) {
   return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
 }
@@ -682,22 +706,11 @@ void InterpreterAssembler::CallPrologue() {
     SaveBytecodeOffset();
   }
 
-  if (FLAG_debug_code && !disable_stack_check_across_call_) {
-    DCHECK_NULL(stack_pointer_before_call_);
-    stack_pointer_before_call_ = LoadStackPointer();
-  }
   bytecode_array_valid_ = false;
   made_call_ = true;
 }
 
 void InterpreterAssembler::CallEpilogue() {
-  if (FLAG_debug_code && !disable_stack_check_across_call_) {
-    Node* stack_pointer_after_call = LoadStackPointer();
-    Node* stack_pointer_before_call = stack_pointer_before_call_;
-    stack_pointer_before_call_ = nullptr;
-    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
-                        AbortReason::kUnexpectedStackPointer);
-  }
 }
 
 void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
@@ -708,7 +721,7 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
   // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
   // count are used as flags. To increment the call count by 1 we hence
   // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
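+  // (For example, assuming kShift were 1, the stored integer would grow by 2
+  // per call while the low flag bit kept its value.)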
-  Node* new_count = SmiAdd(
+  TNode<Smi> new_count = SmiAdd(
       call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
   // Count is Smi, so we don't need a write barrier.
   StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
@@ -729,7 +742,7 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
 
   // Check if it is a megamorphic {target}.
   Comment("check if megamorphic");
-  Node* is_megamorphic = WordEqual(
+  TNode<BoolT> is_megamorphic = TaggedEqual(
       feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
   Branch(is_megamorphic, &done, &extra_checks);
 
@@ -738,7 +751,7 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
     Label initialize(this), mark_megamorphic(this);
 
     Comment("check if weak reference");
-    Node* is_uninitialized = WordEqual(
+    TNode<BoolT> is_uninitialized = TaggedEqual(
         feedback,
         HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
     GotoIf(is_uninitialized, &initialize);
@@ -764,7 +777,7 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
         Label if_boundfunction(this), if_function(this);
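+        // Walk through any JSBoundFunction wrappers so the native-context
+        // check below applies to the function that will ultimately run.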
         Node* current = var_current.value();
         CSA_ASSERT(this, TaggedIsNotSmi(current));
-        Node* current_instance_type = LoadInstanceType(current);
+        TNode<Uint16T> current_instance_type = LoadInstanceType(current);
         GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
                &if_boundfunction);
         Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
@@ -774,11 +787,13 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
         {
           // Check that the JSFunction {current} is in the current native
           // context.
-          Node* current_context =
-              LoadObjectField(current, JSFunction::kContextOffset);
-          Node* current_native_context = LoadNativeContext(current_context);
-          Branch(WordEqual(LoadNativeContext(context), current_native_context),
-                 &done_loop, &mark_megamorphic);
+          TNode<Context> current_context =
+              CAST(LoadObjectField(current, JSFunction::kContextOffset));
+          TNode<Context> current_native_context =
+              LoadNativeContext(current_context);
+          Branch(
+              TaggedEqual(LoadNativeContext(context), current_native_context),
+              &done_loop, &mark_megamorphic);
         }
 
         BIND(&if_boundfunction);
@@ -848,13 +863,13 @@ void InterpreterAssembler::CallJSAndDispatch(
     args_count = args.reg_count();
   } else {
     // Subtract the receiver from the argument count.
-    Node* receiver_count = Int32Constant(1);
+    TNode<Int32T> receiver_count = Int32Constant(1);
     args_count = Int32Sub(args.reg_count(), receiver_count);
   }
 
   Callable callable = CodeFactory::InterpreterPushArgsThenCall(
       isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
-  Node* code_target = HeapConstant(callable.code());
+  TNode<Code> code_target = HeapConstant(callable.code());
 
   TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                    args_count, args.base_reg_location(),
@@ -873,7 +888,7 @@ void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
          bytecode_ == Bytecode::kInvokeIntrinsic);
   DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
   Callable callable = CodeFactory::Call(isolate());
-  Node* code_target = HeapConstant(callable.code());
+  TNode<Code> code_target = HeapConstant(callable.code());
 
   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
     // The first argument parameter (the receiver) is implied to be undefined.
@@ -913,10 +928,10 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
   Callable callable = CodeFactory::InterpreterPushArgsThenCall(
       isolate(), ConvertReceiverMode::kAny,
       InterpreterPushArgsMode::kWithFinalSpread);
-  Node* code_target = HeapConstant(callable.code());
+  TNode<Code> code_target = HeapConstant(callable.code());
 
-  Node* receiver_count = Int32Constant(1);
-  Node* args_count = Int32Sub(args.reg_count(), receiver_count);
+  TNode<Int32T> receiver_count = Int32Constant(1);
+  TNode<Word32T> args_count = Int32Sub(args.reg_count(), receiver_count);
   TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                    args_count, args.base_reg_location(),
                                    function);
@@ -924,8 +939,8 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
   accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
 }
 
-Node* InterpreterAssembler::Construct(Node* target, Node* context,
-                                      Node* new_target,
+Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
+                                      SloppyTNode<Object> new_target,
                                       const RegListNodePair& args,
                                       Node* slot_id, Node* feedback_vector) {
   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
@@ -941,8 +956,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
   // Check if we have monomorphic {new_target} feedback already.
   TNode<MaybeObject> feedback =
       LoadFeedbackVectorSlot(feedback_vector, slot_id);
-  Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
-         &extra_checks);
+  Branch(IsWeakReferenceTo(feedback, new_target), &construct, &extra_checks);
 
   BIND(&extra_checks);
   {
@@ -951,7 +965,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
 
     // Check if it is a megamorphic {new_target}.
     Comment("check if megamorphic");
-    Node* is_megamorphic = WordEqual(
+    TNode<BoolT> is_megamorphic = TaggedEqual(
         feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
     GotoIf(is_megamorphic, &construct);
 
@@ -971,10 +985,10 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
       GotoIfNot(IsAllocationSite(strong_feedback), &check_initialized);
 
       // Make sure that {target} and {new_target} are the Array constructor.
-      Node* array_function = LoadContextElement(LoadNativeContext(context),
-                                                Context::ARRAY_FUNCTION_INDEX);
-      GotoIfNot(WordEqual(target, array_function), &mark_megamorphic);
-      GotoIfNot(WordEqual(new_target, array_function), &mark_megamorphic);
+      TNode<Object> array_function = LoadContextElement(
+          LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
+      GotoIfNot(TaggedEqual(target, array_function), &mark_megamorphic);
+      GotoIfNot(TaggedEqual(new_target, array_function), &mark_megamorphic);
       var_site.Bind(strong_feedback);
       Goto(&construct_array);
     }
@@ -983,8 +997,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
     {
       // Check if it is uninitialized.
       Comment("check if uninitialized");
-      Node* is_uninitialized =
-          WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
+      TNode<BoolT> is_uninitialized =
+          TaggedEqual(feedback, UninitializedSymbolConstant());
       Branch(is_uninitialized, &initialize, &mark_megamorphic);
     }
 
@@ -1002,7 +1016,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
         Label if_boundfunction(this), if_function(this);
         Node* current = var_current.value();
         CSA_ASSERT(this, TaggedIsNotSmi(current));
-        Node* current_instance_type = LoadInstanceType(current);
+        TNode<Uint16T> current_instance_type = LoadInstanceType(current);
         GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
                &if_boundfunction);
         Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
@@ -1012,11 +1026,13 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
         {
           // Check that the JSFunction {current} is in the current native
           // context.
-          Node* current_context =
-              LoadObjectField(current, JSFunction::kContextOffset);
-          Node* current_native_context = LoadNativeContext(current_context);
-          Branch(WordEqual(LoadNativeContext(context), current_native_context),
-                 &done_loop, &mark_megamorphic);
+          TNode<Context> current_context =
+              CAST(LoadObjectField(current, JSFunction::kContextOffset));
+          TNode<Context> current_native_context =
+              LoadNativeContext(current_context);
+          Branch(
+              TaggedEqual(LoadNativeContext(context), current_native_context),
+              &done_loop, &mark_megamorphic);
         }
 
         BIND(&if_boundfunction);
@@ -1032,10 +1048,10 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
       // Create an AllocationSite if {target} and {new_target} refer
       // to the current native context's Array constructor.
       Label create_allocation_site(this), store_weak_reference(this);
-      GotoIfNot(WordEqual(target, new_target), &store_weak_reference);
-      Node* array_function = LoadContextElement(LoadNativeContext(context),
-                                                Context::ARRAY_FUNCTION_INDEX);
-      Branch(WordEqual(target, array_function), &create_allocation_site,
+      GotoIfNot(TaggedEqual(target, new_target), &store_weak_reference);
+      TNode<Object> array_function = LoadContextElement(
+          LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
+      Branch(TaggedEqual(target, array_function), &create_allocation_site,
              &store_weak_reference);
 
       BIND(&create_allocation_site);
@@ -1080,7 +1096,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
     Comment("call using ConstructArray builtin");
     Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
         isolate(), InterpreterPushArgsMode::kArrayFunction);
-    Node* code_target = HeapConstant(callable.code());
+    TNode<Code> code_target = HeapConstant(callable.code());
     var_result.Bind(CallStub(callable.descriptor(), code_target, context,
                              args.reg_count(), args.base_reg_location(), target,
                              new_target, var_site.value()));
@@ -1093,7 +1109,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
     Comment("call using Construct builtin");
     Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
         isolate(), InterpreterPushArgsMode::kOther);
-    Node* code_target = HeapConstant(callable.code());
+    TNode<Code> code_target = HeapConstant(callable.code());
     var_result.Bind(CallStub(callable.descriptor(), code_target, context,
                              args.reg_count(), args.base_reg_location(), target,
                              new_target, UndefinedConstant()));
@@ -1131,7 +1147,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
 
     // Check if it is a megamorphic {new_target}.
     Comment("check if megamorphic");
-    Node* is_megamorphic = WordEqual(
+    TNode<BoolT> is_megamorphic = TaggedEqual(
         feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
     GotoIf(is_megamorphic, &construct);
 
@@ -1147,8 +1163,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
     {
       // Check if it is uninitialized.
       Comment("check if uninitialized");
-      Node* is_uninitialized =
-          WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
+      TNode<BoolT> is_uninitialized =
+          TaggedEqual(feedback, UninitializedSymbolConstant());
       Branch(is_uninitialized, &initialize, &mark_megamorphic);
     }
 
@@ -1166,7 +1182,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
         Label if_boundfunction(this), if_function(this);
         Node* current = var_current.value();
         CSA_ASSERT(this, TaggedIsNotSmi(current));
-        Node* current_instance_type = LoadInstanceType(current);
+        TNode<Uint16T> current_instance_type = LoadInstanceType(current);
         GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
                &if_boundfunction);
         Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
@@ -1176,11 +1192,13 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
         {
           // Check that the JSFunction {current} is in the current native
           // context.
-          Node* current_context =
-              LoadObjectField(current, JSFunction::kContextOffset);
-          Node* current_native_context = LoadNativeContext(current_context);
-          Branch(WordEqual(LoadNativeContext(context), current_native_context),
-                 &done_loop, &mark_megamorphic);
+          TNode<Context> current_context =
+              CAST(LoadObjectField(current, JSFunction::kContextOffset));
+          TNode<Context> current_native_context =
+              LoadNativeContext(current_context);
+          Branch(
+              TaggedEqual(LoadNativeContext(context), current_native_context),
+              &done_loop, &mark_megamorphic);
         }
 
         BIND(&if_boundfunction);
@@ -1219,7 +1237,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
   Comment("call using ConstructWithSpread builtin");
   Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
       isolate(), InterpreterPushArgsMode::kWithFinalSpread);
-  Node* code_target = HeapConstant(callable.code());
+  TNode<Code> code_target = HeapConstant(callable.code());
   return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
                   args.base_reg_location(), target, new_target,
                   UndefinedConstant());
@@ -1231,14 +1249,14 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
   DCHECK(Bytecodes::IsCallRuntime(bytecode_));
   Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
-  Node* code_target = HeapConstant(callable.code());
+  TNode<Code> code_target = HeapConstant(callable.code());
 
   // Get the function entry from the function id.
   Node* function_table = ExternalConstant(
       ExternalReference::runtime_function_table_address(isolate()));
-  Node* function_offset =
+  TNode<Word32T> function_offset =
       Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
-  Node* function =
+  TNode<WordT> function =
       IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
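+  // |function| now points at the id-th Runtime::Function record; the Load
+  // below fetches its entry pointer from that record.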
   Node* function_entry =
       Load(MachineType::Pointer(), function,
@@ -1259,7 +1277,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
   Label load_budget_from_bytecode(this), load_budget_done(this);
   TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
   TNode<FeedbackCell> feedback_cell =
-      CAST(LoadObjectField(function, JSFunction::kFeedbackCellOffset));
+      LoadObjectField<FeedbackCell>(function, JSFunction::kFeedbackCellOffset);
   TNode<Int32T> old_budget = LoadObjectField<Int32T>(
       feedback_cell, FeedbackCell::kInterruptBudgetOffset);
 
@@ -1272,7 +1290,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
   if (backward) {
     // Update budget by |weight| and check if it reaches zero.
     new_budget = Signed(Int32Sub(budget_after_bytecode, weight));
-    Node* condition =
+    TNode<BoolT> condition =
         Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
     Label ok(this), interrupt_check(this, Label::kDeferred);
     Branch(condition, &ok, &interrupt_check);
@@ -1297,19 +1315,22 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
   Comment("] UpdateInterruptBudget");
 }
 
-Node* InterpreterAssembler::Advance() { return Advance(CurrentBytecodeSize()); }
+TNode<IntPtrT> InterpreterAssembler::Advance() {
+  return Advance(CurrentBytecodeSize());
+}
 
-Node* InterpreterAssembler::Advance(int delta) {
+TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
   return Advance(IntPtrConstant(delta));
 }
 
-Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
+TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta,
+                                             bool backward) {
 #ifdef V8_TRACE_IGNITION
   TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
 #endif
-  Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
-                               : IntPtrAdd(BytecodeOffset(), delta);
-  bytecode_offset_.Bind(next_offset);
+  TNode<IntPtrT> next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
+                                        : IntPtrAdd(BytecodeOffset(), delta);
+  bytecode_offset_ = next_offset;
   return next_offset;
 }
 
@@ -1318,7 +1339,7 @@ Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
 
   UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
   Node* new_bytecode_offset = Advance(delta, backward);
-  Node* target_bytecode = LoadBytecode(new_bytecode_offset);
+  TNode<WordT> target_bytecode = LoadBytecode(new_bytecode_offset);
   return DispatchToBytecode(target_bytecode, new_bytecode_offset);
 }
 
@@ -1338,35 +1359,39 @@ void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
   Dispatch();
 }
 
-void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
-  JumpConditional(WordEqual(lhs, rhs), delta);
+void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
+                                             TNode<Object> rhs, Node* delta) {
+  JumpConditional(TaggedEqual(lhs, rhs), delta);
 }
 
-void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
-                                              Node* delta) {
-  JumpConditional(WordNotEqual(lhs, rhs), delta);
+void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
+                                                TNode<Object> rhs,
+                                                Node* delta) {
+  JumpConditional(TaggedNotEqual(lhs, rhs), delta);
 }
 
-Node* InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
+TNode<WordT> InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
   Node* bytecode =
       Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
   return ChangeUint32ToWord(bytecode);
 }
 
-Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
+TNode<WordT> InterpreterAssembler::StarDispatchLookahead(
+    TNode<WordT> target_bytecode) {
   Label do_inline_star(this), done(this);
 
-  Variable var_bytecode(this, MachineType::PointerRepresentation());
-  var_bytecode.Bind(target_bytecode);
+  TVARIABLE(WordT, var_bytecode, target_bytecode);
 
-  Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
-  Node* is_star = WordEqual(target_bytecode, star_bytecode);
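+  // Bytecodes fit in one byte (LoadBytecode zero-extends a Uint8 load), so a
+  // 32-bit comparison of the truncated value is sufficient here.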
+  TNode<Int32T> star_bytecode =
+      Int32Constant(static_cast<int>(Bytecode::kStar));
+  TNode<BoolT> is_star =
+      Word32Equal(TruncateWordToInt32(target_bytecode), star_bytecode);
   Branch(is_star, &do_inline_star, &done);
 
   BIND(&do_inline_star);
   {
     InlineStar();
-    var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
+    var_bytecode = LoadBytecode(BytecodeOffset());
     Goto(&done);
   }
   BIND(&done);
@@ -1397,7 +1422,7 @@ Node* InterpreterAssembler::Dispatch() {
   Comment("========= Dispatch");
   DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
   Node* target_offset = Advance();
-  Node* target_bytecode = LoadBytecode(target_offset);
+  TNode<WordT> target_bytecode = LoadBytecode(target_offset);
 
   if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
     target_bytecode = StarDispatchLookahead(target_bytecode);
@@ -1423,7 +1448,7 @@ Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
                                                       Node* bytecode_offset,
                                                       Node* target_bytecode) {
   // TODO(ishell): Add CSA::CodeEntryPoint(code).
-  Node* handler_entry =
+  TNode<IntPtrT> handler_entry =
       IntPtrAdd(BitcastTaggedToWord(handler),
                 IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
   return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
@@ -1433,7 +1458,7 @@ Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
 Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
     Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
   // Propagate speculation poisoning.
-  Node* poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
+  TNode<WordT> poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
   return TailCallBytecodeDispatch(
       InterpreterDispatchDescriptor{}, poisoned_handler_entry,
       GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(),
@@ -1450,7 +1475,7 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
   //   Indices 512-767 correspond to bytecodes with operand_scale == 2
   DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
   Node* next_bytecode_offset = Advance(1);
-  Node* next_bytecode = LoadBytecode(next_bytecode_offset);
+  TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset);
 
   if (FLAG_trace_ignition_dispatches) {
     TraceBytecodeDispatch(next_bytecode);
@@ -1467,7 +1492,7 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
     default:
       UNREACHABLE();
   }
-  Node* target_index = IntPtrAdd(base_index, next_bytecode);
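+  // target_index = operand-scale bucket base (a multiple of 256) plus the
+  // bytecode value, selecting the handler variant for the prefix width.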
+  TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode);
   Node* target_code_entry =
       Load(MachineType::Pointer(), DispatchTableRawPointer(),
            TimesSystemPointerSize(target_index));
@@ -1496,8 +1521,9 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
   // of the first bytecode.
 
   const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
-  Node* profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
-                                    Int32Constant(kFirstBytecodeOffset));
+  TNode<Int32T> profiling_weight =
+      Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
+               Int32Constant(kFirstBytecodeOffset));
   UpdateInterruptBudget(profiling_weight, true);
 }
 
@@ -1508,13 +1534,12 @@ Node* InterpreterAssembler::LoadOsrNestingLevel() {
 }
 
 void InterpreterAssembler::Abort(AbortReason abort_reason) {
-  disable_stack_check_across_call_ = true;
-  Node* abort_id = SmiConstant(abort_reason);
+  TNode<Smi> abort_id = SmiConstant(abort_reason);
   CallRuntime(Runtime::kAbort, GetContext(), abort_id);
-  disable_stack_check_across_call_ = false;
 }
 
-void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
+void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
+                                               TNode<WordT> rhs,
                                                AbortReason abort_reason) {
   Label ok(this), abort(this, Label::kDeferred);
   Branch(WordEqual(lhs, rhs), &ok, &abort);
@@ -1527,11 +1552,11 @@ void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
 }
 
 void InterpreterAssembler::MaybeDropFrames(Node* context) {
-  Node* restart_fp_address =
+  TNode<ExternalReference> restart_fp_address =
       ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
 
-  Node* restart_fp = Load(MachineType::Pointer(), restart_fp_address);
-  Node* null = IntPtrConstant(0);
+  TNode<IntPtrT> restart_fp = Load<IntPtrT>(restart_fp_address);
+  TNode<IntPtrT> null = IntPtrConstant(0);
 
   Label ok(this), drop_frames(this);
   Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
@@ -1552,25 +1577,24 @@ void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
 }
 
 void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
-  Node* counters_table = ExternalConstant(
+  TNode<ExternalReference> counters_table = ExternalConstant(
       ExternalReference::interpreter_dispatch_counters(isolate()));
-  Node* source_bytecode_table_index = IntPtrConstant(
+  TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant(
       static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
 
-  Node* counter_offset = TimesSystemPointerSize(
+  TNode<WordT> counter_offset = TimesSystemPointerSize(
       IntPtrAdd(source_bytecode_table_index, target_bytecode));
-  Node* old_counter =
-      Load(MachineType::IntPtr(), counters_table, counter_offset);
+  TNode<IntPtrT> old_counter = Load<IntPtrT>(counters_table, counter_offset);
 
   Label counter_ok(this), counter_saturated(this, Label::kDeferred);
 
-  Node* counter_reached_max = WordEqual(
+  TNode<BoolT> counter_reached_max = WordEqual(
       old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
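+  // The counter saturates at the maximum value rather than wrapping: once it
+  // reaches the limit, it is left unchanged.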
   Branch(counter_reached_max, &counter_saturated, &counter_ok);
 
   BIND(&counter_ok);
   {
-    Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
+    TNode<IntPtrT> new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
     StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                         counter_offset, new_counter);
     Goto(&counter_saturated);
@@ -1594,7 +1618,8 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
 void InterpreterAssembler::AbortIfRegisterCountInvalid(
     Node* parameters_and_registers, Node* formal_parameter_count,
     Node* register_count) {
-  Node* array_size = LoadAndUntagFixedArrayBaseLength(parameters_and_registers);
+  TNode<IntPtrT> array_size =
+      LoadAndUntagFixedArrayBaseLength(parameters_and_registers);
 
   Label ok(this), abort(this, Label::kDeferred);
   Branch(UintPtrLessThanOrEqual(
@@ -1615,7 +1640,7 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
   // registers into the generator's internal parameters_and_registers field.
   TNode<IntPtrT> formal_parameter_count_intptr =
       ChangeInt32ToIntPtr(formal_parameter_count);
-  Node* register_count = ChangeUint32ToWord(registers.reg_count());
+  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
   if (FLAG_debug_code) {
     CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                  RegisterLocation(Register(0))));
@@ -1630,7 +1655,7 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
     // Iterate over parameters and write them into the array.
     Label loop(this, &var_index), done_loop(this);
 
-    Node* reg_base = IntPtrAdd(
+    TNode<IntPtrT> reg_base = IntPtrAdd(
         IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
         formal_parameter_count_intptr);
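+    // reg_base is chosen so that the loop below can address parameter
+    // |index| simply as reg_base - index.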
 
@@ -1641,8 +1666,8 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
       GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
                 &done_loop);
 
-      Node* reg_index = IntPtrSub(reg_base, index);
-      Node* value = LoadRegister(reg_index);
+      TNode<WordT> reg_index = IntPtrSub(reg_base, index);
+      TNode<Object> value = LoadRegister(reg_index);
 
       StoreFixedArrayElement(array, index, value);
 
@@ -1666,11 +1691,12 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
       Node* index = var_index.value();
       GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
 
-      Node* reg_index =
+      TNode<WordT> reg_index =
           IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
-      Node* value = LoadRegister(reg_index);
+      TNode<Object> value = LoadRegister(reg_index);
 
-      Node* array_index = IntPtrAdd(formal_parameter_count_intptr, index);
+      TNode<WordT> array_index =
+          IntPtrAdd(formal_parameter_count_intptr, index);
       StoreFixedArrayElement(array, array_index, value);
 
       var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
@@ -1714,8 +1740,7 @@ Node* InterpreterAssembler::ImportRegisterFile(
         IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
     StoreRegister(value, reg_index);
 
-    StoreFixedArrayElement(array, array_index,
-                           LoadRoot(RootIndex::kStaleRegister));
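+    // After a value is copied back into its register, the array slot is
+    // replaced with the stale-register sentinel, presumably so reads of an
+    // already-imported slot stand out.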
+    StoreFixedArrayElement(array, array_index, StaleRegisterConstant());
 
     var_index = IntPtrAdd(index, IntPtrConstant(1));
     Goto(&loop);
@@ -1730,8 +1755,8 @@ int InterpreterAssembler::CurrentBytecodeSize() const {
 }
 
 void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
-  Node* object = GetAccumulator();
-  Node* context = GetContext();
+  TNode<Object> object = GetAccumulator();
+  TNode<Context> context = GetContext();
 
   Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
   Variable var_result(this, MachineRepresentation::kTagged);
@@ -1739,7 +1764,7 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
       if_objectisother(this, Label::kDeferred);
 
   GotoIf(TaggedIsSmi(object), &if_objectissmi);
-  Branch(IsHeapNumber(object), &if_objectisheapnumber, &if_objectisother);
+  Branch(IsHeapNumber(CAST(object)), &if_objectisheapnumber, &if_objectisother);
 
   BIND(&if_objectissmi);
   {
@@ -1762,7 +1787,7 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
       builtin = Builtins::kNonNumberToNumeric;
       // Special case for collecting BigInt feedback.
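+      // BigInts are already numeric: the value is passed through unchanged
+      // and only kBigInt feedback is recorded.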
       Label not_bigint(this);
-      GotoIfNot(IsBigInt(object), &not_bigint);
+      GotoIfNot(IsBigInt(CAST(object)), &not_bigint);
       {
         var_result.Bind(object);
         var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
@@ -1781,7 +1806,7 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
 
   // Record the type feedback collected for {object}.
   Node* slot_index = BytecodeOperandIdx(0);
-  Node* maybe_feedback_vector = LoadFeedbackVector();
+  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
 
   UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);
 
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index a135eaacdd116e..33fa987595daee 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -25,7 +25,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 
   // Returns the 32-bit unsigned count immediate for bytecode operand
   // |operand_index| in the current bytecode.
-  compiler::Node* BytecodeOperandCount(int operand_index);
+  compiler::TNode<Uint32T> BytecodeOperandCount(int operand_index);
   // Returns the 32-bit unsigned flag for bytecode operand |operand_index|
   // in the current bytecode.
   compiler::Node* BytecodeOperandFlag(int operand_index);
@@ -40,7 +40,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   compiler::Node* BytecodeOperandIdxSmi(int operand_index);
   // Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
   // in the current bytecode.
-  compiler::Node* BytecodeOperandUImm(int operand_index);
+  compiler::TNode<Uint32T> BytecodeOperandUImm(int operand_index);
   // Returns the word-size unsigned immediate for bytecode operand
   // |operand_index| in the current bytecode.
   compiler::Node* BytecodeOperandUImmWord(int operand_index);
@@ -67,34 +67,37 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
 
   // Accumulator.
-  compiler::Node* GetAccumulator();
+  compiler::TNode<Object> GetAccumulator();
   void SetAccumulator(compiler::Node* value);
 
   // Context.
-  compiler::Node* GetContext();
-  void SetContext(compiler::Node* value);
+  compiler::TNode<Context> GetContext();
+  void SetContext(compiler::TNode<Context> value);
 
   // Context at |depth| in the context chain starting at |context|.
-  compiler::Node* GetContextAtDepth(compiler::Node* context,
-                                    compiler::Node* depth);
+  compiler::Node* GetContextAtDepth(compiler::TNode<Context> context,
+                                    compiler::TNode<Uint32T> depth);
 
   // Goto the given |target| if the context chain starting at |context| has any
   // extensions up to the given |depth|.
-  void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
-                                          compiler::Node* depth, Label* target);
+  void GotoIfHasContextExtensionUpToDepth(compiler::TNode<Context> context,
+                                          compiler::TNode<Uint32T> depth,
+                                          Label* target);
 
   // A RegListNodePair provides an abstraction over lists of registers.
   class RegListNodePair {
    public:
-    RegListNodePair(Node* base_reg_location, Node* reg_count)
+    RegListNodePair(TNode<IntPtrT> base_reg_location, TNode<Word32T> reg_count)
         : base_reg_location_(base_reg_location), reg_count_(reg_count) {}
 
-    compiler::Node* reg_count() const { return reg_count_; }
-    compiler::Node* base_reg_location() const { return base_reg_location_; }
+    compiler::TNode<Word32T> reg_count() const { return reg_count_; }
+    compiler::TNode<IntPtrT> base_reg_location() const {
+      return base_reg_location_;
+    }
 
    private:
-    compiler::Node* base_reg_location_;
-    compiler::Node* reg_count_;
+    compiler::TNode<IntPtrT> base_reg_location_;
+    compiler::TNode<Word32T> reg_count_;
   };
 
   // Backup/restore register file to/from a fixed array of the correct length.
@@ -110,13 +113,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
                                      TNode<Int32T> formal_parameter_count);
 
   // Loads from and stores to the interpreter register file.
-  compiler::Node* LoadRegister(Register reg);
-  compiler::Node* LoadAndUntagRegister(Register reg);
-  compiler::Node* LoadRegisterAtOperandIndex(int operand_index);
-  std::pair<compiler::Node*, compiler::Node*> LoadRegisterPairAtOperandIndex(
-      int operand_index);
+  compiler::TNode<Object> LoadRegister(Register reg);
+  compiler::TNode<IntPtrT> LoadAndUntagRegister(Register reg);
+  compiler::TNode<Object> LoadRegisterAtOperandIndex(int operand_index);
+  std::pair<compiler::TNode<Object>, compiler::TNode<Object>>
+  LoadRegisterPairAtOperandIndex(int operand_index);
   void StoreRegister(compiler::Node* value, Register reg);
-  void StoreAndTagRegister(compiler::Node* value, Register reg);
   void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
   void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
                                        compiler::Node* value2,
@@ -129,20 +131,19 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
   Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
                                      int index);
-  Node* RegisterLocationInRegisterList(const RegListNodePair& reg_list,
-                                       int index);
+  TNode<IntPtrT> RegisterLocationInRegisterList(const RegListNodePair& reg_list,
+                                                int index);
 
   // Load constant at the index specified in operand |operand_index| from the
   // constant pool.
   compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
   // Load and untag constant at the index specified in operand |operand_index|
   // from the constant pool.
-  compiler::Node* LoadAndUntagConstantPoolEntryAtOperandIndex(
-      int operand_index);
+  TNode<IntPtrT> LoadAndUntagConstantPoolEntryAtOperandIndex(int operand_index);
   // Load constant at |index| in the constant pool.
   compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
   // Load and untag constant at |index| in the constant pool.
-  compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
+  TNode<IntPtrT> LoadAndUntagConstantPoolEntry(compiler::Node* index);
 
   // Load the FeedbackVector for the current function. The returned node could be
   // undefined.
@@ -193,8 +194,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   // Call constructor |target| with |args| arguments (not including receiver).
   // The |new_target| is the same as the |target| for the new keyword, but
   // differs for the super keyword.
-  compiler::Node* Construct(compiler::Node* target, compiler::Node* context,
-                            compiler::Node* new_target,
+  compiler::Node* Construct(compiler::SloppyTNode<Object> target,
+                            compiler::Node* context,
+                            compiler::SloppyTNode<Object> new_target,
                             const RegListNodePair& args,
                             compiler::Node* slot_id,
                             compiler::Node* feedback_vector);
@@ -225,13 +227,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 
   // Jump forward relative to the current bytecode by |jump_offset| if the
-  // word values |lhs| and |rhs| are equal.
-  void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
-                       compiler::Node* jump_offset);
+  // tagged values |lhs| and |rhs| are equal.
+  void JumpIfTaggedEqual(compiler::TNode<Object> lhs,
+                         compiler::TNode<Object> rhs,
+                         compiler::Node* jump_offset);
 
   // Jump forward relative to the current bytecode by |jump_offset| if the
-  // word values |lhs| and |rhs| are not equal.
-  void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
-                          compiler::Node* jump_offset);
+  // tagged values |lhs| and |rhs| are not equal.
+  void JumpIfTaggedNotEqual(compiler::TNode<Object> lhs,
+                            compiler::TNode<Object> rhs,
+                            compiler::Node* jump_offset);
 
   // Updates the profiler interrupt budget for a return.
   void UpdateInterruptBudgetOnReturn();
@@ -252,7 +256,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 
   // Abort with the given abort reason.
   void Abort(AbortReason abort_reason);
-  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+  void AbortIfWordNotEqual(compiler::TNode<WordT> lhs,
+                           compiler::TNode<WordT> rhs,
                            AbortReason abort_reason);
   // Abort if |register_count| is invalid for given register file array.
   void AbortIfRegisterCountInvalid(compiler::Node* parameters_and_registers,
@@ -263,7 +268,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   void MaybeDropFrames(compiler::Node* context);
 
   // Returns the offset from the BytecodeArrayPointer of the current bytecode.
-  compiler::Node* BytecodeOffset();
+  TNode<IntPtrT> BytecodeOffset();
 
  protected:
   Bytecode bytecode() const { return bytecode_; }
@@ -285,13 +290,13 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 
   // Returns the frame pointer for the interpreted frame of the function being
   // interpreted.
-  compiler::Node* GetInterpretedFramePointer();
+  TNode<RawPtrT> GetInterpretedFramePointer();
 
   // Operations on registers.
-  compiler::Node* RegisterLocation(Register reg);
-  compiler::Node* RegisterLocation(compiler::Node* reg_index);
-  compiler::Node* NextRegister(compiler::Node* reg_index);
-  compiler::Node* LoadRegister(Node* reg_index);
+  compiler::TNode<IntPtrT> RegisterLocation(Register reg);
+  compiler::TNode<IntPtrT> RegisterLocation(compiler::Node* reg_index);
+  compiler::TNode<IntPtrT> NextRegister(compiler::Node* reg_index);
+  compiler::TNode<Object> LoadRegister(Node* reg_index);
   void StoreRegister(compiler::Node* value, compiler::Node* reg_index);
 
   // Saves and restores interpreter bytecode offset to the interpreter stack
@@ -311,7 +316,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   void UpdateInterruptBudget(compiler::Node* weight, bool backward);
 
   // Returns the offset of register |index| relative to RegisterFilePointer().
-  compiler::Node* RegisterFrameOffset(compiler::Node* index);
+  compiler::TNode<IntPtrT> RegisterFrameOffset(compiler::Node* index);
 
   // Returns the offset of an operand relative to the current bytecode offset.
   compiler::Node* OperandOffset(int operand_index);
@@ -321,36 +326,36 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   // The |result_type| determines the size and signedness of the
   // value read. This method should only be used on architectures that
   // do not support unaligned memory accesses.
-  compiler::Node* BytecodeOperandReadUnaligned(
+  compiler::TNode<Word32T> BytecodeOperandReadUnaligned(
       int relative_offset, MachineType result_type,
       LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
 
   // Returns the zero- or sign-extended word32 value of the operand.
-  compiler::Node* BytecodeOperandUnsignedByte(
+  compiler::TNode<Uint8T> BytecodeOperandUnsignedByte(
       int operand_index,
       LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
-  compiler::Node* BytecodeOperandSignedByte(
+  compiler::TNode<Int8T> BytecodeOperandSignedByte(
       int operand_index,
       LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
-  compiler::Node* BytecodeOperandUnsignedShort(
+  compiler::TNode<Uint16T> BytecodeOperandUnsignedShort(
       int operand_index,
       LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
-  compiler::Node* BytecodeOperandSignedShort(
+  compiler::TNode<Int16T> BytecodeOperandSignedShort(
       int operand_index,
       LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
-  compiler::Node* BytecodeOperandUnsignedQuad(
+  compiler::TNode<Uint32T> BytecodeOperandUnsignedQuad(
       int operand_index,
       LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
-  compiler::Node* BytecodeOperandSignedQuad(
+  compiler::TNode<Int32T> BytecodeOperandSignedQuad(
       int operand_index,
       LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
 
   // Returns the zero- or sign-extended word32 value of the operand of
   // the given size.
-  compiler::Node* BytecodeSignedOperand(
+  compiler::TNode<Int32T> BytecodeSignedOperand(
       int operand_index, OperandSize operand_size,
       LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
-  compiler::Node* BytecodeUnsignedOperand(
+  compiler::TNode<Uint32T> BytecodeUnsignedOperand(
       int operand_index, OperandSize operand_size,
       LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
 
@@ -373,30 +378,31 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   compiler::Node* Jump(compiler::Node* jump_offset, bool backward);
 
   // Jump forward relative to the current bytecode by |jump_offset| if the
-  // |condition| is true. Helper function for JumpIfWordEqual and
-  // JumpIfWordNotEqual.
+  // |condition| is true. Helper function for JumpIfTaggedEqual and
+  // JumpIfTaggedNotEqual.
   void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
 
   // Save the bytecode offset to the interpreter frame.
   void SaveBytecodeOffset();
   // Reload the bytecode offset from the interpreter frame.
-  Node* ReloadBytecodeOffset();
+  TNode<IntPtrT> ReloadBytecodeOffset();
 
   // Updates and returns BytecodeOffset() advanced by the current bytecode's
   // size. Traces the exit of the current bytecode.
-  compiler::Node* Advance();
+  TNode<IntPtrT> Advance();
 
   // Updates and returns BytecodeOffset() advanced by delta bytecodes.
   // Traces the exit of the current bytecode.
-  compiler::Node* Advance(int delta);
-  compiler::Node* Advance(compiler::Node* delta, bool backward = false);
+  TNode<IntPtrT> Advance(int delta);
+  TNode<IntPtrT> Advance(SloppyTNode<IntPtrT> delta, bool backward = false);
 
   // Load the bytecode at |bytecode_offset|.
-  compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);
+  compiler::TNode<WordT> LoadBytecode(compiler::Node* bytecode_offset);
 
   // Look ahead for Star and inline it in a branch. Returns a new target
   // bytecode node for dispatch.
-  compiler::Node* StarDispatchLookahead(compiler::Node* target_bytecode);
+  compiler::TNode<WordT> StarDispatchLookahead(
+      compiler::TNode<WordT> target_bytecode);
 
   // Build code for Star at the current BytecodeOffset() and Advance() to the
   // next dispatch offset.
@@ -418,17 +424,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 
   Bytecode bytecode_;
   OperandScale operand_scale_;
-  CodeStubAssembler::Variable interpreted_frame_pointer_;
+  TVariable<RawPtrT> interpreted_frame_pointer_;
   CodeStubAssembler::Variable bytecode_array_;
-  CodeStubAssembler::Variable bytecode_offset_;
+  TVariable<IntPtrT> bytecode_offset_;
   CodeStubAssembler::Variable dispatch_table_;
   CodeStubAssembler::Variable accumulator_;
   AccumulatorUse accumulator_use_;
   bool made_call_;
   bool reloaded_frame_ptr_;
   bool bytecode_array_valid_;
-  bool disable_stack_check_across_call_;
-  compiler::Node* stack_pointer_before_call_;
 
   DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
 };
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 00ce8eaf689006..e8569ecd55b0a7 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -23,6 +23,7 @@
 #include "src/objects/js-generator.h"
 #include "src/objects/objects-inl.h"
 #include "src/objects/oddball.h"
+#include "src/objects/shared-function-info.h"
 #include "src/objects/source-text-module.h"
 #include "src/utils/ostreams.h"
 
@@ -61,7 +62,7 @@ using Variable = CodeStubAssembler::Variable;
 //
 // Load literal '0' into the accumulator.
 IGNITION_HANDLER(LdaZero, InterpreterAssembler) {
-  Node* zero_value = NumberConstant(0.0);
+  TNode<Number> zero_value = NumberConstant(0.0);
   SetAccumulator(zero_value);
   Dispatch();
 }
@@ -128,7 +129,7 @@ IGNITION_HANDLER(LdaFalse, InterpreterAssembler) {
 //
 // Load accumulator with value from register <src>.
 IGNITION_HANDLER(Ldar, InterpreterAssembler) {
-  Node* value = LoadRegisterAtOperandIndex(0);
+  TNode<Object> value = LoadRegisterAtOperandIndex(0);
   SetAccumulator(value);
   Dispatch();
 }
@@ -137,7 +138,7 @@ IGNITION_HANDLER(Ldar, InterpreterAssembler) {
 //
 // Store accumulator to register <dst>.
 IGNITION_HANDLER(Star, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
+  TNode<Object> accumulator = GetAccumulator();
   StoreRegisterAtOperandIndex(accumulator, 0);
   Dispatch();
 }
@@ -146,7 +147,7 @@ IGNITION_HANDLER(Star, InterpreterAssembler) {
 //
 // Stores the value of register <src> to register <dst>.
 IGNITION_HANDLER(Mov, InterpreterAssembler) {
-  Node* src_value = LoadRegisterAtOperandIndex(0);
+  TNode<Object> src_value = LoadRegisterAtOperandIndex(0);
   StoreRegisterAtOperandIndex(src_value, 1);
   Dispatch();
 }
@@ -159,7 +160,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
 
   void LdaGlobal(int slot_operand_index, int name_operand_index,
                  TypeofMode typeof_mode) {
-    Node* maybe_feedback_vector = LoadFeedbackVector();
+    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
     Node* feedback_slot = BytecodeOperandIdx(slot_operand_index);
 
     AccessorAssembler accessor_asm(state());
@@ -168,7 +169,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
       Dispatch();
     });
 
-    LazyNode<Context> lazy_context = [=] { return CAST(GetContext()); };
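+    // The context and name are wrapped in LazyNodes so that they are only
+    // materialized on the paths of the IC that actually need them.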
+    LazyNode<Context> lazy_context = [=] { return GetContext(); };
 
     LazyNode<Name> lazy_name = [=] {
       Node* name = LoadConstantPoolEntryAtOperandIndex(name_operand_index);
@@ -209,14 +210,14 @@ IGNITION_HANDLER(LdaGlobalInsideTypeof, InterpreterLoadGlobalAssembler) {
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedbackVector slot <slot>.
 IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
 
   // Store the global via the StoreGlobalIC.
   Node* name = LoadConstantPoolEntryAtOperandIndex(0);
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
   Node* raw_slot = BytecodeOperandIdx(1);
-  Node* smi_slot = SmiTag(raw_slot);
-  Node* maybe_vector = LoadFeedbackVector();
+  TNode<Smi> smi_slot = SmiTag(raw_slot);
+  TNode<HeapObject> maybe_vector = LoadFeedbackVector();
 
   Label no_feedback(this, Label::kDeferred), end(this);
   GotoIf(IsUndefined(maybe_vector), &no_feedback);
@@ -238,11 +239,11 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
 // Load the object in |slot_index| of the context at |depth| in the context
 // chain starting at |context| into the accumulator.
 IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
-  Node* context = LoadRegisterAtOperandIndex(0);
+  TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
   Node* slot_index = BytecodeOperandIdx(1);
-  Node* depth = BytecodeOperandUImm(2);
+  TNode<Uint32T> depth = BytecodeOperandUImm(2);
   Node* slot_context = GetContextAtDepth(context, depth);
-  Node* result = LoadContextElement(slot_context, slot_index);
+  TNode<Object> result = LoadContextElement(slot_context, slot_index);
   SetAccumulator(result);
   Dispatch();
 }
@@ -252,11 +253,11 @@ IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
 // Load the object in |slot_index| of the context at |depth| in the context
 // chain starting at |context| into the accumulator.
 IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
-  Node* context = LoadRegisterAtOperandIndex(0);
+  TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
   Node* slot_index = BytecodeOperandIdx(1);
-  Node* depth = BytecodeOperandUImm(2);
+  TNode<Uint32T> depth = BytecodeOperandUImm(2);
   Node* slot_context = GetContextAtDepth(context, depth);
-  Node* result = LoadContextElement(slot_context, slot_index);
+  TNode<Object> result = LoadContextElement(slot_context, slot_index);
   SetAccumulator(result);
   Dispatch();
 }
@@ -266,8 +267,8 @@ IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
 // Load the object in |slot_index| of the current context into the accumulator.
 IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) {
   Node* slot_index = BytecodeOperandIdx(0);
-  Node* slot_context = GetContext();
-  Node* result = LoadContextElement(slot_context, slot_index);
+  TNode<Context> slot_context = GetContext();
+  TNode<Object> result = LoadContextElement(slot_context, slot_index);
   SetAccumulator(result);
   Dispatch();
 }
@@ -277,8 +278,8 @@ IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) {
 // Load the object in |slot_index| of the current context into the accumulator.
 IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
   Node* slot_index = BytecodeOperandIdx(0);
-  Node* slot_context = GetContext();
-  Node* result = LoadContextElement(slot_context, slot_index);
+  TNode<Context> slot_context = GetContext();
+  TNode<Object> result = LoadContextElement(slot_context, slot_index);
   SetAccumulator(result);
   Dispatch();
 }
@@ -288,10 +289,10 @@ IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
 // Stores the object in the accumulator into |slot_index| of the context at
 // |depth| in the context chain starting at |context|.
 IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) {
-  Node* value = GetAccumulator();
-  Node* context = LoadRegisterAtOperandIndex(0);
+  TNode<Object> value = GetAccumulator();
+  TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
   Node* slot_index = BytecodeOperandIdx(1);
-  Node* depth = BytecodeOperandUImm(2);
+  TNode<Uint32T> depth = BytecodeOperandUImm(2);
   Node* slot_context = GetContextAtDepth(context, depth);
   StoreContextElement(slot_context, slot_index, value);
   Dispatch();
@@ -302,9 +303,9 @@ IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) {
 // Stores the object in the accumulator into |slot_index| of the current
 // context.
 IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
   Node* slot_index = BytecodeOperandIdx(0);
-  Node* slot_context = GetContext();
+  TNode<Context> slot_context = GetContext();
   StoreContextElement(slot_context, slot_index, value);
   Dispatch();
 }
@@ -315,8 +316,8 @@ IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) {
 // dynamically.
 IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
   Node* name = LoadConstantPoolEntryAtOperandIndex(0);
-  Node* context = GetContext();
-  Node* result = CallRuntime(Runtime::kLoadLookupSlot, context, name);
+  TNode<Context> context = GetContext();
+  TNode<Object> result = CallRuntime(Runtime::kLoadLookupSlot, context, name);
   SetAccumulator(result);
   Dispatch();
 }
@@ -327,8 +328,8 @@ IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
 // dynamically without causing a NoReferenceError.
 IGNITION_HANDLER(LdaLookupSlotInsideTypeof, InterpreterAssembler) {
   Node* name = LoadConstantPoolEntryAtOperandIndex(0);
-  Node* context = GetContext();
-  Node* result =
+  TNode<Context> context = GetContext();
+  TNode<Object> result =
       CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, context, name);
   SetAccumulator(result);
   Dispatch();
@@ -342,9 +343,9 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
       : InterpreterAssembler(state, bytecode, operand_scale) {}
 
   void LookupContextSlot(Runtime::FunctionId function_id) {
-    Node* context = GetContext();
+    TNode<Context> context = GetContext();
     Node* slot_index = BytecodeOperandIdx(1);
-    Node* depth = BytecodeOperandUImm(2);
+    TNode<Uint32T> depth = BytecodeOperandUImm(2);
 
     Label slowpath(this, Label::kDeferred);
 
@@ -354,7 +355,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
     // Fast path does a normal context-slot load.
     {
       Node* slot_context = GetContextAtDepth(context, depth);
-      Node* result = LoadContextElement(slot_context, slot_index);
+      TNode<Object> result = LoadContextElement(slot_context, slot_index);
       SetAccumulator(result);
       Dispatch();
     }
@@ -363,7 +364,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
     BIND(&slowpath);
     {
       Node* name = LoadConstantPoolEntryAtOperandIndex(0);
-      Node* result = CallRuntime(function_id, context, name);
+      TNode<Object> result = CallRuntime(function_id, context, name);
       SetAccumulator(result);
       Dispatch();
     }
@@ -394,8 +395,8 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler {
       : InterpreterLoadGlobalAssembler(state, bytecode, operand_scale) {}
 
   void LookupGlobalSlot(Runtime::FunctionId function_id) {
-    Node* context = GetContext();
-    Node* depth = BytecodeOperandUImm(2);
+    TNode<Context> context = GetContext();
+    TNode<Uint32T> depth = BytecodeOperandUImm(2);
 
     Label slowpath(this, Label::kDeferred);
 
@@ -419,7 +420,7 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler {
     BIND(&slowpath);
     {
       Node* name = LoadConstantPoolEntryAtOperandIndex(0);
-      Node* result = CallRuntime(function_id, context, name);
+      TNode<Object> result = CallRuntime(function_id, context, name);
       SetAccumulator(result);
       Dispatch();
     }
@@ -448,10 +449,10 @@ IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof,
 // Store the object in the accumulator to the object with the name in
 // constant pool entry |name_index|.
 IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
   Node* name = LoadConstantPoolEntryAtOperandIndex(0);
   Node* bytecode_flags = BytecodeOperandFlag(1);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   Variable var_result(this, MachineRepresentation::kTagged);
 
   Label sloppy(this), strict(this), end(this);
@@ -505,18 +506,18 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
 // Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
 // constant pool entry <name_index>.
 IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
-  Node* feedback_vector = LoadFeedbackVector();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
   Node* feedback_slot = BytecodeOperandIdx(2);
-  Node* smi_slot = SmiTag(feedback_slot);
+  TNode<Smi> smi_slot = SmiTag(feedback_slot);
 
   // Load receiver.
-  Node* recv = LoadRegisterAtOperandIndex(0);
+  TNode<Object> recv = LoadRegisterAtOperandIndex(0);
 
   // Load the name and context lazily.
   LazyNode<Name> name = [=] {
     return CAST(LoadConstantPoolEntryAtOperandIndex(1));
   };
-  LazyNode<Context> context = [=] { return CAST(GetContext()); };
+  LazyNode<Context> context = [=] { return GetContext(); };
 
   Label done(this);
   Variable var_result(this, MachineRepresentation::kTagged);
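
The LazyNode<T> values above are thunks: the name and the context are only
materialized if the load IC's generated code actually asks for them, keeping
the constant-pool load off the fast path. A rough standalone analogue using
std::function (the real LazyNode is a CSA typedef, so this is illustrative
only; LoadNameFromConstantPool is a made-up stand-in):

#include <functional>
#include <string>

template <typename T>
using Lazy = std::function<T()>;  // a thunk, evaluated only when invoked

// Stand-in for the constant-pool load the fast path wants to avoid.
std::string LoadNameFromConstantPool() { return "property_name"; }

void AccessProperty(bool need_slow_path) {
  Lazy<std::string> name = [] { return LoadNameFromConstantPool(); };
  // Fast path: the thunk is never forced, so the load never happens.
  if (need_slow_path) {
    std::string n = name();  // slow path materializes the name on demand
    (void)n;
  }
}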
@@ -538,10 +539,11 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
 //
 // Calls the GetProperty builtin for <object> and the key in the accumulator.
 IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) {
-  Node* object = LoadRegisterAtOperandIndex(0);
+  TNode<Object> object = LoadRegisterAtOperandIndex(0);
   Node* name = LoadConstantPoolEntryAtOperandIndex(1);
-  Node* context = GetContext();
-  Node* result = CallBuiltin(Builtins::kGetProperty, context, object, name);
+  TNode<Context> context = GetContext();
+  TNode<Object> result =
+      CallBuiltin(Builtins::kGetProperty, context, object, name);
   SetAccumulator(result);
   Dispatch();
 }
@@ -551,12 +553,12 @@ IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) {
 // Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
 // in the accumulator.
 IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
-  Node* object = LoadRegisterAtOperandIndex(0);
-  Node* name = GetAccumulator();
+  TNode<Object> object = LoadRegisterAtOperandIndex(0);
+  TNode<Object> name = GetAccumulator();
   Node* raw_slot = BytecodeOperandIdx(1);
-  Node* smi_slot = SmiTag(raw_slot);
-  Node* feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<Smi> smi_slot = SmiTag(raw_slot);
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   VARIABLE(var_result, MachineRepresentation::kTagged);
   var_result.Bind(CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
@@ -573,14 +575,14 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
       : InterpreterAssembler(state, bytecode, operand_scale) {}
 
   void StaNamedProperty(Callable ic, NamedPropertyType property_type) {
-    Node* code_target = HeapConstant(ic.code());
-    Node* object = LoadRegisterAtOperandIndex(0);
+    TNode<Code> code_target = HeapConstant(ic.code());
+    TNode<Object> object = LoadRegisterAtOperandIndex(0);
     Node* name = LoadConstantPoolEntryAtOperandIndex(1);
-    Node* value = GetAccumulator();
+    TNode<Object> value = GetAccumulator();
     Node* raw_slot = BytecodeOperandIdx(2);
-    Node* smi_slot = SmiTag(raw_slot);
-    Node* maybe_vector = LoadFeedbackVector();
-    Node* context = GetContext();
+    TNode<Smi> smi_slot = SmiTag(raw_slot);
+    TNode<HeapObject> maybe_vector = LoadFeedbackVector();
+    TNode<Context> context = GetContext();
 
     VARIABLE(var_result, MachineRepresentation::kTagged);
     var_result.Bind(CallStub(ic.descriptor(), code_target, context, object,
@@ -621,12 +623,12 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
 // <name_index> with the value in the accumulator.
 IGNITION_HANDLER(StaNamedPropertyNoFeedback,
                  InterpreterStoreNamedPropertyAssembler) {
-  Node* object = LoadRegisterAtOperandIndex(0);
+  TNode<Object> object = LoadRegisterAtOperandIndex(0);
   Node* name = LoadConstantPoolEntryAtOperandIndex(1);
-  Node* value = GetAccumulator();
-  Node* context = GetContext();
+  TNode<Object> value = GetAccumulator();
+  TNode<Context> context = GetContext();
 
-  Node* result =
+  TNode<Object> result =
       CallRuntime(Runtime::kSetNamedProperty, context, object, name, value);
   SetAccumulator(result);
   Dispatch();
@@ -637,13 +639,13 @@ IGNITION_HANDLER(StaNamedPropertyNoFeedback,
 // Calls the KeyedStoreIC at FeedbackVector slot <slot> for <object> and
 // the key <key> with the value in the accumulator.
 IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
-  Node* object = LoadRegisterAtOperandIndex(0);
-  Node* name = LoadRegisterAtOperandIndex(1);
-  Node* value = GetAccumulator();
+  TNode<Object> object = LoadRegisterAtOperandIndex(0);
+  TNode<Object> name = LoadRegisterAtOperandIndex(1);
+  TNode<Object> value = GetAccumulator();
   Node* raw_slot = BytecodeOperandIdx(2);
-  Node* smi_slot = SmiTag(raw_slot);
-  Node* maybe_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<Smi> smi_slot = SmiTag(raw_slot);
+  TNode<HeapObject> maybe_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   VARIABLE(var_result, MachineRepresentation::kTagged);
   var_result.Bind(CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
@@ -662,13 +664,13 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
 // Calls the StoreInArrayLiteralIC at FeedbackVector slot <slot> for <array> and
 // the key <index> with the value in the accumulator.
 IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
-  Node* array = LoadRegisterAtOperandIndex(0);
-  Node* index = LoadRegisterAtOperandIndex(1);
-  Node* value = GetAccumulator();
+  TNode<Object> array = LoadRegisterAtOperandIndex(0);
+  TNode<Object> index = LoadRegisterAtOperandIndex(1);
+  TNode<Object> value = GetAccumulator();
   Node* raw_slot = BytecodeOperandIdx(2);
-  Node* smi_slot = SmiTag(raw_slot);
-  Node* feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<Smi> smi_slot = SmiTag(raw_slot);
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   VARIABLE(var_result, MachineRepresentation::kTagged);
   var_result.Bind(CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
@@ -691,14 +693,14 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
 // This definition is not observable and is used only for definitions
 // in object or class literals.
 IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
-  Node* object = LoadRegisterAtOperandIndex(0);
-  Node* name = LoadRegisterAtOperandIndex(1);
-  Node* value = GetAccumulator();
-  Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
-  Node* vector_index = SmiTag(BytecodeOperandIdx(3));
+  TNode<Object> object = LoadRegisterAtOperandIndex(0);
+  TNode<Object> name = LoadRegisterAtOperandIndex(1);
+  TNode<Object> value = GetAccumulator();
+  TNode<Smi> flags = SmiFromInt32(BytecodeOperandFlag(2));
+  TNode<Smi> vector_index = SmiTag(BytecodeOperandIdx(3));
 
-  Node* feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
               value, flags, feedback_vector, vector_index);
@@ -707,10 +709,10 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
 
 IGNITION_HANDLER(CollectTypeProfile, InterpreterAssembler) {
   Node* position = BytecodeOperandImmSmi(0);
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
 
-  Node* feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   CallRuntime(Runtime::kCollectTypeProfile, context, position, value,
               feedback_vector);
@@ -724,10 +726,11 @@ IGNITION_HANDLER(CollectTypeProfile, InterpreterAssembler) {
 // relative to the module context.
 IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
   Node* cell_index = BytecodeOperandImmIntPtr(0);
-  Node* depth = BytecodeOperandUImm(1);
+  TNode<Uint32T> depth = BytecodeOperandUImm(1);
 
   Node* module_context = GetContextAtDepth(GetContext(), depth);
-  Node* module = LoadContextElement(module_context, Context::EXTENSION_INDEX);
+  TNode<SourceTextModule> module =
+      CAST(LoadContextElement(module_context, Context::EXTENSION_INDEX));
 
   Label if_export(this), if_import(this), end(this);
   Branch(IntPtrGreaterThan(cell_index, IntPtrConstant(0)), &if_export,
@@ -735,22 +738,24 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
 
   BIND(&if_export);
   {
-    TNode<FixedArray> regular_exports =
-        CAST(LoadObjectField(module, SourceTextModule::kRegularExportsOffset));
+    TNode<FixedArray> regular_exports = LoadObjectField<FixedArray>(
+        module, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
-    Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1));
-    Node* cell = LoadFixedArrayElement(regular_exports, export_index);
+    TNode<WordT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
+    TNode<Cell> cell =
+        CAST(LoadFixedArrayElement(regular_exports, export_index));
     SetAccumulator(LoadObjectField(cell, Cell::kValueOffset));
     Goto(&end);
   }
 
   BIND(&if_import);
   {
-    TNode<FixedArray> regular_imports =
-        CAST(LoadObjectField(module, SourceTextModule::kRegularImportsOffset));
+    TNode<FixedArray> regular_imports = LoadObjectField<FixedArray>(
+        module, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
-    Node* import_index = IntPtrSub(IntPtrConstant(-1), cell_index);
-    Node* cell = LoadFixedArrayElement(regular_imports, import_index);
+    TNode<WordT> import_index = IntPtrSub(IntPtrConstant(-1), cell_index);
+    TNode<Cell> cell =
+        CAST(LoadFixedArrayElement(regular_imports, import_index));
     SetAccumulator(LoadObjectField(cell, Cell::kValueOffset));
     Goto(&end);
   }
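
Both branches rely on SourceTextModule's cell-index convention: exports use
positive indices and imports negative ones, each offset by one so that zero
never names a cell. A small sketch of the decoding, with the two arrays
stubbed out:

#include <cstdint>
#include <cstdlib>
#include <vector>

struct Cell { void* value; };

// cell_index > 0: export stored at regular_exports[cell_index - 1]
// cell_index < 0: import stored at regular_imports[-cell_index - 1]
Cell* LookupModuleCell(std::vector<Cell>& regular_exports,
                       std::vector<Cell>& regular_imports,
                       intptr_t cell_index) {
  if (cell_index > 0) return &regular_exports[cell_index - 1];
  if (cell_index < 0) return &regular_imports[-cell_index - 1];
  std::abort();  // zero is not a valid cell index
}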
@@ -764,12 +769,13 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
 // Store accumulator to the module variable identified by <cell_index>.
 // <depth> is the depth of the current context relative to the module context.
 IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
   Node* cell_index = BytecodeOperandImmIntPtr(0);
-  Node* depth = BytecodeOperandUImm(1);
+  TNode<Uint32T> depth = BytecodeOperandUImm(1);
 
   Node* module_context = GetContextAtDepth(GetContext(), depth);
-  Node* module = LoadContextElement(module_context, Context::EXTENSION_INDEX);
+  TNode<SourceTextModule> module =
+      CAST(LoadContextElement(module_context, Context::EXTENSION_INDEX));
 
   Label if_export(this), if_import(this), end(this);
   Branch(IntPtrGreaterThan(cell_index, IntPtrConstant(0)), &if_export,
@@ -777,11 +783,11 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
 
   BIND(&if_export);
   {
-    TNode<FixedArray> regular_exports =
-        CAST(LoadObjectField(module, SourceTextModule::kRegularExportsOffset));
+    TNode<FixedArray> regular_exports = LoadObjectField<FixedArray>(
+        module, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
-    Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1));
-    Node* cell = LoadFixedArrayElement(regular_exports, export_index);
+    TNode<WordT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
+    TNode<Object> cell = LoadFixedArrayElement(regular_exports, export_index);
     StoreObjectField(cell, Cell::kValueOffset, value);
     Goto(&end);
   }
@@ -802,8 +808,8 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
 // Saves the current context in <context>, and pushes the accumulator as the
 // new current context.
 IGNITION_HANDLER(PushContext, InterpreterAssembler) {
-  Node* new_context = GetAccumulator();
-  Node* old_context = GetContext();
+  TNode<Context> new_context = CAST(GetAccumulator());
+  TNode<Context> old_context = GetContext();
   StoreRegisterAtOperandIndex(old_context, 0);
   SetContext(new_context);
   Dispatch();
@@ -813,7 +819,7 @@ IGNITION_HANDLER(PushContext, InterpreterAssembler) {
 //
 // Pops the current context and sets <context> as the new context.
 IGNITION_HANDLER(PopContext, InterpreterAssembler) {
-  Node* context = LoadRegisterAtOperandIndex(0);
+  TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
   SetContext(context);
   Dispatch();
 }
@@ -829,11 +835,11 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
                                    Node* slot, Node* vector, bool lhs_is_smi);
 
   void BinaryOpWithFeedback(BinaryOpGenerator generator) {
-    Node* lhs = LoadRegisterAtOperandIndex(0);
-    Node* rhs = GetAccumulator();
-    Node* context = GetContext();
+    TNode<Object> lhs = LoadRegisterAtOperandIndex(0);
+    TNode<Object> rhs = GetAccumulator();
+    TNode<Context> context = GetContext();
     Node* slot_index = BytecodeOperandIdx(1);
-    Node* maybe_feedback_vector = LoadFeedbackVector();
+    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
 
     BinaryOpAssembler binop_asm(state());
     Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
@@ -843,11 +849,11 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
   }
 
   void BinaryOpSmiWithFeedback(BinaryOpGenerator generator) {
-    Node* lhs = GetAccumulator();
+    TNode<Object> lhs = GetAccumulator();
     Node* rhs = BytecodeOperandImmSmi(0);
-    Node* context = GetContext();
+    TNode<Context> context = GetContext();
     Node* slot_index = BytecodeOperandIdx(1);
-    Node* maybe_feedback_vector = LoadFeedbackVector();
+    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
 
     BinaryOpAssembler binop_asm(state());
     Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
@@ -950,11 +956,11 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
       : InterpreterAssembler(state, bytecode, operand_scale) {}
 
   void BitwiseBinaryOpWithFeedback(Operation bitwise_op) {
-    Node* left = LoadRegisterAtOperandIndex(0);
-    Node* right = GetAccumulator();
-    Node* context = GetContext();
+    TNode<Object> left = LoadRegisterAtOperandIndex(0);
+    TNode<Object> right = GetAccumulator();
+    TNode<Context> context = GetContext();
     Node* slot_index = BytecodeOperandIdx(1);
-    Node* maybe_feedback_vector = LoadFeedbackVector();
+    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
 
     TVARIABLE(Smi, var_left_feedback);
     TVARIABLE(Smi, var_right_feedback);
@@ -1000,11 +1006,11 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
   }
 
   void BitwiseBinaryOpWithSmi(Operation bitwise_op) {
-    Node* left = GetAccumulator();
+    TNode<Object> left = GetAccumulator();
     Node* right = BytecodeOperandImmSmi(0);
     Node* slot_index = BytecodeOperandIdx(1);
-    Node* maybe_feedback_vector = LoadFeedbackVector();
-    Node* context = GetContext();
+    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+    TNode<Context> context = GetContext();
 
     TVARIABLE(Smi, var_left_feedback);
     VARIABLE(var_left_word32, MachineRepresentation::kWord32);
@@ -1108,10 +1114,10 @@ IGNITION_HANDLER(BitwiseAndSmi, InterpreterBitwiseBinaryOpAssembler) {
 //
 // Perform bitwise-not on the accumulator.
 IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
-  Node* operand = GetAccumulator();
+  TNode<Object> operand = GetAccumulator();
   Node* slot_index = BytecodeOperandIdx(0);
-  Node* maybe_feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   VARIABLE(var_word32, MachineRepresentation::kWord32);
   TVARIABLE(Smi, var_feedback);
@@ -1202,9 +1208,9 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
       Label if_other(this, Label::kDeferred);
       Node* value = var_value.value();
       GotoIf(TaggedIsSmi(value), &if_smi);
-      Node* map = LoadMap(value);
+      TNode<Map> map = LoadMap(value);
       GotoIf(IsHeapNumberMap(map), &if_heapnumber);
-      Node* instance_type = LoadMapInstanceType(map);
+      TNode<Uint16T> instance_type = LoadMapInstanceType(map);
       GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
       Branch(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &if_oddball,
              &if_other);
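
The classification order here is deliberate: the Smi check needs no memory
access, the HeapNumber check needs only the map, and only the remaining
cases pay for an instance-type load. The same cascade as ordinary C++ over
a hypothetical tag layout (constants invented for the sketch, and the map
check flattened into the instance-type switch for brevity):

enum class Kind { kSmi, kHeapNumber, kBigInt, kOddball, kOther };

constexpr int HEAP_NUMBER_TYPE = 1, BIGINT_TYPE = 2, ODDBALL_TYPE = 3;

struct Value {
  bool is_smi;
  int instance_type;  // meaningful only when !is_smi
};

Kind Classify(const Value& v) {
  if (v.is_smi) return Kind::kSmi;  // cheapest test first, no map load
  switch (v.instance_type) {
    case HEAP_NUMBER_TYPE: return Kind::kHeapNumber;
    case BIGINT_TYPE: return Kind::kBigInt;
    case ODDBALL_TYPE: return Kind::kOddball;
    default: return Kind::kOther;
  }
}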
@@ -1266,7 +1272,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
 
     BIND(&end);
     Node* slot_index = BytecodeOperandIdx(0);
-    Node* maybe_feedback_vector = LoadFeedbackVector();
+    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
     UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot_index);
     SetAccumulator(var_result.value());
     Dispatch();
@@ -1324,9 +1330,9 @@ IGNITION_HANDLER(Negate, NegateAssemblerImpl) { UnaryOpWithFeedback(); }
 //
 // Convert the object referenced by the accumulator to a name.
 IGNITION_HANDLER(ToName, InterpreterAssembler) {
-  Node* object = GetAccumulator();
-  Node* context = GetContext();
-  Node* result = CallBuiltin(Builtins::kToName, context, object);
+  TNode<Object> object = GetAccumulator();
+  TNode<Context> context = GetContext();
+  TNode<Object> result = CallBuiltin(Builtins::kToName, context, object);
   StoreRegisterAtOperandIndex(result, 0);
   Dispatch();
 }
@@ -1349,9 +1355,9 @@ IGNITION_HANDLER(ToNumeric, InterpreterAssembler) {
 //
 // Convert the object referenced by the accumulator to a JSReceiver.
 IGNITION_HANDLER(ToObject, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
-  Node* context = GetContext();
-  Node* result = CallBuiltin(Builtins::kToObject, context, accumulator);
+  TNode<Object> accumulator = GetAccumulator();
+  TNode<Context> context = GetContext();
+  TNode<Object> result = CallBuiltin(Builtins::kToObject, context, accumulator);
   StoreRegisterAtOperandIndex(result, 0);
   Dispatch();
 }
@@ -1435,7 +1441,7 @@ IGNITION_HANDLER(Dec, IncDecAssembler) { DecWithFeedback(); }
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
 IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
   Variable result(this, MachineRepresentation::kTagged);
   Label if_true(this), if_false(this), end(this);
   BranchIfToBooleanIsTrue(value, &if_true, &if_false);
@@ -1459,12 +1465,12 @@ IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) {
 // Perform logical-not on the accumulator, which must already be a boolean
 // value.
 IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
   Variable result(this, MachineRepresentation::kTagged);
   Label if_true(this), if_false(this), end(this);
-  Node* true_value = TrueConstant();
-  Node* false_value = FalseConstant();
-  Branch(WordEqual(value, true_value), &if_true, &if_false);
+  TNode<Oddball> true_value = TrueConstant();
+  TNode<Oddball> false_value = FalseConstant();
+  Branch(TaggedEqual(value, true_value), &if_true, &if_false);
   BIND(&if_true);
   {
     result.Bind(false_value);
@@ -1472,7 +1478,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
   }
   BIND(&if_false);
   {
-    CSA_ASSERT(this, WordEqual(value, false_value));
+    CSA_ASSERT(this, TaggedEqual(value, false_value));
     result.Bind(true_value);
     Goto(&end);
   }
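
The WordEqual to TaggedEqual change in this hunk, and throughout the file,
matters once a tagged value is no longer a full machine word: with pointer
compression a tagged value is a 32-bit offset into the heap cage, so
equality must compare only the tagged width. A rough model of the
distinction (the 32-bit layout is an assumption for illustration; the real
helper lives in CodeStubAssembler):

#include <cstdint>

using Tagged32 = uint32_t;  // compressed tagged value (assumed layout)
using Word = uintptr_t;     // full machine word

// Comparing full words could observe stale upper bits in a register that
// only carries a 32-bit compressed value; compare the tagged width instead.
bool TaggedEqual(Word a, Word b) {
  return static_cast<Tagged32>(a) == static_cast<Tagged32>(b);
}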
@@ -1486,7 +1492,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
 // Load the accumulator with the string representing the type of the
 // object in the accumulator.
 IGNITION_HANDLER(TypeOf, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
   Node* result = Typeof(value);
   SetAccumulator(result);
   Dispatch();
@@ -1497,11 +1503,12 @@ IGNITION_HANDLER(TypeOf, InterpreterAssembler) {
 // Delete the property specified in the accumulator from the object
 // referenced by the register operand following strict mode semantics.
 IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) {
-  Node* object = LoadRegisterAtOperandIndex(0);
-  Node* key = GetAccumulator();
-  Node* context = GetContext();
-  Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
-                             SmiConstant(Smi::FromEnum(LanguageMode::kStrict)));
+  TNode<Object> object = LoadRegisterAtOperandIndex(0);
+  TNode<Object> key = GetAccumulator();
+  TNode<Context> context = GetContext();
+  TNode<Object> result =
+      CallBuiltin(Builtins::kDeleteProperty, context, object, key,
+                  SmiConstant(Smi::FromEnum(LanguageMode::kStrict)));
   SetAccumulator(result);
   Dispatch();
 }
@@ -1511,11 +1518,12 @@ IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) {
 // Delete the property specified in the accumulator from the object
 // referenced by the register operand following sloppy mode semantics.
 IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
-  Node* object = LoadRegisterAtOperandIndex(0);
-  Node* key = GetAccumulator();
-  Node* context = GetContext();
-  Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
-                             SmiConstant(Smi::FromEnum(LanguageMode::kSloppy)));
+  TNode<Object> object = LoadRegisterAtOperandIndex(0);
+  TNode<Object> key = GetAccumulator();
+  TNode<Context> context = GetContext();
+  TNode<Object> result =
+      CallBuiltin(Builtins::kDeleteProperty, context, object, key,
+                  SmiConstant(Smi::FromEnum(LanguageMode::kSloppy)));
   SetAccumulator(result);
   Dispatch();
 }
@@ -1525,9 +1533,9 @@ IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
 // Get the super constructor from the object referenced by the accumulator.
 // The result is stored in register |reg|.
 IGNITION_HANDLER(GetSuperConstructor, InterpreterAssembler) {
-  Node* active_function = GetAccumulator();
-  Node* context = GetContext();
-  Node* result = GetSuperConstructor(context, active_function);
+  TNode<JSFunction> active_function = CAST(GetAccumulator());
+  TNode<Context> context = GetContext();
+  TNode<Object> result = GetSuperConstructor(context, active_function);
   StoreRegisterAtOperandIndex(result, 0);
   Dispatch();
 }
@@ -1540,11 +1548,11 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
 
   // Generates code to perform a JS call that collects type feedback.
   void JSCall(ConvertReceiverMode receiver_mode) {
-    Node* function = LoadRegisterAtOperandIndex(0);
+    TNode<Object> function = LoadRegisterAtOperandIndex(0);
     RegListNodePair args = GetRegisterListAtOperandIndex(1);
     Node* slot_id = BytecodeOperandIdx(3);
-    Node* maybe_feedback_vector = LoadFeedbackVector();
-    Node* context = GetContext();
+    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+    TNode<Context> context = GetContext();
 
     // Collect the {function} feedback.
     CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
@@ -1555,9 +1563,9 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
 
   // Generates code to perform a JS call without collecting feedback.
   void JSCallNoFeedback(ConvertReceiverMode receiver_mode) {
-    Node* function = LoadRegisterAtOperandIndex(0);
+    TNode<Object> function = LoadRegisterAtOperandIndex(0);
     RegListNodePair args = GetRegisterListAtOperandIndex(1);
-    Node* context = GetContext();
+    TNode<Context> context = GetContext();
 
     // Call the function and dispatch to the next handler.
     CallJSAndDispatch(function, context, args, receiver_mode);
@@ -1574,10 +1582,10 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
     const int kSlotOperandIndex =
         kFirstArgumentOperandIndex + kRecieverAndArgOperandCount;
 
-    Node* function = LoadRegisterAtOperandIndex(0);
+    TNode<Object> function = LoadRegisterAtOperandIndex(0);
     Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex);
-    Node* maybe_feedback_vector = LoadFeedbackVector();
-    Node* context = GetContext();
+    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+    TNode<Context> context = GetContext();
 
     // Collect the {function} feedback.
     CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
@@ -1590,20 +1598,26 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
       case 1:
         CallJSAndDispatch(
             function, context, Int32Constant(arg_count), receiver_mode,
-            LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
+            static_cast<Node*>(
+                LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)));
         break;
       case 2:
         CallJSAndDispatch(
             function, context, Int32Constant(arg_count), receiver_mode,
-            LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
-            LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1));
+            static_cast<Node*>(
+                LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)),
+            static_cast<Node*>(
+                LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1)));
         break;
       case 3:
         CallJSAndDispatch(
             function, context, Int32Constant(arg_count), receiver_mode,
-            LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
-            LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
-            LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2));
+            static_cast<Node*>(
+                LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)),
+            static_cast<Node*>(
+                LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1)),
+            static_cast<Node*>(
+                LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2)));
         break;
       default:
         UNREACHABLE();
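
The static_cast<Node*> wrappers above are about template argument
deduction: the variadic CallJSAndDispatch deduces its trailing pack from
the arguments, and the casts pin that pack to Node* rather than letting it
deduce the TNode types. A minimal model of how deduction prefers the exact
TNode type over a conversion to Node* (a hypothetical Dispatch pair; the
real overload set is more involved):

#include <iostream>

struct Node {};
struct Object {};
template <typename T>
struct TNode {
  operator Node*() const { return nullptr; }  // implicit decay, as in CSA
};

void Dispatch(Node*) { std::cout << "Node* overload\n"; }
template <typename... TArgs>
void Dispatch(TArgs...) { std::cout << "template overload\n"; }

int main() {
  TNode<Object> arg;
  Dispatch(arg);                      // exact match: picks the template
  Dispatch(static_cast<Node*>(arg));  // cast forces the Node* overload
}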
@@ -1664,7 +1678,7 @@ IGNITION_HANDLER(CallNoFeedback, InterpreterJSCallAssembler) {
 IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
   Node* function_id = BytecodeOperandRuntimeId(0);
   RegListNodePair args = GetRegisterListAtOperandIndex(1);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   Node* result = CallRuntimeN(function_id, context, args);
   SetAccumulator(result);
   Dispatch();
@@ -1678,7 +1692,7 @@ IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
 IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) {
   Node* function_id = BytecodeOperandIntrinsicId(0);
   RegListNodePair args = GetRegisterListAtOperandIndex(1);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   Node* result = GenerateInvokeIntrinsic(this, function_id, context, args);
   SetAccumulator(result);
   Dispatch();
@@ -1694,7 +1708,7 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
   // Call the runtime function.
   Node* function_id = BytecodeOperandRuntimeId(0);
   RegListNodePair args = GetRegisterListAtOperandIndex(1);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   Node* result_pair = CallRuntimeN(function_id, context, args, 2);
   // Store the results in <first_return> and <first_return + 1>
   Node* result0 = Projection(0, result_pair);
@@ -1712,9 +1726,9 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
   RegListNodePair args = GetRegisterListAtOperandIndex(1);
 
   // Get the function to call from the native context.
-  Node* context = GetContext();
-  Node* native_context = LoadNativeContext(context);
-  Node* function = LoadContextElement(native_context, context_index);
+  TNode<Context> context = GetContext();
+  TNode<Context> native_context = LoadNativeContext(context);
+  TNode<Object> function = LoadContextElement(native_context, context_index);
 
   // Call the function.
   CallJSAndDispatch(function, context, args,
@@ -1728,11 +1742,11 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
 // final argument is always a spread.
 //
 IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
-  Node* callable = LoadRegisterAtOperandIndex(0);
+  TNode<Object> callable = LoadRegisterAtOperandIndex(0);
   RegListNodePair args = GetRegisterListAtOperandIndex(1);
   Node* slot_id = BytecodeOperandIdx(3);
-  Node* maybe_feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   // Call into CallWithSpread, which handles argument spreading and the call.
   CallJSWithSpreadAndDispatch(callable, context, args, slot_id,
@@ -1746,12 +1760,12 @@ IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
 // argument is always a spread. The new.target is in the accumulator.
 //
 IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
-  Node* new_target = GetAccumulator();
-  Node* constructor = LoadRegisterAtOperandIndex(0);
+  TNode<Object> new_target = GetAccumulator();
+  TNode<Object> constructor = LoadRegisterAtOperandIndex(0);
   RegListNodePair args = GetRegisterListAtOperandIndex(1);
   Node* slot_id = BytecodeOperandIdx(3);
-  Node* feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
   Node* result = ConstructWithSpread(constructor, context, new_target, args,
                                      slot_id, feedback_vector);
   SetAccumulator(result);
@@ -1765,12 +1779,12 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
 // registers. The new.target is in the accumulator.
 //
 IGNITION_HANDLER(Construct, InterpreterAssembler) {
-  Node* new_target = GetAccumulator();
-  Node* constructor = LoadRegisterAtOperandIndex(0);
+  TNode<Object> new_target = GetAccumulator();
+  TNode<Object> constructor = LoadRegisterAtOperandIndex(0);
   RegListNodePair args = GetRegisterListAtOperandIndex(1);
   Node* slot_id = BytecodeOperandIdx(3);
-  Node* feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
   Node* result = Construct(constructor, context, new_target, args, slot_id,
                            feedback_vector);
   SetAccumulator(result);
@@ -1784,9 +1798,9 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
       : InterpreterAssembler(state, bytecode, operand_scale) {}
 
   void CompareOpWithFeedback(Operation compare_op) {
-    Node* lhs = LoadRegisterAtOperandIndex(0);
-    Node* rhs = GetAccumulator();
-    Node* context = GetContext();
+    TNode<Object> lhs = LoadRegisterAtOperandIndex(0);
+    TNode<Object> rhs = GetAccumulator();
+    TNode<Context> context = GetContext();
 
     Variable var_type_feedback(this, MachineRepresentation::kTagged);
     Node* result;
@@ -1809,7 +1823,7 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
     }
 
     Node* slot_index = BytecodeOperandIdx(1);
-    Node* maybe_feedback_vector = LoadFeedbackVector();
+    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
     UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
                    slot_index);
     SetAccumulator(result);
@@ -1866,9 +1880,9 @@ IGNITION_HANDLER(TestGreaterThanOrEqual, InterpreterCompareOpAssembler) {
 // Test if the value in the <src> register is equal to the accumulator by
 // means of a simple comparison, as used for Smis and reference comparisons.
 IGNITION_HANDLER(TestReferenceEqual, InterpreterAssembler) {
-  Node* lhs = LoadRegisterAtOperandIndex(0);
-  Node* rhs = GetAccumulator();
-  Node* result = SelectBooleanConstant(WordEqual(lhs, rhs));
+  TNode<Object> lhs = LoadRegisterAtOperandIndex(0);
+  TNode<Object> rhs = GetAccumulator();
+  TNode<Oddball> result = SelectBooleanConstant(TaggedEqual(lhs, rhs));
   SetAccumulator(result);
   Dispatch();
 }
@@ -1878,12 +1892,12 @@ IGNITION_HANDLER(TestReferenceEqual, InterpreterAssembler) {
 // Test if the object referenced by the register operand is a property of the
 // object referenced by the accumulator.
 IGNITION_HANDLER(TestIn, InterpreterAssembler) {
-  Node* name = LoadRegisterAtOperandIndex(0);
-  Node* object = GetAccumulator();
+  TNode<Object> name = LoadRegisterAtOperandIndex(0);
+  TNode<Object> object = GetAccumulator();
   Node* raw_slot = BytecodeOperandIdx(1);
-  Node* smi_slot = SmiTag(raw_slot);
-  Node* feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<Smi> smi_slot = SmiTag(raw_slot);
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   VARIABLE(var_result, MachineRepresentation::kTagged);
   var_result.Bind(CallBuiltin(Builtins::kKeyedHasIC, context, object, name,
@@ -1897,11 +1911,11 @@ IGNITION_HANDLER(TestIn, InterpreterAssembler) {
 // Test if the object referenced by the <src> register is an instance of the type
 // referenced by the accumulator.
 IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
-  Node* object = LoadRegisterAtOperandIndex(0);
-  Node* callable = GetAccumulator();
+  TNode<Object> object = LoadRegisterAtOperandIndex(0);
+  TNode<Object> callable = GetAccumulator();
   Node* slot_id = BytecodeOperandIdx(1);
-  Node* feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   Label feedback_done(this);
   GotoIf(IsUndefined(feedback_vector), &feedback_done);
@@ -1922,14 +1936,15 @@ IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
 // document.all).
 IGNITION_HANDLER(TestUndetectable, InterpreterAssembler) {
   Label return_false(this), end(this);
-  Node* object = GetAccumulator();
+  TNode<Object> object = GetAccumulator();
 
   // If the object is a Smi, return false.
   SetAccumulator(FalseConstant());
   GotoIf(TaggedIsSmi(object), &end);
 
   // If it is a HeapObject, load the map and check for undetectable bit.
-  Node* result = SelectBooleanConstant(IsUndetectableMap(LoadMap(object)));
+  TNode<Oddball> result =
+      SelectBooleanConstant(IsUndetectableMap(LoadMap(CAST(object))));
   SetAccumulator(result);
   Goto(&end);
 
@@ -1941,8 +1956,9 @@ IGNITION_HANDLER(TestUndetectable, InterpreterAssembler) {
 //
 // Test if the value in accumulator is strictly equal to null.
 IGNITION_HANDLER(TestNull, InterpreterAssembler) {
-  Node* object = GetAccumulator();
-  Node* result = SelectBooleanConstant(WordEqual(object, NullConstant()));
+  TNode<Object> object = GetAccumulator();
+  TNode<Oddball> result =
+      SelectBooleanConstant(TaggedEqual(object, NullConstant()));
   SetAccumulator(result);
   Dispatch();
 }
@@ -1951,8 +1967,9 @@ IGNITION_HANDLER(TestNull, InterpreterAssembler) {
 //
 // Test if the value in the accumulator is strictly equal to undefined.
 IGNITION_HANDLER(TestUndefined, InterpreterAssembler) {
-  Node* object = GetAccumulator();
-  Node* result = SelectBooleanConstant(WordEqual(object, UndefinedConstant()));
+  TNode<Object> object = GetAccumulator();
+  TNode<Oddball> result =
+      SelectBooleanConstant(TaggedEqual(object, UndefinedConstant()));
   SetAccumulator(result);
   Dispatch();
 }
@@ -1962,7 +1979,7 @@ IGNITION_HANDLER(TestUndefined, InterpreterAssembler) {
 // Tests if the object in the <accumulator> is typeof the literal represented
 // by |literal_flag|.
 IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
-  Node* object = GetAccumulator();
+  TNode<Object> object = GetAccumulator();
   Node* literal_flag = BytecodeOperandFlag(0);
 
 #define MAKE_LABEL(name, lower_case) Label if_##lower_case(this);
@@ -1997,25 +2014,25 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
   {
     Comment("IfString");
     GotoIf(TaggedIsSmi(object), &if_false);
-    Branch(IsString(object), &if_true, &if_false);
+    Branch(IsString(CAST(object)), &if_true, &if_false);
   }
   BIND(&if_symbol);
   {
     Comment("IfSymbol");
     GotoIf(TaggedIsSmi(object), &if_false);
-    Branch(IsSymbol(object), &if_true, &if_false);
+    Branch(IsSymbol(CAST(object)), &if_true, &if_false);
   }
   BIND(&if_boolean);
   {
     Comment("IfBoolean");
-    GotoIf(WordEqual(object, TrueConstant()), &if_true);
-    Branch(WordEqual(object, FalseConstant()), &if_true, &if_false);
+    GotoIf(TaggedEqual(object, TrueConstant()), &if_true);
+    Branch(TaggedEqual(object, FalseConstant()), &if_true, &if_false);
   }
   BIND(&if_bigint);
   {
     Comment("IfBigInt");
     GotoIf(TaggedIsSmi(object), &if_false);
-    Branch(IsBigInt(object), &if_true, &if_false);
+    Branch(IsBigInt(CAST(object)), &if_true, &if_false);
   }
   BIND(&if_undefined);
   {
@@ -2023,15 +2040,15 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
     GotoIf(TaggedIsSmi(object), &if_false);
     // Check it is not null and the map has the undetectable bit set.
     GotoIf(IsNull(object), &if_false);
-    Branch(IsUndetectableMap(LoadMap(object)), &if_true, &if_false);
+    Branch(IsUndetectableMap(LoadMap(CAST(object))), &if_true, &if_false);
   }
   BIND(&if_function);
   {
     Comment("IfFunction");
     GotoIf(TaggedIsSmi(object), &if_false);
     // Check if callable bit is set and not undetectable.
-    Node* map_bitfield = LoadMapBitField(LoadMap(object));
-    Node* callable_undetectable =
+    TNode<Int32T> map_bitfield = LoadMapBitField(LoadMap(CAST(object)));
+    TNode<Int32T> callable_undetectable =
         Word32And(map_bitfield, Int32Constant(Map::IsUndetectableBit::kMask |
                                               Map::IsCallableBit::kMask));
     Branch(Word32Equal(callable_undetectable,
@@ -2047,10 +2064,10 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
     GotoIf(IsNull(object), &if_true);
 
     // Check if the object is a receiver type and is not undefined or callable.
-    Node* map = LoadMap(object);
+    TNode<Map> map = LoadMap(CAST(object));
     GotoIfNot(IsJSReceiverMap(map), &if_false);
-    Node* map_bitfield = LoadMapBitField(map);
-    Node* callable_undetectable =
+    TNode<Int32T> map_bitfield = LoadMapBitField(map);
+    TNode<Int32T> callable_undetectable =
         Word32And(map_bitfield, Int32Constant(Map::IsUndetectableBit::kMask |
                                               Map::IsCallableBit::kMask));
     Branch(Word32Equal(callable_undetectable, Int32Constant(0)), &if_true,
@@ -2089,7 +2106,7 @@ IGNITION_HANDLER(Jump, InterpreterAssembler) {
 // Jump by the number of bytes in the Smi in the |idx| entry in the constant
 // pool.
 IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
   Jump(relative_jump);
 }
 
@@ -2099,11 +2116,10 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
 // accumulator contains true. This only works for boolean inputs, and
 // will misbehave if passed arbitrary input values.
 IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
+  TNode<Object> accumulator = GetAccumulator();
   Node* relative_jump = BytecodeOperandUImmWord(0);
-  CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
-  CSA_ASSERT(this, IsBoolean(accumulator));
-  JumpIfWordEqual(accumulator, TrueConstant(), relative_jump);
+  CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+  JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
 }
 
 // JumpIfTrueConstant <idx>
@@ -2112,11 +2128,10 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
 // pool if the accumulator contains true. This only works for boolean inputs,
 // and will misbehave if passed arbitrary input values.
 IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
-  CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
-  CSA_ASSERT(this, IsBoolean(accumulator));
-  JumpIfWordEqual(accumulator, TrueConstant(), relative_jump);
+  TNode<Object> accumulator = GetAccumulator();
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+  JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
 }
 
 // JumpIfFalse <imm>
@@ -2125,11 +2140,10 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
 // accumulator contains false. This only works for boolean inputs, and
 // will misbehave if passed arbitrary input values.
 IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
+  TNode<Object> accumulator = GetAccumulator();
   Node* relative_jump = BytecodeOperandUImmWord(0);
-  CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
-  CSA_ASSERT(this, IsBoolean(accumulator));
-  JumpIfWordEqual(accumulator, FalseConstant(), relative_jump);
+  CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+  JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
 }
 
 // JumpIfFalseConstant <idx>
@@ -2138,11 +2152,10 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
 // pool if the accumulator contains false. This only works for boolean inputs,
 // and will misbehave if passed arbitrary input values.
 IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
-  CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
-  CSA_ASSERT(this, IsBoolean(accumulator));
-  JumpIfWordEqual(accumulator, FalseConstant(), relative_jump);
+  TNode<Object> accumulator = GetAccumulator();
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+  JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
 }
 
 // JumpIfToBooleanTrue <imm>
@@ -2150,7 +2163,7 @@ IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
 // Jump by the number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is true when the object is cast to boolean.
 IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
   Node* relative_jump = BytecodeOperandUImmWord(0);
   Label if_true(this), if_false(this);
   BranchIfToBooleanIsTrue(value, &if_true, &if_false);
@@ -2166,8 +2179,8 @@ IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
 // pool if the object referenced by the accumulator is true when the object is
 // cast to boolean.
 IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
-  Node* value = GetAccumulator();
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  TNode<Object> value = GetAccumulator();
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
   Label if_true(this), if_false(this);
   BranchIfToBooleanIsTrue(value, &if_true, &if_false);
   BIND(&if_true);
@@ -2181,7 +2194,7 @@ IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
 // Jump by the number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is false when the object is cast to boolean.
 IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
   Node* relative_jump = BytecodeOperandUImmWord(0);
   Label if_true(this), if_false(this);
   BranchIfToBooleanIsTrue(value, &if_true, &if_false);
@@ -2197,8 +2210,8 @@ IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
 // pool if the object referenced by the accumulator is false when the object is
 // cast to boolean.
 IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
-  Node* value = GetAccumulator();
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  TNode<Object> value = GetAccumulator();
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
   Label if_true(this), if_false(this);
   BranchIfToBooleanIsTrue(value, &if_true, &if_false);
   BIND(&if_true);
@@ -2212,9 +2225,9 @@ IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
 // Jump by the number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the null constant.
 IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
+  TNode<Object> accumulator = GetAccumulator();
   Node* relative_jump = BytecodeOperandUImmWord(0);
-  JumpIfWordEqual(accumulator, NullConstant(), relative_jump);
+  JumpIfTaggedEqual(accumulator, NullConstant(), relative_jump);
 }
 
 // JumpIfNullConstant <idx>
@@ -2222,9 +2235,9 @@ IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
 // Jump by the number of bytes in the Smi in the |idx| entry in the constant
 // pool if the object referenced by the accumulator is the null constant.
 IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
-  JumpIfWordEqual(accumulator, NullConstant(), relative_jump);
+  TNode<Object> accumulator = GetAccumulator();
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  JumpIfTaggedEqual(accumulator, NullConstant(), relative_jump);
 }
 
 // JumpIfNotNull <imm>
@@ -2232,9 +2245,9 @@ IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
 // Jump by the number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is not the null constant.
 IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
+  TNode<Object> accumulator = GetAccumulator();
   Node* relative_jump = BytecodeOperandUImmWord(0);
-  JumpIfWordNotEqual(accumulator, NullConstant(), relative_jump);
+  JumpIfTaggedNotEqual(accumulator, NullConstant(), relative_jump);
 }
 
 // JumpIfNotNullConstant <idx>
@@ -2242,9 +2255,9 @@ IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
 // Jump by the number of bytes in the Smi in the |idx| entry in the constant
 // pool if the object referenced by the accumulator is not the null constant.
 IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
-  JumpIfWordNotEqual(accumulator, NullConstant(), relative_jump);
+  TNode<Object> accumulator = GetAccumulator();
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  JumpIfTaggedNotEqual(accumulator, NullConstant(), relative_jump);
 }
 
 // JumpIfUndefined <imm>
@@ -2252,9 +2265,9 @@ IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
 // Jump by the number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the undefined constant.
 IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
+  TNode<Object> accumulator = GetAccumulator();
   Node* relative_jump = BytecodeOperandUImmWord(0);
-  JumpIfWordEqual(accumulator, UndefinedConstant(), relative_jump);
+  JumpIfTaggedEqual(accumulator, UndefinedConstant(), relative_jump);
 }
 
 // JumpIfUndefinedConstant <idx>
@@ -2262,9 +2275,9 @@ IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
 // Jump by the number of bytes in the Smi in the |idx| entry in the constant
 // pool if the object referenced by the accumulator is the undefined constant.
 IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
-  JumpIfWordEqual(accumulator, UndefinedConstant(), relative_jump);
+  TNode<Object> accumulator = GetAccumulator();
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  JumpIfTaggedEqual(accumulator, UndefinedConstant(), relative_jump);
 }
 
 // JumpIfNotUndefined <imm>
@@ -2272,9 +2285,9 @@ IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
 // Jump by the number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is not the undefined constant.
 IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
+  TNode<Object> accumulator = GetAccumulator();
   Node* relative_jump = BytecodeOperandUImmWord(0);
-  JumpIfWordNotEqual(accumulator, UndefinedConstant(), relative_jump);
+  JumpIfTaggedNotEqual(accumulator, UndefinedConstant(), relative_jump);
 }
 
 // JumpIfNotUndefinedConstant <idx>
@@ -2283,9 +2296,44 @@ IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
 // pool if the object referenced by the accumulator is not the undefined
 // constant.
 IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
-  JumpIfWordNotEqual(accumulator, UndefinedConstant(), relative_jump);
+  TNode<Object> accumulator = GetAccumulator();
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  JumpIfTaggedNotEqual(accumulator, UndefinedConstant(), relative_jump);
+}
+
+// JumpIfUndefinedOrNull <imm>
+//
+// Jump by the number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is the undefined constant or the null constant.
+IGNITION_HANDLER(JumpIfUndefinedOrNull, InterpreterAssembler) {
+  TNode<Object> accumulator = GetAccumulator();
+
+  Label do_jump(this);
+  GotoIf(IsUndefined(accumulator), &do_jump);
+  GotoIf(IsNull(accumulator), &do_jump);
+  Dispatch();
+
+  BIND(&do_jump);
+  Node* relative_jump = BytecodeOperandUImmWord(0);
+  Jump(relative_jump);
+}
+
+// JumpIfUndefinedOrNullConstant <idx>
+//
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is the undefined constant or
+// the null constant.
+IGNITION_HANDLER(JumpIfUndefinedOrNullConstant, InterpreterAssembler) {
+  TNode<Object> accumulator = GetAccumulator();
+
+  Label do_jump(this);
+  GotoIf(IsUndefined(accumulator), &do_jump);
+  GotoIf(IsNull(accumulator), &do_jump);
+  Dispatch();
+
+  BIND(&do_jump);
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  Jump(relative_jump);
 }
 
 // JumpIfJSReceiver <imm>
@@ -2293,14 +2341,14 @@ IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
 // Jump by the number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is a JSReceiver.
 IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
+  TNode<Object> accumulator = GetAccumulator();
   Node* relative_jump = BytecodeOperandUImmWord(0);
 
   Label if_object(this), if_notobject(this, Label::kDeferred), if_notsmi(this);
   Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
 
   BIND(&if_notsmi);
-  Branch(IsJSReceiver(accumulator), &if_object, &if_notobject);
+  Branch(IsJSReceiver(CAST(accumulator)), &if_object, &if_notobject);
   BIND(&if_object);
   Jump(relative_jump);
 
@@ -2313,14 +2361,14 @@ IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
 // Jump by the number of bytes in the Smi in the |idx| entry in the constant
 // pool if the object referenced by the accumulator is a JSReceiver.
 IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
-  Node* accumulator = GetAccumulator();
-  Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
+  TNode<Object> accumulator = GetAccumulator();
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
 
   Label if_object(this), if_notobject(this), if_notsmi(this);
   Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
 
   BIND(&if_notsmi);
-  Branch(IsJSReceiver(accumulator), &if_object, &if_notobject);
+  Branch(IsJSReceiver(CAST(accumulator)), &if_object, &if_notobject);
 
   BIND(&if_object);
   Jump(relative_jump);
@@ -2342,7 +2390,7 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
   // Check if OSR points at the given {loop_depth} are armed by comparing
   // {loop_depth} to the current {osr_level} loaded from the BytecodeArray header.
   Label ok(this), osr_armed(this, Label::kDeferred);
-  Node* condition = Int32GreaterThanOrEqual(loop_depth, osr_level);
+  TNode<BoolT> condition = Int32GreaterThanOrEqual(loop_depth, osr_level);
   Branch(condition, &ok, &osr_armed);
 
   BIND(&ok);
@@ -2351,8 +2399,8 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
   BIND(&osr_armed);
   {
     Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
-    Node* target = HeapConstant(callable.code());
-    Node* context = GetContext();
+    TNode<Code> target = HeapConstant(callable.code());
+    TNode<Context> context = GetContext();
     CallStub(callable.descriptor(), target, context);
     JumpBackward(relative_jump);
   }
@@ -2366,7 +2414,7 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
 // case_value falls outside of the table |table_length|, fall through to the
 // next bytecode.
 IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
-  Node* acc = GetAccumulator();
+  TNode<Object> acc = GetAccumulator();
   Node* table_start = BytecodeOperandIdx(0);
   Node* table_length = BytecodeOperandUImmWord(1);
   Node* case_value_base = BytecodeOperandImmIntPtr(2);
@@ -2378,11 +2426,11 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
   // accumulator values.
   CSA_ASSERT(this, TaggedIsSmi(acc));
 
-  Node* case_value = IntPtrSub(SmiUntag(acc), case_value_base);
+  TNode<WordT> case_value = IntPtrSub(SmiUntag(CAST(acc)), case_value_base);
   GotoIf(IntPtrLessThan(case_value, IntPtrConstant(0)), &fall_through);
   GotoIf(IntPtrGreaterThanOrEqual(case_value, table_length), &fall_through);
-  Node* entry = IntPtrAdd(table_start, case_value);
-  Node* relative_jump = LoadAndUntagConstantPoolEntry(entry);
+  TNode<WordT> entry = IntPtrAdd(table_start, case_value);
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntry(entry);
   Jump(relative_jump);
 
   BIND(&fall_through);
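
The table lookup above is ordinary base-plus-offset arithmetic with two
bounds checks, falling through when the accumulator lands outside the
table. A standalone sketch of the same logic (the vector stands in for the
constant-pool slice):

#include <cstdint>
#include <vector>

// Returns the jump offset for |acc|, or |fall_through| when the value is
// outside [case_value_base, case_value_base + table size).
intptr_t SwitchOnSmi(const std::vector<intptr_t>& table,
                     intptr_t case_value_base, intptr_t acc,
                     intptr_t fall_through) {
  intptr_t case_value = acc - case_value_base;
  if (case_value < 0) return fall_through;
  if (case_value >= static_cast<intptr_t>(table.size())) return fall_through;
  return table[case_value];
}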
@@ -2395,10 +2443,10 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
 // <flags> and the pattern in <pattern_idx>.
 IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
   Node* pattern = LoadConstantPoolEntryAtOperandIndex(0);
-  Node* feedback_vector = LoadFeedbackVector();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
   Node* slot_id = BytecodeOperandIdx(1);
-  Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
-  Node* context = GetContext();
+  TNode<Smi> flags = SmiFromInt32(BytecodeOperandFlag(2));
+  TNode<Context> context = GetContext();
 
   VARIABLE(result, MachineRepresentation::kTagged);
 
@@ -2414,9 +2462,9 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
 // Creates an array literal for literal index <literal_idx> with
 // CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
 IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
-  Node* feedback_vector = LoadFeedbackVector();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
   Node* slot_id = BytecodeOperandIdx(1);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   Node* bytecode_flags = BytecodeOperandFlag(2);
 
   Label fast_shallow_clone(this), call_runtime(this, Label::kDeferred);
@@ -2439,11 +2487,12 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
 
   BIND(&call_runtime);
   {
-    Node* flags_raw = DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
-        bytecode_flags);
-    Node* flags = SmiTag(flags_raw);
+    TNode<WordT> flags_raw =
+        DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
+            bytecode_flags);
+    TNode<Smi> flags = SmiTag(Signed(flags_raw));
     Node* constant_elements = LoadConstantPoolEntryAtOperandIndex(0);
-    Node* result =
+    TNode<Object> result =
         CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
                     SmiTag(slot_id), constant_elements, flags);
     SetAccumulator(result);
@@ -2455,9 +2504,9 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
 //
 // Creates an empty JSArray literal for literal index <literal_idx>.
 IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
-  Node* feedback_vector = LoadFeedbackVector();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
   Node* slot_id = BytecodeOperandIdx(0);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
 
   Label no_feedback(this, Label::kDeferred), end(this);
   VARIABLE(result, MachineRepresentation::kTagged);
@@ -2488,9 +2537,9 @@ IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
 // Spread the given iterable from the accumulator into a new JSArray.
 // TODO(neis): Turn this into an intrinsic when we're running out of bytecodes.
 IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) {
-  Node* iterable = GetAccumulator();
-  Node* context = GetContext();
-  Node* result =
+  TNode<Object> iterable = GetAccumulator();
+  TNode<Context> context = GetContext();
+  TNode<Object> result =
       CallBuiltin(Builtins::kIterableToListWithSymbolLookup, context, iterable);
   SetAccumulator(result);
   Dispatch();
@@ -2501,7 +2550,7 @@ IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) {
 // Creates an object literal for literal index <literal_idx> with
 // CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
 IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
-  Node* feedback_vector = LoadFeedbackVector();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
   Node* slot_id = BytecodeOperandIdx(1);
   Node* bytecode_flags = BytecodeOperandFlag(2);
 
@@ -2529,13 +2578,14 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
     // If we can't do a fast clone, call into the runtime.
     Node* object_boilerplate_description =
         LoadConstantPoolEntryAtOperandIndex(0);
-    Node* context = GetContext();
+    TNode<Context> context = GetContext();
 
-    Node* flags_raw = DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
-        bytecode_flags);
-    Node* flags = SmiTag(flags_raw);
+    TNode<WordT> flags_raw =
+        DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
+            bytecode_flags);
+    TNode<Smi> flags = SmiTag(Signed(flags_raw));
 
-    Node* result =
+    TNode<Object> result =
         CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
                     SmiTag(slot_id), object_boilerplate_description, flags);
     SetAccumulator(result);
@@ -2548,7 +2598,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
 //
 // Creates an empty JSObject literal.
 IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   ConstructorBuiltinsAssembler constructor_assembler(state());
   Node* result = constructor_assembler.EmitCreateEmptyObjectLiteral(context);
   SetAccumulator(result);
@@ -2560,15 +2610,15 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
 // Allocates a new JSObject with each enumerable own property copied from
 // {source}, converting getters into data properties.
 IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
-  Node* source = LoadRegisterAtOperandIndex(0);
+  TNode<Object> source = LoadRegisterAtOperandIndex(0);
   Node* bytecode_flags = BytecodeOperandFlag(1);
-  Node* raw_flags =
+  TNode<WordT> raw_flags =
       DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(bytecode_flags);
-  Node* smi_flags = SmiTag(raw_flags);
+  TNode<Smi> smi_flags = SmiTag(Signed(raw_flags));
   Node* raw_slot = BytecodeOperandIdx(2);
-  Node* smi_slot = SmiTag(raw_slot);
-  Node* maybe_feedback_vector = LoadFeedbackVector();
-  Node* context = GetContext();
+  TNode<Smi> smi_slot = SmiTag(raw_slot);
+  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+  TNode<Context> context = GetContext();
 
   Variable var_result(this, MachineRepresentation::kTagged);
   var_result.Bind(CallBuiltin(Builtins::kCloneObjectIC, context, source,
@@ -2583,7 +2633,7 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
 // accumulator, creating and caching the site object on-demand as per the
 // specification.
 IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
-  Node* feedback_vector = LoadFeedbackVector();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
   Node* slot = BytecodeOperandIdx(1);
 
   Label call_runtime(this, Label::kDeferred);
@@ -2592,7 +2642,7 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
   TNode<Object> cached_value =
       CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
 
-  GotoIf(WordEqual(cached_value, SmiConstant(0)), &call_runtime);
+  GotoIf(TaggedEqual(cached_value, SmiConstant(0)), &call_runtime);
 
   SetAccumulator(cached_value);
   Dispatch();
@@ -2600,13 +2650,14 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
   BIND(&call_runtime);
   {
     Node* description = LoadConstantPoolEntryAtOperandIndex(0);
-    Node* slot_smi = SmiTag(slot);
-    Node* closure = LoadRegister(Register::function_closure());
-    Node* shared_info =
-        LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
-    Node* context = GetContext();
-    Node* result = CallRuntime(Runtime::kGetTemplateObject, context,
-                               description, shared_info, slot_smi);
+    TNode<Smi> slot_smi = SmiTag(slot);
+    TNode<JSFunction> closure =
+        CAST(LoadRegister(Register::function_closure()));
+    TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
+        closure, JSFunction::kSharedFunctionInfoOffset);
+    TNode<Context> context = GetContext();
+    TNode<Object> result = CallRuntime(Runtime::kGetTemplateObject, context,
+                                       description, shared_info, slot_smi);
 
     Label end(this);
     GotoIf(IsUndefined(feedback_vector), &end);
@@ -2626,12 +2677,13 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
 IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
   Node* shared = LoadConstantPoolEntryAtOperandIndex(0);
   Node* flags = BytecodeOperandFlag(2);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   Node* slot = BytecodeOperandIdx(1);
 
   Label if_undefined(this);
-  TNode<FixedArray> feedback_cell_array =
-      LoadClosureFeedbackArray(LoadRegister(Register::function_closure()));
+  TNode<ClosureFeedbackCellArray> feedback_cell_array =
+      LoadClosureFeedbackArray(
+          CAST(LoadRegister(Register::function_closure())));
   TNode<FeedbackCell> feedback_cell =
       CAST(LoadFixedArrayElement(feedback_cell_array, slot));
 
@@ -2641,7 +2693,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
 
   BIND(&if_fast);
   {
-    Node* result =
+    TNode<Object> result =
         CallBuiltin(Builtins::kFastNewClosure, context, shared, feedback_cell);
     SetAccumulator(result);
     Dispatch();
@@ -2655,7 +2707,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
 
     BIND(&if_newspace);
     {
-      Node* result =
+      TNode<Object> result =
           CallRuntime(Runtime::kNewClosure, context, shared, feedback_cell);
       SetAccumulator(result);
       Dispatch();
@@ -2663,8 +2715,8 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
 
     BIND(&if_oldspace);
     {
-      Node* result = CallRuntime(Runtime::kNewClosure_Tenured, context, shared,
-                                 feedback_cell);
+      TNode<Object> result = CallRuntime(Runtime::kNewClosure_Tenured, context,
+                                         shared, feedback_cell);
       SetAccumulator(result);
       Dispatch();
     }
@@ -2676,7 +2728,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
 // Creates a new block context with the scope info constant at |index|.
 IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
   Node* scope_info = LoadConstantPoolEntryAtOperandIndex(0);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   SetAccumulator(CallRuntime(Runtime::kPushBlockContext, context, scope_info));
   Dispatch();
 }
@@ -2686,9 +2738,9 @@ IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
 // Creates a new context for a catch block with the |exception| in a register
 // and the ScopeInfo at |scope_info_idx|.
 IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) {
-  Node* exception = LoadRegisterAtOperandIndex(0);
+  TNode<Object> exception = LoadRegisterAtOperandIndex(0);
   Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   SetAccumulator(
       CallRuntime(Runtime::kPushCatchContext, context, exception, scope_info));
   Dispatch();
@@ -2700,8 +2752,8 @@ IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) {
 IGNITION_HANDLER(CreateFunctionContext, InterpreterAssembler) {
   Node* scope_info_idx = BytecodeOperandIdx(0);
   Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
-  Node* slots = BytecodeOperandUImm(1);
-  Node* context = GetContext();
+  TNode<Uint32T> slots = BytecodeOperandUImm(1);
+  TNode<Context> context = GetContext();
   ConstructorBuiltinsAssembler constructor_assembler(state());
   SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
       scope_info, slots, context, FUNCTION_SCOPE));
@@ -2714,8 +2766,8 @@ IGNITION_HANDLER(CreateFunctionContext, InterpreterAssembler) {
 IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) {
   Node* scope_info_idx = BytecodeOperandIdx(0);
   Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
-  Node* slots = BytecodeOperandUImm(1);
-  Node* context = GetContext();
+  TNode<Uint32T> slots = BytecodeOperandUImm(1);
+  TNode<Context> context = GetContext();
   ConstructorBuiltinsAssembler constructor_assembler(state());
   SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
       scope_info, slots, context, EVAL_SCOPE));
@@ -2727,9 +2779,9 @@ IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) {
 // Creates a new context with the ScopeInfo at |scope_info_idx| for a
 // with-statement with the object in |register|.
 IGNITION_HANDLER(CreateWithContext, InterpreterAssembler) {
-  Node* object = LoadRegisterAtOperandIndex(0);
+  TNode<Object> object = LoadRegisterAtOperandIndex(0);
   Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   SetAccumulator(
       CallRuntime(Runtime::kPushWithContext, context, object, scope_info));
   Dispatch();
@@ -2739,8 +2791,8 @@ IGNITION_HANDLER(CreateWithContext, InterpreterAssembler) {
 //
 // Creates a new mapped arguments object.
 IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
-  Node* closure = LoadRegister(Register::function_closure());
-  Node* context = GetContext();
+  TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
+  TNode<Context> context = GetContext();
 
   Label if_duplicate_parameters(this, Label::kDeferred);
   Label if_not_duplicate_parameters(this);
@@ -2748,11 +2800,11 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
   // Check if function has duplicate parameters.
   // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
   // duplicate parameters.
-  Node* shared_info =
-      LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
+  TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
+      closure, JSFunction::kSharedFunctionInfoOffset);
   Node* flags = LoadObjectField(shared_info, SharedFunctionInfo::kFlagsOffset,
                                 MachineType::Uint32());
-  Node* has_duplicate_parameters =
+  TNode<BoolT> has_duplicate_parameters =
       IsSetWord32<SharedFunctionInfo::HasDuplicateParametersBit>(flags);
   Branch(has_duplicate_parameters, &if_duplicate_parameters,
          &if_not_duplicate_parameters);
@@ -2768,7 +2820,7 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
 
   BIND(&if_duplicate_parameters);
   {
-    Node* result =
+    TNode<Object> result =
         CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
     SetAccumulator(result);
     Dispatch();
@@ -2779,8 +2831,8 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
 //
 // Creates a new unmapped arguments object.
 IGNITION_HANDLER(CreateUnmappedArguments, InterpreterAssembler) {
-  Node* context = GetContext();
-  Node* closure = LoadRegister(Register::function_closure());
+  TNode<Context> context = GetContext();
+  TNode<Object> closure = LoadRegister(Register::function_closure());
   ArgumentsBuiltinsAssembler builtins_assembler(state());
   Node* result =
       builtins_assembler.EmitFastNewStrictArguments(context, closure);
@@ -2792,8 +2844,8 @@ IGNITION_HANDLER(CreateUnmappedArguments, InterpreterAssembler) {
 //
 // Creates a new rest parameter array.
 IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
-  Node* closure = LoadRegister(Register::function_closure());
-  Node* context = GetContext();
+  TNode<Object> closure = LoadRegister(Register::function_closure());
+  TNode<Context> context = GetContext();
   ArgumentsBuiltinsAssembler builtins_assembler(state());
   Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure);
   SetAccumulator(result);
@@ -2804,7 +2856,7 @@ IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
 //
 // Performs a stack guard check.
 IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
-  TNode<Context> context = CAST(GetContext());
+  TNode<Context> context = GetContext();
   PerformStackCheck(context);
   Dispatch();
 }
@@ -2814,10 +2866,10 @@ IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
 // Sets the pending message to the value in the accumulator, and returns the
 // previous pending message in the accumulator.
 IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) {
-  Node* pending_message = ExternalConstant(
+  TNode<ExternalReference> pending_message = ExternalConstant(
       ExternalReference::address_of_pending_message_obj(isolate()));
   Node* previous_message = Load(MachineType::TaggedPointer(), pending_message);
-  Node* new_message = GetAccumulator();
+  TNode<Object> new_message = GetAccumulator();
   StoreFullTaggedNoWriteBarrier(pending_message, new_message);
   SetAccumulator(previous_message);
   Dispatch();
@@ -2827,8 +2879,8 @@ IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) {
 //
 // Throws the exception in the accumulator.
 IGNITION_HANDLER(Throw, InterpreterAssembler) {
-  Node* exception = GetAccumulator();
-  Node* context = GetContext();
+  TNode<Object> exception = GetAccumulator();
+  TNode<Context> context = GetContext();
   CallRuntime(Runtime::kThrow, context, exception);
   // We shouldn't ever return from a throw.
   Abort(AbortReason::kUnexpectedReturnFromThrow);
@@ -2839,8 +2891,8 @@ IGNITION_HANDLER(Throw, InterpreterAssembler) {
 //
 // Re-throws the exception in the accumulator.
 IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
-  Node* exception = GetAccumulator();
-  Node* context = GetContext();
+  TNode<Object> exception = GetAccumulator();
+  TNode<Context> context = GetContext();
   CallRuntime(Runtime::kReThrow, context, exception);
   // We shouldn't ever return from a throw.
   Abort(AbortReason::kUnexpectedReturnFromThrow);
@@ -2861,7 +2913,7 @@ IGNITION_HANDLER(Abort, InterpreterAssembler) {
 // Return the value in the accumulator.
 IGNITION_HANDLER(Return, InterpreterAssembler) {
   UpdateInterruptBudgetOnReturn();
-  Node* accumulator = GetAccumulator();
+  TNode<Object> accumulator = GetAccumulator();
   Return(accumulator);
 }
 
@@ -2869,10 +2921,10 @@ IGNITION_HANDLER(Return, InterpreterAssembler) {
 //
 // Throws an exception if the value in the accumulator is TheHole.
 IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
 
   Label throw_error(this, Label::kDeferred);
-  GotoIf(WordEqual(value, TheHoleConstant()), &throw_error);
+  GotoIf(TaggedEqual(value, TheHoleConstant()), &throw_error);
   Dispatch();
 
   BIND(&throw_error);
@@ -2890,10 +2942,10 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
 //
 // Throws an exception if the value in the accumulator is TheHole.
 IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
 
   Label throw_error(this, Label::kDeferred);
-  GotoIf(WordEqual(value, TheHoleConstant()), &throw_error);
+  GotoIf(TaggedEqual(value, TheHoleConstant()), &throw_error);
   Dispatch();
 
   BIND(&throw_error);
@@ -2910,10 +2962,10 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
 // Throws SuperAlreadyCalled exception if the value in the accumulator is not
 // TheHole.
 IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
-  Node* value = GetAccumulator();
+  TNode<Object> value = GetAccumulator();
 
   Label throw_error(this, Label::kDeferred);
-  GotoIf(WordNotEqual(value, TheHoleConstant()), &throw_error);
+  GotoIf(TaggedNotEqual(value, TheHoleConstant()), &throw_error);
   Dispatch();
 
   BIND(&throw_error);
@@ -2929,7 +2981,7 @@ IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
 //
 // Call runtime to handle debugger statement.
 IGNITION_HANDLER(Debugger, InterpreterAssembler) {
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
   CallStub(CodeFactory::HandleDebuggerStatement(isolate()), context);
   Dispatch();
 }
@@ -2937,17 +2989,17 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
 // DebugBreak
 //
 // Call runtime to handle a debug break.
-#define DEBUG_BREAK(Name, ...)                                             \
-  IGNITION_HANDLER(Name, InterpreterAssembler) {                           \
-    Node* context = GetContext();                                          \
-    Node* accumulator = GetAccumulator();                                  \
-    Node* result_pair =                                                    \
-        CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
-    Node* return_value = Projection(0, result_pair);                       \
-    Node* original_bytecode = SmiUntag(Projection(1, result_pair));        \
-    MaybeDropFrames(context);                                              \
-    SetAccumulator(return_value);                                          \
-    DispatchToBytecode(original_bytecode, BytecodeOffset());               \
+#define DEBUG_BREAK(Name, ...)                                               \
+  IGNITION_HANDLER(Name, InterpreterAssembler) {                             \
+    TNode<Context> context = GetContext();                                   \
+    TNode<Object> accumulator = GetAccumulator();                            \
+    TNode<Object> result_pair =                                              \
+        CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator);   \
+    Node* return_value = Projection(0, result_pair);                         \
+    TNode<IntPtrT> original_bytecode = SmiUntag(Projection(1, result_pair)); \
+    MaybeDropFrames(context);                                                \
+    SetAccumulator(return_value);                                            \
+    DispatchToBytecode(original_bytecode, BytecodeOffset());                 \
   }
 DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
 #undef DEBUG_BREAK
@@ -2957,9 +3009,9 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
 // Increment the execution count for the given slot. Used for block code
 // coverage.
 IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
-  Node* closure = LoadRegister(Register::function_closure());
+  TNode<Object> closure = LoadRegister(Register::function_closure());
   Node* coverage_array_slot = BytecodeOperandIdxSmi(0);
-  Node* context = GetContext();
+  TNode<Context> context = GetContext();
 
   CallBuiltin(Builtins::kIncBlockCounter, context, closure,
               coverage_array_slot);
@@ -2973,8 +3025,8 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
 // map of the |receiver| if it has a usable enum cache or a fixed array
 // with the keys to enumerate in the accumulator.
 IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
-  Node* receiver = LoadRegisterAtOperandIndex(0);
-  Node* context = GetContext();
+  TNode<Object> receiver = LoadRegisterAtOperandIndex(0);
+  TNode<Context> context = GetContext();
 
   Label if_empty(this), if_runtime(this, Label::kDeferred);
   Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
@@ -2983,14 +3035,15 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
 
   BIND(&if_empty);
   {
-    Node* result = EmptyFixedArrayConstant();
+    TNode<FixedArray> result = EmptyFixedArrayConstant();
     SetAccumulator(result);
     Dispatch();
   }
 
   BIND(&if_runtime);
   {
-    Node* result = CallRuntime(Runtime::kForInEnumerate, context, receiver);
+    TNode<Object> result =
+        CallRuntime(Runtime::kForInEnumerate, context, receiver);
     SetAccumulator(result);
     Dispatch();
   }
@@ -3005,12 +3058,10 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
 // |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
 // and cache_length respectively.
 IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
-  Node* enumerator = GetAccumulator();
-  Node* vector_index = BytecodeOperandIdx(1);
-  Node* maybe_feedback_vector = LoadFeedbackVector();
-
   // The {enumerator} is either a Map or a FixedArray.
-  CSA_ASSERT(this, TaggedIsNotSmi(enumerator));
+  TNode<HeapObject> enumerator = CAST(GetAccumulator());
+  Node* vector_index = BytecodeOperandIdx(1);
+  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
 
   // Check if we're using an enum cache.
   Label if_fast(this), if_slow(this);
@@ -3019,18 +3070,22 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
   BIND(&if_fast);
   {
     // Load the enumeration length and cache from the {enumerator}.
-    Node* enum_length = LoadMapEnumLength(enumerator);
+    TNode<Map> map_enumerator = CAST(enumerator);
+    TNode<WordT> enum_length = LoadMapEnumLength(map_enumerator);
     CSA_ASSERT(this, WordNotEqual(enum_length,
                                   IntPtrConstant(kInvalidEnumCacheSentinel)));
-    Node* descriptors = LoadMapDescriptors(enumerator);
-    Node* enum_cache =
-        LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
-    Node* enum_keys = LoadObjectField(enum_cache, EnumCache::kKeysOffset);
+    TNode<DescriptorArray> descriptors = LoadMapDescriptors(map_enumerator);
+    TNode<EnumCache> enum_cache = LoadObjectField<EnumCache>(
+        descriptors, DescriptorArray::kEnumCacheOffset);
+    TNode<FixedArray> enum_keys =
+        LoadObjectField<FixedArray>(enum_cache, EnumCache::kKeysOffset);
 
     // Check if we have enum indices available.
-    Node* enum_indices = LoadObjectField(enum_cache, EnumCache::kIndicesOffset);
-    Node* enum_indices_length = LoadAndUntagFixedArrayBaseLength(enum_indices);
-    Node* feedback = SelectSmiConstant(
+    TNode<FixedArray> enum_indices =
+        LoadObjectField<FixedArray>(enum_cache, EnumCache::kIndicesOffset);
+    TNode<IntPtrT> enum_indices_length =
+        LoadAndUntagFixedArrayBaseLength(enum_indices);
+    TNode<Smi> feedback = SelectSmiConstant(
         IntPtrLessThanOrEqual(enum_length, enum_indices_length),
         ForInFeedback::kEnumCacheKeysAndIndices, ForInFeedback::kEnumCacheKeys);
     UpdateFeedback(feedback, maybe_feedback_vector, vector_index);
@@ -3038,7 +3093,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
     // Construct the cache info triple.
     Node* cache_type = enumerator;
     Node* cache_array = enum_keys;
-    Node* cache_length = SmiTag(enum_length);
+    TNode<Smi> cache_length = SmiTag(Signed(enum_length));
     StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
     Dispatch();
   }
@@ -3046,16 +3101,16 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
   BIND(&if_slow);
   {
     // The {enumerator} is a FixedArray with all the keys to iterate.
-    CSA_ASSERT(this, IsFixedArray(enumerator));
+    TNode<FixedArray> array_enumerator = CAST(enumerator);
 
     // Record the fact that we hit the for-in slow-path.
     UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
                    vector_index);
 
     // Construct the cache info triple.
-    Node* cache_type = enumerator;
-    Node* cache_array = enumerator;
-    Node* cache_length = LoadFixedArrayBaseLength(enumerator);
+    Node* cache_type = array_enumerator;
+    Node* cache_array = array_enumerator;
+    TNode<Smi> cache_length = LoadFixedArrayBaseLength(array_enumerator);
     StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
     Dispatch();
   }
@@ -3065,22 +3120,22 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
 //
 // Returns the next enumerable property in the accumulator.
 IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
-  Node* receiver = LoadRegisterAtOperandIndex(0);
-  Node* index = LoadRegisterAtOperandIndex(1);
-  Node* cache_type;
-  Node* cache_array;
+  TNode<HeapObject> receiver = CAST(LoadRegisterAtOperandIndex(0));
+  TNode<Object> index = LoadRegisterAtOperandIndex(1);
+  TNode<Object> cache_type;
+  TNode<Object> cache_array;
   std::tie(cache_type, cache_array) = LoadRegisterPairAtOperandIndex(2);
   Node* vector_index = BytecodeOperandIdx(3);
-  Node* maybe_feedback_vector = LoadFeedbackVector();
+  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
 
   // Load the next key from the enumeration array.
-  Node* key = LoadFixedArrayElement(CAST(cache_array), index, 0,
-                                    CodeStubAssembler::SMI_PARAMETERS);
+  TNode<Object> key = LoadFixedArrayElement(CAST(cache_array), index, 0,
+                                            CodeStubAssembler::SMI_PARAMETERS);
 
   // Check if we can use the for-in fast path potentially using the enum cache.
   Label if_fast(this), if_slow(this, Label::kDeferred);
-  Node* receiver_map = LoadMap(receiver);
-  Branch(WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
+  TNode<Map> receiver_map = LoadMap(receiver);
+  Branch(TaggedEqual(receiver_map, cache_type), &if_fast, &if_slow);
   BIND(&if_fast);
   {
     // Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -3094,8 +3149,9 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
                    vector_index);
 
     // Need to filter the {key} for the {receiver}.
-    Node* context = GetContext();
-    Node* result = CallBuiltin(Builtins::kForInFilter, context, key, receiver);
+    TNode<Context> context = GetContext();
+    TNode<Object> result =
+        CallBuiltin(Builtins::kForInFilter, context, key, receiver);
     SetAccumulator(result);
     Dispatch();
   }
@@ -3105,12 +3161,12 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
 //
 // Returns false if the end of the enumerable properties has been reached.
 IGNITION_HANDLER(ForInContinue, InterpreterAssembler) {
-  Node* index = LoadRegisterAtOperandIndex(0);
-  Node* cache_length = LoadRegisterAtOperandIndex(1);
+  TNode<Object> index = LoadRegisterAtOperandIndex(0);
+  TNode<Object> cache_length = LoadRegisterAtOperandIndex(1);
 
   // Check if {index} is at {cache_length} already.
   Label if_true(this), if_false(this), end(this);
-  Branch(WordEqual(index, cache_length), &if_true, &if_false);
+  Branch(TaggedEqual(index, cache_length), &if_true, &if_false);
   BIND(&if_true);
   {
     SetAccumulator(FalseConstant());
@@ -3137,6 +3193,26 @@ IGNITION_HANDLER(ForInStep, InterpreterAssembler) {
   Dispatch();
 }
 
+// GetIterator <object>
+//
+// Retrieves the object[Symbol.iterator] method and stores the result
+// in the accumulator.
+// TODO(swapnilgaikwad): Extend the functionality of the bytecode to call
+// the iterator method for an object.
+IGNITION_HANDLER(GetIterator, InterpreterAssembler) {
+  TNode<Object> receiver = LoadRegisterAtOperandIndex(0);
+  TNode<Context> context = GetContext();
+  TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+  Node* feedback_slot = BytecodeOperandIdx(1);
+  TNode<Smi> smi_slot = SmiTag(feedback_slot);
+
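+  // GetIteratorWithFeedback loads receiver[Symbol.iterator] and records
+  // type feedback in the given slot.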
+  TNode<Object> result =
+      CallBuiltin(Builtins::kGetIteratorWithFeedback, context, receiver,
+                  smi_slot, feedback_vector);
+  SetAccumulator(result);
+  Dispatch();
+}
+
 // Wide
 //
 // Prefix bytecode indicating next bytecode has wide (16-bit) operands.
@@ -3167,16 +3243,16 @@ IGNITION_HANDLER(Illegal, InterpreterAssembler) {
 // (for debugging purposes) into the generator. Then, returns the value
 // in the accumulator.
 IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
-  Node* generator = LoadRegisterAtOperandIndex(0);
+  TNode<JSGeneratorObject> generator = CAST(LoadRegisterAtOperandIndex(0));
   TNode<FixedArray> array = CAST(LoadObjectField(
       generator, JSGeneratorObject::kParametersAndRegistersOffset));
-  Node* closure = LoadRegister(Register::function_closure());
-  Node* context = GetContext();
+  TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
+  TNode<Context> context = GetContext();
   RegListNodePair registers = GetRegisterListAtOperandIndex(1);
   Node* suspend_id = BytecodeOperandUImmSmi(3);
 
-  Node* shared =
-      LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
+  TNode<SharedFunctionInfo> shared =
+      CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
   TNode<Int32T> formal_parameter_count = UncheckedCast<Int32T>(
       LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
                       MachineType::Uint16()));
@@ -3188,7 +3264,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
 
   // Store the bytecode offset in the [input_or_debug_pos] field, to be used by
   // the inspector.
-  Node* offset = SmiTag(BytecodeOffset());
+  TNode<Smi> offset = SmiTag(BytecodeOffset());
   StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
                    offset);
 
@@ -3204,18 +3280,21 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
 // generator's state by looking up the generator state in a jump table in the
 // constant pool, starting at |table_start|, and of length |table_length|.
 IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
-  Node* generator = LoadRegisterAtOperandIndex(0);
+  TNode<Object> maybe_generator = LoadRegisterAtOperandIndex(0);
 
   Label fallthrough(this);
-  GotoIf(WordEqual(generator, UndefinedConstant()), &fallthrough);
+  GotoIf(TaggedEqual(maybe_generator, UndefinedConstant()), &fallthrough);
 
-  Node* state =
-      LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
-  Node* new_state = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
+  TNode<JSGeneratorObject> generator = CAST(maybe_generator);
+
+  TNode<Smi> state =
+      CAST(LoadObjectField(generator, JSGeneratorObject::kContinuationOffset));
+  TNode<Smi> new_state = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
   StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
                    new_state);
 
-  Node* context = LoadObjectField(generator, JSGeneratorObject::kContextOffset);
+  TNode<Context> context =
+      CAST(LoadObjectField(generator, JSGeneratorObject::kContextOffset));
   SetContext(context);
 
   Node* table_start = BytecodeOperandIdx(1);
@@ -3226,14 +3305,14 @@ IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
   // The state must be a Smi.
   CSA_ASSERT(this, TaggedIsSmi(state));
 
-  Node* case_value = SmiUntag(state);
+  TNode<IntPtrT> case_value = SmiUntag(state);
 
   CSA_ASSERT(this, IntPtrGreaterThanOrEqual(case_value, IntPtrConstant(0)));
   CSA_ASSERT(this, IntPtrLessThan(case_value, table_length));
   USE(table_length);
 
-  Node* entry = IntPtrAdd(table_start, case_value);
-  Node* relative_jump = LoadAndUntagConstantPoolEntry(entry);
+  TNode<WordT> entry = IntPtrAdd(table_start, case_value);
+  TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntry(entry);
   Jump(relative_jump);
 
   BIND(&fallthrough);
@@ -3245,12 +3324,12 @@ IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
 // Imports the register file stored in the generator and marks the generator
 // state as executing.
 IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
-  Node* generator = LoadRegisterAtOperandIndex(0);
-  Node* closure = LoadRegister(Register::function_closure());
+  TNode<JSGeneratorObject> generator = CAST(LoadRegisterAtOperandIndex(0));
+  TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
   RegListNodePair registers = GetRegisterListAtOperandIndex(1);
 
-  Node* shared =
-      LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
+  TNode<SharedFunctionInfo> shared =
+      CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
   TNode<Int32T> formal_parameter_count = UncheckedCast<Int32T>(
       LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
                       MachineType::Uint16()));
@@ -3267,6 +3346,8 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
   Dispatch();
 }
 
+#undef IGNITION_HANDLER
+
 }  // namespace
 
 Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index d58180234024d8..a329e7189f4c64 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -47,7 +47,7 @@ class IntrinsicsGenerator {
   Node* IntrinsicAsBuiltinCall(
       const InterpreterAssembler::RegListNodePair& args, Node* context,
       Builtins::Name name);
-  void AbortIfArgCountMismatch(int expected, compiler::Node* actual);
+  void AbortIfArgCountMismatch(int expected, compiler::TNode<Word32T> actual);
 
 #define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
   Node* name(const InterpreterAssembler::RegListNodePair& args, Node* context);
@@ -124,7 +124,7 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(
 
 Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type,
                                                InstanceTypeCompareMode mode) {
-  Node* instance_type = __ LoadInstanceType(object);
+  TNode<Uint16T> instance_type = __ LoadInstanceType(object);
 
   if (mode == kInstanceTypeEqual) {
     return __ Word32Equal(instance_type, __ Int32Constant(type));
@@ -239,7 +239,7 @@ Node* IntrinsicsGenerator::Call(
 
   if (FLAG_debug_code) {
     InterpreterAssembler::Label arg_count_positive(assembler_);
-    Node* comparison =
+    TNode<BoolT> comparison =
         __ Int32LessThan(target_args.reg_count(), __ Int32Constant(0));
     __ GotoIfNot(comparison, &arg_count_positive);
     __ Abort(AbortReason::kWrongArgumentCountForInvokeIntrinsic);
@@ -265,13 +265,13 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(
   __ GotoIf(__ TaggedIsSmi(sync_iterator), &not_receiver);
   __ GotoIfNot(__ IsJSReceiver(sync_iterator), &not_receiver);
 
-  Node* const next =
+  TNode<Object> const next =
       __ GetProperty(context, sync_iterator, factory()->next_string());
 
-  Node* const native_context = __ LoadNativeContext(context);
-  Node* const map = __ LoadContextElement(
-      native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX);
-  Node* const iterator = __ AllocateJSObjectFromMap(map);
+  TNode<Context> const native_context = __ LoadNativeContext(context);
+  TNode<Map> const map = __ CAST(__ LoadContextElement(
+      native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX));
+  TNode<JSObject> const iterator = __ AllocateJSObjectFromMap(map);
 
   __ StoreObjectFieldNoWriteBarrier(
       iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator);
@@ -303,7 +303,7 @@ Node* IntrinsicsGenerator::CreateJSGeneratorObject(
 Node* IntrinsicsGenerator::GeneratorGetResumeMode(
     const InterpreterAssembler::RegListNodePair& args, Node* context) {
   Node* generator = __ LoadRegisterFromRegisterList(args, 0);
-  Node* const value =
+  TNode<Object> const value =
       __ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
 
   return value;
@@ -320,10 +320,10 @@ Node* IntrinsicsGenerator::GeneratorClose(
 
 Node* IntrinsicsGenerator::GetImportMetaObject(
     const InterpreterAssembler::RegListNodePair& args, Node* context) {
-  Node* const module_context = __ LoadModuleContext(context);
-  Node* const module =
-      __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
-  Node* const import_meta =
+  TNode<Context> const module_context = __ LoadModuleContext(context);
+  TNode<HeapObject> const module =
+      __ CAST(__ LoadContextElement(module_context, Context::EXTENSION_INDEX));
+  TNode<Object> const import_meta =
       __ LoadObjectField(module, SourceTextModule::kImportMetaOffset);
 
   InterpreterAssembler::Variable return_value(assembler_,
@@ -395,9 +395,10 @@ Node* IntrinsicsGenerator::AsyncGeneratorYield(
   return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield);
 }
 
-void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected, Node* actual) {
+void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected,
+                                                  TNode<Word32T> actual) {
   InterpreterAssembler::Label match(assembler_);
-  Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
+  TNode<BoolT> comparison = __ Word32Equal(actual, __ Int32Constant(expected));
   __ GotoIf(comparison, &match);
   __ Abort(AbortReason::kWrongArgumentCountForInvokeIntrinsic);
   __ Goto(&match);
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index eb91ae06a41163..121971d3051ac9 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -9,6 +9,7 @@
 
 #include "builtins-generated/bytecodes-builtins-list.h"
 #include "src/ast/prettyprinter.h"
+#include "src/ast/scopes.h"
 #include "src/codegen/compiler.h"
 #include "src/codegen/unoptimized-compilation-info.h"
 #include "src/init/bootstrapper.h"
@@ -42,6 +43,8 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
 
  private:
   BytecodeGenerator* generator() { return &generator_; }
+  void CheckAndPrintBytecodeMismatch(Isolate* isolate,
+                                     Handle<BytecodeArray> bytecode);
 
   Zone zone_;
   UnoptimizedCompilationInfo compilation_info_;
@@ -202,6 +205,25 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
   return SUCCEEDED;
 }
 
+#ifdef DEBUG
+void InterpreterCompilationJob::CheckAndPrintBytecodeMismatch(
+    Isolate* isolate, Handle<BytecodeArray> bytecode) {
+  int first_mismatch = generator()->CheckBytecodeMatches(bytecode);
+  if (first_mismatch >= 0) {
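+    // Re-finalize the bytecode from the generator so both versions can be
+    // disassembled for comparison before aborting.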
+    parse_info()->ast_value_factory()->Internalize(isolate);
+    DeclarationScope::AllocateScopeInfos(parse_info(), isolate);
+
+    Handle<BytecodeArray> new_bytecode =
+        generator()->FinalizeBytecode(isolate, parse_info()->script());
+    std::cerr << "Bytecode mismatch\nOriginal bytecode:\n";
+    bytecode->Disassemble(std::cerr);
+    std::cerr << "\nNew bytecode:\n";
+    new_bytecode->Disassemble(std::cerr);
+    FATAL("Bytecode mismatch at offset %d\n", first_mismatch);
+  }
+}
+#endif
+
 InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
     Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
   RuntimeCallTimerScope runtimeTimerScope(
@@ -210,23 +232,36 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                "V8.CompileIgnitionFinalization");
 
-  Handle<BytecodeArray> bytecodes =
-      generator()->FinalizeBytecode(isolate, parse_info()->script());
-  if (generator()->HasStackOverflow()) {
-    return FAILED;
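+  // A source position collection job arrives with its bytecode already set;
+  // only generate fresh bytecode when none was provided.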
+  Handle<BytecodeArray> bytecodes = compilation_info_.bytecode_array();
+  if (bytecodes.is_null()) {
+    bytecodes = generator()->FinalizeBytecode(isolate, parse_info()->script());
+    if (generator()->HasStackOverflow()) {
+      return FAILED;
+    }
+    compilation_info()->SetBytecodeArray(bytecodes);
+  }
+
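+  // When source positions are being recorded, build the table now and attach
+  // it to the (possibly pre-existing) bytecode array.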
+  if (compilation_info()->SourcePositionRecordingMode() ==
+      SourcePositionTableBuilder::RecordingMode::RECORD_SOURCE_POSITIONS) {
+    Handle<ByteArray> source_position_table =
+        generator()->FinalizeSourcePositionTable(isolate);
+    bytecodes->set_source_position_table(*source_position_table);
   }
 
   if (ShouldPrintBytecode(shared_info)) {
     StdoutStream os;
     std::unique_ptr<char[]> name =
         compilation_info()->literal()->GetDebugName();
-    os << "[generated bytecode for function: " << name.get() << "]"
-       << std::endl;
+    os << "[generated bytecode for function: " << name.get() << " ("
+       << shared_info << ")]" << std::endl;
     bytecodes->Disassemble(os);
     os << std::flush;
   }
 
-  compilation_info()->SetBytecodeArray(bytecodes);
+#ifdef DEBUG
+  CheckAndPrintBytecodeMismatch(isolate, bytecodes);
+#endif
+
   return SUCCEEDED;
 }
 
@@ -238,6 +273,16 @@ std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewCompilationJob(
       parse_info, literal, allocator, eager_inner_literals);
 }
 
+std::unique_ptr<UnoptimizedCompilationJob>
+Interpreter::NewSourcePositionCollectionJob(
+    ParseInfo* parse_info, FunctionLiteral* literal,
+    Handle<BytecodeArray> existing_bytecode, AccountingAllocator* allocator) {
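+  // Reuse the regular interpreter job; seeding it with the existing bytecode
+  // lets FinalizeJobImpl skip regeneration and only collect source positions.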
+  auto job = base::make_unique<InterpreterCompilationJob>(parse_info, literal,
+                                                          allocator, nullptr);
+  job->compilation_info()->SetBytecodeArray(existing_bytecode);
+  return job;
+}
+
 void Interpreter::ForEachBytecode(
     const std::function<void(Bytecode, OperandScale)>& f) {
   constexpr OperandScale kOperandScales[] = {
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index e8c494a6cec719..002c9701a8813c 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -18,10 +18,11 @@
 namespace v8 {
 namespace internal {
 
-class Isolate;
+class BytecodeArray;
 class Callable;
 class UnoptimizedCompilationJob;
 class FunctionLiteral;
+class Isolate;
 class ParseInfo;
 class RootVisitor;
 class SetupIsolateDelegate;
@@ -48,6 +49,14 @@ class Interpreter {
       AccountingAllocator* allocator,
       std::vector<FunctionLiteral*>* eager_inner_literals);
 
+  // Creates a compilation job which will generate source positions for
+  // |literal| and, when finalized, store the result into |existing_bytecode|.
+  static std::unique_ptr<UnoptimizedCompilationJob>
+  NewSourcePositionCollectionJob(ParseInfo* parse_info,
+                                 FunctionLiteral* literal,
+                                 Handle<BytecodeArray> existing_bytecode,
+                                 AccountingAllocator* allocator);
+
   // If the bytecode handler for |bytecode| and |operand_scale| has not yet
   // been loaded, deserialize it. Then return the handler.
   V8_EXPORT_PRIVATE Code GetBytecodeHandler(Bytecode bytecode,
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index fa2118af1e7c89..e49775704db86c 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -65,8 +65,8 @@ enum class EscapeKind : uint8_t {
 };
 
 using EscapeKindField = BitField8<EscapeKind, 0, 3>;
-using MayTerminateStringField = BitField8<bool, EscapeKindField::kNext, 1>;
-using NumberPartField = BitField8<bool, MayTerminateStringField::kNext, 1>;
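+// Next<> places each field in the bits directly after the previous field.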
+using MayTerminateStringField = EscapeKindField::Next<bool, 1>;
+using NumberPartField = MayTerminateStringField::Next<bool, 1>;
 
 constexpr bool MayTerminateJsonString(uint8_t flags) {
   return MayTerminateStringField::decode(flags);
@@ -539,7 +539,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
   Handle<ByteArray> mutable_double_buffer;
   // Allocate enough space so we can double-align the payload.
   const int kMutableDoubleSize = sizeof(double) * 2;
-  STATIC_ASSERT(MutableHeapNumber::kSize <= kMutableDoubleSize);
+  STATIC_ASSERT(HeapNumber::kSize <= kMutableDoubleSize);
   if (new_mutable_double > 0) {
     mutable_double_buffer =
         factory()->NewByteArray(kMutableDoubleSize * new_mutable_double);
@@ -563,7 +563,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
     if (IsAligned(mutable_double_address, kDoubleAlignment)) {
       mutable_double_address += kTaggedSize;
     } else {
-      filler_address += MutableHeapNumber::kSize;
+      filler_address += HeapNumber::kSize;
     }
     for (int j = 0; j < i; j++) {
       const JsonProperty& property = property_stack[start + j];
@@ -602,19 +602,19 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
           // payload, so we can skip notifying object layout change.
 
           HeapObject hn = HeapObject::FromAddress(mutable_double_address);
-          hn.set_map_after_allocation(*factory()->mutable_heap_number_map());
-          MutableHeapNumber::cast(hn).set_value_as_bits(bits);
+          hn.set_map_after_allocation(*factory()->heap_number_map());
+          HeapNumber::cast(hn).set_value_as_bits(bits);
           value = hn;
           mutable_double_address += kMutableDoubleSize;
         } else {
           DCHECK(value.IsHeapNumber());
           HeapObject::cast(value).synchronized_set_map(
-              *factory()->mutable_heap_number_map());
+              *factory()->heap_number_map());
         }
       }
       object->RawFastInobjectPropertyAtPut(index, value, mode);
     }
-    // Make all MutableHeapNumbers alive.
+    // Make all mutable HeapNumbers alive.
     if (!mutable_double_buffer.is_null()) {
 #ifdef DEBUG
       Address end =
@@ -825,8 +825,12 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue() {
               cont_stack.back().type() == JsonContinuation::kArrayElement &&
               cont_stack.back().index < element_stack.size() &&
               element_stack.back()->IsJSObject()) {
-            feedback =
-                handle(JSObject::cast(*element_stack.back()).map(), isolate_);
+            Map maybe_feedback = JSObject::cast(*element_stack.back()).map();
+            // Don't consume feedback from objects with a map that's detached
+            // from the transition tree.
+            if (!maybe_feedback.GetBackPointer().IsUndefined(isolate_)) {
+              feedback = handle(maybe_feedback, isolate_);
+            }
           }
           value = BuildJsonObject(cont, property_stack, feedback);
           property_stack.resize(cont.index);
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index a021fbbc1b96f2..684bcdcf545a33 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -269,7 +269,11 @@ bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
       if (key.is_null()) continue;
       // Object keys are internalized, so do it here.
       key = factory()->InternalizeString(key);
-      set = OrderedHashSet::Add(isolate_, set, key);
+      MaybeHandle<OrderedHashSet> set_candidate =
+          OrderedHashSet::Add(isolate_, set, key);
+      if (!set_candidate.ToHandle(&set)) {
+        return false;
+      }
     }
     property_list_ = OrderedHashSet::ConvertToKeysArray(
         isolate_, set, GetKeysConversion::kKeepNumbers);
@@ -534,7 +538,6 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
 
   switch (HeapObject::cast(*object).map().instance_type()) {
     case HEAP_NUMBER_TYPE:
-    case MUTABLE_HEAP_NUMBER_TYPE:
       if (deferred_string_key) SerializeDeferredKey(comma, key);
       return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
     case BIGINT_TYPE:
diff --git a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
index b625fdb57c0e22..213e98801a0b1b 100644
--- a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
@@ -73,7 +73,7 @@ DefaultWorkerThreadsTaskRunner::WorkerThread::WorkerThread(
     DefaultWorkerThreadsTaskRunner* runner)
     : Thread(Options("V8 DefaultWorkerThreadsTaskRunner WorkerThread")),
       runner_(runner) {
-  Start();
+  CHECK(Start());
 }
 
 DefaultWorkerThreadsTaskRunner::WorkerThread::~WorkerThread() { Join(); }
diff --git a/deps/v8/src/libplatform/tracing/trace-buffer.h b/deps/v8/src/libplatform/tracing/trace-buffer.h
index 95b9313338817e..3e58212d0eb43f 100644
--- a/deps/v8/src/libplatform/tracing/trace-buffer.h
+++ b/deps/v8/src/libplatform/tracing/trace-buffer.h
@@ -17,6 +17,7 @@ namespace tracing {
 
 class TraceBufferRingBuffer : public TraceBuffer {
  public:
+  // Takes ownership of |trace_writer|.
   TraceBufferRingBuffer(size_t max_chunks, TraceWriter* trace_writer);
   ~TraceBufferRingBuffer() override = default;
 
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 0700e348251227..3fb34366c2f768 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -26,14 +26,10 @@
 class V8DataSource : public perfetto::DataSource<V8DataSource> {
  public:
   void OnSetup(const SetupArgs&) override {}
-  void OnStart(const StartArgs&) override { started_.Signal(); }
+  void OnStart(const StartArgs&) override {}
   void OnStop(const StopArgs&) override {}
-
-  static v8::base::Semaphore started_;
 };
 
-v8::base::Semaphore V8DataSource::started_{0};
-
 PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(V8DataSource);
 #endif  // V8_USE_PERFETTO
 
@@ -294,14 +290,13 @@ void TracingController::StartTracing(TraceConfig* trace_config) {
 
   perfetto::DataSourceDescriptor dsd;
   dsd.set_name("v8.trace_events");
-  V8DataSource::Register(dsd);
+  bool registered = V8DataSource::Register(dsd);
+  CHECK(registered);
 
   tracing_session_ =
       perfetto::Tracing::NewTrace(perfetto::BackendType::kUnspecifiedBackend);
   tracing_session_->Setup(perfetto_trace_config);
-  // TODO(petermarshall): Switch to StartBlocking when available.
-  tracing_session_->Start();
-  V8DataSource::started_.Wait();
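+  // StartBlocking replaces the manual semaphore handshake that previously
+  // waited for the OnStart callback to fire.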
+  tracing_session_->StartBlocking();
 
 #endif  // V8_USE_PERFETTO
 
@@ -334,10 +329,17 @@ void TracingController::StopTracing() {
   }
 
 #ifdef V8_USE_PERFETTO
-  base::Semaphore stopped_{0};
-  tracing_session_->SetOnStopCallback([&stopped_]() { stopped_.Signal(); });
-  tracing_session_->Stop();
-  stopped_.Wait();
+  // Emit a fake trace event from the main thread. The final trace event is
+  // sometimes skipped because perfetto can't guarantee that the caller is
+  // totally finished writing to it without synchronization. To avoid the
+  // situation where we lose the last trace event, add a fake one here that will
+  // be sacrificed.
+  // TODO(petermarshall): Use the Client API to flush here rather than this
+  // workaround when that becomes available.
+  V8DataSource::Trace([&](V8DataSource::TraceContext ctx) {
+    auto packet = ctx.NewTracePacket();
+  });
+  tracing_session_->StopBlocking();
 
   std::vector<char> trace = tracing_session_->ReadTraceBlocking();
   json_listener_->ParseFromArray(trace);
diff --git a/deps/v8/src/libplatform/worker-thread.cc b/deps/v8/src/libplatform/worker-thread.cc
index 6a1f704a8285c7..a5658547510629 100644
--- a/deps/v8/src/libplatform/worker-thread.cc
+++ b/deps/v8/src/libplatform/worker-thread.cc
@@ -12,7 +12,7 @@ namespace platform {
 
 WorkerThread::WorkerThread(TaskQueue* queue)
     : Thread(Options("V8 WorkerThread")), queue_(queue) {
-  Start();
+  CHECK(Start());
 }
 
 WorkerThread::~WorkerThread() {
diff --git a/deps/v8/src/logging/OWNERS b/deps/v8/src/logging/OWNERS
index 852d438bb0a884..48d72aea5eec22 100644
--- a/deps/v8/src/logging/OWNERS
+++ b/deps/v8/src/logging/OWNERS
@@ -1 +1 @@
-file://COMMON_OWNERS
+file:../../COMMON_OWNERS
diff --git a/deps/v8/src/logging/code-events.h b/deps/v8/src/logging/code-events.h
index 262ddf7df39784..7df135c43fa40d 100644
--- a/deps/v8/src/logging/code-events.h
+++ b/deps/v8/src/logging/code-events.h
@@ -89,6 +89,7 @@ class CodeEventListener {
   virtual void RegExpCodeCreateEvent(AbstractCode code, String source) = 0;
   virtual void CodeMoveEvent(AbstractCode from, AbstractCode to) = 0;
   virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
+  virtual void NativeContextMoveEvent(Address from, Address to) = 0;
   virtual void CodeMovingGCEvent() = 0;
   virtual void CodeDisableOptEvent(AbstractCode code,
                                    SharedFunctionInfo shared) = 0;
@@ -164,6 +165,9 @@ class CodeEventDispatcher {
   void SharedFunctionInfoMoveEvent(Address from, Address to) {
     CODE_EVENT_DISPATCH(SharedFunctionInfoMoveEvent(from, to));
   }
+  void NativeContextMoveEvent(Address from, Address to) {
+    CODE_EVENT_DISPATCH(NativeContextMoveEvent(from, to));
+  }
   void CodeMovingGCEvent() { CODE_EVENT_DISPATCH(CodeMovingGCEvent()); }
   void CodeDisableOptEvent(AbstractCode code, SharedFunctionInfo shared) {
     CODE_EVENT_DISPATCH(CodeDisableOptEvent(code, shared));
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 3d517e29fcd075..8c808276faad5e 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -188,6 +188,8 @@ namespace internal {
      V8.WasmCompileModuleAsyncMicroSeconds, 100000000, MICROSECOND)            \
   HT(wasm_streaming_compile_wasm_module_time,                                  \
      V8.WasmCompileModuleStreamingMicroSeconds, 100000000, MICROSECOND)        \
+  HT(wasm_streaming_deserialize_wasm_module_time,                              \
+     V8.WasmDeserializeModuleStreamingMicroSeconds, 100000000, MICROSECOND)    \
   HT(wasm_tier_up_module_time, V8.WasmTierUpModuleMicroSeconds, 100000000,     \
      MICROSECOND)                                                              \
   HT(wasm_compile_asm_function_time, V8.WasmCompileFunctionMicroSeconds.asm,   \
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index 1efa7105cd45ee..4466e0a53bc6ca 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -764,6 +764,7 @@ class RuntimeCallTimer final {
   V(Int8Array_New)                                         \
   V(Isolate_DateTimeConfigurationChangeNotification)       \
   V(Isolate_LocaleConfigurationChangeNotification)         \
+  V(FinalizationGroup_Cleanup)                             \
   V(JSON_Parse)                                            \
   V(JSON_Stringify)                                        \
   V(Map_AsArray)                                           \
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index ecf4de676736ec..9b86a16031e84f 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -765,7 +765,7 @@ class Profiler : public base::Thread {
   void Disengage();
 
   // Inserts collected profiling data into buffer.
-  void Insert(v8::TickSample* sample) {
+  void Insert(TickSample* sample) {
     if (Succ(head_) == static_cast<int>(base::Relaxed_Load(&tail_))) {
       overflow_ = true;
     } else {
@@ -779,7 +779,7 @@ class Profiler : public base::Thread {
 
  private:
   // Waits for a signal and removes profiling data.
-  bool Remove(v8::TickSample* sample) {
+  bool Remove(TickSample* sample) {
     buffer_semaphore_.Wait();  // Wait for an element.
     *sample = buffer_[base::Relaxed_Load(&tail_)];
     bool result = overflow_;
@@ -796,7 +796,7 @@ class Profiler : public base::Thread {
   // Cyclic buffer for communicating profiling samples
   // between the signal handler and the worker thread.
   static const int kBufferSize = 128;
-  v8::TickSample buffer_[kBufferSize];  // Buffer storage.
+  TickSample buffer_[kBufferSize];      // Buffer storage.
   int head_;                            // Index to the buffer head.
   base::Atomic32 tail_;                 // Index to the buffer tail.
   bool overflow_;  // Tell whether a buffer overflow has occurred.
@@ -871,7 +871,7 @@ void Profiler::Engage() {
 
   // Start thread processing the profiler buffer.
   base::Relaxed_Store(&running_, 1);
-  Start();
+  CHECK(Start());
 
   // Register to get ticks.
   Logger* logger = isolate_->logger();
@@ -888,7 +888,7 @@ void Profiler::Disengage() {
   // inserting a fake element in the queue and then wait for
   // the thread to terminate.
   base::Relaxed_Store(&running_, 0);
-  v8::TickSample sample;
+  TickSample sample;
   Insert(&sample);
   Join();
 
@@ -896,7 +896,7 @@ void Profiler::Disengage() {
 }
 
 void Profiler::Run() {
-  v8::TickSample sample;
+  TickSample sample;
   bool overflow = Remove(&sample);
   while (base::Relaxed_Load(&running_)) {
     LOG(isolate_, TickEvent(&sample, overflow));
@@ -1549,7 +1549,7 @@ void Logger::RuntimeCallTimerEvent() {
   msg.WriteToLogFile();
 }
 
-void Logger::TickEvent(v8::TickSample* sample, bool overflow) {
+void Logger::TickEvent(TickSample* sample, bool overflow) {
   if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
   if (V8_UNLIKELY(TracingFlags::runtime_stats.load(std::memory_order_relaxed) ==
                   v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
@@ -1978,6 +1978,10 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
       description = "A JavaScript to Wasm adapter";
       tag = CodeEventListener::STUB_TAG;
       break;
+    case AbstractCode::JS_TO_JS_FUNCTION:
+      description = "A WebAssembly.Function adapter";
+      tag = CodeEventListener::STUB_TAG;
+      break;
     case AbstractCode::WASM_TO_CAPI_FUNCTION:
       description = "A Wasm to C-API adapter";
       tag = CodeEventListener::STUB_TAG;
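
The Profiler hunks above swap v8::TickSample for the internal TickSample; the surrounding machinery is a fixed-size single-producer/single-consumer ring buffer that carries samples from the sampling signal handler to a worker thread, with relaxed atomics on the indices and a semaphore counting filled slots. A condensed, simplified sketch (C++20 std::counting_semaphore; a real signal-handler producer needs async-signal-safe primitives, and Sample is an illustrative stand-in):

```cpp
// Condensed sketch of the Profiler's sample queue: a fixed-size
// single-producer/single-consumer ring buffer with relaxed atomic
// indices and a semaphore counting filled slots.
#include <atomic>
#include <semaphore>

struct Sample {
  void* pc = nullptr;  // sampled program counter
};

class SampleQueue {
 public:
  static const int kSize = 128;

  // Producer side: on overflow, drop the sample but remember the loss.
  void Insert(const Sample& sample) {
    int head = head_.load(std::memory_order_relaxed);
    int next = Succ(head);
    if (next == tail_.load(std::memory_order_relaxed)) {
      overflow_.store(true, std::memory_order_relaxed);  // buffer full
    } else {
      buffer_[head] = sample;
      head_.store(next, std::memory_order_relaxed);
      filled_.release();  // wake the consumer
    }
  }

  // Consumer side: blocks until a sample arrives; reports overflows.
  bool Remove(Sample* out) {
    filled_.acquire();  // wait for an element
    int tail = tail_.load(std::memory_order_relaxed);
    *out = buffer_[tail];
    bool had_overflow = overflow_.exchange(false, std::memory_order_relaxed);
    tail_.store(Succ(tail), std::memory_order_relaxed);
    return had_overflow;
  }

 private:
  static int Succ(int index) { return (index + 1) % kSize; }

  Sample buffer_[kSize];
  std::atomic<int> head_{0};
  std::atomic<int> tail_{0};
  std::atomic<bool> overflow_{false};
  std::counting_semaphore<kSize> filled_{0};
};
```
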
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index e46409a66e3fe3..3c28222982cae6 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -15,14 +15,14 @@
 
 namespace v8 {
 
-struct TickSample;
-
 namespace sampler {
 class Sampler;
 }
 
 namespace internal {
 
+struct TickSample;
+
 // Logger is used for collecting logging information from V8 during
 // execution. The result is dumped to a file.
 //
@@ -216,6 +216,8 @@ class Logger : public CodeEventListener {
 
   void SharedFunctionInfoMoveEvent(Address from, Address to) override;
 
+  void NativeContextMoveEvent(Address from, Address to) override {}
+
   void CodeNameEvent(Address addr, int pos, const char* code_name);
 
   void CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
@@ -401,6 +403,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
   void GetterCallbackEvent(Name name, Address entry_point) override {}
   void SetterCallbackEvent(Name name, Address entry_point) override {}
   void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+  void NativeContextMoveEvent(Address from, Address to) override {}
   void CodeMovingGCEvent() override {}
   void CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
                       int fp_to_sp_delta) override {}
@@ -453,6 +456,7 @@ class ExternalCodeEventListener : public CodeEventListener {
   void GetterCallbackEvent(Name name, Address entry_point) override {}
   void SetterCallbackEvent(Name name, Address entry_point) override {}
   void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+  void NativeContextMoveEvent(Address from, Address to) override {}
   void CodeMoveEvent(AbstractCode from, AbstractCode to) override {}
   void CodeDisableOptEvent(AbstractCode code,
                            SharedFunctionInfo shared) override {}
diff --git a/deps/v8/src/objects/OWNERS b/deps/v8/src/objects/OWNERS
index 450423f87850ba..f52e1c9ca8effc 100644
--- a/deps/v8/src/objects/OWNERS
+++ b/deps/v8/src/objects/OWNERS
@@ -1,3 +1,3 @@
-file://COMMON_OWNERS
+file:../../COMMON_OWNERS
 
 # COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index 9289a83f705b45..c1b6417ae135d4 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -66,14 +66,14 @@ class AllocationSite : public Struct {
   bool IsNested();
 
   // transition_info bitfields, for constructed array transition info.
-  class ElementsKindBits : public BitField<ElementsKind, 0, 5> {};
-  class DoNotInlineBit : public BitField<bool, 5, 1> {};
+  using ElementsKindBits = BitField<ElementsKind, 0, 5>;
+  using DoNotInlineBit = BitField<bool, 5, 1>;
   // Unused bits 6-30.
 
   // Bitfields for pretenure_data
-  class MementoFoundCountBits : public BitField<int, 0, 26> {};
-  class PretenureDecisionBits : public BitField<PretenureDecision, 26, 3> {};
-  class DeoptDependentCodeBit : public BitField<bool, 29, 1> {};
+  using MementoFoundCountBits = BitField<int, 0, 26>;
+  using PretenureDecisionBits = BitField<PretenureDecision, 26, 3>;
+  using DeoptDependentCodeBit = BitField<bool, 29, 1>;
   STATIC_ASSERT(PretenureDecisionBits::kMax >= kLastPretenureDecisionValue);
 
   // Increments the mementos found counter and returns true when the first
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index c327a35746c675..d0698d13a1c813 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -21,15 +21,13 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(AccessCheckInfo, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(AccessCheckInfo)
 OBJECT_CONSTRUCTORS_IMPL(AccessorInfo, Struct)
-OBJECT_CONSTRUCTORS_IMPL(InterceptorInfo, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(InterceptorInfo)
 
 TQ_OBJECT_CONSTRUCTORS_IMPL(CallHandlerInfo)
 
 CAST_ACCESSOR(AccessorInfo)
-CAST_ACCESSOR(AccessCheckInfo)
-CAST_ACCESSOR(InterceptorInfo)
 
 ACCESSORS(AccessorInfo, name, Name, kNameOffset)
 SMI_ACCESSORS(AccessorInfo, flags, kFlagsOffset)
@@ -98,21 +96,7 @@ bool AccessorInfo::HasExpectedReceiverType() {
   return expected_receiver_type().IsFunctionTemplateInfo();
 }
 
-ACCESSORS(AccessCheckInfo, callback, Object, kCallbackOffset)
-ACCESSORS(AccessCheckInfo, named_interceptor, Object, kNamedInterceptorOffset)
-ACCESSORS(AccessCheckInfo, indexed_interceptor, Object,
-          kIndexedInterceptorOffset)
-ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
-
-ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
-ACCESSORS(InterceptorInfo, setter, Object, kSetterOffset)
-ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
-ACCESSORS(InterceptorInfo, descriptor, Object, kDescriptorOffset)
-ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
-ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
-ACCESSORS(InterceptorInfo, definer, Object, kDefinerOffset)
-ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
-SMI_ACCESSORS(InterceptorInfo, flags, kFlagsOffset)
+TQ_SMI_ACCESSORS(InterceptorInfo, flags)
 BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
                kCanInterceptSymbolsBit)
 BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, kAllCanReadBit)
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index 518339f7d4afb9..72be5deb8fa553 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -102,37 +102,20 @@ class AccessorInfo : public Struct {
   OBJECT_CONSTRUCTORS(AccessorInfo, Struct);
 };
 
-class AccessCheckInfo : public Struct {
+class AccessCheckInfo
+    : public TorqueGeneratedAccessCheckInfo<AccessCheckInfo, Struct> {
  public:
-  DECL_ACCESSORS(callback, Object)
-  DECL_ACCESSORS(named_interceptor, Object)
-  DECL_ACCESSORS(indexed_interceptor, Object)
-  DECL_ACCESSORS(data, Object)
-
-  DECL_CAST(AccessCheckInfo)
-
   // Dispatched behavior.
   DECL_PRINTER(AccessCheckInfo)
-  DECL_VERIFIER(AccessCheckInfo)
 
   static AccessCheckInfo Get(Isolate* isolate, Handle<JSObject> receiver);
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
-                                TORQUE_GENERATED_ACCESS_CHECK_INFO_FIELDS)
-
-  OBJECT_CONSTRUCTORS(AccessCheckInfo, Struct);
+  TQ_OBJECT_CONSTRUCTORS(AccessCheckInfo)
 };
 
-class InterceptorInfo : public Struct {
+class InterceptorInfo
+    : public TorqueGeneratedInterceptorInfo<InterceptorInfo, Struct> {
  public:
-  DECL_ACCESSORS(getter, Object)
-  DECL_ACCESSORS(setter, Object)
-  DECL_ACCESSORS(query, Object)
-  DECL_ACCESSORS(descriptor, Object)
-  DECL_ACCESSORS(deleter, Object)
-  DECL_ACCESSORS(enumerator, Object)
-  DECL_ACCESSORS(definer, Object)
-  DECL_ACCESSORS(data, Object)
   DECL_BOOLEAN_ACCESSORS(can_intercept_symbols)
   DECL_BOOLEAN_ACCESSORS(all_can_read)
   DECL_BOOLEAN_ACCESSORS(non_masking)
@@ -142,14 +125,8 @@ class InterceptorInfo : public Struct {
   inline int flags() const;
   inline void set_flags(int flags);
 
-  DECL_CAST(InterceptorInfo)
-
   // Dispatched behavior.
   DECL_PRINTER(InterceptorInfo)
-  DECL_VERIFIER(InterceptorInfo)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
-                                TORQUE_GENERATED_INTERCEPTOR_INFO_FIELDS)
 
   static const int kCanInterceptSymbolsBit = 0;
   static const int kAllCanReadBit = 1;
@@ -157,7 +134,7 @@ class InterceptorInfo : public Struct {
   static const int kNamed = 3;
   static const int kHasNoSideEffect = 4;
 
-  OBJECT_CONSTRUCTORS(InterceptorInfo, Struct);
+  TQ_OBJECT_CONSTRUCTORS(InterceptorInfo)
 };
 
 class CallHandlerInfo
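
AccessCheckInfo and InterceptorInfo now derive from Torque-generated base classes, which supply the field accessors, cast helpers, and field-offset constants that the deleted DECL_ACCESSORS/DECL_CAST/DEFINE_FIELD_OFFSET_CONSTANTS macros used to spell out by hand. A schematic sketch of the CRTP shape involved; everything below is illustrative, not Torque's real generated output:

```cpp
// Schematic sketch of the CRTP shape behind the TorqueGenerated* bases:
// the generated base injects accessors, layout constants, and a cast()
// for the derived class. Illustrative only.
#include <cstdint>

class Object {
 public:
  explicit Object(std::uintptr_t ptr) : ptr_(ptr) {}
  std::uintptr_t ptr() const { return ptr_; }

 private:
  std::uintptr_t ptr_;
};

class Struct : public Object {
 public:
  using Object::Object;
};

template <class Derived, class Super>
class TorqueGeneratedAccessCheckInfo : public Super {
 public:
  using Super::Super;

  static constexpr int kCallbackOffset = 8;  // generated layout constant

  // Generated accessor; a real one would load a tagged field at
  // kCallbackOffset instead of returning a dummy.
  Object callback() const { return Object(0); }

  // Generated in place of the handwritten DECL_CAST/CAST_ACCESSOR pair.
  static Derived cast(Object object) { return Derived(object.ptr()); }
};

class AccessCheckInfo
    : public TorqueGeneratedAccessCheckInfo<AccessCheckInfo, Struct> {
 public:
  // Equivalent of TQ_OBJECT_CONSTRUCTORS: reuse the base constructors.
  using TorqueGeneratedAccessCheckInfo::TorqueGeneratedAccessCheckInfo;
};
```
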
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 2931c5b0a02d0d..3de88d6a9bd68d 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -19,15 +19,12 @@ namespace v8 {
 namespace internal {
 
 OBJECT_CONSTRUCTORS_IMPL(SloppyArgumentsElements, FixedArray)
-OBJECT_CONSTRUCTORS_IMPL(JSArgumentsObject, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(AliasedArgumentsEntry, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSArgumentsObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(AliasedArgumentsEntry)
 
-CAST_ACCESSOR(AliasedArgumentsEntry)
 CAST_ACCESSOR(SloppyArgumentsElements)
-CAST_ACCESSOR(JSArgumentsObject)
 
-SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot,
-              kAliasedContextSlotOffset)
+TQ_SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot)
 
 DEF_GETTER(SloppyArgumentsElements, context, Context) {
   return TaggedField<Context>::load(isolate, *this,
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 79d2e604bd22de..a306ef592aa3e2 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -17,11 +17,11 @@ namespace v8 {
 namespace internal {
 
 // Superclass for all objects with instance type {JS_ARGUMENTS_TYPE}
-class JSArgumentsObject : public JSObject {
+class JSArgumentsObject
+    : public TorqueGeneratedJSArgumentsObject<JSArgumentsObject, JSObject> {
  public:
   DECL_VERIFIER(JSArgumentsObject)
-  DECL_CAST(JSArgumentsObject)
-  OBJECT_CONSTRUCTORS(JSArgumentsObject, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSArgumentsObject)
 };
 
 // Common superclass for JSSloppyArgumentsObject and JSStrictArgumentsObject.
@@ -125,21 +125,17 @@ class SloppyArgumentsElements : public FixedArray {
 // - the parameter map contains no fast alias mapping (i.e. the hole)
 // - this struct (in the slow backing store) contains an index into the context
 // - all attributes are available as part if the property details
-class AliasedArgumentsEntry : public Struct {
+class AliasedArgumentsEntry
+    : public TorqueGeneratedAliasedArgumentsEntry<AliasedArgumentsEntry,
+                                                  Struct> {
  public:
   inline int aliased_context_slot() const;
   inline void set_aliased_context_slot(int count);
 
-  DECL_CAST(AliasedArgumentsEntry)
-
   // Dispatched behavior.
   DECL_PRINTER(AliasedArgumentsEntry)
-  DECL_VERIFIER(AliasedArgumentsEntry)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
-                                TORQUE_GENERATED_ALIASED_ARGUMENTS_ENTRY_FIELDS)
 
-  OBJECT_CONSTRUCTORS(AliasedArgumentsEntry, Struct);
+  TQ_OBJECT_CONSTRUCTORS(AliasedArgumentsEntry)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index b02c0f29d6fffc..2905bb44c6f28a 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -992,7 +992,7 @@ ComparisonResult BigInt::CompareToDouble(Handle<BigInt> x, double y) {
 MaybeHandle<String> BigInt::ToString(Isolate* isolate, Handle<BigInt> bigint,
                                      int radix, ShouldThrow should_throw) {
   if (bigint->is_zero()) {
-    return isolate->factory()->NewStringFromStaticChars("0");
+    return isolate->factory()->zero_string();
   }
   if (base::bits::IsPowerOfTwo(radix)) {
     return MutableBigInt::ToStringBasePowerOfTwo(isolate, bigint, radix,
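
Returning factory()->zero_string() hands out a cached root string instead of allocating a fresh one-character string every time a zero BigInt is stringified. The caching idea in miniature, with illustrative names (Factory here is not V8's heap factory):

```cpp
// The caching idea in miniature: hand out one shared immutable "0"
// instead of allocating per call. Names are illustrative.
#include <memory>
#include <string>

class Factory {
 public:
  // Cached: allocated once, shared by every caller.
  std::shared_ptr<const std::string> zero_string() {
    static const auto kZero = std::make_shared<const std::string>("0");
    return kZero;
  }

  // Allocating path, akin to the old NewStringFromStaticChars("0").
  std::shared_ptr<const std::string> NewString(const char* chars) {
    return std::make_shared<const std::string>(chars);
  }
};
```
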
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index a5ca5148679abe..ca80547230fbaa 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -57,9 +57,9 @@ class BigIntBase : public HeapObject {
   // able to read the length concurrently, the getters and setters are atomic.
   static const int kLengthFieldBits = 30;
   STATIC_ASSERT(kMaxLength <= ((1 << kLengthFieldBits) - 1));
-  class SignBits : public BitField<bool, 0, 1> {};
-  class LengthBits : public BitField<int, SignBits::kNext, kLengthFieldBits> {};
-  STATIC_ASSERT(LengthBits::kNext <= 32);
+  using SignBits = BitField<bool, 0, 1>;
+  using LengthBits = SignBits::Next<int, kLengthFieldBits>;
+  STATIC_ASSERT(LengthBits::kLastUsedBit < 32);
 
   // Layout description.
 #define BIGINT_FIELDS(V)                                                  \
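
The BitField changes here and in allocation-site.h replace subclassing (`class LengthBits : public BitField<...>`) with `using` aliases, chain adjacent fields through a Next<> alias, and assert exhaustion via kLastUsedBit instead of kNext. A minimal sketch of a bit-field template with that interface, simplified from V8's:

```cpp
// Minimal sketch of the BitField interface these changes assume:
// `using` aliases instead of subclasses, adjacent fields chained with
// Next<>, and kLastUsedBit for exhaustion asserts. Simplified from V8.
#include <cstdint>

template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr int kLastUsedBit = kShift + kBits - 1;
  static constexpr std::uint32_t kMax = (1u << kBits) - 1;
  static constexpr std::uint32_t kMask = kMax << kShift;

  // The following field starts right after this one, so chained
  // declarations can never overlap or leave accidental gaps.
  template <typename U, int kUBits>
  using Next = BitField<U, kShift + kBits, kUBits>;

  static constexpr std::uint32_t encode(T value) {
    return static_cast<std::uint32_t>(value) << kShift;
  }
  static constexpr T decode(std::uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

// Mirrors the BigIntBase declaration above.
using SignBits = BitField<bool, 0, 1>;
using LengthBits = SignBits::Next<int, 30>;
static_assert(LengthBits::kLastUsedBit < 32, "bitfield space exhausted");
static_assert(LengthBits::decode(SignBits::encode(true) |
                                 LengthBits::encode(7)) == 7,
              "fields round-trip independently");
```
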
diff --git a/deps/v8/src/objects/cell-inl.h b/deps/v8/src/objects/cell-inl.h
index 90266b75996b33..0bd6808fbc6a34 100644
--- a/deps/v8/src/objects/cell-inl.h
+++ b/deps/v8/src/objects/cell-inl.h
@@ -16,11 +16,7 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(Cell, HeapObject)
-
-CAST_ACCESSOR(Cell)
-
-ACCESSORS(Cell, value, Object, kValueOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Cell)
 
 Cell Cell::FromValueAddress(Address value) {
   return Cell::cast(HeapObject::FromAddress(value - kValueOffset));
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
index 9c77f5d332960f..fc49f164b24a96 100644
--- a/deps/v8/src/objects/cell.h
+++ b/deps/v8/src/objects/cell.h
@@ -6,7 +6,7 @@
 #define V8_OBJECTS_CELL_H_
 
 #include "src/objects/heap-object.h"
-#include "torque-generated/field-offsets-tq.h"
+#include "torque-generated/class-definitions-tq.h"
 
 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"
@@ -14,27 +14,18 @@
 namespace v8 {
 namespace internal {
 
-class Cell : public HeapObject {
+class Cell : public TorqueGeneratedCell<Cell, HeapObject> {
  public:
-  // [value]: value of the cell.
-  DECL_ACCESSORS(value, Object)
-
-  DECL_CAST(Cell)
-
   static inline Cell FromValueAddress(Address value);
 
   inline Address ValueAddress() { return address() + kValueOffset; }
 
   // Dispatched behavior.
   DECL_PRINTER(Cell)
-  DECL_VERIFIER(Cell)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
-                                TORQUE_GENERATED_CELL_FIELDS)
 
   using BodyDescriptor = FixedBodyDescriptor<kValueOffset, kSize, kSize>;
 
-  OBJECT_CONSTRUCTORS(Cell, HeapObject);
+  TQ_OBJECT_CONSTRUCTORS(Cell)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index e6f00b0fb2bb74..6e00a3363cd7d1 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -768,6 +768,7 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
 DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
 DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
 DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
 
 DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
 DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index a51a8c5b79d731..b416df8878a413 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -193,7 +193,10 @@ namespace {
 template <typename Code>
 void DropStackFrameCacheCommon(Code code) {
   i::Object maybe_table = code.source_position_table();
-  if (maybe_table.IsUndefined() || maybe_table.IsByteArray()) return;
+  if (maybe_table.IsUndefined() || maybe_table.IsByteArray() ||
+      maybe_table.IsException()) {
+    return;
+  }
   DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
   code.set_source_position_table(
       i::SourcePositionTableWithFrameCache::cast(maybe_table)
@@ -1086,15 +1089,5 @@ const char* DependentCode::DependencyGroupName(DependencyGroup group) {
   UNREACHABLE();
 }
 
-bool BytecodeArray::IsBytecodeEqual(const BytecodeArray other) const {
-  if (length() != other.length()) return false;
-
-  for (int i = 0; i < length(); ++i) {
-    if (get(i) != other.get(i)) return false;
-  }
-
-  return true;
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 2f85d4ac7b3457..6a5ac9f31a8e5b 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -45,6 +45,7 @@ class Code : public HeapObject {
   V(WASM_TO_CAPI_FUNCTION)  \
   V(WASM_TO_JS_FUNCTION)    \
   V(JS_TO_WASM_FUNCTION)    \
+  V(JS_TO_JS_FUNCTION)      \
   V(WASM_INTERPRETER_ENTRY) \
   V(C_WASM_ENTRY)
 
@@ -438,7 +439,7 @@ class Code : public HeapObject {
   DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
 #undef CODE_FLAGS_BIT_FIELDS
   static_assert(NUMBER_OF_KINDS <= KindField::kMax, "Code::KindField size");
-  static_assert(IsOffHeapTrampoline::kNext <= 32,
+  static_assert(IsOffHeapTrampoline::kLastUsedBit < 32,
                 "Code::flags field exhausted");
 
   // KindSpecificFlags layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
@@ -451,7 +452,8 @@ class Code : public HeapObject {
   V(IsExceptionCaughtField, bool, 1, _)
   DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
 #undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS
-  static_assert(IsExceptionCaughtField::kNext <= 32, "KindSpecificFlags full");
+  static_assert(IsExceptionCaughtField::kLastUsedBit < 32,
+                "KindSpecificFlags full");
 
   // The {marked_for_deoptimization} field is accessed from generated code.
   static const int kMarkedForDeoptimizationBit =
@@ -705,8 +707,8 @@ class DependentCode : public WeakFixedArray {
 
   inline int flags();
   inline void set_flags(int flags);
-  class GroupField : public BitField<int, 0, 3> {};
-  class CountField : public BitField<int, 3, 27> {};
+  using GroupField = BitField<int, 0, 3>;
+  using CountField = BitField<int, 3, 27>;
   STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
 
   OBJECT_CONSTRUCTORS(DependentCode, WeakFixedArray);
@@ -825,9 +827,6 @@ class BytecodeArray : public FixedArrayBase {
   // is deterministic.
   inline void clear_padding();
 
-  // Compares only the bytecode array but not any of the header fields.
-  bool IsBytecodeEqual(const BytecodeArray other) const;
-
   // Layout description.
   DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
                                 TORQUE_GENERATED_BYTECODE_ARRAY_FIELDS)
@@ -865,7 +864,8 @@ class DeoptimizationData : public FixedArray {
   static const int kOptimizationIdIndex = 5;
   static const int kSharedFunctionInfoIndex = 6;
   static const int kInliningPositionsIndex = 7;
-  static const int kFirstDeoptEntryIndex = 8;
+  static const int kDeoptExitStartIndex = 8;
+  static const int kFirstDeoptEntryIndex = 9;
 
   // Offsets of deopt entry elements relative to the start of the entry.
   static const int kBytecodeOffsetRawOffset = 0;
@@ -886,6 +886,7 @@ class DeoptimizationData : public FixedArray {
   DECL_ELEMENT_ACCESSORS(OptimizationId, Smi)
   DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
   DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
+  DECL_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
 
 #undef DECL_ELEMENT_ACCESSORS
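
DeoptimizationData is a FixedArray laid out as a fixed header of named slots followed by fixed-size deopt entries, so adding the DeoptExitStart header slot at index 8 pushes kFirstDeoptEntryIndex from 8 to 9. A plain-vector sketch of that layout with an abridged header (not the real FixedArray):

```cpp
// Sketch of the layout change: a fixed header of named slots followed
// by fixed-size entries. Adding the DeoptExitStart header slot shifts
// the first entry index from 8 to 9. std::vector stands in for the
// real FixedArray, and kEntrySize is illustrative.
#include <cassert>
#include <vector>

class DeoptData {
 public:
  // Header slot indices (abridged; ordering matches code.h).
  static const int kInliningPositionsIndex = 7;
  static const int kDeoptExitStartIndex = 8;   // newly added header slot
  static const int kFirstDeoptEntryIndex = 9;  // was 8 before the change
  static const int kEntrySize = 4;             // slots per deopt entry

  explicit DeoptData(int entry_count)
      : slots_(kFirstDeoptEntryIndex + entry_count * kEntrySize) {}

  int& DeoptExitStart() { return slots_[kDeoptExitStartIndex]; }
  int& EntrySlot(int entry, int offset) {
    assert(offset < kEntrySize);
    return slots_[kFirstDeoptEntryIndex + entry * kEntrySize + offset];
  }

 private:
  std::vector<int> slots_;
};
```
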
 
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index 861e06d87f1625..74fb4477b18096 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -44,7 +44,7 @@ bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
     DCHECK(context.IsScriptContext());
     int slot_index = ScopeInfo::ContextSlotIndex(
         context.scope_info(), name, &result->mode, &result->init_flag,
-        &result->maybe_assigned_flag, &result->requires_brand_check);
+        &result->maybe_assigned_flag);
 
     if (slot_index >= 0) {
       result->context_index = i;
@@ -161,8 +161,8 @@ static Maybe<bool> UnscopableLookup(LookupIterator* it) {
 }
 
 static PropertyAttributes GetAttributesForMode(VariableMode mode) {
-  DCHECK(IsDeclaredVariableMode(mode));
-  return mode == VariableMode::kConst ? READ_ONLY : NONE;
+  DCHECK(IsSerializableVariableMode(mode));
+  return IsConstVariableMode(mode) ? READ_ONLY : NONE;
 }
 
 // static
@@ -287,10 +287,8 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
       VariableMode mode;
       InitializationFlag flag;
       MaybeAssignedFlag maybe_assigned_flag;
-      RequiresBrandCheckFlag requires_brand_check;
       int slot_index = ScopeInfo::ContextSlotIndex(scope_info, *name, &mode,
-                                                   &flag, &maybe_assigned_flag,
-                                                   &requires_brand_check);
+                                                   &flag, &maybe_assigned_flag);
       DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
       if (slot_index >= 0) {
         if (FLAG_trace_contexts) {
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 0c00aba08e492f..a7b60ff7b95ae7 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -227,10 +227,14 @@ enum ContextLookupFlags {
   V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function)              \
   V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function)                        \
   V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info)     \
+  V(REGEXP_MATCH_ALL_FUNCTION_INDEX, JSFunction, regexp_match_all_function)    \
+  V(REGEXP_MATCH_FUNCTION_INDEX, JSFunction, regexp_match_function)            \
   V(REGEXP_PROTOTYPE_INDEX, JSObject, regexp_prototype)                        \
   V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map)                     \
+  V(REGEXP_REPLACE_FUNCTION_INDEX, JSFunction, regexp_replace_function)        \
   V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)                           \
-  V(REGEXP_SPECIES_PROTECTOR_INDEX, PropertyCell, regexp_species_protector)    \
+  V(REGEXP_SEARCH_FUNCTION_INDEX, JSFunction, regexp_search_function)          \
+  V(REGEXP_SPLIT_FUNCTION_INDEX, JSFunction, regexp_split_function)            \
   V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map,                   \
     initial_regexp_string_iterator_prototype_map)                              \
   V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table)      \
@@ -249,6 +253,8 @@ enum ContextLookupFlags {
     slow_object_with_object_prototype_map)                                     \
   V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, SimpleNumberDictionary,          \
     slow_template_instantiations_cache)                                        \
+  /* Fast Path Protectors */                                                   \
+  V(REGEXP_SPECIES_PROTECTOR_INDEX, PropertyCell, regexp_species_protector)    \
   /* All *_FUNCTION_MAP_INDEX definitions used by Context::FunctionMapIndex */ \
   /* must remain together. */                                                  \
   V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map)                       \
@@ -356,7 +362,6 @@ class ScriptContextTable : public FixedArray {
     VariableMode mode;
     InitializationFlag init_flag;
     MaybeAssignedFlag maybe_assigned_flag;
-    RequiresBrandCheckFlag requires_brand_check;
   };
 
   inline int used() const;
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index fe6001f58c9a33..957c06d8ec74b7 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -170,7 +170,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
 
   // Collect the keys into the given KeyAccumulator, in ascending chronological
   // order of property creation.
-  static void CollectKeysTo(Handle<Derived> dictionary, KeyAccumulator* keys);
+  V8_WARN_UNUSED_RESULT static ExceptionStatus CollectKeysTo(
+      Handle<Derived> dictionary, KeyAccumulator* keys);
 
   // Return the key indices sorted by its enumeration index.
   static Handle<FixedArray> IterationIndices(Isolate* isolate,
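
CollectKeysTo can now bail out partway through (for example when an exception interrupts key collection), so it returns an ExceptionStatus that V8_WARN_UNUSED_RESULT forces callers to consume and propagate. A sketch of the idea with an illustrative [[nodiscard]] status enum; V8's actual definition may differ:

```cpp
// Sketch of the idea: a [[nodiscard]] status type so that dropping a
// failed key-collection result is a compiler warning. The definition
// and helpers are illustrative; only the intent matches the diff.
#include <initializer_list>

enum class [[nodiscard]] ExceptionStatus : bool {
  kException = false,
  kSuccess = true,
};

constexpr bool Succeeded(ExceptionStatus s) {
  return s == ExceptionStatus::kSuccess;
}

ExceptionStatus AddKey(int key) {
  if (key < 0) return ExceptionStatus::kException;  // pretend we threw
  return ExceptionStatus::kSuccess;
}

ExceptionStatus CollectKeysTo(/* Handle<Derived>, KeyAccumulator* */) {
  for (int key : {1, 2, 3}) {
    ExceptionStatus status = AddKey(key);
    if (!Succeeded(status)) return status;  // propagate, never swallow
  }
  return ExceptionStatus::kSuccess;
}
```
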
diff --git a/deps/v8/src/objects/elements-inl.h b/deps/v8/src/objects/elements-inl.h
index c4f2e2bf787d22..dfec2e35f4bfe6 100644
--- a/deps/v8/src/objects/elements-inl.h
+++ b/deps/v8/src/objects/elements-inl.h
@@ -5,6 +5,7 @@
 #ifndef V8_OBJECTS_ELEMENTS_INL_H_
 #define V8_OBJECTS_ELEMENTS_INL_H_
 
+#include "src/common/globals.h"
 #include "src/objects/elements.h"
 
 #include "src/handles/handles-inl.h"
@@ -13,10 +14,11 @@
 namespace v8 {
 namespace internal {
 
-inline void ElementsAccessor::CollectElementIndices(Handle<JSObject> object,
-                                                    KeyAccumulator* keys) {
-  CollectElementIndices(object, handle(object->elements(), keys->isolate()),
-                        keys);
+V8_WARN_UNUSED_RESULT inline ExceptionStatus
+ElementsAccessor::CollectElementIndices(Handle<JSObject> object,
+                                        KeyAccumulator* keys) {
+  return CollectElementIndices(
+      object, handle(object->elements(), keys->isolate()), keys);
 }
 
 inline MaybeHandle<FixedArray> ElementsAccessor::PrependElementIndices(
diff --git a/deps/v8/src/objects/elements-kind.cc b/deps/v8/src/objects/elements-kind.cc
index a819caf45900de..ec2b79d10b830d 100644
--- a/deps/v8/src/objects/elements-kind.cc
+++ b/deps/v8/src/objects/elements-kind.cc
@@ -35,10 +35,12 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
     case PACKED_ELEMENTS:
     case PACKED_FROZEN_ELEMENTS:
     case PACKED_SEALED_ELEMENTS:
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_SMI_ELEMENTS:
     case HOLEY_ELEMENTS:
     case HOLEY_FROZEN_ELEMENTS:
     case HOLEY_SEALED_ELEMENTS:
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
     case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -79,6 +81,10 @@ const char* ElementsKindToString(ElementsKind kind) {
       return "PACKED_DOUBLE_ELEMENTS";
     case HOLEY_DOUBLE_ELEMENTS:
       return "HOLEY_DOUBLE_ELEMENTS";
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
+      return "PACKED_NONEXTENSIBLE_ELEMENTS";
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
+      return "HOLEY_NONEXTENSIBLE_ELEMENTS";
     case PACKED_SEALED_ELEMENTS:
       return "PACKED_SEALED_ELEMENTS";
     case HOLEY_SEALED_ELEMENTS:
diff --git a/deps/v8/src/objects/elements-kind.h b/deps/v8/src/objects/elements-kind.h
index 3ed6ea66ece889..e1335fa3c0a89a 100644
--- a/deps/v8/src/objects/elements-kind.h
+++ b/deps/v8/src/objects/elements-kind.h
@@ -43,6 +43,10 @@ enum ElementsKind : uint8_t {
   PACKED_DOUBLE_ELEMENTS,
   HOLEY_DOUBLE_ELEMENTS,
 
+  // The nonextensible kind for elements.
+  PACKED_NONEXTENSIBLE_ELEMENTS,
+  HOLEY_NONEXTENSIBLE_ELEMENTS,
+
   // The sealed kind for elements.
   PACKED_SEALED_ELEMENTS,
   HOLEY_SEALED_ELEMENTS,
@@ -79,7 +83,8 @@ enum ElementsKind : uint8_t {
   FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
   LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = BIGINT64_ELEMENTS,
   TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS,
-  LAST_FROZEN_ELEMENTS_KIND = HOLEY_FROZEN_ELEMENTS,
+  FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND = PACKED_NONEXTENSIBLE_ELEMENTS,
+  LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND = HOLEY_FROZEN_ELEMENTS,
 
 // Alias for kSystemPointerSize-sized elements
 #ifdef V8_COMPRESS_POINTERS
@@ -156,14 +161,23 @@ inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
 }
 
 // This predicate is used for disabling respective functionality in builtins.
-inline bool IsFrozenOrSealedElementsKindUnchecked(ElementsKind kind) {
-  return IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_FROZEN_ELEMENTS);
+inline bool IsAnyNonextensibleElementsKindUnchecked(ElementsKind kind) {
+  return IsInRange(kind, FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND,
+                   LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND);
 }
 
-inline bool IsFrozenOrSealedElementsKind(ElementsKind kind) {
-  DCHECK_IMPLIES(IsFrozenOrSealedElementsKindUnchecked(kind),
+inline bool IsAnyNonextensibleElementsKind(ElementsKind kind) {
+  DCHECK_IMPLIES(IsAnyNonextensibleElementsKindUnchecked(kind),
                  FLAG_enable_sealed_frozen_elements_kind);
-  return IsFrozenOrSealedElementsKindUnchecked(kind);
+  return IsAnyNonextensibleElementsKindUnchecked(kind);
+}
+
+inline bool IsNonextensibleElementsKind(ElementsKind kind) {
+  DCHECK_IMPLIES(IsInRange(kind, PACKED_NONEXTENSIBLE_ELEMENTS,
+                           HOLEY_NONEXTENSIBLE_ELEMENTS),
+                 FLAG_enable_sealed_frozen_elements_kind);
+  return IsInRange(kind, PACKED_NONEXTENSIBLE_ELEMENTS,
+                   HOLEY_NONEXTENSIBLE_ELEMENTS);
 }
 
 inline bool IsSealedElementsKind(ElementsKind kind) {
@@ -194,10 +208,13 @@ inline bool IsObjectElementsKind(ElementsKind kind) {
   return IsInRange(kind, PACKED_ELEMENTS, HOLEY_ELEMENTS);
 }
 
-inline bool IsHoleyFrozenOrSealedElementsKind(ElementsKind kind) {
-  DCHECK_IMPLIES(kind == HOLEY_SEALED_ELEMENTS || kind == HOLEY_FROZEN_ELEMENTS,
+inline bool IsAnyHoleyNonextensibleElementsKind(ElementsKind kind) {
+  DCHECK_IMPLIES(kind == HOLEY_NONEXTENSIBLE_ELEMENTS ||
+                     kind == HOLEY_SEALED_ELEMENTS ||
+                     kind == HOLEY_FROZEN_ELEMENTS,
                  FLAG_enable_sealed_frozen_elements_kind);
-  return kind == HOLEY_SEALED_ELEMENTS || kind == HOLEY_FROZEN_ELEMENTS;
+  return kind == HOLEY_NONEXTENSIBLE_ELEMENTS ||
+         kind == HOLEY_SEALED_ELEMENTS || kind == HOLEY_FROZEN_ELEMENTS;
 }
 
 inline bool IsHoleyElementsKind(ElementsKind kind) {
@@ -239,6 +256,9 @@ inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
   if (packed_kind == PACKED_ELEMENTS) {
     return HOLEY_ELEMENTS;
   }
+  if (packed_kind == PACKED_NONEXTENSIBLE_ELEMENTS) {
+    return HOLEY_NONEXTENSIBLE_ELEMENTS;
+  }
   return packed_kind;
 }
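
ElementsKind keeps related kinds contiguous so that category predicates compile down to a single unsigned range comparison; the new nonextensible kinds slot in just before the sealed ones, and the FIRST/LAST aliases define the "any nonextensible" range. An abridged sketch of that pattern (enum values trimmed to the relevant kinds):

```cpp
// Sketch of the range-predicate idea: keep related enum values
// contiguous so category checks fold to one unsigned comparison.
// Enum abridged from elements-kind.h.
#include <cstdint>

enum ElementsKind : std::uint8_t {
  PACKED_ELEMENTS,
  HOLEY_ELEMENTS,
  PACKED_NONEXTENSIBLE_ELEMENTS,
  HOLEY_NONEXTENSIBLE_ELEMENTS,
  PACKED_SEALED_ELEMENTS,
  HOLEY_SEALED_ELEMENTS,
  PACKED_FROZEN_ELEMENTS,
  HOLEY_FROZEN_ELEMENTS,

  FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND = PACKED_NONEXTENSIBLE_ELEMENTS,
  LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND = HOLEY_FROZEN_ELEMENTS,
};

// Subtracting the lower bound turns the check into one unsigned compare.
inline bool IsInRange(ElementsKind kind, ElementsKind lo, ElementsKind hi) {
  return static_cast<unsigned>(kind) - static_cast<unsigned>(lo) <=
         static_cast<unsigned>(hi) - static_cast<unsigned>(lo);
}

inline bool IsAnyNonextensibleElementsKind(ElementsKind kind) {
  return IsInRange(kind, FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND,
                   LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND);
}
```
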
 
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 4bdfba052dfb33..6e5648d2f4d5a2 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -33,6 +33,9 @@
 //       - FastPackedSmiElementsAccessor
 //       - FastHoleySmiElementsAccessor
 //       - FastPackedObjectElementsAccessor
+//       - FastNonextensibleObjectElementsAccessor: template
+//         - FastPackedNonextensibleObjectElementsAccessor
+//         - FastHoleyNonextensibleObjectElementsAccessor
 //       - FastSealedObjectElementsAccessor: template
 //         - FastPackedSealedObjectElementsAccessor
 //         - FastHoleySealedObjectElementsAccessor
@@ -68,6 +71,17 @@ namespace internal {
 
 namespace {
 
+#define RETURN_NOTHING_IF_NOT_SUCCESSFUL(call) \
+  do {                                         \
+    if (!(call)) return Nothing<bool>();       \
+  } while (false)
+
+#define RETURN_FAILURE_IF_NOT_SUCCESSFUL(call)          \
+  do {                                                  \
+    ExceptionStatus status_enum_result = (call);        \
+    if (!status_enum_result) return status_enum_result; \
+  } while (false)
+
 static const int kPackedSizeNotKnown = -1;
 
 enum Where { AT_START, AT_END };
@@ -85,6 +99,10 @@ enum Where { AT_START, AT_END };
   V(FastPackedDoubleElementsAccessor, PACKED_DOUBLE_ELEMENTS,                 \
     FixedDoubleArray)                                                         \
   V(FastHoleyDoubleElementsAccessor, HOLEY_DOUBLE_ELEMENTS, FixedDoubleArray) \
+  V(FastPackedNonextensibleObjectElementsAccessor,                            \
+    PACKED_NONEXTENSIBLE_ELEMENTS, FixedArray)                                \
+  V(FastHoleyNonextensibleObjectElementsAccessor,                             \
+    HOLEY_NONEXTENSIBLE_ELEMENTS, FixedArray)                                 \
   V(FastPackedSealedObjectElementsAccessor, PACKED_SEALED_ELEMENTS,           \
     FixedArray)                                                               \
   V(FastHoleySealedObjectElementsAccessor, HOLEY_SEALED_ELEMENTS, FixedArray) \
@@ -992,8 +1010,8 @@ class ElementsAccessorBase : public InternalElementsAccessor {
     DCHECK_EQ(*nof_items, 0);
     KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
                                ALL_PROPERTIES);
-    Subclass::CollectElementIndicesImpl(
-        object, handle(object->elements(), isolate), &accumulator);
+    RETURN_NOTHING_IF_NOT_SUCCESSFUL(Subclass::CollectElementIndicesImpl(
+        object, handle(object->elements(), isolate), &accumulator));
     Handle<FixedArray> keys = accumulator.GetKeys();
 
     int count = 0;
@@ -1055,16 +1073,16 @@ class ElementsAccessorBase : public InternalElementsAccessor {
     return Just(true);
   }
 
-  void CollectElementIndices(Handle<JSObject> object,
-                             Handle<FixedArrayBase> backing_store,
-                             KeyAccumulator* keys) final {
-    if (keys->filter() & ONLY_ALL_CAN_READ) return;
-    Subclass::CollectElementIndicesImpl(object, backing_store, keys);
+  V8_WARN_UNUSED_RESULT ExceptionStatus CollectElementIndices(
+      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+      KeyAccumulator* keys) final {
+    if (keys->filter() & ONLY_ALL_CAN_READ) return ExceptionStatus::kSuccess;
+    return Subclass::CollectElementIndicesImpl(object, backing_store, keys);
   }
 
-  static void CollectElementIndicesImpl(Handle<JSObject> object,
-                                        Handle<FixedArrayBase> backing_store,
-                                        KeyAccumulator* keys) {
+  V8_WARN_UNUSED_RESULT static ExceptionStatus CollectElementIndicesImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+      KeyAccumulator* keys) {
     DCHECK_NE(DICTIONARY_ELEMENTS, kind());
     // Non-dictionary elements can't have all-can-read accessors.
     uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
@@ -1074,9 +1092,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
     for (uint32_t i = 0; i < length; i++) {
       if (Subclass::HasElementImpl(isolate, *object, i, *backing_store,
                                    filter)) {
-        keys->AddKey(factory->NewNumberFromUint(i));
+        RETURN_FAILURE_IF_NOT_SUCCESSFUL(
+            keys->AddKey(factory->NewNumberFromUint(i)));
       }
     }
+    return ExceptionStatus::kSuccess;
   }
 
   static Handle<FixedArray> DirectCollectElementIndicesImpl(
@@ -1189,10 +1209,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
     return combined_keys;
   }
 
-  void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
-                                   KeyAccumulator* accumulator,
-                                   AddKeyConversion convert) final {
-    Subclass::AddElementsToKeyAccumulatorImpl(receiver, accumulator, convert);
+  V8_WARN_UNUSED_RESULT ExceptionStatus AddElementsToKeyAccumulator(
+      Handle<JSObject> receiver, KeyAccumulator* accumulator,
+      AddKeyConversion convert) final {
+    return Subclass::AddElementsToKeyAccumulatorImpl(receiver, accumulator,
+                                                     convert);
   }
 
   static uint32_t GetCapacityImpl(JSObject holder,
@@ -1266,7 +1287,8 @@ class ElementsAccessorBase : public InternalElementsAccessor {
   static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
                                        FixedArrayBase backing_store,
                                        uint32_t index, PropertyFilter filter) {
-    DCHECK(IsFastElementsKind(kind()) || IsFrozenOrSealedElementsKind(kind()));
+    DCHECK(IsFastElementsKind(kind()) ||
+           IsAnyNonextensibleElementsKind(kind()));
     uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
     if (IsHoleyElementsKindForRead(kind())) {
       return index < length && !BackingStore::cast(backing_store)
@@ -1529,10 +1551,10 @@ class DictionaryElementsAccessor
     return FilterKey(dictionary, entry, raw_key, filter);
   }
 
-  static void CollectElementIndicesImpl(Handle<JSObject> object,
-                                        Handle<FixedArrayBase> backing_store,
-                                        KeyAccumulator* keys) {
-    if (keys->filter() & SKIP_STRINGS) return;
+  V8_WARN_UNUSED_RESULT static ExceptionStatus CollectElementIndicesImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+      KeyAccumulator* keys) {
+    if (keys->filter() & SKIP_STRINGS) return ExceptionStatus::kSuccess;
     Isolate* isolate = keys->isolate();
     Handle<NumberDictionary> dictionary =
         Handle<NumberDictionary>::cast(backing_store);
@@ -1555,8 +1577,9 @@ class DictionaryElementsAccessor
     }
     SortIndices(isolate, elements, insertion_index);
     for (int i = 0; i < insertion_index; i++) {
-      keys->AddKey(elements->get(i));
+      RETURN_FAILURE_IF_NOT_SUCCESSFUL(keys->AddKey(elements->get(i)));
     }
+    return ExceptionStatus::kSuccess;
   }
 
   static Handle<FixedArray> DirectCollectElementIndicesImpl(
@@ -1581,9 +1604,9 @@ class DictionaryElementsAccessor
     return list;
   }
 
-  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
-                                              KeyAccumulator* accumulator,
-                                              AddKeyConversion convert) {
+  V8_WARN_UNUSED_RESULT static ExceptionStatus AddElementsToKeyAccumulatorImpl(
+      Handle<JSObject> receiver, KeyAccumulator* accumulator,
+      AddKeyConversion convert) {
     Isolate* isolate = accumulator->isolate();
     Handle<NumberDictionary> dictionary(
         NumberDictionary::cast(receiver->elements()), isolate);
@@ -1596,8 +1619,9 @@ class DictionaryElementsAccessor
       DCHECK(!value.IsTheHole(isolate));
       DCHECK(!value.IsAccessorPair());
       DCHECK(!value.IsAccessorInfo());
-      accumulator->AddKey(value, convert);
+      RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(value, convert));
     }
+    return ExceptionStatus::kSuccess;
   }
 
   static bool IncludesValueFastPath(Isolate* isolate, Handle<JSObject> receiver,
@@ -1877,7 +1901,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
   static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
                            Handle<FixedArrayBase> store) {
     DCHECK(obj->HasSmiOrObjectElements() || obj->HasDoubleElements() ||
-           obj->HasFastArgumentsElements() ||
+           obj->HasNonextensibleElements() || obj->HasFastArgumentsElements() ||
            obj->HasFastStringWrapperElements());
     Handle<BackingStore> backing_store = Handle<BackingStore>::cast(store);
     if (!obj->IsJSArray() &&
@@ -1981,10 +2005,12 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
 
   static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
     ElementsKind kind = KindTraits::Kind;
-    if (IsFastPackedElementsKind(kind)) {
+    if (IsFastPackedElementsKind(kind) ||
+        kind == PACKED_NONEXTENSIBLE_ELEMENTS) {
       JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind));
     }
-    if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
+    if (IsSmiOrObjectElementsKind(KindTraits::Kind) ||
+        IsNonextensibleElementsKind(kind)) {
       JSObject::EnsureWritableFastElements(obj);
     }
     DeleteCommon(obj, entry, handle(obj->elements(), obj->GetIsolate()));
@@ -2007,18 +2033,20 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
     return count;
   }
 
-  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
-                                              KeyAccumulator* accumulator,
-                                              AddKeyConversion convert) {
+  V8_WARN_UNUSED_RESULT static ExceptionStatus AddElementsToKeyAccumulatorImpl(
+      Handle<JSObject> receiver, KeyAccumulator* accumulator,
+      AddKeyConversion convert) {
     Isolate* isolate = accumulator->isolate();
     Handle<FixedArrayBase> elements(receiver->elements(), isolate);
     uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements);
     for (uint32_t i = 0; i < length; i++) {
       if (IsFastPackedElementsKind(KindTraits::Kind) ||
           HasEntryImpl(isolate, *elements, i)) {
-        accumulator->AddKey(Subclass::GetImpl(isolate, *elements, i), convert);
+        RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(
+            Subclass::GetImpl(isolate, *elements, i), convert));
       }
     }
+    return ExceptionStatus::kSuccess;
   }
 
   static void ValidateContents(JSObject holder, int length) {
@@ -2164,7 +2192,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
         // Hole here, since the {length} used here can be larger than
         // JSArray::length.
         if (IsSmiOrObjectElementsKind(Subclass::kind()) ||
-            IsFrozenOrSealedElementsKind(Subclass::kind())) {
+            IsAnyNonextensibleElementsKind(Subclass::kind())) {
           auto elements = FixedArray::cast(receiver->elements());
 
           for (uint32_t k = start_from; k < length; ++k) {
@@ -2189,7 +2217,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
           return Just(false);
         }
       } else if (!IsObjectElementsKind(Subclass::kind()) &&
-                 !IsFrozenOrSealedElementsKind(Subclass::kind())) {
+                 !IsAnyNonextensibleElementsKind(Subclass::kind())) {
         // Search for non-number, non-Undefined value, with either
         // PACKED_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS, HOLEY_SMI_ELEMENTS or
         // HOLEY_DOUBLE_ELEMENTS. Guaranteed to return false, since these
@@ -2199,7 +2227,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
         // Search for non-number, non-Undefined value with either
         // PACKED_ELEMENTS or HOLEY_ELEMENTS.
         DCHECK(IsObjectElementsKind(Subclass::kind()) ||
-               IsFrozenOrSealedElementsKind(Subclass::kind()));
+               IsAnyNonextensibleElementsKind(Subclass::kind()));
         auto elements = FixedArray::cast(receiver->elements());
 
         for (uint32_t k = start_from; k < length; ++k) {
@@ -2265,7 +2293,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
           // PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS. Return true if
           // elementK->IsHeapNumber() && std::isnan(elementK->Number())
           DCHECK(IsSmiOrObjectElementsKind(Subclass::kind()) ||
-                 IsFrozenOrSealedElementsKind(Subclass::kind()));
+                 IsAnyNonextensibleElementsKind(Subclass::kind()));
           auto elements = FixedArray::cast(receiver->elements());
 
           for (uint32_t k = start_from; k < length; ++k) {
@@ -2414,9 +2442,11 @@ class FastSmiOrObjectElementsAccessor
       case PACKED_ELEMENTS:
       case PACKED_FROZEN_ELEMENTS:
       case PACKED_SEALED_ELEMENTS:
+      case PACKED_NONEXTENSIBLE_ELEMENTS:
       case HOLEY_ELEMENTS:
       case HOLEY_FROZEN_ELEMENTS:
       case HOLEY_SEALED_ELEMENTS:
+      case HOLEY_NONEXTENSIBLE_ELEMENTS:
         CopyObjectToObjectElements(isolate, from, from_kind, from_start, to,
                                    to_kind, to_start, copy_size);
         break;
@@ -2493,7 +2523,7 @@ class FastSmiOrObjectElementsAccessor
 
     // Only FAST_{,HOLEY_}ELEMENTS can store non-numbers.
     if (!value.IsNumber() && !IsObjectElementsKind(Subclass::kind()) &&
-        !IsFrozenOrSealedElementsKind(Subclass::kind())) {
+        !IsAnyNonextensibleElementsKind(Subclass::kind())) {
       return Just<int64_t>(-1);
     }
     // NaN can never be found by strict equality.
@@ -2526,6 +2556,80 @@ class FastPackedObjectElementsAccessor
           FastPackedObjectElementsAccessor,
           ElementsKindTraits<PACKED_ELEMENTS>> {};
 
+template <typename Subclass, typename KindTraits>
+class FastNonextensibleObjectElementsAccessor
+    : public FastSmiOrObjectElementsAccessor<Subclass, KindTraits> {
+ public:
+  using BackingStore = typename KindTraits::BackingStore;
+
+  static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+                           uint32_t push_size) {
+    UNREACHABLE();
+  }
+
+  static void AddImpl(Handle<JSObject> object, uint32_t index,
+                      Handle<Object> value, PropertyAttributes attributes,
+                      uint32_t new_capacity) {
+    UNREACHABLE();
+  }
+
+  // TODO(duongn): refactor this to remove duplication with the sealed version.
+  // Consider using JSObject::NormalizeElements(). Also consider following the
+  // fast element logic instead of changing to dictionary mode.
+  static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+                            uint32_t length,
+                            Handle<FixedArrayBase> backing_store) {
+    uint32_t old_length = 0;
+    CHECK(array->length().ToArrayIndex(&old_length));
+    if (length == old_length) {
+      // Do nothing.
+      return;
+    }
+
+    // Transition to DICTIONARY_ELEMENTS.
+    // Convert to dictionary mode.
+    Handle<NumberDictionary> new_element_dictionary =
+        old_length == 0 ? isolate->factory()->empty_slow_element_dictionary()
+                        : array->GetElementsAccessor()->Normalize(array);
+
+    // Migrate map.
+    Handle<Map> new_map = Map::Copy(isolate, handle(array->map(), isolate),
+                                    "SlowCopyForSetLengthImpl");
+    new_map->set_is_extensible(false);
+    new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+    JSObject::MigrateToMap(isolate, array, new_map);
+
+    if (!new_element_dictionary.is_null()) {
+      array->set_elements(*new_element_dictionary);
+    }
+
+    if (array->elements() !=
+        ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
+      Handle<NumberDictionary> dictionary(array->element_dictionary(), isolate);
+      // Make sure we never go back to the fast case
+      array->RequireSlowElements(*dictionary);
+      JSObject::ApplyAttributesToDictionary(isolate, ReadOnlyRoots(isolate),
+                                            dictionary,
+                                            PropertyAttributes::NONE);
+    }
+
+    // Set length.
+    Handle<FixedArrayBase> new_backing_store(array->elements(), isolate);
+    DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
+                                              new_backing_store);
+  }
+};
+
+class FastPackedNonextensibleObjectElementsAccessor
+    : public FastNonextensibleObjectElementsAccessor<
+          FastPackedNonextensibleObjectElementsAccessor,
+          ElementsKindTraits<PACKED_NONEXTENSIBLE_ELEMENTS>> {};
+
+class FastHoleyNonextensibleObjectElementsAccessor
+    : public FastNonextensibleObjectElementsAccessor<
+          FastHoleyNonextensibleObjectElementsAccessor,
+          ElementsKindTraits<HOLEY_NONEXTENSIBLE_ELEMENTS>> {};
+
 template <typename Subclass, typename KindTraits>
 class FastSealedObjectElementsAccessor
     : public FastSmiOrObjectElementsAccessor<Subclass, KindTraits> {
@@ -2564,6 +2668,9 @@ class FastSealedObjectElementsAccessor
     UNREACHABLE();
   }
 
+  // TODO(duongn): refactor this to remove duplication with the nonextensible
+  // version. Consider using JSObject::NormalizeElements(). Also consider
+  // following the fast element logic instead of changing to dictionary mode.
   static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
                             uint32_t length,
                             Handle<FixedArrayBase> backing_store) {
@@ -2749,9 +2856,11 @@ class FastDoubleElementsAccessor
       case PACKED_ELEMENTS:
       case PACKED_FROZEN_ELEMENTS:
       case PACKED_SEALED_ELEMENTS:
+      case PACKED_NONEXTENSIBLE_ELEMENTS:
       case HOLEY_ELEMENTS:
       case HOLEY_FROZEN_ELEMENTS:
       case HOLEY_SEALED_ELEMENTS:
+      case HOLEY_NONEXTENSIBLE_ELEMENTS:
         CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
         break;
       case DICTIONARY_ELEMENTS:
@@ -3006,16 +3115,17 @@ class TypedElementsAccessor
     return AccessorClass::GetCapacityImpl(receiver, backing_store);
   }
 
-  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
-                                              KeyAccumulator* accumulator,
-                                              AddKeyConversion convert) {
+  V8_WARN_UNUSED_RESULT static ExceptionStatus AddElementsToKeyAccumulatorImpl(
+      Handle<JSObject> receiver, KeyAccumulator* accumulator,
+      AddKeyConversion convert) {
     Isolate* isolate = receiver->GetIsolate();
     Handle<FixedArrayBase> elements(receiver->elements(), isolate);
     uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
     for (uint32_t i = 0; i < length; i++) {
       Handle<Object> value = AccessorClass::GetInternalImpl(receiver, i);
-      accumulator->AddKey(value, convert);
+      RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(value, convert));
     }
+    return ExceptionStatus::kSuccess;
   }
 
   static Maybe<bool> CollectValuesOrEntriesImpl(
@@ -3886,17 +3996,18 @@ class SloppyArgumentsElementsAccessor
            ArgumentsAccessor::NumberOfElementsImpl(receiver, arguments);
   }
 
-  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
-                                              KeyAccumulator* accumulator,
-                                              AddKeyConversion convert) {
+  V8_WARN_UNUSED_RESULT static ExceptionStatus AddElementsToKeyAccumulatorImpl(
+      Handle<JSObject> receiver, KeyAccumulator* accumulator,
+      AddKeyConversion convert) {
     Isolate* isolate = accumulator->isolate();
     Handle<FixedArrayBase> elements(receiver->elements(), isolate);
     uint32_t length = GetCapacityImpl(*receiver, *elements);
     for (uint32_t entry = 0; entry < length; entry++) {
       if (!HasEntryImpl(isolate, *elements, entry)) continue;
       Handle<Object> value = GetImpl(isolate, *elements, entry);
-      accumulator->AddKey(value, convert);
+      RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(value, convert));
     }
+    return ExceptionStatus::kSuccess;
   }
 
   static bool HasEntryImpl(Isolate* isolate, FixedArrayBase parameters,
@@ -3986,9 +4097,9 @@ class SloppyArgumentsElementsAccessor
     UNREACHABLE();
   }
 
-  static void CollectElementIndicesImpl(Handle<JSObject> object,
-                                        Handle<FixedArrayBase> backing_store,
-                                        KeyAccumulator* keys) {
+  V8_WARN_UNUSED_RESULT static ExceptionStatus CollectElementIndicesImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+      KeyAccumulator* keys) {
     Isolate* isolate = keys->isolate();
     uint32_t nof_indices = 0;
     Handle<FixedArray> indices = isolate->factory()->NewFixedArray(
@@ -3998,8 +4109,9 @@ class SloppyArgumentsElementsAccessor
                                     ENUMERABLE_STRINGS, indices, &nof_indices);
     SortIndices(isolate, indices, nof_indices);
     for (uint32_t i = 0; i < nof_indices; i++) {
-      keys->AddKey(indices->get(i));
+      RETURN_FAILURE_IF_NOT_SUCCESSFUL(keys->AddKey(indices->get(i)));
     }
+    return ExceptionStatus::kSuccess;
   }
 
   static Handle<FixedArray> DirectCollectElementIndicesImpl(
@@ -4418,33 +4530,34 @@ class StringWrapperElementsAccessor
                                           attributes);
   }
 
-  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
-                                              KeyAccumulator* accumulator,
-                                              AddKeyConversion convert) {
+  V8_WARN_UNUSED_RESULT static ExceptionStatus AddElementsToKeyAccumulatorImpl(
+      Handle<JSObject> receiver, KeyAccumulator* accumulator,
+      AddKeyConversion convert) {
     Isolate* isolate = receiver->GetIsolate();
     Handle<String> string(GetString(*receiver), isolate);
     string = String::Flatten(isolate, string);
     uint32_t length = static_cast<uint32_t>(string->length());
     for (uint32_t i = 0; i < length; i++) {
-      accumulator->AddKey(
+      Handle<String> key =
           isolate->factory()->LookupSingleCharacterStringFromCode(
-              string->Get(i)),
-          convert);
+              string->Get(i));
+      RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(key, convert));
     }
-    BackingStoreAccessor::AddElementsToKeyAccumulatorImpl(receiver, accumulator,
-                                                          convert);
+    return BackingStoreAccessor::AddElementsToKeyAccumulatorImpl(
+        receiver, accumulator, convert);
   }
 
-  static void CollectElementIndicesImpl(Handle<JSObject> object,
-                                        Handle<FixedArrayBase> backing_store,
-                                        KeyAccumulator* keys) {
+  V8_WARN_UNUSED_RESULT static ExceptionStatus CollectElementIndicesImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+      KeyAccumulator* keys) {
     uint32_t length = GetString(*object).length();
     Factory* factory = keys->isolate()->factory();
     for (uint32_t i = 0; i < length; i++) {
-      keys->AddKey(factory->NewNumberFromUint(i));
+      RETURN_FAILURE_IF_NOT_SUCCESSFUL(
+          keys->AddKey(factory->NewNumberFromUint(i)));
     }
-    BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store,
-                                                    keys);
+    return BackingStoreAccessor::CollectElementIndicesImpl(object,
+                                                           backing_store, keys);
   }
 
   static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
@@ -4737,5 +4850,7 @@ Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
 ElementsAccessor** ElementsAccessor::elements_accessors_ = nullptr;
 
 #undef ELEMENTS_LIST
+#undef RETURN_NOTHING_IF_NOT_SUCCESSFUL
+#undef RETURN_FAILURE_IF_NOT_SUCCESSFUL
 }  // namespace internal
 }  // namespace v8
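
The two macros defined near the top of this file translate a failed ExceptionStatus into whichever return convention the caller uses: Nothing<bool>() for Maybe-returning functions, or the failed status itself for status-returning ones. A self-contained sketch with minimal stand-ins (std::optional plays the role of Maybe<bool>, and this ExceptionStatus is illustrative):

```cpp
// Self-contained sketch of the two early-return macros from above.
#include <optional>

enum class ExceptionStatus { kException, kSuccess };
constexpr bool Succeeded(ExceptionStatus s) {
  return s == ExceptionStatus::kSuccess;
}

#define RETURN_NOTHING_IF_NOT_SUCCESSFUL(call) \
  do {                                         \
    if (!Succeeded(call)) return std::nullopt; \
  } while (false)

#define RETURN_FAILURE_IF_NOT_SUCCESSFUL(call)                     \
  do {                                                             \
    ExceptionStatus status_enum_result = (call);                   \
    if (!Succeeded(status_enum_result)) return status_enum_result; \
  } while (false)

ExceptionStatus AddKey(int key) {
  return key >= 0 ? ExceptionStatus::kSuccess : ExceptionStatus::kException;
}

// Status-returning caller: forwards the failed status as-is.
ExceptionStatus CollectIndices(int count) {
  for (int i = 0; i < count; i++) {
    RETURN_FAILURE_IF_NOT_SUCCESSFUL(AddKey(i));
  }
  return ExceptionStatus::kSuccess;
}

// Maybe-returning caller: converts a failure into "nothing".
std::optional<bool> CountElements(int count) {
  RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectIndices(count));
  return true;  // stand-in for the computed result
}
```
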
diff --git a/deps/v8/src/objects/elements.h b/deps/v8/src/objects/elements.h
index a72a6b068e42e6..b7fcd907a3792f 100644
--- a/deps/v8/src/objects/elements.h
+++ b/deps/v8/src/objects/elements.h
@@ -69,12 +69,12 @@ class ElementsAccessor {
   // Copy all indices that have elements from |object| into the given
   // KeyAccumulator. For Dictionary-based element-kinds we filter out elements
   // whose PropertyAttribute match |filter|.
-  virtual void CollectElementIndices(Handle<JSObject> object,
-                                     Handle<FixedArrayBase> backing_store,
-                                     KeyAccumulator* keys) = 0;
+  V8_WARN_UNUSED_RESULT virtual ExceptionStatus CollectElementIndices(
+      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+      KeyAccumulator* keys) = 0;
 
-  inline void CollectElementIndices(Handle<JSObject> object,
-                                    KeyAccumulator* keys);
+  V8_WARN_UNUSED_RESULT inline ExceptionStatus CollectElementIndices(
+      Handle<JSObject> object, KeyAccumulator* keys);
 
   virtual Maybe<bool> CollectValuesOrEntries(
       Isolate* isolate, Handle<JSObject> object,
@@ -90,9 +90,9 @@ class ElementsAccessor {
       Handle<JSObject> object, Handle<FixedArray> keys,
       GetKeysConversion convert, PropertyFilter filter = ALL_PROPERTIES);
 
-  virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
-                                           KeyAccumulator* accumulator,
-                                           AddKeyConversion convert) = 0;
+  V8_WARN_UNUSED_RESULT virtual ExceptionStatus AddElementsToKeyAccumulator(
+      Handle<JSObject> receiver, KeyAccumulator* accumulator,
+      AddKeyConversion convert) = 0;
 
   virtual void TransitionElementsKind(Handle<JSObject> object,
                                       Handle<Map> map) = 0;
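
These two files follow one uniform pattern: key-collection helpers that used to return void now return an ExceptionStatus tagged V8_WARN_UNUSED_RESULT, and every call site forwards failure through RETURN_FAILURE_IF_NOT_SUCCESSFUL (the macro is #undef'd at the end of elements.cc above). A minimal sketch of the idiom, assuming a two-state ExceptionStatus enum; the failure enumerator's name is illustrative, since only kSuccess appears in these hunks:

    #include <cstdint>

    enum class ExceptionStatus : bool { kException = false, kSuccess = true };

    #define RETURN_FAILURE_IF_NOT_SUCCESSFUL(call)             \
      do {                                                     \
        ExceptionStatus status_enum_result = (call);           \
        if (status_enum_result != ExceptionStatus::kSuccess) { \
          return status_enum_result;                           \
        }                                                      \
      } while (false)

    // Stand-in for KeyAccumulator::AddKey, which can now report failure
    // instead of returning void.
    ExceptionStatus AddKey(uint32_t key) {
      return key != UINT32_MAX ? ExceptionStatus::kSuccess
                               : ExceptionStatus::kException;
    }

    ExceptionStatus CollectIndices(uint32_t length) {
      for (uint32_t i = 0; i < length; i++) {
        RETURN_FAILURE_IF_NOT_SUCCESSFUL(AddKey(i));
      }
      // Callers cannot silently drop this result: V8_WARN_UNUSED_RESULT
      // turns an ignored return value into a compiler warning.
      return ExceptionStatus::kSuccess;
    }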
diff --git a/deps/v8/src/objects/embedder-data-array-inl.h b/deps/v8/src/objects/embedder-data-array-inl.h
index 34f22b1115d986..6775fa807579f0 100644
--- a/deps/v8/src/objects/embedder-data-array-inl.h
+++ b/deps/v8/src/objects/embedder-data-array-inl.h
@@ -16,11 +16,9 @@
 namespace v8 {
 namespace internal {
 
-CAST_ACCESSOR(EmbedderDataArray)
+TQ_SMI_ACCESSORS(EmbedderDataArray, length)
 
-SMI_ACCESSORS(EmbedderDataArray, length, kLengthOffset)
-
-OBJECT_CONSTRUCTORS_IMPL(EmbedderDataArray, HeapObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(EmbedderDataArray)
 
 Address EmbedderDataArray::slots_start() {
   return FIELD_ADDR(*this, OffsetOfElementAt(0));
diff --git a/deps/v8/src/objects/embedder-data-array.h b/deps/v8/src/objects/embedder-data-array.h
index ba4fe25465fc1d..668d890817d7e1 100644
--- a/deps/v8/src/objects/embedder-data-array.h
+++ b/deps/v8/src/objects/embedder-data-array.h
@@ -8,7 +8,7 @@
 #include "src/common/globals.h"
 #include "src/handles/maybe-handles.h"
 #include "src/objects/heap-object.h"
-#include "torque-generated/field-offsets-tq.h"
+#include "torque-generated/class-definitions-tq.h"
 
 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"
@@ -20,16 +20,13 @@ namespace internal {
 // It's basically an "array of EmbedderDataSlots".
 // Note: if pointer compression is enabled, the embedder data slot also
 // contains a raw data part in addition to the tagged part.
-class EmbedderDataArray : public HeapObject {
+class EmbedderDataArray
+    : public TorqueGeneratedEmbedderDataArray<EmbedderDataArray, HeapObject> {
  public:
   // [length]: length of the array in embedder data slots.
   V8_INLINE int length() const;
   V8_INLINE void set_length(int value);
 
-  DECL_CAST(EmbedderDataArray)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
-                                TORQUE_GENERATED_EMBEDDER_DATA_ARRAY_FIELDS)
   // TODO(v8:8989): [torque] Support marker constants.
   static const int kHeaderSize = kSize;
 
@@ -64,7 +61,7 @@ class EmbedderDataArray : public HeapObject {
  private:
   STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
 
-  OBJECT_CONSTRUCTORS(EmbedderDataArray, HeapObject);
+  TQ_OBJECT_CONSTRUCTORS(EmbedderDataArray)
 };
 
 }  // namespace internal
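
The EmbedderDataArray conversion is the template for the JSCollection, JSGeneratorObject, JSBoundFunction, JSDate, and JSPromise changes below: hand-written DECL_CAST, DEFINE_FIELD_OFFSET_CONSTANTS, and OBJECT_CONSTRUCTORS boilerplate is replaced by a TorqueGenerated<Name> base emitted from the class's .tq declaration. Roughly what such a base is assumed to supply (the real code is generated into class-definitions-tq.h and is not part of this diff):

    // Torque-side declaration (assumed form) that drives the generation:
    //   extern class EmbedderDataArray extends HeapObject { length: Smi; }
    //
    // Rough shape of the generated base; illustrative, not actual output.
    template <class D, class P>
    class TorqueGeneratedEmbedderDataArray : public P {
     public:
      static constexpr int kLengthOffset = P::kHeaderSize;  // field offsets
      static constexpr int kSize = kLengthOffset + kTaggedSize;
      static D cast(Object object);  // replaces DECL_CAST
     protected:
      // TQ_OBJECT_CONSTRUCTORS(...) in the subclass forwards to this
      // checked constructor, replacing OBJECT_CONSTRUCTORS(...).
      inline explicit TorqueGeneratedEmbedderDataArray(Address ptr);
    };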
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 4f4826eab36e0d..2fbc48a95eded9 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -887,8 +887,7 @@ float FeedbackNexus::ComputeCallFrequency() {
 
   double const invocation_count = vector().invocation_count();
   double const call_count = GetCallCount();
-  if (invocation_count == 0) {
-    // Prevent division by 0.
+  if (invocation_count == 0.0) {  // Prevent division by 0.
     return 0.0f;
   }
   return static_cast<float>(call_count / invocation_count);
@@ -1094,6 +1093,12 @@ Name FeedbackNexus::GetName() const {
       return Name::cast(feedback->GetHeapObjectAssumeStrong());
     }
   }
+  if (IsStoreDataPropertyInLiteralKind(kind())) {
+    MaybeObject extra = GetFeedbackExtra();
+    if (IsPropertyNameFeedback(extra)) {
+      return Name::cast(extra->GetHeapObjectAssumeStrong());
+    }
+  }
   return Name();
 }
 
@@ -1180,7 +1185,8 @@ KeyedAccessStoreMode KeyedAccessStoreModeForBuiltin(int builtin_index) {
 }  // namespace
 
 KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
-  DCHECK(IsKeyedStoreICKind(kind()) || IsStoreInArrayLiteralICKind(kind()));
+  DCHECK(IsKeyedStoreICKind(kind()) || IsStoreInArrayLiteralICKind(kind()) ||
+         IsStoreDataPropertyInLiteralKind(kind()));
   KeyedAccessStoreMode mode = STANDARD_STORE;
   MapHandles maps;
   MaybeObjectHandles handlers;
@@ -1220,14 +1226,17 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
 
 IcCheckType FeedbackNexus::GetKeyType() const {
   DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
-         IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
+         IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()) ||
+         IsStoreDataPropertyInLiteralKind(kind()));
   MaybeObject feedback = GetFeedback();
   if (feedback == MaybeObject::FromObject(
                       *FeedbackVector::MegamorphicSentinel(GetIsolate()))) {
     return static_cast<IcCheckType>(
         Smi::ToInt(GetFeedbackExtra()->cast<Object>()));
   }
-  return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
+  MaybeObject maybe_name =
+      IsStoreDataPropertyInLiteralKind(kind()) ? GetFeedbackExtra() : feedback;
+  return IsPropertyNameFeedback(maybe_name) ? PROPERTY : ELEMENT;
 }
 
 BinaryOperationHint FeedbackNexus::GetBinaryOperationFeedback() const {
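
The three feedback-vector hunks encode a single rule: for StoreDataPropertyInLiteral slots, the property name (when present) lives in the extra feedback slot rather than the primary one, so GetName and GetKeyType must consult GetFeedbackExtra(). A sketch of the selection rule as a hypothetical helper:

    // Picks the slot that may hold a property name for this nexus.
    MaybeObject NameFeedbackSlot(const FeedbackNexus& nexus) {
      return IsStoreDataPropertyInLiteralKind(nexus.kind())
                 ? nexus.GetFeedbackExtra()  // name is in the extra slot
                 : nexus.GetFeedback();      // name is in the primary slot
    }
    // GetName then returns Name::cast(...) only if IsPropertyNameFeedback()
    // approves the value; GetKeyType maps name feedback to PROPERTY and
    // everything else to ELEMENT.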
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
index a6657634c824c1..4fae87774d4b53 100644
--- a/deps/v8/src/objects/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -107,18 +107,16 @@ class FieldIndex final {
       (kDescriptorIndexBitCount + 1 + kTaggedSizeLog2);
 
   // Index from beginning of object.
-  class OffsetBits : public BitField64<int, 0, kOffsetBitsSize> {};
-  class IsInObjectBits : public BitField64<bool, OffsetBits::kNext, 1> {};
-  class EncodingBits : public BitField64<Encoding, IsInObjectBits::kNext, 2> {};
+  using OffsetBits = BitField64<int, 0, kOffsetBitsSize>;
+  using IsInObjectBits = OffsetBits::Next<bool, 1>;
+  using EncodingBits = IsInObjectBits::Next<Encoding, 2>;
   // Number of inobject properties.
-  class InObjectPropertyBits
-      : public BitField64<int, EncodingBits::kNext, kDescriptorIndexBitCount> {
-  };
+  using InObjectPropertyBits =
+      EncodingBits::Next<int, kDescriptorIndexBitCount>;
   // Offset of first inobject property from beginning of object.
-  class FirstInobjectPropertyOffsetBits
-      : public BitField64<int, InObjectPropertyBits::kNext,
-                          kFirstInobjectPropertyOffsetBitCount> {};
-  STATIC_ASSERT(FirstInobjectPropertyOffsetBits::kNext <= 64);
+  using FirstInobjectPropertyOffsetBits =
+      InObjectPropertyBits::Next<int, kFirstInobjectPropertyOffsetBitCount>;
+  STATIC_ASSERT(FirstInobjectPropertyOffsetBits::kLastUsedBit < 64);
 
   uint64_t bit_field_;
 };
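
The BitField64 rewrite replaces subclass-with-explicit-start-bit declarations with the Next<> alias, which derives each field's start from its predecessor, and the closing assertion switches from kNext (the first free bit) to kLastUsedBit (the last occupied bit), so the old <= 64 and the new < 64 state the same bound. A compact sketch with illustrative widths:

    // Next<T, size> expands to BitField64<T, kShift + kSize, size>: each
    // field begins where the previous one ends, so resizing an earlier
    // field re-packs everything after it automatically.
    using OffsetBits = BitField64<int, 0, 17>;               // bits 0..16
    using IsInObjectBits = OffsetBits::Next<bool, 1>;        // bit 17
    using EncodingBits = IsInObjectBits::Next<Encoding, 2>;  // bits 18..19
    // kLastUsedBit == kShift + kSize - 1; the chain must fit in 64 bits.
    STATIC_ASSERT(EncodingBits::kLastUsedBit < 64);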
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index ca6f06e83cde8d..40290797f71230 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -247,7 +247,10 @@ class FixedDoubleArray : public FixedArrayBase {
 
   DECL_CAST(FixedDoubleArray)
 
-  // Maximally allowed length of a FixedArray.
+  // Start offset of elements.
+  static constexpr int kFloatsOffset = kHeaderSize;
+
+  // Maximally allowed length of a FixedDoubleArray.
   static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
   static_assert(Internals::IsValidSmi(kMaxLength),
                 "FixedDoubleArray maxLength not a Smi");
diff --git a/deps/v8/src/objects/function-kind.h b/deps/v8/src/objects/function-kind.h
index 4a1819813c631d..8e9c68e426c132 100644
--- a/deps/v8/src/objects/function-kind.h
+++ b/deps/v8/src/objects/function-kind.h
@@ -1,4 +1,3 @@
-
 // Copyright 2019 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -21,12 +20,12 @@ enum FunctionKind : uint8_t {
   // BEGIN default constructors
   kDefaultBaseConstructor,
   // END base constructors
-  // BEGIN derived cosntructors
+  // BEGIN derived constructors
   kDefaultDerivedConstructor,
   // END default constructors
   kDerivedConstructor,
-  // END derived costructors
-  // END class cosntructors
+  // END derived constructors
+  // END class constructors
   // END constructable functions.
   // BEGIN accessors
   kGetterFunction,
diff --git a/deps/v8/src/objects/function-syntax-kind.h b/deps/v8/src/objects/function-syntax-kind.h
new file mode 100644
index 00000000000000..074ccc4286f905
--- /dev/null
+++ b/deps/v8/src/objects/function-syntax-kind.h
@@ -0,0 +1,46 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FUNCTION_SYNTAX_KIND_H_
+#define V8_OBJECTS_FUNCTION_SYNTAX_KIND_H_
+
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+enum class FunctionSyntaxKind : uint8_t {
+  kAnonymousExpression,
+  kNamedExpression,
+  kDeclaration,
+  kAccessorOrMethod,
+  kWrapped,
+
+  kLastFunctionSyntaxKind = kWrapped,
+};
+
+inline const char* FunctionSyntaxKind2String(FunctionSyntaxKind kind) {
+  switch (kind) {
+    case FunctionSyntaxKind::kAnonymousExpression:
+      return "AnonymousExpression";
+    case FunctionSyntaxKind::kNamedExpression:
+      return "NamedExpression";
+    case FunctionSyntaxKind::kDeclaration:
+      return "Declaration";
+    case FunctionSyntaxKind::kAccessorOrMethod:
+      return "AccessorOrMethod";
+    case FunctionSyntaxKind::kWrapped:
+      return "Wrapped";
+  }
+  UNREACHABLE();
+}
+
+inline std::ostream& operator<<(std::ostream& os, FunctionSyntaxKind kind) {
+  return os << FunctionSyntaxKind2String(kind);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_OBJECTS_FUNCTION_SYNTAX_KIND_H_
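
Because the new header is self-contained, a short usage sketch shows what it provides: streaming a kind goes through FunctionSyntaxKind2String, and since the switch has no default case, adding an enumerator without a matching string triggers a compiler diagnostic instead of failing at runtime.

    #include <iostream>
    #include "src/objects/function-syntax-kind.h"

    void PrintKind() {
      using v8::internal::FunctionSyntaxKind;
      FunctionSyntaxKind kind = FunctionSyntaxKind::kWrapped;
      std::cout << kind << "\n";  // prints "Wrapped"
    }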
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index 3d70d71c8981ef..78e65ca2313df1 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -16,34 +16,31 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(HeapNumberBase, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(HeapNumber, HeapNumberBase)
-OBJECT_CONSTRUCTORS_IMPL(MutableHeapNumber, HeapNumberBase)
+OBJECT_CONSTRUCTORS_IMPL(HeapNumber, HeapObject)
 
 CAST_ACCESSOR(HeapNumber)
-CAST_ACCESSOR(MutableHeapNumber)
 
-double HeapNumberBase::value() const { return ReadField<double>(kValueOffset); }
+double HeapNumber::value() const { return ReadField<double>(kValueOffset); }
 
-void HeapNumberBase::set_value(double value) {
+void HeapNumber::set_value(double value) {
   WriteField<double>(kValueOffset, value);
 }
 
-uint64_t HeapNumberBase::value_as_bits() const {
+uint64_t HeapNumber::value_as_bits() const {
   // Bug(v8:8875): HeapNumber's double may be unaligned.
   return base::ReadUnalignedValue<uint64_t>(field_address(kValueOffset));
 }
 
-void HeapNumberBase::set_value_as_bits(uint64_t bits) {
+void HeapNumber::set_value_as_bits(uint64_t bits) {
   base::WriteUnalignedValue<uint64_t>(field_address(kValueOffset), bits);
 }
 
-int HeapNumberBase::get_exponent() {
+int HeapNumber::get_exponent() {
   return ((ReadField<int>(kExponentOffset) & kExponentMask) >> kExponentShift) -
          kExponentBias;
 }
 
-int HeapNumberBase::get_sign() {
+int HeapNumber::get_sign() {
   return ReadField<int>(kExponentOffset) & kSignMask;
 }
 
diff --git a/deps/v8/src/objects/heap-number.h b/deps/v8/src/objects/heap-number.h
index 15e821e966a749..9063f3d22c9084 100644
--- a/deps/v8/src/objects/heap-number.h
+++ b/deps/v8/src/objects/heap-number.h
@@ -14,10 +14,8 @@ namespace v8 {
 namespace internal {
 
 // The HeapNumber class describes heap allocated numbers that cannot be
-// represented in a Smi (small integer). MutableHeapNumber is the same, but its
-// number value can change over time (it is used only as property storage).
-// HeapNumberBase merely exists to avoid code duplication.
-class HeapNumberBase : public HeapObject {
+// represented in a Smi (small integer).
+class HeapNumber : public HeapObject {
  public:
   // [value]: number value.
   inline double value() const;
@@ -58,27 +56,10 @@ class HeapNumberBase : public HeapObject {
   static const int kMantissaBitsInTopWord = 20;
   static const int kNonMantissaBitsInTopWord = 12;
 
-  // Just to make the macro-generated constructor happy. Subclasses should
-  // perform their own proper type checking.
-  inline bool IsHeapNumberBase() const { return true; }
-
-  OBJECT_CONSTRUCTORS(HeapNumberBase, HeapObject);
-};
-
-class HeapNumber : public HeapNumberBase {
- public:
   DECL_CAST(HeapNumber)
   V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os);
 
-  OBJECT_CONSTRUCTORS(HeapNumber, HeapNumberBase);
-};
-
-class MutableHeapNumber : public HeapNumberBase {
- public:
-  DECL_CAST(MutableHeapNumber)
-  V8_EXPORT_PRIVATE void MutableHeapNumberPrint(std::ostream& os);
-
-  OBJECT_CONSTRUCTORS(MutableHeapNumber, HeapNumberBase);
+  OBJECT_CONSTRUCTORS(HeapNumber, HeapObject);
 };
 
 }  // namespace internal
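
With MutableHeapNumber folded into HeapNumber, the boxes backing double properties are ordinary HeapNumbers, and the js-objects.cc hunks below copy them with value_as_bits()/set_value_as_bits(). The bit-level accessors are the point: as the diff's own comments say, all bits of the double must be preserved, because routing the value through a plain C++ double could lose non-canonical NaN payloads such as the hole-NaN marker. A minimal sketch:

    // Clone the full bit pattern of one boxed double into another box;
    // value()/set_value() would be wrong here if the source holds a
    // non-canonical NaN (e.g. the hole-NaN sentinel).
    void CopyNumberBox(HeapNumber from, HeapNumber to) {
      to.set_value_as_bits(from.value_as_bits());
    }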
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index 79c953aa872f3e..9a855de95bcf0c 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -133,7 +133,6 @@ enum InstanceType : uint16_t {
 
   // "Data", objects that cannot contain non-map-word pointers to heap
   // objects.
-  MUTABLE_HEAP_NUMBER_TYPE,
   FOREIGN_TYPE,
   BYTE_ARRAY_TYPE,
   BYTECODE_ARRAY_TYPE,
@@ -181,8 +180,7 @@ enum InstanceType : uint16_t {
   CALLBACK_TASK_TYPE,
   PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
   PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
-  PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
-  FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE,  // LAST_MICROTASK_TYPE
+  PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,  // LAST_MICROTASK_TYPE
 
 #define MAKE_TORQUE_INSTANCE_TYPE(V) V,
   TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_INSTANCE_TYPE)
@@ -346,7 +344,7 @@ enum InstanceType : uint16_t {
   LAST_CONTEXT_TYPE = WITH_CONTEXT_TYPE,
   // Boundaries for testing if given HeapObject is a subclass of Microtask.
   FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
-  LAST_MICROTASK_TYPE = FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE,
+  LAST_MICROTASK_TYPE = PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
   // Boundaries of module record types
   FIRST_MODULE_TYPE = SOURCE_TEXT_MODULE_TYPE,
   LAST_MODULE_TYPE = SYNTHETIC_MODULE_TYPE,
@@ -462,7 +460,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
   V(JSWeakSet, JS_WEAK_SET_TYPE)                                             \
   V(LoadHandler, LOAD_HANDLER_TYPE)                                          \
   V(Map, MAP_TYPE)                                                           \
-  V(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)                             \
   V(NameDictionary, NAME_DICTIONARY_TYPE)                                    \
   V(NativeContext, NATIVE_CONTEXT_TYPE)                                      \
   V(NumberDictionary, NUMBER_DICTIONARY_TYPE)                                \
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
index fb0cf1652e8f42..6bbaa9bc1f4e60 100644
--- a/deps/v8/src/objects/js-collection-inl.h
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -19,17 +19,17 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(JSCollection, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSMap, JSCollection)
-OBJECT_CONSTRUCTORS_IMPL(JSSet, JSCollection)
-OBJECT_CONSTRUCTORS_IMPL(JSWeakCollection, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSWeakMap, JSWeakCollection)
-OBJECT_CONSTRUCTORS_IMPL(JSWeakSet, JSWeakCollection)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollection)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSMap)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSSet)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakCollection)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakMap)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakSet)
 
 // TODO(jkummerow): Move JSCollectionIterator to js-collection.h?
 // TODO(jkummerow): Introduce IsJSCollectionIterator() check? Or unchecked
 // version of OBJECT_CONSTRUCTORS_IMPL macro?
-JSCollectionIterator::JSCollectionIterator(Address ptr) : JSObject(ptr) {}
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollectionIterator)
 template <class Derived, class TableType>
 OrderedHashTableIterator<Derived, TableType>::OrderedHashTableIterator(
     Address ptr)
@@ -45,20 +45,8 @@ JSSetIterator::JSSetIterator(Address ptr)
   SLOW_DCHECK(IsJSSetIterator());
 }
 
-ACCESSORS(JSCollection, table, Object, kTableOffset)
-ACCESSORS(JSCollectionIterator, table, Object, kTableOffset)
-ACCESSORS(JSCollectionIterator, index, Object, kIndexOffset)
-
-ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
-
-CAST_ACCESSOR(JSCollection)
-CAST_ACCESSOR(JSSet)
 CAST_ACCESSOR(JSSetIterator)
-CAST_ACCESSOR(JSMap)
 CAST_ACCESSOR(JSMapIterator)
-CAST_ACCESSOR(JSWeakCollection)
-CAST_ACCESSOR(JSWeakMap)
-CAST_ACCESSOR(JSWeakSet)
 
 Object JSMapIterator::CurrentValue() {
   OrderedHashMap table = OrderedHashMap::cast(this->table());
diff --git a/deps/v8/src/objects/js-collection-iterator.h b/deps/v8/src/objects/js-collection-iterator.h
index c002294b017aa5..b193aa84cdde68 100644
--- a/deps/v8/src/objects/js-collection-iterator.h
+++ b/deps/v8/src/objects/js-collection-iterator.h
@@ -16,21 +16,13 @@
 namespace v8 {
 namespace internal {
 
-class JSCollectionIterator : public JSObject {
+class JSCollectionIterator
+    : public TorqueGeneratedJSCollectionIterator<JSCollectionIterator,
+                                                 JSObject> {
  public:
-  // [table]: the backing hash table mapping keys to values.
-  DECL_ACCESSORS(table, Object)
-
-  // [index]: The index into the data table.
-  DECL_ACCESSORS(index, Object)
-
   void JSCollectionIteratorPrint(std::ostream& os, const char* name);
-  DECL_VERIFIER(JSCollectionIterator)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                TORQUE_GENERATED_JSCOLLECTION_ITERATOR_FIELDS)
 
-  OBJECT_CONSTRUCTORS(JSCollectionIterator, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSCollectionIterator)
 };
 
 // OrderedHashTableIterator is an iterator that iterates over the keys and
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 0a856ca0628998..d1b2ae862a151e 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -17,39 +17,25 @@ namespace internal {
 class OrderedHashSet;
 class OrderedHashMap;
 
-class JSCollection : public JSObject {
+class JSCollection
+    : public TorqueGeneratedJSCollection<JSCollection, JSObject> {
  public:
-  DECL_CAST(JSCollection)
-
-  // [table]: the backing hash table
-  DECL_ACCESSORS(table, Object)
-
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                TORQUE_GENERATED_JSCOLLECTION_FIELDS)
-
   static const int kAddFunctionDescriptorIndex = 3;
 
-  DECL_VERIFIER(JSCollection)
-
-  OBJECT_CONSTRUCTORS(JSCollection, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSCollection)
 };
 
 // The JSSet describes EcmaScript Harmony sets
-class JSSet : public JSCollection {
+class JSSet : public TorqueGeneratedJSSet<JSSet, JSCollection> {
  public:
-  DECL_CAST(JSSet)
-
   static void Initialize(Handle<JSSet> set, Isolate* isolate);
   static void Clear(Isolate* isolate, Handle<JSSet> set);
 
   // Dispatched behavior.
   DECL_PRINTER(JSSet)
   DECL_VERIFIER(JSSet)
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSCollection::kHeaderSize,
-                                TORQUE_GENERATED_JSWEAK_SET_FIELDS)
 
-  OBJECT_CONSTRUCTORS(JSSet, JSCollection);
+  TQ_OBJECT_CONSTRUCTORS(JSSet)
 };
 
 class JSSetIterator
@@ -66,20 +52,16 @@ class JSSetIterator
 };
 
 // The JSMap describes EcmaScript Harmony maps
-class JSMap : public JSCollection {
+class JSMap : public TorqueGeneratedJSMap<JSMap, JSCollection> {
  public:
-  DECL_CAST(JSMap)
-
   static void Initialize(Handle<JSMap> map, Isolate* isolate);
   static void Clear(Isolate* isolate, Handle<JSMap> map);
 
   // Dispatched behavior.
   DECL_PRINTER(JSMap)
   DECL_VERIFIER(JSMap)
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSCollection::kHeaderSize,
-                                TORQUE_GENERATED_JSWEAK_MAP_FIELDS)
 
-  OBJECT_CONSTRUCTORS(JSMap, JSCollection);
+  TQ_OBJECT_CONSTRUCTORS(JSMap)
 };
 
 class JSMapIterator
@@ -100,13 +82,9 @@ class JSMapIterator
 };
 
 // Base class for both JSWeakMap and JSWeakSet
-class JSWeakCollection : public JSObject {
+class JSWeakCollection
+    : public TorqueGeneratedJSWeakCollection<JSWeakCollection, JSObject> {
  public:
-  DECL_CAST(JSWeakCollection)
-
-  // [table]: the backing hash table mapping keys to values.
-  DECL_ACCESSORS(table, Object)
-
   static void Initialize(Handle<JSWeakCollection> collection, Isolate* isolate);
   V8_EXPORT_PRIVATE static void Set(Handle<JSWeakCollection> collection,
                                     Handle<Object> key, Handle<Object> value,
@@ -116,11 +94,6 @@ class JSWeakCollection : public JSObject {
   static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
                                     int max_entries);
 
-  DECL_VERIFIER(JSWeakCollection)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                TORQUE_GENERATED_JSWEAK_COLLECTION_FIELDS)
-
   static const int kAddFunctionDescriptorIndex = 3;
 
   // Iterates the function object according to the visiting policy.
@@ -131,37 +104,29 @@ class JSWeakCollection : public JSObject {
 
   static const int kSizeOfAllWeakCollections = kHeaderSize;
 
-  OBJECT_CONSTRUCTORS(JSWeakCollection, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSWeakCollection)
 };
 
 // The JSWeakMap describes EcmaScript Harmony weak maps
-class JSWeakMap : public JSWeakCollection {
+class JSWeakMap : public TorqueGeneratedJSWeakMap<JSWeakMap, JSWeakCollection> {
  public:
-  DECL_CAST(JSWeakMap)
-
   // Dispatched behavior.
   DECL_PRINTER(JSWeakMap)
   DECL_VERIFIER(JSWeakMap)
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSWeakCollection::kHeaderSize,
-                                TORQUE_GENERATED_JSWEAK_MAP_FIELDS)
   STATIC_ASSERT(kSize == kSizeOfAllWeakCollections);
-  OBJECT_CONSTRUCTORS(JSWeakMap, JSWeakCollection);
+  TQ_OBJECT_CONSTRUCTORS(JSWeakMap)
 };
 
 // The JSWeakSet describes EcmaScript Harmony weak sets
-class JSWeakSet : public JSWeakCollection {
+class JSWeakSet : public TorqueGeneratedJSWeakSet<JSWeakSet, JSWeakCollection> {
  public:
-  DECL_CAST(JSWeakSet)
-
   // Dispatched behavior.
   DECL_PRINTER(JSWeakSet)
   DECL_VERIFIER(JSWeakSet)
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSWeakCollection::kHeaderSize,
-                                TORQUE_GENERATED_JSWEAK_SET_FIELDS)
-  STATIC_ASSERT(kSize == kSizeOfAllWeakCollections);
 
-  OBJECT_CONSTRUCTORS(JSWeakSet, JSWeakCollection);
+  STATIC_ASSERT(kSize == kSizeOfAllWeakCollections);
+  TQ_OBJECT_CONSTRUCTORS(JSWeakSet)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index db7ba27312445a..29fcfb0d7cbfad 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -8,8 +8,11 @@
 
 #include "src/objects/js-date-time-format.h"
 
+#include <algorithm>
+#include <map>
 #include <memory>
 #include <string>
+#include <utility>
 #include <vector>
 
 #include "src/date/date.h"
@@ -1191,6 +1194,12 @@ class DateTimePatternGeneratorCache {
     UErrorCode status = U_ZERO_ERROR;
     map_[key].reset(icu::DateTimePatternGenerator::createInstance(
         icu::Locale(key.c_str()), status));
+    // Fall back to "root".
+    if (U_FAILURE(status)) {
+      status = U_ZERO_ERROR;
+      map_[key].reset(
+          icu::DateTimePatternGenerator::createInstance("root", status));
+    }
     CHECK(U_SUCCESS(status));
     return map_[key]->clone();
   }
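
One ICU convention carries this fix: most ICU4C entry points return immediately when the incoming UErrorCode already holds a failure, so status must be reset to U_ZERO_ERROR before the fallback call can take effect. A standalone sketch of the retry-with-root pattern, assuming ICU4C's DateTimePatternGenerator API:

    #include <memory>
    #include <string>
    #include "unicode/dtptngen.h"
    #include "unicode/locid.h"

    std::unique_ptr<icu::DateTimePatternGenerator> CreateGenerator(
        const std::string& locale) {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::DateTimePatternGenerator> gen(
          icu::DateTimePatternGenerator::createInstance(
              icu::Locale(locale.c_str()), status));
      if (U_FAILURE(status)) {
        status = U_ZERO_ERROR;  // reset, or the retry is a no-op
        gen.reset(
            icu::DateTimePatternGenerator::createInstance("root", status));
      }
      if (U_FAILURE(status)) return nullptr;
      return gen;
    }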
diff --git a/deps/v8/src/objects/js-generator-inl.h b/deps/v8/src/objects/js-generator-inl.h
index d0fe2cd90e1804..e3c57198c443fc 100644
--- a/deps/v8/src/objects/js-generator-inl.h
+++ b/deps/v8/src/objects/js-generator-inl.h
@@ -16,29 +16,15 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(JSGeneratorObject, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSAsyncFunctionObject, JSGeneratorObject)
-OBJECT_CONSTRUCTORS_IMPL(JSAsyncGeneratorObject, JSGeneratorObject)
-OBJECT_CONSTRUCTORS_IMPL(AsyncGeneratorRequest, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSGeneratorObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFunctionObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncGeneratorObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(AsyncGeneratorRequest)
 
-CAST_ACCESSOR(JSAsyncFunctionObject)
-CAST_ACCESSOR(JSAsyncGeneratorObject)
-CAST_ACCESSOR(JSGeneratorObject)
-CAST_ACCESSOR(AsyncGeneratorRequest)
+TQ_SMI_ACCESSORS(JSGeneratorObject, resume_mode)
+TQ_SMI_ACCESSORS(JSGeneratorObject, continuation)
 
-ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
-ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
-ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
-ACCESSORS(JSGeneratorObject, input_or_debug_pos, Object, kInputOrDebugPosOffset)
-SMI_ACCESSORS(JSGeneratorObject, resume_mode, kResumeModeOffset)
-SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
-ACCESSORS(JSGeneratorObject, parameters_and_registers, FixedArray,
-          kParametersAndRegistersOffset)
-
-ACCESSORS(AsyncGeneratorRequest, next, Object, kNextOffset)
-SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode, kResumeModeOffset)
-ACCESSORS(AsyncGeneratorRequest, value, Object, kValueOffset)
-ACCESSORS(AsyncGeneratorRequest, promise, Object, kPromiseOffset)
+TQ_SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode)
 
 bool JSGeneratorObject::is_suspended() const {
   DCHECK_LT(kGeneratorExecuting, 0);
@@ -54,10 +40,7 @@ bool JSGeneratorObject::is_executing() const {
   return continuation() == kGeneratorExecuting;
 }
 
-ACCESSORS(JSAsyncFunctionObject, promise, JSPromise, kPromiseOffset)
-
-ACCESSORS(JSAsyncGeneratorObject, queue, HeapObject, kQueueOffset)
-SMI_ACCESSORS(JSAsyncGeneratorObject, is_awaiting, kIsAwaitingOffset)
+TQ_SMI_ACCESSORS(JSAsyncGeneratorObject, is_awaiting)
 
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/objects/js-generator.h b/deps/v8/src/objects/js-generator.h
index 0e99d824ab62f8..a56ea917ebdc7b 100644
--- a/deps/v8/src/objects/js-generator.h
+++ b/deps/v8/src/objects/js-generator.h
@@ -17,24 +17,9 @@ namespace internal {
 // Forward declarations.
 class JSPromise;
 
-class JSGeneratorObject : public JSObject {
+class JSGeneratorObject
+    : public TorqueGeneratedJSGeneratorObject<JSGeneratorObject, JSObject> {
  public:
-  // [function]: The function corresponding to this generator object.
-  DECL_ACCESSORS(function, JSFunction)
-
-  // [context]: The context of the suspended computation.
-  DECL_ACCESSORS(context, Context)
-
-  // [receiver]: The receiver of the suspended computation.
-  DECL_ACCESSORS(receiver, Object)
-
-  // [input_or_debug_pos]
-  // For executing generators: the most recent input value.
-  // For suspended generators: debug information (bytecode offset).
-  // There is currently no need to remember the most recent input value for a
-  // suspended generator.
-  DECL_ACCESSORS(input_or_debug_pos, Object)
-
   // [resume_mode]: The most recent resume mode.
   enum ResumeMode { kNext, kReturn, kThrow };
   DECL_INT_ACCESSORS(resume_mode)
@@ -54,84 +39,50 @@ class JSGeneratorObject : public JSObject {
   // is suspended.
   int source_position() const;
 
-  // [parameters_and_registers]: Saved interpreter register file.
-  DECL_ACCESSORS(parameters_and_registers, FixedArray)
-
-  DECL_CAST(JSGeneratorObject)
-
   // Dispatched behavior.
   DECL_PRINTER(JSGeneratorObject)
-  DECL_VERIFIER(JSGeneratorObject)
 
   // Magic sentinel values for the continuation.
   static const int kGeneratorExecuting = -2;
   static const int kGeneratorClosed = -1;
 
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                TORQUE_GENERATED_JSGENERATOR_OBJECT_FIELDS)
-
-  OBJECT_CONSTRUCTORS(JSGeneratorObject, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSGeneratorObject)
 };
 
-class JSAsyncFunctionObject : public JSGeneratorObject {
+class JSAsyncFunctionObject
+    : public TorqueGeneratedJSAsyncFunctionObject<JSAsyncFunctionObject,
+                                                  JSGeneratorObject> {
  public:
-  DECL_CAST(JSAsyncFunctionObject)
-
   // Dispatched behavior.
   DECL_VERIFIER(JSAsyncFunctionObject)
 
-  // [promise]: The promise of the async function.
-  DECL_ACCESSORS(promise, JSPromise)
-
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSGeneratorObject::kSize,
-                                TORQUE_GENERATED_JSASYNC_FUNCTION_OBJECT_FIELDS)
-
-  OBJECT_CONSTRUCTORS(JSAsyncFunctionObject, JSGeneratorObject);
+  TQ_OBJECT_CONSTRUCTORS(JSAsyncFunctionObject)
 };
 
-class JSAsyncGeneratorObject : public JSGeneratorObject {
+class JSAsyncGeneratorObject
+    : public TorqueGeneratedJSAsyncGeneratorObject<JSAsyncGeneratorObject,
+                                                   JSGeneratorObject> {
  public:
-  DECL_CAST(JSAsyncGeneratorObject)
-
   // Dispatched behavior.
   DECL_VERIFIER(JSAsyncGeneratorObject)
 
-  // [queue]
-  // Pointer to the head of a singly linked list of AsyncGeneratorRequest, or
-  // undefined.
-  DECL_ACCESSORS(queue, HeapObject)
-
   // [is_awaiting]
   // Whether or not the generator is currently awaiting.
   DECL_INT_ACCESSORS(is_awaiting)
 
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      JSGeneratorObject::kSize,
-      TORQUE_GENERATED_JSASYNC_GENERATOR_OBJECT_FIELDS)
-#undef JS_ASYNC_GENERATOR_FIELDS
-
-  OBJECT_CONSTRUCTORS(JSAsyncGeneratorObject, JSGeneratorObject);
+  TQ_OBJECT_CONSTRUCTORS(JSAsyncGeneratorObject)
 };
 
-class AsyncGeneratorRequest : public Struct {
+class AsyncGeneratorRequest
+    : public TorqueGeneratedAsyncGeneratorRequest<AsyncGeneratorRequest,
+                                                  Struct> {
  public:
-  // Holds an AsyncGeneratorRequest, or Undefined.
-  DECL_ACCESSORS(next, Object)
   DECL_INT_ACCESSORS(resume_mode)
-  DECL_ACCESSORS(value, Object)
-  DECL_ACCESSORS(promise, Object)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
-                                TORQUE_GENERATED_ASYNC_GENERATOR_REQUEST_FIELDS)
 
-  DECL_CAST(AsyncGeneratorRequest)
   DECL_PRINTER(AsyncGeneratorRequest)
   DECL_VERIFIER(AsyncGeneratorRequest)
 
-  OBJECT_CONSTRUCTORS(AsyncGeneratorRequest, Struct);
+  TQ_OBJECT_CONSTRUCTORS(AsyncGeneratorRequest)
 };
 
 }  // namespace internal
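
The generator headers shed boilerplate but keep the continuation protocol: continuation() is a Smi holding a suspend-point id when non-negative, with the two negative sentinels reserved for special states; this is what the is_suspended()/is_executing() predicates in js-generator-inl.h decode. Sketch of the encoding:

    // Continuation encoding for JSGeneratorObject (see the sentinels above):
    //   >= 0 : id of the bytecode suspend point to resume at
    //   -1   : kGeneratorClosed    (completed or returned)
    //   -2   : kGeneratorExecuting (currently running)
    bool IsSuspended(int continuation) { return continuation >= 0; }
    bool IsClosed(int continuation) { return continuation == -1; }
    bool IsExecuting(int continuation) { return continuation == -2; }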
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index d1e3ef4d0cbd6f..ff564975d6f4a1 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -172,16 +172,21 @@ std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
   status = U_ZERO_ERROR;
   // See the list in ecma402 #sec-issanctionedsimpleunitidentifier
   std::set<std::string> sanctioned(
-      {"acre",       "bit",         "byte",      "celsius",
-       "centimeter", "day",         "degree",    "fahrenheit",
-       "foot",       "gigabit",     "gigabyte",  "gram",
-       "hectare",    "hour",        "inch",      "kilobit",
-       "kilobyte",   "kilogram",    "kilometer", "megabit",
-       "megabyte",   "meter",       "mile",      "mile-scandinavian",
-       "millimeter", "millisecond", "minute",    "month",
-       "ounce",      "percent",     "petabyte",  "pound",
-       "second",     "stone",       "terabit",   "terabyte",
-       "week",       "yard",        "year"});
+      {"acre",       "bit",        "byte",
+       "celsius",    "centimeter", "day",
+       "degree",     "fahrenheit", "fluid-ounce",
+       "foot",       "gallon",     "gigabit",
+       "gigabyte",   "gram",       "hectare",
+       "hour",       "inch",       "kilobit",
+       "kilobyte",   "kilogram",   "kilometer",
+       "liter",      "megabit",    "megabyte",
+       "meter",      "mile",       "mile-scandinavian",
+       "millimeter", "milliliter", "millisecond",
+       "minute",     "month",      "ounce",
+       "percent",    "petabyte",   "pound",
+       "second",     "stone",      "terabit",
+       "terabyte",   "week",       "yard",
+       "year"});
   std::vector<icu::MeasureUnit> units(total);
   total = icu::MeasureUnit::getAvailable(units.data(), total, status);
   CHECK(U_SUCCESS(status));
@@ -1031,7 +1036,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
             isolate,
             NewTypeError(MessageTemplate::kInvalidUnit,
                          factory->NewStringFromStaticChars("Intl.NumberFormat"),
-                         factory->NewStringFromStaticChars("")),
+                         factory->empty_string()),
             JSNumberFormat);
       }
 
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 10672d4443872a..f8fe069d3dddd7 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -31,9 +31,9 @@ namespace internal {
 
 OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
 TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSDate, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSDate)
 OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSObject)
 OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSObject)
 TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy)
@@ -44,9 +44,6 @@ OBJECT_CONSTRUCTORS_IMPL(JSStringIterator, JSObject)
 
 NEVER_READ_ONLY_SPACE_IMPL(JSReceiver)
 
-CAST_ACCESSOR(JSAsyncFromSyncIterator)
-CAST_ACCESSOR(JSBoundFunction)
-CAST_ACCESSOR(JSDate)
 CAST_ACCESSOR(JSFunction)
 CAST_ACCESSOR(JSGlobalObject)
 CAST_ACCESSOR(JSIteratorResult)
@@ -369,10 +366,10 @@ void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
 
 void JSObject::FastPropertyAtPut(FieldIndex index, Object value) {
   if (IsUnboxedDoubleField(index)) {
-    DCHECK(value.IsMutableHeapNumber());
+    DCHECK(value.IsHeapNumber());
     // Ensure that all bits of the double value are preserved.
-    RawFastDoublePropertyAsBitsAtPut(
-        index, MutableHeapNumber::cast(value).value_as_bits());
+    RawFastDoublePropertyAsBitsAtPut(index,
+                                     HeapNumber::cast(value).value_as_bits());
   } else {
     RawFastPropertyAtPut(index, value);
   }
@@ -401,7 +398,7 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
     if (IsUnboxedDoubleField(index)) {
       RawFastDoublePropertyAsBitsAtPut(index, bits);
     } else {
-      auto box = MutableHeapNumber::cast(RawFastPropertyAt(index));
+      auto box = HeapNumber::cast(RawFastPropertyAt(index));
       box.set_value_as_bits(bits);
     }
   } else {
@@ -450,11 +447,6 @@ void JSObject::InitializeBody(Map map, int start_offset,
   }
 }
 
-ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
-          kBoundTargetFunctionOffset)
-ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
-ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
-
 ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
 
 ACCESSORS(JSGlobalObject, native_context, NativeContext, kNativeContextOffset)
@@ -712,16 +704,6 @@ void JSFunction::ResetIfBytecodeFlushed() {
   }
 }
 
-ACCESSORS(JSDate, value, Object, kValueOffset)
-ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
-ACCESSORS(JSDate, year, Object, kYearOffset)
-ACCESSORS(JSDate, month, Object, kMonthOffset)
-ACCESSORS(JSDate, day, Object, kDayOffset)
-ACCESSORS(JSDate, weekday, Object, kWeekdayOffset)
-ACCESSORS(JSDate, hour, Object, kHourOffset)
-ACCESSORS(JSDate, min, Object, kMinOffset)
-ACCESSORS(JSDate, sec, Object, kSecOffset)
-
 bool JSMessageObject::DidEnsureSourcePositionsAvailable() const {
   return shared_info().IsUndefined();
 }
@@ -774,7 +756,8 @@ DEF_GETTER(JSObject, GetElementsKind, ElementsKind) {
       DCHECK(fixed_array.IsFixedArray(isolate));
       DCHECK(fixed_array.IsNumberDictionary(isolate));
     } else {
-      DCHECK(kind > DICTIONARY_ELEMENTS || IsFrozenOrSealedElementsKind(kind));
+      DCHECK(kind > DICTIONARY_ELEMENTS ||
+             IsAnyNonextensibleElementsKind(kind));
     }
     DCHECK(
         !IsSloppyArgumentsElementsKind(kind) ||
@@ -824,14 +807,18 @@ DEF_GETTER(JSObject, HasPackedElements, bool) {
   return GetElementsKind(isolate) == PACKED_ELEMENTS;
 }
 
-DEF_GETTER(JSObject, HasFrozenOrSealedElements, bool) {
-  return IsFrozenOrSealedElementsKind(GetElementsKind(isolate));
+DEF_GETTER(JSObject, HasAnyNonextensibleElements, bool) {
+  return IsAnyNonextensibleElementsKind(GetElementsKind(isolate));
 }
 
 DEF_GETTER(JSObject, HasSealedElements, bool) {
   return IsSealedElementsKind(GetElementsKind(isolate));
 }
 
+DEF_GETTER(JSObject, HasNonextensibleElements, bool) {
+  return IsNonextensibleElementsKind(GetElementsKind(isolate));
+}
+
 DEF_GETTER(JSObject, HasFastArgumentsElements, bool) {
   return GetElementsKind(isolate) == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
 }
@@ -1020,10 +1007,6 @@ inline int JSGlobalProxy::SizeWithEmbedderFields(int embedder_field_count) {
 ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
 ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
 
-ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
-          kSyncIteratorOffset)
-ACCESSORS(JSAsyncFromSyncIterator, next, Object, kNextOffset)
-
 ACCESSORS(JSStringIterator, string, String, kStringOffset)
 SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
 
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 5c4db162067fa4..3666f5afbe2dc0 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -1509,20 +1509,27 @@ namespace {
 
 Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
                                                  PropertyDescriptor* desc) {
+  Handle<InterceptorInfo> interceptor;
+
   if (it->state() == LookupIterator::ACCESS_CHECK) {
     if (it->HasAccess()) {
       it->Next();
-    } else if (!JSObject::AllCanRead(it) ||
-               it->state() != LookupIterator::INTERCEPTOR) {
-      it->Restart();
-      return Just(false);
+    } else {
+      interceptor = it->GetInterceptorForFailedAccessCheck();
+      if (interceptor.is_null() &&
+          (!JSObject::AllCanRead(it) ||
+           it->state() != LookupIterator::INTERCEPTOR)) {
+        it->Restart();
+        return Just(false);
+      }
     }
   }
 
-  if (it->state() != LookupIterator::INTERCEPTOR) return Just(false);
-
+  if (it->state() == LookupIterator::INTERCEPTOR) {
+    interceptor = it->GetInterceptor();
+  }
+  if (interceptor.is_null()) return Just(false);
   Isolate* isolate = it->isolate();
-  Handle<InterceptorInfo> interceptor = it->GetInterceptor();
   if (interceptor->descriptor().IsUndefined(isolate)) return Just(false);
 
   Handle<Object> result;
@@ -1607,12 +1614,14 @@ Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
     // 6. Else X is an accessor property, so
     Handle<AccessorPair> accessors =
         Handle<AccessorPair>::cast(it->GetAccessors());
+    Handle<NativeContext> native_context =
+        it->GetHolder<JSReceiver>()->GetCreationContext();
     // 6a. Set D.[[Get]] to the value of X's [[Get]] attribute.
-    desc->set_get(
-        AccessorPair::GetComponent(isolate, accessors, ACCESSOR_GETTER));
+    desc->set_get(AccessorPair::GetComponent(isolate, native_context, accessors,
+                                             ACCESSOR_GETTER));
     // 6b. Set D.[[Set]] to the value of X's [[Set]] attribute.
-    desc->set_set(
-        AccessorPair::GetComponent(isolate, accessors, ACCESSOR_SETTER));
+    desc->set_set(AccessorPair::GetComponent(isolate, native_context, accessors,
+                                             ACCESSOR_SETTER));
   }
 
   // 7. Set D.[[Enumerable]] to the value of X's [[Enumerable]] attribute.
@@ -2039,7 +2048,7 @@ MaybeHandle<JSObject> JSObject::ObjectCreate(Isolate* isolate,
 void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
   DCHECK(object->HasSmiOrObjectElements() ||
          object->HasFastStringWrapperElements() ||
-         object->HasFrozenOrSealedElements());
+         object->HasAnyNonextensibleElements());
   FixedArray raw_elems = FixedArray::cast(object->elements());
   Isolate* isolate = object->GetIsolate();
   if (raw_elems.map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) return;
@@ -2622,12 +2631,12 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
         FieldIndex::ForDescriptor(isolate, *new_map, new_map->LastAdded());
     if (index.is_inobject() || index.outobject_array_index() <
                                    object->property_array(isolate).length()) {
-      // We still need to allocate MutableHeapNumbers for double fields
+      // We still need to allocate HeapNumbers for double fields
       // if either double field unboxing is disabled or the double field
       // is in the PropertyArray backing store (where we don't support
       // double field unboxing).
       if (index.is_double() && !new_map->IsUnboxedDoubleField(isolate, index)) {
-        auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+        auto value = isolate->factory()->NewHeapNumberWithHoleNaN();
         object->RawFastPropertyAtPut(index, *value);
       }
       object->synchronized_set_map(*new_map);
@@ -2644,7 +2653,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
     // Properly initialize newly added property.
     Handle<Object> value;
     if (details.representation().IsDouble()) {
-      value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+      value = isolate->factory()->NewHeapNumberWithHoleNaN();
     } else {
       value = isolate->factory()->uninitialized_value();
     }
@@ -2708,7 +2717,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
         // must already be prepared for data of certain type.
         DCHECK(!details.representation().IsNone());
         if (details.representation().IsDouble()) {
-          value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+          value = isolate->factory()->NewHeapNumberWithHoleNaN();
         } else {
           value = isolate->factory()->uninitialized_value();
         }
@@ -2722,11 +2731,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
       FieldIndex index = FieldIndex::ForDescriptor(isolate, *old_map, i);
       if (object->IsUnboxedDoubleField(isolate, index)) {
         uint64_t old_bits = object->RawFastDoublePropertyAsBitsAt(index);
-        if (representation.IsDouble()) {
-          value = isolate->factory()->NewMutableHeapNumberFromBits(old_bits);
-        } else {
-          value = isolate->factory()->NewHeapNumberFromBits(old_bits);
-        }
+        value = isolate->factory()->NewHeapNumberFromBits(old_bits);
       } else {
         value = handle(object->RawFastPropertyAt(isolate, index), isolate);
         if (!old_representation.IsDouble() && representation.IsDouble()) {
@@ -2754,7 +2759,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
     DCHECK_EQ(kData, details.kind());
     Handle<Object> value;
     if (details.representation().IsDouble()) {
-      value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+      value = isolate->factory()->NewHeapNumberWithHoleNaN();
     } else {
       value = isolate->factory()->uninitialized_value();
     }
@@ -2784,10 +2789,10 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
     // Can't use JSObject::FastPropertyAtPut() because proper map was not set
     // yet.
     if (new_map->IsUnboxedDoubleField(isolate, index)) {
-      DCHECK(value.IsMutableHeapNumber(isolate));
+      DCHECK(value.IsHeapNumber(isolate));
       // Ensure that all bits of the double value are preserved.
       object->RawFastDoublePropertyAsBitsAtPut(
-          index, MutableHeapNumber::cast(value).value_as_bits());
+          index, HeapNumber::cast(value).value_as_bits());
       if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
         // Transition from tagged to untagged slot.
         heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
@@ -2859,8 +2864,8 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
         } else {
           value = handle(object->RawFastPropertyAt(isolate, index), isolate);
           if (details.representation().IsDouble()) {
-            DCHECK(value->IsMutableHeapNumber(isolate));
-            double old_value = Handle<MutableHeapNumber>::cast(value)->value();
+            DCHECK(value->IsHeapNumber(isolate));
+            double old_value = Handle<HeapNumber>::cast(value)->value();
             value = isolate->factory()->NewHeapNumber(old_value);
           }
         }
@@ -3048,7 +3053,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
       if (!representation.IsDouble()) continue;
       FieldIndex index = FieldIndex::ForDescriptor(*map, i);
       if (map->IsUnboxedDoubleField(index)) continue;
-      auto box = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+      auto box = isolate->factory()->NewHeapNumberWithHoleNaN();
       if (index.is_inobject()) {
         storage->set(index.property_index(), *box);
       } else {
@@ -3464,7 +3469,8 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
 
   DCHECK(object->HasSmiOrObjectElements() || object->HasDoubleElements() ||
          object->HasFastArgumentsElements() ||
-         object->HasFastStringWrapperElements() || object->HasSealedElements());
+         object->HasFastStringWrapperElements() ||
+         object->HasSealedElements() || object->HasNonextensibleElements());
 
   Handle<NumberDictionary> dictionary =
       object->GetElementsAccessor()->Normalize(object);
@@ -3637,6 +3643,7 @@ bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
   }
   if (IsFrozenElementsKind(kind)) return true;
   if (IsSealedElementsKind(kind) && level != FROZEN) return true;
+  if (IsNonextensibleElementsKind(kind) && level == NONE) return true;
 
   ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
   // Only DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS have
@@ -3795,9 +3802,9 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
   if (attrs == NONE && !object->map().is_extensible()) return Just(true);
   {
     ElementsKind old_elements_kind = object->map().elements_kind();
+    if (IsFrozenElementsKind(old_elements_kind)) return Just(true);
     if (attrs != FROZEN && IsSealedElementsKind(old_elements_kind))
       return Just(true);
-    if (old_elements_kind == PACKED_FROZEN_ELEMENTS) return Just(true);
   }
 
   if (object->IsJSGlobalProxy()) {
@@ -3842,8 +3849,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
   // elements kind change in one go. If seal or freeze with Smi or Double
   // elements kind, we will transition to Object elements kind first to make
   // sure of valid element access.
-  if (FLAG_enable_sealed_frozen_elements_kind &&
-      (attrs == SEALED || attrs == FROZEN)) {
+  if (FLAG_enable_sealed_frozen_elements_kind) {
     switch (object->map().elements_kind()) {
       case PACKED_SMI_ELEMENTS:
       case PACKED_DOUBLE_ELEMENTS:
@@ -3871,9 +3877,9 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
     DCHECK(transition_map->has_dictionary_elements() ||
            transition_map->has_typed_array_elements() ||
            transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS ||
-           transition_map->has_frozen_or_sealed_elements());
+           transition_map->has_any_nonextensible_elements());
     DCHECK(!transition_map->is_extensible());
-    if (!transition_map->has_frozen_or_sealed_elements()) {
+    if (!transition_map->has_any_nonextensible_elements()) {
       new_element_dictionary = CreateElementDictionary(isolate, object);
     }
     JSObject::MigrateToMap(isolate, object, transition_map);
@@ -3881,7 +3887,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
     // Create a new descriptor array with the appropriate property attributes
     Handle<Map> new_map = Map::CopyForPreventExtensions(
         isolate, old_map, attrs, transition_marker, "CopyForPreventExtensions");
-    if (!new_map->has_frozen_or_sealed_elements()) {
+    if (!new_map->has_any_nonextensible_elements()) {
       new_element_dictionary = CreateElementDictionary(isolate, object);
     }
     JSObject::MigrateToMap(isolate, object, new_map);
@@ -3922,7 +3928,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
     }
   }
 
-  if (object->map().has_frozen_or_sealed_elements()) {
+  if (object->map().has_any_nonextensible_elements()) {
     DCHECK(new_element_dictionary.is_null());
     return Just(true);
   }
@@ -3980,6 +3986,7 @@ bool JSObject::HasEnumerableElements() {
     case PACKED_ELEMENTS:
     case PACKED_FROZEN_ELEMENTS:
     case PACKED_SEALED_ELEMENTS:
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
     case PACKED_DOUBLE_ELEMENTS: {
       int length = object.IsJSArray()
                        ? Smi::ToInt(JSArray::cast(object).length())
@@ -3989,6 +3996,7 @@ bool JSObject::HasEnumerableElements() {
     case HOLEY_SMI_ELEMENTS:
     case HOLEY_FROZEN_ELEMENTS:
     case HOLEY_SEALED_ELEMENTS:
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_ELEMENTS: {
       FixedArray elements = FixedArray::cast(object.elements());
       int length = object.IsJSArray()
@@ -4146,7 +4154,7 @@ Object JSObject::SlowReverseLookup(Object value) {
         } else {
           Object property = RawFastPropertyAt(field_index);
           if (field_index.is_double()) {
-            DCHECK(property.IsMutableHeapNumber());
+            DCHECK(property.IsHeapNumber());
             if (value_is_number && property.Number() == value.Number()) {
               return descs.GetKey(i);
             }
@@ -4691,8 +4699,9 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
   if (from_kind == to_kind) return;
 
   // This method should never be called for any other case.
-  DCHECK(IsFastElementsKind(from_kind));
-  DCHECK(IsFastElementsKind(to_kind));
+  DCHECK(IsFastElementsKind(from_kind) ||
+         IsNonextensibleElementsKind(from_kind));
+  DCHECK(IsFastElementsKind(to_kind) || IsNonextensibleElementsKind(to_kind));
   DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
 
   UpdateAllocationSite(object, to_kind);
@@ -4735,6 +4744,7 @@ int JSObject::GetFastElementsUsage() {
     case PACKED_ELEMENTS:
     case PACKED_FROZEN_ELEMENTS:
     case PACKED_SEALED_ELEMENTS:
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
       return IsJSArray() ? Smi::ToInt(JSArray::cast(*this).length())
                          : store.length();
     case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -4744,6 +4754,7 @@ int JSObject::GetFastElementsUsage() {
     case HOLEY_ELEMENTS:
     case HOLEY_FROZEN_ELEMENTS:
     case HOLEY_SEALED_ELEMENTS:
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
     case FAST_STRING_WRAPPER_ELEMENTS:
       return HoleyElementsUsage(*this, FixedArray::cast(store));
     case HOLEY_DOUBLE_ELEMENTS:
@@ -4801,6 +4812,8 @@ bool JSObject::IsApiWrapper() {
   return instance_type == JS_API_OBJECT_TYPE ||
          instance_type == JS_ARRAY_BUFFER_TYPE ||
          instance_type == JS_DATA_VIEW_TYPE ||
+         instance_type == JS_GLOBAL_OBJECT_TYPE ||
+         instance_type == JS_GLOBAL_PROXY_TYPE ||
          instance_type == JS_SPECIAL_API_OBJECT_TYPE ||
          instance_type == JS_TYPED_ARRAY_TYPE;
 }
@@ -4987,13 +5000,9 @@ void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function) {
   Isolate* const isolate = function->GetIsolate();
 
   if (function->has_feedback_vector()) {
-    // TODO(984344): Make this a CHECK that feedback vectors are identical to
-    // what we expect once we have removed all bytecode generation differences
-    // between eager and lazy compilation. For now just reset if they aren't
-    // identical
-    FeedbackVector vector = function->feedback_vector();
-    if (vector.length() == vector.metadata().slot_count()) return;
-    function->raw_feedback_cell().reset();
+    CHECK_EQ(function->feedback_vector().length(),
+             function->feedback_vector().metadata().slot_count());
+    return;
   }
 
   bool needs_feedback_vector = !FLAG_lazy_feedback_allocation;
@@ -5241,7 +5250,6 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
     case JS_GLOBAL_PROXY_TYPE:
     case JS_PROXY_TYPE:
     case MAP_TYPE:
-    case MUTABLE_HEAP_NUMBER_TYPE:
     case ODDBALL_TYPE:
     case PROPERTY_CELL_TYPE:
     case SHARED_FUNCTION_INFO_TYPE:
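
A unifying reading of this file's elements-kind hunks: preventExtensions, seal, and freeze each get dedicated packed/holey kinds, ordered by restrictiveness (nonextensible < sealed < frozen), and TestElementsIntegrityLevel can answer from the kind alone whenever the kind is at least as restrictive as the requested level. A hypothetical helper mirroring that logic (SEALED and FROZEN are PropertyAttributes combinations in V8):

    bool KindSatisfiesLevel(ElementsKind kind, PropertyAttributes level) {
      if (IsFrozenElementsKind(kind)) return true;  // satisfies any level
      if (IsSealedElementsKind(kind)) return level != FROZEN;
      if (IsNonextensibleElementsKind(kind)) return level == NONE;
      return false;  // extensible kinds need per-element checks
    }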
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index bcea3a28df212d..a9510642f1bf61 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -323,8 +323,9 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
 
   // Returns true if an object has elements of PACKED_ELEMENTS
   DECL_GETTER(HasPackedElements, bool)
-  DECL_GETTER(HasFrozenOrSealedElements, bool)
+  DECL_GETTER(HasAnyNonextensibleElements, bool)
   DECL_GETTER(HasSealedElements, bool)
+  DECL_GETTER(HasNonextensibleElements, bool)
 
   DECL_GETTER(HasTypedArrayElements, bool)
 
@@ -893,19 +894,9 @@ class JSIteratorResult : public JSObject {
 };
 
 // JSBoundFunction describes a bound function exotic object.
-class JSBoundFunction : public JSObject {
+class JSBoundFunction
+    : public TorqueGeneratedJSBoundFunction<JSBoundFunction, JSObject> {
  public:
-  // [bound_target_function]: The wrapped function object.
-  DECL_ACCESSORS(bound_target_function, JSReceiver)
-
-  // [bound_this]: The value that is always passed as the this value when
-  // calling the wrapped function.
-  DECL_ACCESSORS(bound_this, Object)
-
-  // [bound_arguments]: A list of values whose elements are used as the first
-  // arguments to any call to the wrapped function.
-  DECL_ACCESSORS(bound_arguments, FixedArray)
-
   static MaybeHandle<String> GetName(Isolate* isolate,
                                      Handle<JSBoundFunction> function);
   static Maybe<int> GetLength(Isolate* isolate,
@@ -913,8 +904,6 @@ class JSBoundFunction : public JSObject {
   static MaybeHandle<NativeContext> GetFunctionRealm(
       Handle<JSBoundFunction> function);
 
-  DECL_CAST(JSBoundFunction)
-
   // Dispatched behavior.
   DECL_PRINTER(JSBoundFunction)
   DECL_VERIFIER(JSBoundFunction)
@@ -923,11 +912,7 @@ class JSBoundFunction : public JSObject {
   // to ES6 section 19.2.3.5 Function.prototype.toString ( ).
   static Handle<String> ToString(Handle<JSBoundFunction> function);
 
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                TORQUE_GENERATED_JSBOUND_FUNCTION_FIELDS)
-
-  OBJECT_CONSTRUCTORS(JSBoundFunction, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSBoundFunction)
 };
 
 // JSFunction describes JavaScript functions.
@@ -1213,34 +1198,11 @@ class JSPrimitiveWrapper
 class DateCache;
 
 // Representation for JS date objects.
-class JSDate : public JSObject {
+class JSDate : public TorqueGeneratedJSDate<JSDate, JSObject> {
  public:
   static V8_WARN_UNUSED_RESULT MaybeHandle<JSDate> New(
       Handle<JSFunction> constructor, Handle<JSReceiver> new_target, double tv);
 
-  // If one component is NaN, all of them are, indicating a NaN time value.
-  // [value]: the time value.
-  DECL_ACCESSORS(value, Object)
-  // [year]: caches year. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(year, Object)
-  // [month]: caches month. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(month, Object)
-  // [day]: caches day. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(day, Object)
-  // [weekday]: caches day of week. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(weekday, Object)
-  // [hour]: caches hours. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(hour, Object)
-  // [min]: caches minutes. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(min, Object)
-  // [sec]: caches seconds. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(sec, Object)
-  // [cache stamp]: sample of the date cache stamp at the
-  // moment when chached fields were cached.
-  DECL_ACCESSORS(cache_stamp, Object)
-
-  DECL_CAST(JSDate)
-
   // Returns the time value (UTC) identifying the current time.
   static double CurrentTimeValue(Isolate* isolate);
 
@@ -1290,9 +1252,6 @@ class JSDate : public JSObject {
     kTimezoneOffset
   };
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                TORQUE_GENERATED_JSDATE_FIELDS)
-
  private:
   inline Object DoGetField(FieldIndex index);
 
@@ -1301,7 +1260,7 @@ class JSDate : public JSObject {
   // Computes and caches the cacheable fields of the date.
   inline void SetCachedFields(int64_t local_time_ms, DateCache* date_cache);
 
-  OBJECT_CONSTRUCTORS(JSDate, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSDate)
 };
 
 // Representation of message objects used for error reporting through
@@ -1396,27 +1355,19 @@ class JSMessageObject : public JSObject {
 // An object which wraps an ordinary Iterator and converts it to behave
 // according to the Async Iterator protocol.
 // (See https://tc39.github.io/proposal-async-iteration/#sec-iteration)
-class JSAsyncFromSyncIterator : public JSObject {
+class JSAsyncFromSyncIterator
+    : public TorqueGeneratedJSAsyncFromSyncIterator<JSAsyncFromSyncIterator,
+                                                    JSObject> {
  public:
-  DECL_CAST(JSAsyncFromSyncIterator)
   DECL_PRINTER(JSAsyncFromSyncIterator)
-  DECL_VERIFIER(JSAsyncFromSyncIterator)
 
   // Async-from-Sync Iterator instances are ordinary objects that inherit
   // properties from the %AsyncFromSyncIteratorPrototype% intrinsic object.
   // Async-from-Sync Iterator instances are initially created with the internal
   // slots listed in Table 4.
   // (proposal-async-iteration/#table-async-from-sync-iterator-internal-slots)
-  DECL_ACCESSORS(sync_iterator, JSReceiver)
-
-  // The "next" method is loaded during GetIterator, and is not reloaded for
-  // subsequent "next" invocations.
-  DECL_ACCESSORS(next, Object)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      JSObject::kHeaderSize, TORQUE_GENERATED_JSASYNC_FROM_SYNC_ITERATOR_FIELDS)
 
-  OBJECT_CONSTRUCTORS(JSAsyncFromSyncIterator, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSAsyncFromSyncIterator)
 };
 
 class JSStringIterator : public JSObject {
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index ecfeb533069030..8b7a11a151c6b9 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -16,11 +16,9 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(JSPromise, JSObject)
-CAST_ACCESSOR(JSPromise)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSPromise)
 
-ACCESSORS(JSPromise, reactions_or_result, Object, kReactionsOrResultOffset)
-SMI_ACCESSORS(JSPromise, flags, kFlagsOffset)
+TQ_SMI_ACCESSORS(JSPromise, flags)
 BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
 BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
 
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index bbe6f724d1493c..06569a3fcd9c74 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -24,12 +24,8 @@ namespace internal {
 // We also overlay the result and reactions fields on the JSPromise, since
 // the reactions are only necessary for pending promises, whereas the result
 // is only meaningful for settled promises.
-class JSPromise : public JSObject {
+class JSPromise : public TorqueGeneratedJSPromise<JSPromise, JSObject> {
  public:
-  // [reactions_or_result]: Smi 0 terminated list of PromiseReaction objects
-  // in case the JSPromise was not settled yet, otherwise the result.
-  DECL_ACCESSORS(reactions_or_result, Object)
-
   // [result]: Checks that the promise is settled and returns the result.
   inline Object result() const;
 
@@ -62,15 +58,10 @@ class JSPromise : public JSObject {
   V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Resolve(
       Handle<JSPromise> promise, Handle<Object> resolution);
 
-  DECL_CAST(JSPromise)
-
   // Dispatched behavior.
   DECL_PRINTER(JSPromise)
   DECL_VERIFIER(JSPromise)
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                TORQUE_GENERATED_JSPROMISE_FIELDS)
-
   static const int kSizeWithEmbedderFields =
       kSize + v8::Promise::kEmbedderFieldCount * kEmbedderDataSlotSize;
 
@@ -79,7 +70,7 @@ class JSPromise : public JSObject {
   static const int kStatusBits = 2;
   static const int kHasHandlerBit = 2;
   static const int kHandledHintBit = 3;
-  class AsyncTaskIdField : public BitField<int, kHandledHintBit + 1, 22> {};
+  using AsyncTaskIdField = BitField<int, kHandledHintBit + 1, 22>;
 
   static const int kStatusShift = 0;
   static const int kStatusMask = 0x3;
@@ -94,7 +85,7 @@ class JSPromise : public JSObject {
                                                 Handle<Object> argument,
                                                 PromiseReaction::Type type);
 
-  OBJECT_CONSTRUCTORS(JSPromise, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSPromise)
 };
 
 }  // namespace internal
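
For readers outside V8: the `class AsyncTaskIdField : public BitField<...> {}` to `using AsyncTaskIdField = BitField<...>` change above is purely mechanical, since the alias names the instantiated type directly instead of deriving an empty class from it. The following stand-alone sketch shows the bit-field pattern itself; the BitField template here is a toy, not V8's, and only the field layout (two status bits, two flag bits, a 22-bit task id) mirrors the constants in the hunk.

#include <cstdint>
#include <iostream>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

// Layout mirroring JSPromise::flags above: status in bits 0-1, then the
// has_handler and handled_hint bits, then a 22-bit async task id.
using StatusField = BitField<int, 0, 2>;
using HasHandlerField = BitField<bool, 2, 1>;
using HandledHintField = BitField<bool, 3, 1>;
using AsyncTaskIdField = BitField<int, 4, 22>;

int main() {
  uint32_t flags = StatusField::encode(1) | HasHandlerField::encode(true) |
                   AsyncTaskIdField::encode(42);
  std::cout << StatusField::decode(flags) << " "
            << HasHandlerField::decode(flags) << " "
            << AsyncTaskIdField::decode(flags) << "\n";  // prints: 1 1 42
}
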
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 93e6ee008d2f63..b69d1cca975db9 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -17,13 +17,8 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(JSRegExp, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExp)
 
-CAST_ACCESSOR(JSRegExp)
-
-ACCESSORS(JSRegExp, data, Object, kDataOffset)
-ACCESSORS(JSRegExp, flags, Object, kFlagsOffset)
-ACCESSORS(JSRegExp, source, Object, kSourceOffset)
 ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
 
 JSRegExp::Type JSRegExp::TypeTag() const {
@@ -80,23 +75,28 @@ void JSRegExp::SetDataAt(int index, Object value) {
 
 bool JSRegExp::HasCompiledCode() const {
   if (TypeTag() != IRREGEXP) return false;
+  Smi uninitialized = Smi::FromInt(kUninitializedValue);
 #ifdef DEBUG
   DCHECK(DataAt(kIrregexpLatin1CodeIndex).IsCode() ||
-         DataAt(kIrregexpLatin1CodeIndex).IsByteArray() ||
-         DataAt(kIrregexpLatin1CodeIndex) == Smi::FromInt(kUninitializedValue));
+         DataAt(kIrregexpLatin1CodeIndex) == uninitialized);
   DCHECK(DataAt(kIrregexpUC16CodeIndex).IsCode() ||
-         DataAt(kIrregexpUC16CodeIndex).IsByteArray() ||
-         DataAt(kIrregexpUC16CodeIndex) == Smi::FromInt(kUninitializedValue));
+         DataAt(kIrregexpUC16CodeIndex) == uninitialized);
+  DCHECK(DataAt(kIrregexpLatin1BytecodeIndex).IsByteArray() ||
+         DataAt(kIrregexpLatin1BytecodeIndex) == uninitialized);
+  DCHECK(DataAt(kIrregexpUC16BytecodeIndex).IsByteArray() ||
+         DataAt(kIrregexpUC16BytecodeIndex) == uninitialized);
 #endif  // DEBUG
-  Smi uninitialized = Smi::FromInt(kUninitializedValue);
   return (DataAt(kIrregexpLatin1CodeIndex) != uninitialized ||
           DataAt(kIrregexpUC16CodeIndex) != uninitialized);
 }
 
 void JSRegExp::DiscardCompiledCodeForSerialization() {
   DCHECK(HasCompiledCode());
-  SetDataAt(kIrregexpLatin1CodeIndex, Smi::FromInt(kUninitializedValue));
-  SetDataAt(kIrregexpUC16CodeIndex, Smi::FromInt(kUninitializedValue));
+  Smi uninitialized = Smi::FromInt(kUninitializedValue);
+  SetDataAt(kIrregexpLatin1CodeIndex, uninitialized);
+  SetDataAt(kIrregexpUC16CodeIndex, uninitialized);
+  SetDataAt(kIrregexpLatin1BytecodeIndex, uninitialized);
+  SetDataAt(kIrregexpUC16BytecodeIndex, uninitialized);
 }
 
 }  // namespace internal
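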
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index 08e2f99d7e7f79..b2046190582c8b 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -15,20 +15,13 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(JSRegExpStringIterator, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpStringIterator)
 
-ACCESSORS(JSRegExpStringIterator, iterating_regexp, Object,
-          kIteratingRegExpOffset)
-ACCESSORS(JSRegExpStringIterator, iterating_string, String,
-          kIteratedStringOffset)
-
-SMI_ACCESSORS(JSRegExpStringIterator, flags, kFlagsOffset)
+TQ_SMI_ACCESSORS(JSRegExpStringIterator, flags)
 BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, kDoneBit)
 BOOL_ACCESSORS(JSRegExpStringIterator, flags, global, kGlobalBit)
 BOOL_ACCESSORS(JSRegExpStringIterator, flags, unicode, kUnicodeBit)
 
-CAST_ACCESSOR(JSRegExpStringIterator)
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.h b/deps/v8/src/objects/js-regexp-string-iterator.h
index 871b72496656df..ffd38fe21157cb 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator.h
@@ -13,14 +13,10 @@
 namespace v8 {
 namespace internal {
 
-class JSRegExpStringIterator : public JSObject {
+class JSRegExpStringIterator
+    : public TorqueGeneratedJSRegExpStringIterator<JSRegExpStringIterator,
+                                                   JSObject> {
  public:
-  // [regexp]: the [[IteratingRegExp]] internal property.
-  DECL_ACCESSORS(iterating_regexp, Object)
-
-  // [string]: The [[IteratedString]] internal property.
-  DECL_ACCESSORS(iterating_string, String)
-
   DECL_INT_ACCESSORS(flags)
 
   // [boolean]: The [[Done]] internal property.
@@ -32,20 +28,13 @@ class JSRegExpStringIterator : public JSObject {
   // [boolean]: The [[Unicode]] internal property.
   DECL_BOOLEAN_ACCESSORS(unicode)
 
-  DECL_CAST(JSRegExpStringIterator)
   DECL_PRINTER(JSRegExpStringIterator)
-  DECL_VERIFIER(JSRegExpStringIterator)
-
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-    JSObject::kHeaderSize,
-    TORQUE_GENERATED_JSREG_EXP_STRING_ITERATOR_FIELDS)
 
   static const int kDoneBit = 0;
   static const int kGlobalBit = 1;
   static const int kUnicodeBit = 2;
 
-  OBJECT_CONSTRUCTORS(JSRegExpStringIterator, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSRegExpStringIterator)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 18355079f8eb41..b3ef06bd5cbd53 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -29,7 +29,7 @@ namespace internal {
 // used for tracking the last usage (used for regexp code flushing).
 // - max number of registers used by irregexp implementations.
 // - number of capture registers (output values) of the regexp.
-class JSRegExp : public JSObject {
+class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
  public:
   // Meaning of Type:
   // NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
@@ -82,10 +82,7 @@ class JSRegExp : public JSObject {
   STATIC_ASSERT(static_cast<int>(kDotAll) == v8::RegExp::kDotAll);
   STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
 
-  DECL_ACCESSORS(data, Object)
-  DECL_ACCESSORS(flags, Object)
   DECL_ACCESSORS(last_index, Object)
-  DECL_ACCESSORS(source, Object)
 
   V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(Isolate* isolate,
                                                      Handle<String> source,
@@ -98,7 +95,15 @@ class JSRegExp : public JSObject {
                                           Handle<String> source,
                                           Handle<String> flags_string);
 
+  bool MarkedForTierUp();
+  void ResetTierUp();
+  void MarkTierUpForNextExec();
+
   inline Type TypeTag() const;
+
+  // Maximum number of captures allowed.
+  static constexpr int kMaxCaptures = 1 << 16;
+
   // Number of captures (without the match itself).
   inline int CaptureCount();
   inline Flags GetFlags();
@@ -108,26 +113,27 @@ class JSRegExp : public JSObject {
   // Set implementation data after the object has been prepared.
   inline void SetDataAt(int index, Object value);
 
-  static int code_index(bool is_latin1) {
-    if (is_latin1) {
-      return kIrregexpLatin1CodeIndex;
-    } else {
-      return kIrregexpUC16CodeIndex;
-    }
+  static constexpr int code_index(bool is_latin1) {
+    return is_latin1 ? kIrregexpLatin1CodeIndex : kIrregexpUC16CodeIndex;
   }
 
+  static constexpr int bytecode_index(bool is_latin1) {
+    return is_latin1 ? kIrregexpLatin1BytecodeIndex
+                     : kIrregexpUC16BytecodeIndex;
+  }
+
+  // This could be a Smi kUninitializedValue or Code.
+  Object Code(bool is_latin1) const;
+  // This could be a Smi kUninitializedValue or ByteArray.
+  Object Bytecode(bool is_latin1) const;
+  bool ShouldProduceBytecode();
   inline bool HasCompiledCode() const;
   inline void DiscardCompiledCodeForSerialization();
 
-  DECL_CAST(JSRegExp)
-
   // Dispatched behavior.
   DECL_PRINTER(JSRegExp)
   DECL_VERIFIER(JSRegExp)
 
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                TORQUE_GENERATED_JSREG_EXP_FIELDS)
   /* This is already an in-object field. */
   // TODO(v8:8944): improve handling of in-object fields
   static constexpr int kLastIndexOffset = kSize;
@@ -144,24 +150,35 @@ class JSRegExp : public JSObject {
 
   static const int kAtomDataSize = kAtomPatternIndex + 1;
 
-  // Irregexp compiled code or bytecode for Latin1. If compilation
-  // fails, this fields hold an exception object that should be
+  // Irregexp compiled code or trampoline to the interpreter for Latin1. If
+  // compilation fails, this field holds an exception object that should be
   // thrown if the regexp is used again.
   static const int kIrregexpLatin1CodeIndex = kDataIndex;
-  // Irregexp compiled code or bytecode for UC16.  If compilation
-  // fails, this fields hold an exception object that should be
+  // Irregexp compiled code or trampoline to the interpreter for UC16. If
+  // compilation fails, this field holds an exception object that should be
   // thrown if the regexp is used again.
   static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
+  // Bytecode to interpret the regexp for Latin1. Contains kUninitializedValue
+  // if we haven't compiled the regexp yet or if tier-up has happened (i.e.
+  // when kIrregexpLatin1CodeIndex contains native irregexp code).
+  static const int kIrregexpLatin1BytecodeIndex = kDataIndex + 2;
+  // Bytecode to interpret the regexp for UC16. Contains kUninitializedValue
+  // if we haven't compiled the regexp yet or if tier-up has happened (i.e.
+  // when kIrregexpUC16CodeIndex contains native irregexp code).
+  static const int kIrregexpUC16BytecodeIndex = kDataIndex + 3;
   // Maximal number of registers used by either Latin1 or UC16.
   // Only used to check that there is enough stack space
-  static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 2;
+  static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
   // Number of captures in the compiled regexp.
-  static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
+  static const int kIrregexpCaptureCountIndex = kDataIndex + 5;
   // Maps names of named capture groups (at indices 2i) to their corresponding
   // (1-based) capture group indices (at indices 2i + 1).
-  static const int kIrregexpCaptureNameMapIndex = kDataIndex + 4;
+  static const int kIrregexpCaptureNameMapIndex = kDataIndex + 6;
+  static const int kIrregexpTierUpTicksIndex = kDataIndex + 7;
 
-  static const int kIrregexpDataSize = kIrregexpCaptureNameMapIndex + 1;
+  static const int kIrregexpDataSize = kIrregexpTierUpTicksIndex + 1;
 
   // In-object fields.
   static const int kLastIndexFieldIndex = 0;
@@ -178,7 +195,7 @@ class JSRegExp : public JSObject {
   // The uninitialized value for a regexp code object.
   static const int kUninitializedValue = -1;
 
-  OBJECT_CONSTRUCTORS(JSRegExp, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSRegExp)
 };
 
 DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
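
The js-regexp.h hunks above widen the Irregexp data array: interpreter bytecode and tiered-up native code now occupy separate slots per encoding, and a tick counter (kIrregexpTierUpTicksIndex) decides when to tier up. A compilable toy model of that slot layout follows; treating kDataIndex as 0 and using plain ints for the slots are assumptions of the sketch, not V8's representation.

#include <array>
#include <cassert>

enum : int {
  kDataIndex = 0,  // assumed base for the sketch
  kLatin1CodeIndex = kDataIndex,
  kUC16CodeIndex = kDataIndex + 1,
  kLatin1BytecodeIndex = kDataIndex + 2,
  kUC16BytecodeIndex = kDataIndex + 3,
  kMaxRegisterCountIndex = kDataIndex + 4,
  kCaptureCountIndex = kDataIndex + 5,
  kCaptureNameMapIndex = kDataIndex + 6,
  kTierUpTicksIndex = kDataIndex + 7,
  kIrregexpDataSize = kTierUpTicksIndex + 1
};

constexpr int code_index(bool is_latin1) {
  return is_latin1 ? kLatin1CodeIndex : kUC16CodeIndex;
}
constexpr int bytecode_index(bool is_latin1) {
  return is_latin1 ? kLatin1BytecodeIndex : kUC16BytecodeIndex;
}

int main() {
  std::array<int, kIrregexpDataSize> data{};  // stand-in for the FixedArray
  data[bytecode_index(true)] = 100;  // bytecode installed by the first compile
  data[kTierUpTicksIndex] = 1;       // analogue of MarkTierUpForNextExec()
  assert(data[code_index(true)] == 0);  // native code slot still empty
  return 0;
}
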
diff --git a/deps/v8/src/objects/js-segment-iterator.cc b/deps/v8/src/objects/js-segment-iterator.cc
index 509db37d4404ff..d893f3705f25da 100644
--- a/deps/v8/src/objects/js-segment-iterator.cc
+++ b/deps/v8/src/objects/js-segment-iterator.cc
@@ -207,7 +207,7 @@ Maybe<bool> JSSegmentIterator::Following(
       THROW_NEW_ERROR_RETURN_VALUE(
           isolate,
           NewRangeError(MessageTemplate::kParameterOfFunctionOutOfRange,
-                        factory->NewStringFromStaticChars("from"),
+                        factory->from_string(),
                         factory->NewStringFromStaticChars("following"), index),
           Nothing<bool>());
     }
@@ -220,7 +220,7 @@ Maybe<bool> JSSegmentIterator::Following(
       THROW_NEW_ERROR_RETURN_VALUE(
           isolate,
           NewRangeError(MessageTemplate::kParameterOfFunctionOutOfRange,
-                        factory->NewStringFromStaticChars("from"),
+                        factory->from_string(),
                         factory->NewStringFromStaticChars("following"),
                         from_obj),
           Nothing<bool>());
@@ -260,7 +260,7 @@ Maybe<bool> JSSegmentIterator::Preceding(
       THROW_NEW_ERROR_RETURN_VALUE(
           isolate,
           NewRangeError(MessageTemplate::kParameterOfFunctionOutOfRange,
-                        factory->NewStringFromStaticChars("from"),
+                        factory->from_string(),
                         factory->NewStringFromStaticChars("preceding"), index),
           Nothing<bool>());
     }
@@ -272,7 +272,7 @@ Maybe<bool> JSSegmentIterator::Preceding(
       THROW_NEW_ERROR_RETURN_VALUE(
           isolate,
           NewRangeError(MessageTemplate::kParameterOfFunctionOutOfRange,
-                        factory->NewStringFromStaticChars("from"),
+                        factory->from_string(),
                         factory->NewStringFromStaticChars("preceding"),
                         from_obj),
           Nothing<bool>());
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 46f28e883edec8..004ffd6d791365 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -9,7 +9,6 @@
 
 #include "src/api/api-inl.h"
 #include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects/microtask-inl.h"
 #include "src/objects/smi-inl.h"
 
 // Has to be the last include (doesn't have include guards):
@@ -22,7 +21,6 @@ OBJECT_CONSTRUCTORS_IMPL(WeakCell, HeapObject)
 OBJECT_CONSTRUCTORS_IMPL(JSWeakRef, JSObject)
 OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroup, JSObject)
 OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroupCleanupIterator, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(FinalizationGroupCleanupJobTask, Microtask)
 
 ACCESSORS(JSFinalizationGroup, native_context, NativeContext,
           kNativeContextOffset)
@@ -51,10 +49,6 @@ ACCESSORS(JSFinalizationGroupCleanupIterator, finalization_group,
           JSFinalizationGroup, kFinalizationGroupOffset)
 CAST_ACCESSOR(JSFinalizationGroupCleanupIterator)
 
-ACCESSORS(FinalizationGroupCleanupJobTask, finalization_group,
-          JSFinalizationGroup, kFinalizationGroupOffset)
-CAST_ACCESSOR(FinalizationGroupCleanupJobTask)
-
 void JSFinalizationGroup::Register(
     Handle<JSFinalizationGroup> finalization_group, Handle<JSReceiver> target,
     Handle<Object> holdings, Handle<Object> key, Isolate* isolate) {
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index 6a401fecee1ed2..723e0e31358de9 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -58,16 +58,18 @@ class JSFinalizationGroup : public JSObject {
 
   // Constructs an iterator for the WeakCells in the cleared_cells list and
   // calls the user's cleanup function.
-  static void Cleanup(Isolate* isolate,
-                      Handle<JSFinalizationGroup> finalization_group,
-                      Handle<Object> callback);
+  //
+  // Returns Nothing<bool> if an exception occurs, otherwise returns Just(true).
+  static V8_WARN_UNUSED_RESULT Maybe<bool> Cleanup(
+      Isolate* isolate, Handle<JSFinalizationGroup> finalization_group,
+      Handle<Object> callback);
 
   // Layout description.
   DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
                                 TORQUE_GENERATED_JSFINALIZATION_GROUP_FIELDS)
 
   // Bitfields in flags.
-  class ScheduledForCleanupField : public BitField<bool, 0, 1> {};
+  using ScheduledForCleanupField = BitField<bool, 0, 1>;
 
   OBJECT_CONSTRUCTORS(JSFinalizationGroup, JSObject);
 };
@@ -133,27 +135,6 @@ class JSWeakRef : public JSObject {
   OBJECT_CONSTRUCTORS(JSWeakRef, JSObject);
 };
 
-class FinalizationGroupCleanupJobTask : public Microtask {
- public:
-  DECL_ACCESSORS(finalization_group, JSFinalizationGroup)
-
-  DECL_CAST(FinalizationGroupCleanupJobTask)
-  DECL_VERIFIER(FinalizationGroupCleanupJobTask)
-  DECL_PRINTER(FinalizationGroupCleanupJobTask)
-
-// Layout description.
-#define FINALIZATION_GROUP_CLEANUP_JOB_TASK_FIELDS(V) \
-  V(kFinalizationGroupOffset, kTaggedSize)            \
-  /* Total size. */                                   \
-  V(kSize, 0)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize,
-                                FINALIZATION_GROUP_CLEANUP_JOB_TASK_FIELDS)
-#undef FINALIZATION_GROUP_CLEANUP_JOB_TASK_FIELDS
-
-  OBJECT_CONSTRUCTORS(FinalizationGroupCleanupJobTask, Microtask);
-};
-
 class JSFinalizationGroupCleanupIterator : public JSObject {
  public:
   DECL_PRINTER(JSFinalizationGroupCleanupIterator)
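
With the signature change above, a user cleanup callback that throws now surfaces as Nothing<bool> rather than being ignored. A minimal sketch of that calling convention, with std::optional standing in for v8::Maybe (the real API is handle-based):

#include <iostream>
#include <optional>

template <typename T> using Maybe = std::optional<T>;  // stand-in only
template <typename T> Maybe<T> Just(T value) { return value; }
template <typename T> Maybe<T> Nothing() { return std::nullopt; }

Maybe<bool> Cleanup(bool callback_throws) {
  if (callback_throws) return Nothing<bool>();  // exception stays pending
  return Just(true);
}

int main() {
  if (!Cleanup(true).has_value()) {
    std::cout << "propagate the pending exception to the caller\n";
  }
}
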
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index 18b38ed7447047..7496399cad42cf 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -22,6 +22,17 @@
 namespace v8 {
 namespace internal {
 
+#define RETURN_NOTHING_IF_NOT_SUCCESSFUL(call) \
+  do {                                         \
+    if (!(call)) return Nothing<bool>();       \
+  } while (false)
+
+#define RETURN_FAILURE_IF_NOT_SUCCESSFUL(call)          \
+  do {                                                  \
+    ExceptionStatus status_enum_result = (call);        \
+    if (!status_enum_result) return status_enum_result; \
+  } while (false)
+
 namespace {
 
 static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
@@ -64,32 +75,39 @@ Handle<OrderedHashSet> KeyAccumulator::keys() {
   return Handle<OrderedHashSet>::cast(keys_);
 }
 
-void KeyAccumulator::AddKey(Object key, AddKeyConversion convert) {
-  AddKey(handle(key, isolate_), convert);
+ExceptionStatus KeyAccumulator::AddKey(Object key, AddKeyConversion convert) {
+  return AddKey(handle(key, isolate_), convert);
 }
 
-void KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
+ExceptionStatus KeyAccumulator::AddKey(Handle<Object> key,
+                                       AddKeyConversion convert) {
   if (filter_ == PRIVATE_NAMES_ONLY) {
-    if (!key->IsSymbol()) return;
-    if (!Symbol::cast(*key).is_private_name()) return;
+    if (!key->IsSymbol()) return ExceptionStatus::kSuccess;
+    if (!Symbol::cast(*key).is_private_name()) return ExceptionStatus::kSuccess;
   } else if (key->IsSymbol()) {
-    if (filter_ & SKIP_SYMBOLS) return;
-
-    if (Symbol::cast(*key).is_private()) return;
+    if (filter_ & SKIP_SYMBOLS) return ExceptionStatus::kSuccess;
+    if (Symbol::cast(*key).is_private()) return ExceptionStatus::kSuccess;
   } else if (filter_ & SKIP_STRINGS) {
-    return;
+    return ExceptionStatus::kSuccess;
   }
 
-  if (IsShadowed(key)) return;
+  if (IsShadowed(key)) return ExceptionStatus::kSuccess;
   if (keys_.is_null()) {
-    keys_ = OrderedHashSet::Allocate(isolate_, 16);
+    keys_ = OrderedHashSet::Allocate(isolate_, 16).ToHandleChecked();
   }
   uint32_t index;
   if (convert == CONVERT_TO_ARRAY_INDEX && key->IsString() &&
       Handle<String>::cast(key)->AsArrayIndex(&index)) {
     key = isolate_->factory()->NewNumberFromUint(index);
   }
-  Handle<OrderedHashSet> new_set = OrderedHashSet::Add(isolate(), keys(), key);
+  MaybeHandle<OrderedHashSet> new_set_candidate =
+      OrderedHashSet::Add(isolate(), keys(), key);
+  Handle<OrderedHashSet> new_set;
+  if (!new_set_candidate.ToHandle(&new_set)) {
+    THROW_NEW_ERROR_RETURN_VALUE(
+        isolate_, NewRangeError(MessageTemplate::kTooManyProperties),
+        ExceptionStatus::kException);
+  }
   if (*new_set != *keys_) {
     // The keys_ Set is converted directly to a FixedArray in GetKeys which can
     // be left-trimmed. Hence the previous Set should not keep a pointer to the
@@ -97,22 +115,24 @@ void KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
     keys_->set(OrderedHashSet::NextTableIndex(), Smi::kZero);
     keys_ = new_set;
   }
+  return ExceptionStatus::kSuccess;
 }
 
-void KeyAccumulator::AddKeys(Handle<FixedArray> array,
-                             AddKeyConversion convert) {
+ExceptionStatus KeyAccumulator::AddKeys(Handle<FixedArray> array,
+                                        AddKeyConversion convert) {
   int add_length = array->length();
   for (int i = 0; i < add_length; i++) {
     Handle<Object> current(array->get(i), isolate_);
-    AddKey(current, convert);
+    RETURN_FAILURE_IF_NOT_SUCCESSFUL(AddKey(current, convert));
   }
+  return ExceptionStatus::kSuccess;
 }
 
-void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
-                             AddKeyConversion convert) {
+ExceptionStatus KeyAccumulator::AddKeys(Handle<JSObject> array_like,
+                                        AddKeyConversion convert) {
   DCHECK(array_like->IsJSArray() || array_like->HasSloppyArgumentsElements());
   ElementsAccessor* accessor = array_like->GetElementsAccessor();
-  accessor->AddElementsToKeyAccumulator(array_like, this, convert);
+  return accessor->AddElementsToKeyAccumulator(array_like, this, convert);
 }
 
 MaybeHandle<FixedArray> FilterProxyKeys(KeyAccumulator* accumulator,
@@ -162,7 +182,8 @@ Maybe<bool> KeyAccumulator::AddKeysFromJSProxy(Handle<JSProxy> proxy,
       return Just(true);
     }
   }
-  AddKeys(keys, is_for_in_ ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
+  RETURN_NOTHING_IF_NOT_SUCCESSFUL(
+      AddKeys(keys, is_for_in_ ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT));
   return Just(true);
 }
 
@@ -488,12 +509,10 @@ namespace {
 
 enum IndexedOrNamed { kIndexed, kNamed };
 
-void FilterForEnumerableProperties(Handle<JSReceiver> receiver,
-                                   Handle<JSObject> object,
-                                   Handle<InterceptorInfo> interceptor,
-                                   KeyAccumulator* accumulator,
-                                   Handle<JSObject> result,
-                                   IndexedOrNamed type) {
+V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
+    Handle<JSReceiver> receiver, Handle<JSObject> object,
+    Handle<InterceptorInfo> interceptor, KeyAccumulator* accumulator,
+    Handle<JSObject> result, IndexedOrNamed type) {
   DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
   ElementsAccessor* accessor = result->GetElementsAccessor();
 
@@ -521,10 +540,12 @@ void FilterForEnumerableProperties(Handle<JSReceiver> receiver,
       int32_t value;
       CHECK(attributes->ToInt32(&value));
       if ((value & DONT_ENUM) == 0) {
-        accumulator->AddKey(element, DO_NOT_CONVERT);
+        RETURN_FAILURE_IF_NOT_SUCCESSFUL(
+            accumulator->AddKey(element, DO_NOT_CONVERT));
       }
     }
   }
+  return ExceptionStatus::kSuccess;
 }
 
 // Returns |true| on success, |nothing| on exception.
@@ -551,11 +572,11 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
 
   if ((accumulator->filter() & ONLY_ENUMERABLE) &&
       !interceptor->query().IsUndefined(isolate)) {
-    FilterForEnumerableProperties(receiver, object, interceptor, accumulator,
-                                  result, type);
+    RETURN_NOTHING_IF_NOT_SUCCESSFUL(FilterForEnumerableProperties(
+        receiver, object, interceptor, accumulator, result, type));
   } else {
-    accumulator->AddKeys(
-        result, type == kIndexed ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
+    RETURN_NOTHING_IF_NOT_SUCCESSFUL(accumulator->AddKeys(
+        result, type == kIndexed ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT));
   }
   return Just(true);
 }
@@ -589,18 +610,17 @@ Maybe<bool> KeyAccumulator::CollectOwnElementIndices(
   if (filter_ & SKIP_STRINGS || skip_indices_) return Just(true);
 
   ElementsAccessor* accessor = object->GetElementsAccessor();
-  accessor->CollectElementIndices(object, this);
-
+  RETURN_NOTHING_IF_NOT_SUCCESSFUL(
+      accessor->CollectElementIndices(object, this));
   return CollectInterceptorKeys(receiver, object, this, kIndexed);
 }
 
 namespace {
 
 template <bool skip_symbols>
-int CollectOwnPropertyNamesInternal(Handle<JSObject> object,
-                                    KeyAccumulator* keys,
-                                    Handle<DescriptorArray> descs,
-                                    int start_index, int limit) {
+base::Optional<int> CollectOwnPropertyNamesInternal(
+    Handle<JSObject> object, KeyAccumulator* keys,
+    Handle<DescriptorArray> descs, int start_index, int limit) {
   int first_skipped = -1;
   PropertyFilter filter = keys->filter();
   KeyCollectionMode mode = keys->mode();
@@ -633,7 +653,9 @@ int CollectOwnPropertyNamesInternal(Handle<JSObject> object,
     if (is_shadowing_key) {
       keys->AddShadowingKey(key);
     } else {
-      keys->AddKey(key, DO_NOT_CONVERT);
+      if (keys->AddKey(key, DO_NOT_CONVERT) != ExceptionStatus::kSuccess) {
+        return base::Optional<int>();
+      }
     }
   }
   return first_skipped;
@@ -696,48 +718,50 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
         }
       }
     }
-    AddKeys(enum_keys, DO_NOT_CONVERT);
+    RETURN_NOTHING_IF_NOT_SUCCESSFUL(AddKeys(enum_keys, DO_NOT_CONVERT));
   } else {
     if (object->HasFastProperties()) {
       int limit = object->map().NumberOfOwnDescriptors();
       Handle<DescriptorArray> descs(object->map().instance_descriptors(),
                                     isolate_);
       // First collect the strings,
-      int first_symbol =
+      base::Optional<int> first_symbol =
           CollectOwnPropertyNamesInternal<true>(object, this, descs, 0, limit);
       // then the symbols.
-      if (first_symbol != -1) {
-        CollectOwnPropertyNamesInternal<false>(object, this, descs,
-                                               first_symbol, limit);
+      RETURN_NOTHING_IF_NOT_SUCCESSFUL(first_symbol);
+      if (first_symbol.value() != -1) {
+        RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectOwnPropertyNamesInternal<false>(
+            object, this, descs, first_symbol.value(), limit));
       }
     } else if (object->IsJSGlobalObject()) {
-      GlobalDictionary::CollectKeysTo(
+      RETURN_NOTHING_IF_NOT_SUCCESSFUL(GlobalDictionary::CollectKeysTo(
           handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
-          this);
+          this));
     } else {
-      NameDictionary::CollectKeysTo(
-          handle(object->property_dictionary(), isolate_), this);
+      RETURN_NOTHING_IF_NOT_SUCCESSFUL(NameDictionary::CollectKeysTo(
+          handle(object->property_dictionary(), isolate_), this));
     }
   }
   // Add the property keys from the interceptor.
   return CollectInterceptorKeys(receiver, object, this, kNamed);
 }
 
-void KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
-                                         Handle<JSObject> object) {
+ExceptionStatus KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
+                                                    Handle<JSObject> object) {
   if (object->HasFastProperties()) {
     int limit = object->map().NumberOfOwnDescriptors();
     Handle<DescriptorArray> descs(object->map().instance_descriptors(),
                                   isolate_);
     CollectOwnPropertyNamesInternal<false>(object, this, descs, 0, limit);
   } else if (object->IsJSGlobalObject()) {
-    GlobalDictionary::CollectKeysTo(
+    RETURN_FAILURE_IF_NOT_SUCCESSFUL(GlobalDictionary::CollectKeysTo(
         handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
-        this);
+        this));
   } else {
-    NameDictionary::CollectKeysTo(
-        handle(object->property_dictionary(), isolate_), this);
+    RETURN_FAILURE_IF_NOT_SUCCESSFUL(NameDictionary::CollectKeysTo(
+        handle(object->property_dictionary(), isolate_), this));
   }
+  return ExceptionStatus::kSuccess;
 }
 
 Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
@@ -795,7 +819,7 @@ Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
     filter_ = static_cast<PropertyFilter>(filter_ | ONLY_ALL_CAN_READ);
   }
   if (filter_ & PRIVATE_NAMES_ONLY) {
-    CollectPrivateNames(receiver, object);
+    RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectPrivateNames(receiver, object));
     return Just(true);
   }
 
@@ -843,8 +867,8 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
                                                   Handle<JSProxy> proxy) {
   STACK_CHECK(isolate_, Nothing<bool>());
   if (filter_ == PRIVATE_NAMES_ONLY) {
-    NameDictionary::CollectKeysTo(
-        handle(proxy->property_dictionary(), isolate_), this);
+    RETURN_NOTHING_IF_NOT_SUCCESSFUL(NameDictionary::CollectKeysTo(
+        handle(proxy->property_dictionary(), isolate_), this));
     return Just(true);
   }
 
@@ -1018,5 +1042,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyTargetKeys(
   return result;
 }
 
+#undef RETURN_NOTHING_IF_NOT_SUCCESSFUL
+#undef RETURN_FAILURE_IF_NOT_SUCCESSFUL
 }  // namespace internal
 }  // namespace v8
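
The keys.cc changes above thread an ExceptionStatus result through every key collector so that the RangeError thrown when OrderedHashSet::Add fails (kTooManyProperties) propagates instead of being swallowed by a void return. A self-contained sketch of the same macro-based early-return pattern; the key type and the overflow limit are made up for illustration.

#include <iostream>

enum class ExceptionStatus : bool { kException = false, kSuccess = true };

// Same shape as RETURN_FAILURE_IF_NOT_SUCCESSFUL above, with an explicit
// comparison because this toy enum has no operator bool.
#define RETURN_FAILURE_IF_NOT_SUCCESSFUL(call)             \
  do {                                                     \
    ExceptionStatus status_enum_result = (call);           \
    if (status_enum_result == ExceptionStatus::kException) \
      return status_enum_result;                           \
  } while (false)

ExceptionStatus AddKey(int key) {
  // Pretend large keys overflow the backing store, the case the new
  // kTooManyProperties RangeError covers in the real code.
  return key < (1 << 16) ? ExceptionStatus::kSuccess
                         : ExceptionStatus::kException;
}

ExceptionStatus AddKeys(const int* keys, int n) {
  for (int i = 0; i < n; ++i) {
    RETURN_FAILURE_IF_NOT_SUCCESSFUL(AddKey(keys[i]));
  }
  return ExceptionStatus::kSuccess;
}

int main() {
  int keys[] = {1, 2, 1 << 20};
  bool failed = AddKeys(keys, 3) == ExceptionStatus::kException;
  std::cout << (failed ? "exception propagated" : "ok") << "\n";
}
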
diff --git a/deps/v8/src/objects/keys.h b/deps/v8/src/objects/keys.h
index 69f61a886e8cb0..5d8632e2a77cd5 100644
--- a/deps/v8/src/objects/keys.h
+++ b/deps/v8/src/objects/keys.h
@@ -52,8 +52,8 @@ class KeyAccumulator final {
                                        Handle<JSObject> object);
   Maybe<bool> CollectOwnPropertyNames(Handle<JSReceiver> receiver,
                                       Handle<JSObject> object);
-  void CollectPrivateNames(Handle<JSReceiver> receiver,
-                           Handle<JSObject> object);
+  V8_WARN_UNUSED_RESULT ExceptionStatus
+  CollectPrivateNames(Handle<JSReceiver> receiver, Handle<JSObject> object);
   Maybe<bool> CollectAccessCheckInterceptorKeys(
       Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
       Handle<JSObject> object);
@@ -65,10 +65,14 @@ class KeyAccumulator final {
   static Handle<FixedArray> GetOwnEnumPropertyKeys(Isolate* isolate,
                                                    Handle<JSObject> object);
 
-  void AddKey(Object key, AddKeyConversion convert = DO_NOT_CONVERT);
-  void AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
-  void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
-  void AddKeys(Handle<JSObject> array_like, AddKeyConversion convert);
+  V8_WARN_UNUSED_RESULT ExceptionStatus
+  AddKey(Object key, AddKeyConversion convert = DO_NOT_CONVERT);
+  V8_WARN_UNUSED_RESULT ExceptionStatus
+  AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
+  V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<FixedArray> array,
+                                                AddKeyConversion convert);
+  V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<JSObject> array_like,
+                                                AddKeyConversion convert);
 
   // Jump to the next level, pushing the current |levelLength_| to
   // |levelLengths_| and adding a new list to |elements_|.
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index 32b43cd8f70c5e..96aa8d8c8bf1ca 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -124,14 +124,9 @@ ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
 // ArrayBoilerplateDescription
 //
 
-OBJECT_CONSTRUCTORS_IMPL(ArrayBoilerplateDescription, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ArrayBoilerplateDescription)
 
-CAST_ACCESSOR(ArrayBoilerplateDescription)
-
-SMI_ACCESSORS(ArrayBoilerplateDescription, flags, kFlagsOffset)
-
-ACCESSORS(ArrayBoilerplateDescription, constant_elements, FixedArrayBase,
-          kConstantElementsOffset)
+TQ_SMI_ACCESSORS(ArrayBoilerplateDescription, flags)
 
 ElementsKind ArrayBoilerplateDescription::elements_kind() const {
   return static_cast<ElementsKind>(flags());
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index 7328c11f311708..95beb6cbdb6c6a 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -528,8 +528,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
 
   // Add name accessor to the class object if necessary.
   bool install_class_name_accessor = false;
-  if (!expr->has_name_static_property() &&
-      expr->constructor()->has_shared_name()) {
+  if (!expr->has_name_static_property()) {
     if (static_desc.HasDictionaryProperties()) {
       // Install class name accessor if necessary during class literal
       // instantiation.
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index f009a54f8a9474..cf551ea576ae61 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -55,29 +55,22 @@ class ObjectBoilerplateDescription : public FixedArray {
   OBJECT_CONSTRUCTORS(ObjectBoilerplateDescription, FixedArray);
 };
 
-class ArrayBoilerplateDescription : public Struct {
+class ArrayBoilerplateDescription
+    : public TorqueGeneratedArrayBoilerplateDescription<
+          ArrayBoilerplateDescription, Struct> {
  public:
-  // store constant_elements of a fixed array
-  DECL_ACCESSORS(constant_elements, FixedArrayBase)
-
   inline ElementsKind elements_kind() const;
   inline void set_elements_kind(ElementsKind kind);
 
   inline bool is_empty() const;
 
-  DECL_CAST(ArrayBoilerplateDescription)
   // Dispatched behavior.
   DECL_PRINTER(ArrayBoilerplateDescription)
-  DECL_VERIFIER(ArrayBoilerplateDescription)
   void BriefPrintDetails(std::ostream& os);
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      HeapObject::kHeaderSize,
-      TORQUE_GENERATED_ARRAY_BOILERPLATE_DESCRIPTION_FIELDS)
-
  private:
   DECL_INT_ACCESSORS(flags)
-  OBJECT_CONSTRUCTORS(ArrayBoilerplateDescription, Struct);
+  TQ_OBJECT_CONSTRUCTORS(ArrayBoilerplateDescription)
 };
 
 class ClassBoilerplate : public FixedArray {
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 33130aafe5d70b..445d0815f32659 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -6,6 +6,7 @@
 
 #include "src/deoptimizer/deoptimizer.h"
 #include "src/execution/isolate-inl.h"
+#include "src/execution/protectors-inl.h"
 #include "src/init/bootstrapper.h"
 #include "src/logging/counters.h"
 #include "src/objects/elements.h"
@@ -235,30 +236,42 @@ void LookupIterator::InternalUpdateProtector() {
   if (!receiver_->IsHeapObject()) return;
   Handle<HeapObject> receiver = Handle<HeapObject>::cast(receiver_);
 
+  // Getting the native_context from the isolate as a fallback. If possible, we
+  // use the receiver's creation context instead.
   Handle<NativeContext> native_context = isolate_->native_context();
 
   ReadOnlyRoots roots(isolate_);
   if (*name_ == roots.constructor_string()) {
-    if (!isolate_->IsArraySpeciesLookupChainIntact() &&
+    // Fetching the context here, since the operation is rather expensive.
+    if (receiver->IsJSReceiver()) {
+      native_context = Handle<JSReceiver>::cast(receiver)->GetCreationContext();
+    }
+
+    if (!Protectors::IsArraySpeciesLookupChainIntact(isolate_) &&
         !isolate_->IsPromiseSpeciesLookupChainIntact() &&
-        !isolate_->IsRegExpSpeciesLookupChainIntact(native_context) &&
+        !Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
+            native_context) &&
         !isolate_->IsTypedArraySpeciesLookupChainIntact()) {
       return;
     }
     // Setting the constructor property could change an instance's @@species
     if (receiver->IsJSArray(isolate_)) {
-      if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+      if (!Protectors::IsArraySpeciesLookupChainIntact(isolate_)) return;
       isolate_->CountUsage(
           v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
-      isolate_->InvalidateArraySpeciesProtector();
+      Protectors::InvalidateArraySpeciesLookupChain(isolate_);
       return;
     } else if (receiver->IsJSPromise(isolate_)) {
       if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
       isolate_->InvalidatePromiseSpeciesProtector();
       return;
     } else if (receiver->IsJSRegExp(isolate_)) {
-      if (!isolate_->IsRegExpSpeciesLookupChainIntact(native_context)) return;
-      isolate_->InvalidateRegExpSpeciesProtector(native_context);
+      if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
+              native_context)) {
+        return;
+      }
+      Protectors::InvalidateRegExpSpeciesLookupChainProtector(isolate_,
+                                                              native_context);
       return;
     } else if (receiver->IsJSTypedArray(isolate_)) {
       if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
@@ -274,18 +287,22 @@ void LookupIterator::InternalUpdateProtector() {
       // prototype is pointing the same TYPED_ARRAY_PROTOTYPE.
       if (isolate_->IsInAnyContext(*receiver,
                                    Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
-        if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+        if (!Protectors::IsArraySpeciesLookupChainIntact(isolate_)) return;
         isolate_->CountUsage(
             v8::Isolate::UseCounterFeature::kArrayPrototypeConstructorModified);
-        isolate_->InvalidateArraySpeciesProtector();
+        Protectors::InvalidateArraySpeciesLookupChain(isolate_);
       } else if (isolate_->IsInAnyContext(*receiver,
                                           Context::PROMISE_PROTOTYPE_INDEX)) {
         if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
         isolate_->InvalidatePromiseSpeciesProtector();
       } else if (isolate_->IsInAnyContext(*receiver,
                                           Context::REGEXP_PROTOTYPE_INDEX)) {
-        if (!isolate_->IsRegExpSpeciesLookupChainIntact(native_context)) return;
-        isolate_->InvalidateRegExpSpeciesProtector(native_context);
+        if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
+                native_context)) {
+          return;
+        }
+        Protectors::InvalidateRegExpSpeciesLookupChainProtector(isolate_,
+                                                                native_context);
       } else if (isolate_->IsInAnyContext(
                      receiver->map(isolate_).prototype(isolate_),
                      Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
@@ -321,27 +338,37 @@ void LookupIterator::InternalUpdateProtector() {
       isolate_->InvalidateStringIteratorProtector();
     }
   } else if (*name_ == roots.species_symbol()) {
-    if (!isolate_->IsArraySpeciesLookupChainIntact() &&
+    // Fetching the context here, since the operation is rather expensive.
+    if (receiver->IsJSReceiver()) {
+      native_context = Handle<JSReceiver>::cast(receiver)->GetCreationContext();
+    }
+
+    if (!Protectors::IsArraySpeciesLookupChainIntact(isolate_) &&
         !isolate_->IsPromiseSpeciesLookupChainIntact() &&
-        !isolate_->IsRegExpSpeciesLookupChainIntact(native_context) &&
+        !Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
+            native_context) &&
         !isolate_->IsTypedArraySpeciesLookupChainIntact()) {
       return;
     }
     // Setting the Symbol.species property of any Array, Promise or TypedArray
     // constructor invalidates the @@species protector
     if (isolate_->IsInAnyContext(*receiver, Context::ARRAY_FUNCTION_INDEX)) {
-      if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+      if (!Protectors::IsArraySpeciesLookupChainIntact(isolate_)) return;
       isolate_->CountUsage(
           v8::Isolate::UseCounterFeature::kArraySpeciesModified);
-      isolate_->InvalidateArraySpeciesProtector();
+      Protectors::InvalidateArraySpeciesLookupChain(isolate_);
     } else if (isolate_->IsInAnyContext(*receiver,
                                         Context::PROMISE_FUNCTION_INDEX)) {
       if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
       isolate_->InvalidatePromiseSpeciesProtector();
     } else if (isolate_->IsInAnyContext(*receiver,
                                         Context::REGEXP_FUNCTION_INDEX)) {
-      if (!isolate_->IsRegExpSpeciesLookupChainIntact(native_context)) return;
-      isolate_->InvalidateRegExpSpeciesProtector(native_context);
+      if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
+              native_context)) {
+        return;
+      }
+      Protectors::InvalidateRegExpSpeciesLookupChainProtector(isolate_,
+                                                              native_context);
     } else if (IsTypedArrayFunctionInAnyContext(isolate_, *receiver)) {
       if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
       isolate_->InvalidateTypedArraySpeciesProtector();
@@ -433,7 +460,8 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
     }
 
     // Copy the backing store if it is copy-on-write.
-    if (IsSmiOrObjectElementsKind(to) || IsSealedElementsKind(to)) {
+    if (IsSmiOrObjectElementsKind(to) || IsSealedElementsKind(to) ||
+        IsNonextensibleElementsKind(to)) {
       JSObject::EnsureWritableFastElements(holder_obj);
     }
     return;
@@ -901,8 +929,8 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
       bits = holder->RawFastDoublePropertyAsBitsAt(field_index);
     } else {
       Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
-      DCHECK(current_value.IsMutableHeapNumber(isolate_));
-      bits = MutableHeapNumber::cast(current_value).value_as_bits();
+      DCHECK(current_value.IsHeapNumber(isolate_));
+      bits = HeapNumber::cast(current_value).value_as_bits();
     }
     // Use bit representation of double to check for hole double, since
     // manipulating the signaling NaN used for the hole in C++, e.g. with
@@ -1137,9 +1165,10 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
                                              : NOT_FOUND;
     }
     property_details_ = accessor->GetDetails(js_object, number_);
-    if (map.has_frozen_or_sealed_elements()) {
-      PropertyAttributes attrs = map.has_sealed_elements() ? SEALED : FROZEN;
-      property_details_ = property_details_.CopyAddAttributes(attrs);
+    if (map.has_frozen_elements()) {
+      property_details_ = property_details_.CopyAddAttributes(FROZEN);
+    } else if (map.has_sealed_elements()) {
+      property_details_ = property_details_.CopyAddAttributes(SEALED);
     }
   } else if (!map.is_dictionary_map()) {
     DescriptorArray descriptors = map.instance_descriptors(isolate_);
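
The lookup.cc hunks migrate the Array and RegExp species checks from Isolate methods to the new Protectors interface, with the RegExp protector keyed by native context. Mechanically a protector is a one-way flag: cheap to check, invalidated at most once, and invalidation is the point where optimized code that baked in the assumption gets deoptimized. A minimal model (not V8's actual API):

#include <iostream>

class Protectors {
 public:
  static bool IsArraySpeciesLookupChainIntact() { return intact_; }
  static void InvalidateArraySpeciesLookupChain() {
    if (!intact_) return;  // the guard keeps invalidation idempotent
    intact_ = false;
    // Real V8 would also deopt code compiled under the intact assumption.
  }

 private:
  static bool intact_;
};
bool Protectors::intact_ = true;

int main() {
  // Mirrors the guard-then-invalidate shape of InternalUpdateProtector().
  if (Protectors::IsArraySpeciesLookupChainIntact()) {
    Protectors::InvalidateArraySpeciesLookupChain();
  }
  std::cout << Protectors::IsArraySpeciesLookupChainIntact() << "\n";  // 0
}
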
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 6a9359e3a0dce4..48bb86e2dab454 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -507,8 +507,12 @@ bool Map::has_dictionary_elements() const {
   return IsDictionaryElementsKind(elements_kind());
 }
 
-bool Map::has_frozen_or_sealed_elements() const {
-  return IsFrozenOrSealedElementsKind(elements_kind());
+bool Map::has_any_nonextensible_elements() const {
+  return IsAnyNonextensibleElementsKind(elements_kind());
+}
+
+bool Map::has_nonextensible_elements() const {
+  return IsNonextensibleElementsKind(elements_kind());
 }
 
 bool Map::has_sealed_elements() const {
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index d21f0e1a1294d6..49b9ccea91f73c 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -324,7 +324,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
     DCHECK(to_kind == DICTIONARY_ELEMENTS ||
            to_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
            IsTypedArrayElementsKind(to_kind) ||
-           IsFrozenOrSealedElementsKind(to_kind));
+           IsAnyNonextensibleElementsKind(to_kind));
     to_kind = integrity_source_map_->elements_kind();
   }
 
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 7b4f1abd05fe72..a672d6580a0837 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -281,8 +281,6 @@ VisitorId Map::GetVisitorId(Map map) {
     case JS_DATE_TYPE:
     case JS_ARRAY_ITERATOR_TYPE:
     case JS_ARRAY_TYPE:
-    case JS_GLOBAL_PROXY_TYPE:
-    case JS_GLOBAL_OBJECT_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
     case JS_SET_TYPE:
     case JS_MAP_TYPE:
@@ -321,6 +319,8 @@ VisitorId Map::GetVisitorId(Map map) {
       return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast;
     }
     case JS_API_OBJECT_TYPE:
+    case JS_GLOBAL_PROXY_TYPE:
+    case JS_GLOBAL_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
       return kVisitJSApiObject;
 
@@ -333,7 +333,6 @@ VisitorId Map::GetVisitorId(Map map) {
     case FILLER_TYPE:
     case FOREIGN_TYPE:
     case HEAP_NUMBER_TYPE:
-    case MUTABLE_HEAP_NUMBER_TYPE:
     case FEEDBACK_METADATA_TYPE:
       return kVisitDataObject;
 
@@ -681,6 +680,10 @@ void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
   if (details.location() != kField) return;
   DCHECK_EQ(kData, details.kind());
 
+  if (new_constness != details.constness() && is_prototype_map()) {
+    JSObject::InvalidatePrototypeChains(*this);
+  }
+
   Zone zone(isolate->allocator(), ZONE_NAME);
   ZoneQueue<Map> backlog(&zone);
   backlog.push(*this);
@@ -966,7 +969,7 @@ Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
     DCHECK(to_kind == DICTIONARY_ELEMENTS ||
            to_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
            IsTypedArrayElementsKind(to_kind) ||
-           IsHoleyFrozenOrSealedElementsKind(to_kind));
+           IsAnyHoleyNonextensibleElementsKind(to_kind));
     to_kind = info.integrity_level_source_map.elements_kind();
   }
   if (from_kind != to_kind) {
@@ -1730,6 +1733,12 @@ Handle<Map> Map::CopyReplaceDescriptors(
       descriptors->GeneralizeAllFields();
       result->InitializeDescriptors(isolate, *descriptors,
                                     LayoutDescriptor::FastPointerLayout());
+      // If we were trying to insert a transition but failed because there are
+      // too many transitions already, mark the object as a prototype to avoid
+      // tracking transitions from the detached map.
+      if (flag == INSERT_TRANSITION) {
+        result->set_is_prototype_map(true);
+      }
     }
   } else {
     result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
@@ -2002,6 +2011,15 @@ Handle<Map> Map::CopyForPreventExtensions(
         !old_map_is_dictionary_elements_kind) {
       switch (map->elements_kind()) {
         case PACKED_ELEMENTS:
+          if (attrs_to_add == SEALED) {
+            new_kind = PACKED_SEALED_ELEMENTS;
+          } else if (attrs_to_add == FROZEN) {
+            new_kind = PACKED_FROZEN_ELEMENTS;
+          } else {
+            new_kind = PACKED_NONEXTENSIBLE_ELEMENTS;
+          }
+          break;
+        case PACKED_NONEXTENSIBLE_ELEMENTS:
           if (attrs_to_add == SEALED) {
             new_kind = PACKED_SEALED_ELEMENTS;
           } else if (attrs_to_add == FROZEN) {
@@ -2014,6 +2032,15 @@ Handle<Map> Map::CopyForPreventExtensions(
           }
           break;
         case HOLEY_ELEMENTS:
+          if (attrs_to_add == SEALED) {
+            new_kind = HOLEY_SEALED_ELEMENTS;
+          } else if (attrs_to_add == FROZEN) {
+            new_kind = HOLEY_FROZEN_ELEMENTS;
+          } else {
+            new_kind = HOLEY_NONEXTENSIBLE_ELEMENTS;
+          }
+          break;
+        case HOLEY_NONEXTENSIBLE_ELEMENTS:
           if (attrs_to_add == SEALED) {
             new_kind = HOLEY_SEALED_ELEMENTS;
           } else if (attrs_to_add == FROZEN) {
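
CopyForPreventExtensions above grows the elements-kind transition table: plain Object.preventExtensions now lands in the new *_NONEXTENSIBLE kinds, and an already-nonextensible kind can still tighten to sealed or frozen. A compilable sketch of just the packed half of that table; the enum values and helper name are illustrative, not V8's.

#include <cassert>

enum ElementsKind {
  PACKED_ELEMENTS,
  PACKED_NONEXTENSIBLE_ELEMENTS,
  PACKED_SEALED_ELEMENTS,
  PACKED_FROZEN_ELEMENTS
};
enum IntegrityAttrs { NONE, SEALED, FROZEN };

ElementsKind KindAfterPreventExtensions(ElementsKind kind,
                                        IntegrityAttrs attrs_to_add) {
  switch (kind) {
    case PACKED_ELEMENTS:
    case PACKED_NONEXTENSIBLE_ELEMENTS:  // may still tighten further
      if (attrs_to_add == SEALED) return PACKED_SEALED_ELEMENTS;
      if (attrs_to_add == FROZEN) return PACKED_FROZEN_ELEMENTS;
      return PACKED_NONEXTENSIBLE_ELEMENTS;  // bare preventExtensions
    default:
      return kind;  // sealed/frozen inputs handled elsewhere in the real code
  }
}

int main() {
  assert(KindAfterPreventExtensions(PACKED_ELEMENTS, NONE) ==
         PACKED_NONEXTENSIBLE_ELEMENTS);
  assert(KindAfterPreventExtensions(PACKED_NONEXTENSIBLE_ELEMENTS, FROZEN) ==
         PACKED_FROZEN_ELEMENTS);
  return 0;
}
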
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index c9da19b3e305bf..ef16019685f1c2 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -423,7 +423,8 @@ class Map : public HeapObject {
   inline bool has_fast_string_wrapper_elements() const;
   inline bool has_typed_array_elements() const;
   inline bool has_dictionary_elements() const;
-  inline bool has_frozen_or_sealed_elements() const;
+  inline bool has_any_nonextensible_elements() const;
+  inline bool has_nonextensible_elements() const;
   inline bool has_sealed_elements() const;
   inline bool has_frozen_elements() const;
 
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
index 91fa5890cb4495..613ee096c5f482 100644
--- a/deps/v8/src/objects/microtask-inl.h
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -18,19 +18,9 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(Microtask, Struct)
-OBJECT_CONSTRUCTORS_IMPL(CallbackTask, Microtask)
-OBJECT_CONSTRUCTORS_IMPL(CallableTask, Microtask)
-
-CAST_ACCESSOR(Microtask)
-CAST_ACCESSOR(CallbackTask)
-CAST_ACCESSOR(CallableTask)
-
-ACCESSORS(CallableTask, callable, JSReceiver, kCallableOffset)
-ACCESSORS(CallableTask, context, Context, kContextOffset)
-
-ACCESSORS(CallbackTask, callback, Foreign, kCallbackOffset)
-ACCESSORS(CallbackTask, data, Foreign, kDataOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Microtask)
+TQ_OBJECT_CONSTRUCTORS_IMPL(CallbackTask)
+TQ_OBJECT_CONSTRUCTORS_IMPL(CallableTask)
 
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
index d631bf6903dcad..cd8a71f58c9f4c 100644
--- a/deps/v8/src/objects/microtask.h
+++ b/deps/v8/src/objects/microtask.h
@@ -17,52 +17,35 @@ namespace internal {
 // Abstract base class for all microtasks that can be scheduled on the
 // microtask queue. This class merely serves the purpose of a marker
 // interface.
-class Microtask : public Struct {
+class Microtask : public TorqueGeneratedMicrotask<Microtask, Struct> {
  public:
-  // Dispatched behavior.
-  DECL_CAST(Microtask)
-  DECL_VERIFIER(Microtask)
-
-  OBJECT_CONSTRUCTORS(Microtask, Struct);
+  TQ_OBJECT_CONSTRUCTORS(Microtask)
 };
 
 // A CallbackTask is a special Microtask that allows us to schedule
 // C++ microtask callbacks on the microtask queue. This is heavily
 // used by Blink for example.
-class CallbackTask : public Microtask {
+class CallbackTask
+    : public TorqueGeneratedCallbackTask<CallbackTask, Microtask> {
  public:
-  DECL_ACCESSORS(callback, Foreign)
-  DECL_ACCESSORS(data, Foreign)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize,
-                                TORQUE_GENERATED_CALLBACK_TASK_FIELDS)
-
   // Dispatched behavior.
-  DECL_CAST(CallbackTask)
   DECL_PRINTER(CallbackTask)
-  DECL_VERIFIER(CallbackTask)
 
-  OBJECT_CONSTRUCTORS(CallbackTask, Microtask);
+  TQ_OBJECT_CONSTRUCTORS(CallbackTask)
 };
 
 // A CallableTask is a special (internal) Microtask that allows us to
 // schedule arbitrary callables on the microtask queue. We use this
 // for various tests of the microtask queue.
-class CallableTask : public Microtask {
+class CallableTask
+    : public TorqueGeneratedCallableTask<CallableTask, Microtask> {
  public:
-  DECL_ACCESSORS(callable, JSReceiver)
-  DECL_ACCESSORS(context, Context)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize,
-                                TORQUE_GENERATED_CALLABLE_TASK_FIELDS)
-
   // Dispatched behavior.
-  DECL_CAST(CallableTask)
   DECL_PRINTER(CallableTask)
   DECL_VERIFIER(CallableTask)
   void BriefPrintDetails(std::ostream& os);
 
-  OBJECT_CONSTRUCTORS(CallableTask, Microtask);
+  TQ_OBJECT_CONSTRUCTORS(CallableTask)
 };
 
 }  // namespace internal
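
Note on the conversion above: TQ_OBJECT_CONSTRUCTORS / TQ_OBJECT_CONSTRUCTORS_IMPL replace the hand-written cast, accessor, and field-offset boilerplate with code emitted by the Torque compiler, so the hand-written class now only derives from a generated TorqueGenerated<Name> base and adds behavior. A minimal standalone C++ sketch of that CRTP layering follows; the class names mirror the ones above, but the field and its accessors are illustrative, not the generated V8 code:

    // Standalone sketch (not V8 code) of the CRTP layering behind
    // TQ_OBJECT_CONSTRUCTORS: the Torque-generated base supplies the field
    // accessors, the hand-written subclass adds behavior only.
    #include <cstdint>
    #include <iostream>

    // Stand-in for the real Microtask base class.
    class Microtask {};

    // In V8 this base is emitted by the Torque compiler from a .tq class
    // definition; here the accessor is written out by hand.
    template <class Derived, class Parent>
    class TorqueGeneratedCallbackTask : public Parent {
     public:
      uintptr_t callback() const { return callback_; }
      void set_callback(uintptr_t value) { callback_ = value; }

     private:
      uintptr_t callback_ = 0;
    };

    class CallbackTask
        : public TorqueGeneratedCallbackTask<CallbackTask, Microtask> {
      // Only non-generated behavior (printers, verifiers, ...) lives here.
    };

    int main() {
      CallbackTask task;
      task.set_callback(0x1234);
      std::cout << std::hex << task.callback() << "\n";  // prints 1234
    }

The same pattern is applied to the module and promise classes later in this patch.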
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index 1ab9b9fb045d5b..ac5451637666e5 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -20,37 +20,24 @@ namespace v8 {
 namespace internal {
 
 OBJECT_CONSTRUCTORS_IMPL(Module, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(SourceTextModule, Module)
-OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfoEntry, Struct)
-OBJECT_CONSTRUCTORS_IMPL(SyntheticModule, Module)
-OBJECT_CONSTRUCTORS_IMPL(JSModuleNamespace, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModule)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfoEntry)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SyntheticModule)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSModuleNamespace)
 
 NEVER_READ_ONLY_SPACE_IMPL(Module)
 NEVER_READ_ONLY_SPACE_IMPL(SourceTextModule)
 NEVER_READ_ONLY_SPACE_IMPL(SyntheticModule)
 
 CAST_ACCESSOR(Module)
-CAST_ACCESSOR(SourceTextModule)
-CAST_ACCESSOR(SyntheticModule)
 ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
 ACCESSORS(Module, module_namespace, HeapObject, kModuleNamespaceOffset)
 ACCESSORS(Module, exception, Object, kExceptionOffset)
 SMI_ACCESSORS(Module, status, kStatusOffset)
 SMI_ACCESSORS(Module, hash, kHashOffset)
 
-ACCESSORS(SourceTextModule, code, Object, kCodeOffset)
-ACCESSORS(SourceTextModule, regular_exports, FixedArray, kRegularExportsOffset)
-ACCESSORS(SourceTextModule, regular_imports, FixedArray, kRegularImportsOffset)
-ACCESSORS(SourceTextModule, requested_modules, FixedArray,
-          kRequestedModulesOffset)
-ACCESSORS(SourceTextModule, script, Script, kScriptOffset)
-ACCESSORS(SourceTextModule, import_meta, Object, kImportMetaOffset)
-SMI_ACCESSORS(SourceTextModule, dfs_index, kDfsIndexOffset)
-SMI_ACCESSORS(SourceTextModule, dfs_ancestor_index, kDfsAncestorIndexOffset)
-
-ACCESSORS(SyntheticModule, name, String, kNameOffset)
-ACCESSORS(SyntheticModule, export_names, FixedArray, kExportNamesOffset)
-ACCESSORS(SyntheticModule, evaluation_steps, Foreign, kEvaluationStepsOffset)
+TQ_SMI_ACCESSORS(SourceTextModule, dfs_index)
+TQ_SMI_ACCESSORS(SourceTextModule, dfs_ancestor_index)
 
 SourceTextModuleInfo SourceTextModule::info() const {
   return (status() >= kEvaluating)
@@ -58,17 +45,10 @@ SourceTextModuleInfo SourceTextModule::info() const {
              : GetSharedFunctionInfo().scope_info().ModuleDescriptorInfo();
 }
 
-CAST_ACCESSOR(JSModuleNamespace)
-ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
-
-CAST_ACCESSOR(SourceTextModuleInfoEntry)
-ACCESSORS(SourceTextModuleInfoEntry, export_name, Object, kExportNameOffset)
-ACCESSORS(SourceTextModuleInfoEntry, local_name, Object, kLocalNameOffset)
-ACCESSORS(SourceTextModuleInfoEntry, import_name, Object, kImportNameOffset)
-SMI_ACCESSORS(SourceTextModuleInfoEntry, module_request, kModuleRequestOffset)
-SMI_ACCESSORS(SourceTextModuleInfoEntry, cell_index, kCellIndexOffset)
-SMI_ACCESSORS(SourceTextModuleInfoEntry, beg_pos, kBegPosOffset)
-SMI_ACCESSORS(SourceTextModuleInfoEntry, end_pos, kEndPosOffset)
+TQ_SMI_ACCESSORS(SourceTextModuleInfoEntry, module_request)
+TQ_SMI_ACCESSORS(SourceTextModuleInfoEntry, cell_index)
+TQ_SMI_ACCESSORS(SourceTextModuleInfoEntry, beg_pos)
+TQ_SMI_ACCESSORS(SourceTextModuleInfoEntry, end_pos)
 
 OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfo, FixedArray)
 CAST_ACCESSOR(SourceTextModuleInfo)
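
The TQ_SMI_ACCESSORS lines above keep the int-typed view over fields stored as tagged Smis, with the field offsets now coming from the Torque-generated class. A rough standalone model of what such an accessor pair does, assuming the 32-bit Smi encoding (value shifted left by one, tag bit zero); the names and encoding choice here are assumptions, not the real macro expansion:

    // Rough model of a Smi accessor pair over a tagged field.
    #include <cassert>
    #include <cstdint>

    inline intptr_t ToSmi(int value) { return static_cast<intptr_t>(value) << 1; }
    inline int FromSmi(intptr_t smi) { return static_cast<int>(smi >> 1); }

    struct SourceTextModuleSketch {
      intptr_t dfs_index_field = 0;  // stored tagged, as in the object layout

      // Roughly what TQ_SMI_ACCESSORS(SourceTextModule, dfs_index) provides:
      // an int view over the tagged field at a Torque-generated offset.
      int dfs_index() const { return FromSmi(dfs_index_field); }
      void set_dfs_index(int value) { dfs_index_field = ToSmi(value); }
    };

    int main() {
      SourceTextModuleSketch m;
      m.set_dfs_index(42);
      assert(m.dfs_index() == 42);
    }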
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 4e89050360cbd9..60b9145d10181e 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -107,21 +107,18 @@ void Module::Reset(Isolate* isolate, Handle<Module> module) {
   module->PrintStatusTransition(kUninstantiated);
 #endif  // DEBUG
 
-  int export_count;
+  const int export_count =
+      module->IsSourceTextModule()
+          ? Handle<SourceTextModule>::cast(module)->regular_exports().length()
+          : Handle<SyntheticModule>::cast(module)->export_names().length();
+  Handle<ObjectHashTable> exports = ObjectHashTable::New(isolate, export_count);
 
   if (module->IsSourceTextModule()) {
-    Handle<SourceTextModule> source_text_module =
-        Handle<SourceTextModule>::cast(module);
-    export_count = source_text_module->regular_exports().length();
-    SourceTextModule::Reset(isolate, source_text_module);
+    SourceTextModule::Reset(isolate, Handle<SourceTextModule>::cast(module));
   } else {
-    export_count =
-        Handle<SyntheticModule>::cast(module)->export_names().length();
     // Nothing to do here.
   }
 
-  Handle<ObjectHashTable> exports = ObjectHashTable::New(isolate, export_count);
-
   module->set_exports(*exports);
   module->set_status(kUninstantiated);
 }
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index b776ddb0be1ea7..08badf0357d220 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -136,14 +136,10 @@ class Module : public HeapObject {
 // When importing a module namespace (import * as foo from "bar"), a
 // JSModuleNamespace object (representing module "bar") is created and bound to
 // the declared variable (foo).  A module can have at most one namespace object.
-class JSModuleNamespace : public JSObject {
+class JSModuleNamespace
+    : public TorqueGeneratedJSModuleNamespace<JSModuleNamespace, JSObject> {
  public:
-  DECL_CAST(JSModuleNamespace)
   DECL_PRINTER(JSModuleNamespace)
-  DECL_VERIFIER(JSModuleNamespace)
-
-  // The actual module whose namespace is being represented.
-  DECL_ACCESSORS(module, Module)
 
   // Retrieve the value exported by [module] under the given [name]. If there is
   // no such export, return Just(undefined). If the export is uninitialized,
@@ -163,16 +159,12 @@ class JSModuleNamespace : public JSObject {
     kInObjectFieldCount,
   };
 
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                TORQUE_GENERATED_JSMODULE_NAMESPACE_FIELDS)
-
   // We need to include in-object fields
   // TODO(v8:8944): improve handling of in-object fields
   static constexpr int kSize =
       kHeaderSize + (kTaggedSize * kInObjectFieldCount);
 
-  OBJECT_CONSTRUCTORS(JSModuleNamespace, JSObject);
+  TQ_OBJECT_CONSTRUCTORS(JSModuleNamespace)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index 8aded12fb5d7b3..b76ae245a2d7f5 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -22,7 +22,8 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(Symbol)
 BIT_FIELD_ACCESSORS(Symbol, flags, is_private, Symbol::IsPrivateBit)
 BIT_FIELD_ACCESSORS(Symbol, flags, is_well_known_symbol,
                     Symbol::IsWellKnownSymbolBit)
-BIT_FIELD_ACCESSORS(Symbol, flags, is_public, Symbol::IsPublicBit)
+BIT_FIELD_ACCESSORS(Symbol, flags, is_in_public_symbol_table,
+                    Symbol::IsInPublicSymbolTableBit)
 BIT_FIELD_ACCESSORS(Symbol, flags, is_interesting_symbol,
                     Symbol::IsInterestingSymbolBit)
 
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index b13aa30fb09286..a02bb3d794520a 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -99,12 +99,11 @@ class Name : public TorqueGeneratedName<Name, HeapObject> {
   STATIC_ASSERT(kArrayIndexLengthBits > 0);
   STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
 
-  class ArrayIndexValueBits
-      : public BitField<unsigned int, kNofHashBitFields, kArrayIndexValueBits> {
-  };  // NOLINT
-  class ArrayIndexLengthBits
-      : public BitField<unsigned int, kNofHashBitFields + kArrayIndexValueBits,
-                        kArrayIndexLengthBits> {};  // NOLINT
+  using ArrayIndexValueBits =
+      BitField<unsigned int, kNofHashBitFields, kArrayIndexValueBits>;
+  using ArrayIndexLengthBits =
+      BitField<unsigned int, kNofHashBitFields + kArrayIndexValueBits,
+               kArrayIndexLengthBits>;
 
   // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
   // can use a mask to test if the length of a string is less than or equal to
@@ -147,9 +146,10 @@ class Symbol : public TorqueGeneratedSymbol<Symbol, Name> {
   // for a detailed description.
   DECL_BOOLEAN_ACCESSORS(is_interesting_symbol)
 
-  // [is_public]: Whether this is a symbol created by Symbol.for. Calling
-  // Symbol.keyFor on such a symbol simply needs to return the attached name.
-  DECL_BOOLEAN_ACCESSORS(is_public)
+  // [is_in_public_symbol_table]: Whether this is a symbol created by
+  // Symbol.for. Calling Symbol.keyFor on such a symbol simply needs
+  // to return the attached name.
+  DECL_BOOLEAN_ACCESSORS(is_in_public_symbol_table)
 
   // [is_private_name]: Whether this is a private name.  Private names
   // are the same as private symbols except they throw on missing
@@ -164,11 +164,11 @@ class Symbol : public TorqueGeneratedSymbol<Symbol, Name> {
   DECL_VERIFIER(Symbol)
 
 // Flags layout.
-#define FLAGS_BIT_FIELDS(V, _)          \
-  V(IsPrivateBit, bool, 1, _)           \
-  V(IsWellKnownSymbolBit, bool, 1, _)   \
-  V(IsPublicBit, bool, 1, _)            \
-  V(IsInterestingSymbolBit, bool, 1, _) \
+#define FLAGS_BIT_FIELDS(V, _)            \
+  V(IsPrivateBit, bool, 1, _)             \
+  V(IsWellKnownSymbolBit, bool, 1, _)     \
+  V(IsInPublicSymbolTableBit, bool, 1, _) \
+  V(IsInterestingSymbolBit, bool, 1, _)   \
   V(IsPrivateNameBit, bool, 1, _)
 
   DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
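
The `using` aliases above are a drop-in replacement for the old NOLINT-annotated class definitions; BitField itself is unchanged, and the bit rename reflects that the flag means "registered in the Symbol.for table" rather than "public" in the private-symbol sense. For reference, a self-contained sketch of the BitField pattern that backs both the array-index fields and the renamed IsInPublicSymbolTableBit (this BitField is a simplified stand-in for V8's, keeping only encode/decode):

    // Simplified stand-in for V8's BitField: each field occupies `size` bits
    // starting at `shift` inside one integer.
    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static constexpr T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> shift);
      }
    };

    // Mirrors the FLAGS_BIT_FIELDS layout above: one-bit flags packed in order.
    using IsPrivateBit = BitField<bool, 0, 1>;
    using IsWellKnownSymbolBit = BitField<bool, 1, 1>;
    using IsInPublicSymbolTableBit = BitField<bool, 2, 1>;

    int main() {
      uint32_t flags = IsInPublicSymbolTableBit::encode(true);
      assert(IsInPublicSymbolTableBit::decode(flags));
      assert(!IsPrivateBit::decode(flags));
    }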
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index c15b212eeca8df..d5bce62d43393f 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -37,7 +37,7 @@ class LookupIterator;
 class FieldType;
 class Module;
 class SourceTextModuleInfoEntry;
-class MutableHeapNumber;
+class HeapNumber;
 class ObjectHashTable;
 class ObjectTemplateInfo;
 class ObjectVisitor;
@@ -173,7 +173,6 @@ class ZoneForwardList;
   V(MapCache)                                  \
   V(Module)                                    \
   V(Microtask)                                 \
-  V(MutableHeapNumber)                         \
   V(Name)                                      \
   V(NameDictionary)                            \
   V(NativeContext)                             \
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 8f9e51ca9efd03..da8c404c1445a6 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -290,20 +290,31 @@
 #define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
   TaggedField<MaybeObject>::Relaxed_Store(p, offset, value)
 
+#ifdef V8_DISABLE_WRITE_BARRIERS
+#define WRITE_BARRIER(object, offset, value)
+#else
 #define WRITE_BARRIER(object, offset, value)                       \
   do {                                                             \
     DCHECK_NOT_NULL(GetHeapFromWritableObject(object));            \
     MarkingBarrier(object, (object).RawField(offset), value);      \
     GenerationalBarrier(object, (object).RawField(offset), value); \
   } while (false)
+#endif
 
+#ifdef V8_DISABLE_WRITE_BARRIERS
+#define WEAK_WRITE_BARRIER(object, offset, value)
+#else
 #define WEAK_WRITE_BARRIER(object, offset, value)                           \
   do {                                                                      \
     DCHECK_NOT_NULL(GetHeapFromWritableObject(object));                     \
     MarkingBarrier(object, (object).RawMaybeWeakField(offset), value);      \
     GenerationalBarrier(object, (object).RawMaybeWeakField(offset), value); \
   } while (false)
+#endif
 
+#ifdef V8_DISABLE_WRITE_BARRIERS
+#define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value)
+#else
 #define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value)                    \
   do {                                                                        \
     DCHECK_NOT_NULL(GetHeapFromWritableObject(object));                       \
@@ -311,7 +322,11 @@
     MarkingBarrier(object, (object).RawField(offset), value);                 \
     GenerationalEphemeronKeyBarrier(table, (object).RawField(offset), value); \
   } while (false)
+#endif
 
+#ifdef V8_DISABLE_WRITE_BARRIERS
+#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode)
+#else
 #define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode)       \
   do {                                                               \
     DCHECK_NOT_NULL(GetHeapFromWritableObject(object));              \
@@ -323,7 +338,11 @@
       GenerationalBarrier(object, (object).RawField(offset), value); \
     }                                                                \
   } while (false)
+#endif
 
+#ifdef V8_DISABLE_WRITE_BARRIERS
+#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode)
+#else
 #define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode)           \
   do {                                                                        \
     DCHECK_NOT_NULL(GetHeapFromWritableObject(object));                       \
@@ -335,7 +354,11 @@
       GenerationalBarrier(object, (object).RawMaybeWeakField(offset), value); \
     }                                                                         \
   } while (false)
+#endif
 
+#ifdef V8_DISABLE_WRITE_BARRIERS
+#define CONDITIONAL_EPHEMERON_KEY_WRITE_BARRIER(object, offset, value, mode)
+#else
 #define CONDITIONAL_EPHEMERON_KEY_WRITE_BARRIER(object, offset, value, mode) \
   do {                                                                       \
     DCHECK_NOT_NULL(GetHeapFromWritableObject(object));                      \
@@ -349,6 +372,7 @@
                                       value);                                \
     }                                                                        \
   } while (false)
+#endif
 
 #define ACQUIRE_READ_INT32_FIELD(p, offset) \
   static_cast<int32_t>(base::Acquire_Load(  \
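
The pattern introduced above compiles every write-barrier macro down to nothing when V8_DISABLE_WRITE_BARRIERS is defined, so call sites need no runtime check and incur zero cost in single-generation, non-incremental-GC builds. A minimal sketch of the idiom; the macro body here is a placeholder, not V8's real barrier logic:

    // Minimal sketch: under the build flag the barrier macro expands to
    // nothing, so call sites compile away entirely.
    #include <cstdio>

    // #define V8_DISABLE_WRITE_BARRIERS 1  // normally set by the build system

    #ifdef V8_DISABLE_WRITE_BARRIERS
    #define WRITE_BARRIER_SKETCH(object, offset, value)
    #else
    #define WRITE_BARRIER_SKETCH(object, offset, value)             \
      do {                                                          \
        std::printf("barrier: %p + %d <- %d\n",                     \
                    static_cast<void*>(object), (offset), (value)); \
      } while (false)
    #endif

    int main() {
      int fake_heap_cell = 0;
      // No-op when write barriers are disabled; logs otherwise.
      WRITE_BARRIER_SKETCH(&fake_heap_cell, 8, 42);
    }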
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 51e380695ef3a7..4c980b2697c87b 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -1022,7 +1022,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
       return Op::template apply<UncompiledDataWithPreparseData::BodyDescriptor>(
           p1, p2, p3, p4);
     case HEAP_NUMBER_TYPE:
-    case MUTABLE_HEAP_NUMBER_TYPE:
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
     case FREE_SPACE_TYPE:
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index b4c8591e5cc297..b346b5b7d15188 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -67,7 +67,6 @@ namespace internal {
                                                          \
   V(MAP_TYPE)                                            \
   V(CODE_TYPE)                                           \
-  V(MUTABLE_HEAP_NUMBER_TYPE)                            \
   V(FOREIGN_TYPE)                                        \
   V(BYTE_ARRAY_TYPE)                                     \
   V(BYTECODE_ARRAY_TYPE)                                 \
@@ -116,7 +115,6 @@ namespace internal {
   V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE)              \
   V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE)               \
   V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE)              \
-  V(FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE)            \
                                                          \
   TORQUE_DEFINED_INSTANCE_TYPES(V)                       \
                                                          \
@@ -348,9 +346,7 @@ namespace internal {
   V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask,    \
     promise_reject_reaction_job_task)                                          \
   V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask,  \
-    promise_resolve_thenable_job_task)                                         \
-  V(_, FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE,                               \
-    FinalizationGroupCleanupJobTask, finalization_group_cleanup_job_task)
+    promise_resolve_thenable_job_task)
 
 // Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
 #define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
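
Removing MUTABLE_HEAP_NUMBER_TYPE and FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE from these V(...) lists removes them from every expansion at once, because the headers use the X-macro idiom: one list macro is instantiated with different per-entry macros (enum values, cast helpers, printers). A self-contained sketch:

    // Self-contained sketch of the X-macro idiom used by the type lists.
    #include <iostream>

    #define TYPE_LIST_SKETCH(V) \
      V(MAP_TYPE)               \
      V(CODE_TYPE)              \
      V(FOREIGN_TYPE)

    // Expansion 1: enum values.
    #define AS_ENUM(NAME) NAME,
    enum InstanceTypeSketch { TYPE_LIST_SKETCH(AS_ENUM) };
    #undef AS_ENUM

    // Expansion 2: printable names.
    #define AS_STRING(NAME) #NAME,
    const char* kTypeNames[] = {TYPE_LIST_SKETCH(AS_STRING)};
    #undef AS_STRING

    int main() {
      std::cout << kTypeNames[CODE_TYPE] << "\n";  // prints CODE_TYPE
    }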
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index b6748401c07e8c..cf8c3ffad25044 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -133,6 +133,13 @@ bool Object::IsNullOrUndefined() const {
 
 bool Object::IsZero() const { return *this == Smi::zero(); }
 
+bool Object::IsPublicSymbol() const {
+  return IsSymbol() && !Symbol::cast(*this).is_private();
+}
+bool Object::IsPrivateSymbol() const {
+  return IsSymbol() && Symbol::cast(*this).is_private();
+}
+
 bool Object::IsNoSharedNameSentinel() const {
   return *this == SharedFunctionInfo::kNoSharedNameSentinel;
 }
@@ -560,7 +567,7 @@ bool Object::FitsRepresentation(Representation representation) {
   if (FLAG_track_fields && representation.IsSmi()) {
     return IsSmi();
   } else if (FLAG_track_double_fields && representation.IsDouble()) {
-    return IsMutableHeapNumber() || IsNumber();
+    return IsNumber();
   } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
     return IsHeapObject();
   } else if (FLAG_track_fields && representation.IsNone()) {
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 9963cba4727a34..134cb3998a5585 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -33,6 +33,7 @@
 #include "src/execution/frames-inl.h"
 #include "src/execution/isolate-inl.h"
 #include "src/execution/microtask-queue.h"
+#include "src/execution/protectors-inl.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/read-only-heap.h"
 #include "src/ic/ic.h"
@@ -193,12 +194,12 @@ Handle<FieldType> Object::OptimalType(Isolate* isolate,
 Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
                                      Representation representation) {
   if (!representation.IsDouble()) return object;
-  auto result = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+  auto result = isolate->factory()->NewHeapNumberWithHoleNaN();
   if (object->IsUninitialized(isolate)) {
     result->set_value_as_bits(kHoleNanInt64);
-  } else if (object->IsMutableHeapNumber()) {
+  } else if (object->IsHeapNumber()) {
     // Ensure that all bits of the double value are preserved.
-    result->set_value_as_bits(MutableHeapNumber::cast(*object).value_as_bits());
+    result->set_value_as_bits(HeapNumber::cast(*object).value_as_bits());
   } else {
     result->set_value(object->Number());
   }
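
With MutableHeapNumber folded into HeapNumber, NewStorageFor boxes a double-representation field in an ordinary HeapNumber and copies raw bit patterns, so NaN payloads, including the hole-NaN marker for uninitialized fields, survive exactly. A standalone model of that bit-preserving box; the hole-NaN constant below is illustrative, not V8's kHoleNanInt64:

    // Standalone model of a bit-preserving double box.
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct HeapNumberBox {
      uint64_t bits = 0;
      void set_value(double v) { std::memcpy(&bits, &v, sizeof v); }
      double value() const {
        double v;
        std::memcpy(&v, &bits, sizeof v);
        return v;
      }
    };

    constexpr uint64_t kHoleNanBitsSketch = 0xFFF7FFFFFFF7FFFFull;

    int main() {
      HeapNumberBox box;
      box.bits = kHoleNanBitsSketch;           // mark field as uninitialized
      assert(box.bits == kHoleNanBitsSketch);  // raw bits survive exactly
      box.set_value(1.5);
      assert(box.value() == 1.5);
    }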
@@ -213,7 +214,7 @@ Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
     return object;
   }
   return isolate->factory()->NewHeapNumberFromBits(
-      MutableHeapNumber::cast(*object).value_as_bits());
+      HeapNumber::cast(*object).value_as_bits());
 }
 
 MaybeHandle<JSReceiver> Object::ToObjectImpl(Isolate* isolate,
@@ -1667,7 +1668,7 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
   Handle<Object> default_species = isolate->array_function();
   if (original_array->IsJSArray() &&
       Handle<JSArray>::cast(original_array)->HasArrayPrototype(isolate) &&
-      isolate->IsArraySpeciesLookupChainIntact()) {
+      Protectors::IsArraySpeciesLookupChainIntact(isolate)) {
     return default_species;
   }
   Handle<Object> constructor = isolate->factory()->undefined_value();
@@ -2077,12 +2078,6 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) {  // NOLINT
       os << ">";
       break;
     }
-    case MUTABLE_HEAP_NUMBER_TYPE: {
-      os << "<MutableHeapNumber ";
-      MutableHeapNumber::cast(*this).MutableHeapNumberPrint(os);
-      os << '>';
-      break;
-    }
     case BIGINT_TYPE: {
       os << "<BigInt ";
       BigInt::cast(*this).BigIntShortPrint(os);
@@ -2192,8 +2187,7 @@ int HeapObject::SizeFromMap(Map map) const {
         FixedArray::unchecked_cast(*this).synchronized_length());
   }
   if (IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)) {
-    // Native context has fixed size.
-    DCHECK_NE(instance_type, NATIVE_CONTEXT_TYPE);
+    if (instance_type == NATIVE_CONTEXT_TYPE) return NativeContext::kSize;
     return Context::SizeFor(Context::unchecked_cast(*this).length());
   }
   if (instance_type == ONE_BYTE_STRING_TYPE ||
@@ -4289,11 +4283,13 @@ Handle<AccessorPair> AccessorPair::Copy(Isolate* isolate,
 }
 
 Handle<Object> AccessorPair::GetComponent(Isolate* isolate,
+                                          Handle<NativeContext> native_context,
                                           Handle<AccessorPair> accessor_pair,
                                           AccessorComponent component) {
   Object accessor = accessor_pair->get(component);
   if (accessor.IsFunctionTemplateInfo()) {
     return ApiNatives::InstantiateFunction(
+               isolate, native_context,
                handle(FunctionTemplateInfo::cast(accessor), isolate))
         .ToHandleChecked();
   }
@@ -5339,12 +5335,9 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
                                               lit->end_position());
     needs_position_info = false;
   }
-  shared_info->set_is_declaration(lit->is_declaration());
-  shared_info->set_is_named_expression(lit->is_named_expression());
-  shared_info->set_is_anonymous_expression(lit->is_anonymous_expression());
+  shared_info->set_syntax_kind(lit->syntax_kind());
   shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
   shared_info->set_language_mode(lit->language_mode());
-  shared_info->set_is_wrapped(lit->is_wrapped());
   shared_info->set_function_literal_id(lit->function_literal_id());
   //  shared_info->set_kind(lit->kind());
   // FunctionKind must have already been set.
@@ -5650,8 +5643,7 @@ bool AllocationSite::IsNested() {
 }
 
 bool AllocationSite::ShouldTrack(ElementsKind from, ElementsKind to) {
-  return IsSmiElementsKind(from) &&
-         IsMoreGeneralElementsKindTransition(from, to);
+  return IsMoreGeneralElementsKindTransition(from, to);
 }
 
 const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
@@ -6145,6 +6137,43 @@ Handle<JSRegExp> JSRegExp::Copy(Handle<JSRegExp> regexp) {
   return Handle<JSRegExp>::cast(isolate->factory()->CopyJSObject(regexp));
 }
 
+Object JSRegExp::Code(bool is_latin1) const {
+  return DataAt(code_index(is_latin1));
+}
+
+Object JSRegExp::Bytecode(bool is_latin1) const {
+  return DataAt(bytecode_index(is_latin1));
+}
+
+bool JSRegExp::ShouldProduceBytecode() {
+  return FLAG_regexp_interpret_all ||
+         (FLAG_regexp_tier_up && !MarkedForTierUp());
+}
+
+// An irregexp is considered marked for tier-up if its tier-up ticks value
+// is non-zero. Atom regexps are not subject to tier-up, so their tier-up
+// ticks value is never set.
+bool JSRegExp::MarkedForTierUp() {
+  DCHECK(data().IsFixedArray());
+  if (TypeTag() == JSRegExp::ATOM) {
+    return false;
+  }
+  return Smi::ToInt(DataAt(kIrregexpTierUpTicksIndex)) != 0;
+}
+
+void JSRegExp::ResetTierUp() {
+  DCHECK(FLAG_regexp_tier_up);
+  DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+  FixedArray::cast(data()).set(JSRegExp::kIrregexpTierUpTicksIndex, Smi::kZero);
+}
+
+void JSRegExp::MarkTierUpForNextExec() {
+  DCHECK(FLAG_regexp_tier_up);
+  DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+  FixedArray::cast(data()).set(JSRegExp::kIrregexpTierUpTicksIndex,
+                               Smi::FromInt(1));
+}
+
 namespace {
 
 template <typename Char>
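
The new JSRegExp helpers above implement a tick-based tiering policy: a regexp runs in the bytecode interpreter until its tier-up ticks value becomes non-zero, at which point the next execution compiles it to native code. A simplified standalone sketch of that control flow; this models the heuristic only, not the real engine plumbing:

    // Tick-based tier-up: interpret until marked, then compile.
    #include <iostream>

    struct RegExpSketch {
      int tier_up_ticks = 0;
      bool compiled = false;

      bool MarkedForTierUp() const { return tier_up_ticks != 0; }
      void MarkTierUpForNextExec() { tier_up_ticks = 1; }

      void Exec() {
        if (!compiled && MarkedForTierUp()) {
          compiled = true;  // switch from bytecode to native code
          tier_up_ticks = 0;
        }
        std::cout << (compiled ? "native" : "interpreted") << "\n";
      }
    };

    int main() {
      RegExpSketch re;
      re.Exec();                    // interpreted
      re.MarkTierUpForNextExec();
      re.Exec();                    // native from here on
    }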
@@ -7405,7 +7434,7 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
 }
 
 template <typename Derived, typename Shape>
-void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
+ExceptionStatus BaseNameDictionary<Derived, Shape>::CollectKeysTo(
     Handle<Derived> dictionary, KeyAccumulator* keys) {
   Isolate* isolate = keys->isolate();
   ReadOnlyRoots roots(isolate);
@@ -7450,16 +7479,19 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
       has_seen_symbol = true;
       continue;
     }
-    keys->AddKey(key, DO_NOT_CONVERT);
+    ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
+    if (!status) return status;
   }
   if (has_seen_symbol) {
     for (int i = 0; i < array_size; i++) {
       int index = Smi::ToInt(array->get(i));
       Object key = dictionary->NameAt(index);
       if (!key.IsSymbol()) continue;
-      keys->AddKey(key, DO_NOT_CONVERT);
+      ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
+      if (!status) return status;
     }
   }
+  return ExceptionStatus::kSuccess;
 }
 
 // Backwards lookup (slow).
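
CollectKeysTo now returns an ExceptionStatus instead of void, so a failing AddKey (for example, while a termination exception is pending) propagates out rather than being silently dropped. A standalone sketch of the propagation idiom; it uses an explicit ok() helper where V8's status type converts to bool, and the names are illustrative:

    // Status propagation: bail out of the loop on the first failure.
    #include <cassert>

    enum class ExceptionStatusSketch : bool {
      kException = false,
      kSuccess = true,
    };

    inline bool ok(ExceptionStatusSketch s) {
      return s == ExceptionStatusSketch::kSuccess;
    }

    ExceptionStatusSketch AddKey(int key) {
      // Pretend keys >= 100 trigger a pending exception.
      return key < 100 ? ExceptionStatusSketch::kSuccess
                       : ExceptionStatusSketch::kException;
    }

    ExceptionStatusSketch CollectKeys(const int* keys, int n) {
      for (int i = 0; i < n; i++) {
        ExceptionStatusSketch status = AddKey(keys[i]);
        if (!ok(status)) return status;  // propagate instead of swallowing
      }
      return ExceptionStatusSketch::kSuccess;
    }

    int main() {
      int good[] = {1, 2, 3};
      int bad[] = {1, 200};
      assert(ok(CollectKeys(good, 3)));
      assert(!ok(CollectKeys(bad, 2)));
    }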
@@ -8067,7 +8099,10 @@ HashTable<NameDictionary, NameDictionaryShape>::Shrink(Isolate* isolate,
                                                        Handle<NameDictionary>,
                                                        int additionalCapacity);
 
-void JSFinalizationGroup::Cleanup(
+template void HashTable<GlobalDictionary, GlobalDictionaryShape>::Rehash(
+    ReadOnlyRoots roots);
+
+Maybe<bool> JSFinalizationGroup::Cleanup(
     Isolate* isolate, Handle<JSFinalizationGroup> finalization_group,
     Handle<Object> cleanup) {
   DCHECK(cleanup->IsCallable());
@@ -8088,23 +8123,17 @@ void JSFinalizationGroup::Cleanup(
               Handle<AllocationSite>::null()));
       iterator->set_finalization_group(*finalization_group);
     }
-
-    v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
-    v8::Local<v8::Value> result;
-    MaybeHandle<Object> exception;
     Handle<Object> args[] = {iterator};
-    bool has_pending_exception = !ToLocal<Value>(
-        Execution::TryCall(
+    if (Execution::Call(
             isolate, cleanup,
-            handle(ReadOnlyRoots(isolate).undefined_value(), isolate), 1, args,
-            Execution::MessageHandling::kReport, &exception),
-        &result);
-    // TODO(marja): (spec): What if there's an exception?
-    USE(has_pending_exception);
-
+            handle(ReadOnlyRoots(isolate).undefined_value(), isolate), 1, args)
+            .is_null()) {
+      return Nothing<bool>();
+    }
     // TODO(marja): (spec): Should the iterator be invalidated after the
     // function returns?
   }
+  return Just(true);
 }
 
 MaybeHandle<FixedArray> JSReceiver::GetPrivateEntries(
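
JSFinalizationGroup::Cleanup switches from Execution::TryCall, which caught and dropped exceptions, to Execution::Call plus a Maybe<bool> return, so a throwing cleanup callback now surfaces at the caller. A minimal sketch of that Nothing/Just control flow, with std::optional standing in for v8::Maybe:

    // Failure is signalled by an empty Maybe rather than caught-and-dropped.
    #include <iostream>
    #include <optional>

    std::optional<bool> RunCleanupCallback(bool callback_throws) {
      if (callback_throws) return std::nullopt;  // Nothing<bool>(): propagate
      return true;                               // Just(true)
    }

    int main() {
      if (!RunCleanupCallback(true).has_value()) {
        std::cout << "exception propagated to caller\n";
      }
    }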
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index d706b2dfb7b51a..b4e78a19377df7 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -289,6 +289,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
 
   V8_INLINE bool IsZero() const;
   V8_INLINE bool IsNoSharedNameSentinel() const;
+  V8_INLINE bool IsPrivateSymbol() const;
+  V8_INLINE bool IsPublicSymbol() const;
 
   enum class Conversion { kToNumber, kToNumeric };
 
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index 07536a4ccbb239..dda848f0100452 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -14,7 +14,7 @@ namespace v8 {
 namespace internal {
 
 template <class Derived, int entrysize>
-Handle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
+MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
     Isolate* isolate, int capacity, AllocationType allocation) {
   // Capacity must be a power of two, since we depend on being able
   // to divide and multiply by 2 (kLoadFactor) to derive capacity
@@ -23,7 +23,7 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
   // field of this object.
   capacity = base::bits::RoundUpToPowerOfTwo32(Max(kMinCapacity, capacity));
   if (capacity > MaxCapacity()) {
-    isolate->heap()->FatalProcessOutOfMemory("invalid table size");
+    return MaybeHandle<Derived>();
   }
   int num_buckets = capacity / kLoadFactor;
   Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
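
This is the root of the MaybeHandle conversion that runs through the rest of the file: an oversized table no longer aborts the process; Allocate returns an empty MaybeHandle and every caller checks ToHandle before proceeding. A standalone model of the shape of that API; MaybeHandleSketch is illustrative, not V8's MaybeHandle:

    // Recoverable allocation: empty result instead of fatal OOM.
    #include <iostream>
    #include <memory>
    #include <vector>

    template <class T>
    struct MaybeHandleSketch {
      std::shared_ptr<T> value;  // empty == allocation failed
      bool ToHandle(std::shared_ptr<T>* out) const {
        if (!value) return false;
        *out = value;
        return true;
      }
    };

    constexpr int kMaxCapacity = 1 << 20;

    MaybeHandleSketch<std::vector<int>> AllocateTable(int capacity) {
      if (capacity > kMaxCapacity) return {};  // was: FatalProcessOutOfMemory
      return {std::make_shared<std::vector<int>>(capacity)};
    }

    int main() {
      std::shared_ptr<std::vector<int>> table;
      if (!AllocateTable(kMaxCapacity + 1).ToHandle(&table)) {
        std::cout << "too large: caller can now throw RangeError instead\n";
      }
    }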
@@ -41,7 +41,7 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
 }
 
 template <class Derived, int entrysize>
-Handle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
+MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
     Isolate* isolate, Handle<Derived> table) {
   DCHECK(!table->IsObsolete());
 
@@ -64,7 +64,7 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Shrink(
   int nof = table->NumberOfElements();
   int capacity = table->Capacity();
   if (nof >= (capacity >> 2)) return table;
-  return Derived::Rehash(isolate, table, capacity / 2);
+  return Derived::Rehash(isolate, table, capacity / 2).ToHandleChecked();
 }
 
 template <class Derived, int entrysize>
@@ -72,10 +72,12 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
     Isolate* isolate, Handle<Derived> table) {
   DCHECK(!table->IsObsolete());
 
+  AllocationType allocation_type = Heap::InYoungGeneration(*table)
+                                       ? AllocationType::kYoung
+                                       : AllocationType::kOld;
+
   Handle<Derived> new_table =
-      Allocate(isolate, kMinCapacity,
-               Heap::InYoungGeneration(*table) ? AllocationType::kYoung
-                                               : AllocationType::kOld);
+      Allocate(isolate, kMinCapacity, allocation_type).ToHandleChecked();
 
   table->SetNextTable(*new_table);
   table->SetNumberOfDeletedElements(kClearedTableSentinel);
@@ -120,9 +122,9 @@ int OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
   return entry;
 }
 
-Handle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
-                                           Handle<OrderedHashSet> table,
-                                           Handle<Object> key) {
+MaybeHandle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
+                                                Handle<OrderedHashSet> table,
+                                                Handle<Object> key) {
   int hash = key->GetOrCreateHash(isolate).value();
   int entry = table->HashToEntry(hash);
   // Walk the chain of the bucket and try finding the key.
@@ -133,7 +135,11 @@ Handle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
     entry = table->NextChainEntry(entry);
   }
 
-  table = OrderedHashSet::EnsureGrowable(isolate, table);
+  MaybeHandle<OrderedHashSet> table_candidate =
+      OrderedHashSet::EnsureGrowable(isolate, table);
+  if (!table_candidate.ToHandle(&table)) {
+    return table_candidate;
+  }
   // Read the existing bucket values.
   int bucket = table->HashToBucket(hash);
   int previous_entry = table->HashToEntry(hash);
@@ -186,14 +192,18 @@ HeapObject OrderedHashMap::GetEmpty(ReadOnlyRoots ro_roots) {
 }
 
 template <class Derived, int entrysize>
-Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
+MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
     Isolate* isolate, Handle<Derived> table, int new_capacity) {
   DCHECK(!table->IsObsolete());
 
-  Handle<Derived> new_table =
+  MaybeHandle<Derived> new_table_candidate =
       Derived::Allocate(isolate, new_capacity,
                         Heap::InYoungGeneration(*table) ? AllocationType::kYoung
                                                         : AllocationType::kOld);
+  Handle<Derived> new_table;
+  if (!new_table_candidate.ToHandle(&new_table)) {
+    return new_table_candidate;
+  }
   int nof = table->NumberOfElements();
   int nod = table->NumberOfDeletedElements();
   int new_buckets = new_table->NumberOfBuckets();
@@ -227,30 +237,33 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
   new_table->SetNumberOfElements(nof);
   table->SetNextTable(*new_table);
 
-  return new_table;
+  return new_table_candidate;
 }
 
-Handle<OrderedHashSet> OrderedHashSet::Rehash(Isolate* isolate,
-                                              Handle<OrderedHashSet> table,
-                                              int new_capacity) {
+MaybeHandle<OrderedHashSet> OrderedHashSet::Rehash(Isolate* isolate,
+                                                   Handle<OrderedHashSet> table,
+                                                   int new_capacity) {
   return OrderedHashTable<OrderedHashSet, 1>::Rehash(isolate, table,
                                                      new_capacity);
 }
 
-Handle<OrderedHashMap> OrderedHashMap::Rehash(Isolate* isolate,
-                                              Handle<OrderedHashMap> table,
-                                              int new_capacity) {
+MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(Isolate* isolate,
+                                                   Handle<OrderedHashMap> table,
+                                                   int new_capacity) {
   return OrderedHashTable<OrderedHashMap, 2>::Rehash(isolate, table,
                                                      new_capacity);
 }
 
-Handle<OrderedNameDictionary> OrderedNameDictionary::Rehash(
+MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Rehash(
     Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity) {
-  Handle<OrderedNameDictionary> new_table =
+  MaybeHandle<OrderedNameDictionary> new_table_candidate =
       OrderedHashTable<OrderedNameDictionary, 3>::Rehash(isolate, table,
                                                          new_capacity);
-  new_table->SetHash(table->Hash());
-  return new_table;
+  Handle<OrderedNameDictionary> new_table;
+  if (new_table_candidate.ToHandle(&new_table)) {
+    new_table->SetHash(table->Hash());
+  }
+  return new_table_candidate;
 }
 
 template <class Derived, int entrysize>
@@ -286,10 +299,10 @@ Address OrderedHashMap::GetHash(Isolate* isolate, Address raw_key) {
   return hash.ptr();
 }
 
-Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
-                                           Handle<OrderedHashMap> table,
-                                           Handle<Object> key,
-                                           Handle<Object> value) {
+MaybeHandle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
+                                                Handle<OrderedHashMap> table,
+                                                Handle<Object> key,
+                                                Handle<Object> value) {
   int hash = key->GetOrCreateHash(isolate).value();
   int entry = table->HashToEntry(hash);
   // Walk the chain of the bucket and try finding the key.
@@ -304,7 +317,11 @@ Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
     }
   }
 
-  table = OrderedHashMap::EnsureGrowable(isolate, table);
+  MaybeHandle<OrderedHashMap> table_candidate =
+      OrderedHashMap::EnsureGrowable(isolate, table);
+  if (!table_candidate.ToHandle(&table)) {
+    return table_candidate;
+  }
   // Read the existing bucket values.
   int bucket = table->HashToBucket(hash);
   int previous_entry = table->HashToEntry(hash);
@@ -345,12 +362,16 @@ V8_EXPORT_PRIVATE int OrderedHashTable<OrderedNameDictionary, 3>::FindEntry(
   return kNotFound;
 }
 
-Handle<OrderedNameDictionary> OrderedNameDictionary::Add(
+MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
     Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
     Handle<Object> value, PropertyDetails details) {
   DCHECK_EQ(kNotFound, table->FindEntry(isolate, *key));
 
-  table = OrderedNameDictionary::EnsureGrowable(isolate, table);
+  MaybeHandle<OrderedNameDictionary> table_candidate =
+      OrderedNameDictionary::EnsureGrowable(isolate, table);
+  if (!table_candidate.ToHandle(&table)) {
+    return table_candidate;
+  }
   // Read the existing bucket values.
   int hash = key->Hash();
   int bucket = table->HashToBucket(hash);
@@ -405,28 +426,31 @@ Handle<OrderedNameDictionary> OrderedNameDictionary::DeleteEntry(
   return Shrink(isolate, table);
 }
 
-Handle<OrderedHashSet> OrderedHashSet::Allocate(Isolate* isolate, int capacity,
-                                                AllocationType allocation) {
+MaybeHandle<OrderedHashSet> OrderedHashSet::Allocate(
+    Isolate* isolate, int capacity, AllocationType allocation) {
   return OrderedHashTable<OrderedHashSet, 1>::Allocate(isolate, capacity,
                                                        allocation);
 }
 
-Handle<OrderedHashMap> OrderedHashMap::Allocate(Isolate* isolate, int capacity,
-                                                AllocationType allocation) {
+MaybeHandle<OrderedHashMap> OrderedHashMap::Allocate(
+    Isolate* isolate, int capacity, AllocationType allocation) {
   return OrderedHashTable<OrderedHashMap, 2>::Allocate(isolate, capacity,
                                                        allocation);
 }
 
-Handle<OrderedNameDictionary> OrderedNameDictionary::Allocate(
+MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Allocate(
     Isolate* isolate, int capacity, AllocationType allocation) {
-  Handle<OrderedNameDictionary> table =
+  MaybeHandle<OrderedNameDictionary> table_candidate =
       OrderedHashTable<OrderedNameDictionary, 3>::Allocate(isolate, capacity,
                                                            allocation);
-  table->SetHash(PropertyArray::kNoHashSentinel);
-  return table;
+  Handle<OrderedNameDictionary> table;
+  if (table_candidate.ToHandle(&table)) {
+    table->SetHash(PropertyArray::kNoHashSentinel);
+  }
+  return table_candidate;
 }
 
-template V8_EXPORT_PRIVATE Handle<OrderedHashSet>
+template V8_EXPORT_PRIVATE MaybeHandle<OrderedHashSet>
 OrderedHashTable<OrderedHashSet, 1>::EnsureGrowable(
     Isolate* isolate, Handle<OrderedHashSet> table);
 
@@ -447,7 +471,7 @@ template V8_EXPORT_PRIVATE bool OrderedHashTable<OrderedHashSet, 1>::Delete(
 template V8_EXPORT_PRIVATE int OrderedHashTable<OrderedHashSet, 1>::FindEntry(
     Isolate* isolate, Object key);
 
-template V8_EXPORT_PRIVATE Handle<OrderedHashMap>
+template V8_EXPORT_PRIVATE MaybeHandle<OrderedHashMap>
 OrderedHashTable<OrderedHashMap, 2>::EnsureGrowable(
     Isolate* isolate, Handle<OrderedHashMap> table);
 
@@ -472,7 +496,7 @@ template Handle<OrderedNameDictionary>
 OrderedHashTable<OrderedNameDictionary, 3>::Shrink(
     Isolate* isolate, Handle<OrderedNameDictionary> table);
 
-template Handle<OrderedNameDictionary>
+template MaybeHandle<OrderedNameDictionary>
 OrderedHashTable<OrderedNameDictionary, 3>::EnsureGrowable(
     Isolate* isolate, Handle<OrderedNameDictionary> table);
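
These `template ...` lines are explicit instantiations: the template definitions live in this one .cc file and the needed specializations are instantiated (and exported) here, which is also why the V8_OS_WIN guards around Delete and HasKey below can be dropped without multiple-definition trouble. A condensed single-file sketch of the idiom:

    // Explicit template instantiation, condensed into one file.
    #include <cassert>

    template <class Small, class Large>
    struct HandlerSketch {
      static bool HasKey(int key);
    };

    template <class Small, class Large>
    bool HandlerSketch<Small, Large>::HasKey(int key) {
      return key >= 0;  // stand-in for the small-vs-large dispatch
    }

    struct SmallSet {};
    struct LargeSet {};

    // Explicit instantiation: emits the definitions for this specialization.
    template struct HandlerSketch<SmallSet, LargeSet>;

    int main() { assert((HandlerSketch<SmallSet, LargeSet>::HasKey(1))); }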
 
@@ -912,8 +936,9 @@ SmallOrderedHashTable<SmallOrderedNameDictionary>::Shrink(
     Isolate* isolate, Handle<SmallOrderedNameDictionary> table);
 
 template <class SmallTable, class LargeTable>
-Handle<HeapObject> OrderedHashTableHandler<SmallTable, LargeTable>::Allocate(
-    Isolate* isolate, int capacity) {
+MaybeHandle<HeapObject>
+OrderedHashTableHandler<SmallTable, LargeTable>::Allocate(Isolate* isolate,
+                                                          int capacity) {
   if (capacity < SmallTable::kMaxCapacity) {
     return SmallTable::Allocate(isolate, capacity);
   }
@@ -921,18 +946,17 @@ Handle<HeapObject> OrderedHashTableHandler<SmallTable, LargeTable>::Allocate(
   return LargeTable::Allocate(isolate, capacity);
 }
 
-template V8_EXPORT_PRIVATE Handle<HeapObject>
+template V8_EXPORT_PRIVATE MaybeHandle<HeapObject>
 OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::Allocate(
     Isolate* isolate, int capacity);
-template V8_EXPORT_PRIVATE Handle<HeapObject>
+template V8_EXPORT_PRIVATE MaybeHandle<HeapObject>
 OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap>::Allocate(
     Isolate* isolate, int capacity);
-template V8_EXPORT_PRIVATE Handle<HeapObject>
+template V8_EXPORT_PRIVATE MaybeHandle<HeapObject>
 OrderedHashTableHandler<SmallOrderedNameDictionary,
                         OrderedNameDictionary>::Allocate(Isolate* isolate,
                                                          int capacity);
 
-#if !defined(V8_OS_WIN)
 template <class SmallTable, class LargeTable>
 bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
     Handle<HeapObject> table, Handle<Object> key) {
@@ -945,9 +969,7 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
   // down to a smaller hash table.
   return LargeTable::Delete(Handle<LargeTable>::cast(table), key);
 }
-#endif
 
-#if !defined(V8_OS_WIN)
 template <class SmallTable, class LargeTable>
 bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
     Isolate* isolate, Handle<HeapObject> table, Handle<Object> key) {
@@ -958,7 +980,6 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
   DCHECK(LargeTable::Is(table));
   return LargeTable::HasKey(isolate, LargeTable::cast(*table), *key);
 }
-#endif
 
 template bool
 OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::HasKey(
@@ -967,10 +988,14 @@ template bool
 OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap>::HasKey(
     Isolate* isolate, Handle<HeapObject> table, Handle<Object> key);
 
-Handle<OrderedHashMap> OrderedHashMapHandler::AdjustRepresentation(
+MaybeHandle<OrderedHashMap> OrderedHashMapHandler::AdjustRepresentation(
     Isolate* isolate, Handle<SmallOrderedHashMap> table) {
-  Handle<OrderedHashMap> new_table =
+  MaybeHandle<OrderedHashMap> new_table_candidate =
       OrderedHashMap::Allocate(isolate, OrderedHashTableMinSize);
+  Handle<OrderedHashMap> new_table;
+  if (!new_table_candidate.ToHandle(&new_table)) {
+    return new_table_candidate;
+  }
   int nof = table->NumberOfElements();
   int nod = table->NumberOfDeletedElements();
 
@@ -982,16 +1007,23 @@ Handle<OrderedHashMap> OrderedHashMapHandler::AdjustRepresentation(
     if (key->IsTheHole(isolate)) continue;
     Handle<Object> value = handle(
         table->GetDataEntry(entry, SmallOrderedHashMap::kValueIndex), isolate);
-    new_table = OrderedHashMap::Add(isolate, new_table, key, value);
+    new_table_candidate = OrderedHashMap::Add(isolate, new_table, key, value);
+    if (!new_table_candidate.ToHandle(&new_table)) {
+      return new_table_candidate;
+    }
   }
 
-  return new_table;
+  return new_table_candidate;
 }
 
-Handle<OrderedHashSet> OrderedHashSetHandler::AdjustRepresentation(
+MaybeHandle<OrderedHashSet> OrderedHashSetHandler::AdjustRepresentation(
     Isolate* isolate, Handle<SmallOrderedHashSet> table) {
-  Handle<OrderedHashSet> new_table =
+  MaybeHandle<OrderedHashSet> new_table_candidate =
       OrderedHashSet::Allocate(isolate, OrderedHashTableMinSize);
+  Handle<OrderedHashSet> new_table;
+  if (!new_table_candidate.ToHandle(&new_table)) {
+    return new_table_candidate;
+  }
   int nof = table->NumberOfElements();
   int nod = table->NumberOfDeletedElements();
 
@@ -1001,17 +1033,24 @@ Handle<OrderedHashSet> OrderedHashSetHandler::AdjustRepresentation(
   for (int entry = 0; entry < (nof + nod); ++entry) {
     Handle<Object> key = handle(table->KeyAt(entry), isolate);
     if (key->IsTheHole(isolate)) continue;
-    new_table = OrderedHashSet::Add(isolate, new_table, key);
+    new_table_candidate = OrderedHashSet::Add(isolate, new_table, key);
+    if (!new_table_candidate.ToHandle(&new_table)) {
+      return new_table_candidate;
+    }
   }
 
-  return new_table;
+  return new_table_candidate;
 }
 
-Handle<OrderedNameDictionary>
+MaybeHandle<OrderedNameDictionary>
 OrderedNameDictionaryHandler::AdjustRepresentation(
     Isolate* isolate, Handle<SmallOrderedNameDictionary> table) {
-  Handle<OrderedNameDictionary> new_table =
+  MaybeHandle<OrderedNameDictionary> new_table_candidate =
       OrderedNameDictionary::Allocate(isolate, OrderedHashTableMinSize);
+  Handle<OrderedNameDictionary> new_table;
+  if (!new_table_candidate.ToHandle(&new_table)) {
+    return new_table_candidate;
+  }
   int nof = table->NumberOfElements();
   int nod = table->NumberOfDeletedElements();
 
@@ -1023,17 +1062,20 @@ OrderedNameDictionaryHandler::AdjustRepresentation(
     if (key->IsTheHole(isolate)) continue;
     Handle<Object> value(table->ValueAt(entry), isolate);
     PropertyDetails details = table->DetailsAt(entry);
-    new_table =
+    new_table_candidate =
         OrderedNameDictionary::Add(isolate, new_table, key, value, details);
+    if (!new_table_candidate.ToHandle(&new_table)) {
+      return new_table_candidate;
+    }
   }
 
-  return new_table;
+  return new_table_candidate;
 }
 
-Handle<HeapObject> OrderedHashMapHandler::Add(Isolate* isolate,
-                                              Handle<HeapObject> table,
-                                              Handle<Object> key,
-                                              Handle<Object> value) {
+MaybeHandle<HeapObject> OrderedHashMapHandler::Add(Isolate* isolate,
+                                                   Handle<HeapObject> table,
+                                                   Handle<Object> key,
+                                                   Handle<Object> value) {
   if (table->IsSmallOrderedHashMap()) {
     Handle<SmallOrderedHashMap> small_map =
         Handle<SmallOrderedHashMap>::cast(table);
@@ -1043,7 +1085,11 @@ Handle<HeapObject> OrderedHashMapHandler::Add(Isolate* isolate,
 
     // We couldn't add to the small table, so let's migrate to the
     // big table.
-    table = OrderedHashMapHandler::AdjustRepresentation(isolate, small_map);
+    MaybeHandle<OrderedHashMap> table_candidate =
+        OrderedHashMapHandler::AdjustRepresentation(isolate, small_map);
+    if (!table_candidate.ToHandle(&table)) {
+      return table_candidate;
+    }
   }
 
   DCHECK(table->IsOrderedHashMap());
@@ -1051,9 +1097,9 @@ Handle<HeapObject> OrderedHashMapHandler::Add(Isolate* isolate,
                              value);
 }
 
-Handle<HeapObject> OrderedHashSetHandler::Add(Isolate* isolate,
-                                              Handle<HeapObject> table,
-                                              Handle<Object> key) {
+MaybeHandle<HeapObject> OrderedHashSetHandler::Add(Isolate* isolate,
+                                                   Handle<HeapObject> table,
+                                                   Handle<Object> key) {
   if (table->IsSmallOrderedHashSet()) {
     Handle<SmallOrderedHashSet> small_set =
         Handle<SmallOrderedHashSet>::cast(table);
@@ -1063,18 +1109,20 @@ Handle<HeapObject> OrderedHashSetHandler::Add(Isolate* isolate,
 
     // We couldn't add to the small table, so let's migrate to the
     // big table.
-    table = OrderedHashSetHandler::AdjustRepresentation(isolate, small_set);
+    MaybeHandle<OrderedHashSet> table_candidate =
+        OrderedHashSetHandler::AdjustRepresentation(isolate, small_set);
+    if (!table_candidate.ToHandle(&table)) {
+      return table_candidate;
+    }
   }
 
   DCHECK(table->IsOrderedHashSet());
   return OrderedHashSet::Add(isolate, Handle<OrderedHashSet>::cast(table), key);
 }
 
-Handle<HeapObject> OrderedNameDictionaryHandler::Add(Isolate* isolate,
-                                                     Handle<HeapObject> table,
-                                                     Handle<Name> key,
-                                                     Handle<Object> value,
-                                                     PropertyDetails details) {
+MaybeHandle<HeapObject> OrderedNameDictionaryHandler::Add(
+    Isolate* isolate, Handle<HeapObject> table, Handle<Name> key,
+    Handle<Object> value, PropertyDetails details) {
   if (table->IsSmallOrderedNameDictionary()) {
     Handle<SmallOrderedNameDictionary> small_dict =
         Handle<SmallOrderedNameDictionary>::cast(table);
@@ -1085,8 +1133,11 @@ Handle<HeapObject> OrderedNameDictionaryHandler::Add(Isolate* isolate,
 
     // We couldn't add to the small table, so let's migrate to the
     // big table.
-    table =
+    MaybeHandle<OrderedNameDictionary> table_candidate =
         OrderedNameDictionaryHandler::AdjustRepresentation(isolate, small_dict);
+    if (!table_candidate.ToHandle(&table)) {
+      return table_candidate;
+    }
   }
 
   DCHECK(table->IsOrderedNameDictionary());
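
The handler classes in this file implement a two-representation design: inserts go to a compact SmallOrderedHashTable until it no longer fits, then AdjustRepresentation migrates the entries into the large table. A standalone sketch of that migrate-on-overflow shape; the standard containers stand in for the real tables:

    // Two-representation table: small and linear until full, then large.
    #include <iostream>
    #include <map>
    #include <utility>
    #include <variant>
    #include <vector>

    using SmallTable = std::vector<std::pair<int, int>>;  // compact, cheap
    using LargeTable = std::map<int, int>;                // scalable

    constexpr size_t kSmallMaxCapacity = 4;

    using Table = std::variant<SmallTable, LargeTable>;

    void Add(Table& t, int key, int value) {
      if (auto* small = std::get_if<SmallTable>(&t)) {
        if (small->size() < kSmallMaxCapacity) {
          small->emplace_back(key, value);
          return;
        }
        LargeTable large(small->begin(), small->end());  // migrate entries
        t = std::move(large);
      }
      std::get<LargeTable>(t)[key] = value;
    }

    int main() {
      Table t = SmallTable{};
      for (int i = 0; i < 6; i++) Add(t, i, i * i);
      std::cout << "migrated: " << std::holds_alternative<LargeTable>(t) << "\n";
    }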
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 66dc36e81fce46..21decaeba72246 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -64,8 +64,8 @@ class OrderedHashTable : public FixedArray {
  public:
   // Returns an OrderedHashTable (possibly |table|) with enough space
   // to add at least one new element.
-  static Handle<Derived> EnsureGrowable(Isolate* isolate,
-                                        Handle<Derived> table);
+  static MaybeHandle<Derived> EnsureGrowable(Isolate* isolate,
+                                             Handle<Derived> table);
 
   // Returns an OrderedHashTable (possibly |table|) that's shrunken
   // if possible.
@@ -197,11 +197,11 @@ class OrderedHashTable : public FixedArray {
 
  protected:
   // Returns an OrderedHashTable with a capacity of at least |capacity|.
-  static Handle<Derived> Allocate(
+  static MaybeHandle<Derived> Allocate(
       Isolate* isolate, int capacity,
       AllocationType allocation = AllocationType::kYoung);
-  static Handle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
-                                int new_capacity);
+  static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
+                                     int new_capacity);
 
   void SetNumberOfBuckets(int num) {
     set(NumberOfBucketsIndex(), Smi::FromInt(num));
@@ -235,16 +235,16 @@ class V8_EXPORT_PRIVATE OrderedHashSet
  public:
   DECL_CAST(OrderedHashSet)
 
-  static Handle<OrderedHashSet> Add(Isolate* isolate,
-                                    Handle<OrderedHashSet> table,
-                                    Handle<Object> value);
+  static MaybeHandle<OrderedHashSet> Add(Isolate* isolate,
+                                         Handle<OrderedHashSet> table,
+                                         Handle<Object> value);
   static Handle<FixedArray> ConvertToKeysArray(Isolate* isolate,
                                                Handle<OrderedHashSet> table,
                                                GetKeysConversion convert);
-  static Handle<OrderedHashSet> Rehash(Isolate* isolate,
-                                       Handle<OrderedHashSet> table,
-                                       int new_capacity);
-  static Handle<OrderedHashSet> Allocate(
+  static MaybeHandle<OrderedHashSet> Rehash(Isolate* isolate,
+                                            Handle<OrderedHashSet> table,
+                                            int new_capacity);
+  static MaybeHandle<OrderedHashSet> Allocate(
       Isolate* isolate, int capacity,
       AllocationType allocation = AllocationType::kYoung);
   static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
@@ -262,16 +262,17 @@ class V8_EXPORT_PRIVATE OrderedHashMap
 
   // Returns a value if the OrderedHashMap contains the key, otherwise
   // returns undefined.
-  static Handle<OrderedHashMap> Add(Isolate* isolate,
-                                    Handle<OrderedHashMap> table,
-                                    Handle<Object> key, Handle<Object> value);
+  static MaybeHandle<OrderedHashMap> Add(Isolate* isolate,
+                                         Handle<OrderedHashMap> table,
+                                         Handle<Object> key,
+                                         Handle<Object> value);
 
-  static Handle<OrderedHashMap> Allocate(
+  static MaybeHandle<OrderedHashMap> Allocate(
       Isolate* isolate, int capacity,
       AllocationType allocation = AllocationType::kYoung);
-  static Handle<OrderedHashMap> Rehash(Isolate* isolate,
-                                       Handle<OrderedHashMap> table,
-                                       int new_capacity);
+  static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
+                                            Handle<OrderedHashMap> table,
+                                            int new_capacity);
   Object ValueAt(int entry);
 
   // This takes and returns raw Address values containing tagged Object
@@ -656,7 +657,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) OrderedHashTableHandler {
  public:
   using Entry = int;
 
-  static Handle<HeapObject> Allocate(Isolate* isolate, int capacity);
+  static MaybeHandle<HeapObject> Allocate(Isolate* isolate, int capacity);
   static bool Delete(Handle<HeapObject> table, Handle<Object> key);
   static bool HasKey(Isolate* isolate, Handle<HeapObject> table,
                      Handle<Object> key);
@@ -672,9 +673,9 @@ extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
 class V8_EXPORT_PRIVATE OrderedHashMapHandler
     : public OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap> {
  public:
-  static Handle<HeapObject> Add(Isolate* isolate, Handle<HeapObject> table,
-                                Handle<Object> key, Handle<Object> value);
-  static Handle<OrderedHashMap> AdjustRepresentation(
+  static MaybeHandle<HeapObject> Add(Isolate* isolate, Handle<HeapObject> table,
+                                     Handle<Object> key, Handle<Object> value);
+  static MaybeHandle<OrderedHashMap> AdjustRepresentation(
       Isolate* isolate, Handle<SmallOrderedHashMap> table);
 };
 
@@ -684,9 +685,9 @@ extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
 class V8_EXPORT_PRIVATE OrderedHashSetHandler
     : public OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet> {
  public:
-  static Handle<HeapObject> Add(Isolate* isolate, Handle<HeapObject> table,
-                                Handle<Object> key);
-  static Handle<OrderedHashSet> AdjustRepresentation(
+  static MaybeHandle<HeapObject> Add(Isolate* isolate, Handle<HeapObject> table,
+                                     Handle<Object> key);
+  static MaybeHandle<OrderedHashSet> AdjustRepresentation(
       Isolate* isolate, Handle<SmallOrderedHashSet> table);
 };
 
@@ -695,7 +696,7 @@ class OrderedNameDictionary
  public:
   DECL_CAST(OrderedNameDictionary)
 
-  V8_EXPORT_PRIVATE static Handle<OrderedNameDictionary> Add(
+  V8_EXPORT_PRIVATE static MaybeHandle<OrderedNameDictionary> Add(
       Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
       Handle<Object> value, PropertyDetails details);
 
@@ -705,11 +706,11 @@ class OrderedNameDictionary
   V8_EXPORT_PRIVATE static Handle<OrderedNameDictionary> DeleteEntry(
       Isolate* isolate, Handle<OrderedNameDictionary> table, int entry);
 
-  static Handle<OrderedNameDictionary> Allocate(
+  static MaybeHandle<OrderedNameDictionary> Allocate(
       Isolate* isolate, int capacity,
       AllocationType allocation = AllocationType::kYoung);
 
-  static Handle<OrderedNameDictionary> Rehash(
+  static MaybeHandle<OrderedNameDictionary> Rehash(
       Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity);
 
   // Returns the value for entry.
@@ -745,9 +746,9 @@ class V8_EXPORT_PRIVATE OrderedNameDictionaryHandler
     : public OrderedHashTableHandler<SmallOrderedNameDictionary,
                                      OrderedNameDictionary> {
  public:
-  static Handle<HeapObject> Add(Isolate* isolate, Handle<HeapObject> table,
-                                Handle<Name> key, Handle<Object> value,
-                                PropertyDetails details);
+  static MaybeHandle<HeapObject> Add(Isolate* isolate, Handle<HeapObject> table,
+                                     Handle<Name> key, Handle<Object> value,
+                                     PropertyDetails details);
   static Handle<HeapObject> Shrink(Isolate* isolate, Handle<HeapObject> table);
 
   static Handle<HeapObject> DeleteEntry(Isolate* isolate,
@@ -779,7 +780,7 @@ class V8_EXPORT_PRIVATE OrderedNameDictionaryHandler
   static const int kNotFound = -1;
 
  protected:
-  static Handle<OrderedNameDictionary> AdjustRepresentation(
+  static MaybeHandle<OrderedNameDictionary> AdjustRepresentation(
       Isolate* isolate, Handle<SmallOrderedNameDictionary> table);
 };
 
diff --git a/deps/v8/src/objects/promise-inl.h b/deps/v8/src/objects/promise-inl.h
index 6807ac88f42b3e..da11731e25e669 100644
--- a/deps/v8/src/objects/promise-inl.h
+++ b/deps/v8/src/objects/promise-inl.h
@@ -16,41 +16,12 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(PromiseReactionJobTask, Microtask)
-OBJECT_CONSTRUCTORS_IMPL(PromiseFulfillReactionJobTask, PromiseReactionJobTask)
-OBJECT_CONSTRUCTORS_IMPL(PromiseRejectReactionJobTask, PromiseReactionJobTask)
-OBJECT_CONSTRUCTORS_IMPL(PromiseResolveThenableJobTask, Microtask)
-OBJECT_CONSTRUCTORS_IMPL(PromiseCapability, Struct)
-OBJECT_CONSTRUCTORS_IMPL(PromiseReaction, Struct)
-
-CAST_ACCESSOR(PromiseCapability)
-CAST_ACCESSOR(PromiseReaction)
-CAST_ACCESSOR(PromiseReactionJobTask)
-CAST_ACCESSOR(PromiseFulfillReactionJobTask)
-CAST_ACCESSOR(PromiseRejectReactionJobTask)
-CAST_ACCESSOR(PromiseResolveThenableJobTask)
-
-ACCESSORS(PromiseReaction, next, Object, kNextOffset)
-ACCESSORS(PromiseReaction, reject_handler, HeapObject, kRejectHandlerOffset)
-ACCESSORS(PromiseReaction, fulfill_handler, HeapObject, kFulfillHandlerOffset)
-ACCESSORS(PromiseReaction, promise_or_capability, HeapObject,
-          kPromiseOrCapabilityOffset)
-
-ACCESSORS(PromiseResolveThenableJobTask, context, Context, kContextOffset)
-ACCESSORS(PromiseResolveThenableJobTask, promise_to_resolve, JSPromise,
-          kPromiseToResolveOffset)
-ACCESSORS(PromiseResolveThenableJobTask, then, JSReceiver, kThenOffset)
-ACCESSORS(PromiseResolveThenableJobTask, thenable, JSReceiver, kThenableOffset)
-
-ACCESSORS(PromiseReactionJobTask, context, Context, kContextOffset)
-ACCESSORS(PromiseReactionJobTask, argument, Object, kArgumentOffset)
-ACCESSORS(PromiseReactionJobTask, handler, HeapObject, kHandlerOffset)
-ACCESSORS(PromiseReactionJobTask, promise_or_capability, HeapObject,
-          kPromiseOrCapabilityOffset)
-
-ACCESSORS(PromiseCapability, promise, HeapObject, kPromiseOffset)
-ACCESSORS(PromiseCapability, resolve, Object, kResolveOffset)
-ACCESSORS(PromiseCapability, reject, Object, kRejectOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseReactionJobTask)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseFulfillReactionJobTask)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseRejectReactionJobTask)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseResolveThenableJobTask)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseCapability)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseReaction)
 
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index f7c60413d1925a..2582543f77f663 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -24,93 +24,59 @@ class JSPromise;
 //
 // classes, which are used to represent either kind of reaction, and we distinguish
 // them by their instance types.
-class PromiseReactionJobTask : public Microtask {
+class PromiseReactionJobTask
+    : public TorqueGeneratedPromiseReactionJobTask<PromiseReactionJobTask,
+                                                   Microtask> {
  public:
-  DECL_ACCESSORS(argument, Object)
-  DECL_ACCESSORS(context, Context)
-  DECL_ACCESSORS(handler, HeapObject)
-  // [promise_or_capability]: Either a JSPromise (in case of native promises),
-  // a PromiseCapability (general case), or undefined (in case of await).
-  DECL_ACCESSORS(promise_or_capability, HeapObject)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      Microtask::kHeaderSize, TORQUE_GENERATED_PROMISE_REACTION_JOB_TASK_FIELDS)
-
-  // Dispatched behavior.
-  DECL_CAST(PromiseReactionJobTask)
-  DECL_VERIFIER(PromiseReactionJobTask)
   static const int kSizeOfAllPromiseReactionJobTasks = kHeaderSize;
-  OBJECT_CONSTRUCTORS(PromiseReactionJobTask, Microtask);
+  TQ_OBJECT_CONSTRUCTORS(PromiseReactionJobTask)
 };
 
 // Struct to hold state required for a PromiseReactionJob of type "Fulfill".
-class PromiseFulfillReactionJobTask : public PromiseReactionJobTask {
+class PromiseFulfillReactionJobTask
+    : public TorqueGeneratedPromiseFulfillReactionJobTask<
+          PromiseFulfillReactionJobTask, PromiseReactionJobTask> {
  public:
   // Dispatched behavior.
-  DECL_CAST(PromiseFulfillReactionJobTask)
   DECL_PRINTER(PromiseFulfillReactionJobTask)
-  DECL_VERIFIER(PromiseFulfillReactionJobTask)
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      PromiseReactionJobTask::kHeaderSize,
-      TORQUE_GENERATED_PROMISE_FULFILL_REACTION_JOB_TASK_FIELDS)
   STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
 
-  OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask, PromiseReactionJobTask);
+  TQ_OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask)
 };
 
 // Struct to hold state required for a PromiseReactionJob of type "Reject".
-class PromiseRejectReactionJobTask : public PromiseReactionJobTask {
+class PromiseRejectReactionJobTask
+    : public TorqueGeneratedPromiseRejectReactionJobTask<
+          PromiseRejectReactionJobTask, PromiseReactionJobTask> {
  public:
   // Dispatched behavior.
-  DECL_CAST(PromiseRejectReactionJobTask)
   DECL_PRINTER(PromiseRejectReactionJobTask)
-  DECL_VERIFIER(PromiseRejectReactionJobTask)
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      PromiseReactionJobTask::kHeaderSize,
-      TORQUE_GENERATED_PROMISE_REJECT_REACTION_JOB_TASK_FIELDS)
   STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
 
-  OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask, PromiseReactionJobTask);
+  TQ_OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask)
 };
 
 // A container struct to hold state required for PromiseResolveThenableJob.
-class PromiseResolveThenableJobTask : public Microtask {
+class PromiseResolveThenableJobTask
+    : public TorqueGeneratedPromiseResolveThenableJobTask<
+          PromiseResolveThenableJobTask, Microtask> {
  public:
-  DECL_ACCESSORS(context, Context)
-  DECL_ACCESSORS(promise_to_resolve, JSPromise)
-  DECL_ACCESSORS(then, JSReceiver)
-  DECL_ACCESSORS(thenable, JSReceiver)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      Microtask::kHeaderSize,
-      TORQUE_GENERATED_PROMISE_RESOLVE_THENABLE_JOB_TASK_FIELDS)
-
   // Dispatched behavior.
-  DECL_CAST(PromiseResolveThenableJobTask)
   DECL_PRINTER(PromiseResolveThenableJobTask)
-  DECL_VERIFIER(PromiseResolveThenableJobTask)
 
-  OBJECT_CONSTRUCTORS(PromiseResolveThenableJobTask, Microtask);
+  TQ_OBJECT_CONSTRUCTORS(PromiseResolveThenableJobTask)
 };
 
 // Struct to hold the state of a PromiseCapability.
-class PromiseCapability : public Struct {
+class PromiseCapability
+    : public TorqueGeneratedPromiseCapability<PromiseCapability, Struct> {
  public:
-  DECL_ACCESSORS(promise, HeapObject)
-  DECL_ACCESSORS(resolve, Object)
-  DECL_ACCESSORS(reject, Object)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
-                                TORQUE_GENERATED_PROMISE_CAPABILITY_FIELDS)
-
   // Dispatched behavior.
-  DECL_CAST(PromiseCapability)
   DECL_PRINTER(PromiseCapability)
-  DECL_VERIFIER(PromiseCapability)
 
-  OBJECT_CONSTRUCTORS(PromiseCapability, Struct);
+  TQ_OBJECT_CONSTRUCTORS(PromiseCapability)
 };
 
 // A representation of promise reaction. This differs from the specification
@@ -130,26 +96,15 @@ class PromiseCapability : public Struct {
 // Smi 0. On the JSPromise instance they are linked in reverse order,
 // and are turned into the proper order again when scheduling them on
 // the microtask queue.
-class PromiseReaction : public Struct {
+class PromiseReaction
+    : public TorqueGeneratedPromiseReaction<PromiseReaction, Struct> {
  public:
   enum Type { kFulfill, kReject };
 
-  DECL_ACCESSORS(next, Object)
-  DECL_ACCESSORS(reject_handler, HeapObject)
-  DECL_ACCESSORS(fulfill_handler, HeapObject)
-  // [promise_or_capability]: Either a JSPromise (in case of native promises),
-  // a PromiseCapability (general case), or undefined (in case of await).
-  DECL_ACCESSORS(promise_or_capability, HeapObject)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
-                                TORQUE_GENERATED_PROMISE_REACTION_FIELDS)
-
   // Dispatched behavior.
-  DECL_CAST(PromiseReaction)
   DECL_PRINTER(PromiseReaction)
-  DECL_VERIFIER(PromiseReaction)
 
-  OBJECT_CONSTRUCTORS(PromiseReaction, Struct);
+  TQ_OBJECT_CONSTRUCTORS(PromiseReaction)
 };
 
 }  // namespace internal
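
promise.h above swaps hand-written accessor, cast, and verifier boilerplate for TorqueGenerated* base classes. The generated bases follow a CRTP shape: parameterized on the concrete class and its real parent, they inject the field accessors once. A hedged standalone sketch of that shape, not the actual generated code (real Torque output also emits field offsets, write barriers, and verifiers):

```cpp
#include <cstdio>

// Standalone sketch of the CRTP shape behind TorqueGenerated<Name> bases:
// the generated template provides accessors, and each handwritten class
// derives from it as <itself, its real parent>. Field storage here is a
// plain member; the real generated code reads tagged fields by offset.
struct Microtask {};  // stand-in for the true parent class

template <typename Derived, typename Parent>
class TorqueGeneratedPromiseReactionJobTask : public Parent {
 public:
  // Generated accessors; V8's versions go through tagged-field offsets.
  int argument() const { return argument_; }
  void set_argument(int v) { argument_ = v; }

  // Generated DECL_CAST equivalent.
  static Derived* cast(Microtask* obj) { return static_cast<Derived*>(obj); }

 private:
  int argument_ = 0;
};

// The handwritten class now only adds behavior on top of the generated base.
class PromiseReactionJobTask final
    : public TorqueGeneratedPromiseReactionJobTask<PromiseReactionJobTask,
                                                   Microtask> {};

int main() {
  PromiseReactionJobTask task;
  task.set_argument(42);
  std::printf("argument=%d\n", task.argument());
}
```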
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 5c71330280a634..62a472aa901eec 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -61,10 +61,10 @@ class PropertyArray : public HeapObject {
   using BodyDescriptor = FlexibleBodyDescriptor<kHeaderSize>;
 
   static const int kLengthFieldSize = 10;
-  class LengthField : public BitField<int, 0, kLengthFieldSize> {};
+  using LengthField = BitField<int, 0, kLengthFieldSize>;
   static const int kMaxLength = LengthField::kMax;
-  class HashField : public BitField<int, kLengthFieldSize,
-                                    kSmiValueSize - kLengthFieldSize - 1> {};
+  using HashField =
+      BitField<int, kLengthFieldSize, kSmiValueSize - kLengthFieldSize - 1>;
 
   static const int kNoHashSentinel = 0;
 
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
index 7836575edfdea1..e350fe2c278b83 100644
--- a/deps/v8/src/objects/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -310,13 +310,10 @@ class PropertyDetails {
 
   // Bit fields in value_ (type, shift, size). Must be public so the
   // constants can be embedded in generated code.
-  class KindField : public BitField<PropertyKind, 0, 1> {};
-  class LocationField : public BitField<PropertyLocation, KindField::kNext, 1> {
-  };
-  class ConstnessField
-      : public BitField<PropertyConstness, LocationField::kNext, 1> {};
-  class AttributesField
-      : public BitField<PropertyAttributes, ConstnessField::kNext, 3> {};
+  using KindField = BitField<PropertyKind, 0, 1>;
+  using LocationField = KindField::Next<PropertyLocation, 1>;
+  using ConstnessField = LocationField::Next<PropertyConstness, 1>;
+  using AttributesField = ConstnessField::Next<PropertyAttributes, 3>;
   static const int kAttributesReadOnlyMask =
       (READ_ONLY << AttributesField::kShift);
   static const int kAttributesDontDeleteMask =
@@ -325,24 +322,19 @@ class PropertyDetails {
       (DONT_ENUM << AttributesField::kShift);
 
   // Bit fields for normalized objects.
-  class PropertyCellTypeField
-      : public BitField<PropertyCellType, AttributesField::kNext, 2> {};
-  class DictionaryStorageField
-      : public BitField<uint32_t, PropertyCellTypeField::kNext, 23> {};
+  using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 2>;
+  using DictionaryStorageField = PropertyCellTypeField::Next<uint32_t, 23>;
 
   // Bit fields for fast objects.
-  class RepresentationField
-      : public BitField<uint32_t, AttributesField::kNext, 3> {};
-  class DescriptorPointer
-      : public BitField<uint32_t, RepresentationField::kNext,
-                        kDescriptorIndexBitCount> {};  // NOLINT
-  class FieldIndexField : public BitField<uint32_t, DescriptorPointer::kNext,
-                                          kDescriptorIndexBitCount> {
-  };  // NOLINT
+  using RepresentationField = AttributesField::Next<uint32_t, 3>;
+  using DescriptorPointer =
+      RepresentationField::Next<uint32_t, kDescriptorIndexBitCount>;
+  using FieldIndexField =
+      DescriptorPointer::Next<uint32_t, kDescriptorIndexBitCount>;
 
   // All bits for both fast and slow objects must fit in a smi.
-  STATIC_ASSERT(DictionaryStorageField::kNext <= 31);
-  STATIC_ASSERT(FieldIndexField::kNext <= 31);
+  STATIC_ASSERT(DictionaryStorageField::kLastUsedBit < 31);
+  STATIC_ASSERT(FieldIndexField::kLastUsedBit < 31);
 
   static const int kInitialIndex = 1;
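
The PropertyDetails hunk replaces subclassed BitFields with `using` aliases chained through `Next<>`, so each field's shift is derived from its predecessor and `kLastUsedBit` bounds the packed layout. A self-contained approximation of such a BitField (the real one lives in V8's base library; this version is simplified but keeps the same member names):

```cpp
#include <cstdint>
#include <cstdio>

// Minimal BitField in the spirit of V8's base::BitField: a <type, shift,
// size> triple with encode/decode and a Next<> alias for chaining.
template <typename T, int shift, int size>
struct BitField {
  static constexpr int kShift = shift;
  static constexpr int kSize = size;
  static constexpr int kLastUsedBit = shift + size - 1;
  static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
  static constexpr T kMax = static_cast<T>((uint32_t{1} << size) - 1);

  // The field immediately after this one; chaining keeps layouts adjacent
  // without hand-maintained shift constants.
  template <typename T2, int size2>
  using Next = BitField<T2, shift + size, size2>;

  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

enum class PropertyKind : uint32_t { kData = 0, kAccessor = 1 };

// Layout mirroring the diff: each field is declared via the previous one.
using KindField = BitField<PropertyKind, 0, 1>;
using LocationField = KindField::Next<uint32_t, 1>;
using ConstnessField = LocationField::Next<uint32_t, 1>;
using AttributesField = ConstnessField::Next<uint32_t, 3>;
static_assert(AttributesField::kLastUsedBit < 31, "must fit in a Smi");

int main() {
  uint32_t packed = KindField::encode(PropertyKind::kAccessor) |
                    AttributesField::encode(5);
  std::printf("kind=%u attrs=%u\n",
              static_cast<uint32_t>(KindField::decode(packed)),
              AttributesField::decode(packed));
}
```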
 
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index eca8bc1ecd41f3..c390298b5d2cf6 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -166,7 +166,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
 
     bool has_simple_parameters = false;
     bool is_asm_module = false;
-    bool calls_sloppy_eval = false;
+    bool sloppy_eval_can_extend_vars = false;
     if (scope->is_function_scope()) {
       DeclarationScope* function_scope = scope->AsDeclarationScope();
       has_simple_parameters = function_scope->has_simple_parameters();
@@ -175,13 +175,14 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
     FunctionKind function_kind = kNormalFunction;
     if (scope->is_declaration_scope()) {
       function_kind = scope->AsDeclarationScope()->function_kind();
-      calls_sloppy_eval = scope->AsDeclarationScope()->calls_sloppy_eval();
+      sloppy_eval_can_extend_vars =
+          scope->AsDeclarationScope()->sloppy_eval_can_extend_vars();
     }
 
     // Encode the flags.
     int flags =
         ScopeTypeField::encode(scope->scope_type()) |
-        CallsSloppyEvalField::encode(calls_sloppy_eval) |
+        SloppyEvalCanExtendVarsField::encode(sloppy_eval_can_extend_vars) |
         LanguageModeField::encode(scope->language_mode()) |
         DeclarationScopeField::encode(scope->is_declaration_scope()) |
         ReceiverVariableField::encode(receiver_info) |
@@ -218,8 +219,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
           uint32_t info =
               VariableModeField::encode(var->mode()) |
               InitFlagField::encode(var->initialization_flag()) |
-              RequiresBrandCheckField::encode(
-                  var->get_requires_brand_check_flag()) |
               MaybeAssignedFlagField::encode(var->maybe_assigned()) |
               ParameterNumberField::encode(ParameterNumberField::kMax);
           scope_info.set(context_local_base + local_index, *var->name(), mode);
@@ -236,8 +235,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
               VariableModeField::encode(var->mode()) |
               InitFlagField::encode(var->initialization_flag()) |
               MaybeAssignedFlagField::encode(var->maybe_assigned()) |
-              RequiresBrandCheckField::encode(
-                  var->get_requires_brand_check_flag()) |
               ParameterNumberField::encode(ParameterNumberField::kMax);
           scope_info.set(module_var_entry + kModuleVariablePropertiesOffset,
                          Smi::FromInt(properties));
@@ -276,8 +273,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
               VariableModeField::encode(var->mode()) |
               InitFlagField::encode(var->initialization_flag()) |
               MaybeAssignedFlagField::encode(var->maybe_assigned()) |
-              RequiresBrandCheckField::encode(
-                  var->get_requires_brand_check_flag()) |
               ParameterNumberField::encode(ParameterNumberField::kMax);
           scope_info.set(context_local_base + local_index, *var->name(), mode);
           scope_info.set(context_local_info_base + local_index,
@@ -362,7 +357,8 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
 
   // Encode the flags.
   int flags =
-      ScopeTypeField::encode(WITH_SCOPE) | CallsSloppyEvalField::encode(false) |
+      ScopeTypeField::encode(WITH_SCOPE) |
+      SloppyEvalCanExtendVarsField::encode(false) |
       LanguageModeField::encode(LanguageMode::kSloppy) |
       DeclarationScopeField::encode(false) |
       ReceiverVariableField::encode(NONE) | HasClassBrandField::encode(false) |
@@ -419,11 +415,13 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
                      (has_position_info ? kPositionInfoEntries : 0);
 
   Factory* factory = isolate->factory();
-  Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
+  Handle<ScopeInfo> scope_info =
+      factory->NewScopeInfo(length, AllocationType::kReadOnly);
 
   // Encode the flags.
   int flags =
-      ScopeTypeField::encode(type) | CallsSloppyEvalField::encode(false) |
+      ScopeTypeField::encode(type) |
+      SloppyEvalCanExtendVarsField::encode(false) |
       LanguageModeField::encode(LanguageMode::kSloppy) |
       DeclarationScopeField::encode(true) |
       ReceiverVariableField::encode(is_empty_function ? UNUSED : CONTEXT) |
@@ -451,7 +449,6 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
         VariableModeField::encode(VariableMode::kConst) |
         InitFlagField::encode(kCreatedInitialized) |
         MaybeAssignedFlagField::encode(kNotAssigned) |
-        RequiresBrandCheckField::encode(kNoBrandCheck) |
         ParameterNumberField::encode(ParameterNumberField::kMax);
     scope_info->set(index++, Smi::FromInt(value));
   }
@@ -497,12 +494,12 @@ ScopeType ScopeInfo::scope_type() const {
   return ScopeTypeField::decode(Flags());
 }
 
-bool ScopeInfo::CallsSloppyEval() const {
-  bool calls_sloppy_eval =
-      length() > 0 && CallsSloppyEvalField::decode(Flags());
-  DCHECK_IMPLIES(calls_sloppy_eval, is_sloppy(language_mode()));
-  DCHECK_IMPLIES(calls_sloppy_eval, is_declaration_scope());
-  return calls_sloppy_eval;
+bool ScopeInfo::SloppyEvalCanExtendVars() const {
+  bool sloppy_eval_can_extend_vars =
+      length() > 0 && SloppyEvalCanExtendVarsField::decode(Flags());
+  DCHECK_IMPLIES(sloppy_eval_can_extend_vars, is_sloppy(language_mode()));
+  DCHECK_IMPLIES(sloppy_eval_can_extend_vars, is_declaration_scope());
+  return sloppy_eval_can_extend_vars;
 }
 
 LanguageMode ScopeInfo::language_mode() const {
@@ -523,9 +520,9 @@ int ScopeInfo::ContextLength() const {
     bool has_context =
         context_locals > 0 || force_context || function_name_context_slot ||
         scope_type() == WITH_SCOPE || scope_type() == CLASS_SCOPE ||
-        (scope_type() == BLOCK_SCOPE && CallsSloppyEval() &&
+        (scope_type() == BLOCK_SCOPE && SloppyEvalCanExtendVars() &&
          is_declaration_scope()) ||
-        (scope_type() == FUNCTION_SCOPE && CallsSloppyEval()) ||
+        (scope_type() == FUNCTION_SCOPE && SloppyEvalCanExtendVars()) ||
         (scope_type() == FUNCTION_SCOPE && IsAsmModule()) ||
         scope_type() == MODULE_SCOPE;
 
@@ -708,21 +705,13 @@ MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) const {
   return MaybeAssignedFlagField::decode(value);
 }
 
-RequiresBrandCheckFlag ScopeInfo::RequiresBrandCheck(int var) const {
-  DCHECK_LE(0, var);
-  DCHECK_LT(var, ContextLocalCount());
-  int info_index = ContextLocalInfosIndex() + var;
-  int value = Smi::ToInt(get(info_index));
-  return RequiresBrandCheckField::decode(value);
-}
-
 // static
 bool ScopeInfo::VariableIsSynthetic(String name) {
   // There's currently no flag stored on the ScopeInfo to indicate that a
   // variable is a compiler-introduced temporary. However, to avoid conflict
   // with user declarations, the current temporaries like .generator_object and
   // .result start with a dot, so we can use that as a flag. It's a hack!
-  return name.length() == 0 || name.Get(0) == '.' ||
+  return name.length() == 0 || name.Get(0) == '.' || name.Get(0) == '#' ||
          name.Equals(name.GetReadOnlyRoots().this_string());
 }
 
@@ -755,8 +744,7 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
 int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
                                 VariableMode* mode,
                                 InitializationFlag* init_flag,
-                                MaybeAssignedFlag* maybe_assigned_flag,
-                                RequiresBrandCheckFlag* requires_brand_check) {
+                                MaybeAssignedFlag* maybe_assigned_flag) {
   DisallowHeapAllocation no_gc;
   DCHECK(name.IsInternalizedString());
   DCHECK_NOT_NULL(mode);
@@ -773,7 +761,6 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
     *mode = scope_info.ContextLocalMode(var);
     *init_flag = scope_info.ContextLocalInitFlag(var);
     *maybe_assigned_flag = scope_info.ContextLocalMaybeAssignedFlag(var);
-    *requires_brand_check = scope_info.RequiresBrandCheck(var);
     int result = Context::MIN_CONTEXT_SLOTS + var;
 
     DCHECK_LT(result, scope_info.ContextLength());
@@ -892,9 +879,9 @@ std::ostream& operator<<(std::ostream& os,
 }
 
 Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
-    Isolate* isolate, Handle<Object> export_name, Handle<Object> local_name,
-    Handle<Object> import_name, int module_request, int cell_index, int beg_pos,
-    int end_pos) {
+    Isolate* isolate, Handle<HeapObject> export_name,
+    Handle<HeapObject> local_name, Handle<HeapObject> import_name,
+    int module_request, int cell_index, int beg_pos, int end_pos) {
   Handle<SourceTextModuleInfoEntry> result =
       Handle<SourceTextModuleInfoEntry>::cast(isolate->factory()->NewStruct(
           SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, AllocationType::kOld));
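
The renamed ScopeInfo::SloppyEvalCanExtendVars keeps the old accessor's defensive style: decode the bit, then DCHECK the invariants it implies. A standalone analog using plain assert, with DCHECK_IMPLIES(a, b) spelled as assert(!a || b); the ScopeFlags struct is a hypothetical stand-in for the packed flags word:

```cpp
#include <cassert>

// Standalone analog of the DCHECK_IMPLIES pattern in the accessor above:
// decode a flag, then assert the properties it is allowed to coexist with.
struct ScopeFlags {
  bool sloppy_eval_can_extend_vars;
  bool is_sloppy_mode;
  bool is_declaration_scope;
};

bool SloppyEvalCanExtendVars(const ScopeFlags& f) {
  bool result = f.sloppy_eval_can_extend_vars;
  // A sloppy eval can only introduce vars into sloppy-mode declaration
  // scopes, so the flag must never be set otherwise.
  assert(!result || f.is_sloppy_mode);        // DCHECK_IMPLIES analog
  assert(!result || f.is_declaration_scope);  // DCHECK_IMPLIES analog
  return result;
}

int main() {
  ScopeFlags f{true, true, true};
  return SloppyEvalCanExtendVars(f) ? 0 : 1;
}
```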
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 0b8eb61b00162c..123b9b17973288 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -51,7 +51,7 @@ class ScopeInfo : public FixedArray {
   bool is_class_scope() const;
 
   // Can a sloppy eval call in this scope extend the variable environment?
-  bool CallsSloppyEval() const;
+  bool SloppyEvalCanExtendVars() const;
 
   // Return the number of context slots for code if a context is allocated. This
   // number consists of three parts:
@@ -130,9 +130,6 @@ class ScopeInfo : public FixedArray {
   // Return the maybe-assigned flag of the given context local.
   MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var) const;
 
-  // Return whether access to the variable requires a brand check.
-  RequiresBrandCheckFlag RequiresBrandCheck(int var) const;
-
   // Return true if this local was introduced by the compiler, and should not be
   // exposed to the user in a debugger.
   static bool VariableIsSynthetic(String name);
@@ -144,8 +141,7 @@ class ScopeInfo : public FixedArray {
   // mode for that variable.
   static int ContextSlotIndex(ScopeInfo scope_info, String name,
                               VariableMode* mode, InitializationFlag* init_flag,
-                              MaybeAssignedFlag* maybe_assigned_flag,
-                              RequiresBrandCheckFlag* requires_brand_check);
+                              MaybeAssignedFlag* maybe_assigned_flag);
 
   // Lookup metadata of a MODULE-allocated variable.  Return 0 if there is no
   // module variable with the given name (the index value of a MODULE variable
@@ -224,39 +220,26 @@ class ScopeInfo : public FixedArray {
   enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
 
   // Properties of scopes.
-  class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
-  class CallsSloppyEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {
-  };
+  using ScopeTypeField = BitField<ScopeType, 0, 4>;
+  using SloppyEvalCanExtendVarsField = ScopeTypeField::Next<bool, 1>;
   STATIC_ASSERT(LanguageModeSize == 2);
-  class LanguageModeField
-      : public BitField<LanguageMode, CallsSloppyEvalField::kNext, 1> {};
-  class DeclarationScopeField
-      : public BitField<bool, LanguageModeField::kNext, 1> {};
-  class ReceiverVariableField
-      : public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
-                        2> {};
-  class HasClassBrandField
-      : public BitField<bool, ReceiverVariableField::kNext, 1> {};
-  class HasNewTargetField
-      : public BitField<bool, HasClassBrandField::kNext, 1> {};
-  class FunctionVariableField
-      : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
+  using LanguageModeField = SloppyEvalCanExtendVarsField::Next<LanguageMode, 1>;
+  using DeclarationScopeField = LanguageModeField::Next<bool, 1>;
+  using ReceiverVariableField =
+      DeclarationScopeField::Next<VariableAllocationInfo, 2>;
+  using HasClassBrandField = ReceiverVariableField::Next<bool, 1>;
+  using HasNewTargetField = HasClassBrandField::Next<bool, 1>;
+  using FunctionVariableField =
+      HasNewTargetField::Next<VariableAllocationInfo, 2>;
   // TODO(cbruni): Combine with function variable field when only storing the
   // function name.
-  class HasInferredFunctionNameField
-      : public BitField<bool, FunctionVariableField::kNext, 1> {};
-  class IsAsmModuleField
-      : public BitField<bool, HasInferredFunctionNameField::kNext, 1> {};
-  class HasSimpleParametersField
-      : public BitField<bool, IsAsmModuleField::kNext, 1> {};
-  class FunctionKindField
-      : public BitField<FunctionKind, HasSimpleParametersField::kNext, 5> {};
-  class HasOuterScopeInfoField
-      : public BitField<bool, FunctionKindField::kNext, 1> {};
-  class IsDebugEvaluateScopeField
-      : public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
-  class ForceContextAllocationField
-      : public BitField<bool, IsDebugEvaluateScopeField::kNext, 1> {};
+  using HasInferredFunctionNameField = FunctionVariableField::Next<bool, 1>;
+  using IsAsmModuleField = HasInferredFunctionNameField::Next<bool, 1>;
+  using HasSimpleParametersField = IsAsmModuleField::Next<bool, 1>;
+  using FunctionKindField = HasSimpleParametersField::Next<FunctionKind, 5>;
+  using HasOuterScopeInfoField = FunctionKindField::Next<bool, 1>;
+  using IsDebugEvaluateScopeField = HasOuterScopeInfoField::Next<bool, 1>;
+  using ForceContextAllocationField = IsDebugEvaluateScopeField::Next<bool, 1>;
 
   STATIC_ASSERT(kLastFunctionKind <= FunctionKindField::kMax);
 
@@ -323,14 +306,10 @@ class ScopeInfo : public FixedArray {
   static const int kPositionInfoEntries = 2;
 
   // Properties of variables.
-  class VariableModeField : public BitField<VariableMode, 0, 3> {};
-  class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
-  class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
-  class RequiresBrandCheckField
-      : public BitField<RequiresBrandCheckFlag, MaybeAssignedFlagField::kNext,
-                        1> {};
-  class ParameterNumberField
-      : public BitField<uint32_t, RequiresBrandCheckField::kNext, 16> {};
+  using VariableModeField = BitField<VariableMode, 0, 4>;
+  using InitFlagField = VariableModeField::Next<InitializationFlag, 1>;
+  using MaybeAssignedFlagField = InitFlagField::Next<MaybeAssignedFlag, 1>;
+  using ParameterNumberField = MaybeAssignedFlagField::Next<uint32_t, 16>;
 
   friend class ScopeIterator;
   friend std::ostream& operator<<(std::ostream& os,
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 9778db5d908031..6023c3b8286981 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -200,14 +200,13 @@ int SharedFunctionInfo::function_token_position() const {
   }
 }
 
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_wrapped,
-                    SharedFunctionInfo::IsWrappedBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, syntax_kind,
+                    SharedFunctionInfo::FunctionSyntaxKindBits)
+
 BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, allows_lazy_compilation,
                     SharedFunctionInfo::AllowLazyCompilationBit)
 BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_duplicate_parameters,
                     SharedFunctionInfo::HasDuplicateParametersBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_declaration,
-                    SharedFunctionInfo::IsDeclarationBit)
 
 BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, native,
                     SharedFunctionInfo::IsNativeBit)
@@ -219,13 +218,9 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
 
 BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, name_should_print_as_anonymous,
                     SharedFunctionInfo::NameShouldPrintAsAnonymousBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_anonymous_expression,
-                    SharedFunctionInfo::IsAnonymousExpressionBit)
 BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_reported_binary_coverage,
                     SharedFunctionInfo::HasReportedBinaryCoverageBit)
 
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_named_expression,
-                    SharedFunctionInfo::IsNamedExpressionBit)
 BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_toplevel,
                     SharedFunctionInfo::IsTopLevelBit)
 BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
@@ -271,6 +266,10 @@ void SharedFunctionInfo::set_kind(FunctionKind kind) {
   UpdateFunctionMapIndex();
 }
 
+bool SharedFunctionInfo::is_wrapped() const {
+  return syntax_kind() == FunctionSyntaxKind::kWrapped;
+}
+
 bool SharedFunctionInfo::needs_home_object() const {
   return NeedsHomeObjectBit::decode(flags());
 }
@@ -359,6 +358,11 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo scope_info,
   if (HasInferredName() && inferred_name().length() != 0) {
     scope_info.SetInferredFunctionName(inferred_name());
   }
+  set_raw_scope_info(scope_info, mode);
+}
+
+void SharedFunctionInfo::set_raw_scope_info(ScopeInfo scope_info,
+                                            WriteBarrierMode mode) {
   WRITE_FIELD(*this, kNameOrScopeInfoOffset, scope_info);
   CONDITIONAL_WRITE_BARRIER(*this, kNameOrScopeInfoOffset, scope_info, mode);
 }
@@ -572,7 +576,8 @@ UncompiledData SharedFunctionInfo::uncompiled_data() const {
 }
 
 void SharedFunctionInfo::set_uncompiled_data(UncompiledData uncompiled_data) {
-  DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
+  DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+         HasUncompiledData());
   DCHECK(uncompiled_data.IsUncompiledData());
   set_function_data(uncompiled_data);
 }
@@ -622,7 +627,7 @@ void SharedFunctionInfo::ClearPreparseData() {
       data.address() + UncompiledDataWithoutPreparseData::kSize,
       UncompiledDataWithPreparseData::kSize -
           UncompiledDataWithoutPreparseData::kSize,
-      ClearRecordedSlots::kNo);
+      ClearRecordedSlots::kYes);
 
   // Ensure that the clear was successful.
   DCHECK(HasUncompiledDataWithoutPreparseData());
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index f7a82964b19dea..dc84653ede2987 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -8,6 +8,7 @@
 #include "src/codegen/bailout-reason.h"
 #include "src/objects/compressed-slots.h"
 #include "src/objects/function-kind.h"
+#include "src/objects/function-syntax-kind.h"
 #include "src/objects/objects.h"
 #include "src/objects/script.h"
 #include "src/objects/slots.h"
@@ -256,6 +257,10 @@ class SharedFunctionInfo : public HeapObject {
   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
 
+  // Set scope_info without moving the existing name onto the ScopeInfo.
+  inline void set_raw_scope_info(ScopeInfo scope_info,
+                                 WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
   // End position of this function in the script source.
   V8_EXPORT_PRIVATE int EndPosition() const;
 
@@ -429,9 +434,6 @@ class SharedFunctionInfo : public HeapObject {
   // [flags] Bit field containing various flags about the function.
   DECL_INT32_ACCESSORS(flags)
 
-  // Is this function a named function expression in the source code.
-  DECL_BOOLEAN_ACCESSORS(is_named_expression)
-
   // Is this function a top-level function (scripts, evals).
   DECL_BOOLEAN_ACCESSORS(is_toplevel)
 
@@ -442,8 +444,11 @@ class SharedFunctionInfo : public HeapObject {
   inline LanguageMode language_mode() const;
   inline void set_language_mode(LanguageMode language_mode);
 
+  // How the function appears in source text.
+  DECL_PRIMITIVE_ACCESSORS(syntax_kind, FunctionSyntaxKind)
+
   // Indicates whether the source is implicitly wrapped in a function.
-  DECL_BOOLEAN_ACCESSORS(is_wrapped)
+  inline bool is_wrapped() const;
 
   // True if the function has any duplicated parameter names.
   DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
@@ -454,9 +459,6 @@ class SharedFunctionInfo : public HeapObject {
   // global object.
   DECL_BOOLEAN_ACCESSORS(native)
 
-  // Whether this function was created from a FunctionDeclaration.
-  DECL_BOOLEAN_ACCESSORS(is_declaration)
-
   // Indicates that asm->wasm conversion failed and should not be re-attempted.
   DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
 
@@ -466,11 +468,6 @@ class SharedFunctionInfo : public HeapObject {
   // see a binding for it.
   DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
 
-  // Indicates that the function is either an anonymous expression
-  // or an arrow function (the name field can be set through the API,
-  // which does not change this flag).
-  DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
-
   // Indicates that the function represented by the shared function info was
   // classed as an immediately invoked function execution (IIFE) function and
   // is only executed once.
@@ -680,21 +677,18 @@ class SharedFunctionInfo : public HeapObject {
   V(FunctionKindBits, FunctionKind, 5, _)                    \
   V(IsNativeBit, bool, 1, _)                                 \
   V(IsStrictBit, bool, 1, _)                                 \
-  V(IsWrappedBit, bool, 1, _)                                \
+  V(FunctionSyntaxKindBits, FunctionSyntaxKind, 3, _)        \
   V(IsClassConstructorBit, bool, 1, _)                       \
   V(HasDuplicateParametersBit, bool, 1, _)                   \
   V(AllowLazyCompilationBit, bool, 1, _)                     \
   V(NeedsHomeObjectBit, bool, 1, _)                          \
-  V(IsDeclarationBit, bool, 1, _)                            \
   V(IsAsmWasmBrokenBit, bool, 1, _)                          \
   V(FunctionMapIndexBits, int, 5, _)                         \
   V(DisabledOptimizationReasonBits, BailoutReason, 4, _)     \
   V(RequiresInstanceMembersInitializer, bool, 1, _)          \
   V(ConstructAsBuiltinBit, bool, 1, _)                       \
-  V(IsAnonymousExpressionBit, bool, 1, _)                    \
   V(NameShouldPrintAsAnonymousBit, bool, 1, _)               \
   V(HasReportedBinaryCoverageBit, bool, 1, _)                \
-  V(IsNamedExpressionBit, bool, 1, _)                        \
   V(IsTopLevelBit, bool, 1, _)                               \
   V(IsOneshotIIFEOrPropertiesAreFinalBit, bool, 1, _)        \
   V(IsSafeToSkipArgumentsAdaptorBit, bool, 1, _)
@@ -706,6 +700,8 @@ class SharedFunctionInfo : public HeapObject {
                 DisabledOptimizationReasonBits::kMax);
 
   STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
+  STATIC_ASSERT(FunctionSyntaxKind::kLastFunctionSyntaxKind <=
+                FunctionSyntaxKindBits::kMax);
 
   // Indicates that this function uses a super property (or an eval that may
   // use a super property).
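
shared-function-info above folds four single-purpose bits (wrapped, declaration, anonymous expression, named expression) into one 3-bit FunctionSyntaxKind field, and former boolean getters such as is_wrapped() become comparisons against the enum. A standalone sketch; the enumerator list follows V8's function-syntax-kind.h as I understand it, so treat the exact names as an assumption:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of replacing mutually exclusive bool bits with one 3-bit enum
// field. Enumerators assumed to match V8's function-syntax-kind.h.
enum class FunctionSyntaxKind : uint8_t {
  kAnonymousExpression,
  kNamedExpression,
  kDeclaration,
  kAccessorOrMethod,
  kWrapped,
  kLastFunctionSyntaxKind = kWrapped,
};

// Mirrors the STATIC_ASSERT in the diff: every enumerator fits in 3 bits.
static_assert(static_cast<int>(FunctionSyntaxKind::kLastFunctionSyntaxKind) <
                  (1 << 3),
              "FunctionSyntaxKind must fit in 3 bits");

class SharedFunctionInfoSketch {
 public:
  void set_syntax_kind(FunctionSyntaxKind kind) { syntax_kind_ = kind; }
  FunctionSyntaxKind syntax_kind() const { return syntax_kind_; }

  // The old IsWrappedBit/IsDeclarationBit getters become derived
  // predicates over the single enum field.
  bool is_wrapped() const {
    return syntax_kind() == FunctionSyntaxKind::kWrapped;
  }
  bool is_declaration() const {
    return syntax_kind() == FunctionSyntaxKind::kDeclaration;
  }

 private:
  FunctionSyntaxKind syntax_kind_ = FunctionSyntaxKind::kAnonymousExpression;
};

int main() {
  SharedFunctionInfoSketch sfi;
  sfi.set_syntax_kind(FunctionSyntaxKind::kWrapped);
  assert(sfi.is_wrapped() && !sfi.is_declaration());
}
```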
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index e6637415c18e0d..f17c59de1adfd7 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -380,7 +380,7 @@ bool SourceTextModule::RunInitializationCode(Isolate* isolate,
     return false;
   }
   DCHECK_EQ(*function, Handle<JSGeneratorObject>::cast(generator)->function());
-  module->set_code(*generator);
+  module->set_code(JSGeneratorObject::cast(*generator));
   return true;
 }
 
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
index 5c20b7018b410a..e6cf260e101532 100644
--- a/deps/v8/src/objects/source-text-module.h
+++ b/deps/v8/src/objects/source-text-module.h
@@ -17,43 +17,17 @@ class UnorderedModuleSet;
 
 // The runtime representation of an ECMAScript Source Text Module Record.
 // https://tc39.github.io/ecma262/#sec-source-text-module-records
-class SourceTextModule : public Module {
+class SourceTextModule
+    : public TorqueGeneratedSourceTextModule<SourceTextModule, Module> {
  public:
   NEVER_READ_ONLY_SPACE
-  DECL_CAST(SourceTextModule)
   DECL_VERIFIER(SourceTextModule)
   DECL_PRINTER(SourceTextModule)
 
-  // The code representing this module, or an abstraction thereof.
-  // This is either a SharedFunctionInfo, a JSFunction, a JSGeneratorObject, or
-  // a SourceTextModuleInfo, depending on the state (status) the module is in.
-  // See SourceTextModule::SourceTextModuleVerify() for the precise invariant.
-  DECL_ACCESSORS(code, Object)
-
-  // Arrays of cells corresponding to regular exports and regular imports.
-  // A cell's position in the array is determined by the cell index of the
-  // associated module entry (which coincides with the variable index of the
-  // associated variable).
-  DECL_ACCESSORS(regular_exports, FixedArray)
-  DECL_ACCESSORS(regular_imports, FixedArray)
-
   // The shared function info in case {status} is not kEvaluating, kEvaluated or
   // kErrored.
   SharedFunctionInfo GetSharedFunctionInfo() const;
 
-  // Modules imported or re-exported by this module.
-  // Corresponds 1-to-1 to the module specifier strings in
-  // SourceTextModuleInfo::module_requests.
-  DECL_ACCESSORS(requested_modules, FixedArray)
-
-  // [script]: Script from which the module originates.
-  DECL_ACCESSORS(script, Script)
-
-  // The value of import.meta inside of this module.
-  // Lazily initialized on first access. It's the hole before first access and
-  // a JSObject afterwards.
-  DECL_ACCESSORS(import_meta, Object)
-
   // Get the SourceTextModuleInfo associated with the code.
   inline SourceTextModuleInfo info() const;
 
@@ -72,10 +46,6 @@ class SourceTextModule : public Module {
   static Handle<JSModuleNamespace> GetModuleNamespace(
       Isolate* isolate, Handle<SourceTextModule> module, int module_request);
 
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(Module::kHeaderSize,
-                                TORQUE_GENERATED_SOURCE_TEXT_MODULE_FIELDS)
-
   using BodyDescriptor =
       SubclassBodyDescriptor<Module::BodyDescriptor,
                              FixedBodyDescriptor<kCodeOffset, kSize, kSize>>;
@@ -135,7 +105,7 @@ class SourceTextModule : public Module {
 
   static void Reset(Isolate* isolate, Handle<SourceTextModule> module);
 
-  OBJECT_CONSTRUCTORS(SourceTextModule, Module);
+  TQ_OBJECT_CONSTRUCTORS(SourceTextModule)
 };
 
 // SourceTextModuleInfo is to SourceTextModuleDescriptor what ScopeInfo is to
@@ -186,30 +156,24 @@ class SourceTextModuleInfo : public FixedArray {
   OBJECT_CONSTRUCTORS(SourceTextModuleInfo, FixedArray);
 };
 
-class SourceTextModuleInfoEntry : public Struct {
+class SourceTextModuleInfoEntry
+    : public TorqueGeneratedSourceTextModuleInfoEntry<SourceTextModuleInfoEntry,
+                                                      Struct> {
  public:
-  DECL_CAST(SourceTextModuleInfoEntry)
   DECL_PRINTER(SourceTextModuleInfoEntry)
   DECL_VERIFIER(SourceTextModuleInfoEntry)
 
-  DECL_ACCESSORS(export_name, Object)
-  DECL_ACCESSORS(local_name, Object)
-  DECL_ACCESSORS(import_name, Object)
   DECL_INT_ACCESSORS(module_request)
   DECL_INT_ACCESSORS(cell_index)
   DECL_INT_ACCESSORS(beg_pos)
   DECL_INT_ACCESSORS(end_pos)
 
   static Handle<SourceTextModuleInfoEntry> New(
-      Isolate* isolate, Handle<Object> export_name, Handle<Object> local_name,
-      Handle<Object> import_name, int module_request, int cell_index,
-      int beg_pos, int end_pos);
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      Struct::kHeaderSize,
-      TORQUE_GENERATED_SOURCE_TEXT_MODULE_INFO_ENTRY_FIELDS)
+      Isolate* isolate, Handle<HeapObject> export_name,
+      Handle<HeapObject> local_name, Handle<HeapObject> import_name,
+      int module_request, int cell_index, int beg_pos, int end_pos);
 
-  OBJECT_CONSTRUCTORS(SourceTextModuleInfoEntry, Struct);
+  TQ_OBJECT_CONSTRUCTORS(SourceTextModuleInfoEntry)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index e72af4df94a56d..42e3a4a10a87fb 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -28,6 +28,8 @@ SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberOffset)
 SMI_ACCESSORS(StackFrameInfo, column_number, kColumnNumberOffset)
 SMI_ACCESSORS(StackFrameInfo, script_id, kScriptIdOffset)
 SMI_ACCESSORS(StackFrameInfo, promise_all_index, kPromiseAllIndexOffset)
+SMI_ACCESSORS_CHECKED(StackFrameInfo, function_offset, kPromiseAllIndexOffset,
+                      is_wasm())
 ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameOffset)
 ACCESSORS(StackFrameInfo, script_name_or_source_url, Object,
           kScriptNameOrSourceUrlOffset)
@@ -36,6 +38,7 @@ ACCESSORS(StackFrameInfo, method_name, Object, kMethodNameOffset)
 ACCESSORS(StackFrameInfo, type_name, Object, kTypeNameOffset)
 ACCESSORS(StackFrameInfo, eval_origin, Object, kEvalOriginOffset)
 ACCESSORS(StackFrameInfo, wasm_module_name, Object, kWasmModuleNameOffset)
+ACCESSORS(StackFrameInfo, wasm_instance, Object, kWasmInstanceOffset)
 SMI_ACCESSORS(StackFrameInfo, flag, kFlagOffset)
 BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
 BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
@@ -46,14 +49,11 @@ BOOL_ACCESSORS(StackFrameInfo, flag, is_toplevel, kIsToplevelBit)
 BOOL_ACCESSORS(StackFrameInfo, flag, is_async, kIsAsyncBit)
 BOOL_ACCESSORS(StackFrameInfo, flag, is_promise_all, kIsPromiseAllBit)
 
-OBJECT_CONSTRUCTORS_IMPL(StackTraceFrame, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(StackTraceFrame)
 NEVER_READ_ONLY_SPACE_IMPL(StackTraceFrame)
-CAST_ACCESSOR(StackTraceFrame)
 
-ACCESSORS(StackTraceFrame, frame_array, Object, kFrameArrayOffset)
-SMI_ACCESSORS(StackTraceFrame, frame_index, kFrameIndexOffset)
-ACCESSORS(StackTraceFrame, frame_info, Object, kFrameInfoOffset)
-SMI_ACCESSORS(StackTraceFrame, id, kIdOffset)
+TQ_SMI_ACCESSORS(StackTraceFrame, frame_index)
+TQ_SMI_ACCESSORS(StackTraceFrame, id)
 
 }  // namespace internal
 }  // namespace v8
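
SMI_ACCESSORS_CHECKED above overlays function_offset on the promise_all_index slot, with is_wasm() as the discriminator deciding which reading is legal. A standalone sketch of that checked shared-slot pattern (class and field names here are illustrative, not V8's):

```cpp
#include <cassert>

// Sketch of an accessor pair sharing one storage slot, guarded by a
// discriminator, as SMI_ACCESSORS_CHECKED does above: function_offset and
// promise_all_index occupy the same offset, and only one is legal per frame.
class StackFrameInfoSketch {
 public:
  explicit StackFrameInfoSketch(bool is_wasm) : is_wasm_(is_wasm) {}

  bool is_wasm() const { return is_wasm_; }

  int promise_all_index() const {
    assert(!is_wasm());  // JS frames only
    return shared_slot_;
  }
  int function_offset() const {
    assert(is_wasm());   // wasm frames only: same slot, different meaning
    return shared_slot_;
  }
  void set_shared_slot(int v) { shared_slot_ = v; }

 private:
  bool is_wasm_;
  int shared_slot_ = 0;  // one field, two interpretations
};

int main() {
  StackFrameInfoSketch wasm_frame(/*is_wasm=*/true);
  wasm_frame.set_shared_slot(128);
  return wasm_frame.function_offset() == 128 ? 0 : 1;
}
```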
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index 558449d85aae34..323c4b8fcbb57f 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -51,6 +51,12 @@ int StackTraceFrame::GetPromiseAllIndex(Handle<StackTraceFrame> frame) {
   return GetFrameInfo(frame)->promise_all_index();
 }
 
+// static
+int StackTraceFrame::GetFunctionOffset(Handle<StackTraceFrame> frame) {
+  DCHECK(IsWasm(frame));
+  return GetFrameInfo(frame)->function_offset();
+}
+
 // static
 Handle<Object> StackTraceFrame::GetFileName(Handle<StackTraceFrame> frame) {
   auto name = GetFrameInfo(frame)->script_name();
@@ -95,6 +101,13 @@ Handle<Object> StackTraceFrame::GetWasmModuleName(
   return handle(module, frame->GetIsolate());
 }
 
+// static
+Handle<WasmInstanceObject> StackTraceFrame::GetWasmInstance(
+    Handle<StackTraceFrame> frame) {
+  Object instance = GetFrameInfo(frame)->wasm_instance();
+  return handle(WasmInstanceObject::cast(instance), frame->GetIsolate());
+}
+
 // static
 bool StackTraceFrame::IsEval(Handle<StackTraceFrame> frame) {
   return GetFrameInfo(frame)->is_eval();
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 3d91c5374f9e82..7c4918a3c6bde0 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -14,6 +14,7 @@ namespace v8 {
 namespace internal {
 
 class FrameArray;
+class WasmInstanceObject;
 
 class StackFrameInfo : public Struct {
  public:
@@ -22,6 +23,8 @@ class StackFrameInfo : public Struct {
   DECL_INT_ACCESSORS(column_number)
   DECL_INT_ACCESSORS(script_id)
   DECL_INT_ACCESSORS(promise_all_index)
+  // Wasm frames only: function_offset instead of promise_all_index.
+  DECL_INT_ACCESSORS(function_offset)
   DECL_ACCESSORS(script_name, Object)
   DECL_ACCESSORS(script_name_or_source_url, Object)
   DECL_ACCESSORS(function_name, Object)
@@ -29,6 +32,7 @@ class StackFrameInfo : public Struct {
   DECL_ACCESSORS(type_name, Object)
   DECL_ACCESSORS(eval_origin, Object)
   DECL_ACCESSORS(wasm_module_name, Object)
+  DECL_ACCESSORS(wasm_instance, Object)
   DECL_BOOLEAN_ACCESSORS(is_eval)
   DECL_BOOLEAN_ACCESSORS(is_constructor)
   DECL_BOOLEAN_ACCESSORS(is_wasm)
@@ -67,22 +71,15 @@ class StackFrameInfo : public Struct {
 // The first time any of the Get* or Is* methods is called, a
 // StackFrameInfo object is allocated and all necessary information
 // retrieved.
-class StackTraceFrame : public Struct {
+class StackTraceFrame
+    : public TorqueGeneratedStackTraceFrame<StackTraceFrame, Struct> {
  public:
   NEVER_READ_ONLY_SPACE
-  DECL_ACCESSORS(frame_array, Object)
   DECL_INT_ACCESSORS(frame_index)
-  DECL_ACCESSORS(frame_info, Object)
   DECL_INT_ACCESSORS(id)
 
-  DECL_CAST(StackTraceFrame)
-
   // Dispatched behavior.
   DECL_PRINTER(StackTraceFrame)
-  DECL_VERIFIER(StackTraceFrame)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
-                                TORQUE_GENERATED_STACK_TRACE_FRAME_FIELDS)
 
   static int GetLineNumber(Handle<StackTraceFrame> frame);
   static int GetOneBasedLineNumber(Handle<StackTraceFrame> frame);
@@ -90,6 +87,7 @@ class StackTraceFrame : public Struct {
   static int GetOneBasedColumnNumber(Handle<StackTraceFrame> frame);
   static int GetScriptId(Handle<StackTraceFrame> frame);
   static int GetPromiseAllIndex(Handle<StackTraceFrame> frame);
+  static int GetFunctionOffset(Handle<StackTraceFrame> frame);
 
   static Handle<Object> GetFileName(Handle<StackTraceFrame> frame);
   static Handle<Object> GetScriptNameOrSourceUrl(Handle<StackTraceFrame> frame);
@@ -98,6 +96,8 @@ class StackTraceFrame : public Struct {
   static Handle<Object> GetTypeName(Handle<StackTraceFrame> frame);
   static Handle<Object> GetEvalOrigin(Handle<StackTraceFrame> frame);
   static Handle<Object> GetWasmModuleName(Handle<StackTraceFrame> frame);
+  static Handle<WasmInstanceObject> GetWasmInstance(
+      Handle<StackTraceFrame> frame);
 
   static bool IsEval(Handle<StackTraceFrame> frame);
   static bool IsConstructor(Handle<StackTraceFrame> frame);
@@ -109,10 +109,10 @@ class StackTraceFrame : public Struct {
   static bool IsPromiseAll(Handle<StackTraceFrame> frame);
 
  private:
-  OBJECT_CONSTRUCTORS(StackTraceFrame, Struct);
-
   static Handle<StackFrameInfo> GetFrameInfo(Handle<StackTraceFrame> frame);
   static void InitializeFrameInfo(Handle<StackTraceFrame> frame);
+
+  TQ_OBJECT_CONSTRUCTORS(StackTraceFrame)
 };
 
 // Small helper that retrieves the FrameArray from a stack-trace
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index db724e0cf14036..083928d2119de5 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -137,6 +137,65 @@ STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
 
 STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
 
+template <typename TDispatcher, typename TResult, typename... TArgs>
+inline TResult StringShape::DispatchToSpecificTypeWithoutCast(TArgs&&... args) {
+  switch (full_representation_tag()) {
+    case kSeqStringTag | kOneByteStringTag:
+      return TDispatcher::HandleSeqOneByteString(std::forward<TArgs>(args)...);
+    case kSeqStringTag | kTwoByteStringTag:
+      return TDispatcher::HandleSeqTwoByteString(std::forward<TArgs>(args)...);
+    case kConsStringTag | kOneByteStringTag:
+    case kConsStringTag | kTwoByteStringTag:
+      return TDispatcher::HandleConsString(std::forward<TArgs>(args)...);
+    case kExternalStringTag | kOneByteStringTag:
+      return TDispatcher::HandleExternalOneByteString(
+          std::forward<TArgs>(args)...);
+    case kExternalStringTag | kTwoByteStringTag:
+      return TDispatcher::HandleExternalTwoByteString(
+          std::forward<TArgs>(args)...);
+    case kSlicedStringTag | kOneByteStringTag:
+    case kSlicedStringTag | kTwoByteStringTag:
+      return TDispatcher::HandleSlicedString(std::forward<TArgs>(args)...);
+    case kThinStringTag | kOneByteStringTag:
+    case kThinStringTag | kTwoByteStringTag:
+      return TDispatcher::HandleThinString(std::forward<TArgs>(args)...);
+    default:
+      return TDispatcher::HandleInvalidString(std::forward<TArgs>(args)...);
+  }
+}
+
+// All concrete subclasses of String (leaves of the inheritance tree).
+#define STRING_CLASS_TYPES(V) \
+  V(SeqOneByteString)         \
+  V(SeqTwoByteString)         \
+  V(ConsString)               \
+  V(ExternalOneByteString)    \
+  V(ExternalTwoByteString)    \
+  V(SlicedString)             \
+  V(ThinString)
+
+template <typename TDispatcher, typename TResult, typename... TArgs>
+inline TResult StringShape::DispatchToSpecificType(String str,
+                                                   TArgs&&... args) {
+  class CastingDispatcher : public AllStatic {
+   public:
+#define DEFINE_METHOD(Type)                                         \
+  static inline TResult Handle##Type(String str, TArgs&&... args) { \
+    return TDispatcher::Handle##Type(Type::cast(str),               \
+                                     std::forward<TArgs>(args)...); \
+  }
+    STRING_CLASS_TYPES(DEFINE_METHOD)
+#undef DEFINE_METHOD
+    static inline TResult HandleInvalidString(String str, TArgs&&... args) {
+      return TDispatcher::HandleInvalidString(str,
+                                              std::forward<TArgs>(args)...);
+    }
+  };
+
+  return DispatchToSpecificTypeWithoutCast<CastingDispatcher, TResult>(
+      str, std::forward<TArgs>(args)...);
+}
+
 DEF_GETTER(String, IsOneByteRepresentation, bool) {
   uint32_t type = map(isolate).instance_type();
   return (type & kStringEncodingMask) == kOneByteStringTag;
@@ -340,29 +399,22 @@ Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
 
 uint16_t String::Get(int index) {
   DCHECK(index >= 0 && index < length());
-  switch (StringShape(*this).full_representation_tag()) {
-    case kSeqStringTag | kOneByteStringTag:
-      return SeqOneByteString::cast(*this).Get(index);
-    case kSeqStringTag | kTwoByteStringTag:
-      return SeqTwoByteString::cast(*this).Get(index);
-    case kConsStringTag | kOneByteStringTag:
-    case kConsStringTag | kTwoByteStringTag:
-      return ConsString::cast(*this).Get(index);
-    case kExternalStringTag | kOneByteStringTag:
-      return ExternalOneByteString::cast(*this).Get(index);
-    case kExternalStringTag | kTwoByteStringTag:
-      return ExternalTwoByteString::cast(*this).Get(index);
-    case kSlicedStringTag | kOneByteStringTag:
-    case kSlicedStringTag | kTwoByteStringTag:
-      return SlicedString::cast(*this).Get(index);
-    case kThinStringTag | kOneByteStringTag:
-    case kThinStringTag | kTwoByteStringTag:
-      return ThinString::cast(*this).Get(index);
-    default:
-      break;
+
+  class StringGetDispatcher : public AllStatic {
+   public:
+#define DEFINE_METHOD(Type)                                  \
+  static inline uint16_t Handle##Type(Type str, int index) { \
+    return str.Get(index);                                   \
   }
+    STRING_CLASS_TYPES(DEFINE_METHOD)
+#undef DEFINE_METHOD
+    static inline uint16_t HandleInvalidString(String str, int index) {
+      UNREACHABLE();
+    }
+  };
 
-  UNREACHABLE();
+  return StringShape(*this)
+      .DispatchToSpecificType<StringGetDispatcher, uint16_t>(*this, index);
 }
 
 void String::Set(int index, uint16_t value) {
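
String::Get's hand-written representation switch is replaced by a dispatcher: DispatchToSpecificTypeWithoutCast switches on the tag once, and DispatchToSpecificType wraps each handler so it receives the operand already cast to the concrete string class. A reduced standalone version of the same pattern, with two stand-in representations instead of seven:

```cpp
#include <cstdio>
#include <utility>

// Reduced form of the StringShape dispatcher above: a switch on a type tag
// that forwards to TDispatcher::Handle<Type> with the operand already cast
// to the concrete type, so each handler is written against the right class.
enum class Tag { kSeqOneByte, kCons };

struct String { Tag tag; };
struct SeqOneByteString : String { char c = 'a'; };
struct ConsString : String { /* first/second halves elided */ };

template <typename TDispatcher, typename TResult, typename... TArgs>
TResult DispatchToSpecificType(String* str, TArgs&&... args) {
  switch (str->tag) {
    case Tag::kSeqOneByte:
      return TDispatcher::HandleSeqOneByteString(
          static_cast<SeqOneByteString*>(str), std::forward<TArgs>(args)...);
    case Tag::kCons:
      return TDispatcher::HandleConsString(static_cast<ConsString*>(str),
                                           std::forward<TArgs>(args)...);
  }
  return TResult{};
}

// The caller defines one handler per concrete class, as String::Get now
// does with its local StringGetDispatcher.
struct GetDispatcher {
  static char HandleSeqOneByteString(SeqOneByteString* s, int) {
    return s->c;  // direct flat access
  }
  static char HandleConsString(ConsString*, int) {
    return '?';   // real code would walk the rope
  }
};

int main() {
  SeqOneByteString s;
  s.tag = Tag::kSeqOneByte;
  std::printf("%c\n", DispatchToSpecificType<GetDispatcher, char>(&s, 0));
}
```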
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index d1981fd24ded21..41de3aef04c30c 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -110,6 +110,8 @@ void String::MakeThin(Isolate* isolate, String internalized) {
     }
   }
 
+  bool has_pointers = StringShape(*this).IsIndirect();
+
   int old_size = this->Size();
   isolate->heap()->NotifyObjectLayoutChange(*this, old_size, no_gc);
   bool one_byte = internalized.IsOneByteRepresentation();
@@ -123,7 +125,9 @@ void String::MakeThin(Isolate* isolate, String internalized) {
   int size_delta = old_size - ThinString::kSize;
   if (size_delta != 0) {
     Heap* heap = isolate->heap();
-    heap->CreateFillerObjectAt(thin_end, size_delta, ClearRecordedSlots::kNo);
+    heap->CreateFillerObjectAt(
+        thin_end, size_delta,
+        has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
   }
 }
 
@@ -178,7 +182,8 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
   isolate->heap()->CreateFillerObjectAt(
-      this->address() + new_size, size - new_size, ClearRecordedSlots::kNo);
+      this->address() + new_size, size - new_size,
+      has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
   if (has_pointers) {
     isolate->heap()->ClearRecordedSlotRange(this->address(),
                                             this->address() + new_size);
@@ -250,7 +255,8 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
   isolate->heap()->CreateFillerObjectAt(
-      this->address() + new_size, size - new_size, ClearRecordedSlots::kNo);
+      this->address() + new_size, size - new_size,
+      has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
   if (has_pointers) {
     isolate->heap()->ClearRecordedSlotRange(this->address(),
                                             this->address() + new_size);
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 1a826eee3b5fb4..27bd7e87652859 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -61,6 +61,13 @@ class StringShape {
   inline void invalidate() {}
 #endif
 
+  // Run different behavior for each concrete string class type, as defined by
+  // the dispatcher.
+  template <typename TDispatcher, typename TResult, typename... TArgs>
+  inline TResult DispatchToSpecificTypeWithoutCast(TArgs&&... args);
+  template <typename TDispatcher, typename TResult, typename... TArgs>
+  inline TResult DispatchToSpecificType(String str, TArgs&&... args);
+
  private:
   uint32_t type_;
 #ifdef DEBUG
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
index 47d55a876f355f..af0fed126b4966 100644
--- a/deps/v8/src/objects/struct-inl.h
+++ b/deps/v8/src/objects/struct-inl.h
@@ -24,10 +24,9 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple2)
 TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple3)
 OBJECT_CONSTRUCTORS_IMPL(AccessorPair, Struct)
 
-OBJECT_CONSTRUCTORS_IMPL(ClassPositions, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ClassPositions)
 
 CAST_ACCESSOR(AccessorPair)
-CAST_ACCESSOR(ClassPositions)
 
 void Struct::InitializeBody(int object_size) {
   Object value = GetReadOnlyRoots().undefined_value();
@@ -39,8 +38,8 @@ void Struct::InitializeBody(int object_size) {
 ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
 ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
 
-SMI_ACCESSORS(ClassPositions, start, kStartOffset)
-SMI_ACCESSORS(ClassPositions, end, kEndOffset)
+TQ_SMI_ACCESSORS(ClassPositions, start)
+TQ_SMI_ACCESSORS(ClassPositions, end)
 
 Object AccessorPair::get(AccessorComponent component) {
   return component == ACCESSOR_GETTER ? getter() : setter();
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index b01a33561bb69e..c9372d9ada0d48 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -60,6 +60,7 @@ class AccessorPair : public Struct {
 
   // Note: Returns undefined if the component is not set.
   static Handle<Object> GetComponent(Isolate* isolate,
+                                     Handle<NativeContext> native_context,
                                      Handle<AccessorPair> accessor_pair,
                                      AccessorComponent component);
 
@@ -79,22 +80,17 @@ class AccessorPair : public Struct {
   OBJECT_CONSTRUCTORS(AccessorPair, Struct);
 };
 
-class ClassPositions : public Struct {
+class ClassPositions
+    : public TorqueGeneratedClassPositions<ClassPositions, Struct> {
  public:
   DECL_INT_ACCESSORS(start)
   DECL_INT_ACCESSORS(end)
 
-  DECL_CAST(ClassPositions)
-
   // Dispatched behavior.
   DECL_PRINTER(ClassPositions)
-  DECL_VERIFIER(ClassPositions)
   void BriefPrintDetails(std::ostream& os);
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
-                                TORQUE_GENERATED_CLASS_POSITIONS_FIELDS)
-
-  OBJECT_CONSTRUCTORS(ClassPositions, Struct);
+  TQ_OBJECT_CONSTRUCTORS(ClassPositions)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/synthetic-module.h b/deps/v8/src/objects/synthetic-module.h
index 9f91f2ce4a49a1..6f3bb0438e9597 100644
--- a/deps/v8/src/objects/synthetic-module.h
+++ b/deps/v8/src/objects/synthetic-module.h
@@ -17,26 +17,17 @@ namespace internal {
 // instantiated by an embedder with embedder-defined exports and evaluation
 // steps.
 // https://heycam.github.io/webidl/#synthetic-module-records
-class SyntheticModule : public Module {
+class SyntheticModule
+    : public TorqueGeneratedSyntheticModule<SyntheticModule, Module> {
  public:
   NEVER_READ_ONLY_SPACE
-  DECL_CAST(SyntheticModule)
   DECL_VERIFIER(SyntheticModule)
   DECL_PRINTER(SyntheticModule)
 
-  // The list of all names exported by this module
-  DECL_ACCESSORS(name, String)
-  DECL_ACCESSORS(export_names, FixedArray)
-  DECL_ACCESSORS(evaluation_steps, Foreign)
-
   static void SetExport(Isolate* isolate, Handle<SyntheticModule> module,
                         Handle<String> export_name,
                         Handle<Object> export_value);
 
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(Module::kHeaderSize,
-                                TORQUE_GENERATED_SYNTHETIC_MODULE_FIELDS)
-
   using BodyDescriptor = SubclassBodyDescriptor<
       Module::BodyDescriptor,
       FixedBodyDescriptor<kExportNamesOffset, kSize, kSize>>;
@@ -58,7 +49,7 @@ class SyntheticModule : public Module {
   static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
       Isolate* isolate, Handle<SyntheticModule> module);
 
-  OBJECT_CONSTRUCTORS(SyntheticModule, Module);
+  TQ_OBJECT_CONSTRUCTORS(SyntheticModule)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/template-objects-inl.h b/deps/v8/src/objects/template-objects-inl.h
index 85c1e6c8f4e8a6..b0f73e873b999a 100644
--- a/deps/v8/src/objects/template-objects-inl.h
+++ b/deps/v8/src/objects/template-objects-inl.h
@@ -15,16 +15,11 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription)
 OBJECT_CONSTRUCTORS_IMPL(CachedTemplateObject, Tuple3)
 
-CAST_ACCESSOR(TemplateObjectDescription)
 CAST_ACCESSOR(CachedTemplateObject)
 
-ACCESSORS(TemplateObjectDescription, raw_strings, FixedArray, kRawStringsOffset)
-ACCESSORS(TemplateObjectDescription, cooked_strings, FixedArray,
-          kCookedStringsOffset)
-
 SMI_ACCESSORS(CachedTemplateObject, slot_id, kSlotIdOffset)
 ACCESSORS(CachedTemplateObject, template_object, JSArray, kTemplateObjectOffset)
 ACCESSORS(CachedTemplateObject, next, HeapObject, kNextOffset)
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index 20ad742338333e..7bac29206b9ab0 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -39,25 +39,16 @@ class CachedTemplateObject final : public Tuple3 {
 // TemplateObjectDescription is a tuple of raw strings and cooked strings for
 // tagged template literals. Used to communicate with the runtime for template
 // object creation within the {Runtime_GetTemplateObject} method.
-class TemplateObjectDescription final : public Struct {
+class TemplateObjectDescription final
+    : public TorqueGeneratedTemplateObjectDescription<TemplateObjectDescription,
+                                                      Struct> {
  public:
-  DECL_ACCESSORS(raw_strings, FixedArray)
-  DECL_ACCESSORS(cooked_strings, FixedArray)
-
-  DECL_CAST(TemplateObjectDescription)
-
   static Handle<JSArray> GetTemplateObject(
       Isolate* isolate, Handle<NativeContext> native_context,
       Handle<TemplateObjectDescription> description,
       Handle<SharedFunctionInfo> shared_info, int slot_id);
 
-  DECL_PRINTER(TemplateObjectDescription)
-  DECL_VERIFIER(TemplateObjectDescription)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      Struct::kHeaderSize, TORQUE_GENERATED_TEMPLATE_OBJECT_DESCRIPTION_FIELDS)
-
-  OBJECT_CONSTRUCTORS(TemplateObjectDescription, Struct);
+  TQ_OBJECT_CONSTRUCTORS(TemplateObjectDescription)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index d344174a0c692b..be58fc12bc57c9 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -17,29 +17,16 @@
 namespace v8 {
 namespace internal {
 
-OBJECT_CONSTRUCTORS_IMPL(TemplateInfo, Struct)
-OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateInfo, TemplateInfo)
-OBJECT_CONSTRUCTORS_IMPL(ObjectTemplateInfo, TemplateInfo)
-OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateRareData, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TemplateInfo)
+TQ_OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateInfo)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ObjectTemplateInfo)
+TQ_OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateRareData)
 
 NEVER_READ_ONLY_SPACE_IMPL(TemplateInfo)
 
-ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
-ACCESSORS(TemplateInfo, serial_number, Object, kSerialNumberOffset)
-SMI_ACCESSORS(TemplateInfo, number_of_properties, kNumberOfPropertiesOffset)
-ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
-ACCESSORS(TemplateInfo, property_accessors, Object, kPropertyAccessorsOffset)
-
-ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
-ACCESSORS(FunctionTemplateInfo, class_name, Object, kClassNameOffset)
-ACCESSORS(FunctionTemplateInfo, signature, Object, kSignatureOffset)
-ACCESSORS(FunctionTemplateInfo, shared_function_info, Object,
-          kSharedFunctionInfoOffset)
-ACCESSORS(FunctionTemplateInfo, rare_data, HeapObject,
-          kFunctionTemplateRareDataOffset)
-ACCESSORS(FunctionTemplateInfo, cached_property_name, Object,
-          kCachedPropertyNameOffset)
-SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
+TQ_SMI_ACCESSORS(TemplateInfo, number_of_properties)
+
+TQ_SMI_ACCESSORS(FunctionTemplateInfo, length)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
                kNeedsAccessCheckBit)
@@ -50,7 +37,7 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache, kDoNotCacheBit)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
                kAcceptAnyReceiver)
-SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
+TQ_SMI_ACCESSORS(FunctionTemplateInfo, flag)
 
 // static
 FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
@@ -88,31 +75,6 @@ RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, Object)
 RARE_ACCESSORS(access_check_info, AccessCheckInfo, Object)
 #undef RARE_ACCESSORS
 
-ACCESSORS(FunctionTemplateRareData, prototype_template, Object,
-          kPrototypeTemplateOffset)
-ACCESSORS(FunctionTemplateRareData, prototype_provider_template, Object,
-          kPrototypeProviderTemplateOffset)
-ACCESSORS(FunctionTemplateRareData, parent_template, Object,
-          kParentTemplateOffset)
-ACCESSORS(FunctionTemplateRareData, named_property_handler, Object,
-          kNamedPropertyHandlerOffset)
-ACCESSORS(FunctionTemplateRareData, indexed_property_handler, Object,
-          kIndexedPropertyHandlerOffset)
-ACCESSORS(FunctionTemplateRareData, instance_template, Object,
-          kInstanceTemplateOffset)
-ACCESSORS(FunctionTemplateRareData, instance_call_handler, Object,
-          kInstanceCallHandlerOffset)
-ACCESSORS(FunctionTemplateRareData, access_check_info, Object,
-          kAccessCheckInfoOffset)
-
-ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
-ACCESSORS(ObjectTemplateInfo, data, Object, kDataOffset)
-
-CAST_ACCESSOR(TemplateInfo)
-CAST_ACCESSOR(FunctionTemplateInfo)
-CAST_ACCESSOR(FunctionTemplateRareData)
-CAST_ACCESSOR(ObjectTemplateInfo)
-
 bool FunctionTemplateInfo::instantiated() {
   return shared_function_info().IsSharedFunctionInfo();
 }
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index 99142266edf897..29671db83cf044 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -13,21 +13,10 @@
 namespace v8 {
 namespace internal {
 
-class TemplateInfo : public Struct {
+class TemplateInfo : public TorqueGeneratedTemplateInfo<TemplateInfo, Struct> {
  public:
   NEVER_READ_ONLY_SPACE
-  DECL_ACCESSORS(tag, Object)
-  DECL_ACCESSORS(serial_number, Object)
   DECL_INT_ACCESSORS(number_of_properties)
-  DECL_ACCESSORS(property_list, Object)
-  DECL_ACCESSORS(property_accessors, Object)
-
-  DECL_VERIFIER(TemplateInfo)
-
-  DECL_CAST(TemplateInfo)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
-                                TORQUE_GENERATED_TEMPLATE_INFO_FIELDS)
 
   static const int kFastTemplateInstantiationsCacheSize = 1 * KB;
 
@@ -36,55 +25,25 @@ class TemplateInfo : public Struct {
   // instead of caching them.
   static const int kSlowTemplateInstantiationsCacheSize = 1 * MB;
 
-  OBJECT_CONSTRUCTORS(TemplateInfo, Struct);
+  TQ_OBJECT_CONSTRUCTORS(TemplateInfo)
 };
 
 // Contains data members that are rarely set on a FunctionTemplateInfo.
-class FunctionTemplateRareData : public Struct {
+class FunctionTemplateRareData
+    : public TorqueGeneratedFunctionTemplateRareData<FunctionTemplateRareData,
+                                                     Struct> {
  public:
-  // See DECL_RARE_ACCESSORS in FunctionTemplateInfo.
-  DECL_ACCESSORS(prototype_template, Object)
-  DECL_ACCESSORS(prototype_provider_template, Object)
-  DECL_ACCESSORS(parent_template, Object)
-  DECL_ACCESSORS(named_property_handler, Object)
-  DECL_ACCESSORS(indexed_property_handler, Object)
-  DECL_ACCESSORS(instance_template, Object)
-  DECL_ACCESSORS(instance_call_handler, Object)
-  DECL_ACCESSORS(access_check_info, Object)
-
-  DECL_CAST(FunctionTemplateRareData)
-
   // Dispatched behavior.
   DECL_PRINTER(FunctionTemplateRareData)
-  DECL_VERIFIER(FunctionTemplateRareData)
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(
-      HeapObject::kHeaderSize,
-      TORQUE_GENERATED_FUNCTION_TEMPLATE_RARE_DATA_FIELDS)
-
-  OBJECT_CONSTRUCTORS(FunctionTemplateRareData, Struct);
+  TQ_OBJECT_CONSTRUCTORS(FunctionTemplateRareData)
 };
 
 // See the api-exposed FunctionTemplate for more information.
-class FunctionTemplateInfo : public TemplateInfo {
+class FunctionTemplateInfo
+    : public TorqueGeneratedFunctionTemplateInfo<FunctionTemplateInfo,
+                                                 TemplateInfo> {
  public:
-  // Handler invoked when calling an instance of this FunctionTemplateInfo.
-  // Either CallInfoHandler or Undefined.
-  DECL_ACCESSORS(call_code, Object)
-
-  DECL_ACCESSORS(class_name, Object)
-
-  // If the signature is a FunctionTemplateInfo it is used to check whether the
-  // receiver calling the associated JSFunction is a compatible receiver, i.e.
-  // it is an instance of the signature FunctionTemplateInfo or any of the
-  // receiver's prototypes are.
-  DECL_ACCESSORS(signature, Object)
-
-  // If any of the setters below declared by DECL_RARE_ACCESSORS are used then
-  // a FunctionTemplateRareData will be stored here. Until then this contains
-  // undefined.
-  DECL_ACCESSORS(rare_data, HeapObject)
-
 #define DECL_RARE_ACCESSORS(Name, CamelName, Type)                           \
   DECL_GETTER(Get##CamelName, Type)                                          \
   static inline void Set##CamelName(                                         \
@@ -125,19 +84,12 @@ class FunctionTemplateInfo : public TemplateInfo {
   DECL_RARE_ACCESSORS(access_check_info, AccessCheckInfo, Object)
 #undef DECL_RARE_ACCESSORS
 
-  DECL_ACCESSORS(shared_function_info, Object)
-
   // Internal field to store a flag bitfield.
   DECL_INT_ACCESSORS(flag)
 
   // "length" property of the final JSFunction.
   DECL_INT_ACCESSORS(length)
 
-  // Either the_hole or a private symbol. Used to cache the result on
-  // the receiver under the the cached_property_name when this
-  // FunctionTemplateInfo is used as a getter.
-  DECL_ACCESSORS(cached_property_name, Object)
-
   // Begin flag bits ---------------------
   DECL_BOOLEAN_ACCESSORS(undetectable)
 
@@ -160,17 +112,11 @@ class FunctionTemplateInfo : public TemplateInfo {
   DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
   // End flag bits ---------------------
 
-  DECL_CAST(FunctionTemplateInfo)
-
   // Dispatched behavior.
   DECL_PRINTER(FunctionTemplateInfo)
-  DECL_VERIFIER(FunctionTemplateInfo)
 
   static const int kInvalidSerialNumber = 0;
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kHeaderSize,
-                                TORQUE_GENERATED_FUNCTION_TEMPLATE_INFO_FIELDS)
-
   static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
       Isolate* isolate, Handle<FunctionTemplateInfo> info,
       MaybeHandle<Name> maybe_name);
@@ -202,36 +148,28 @@ class FunctionTemplateInfo : public TemplateInfo {
   static FunctionTemplateRareData AllocateFunctionTemplateRareData(
       Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info);
 
-  OBJECT_CONSTRUCTORS(FunctionTemplateInfo, TemplateInfo);
+  TQ_OBJECT_CONSTRUCTORS(FunctionTemplateInfo)
 };
 
-class ObjectTemplateInfo : public TemplateInfo {
+class ObjectTemplateInfo
+    : public TorqueGeneratedObjectTemplateInfo<ObjectTemplateInfo,
+                                               TemplateInfo> {
  public:
-  DECL_ACCESSORS(constructor, Object)
-  DECL_ACCESSORS(data, Object)
   DECL_INT_ACCESSORS(embedder_field_count)
   DECL_BOOLEAN_ACCESSORS(immutable_proto)
 
-  DECL_CAST(ObjectTemplateInfo)
-
   // Dispatched behavior.
   DECL_PRINTER(ObjectTemplateInfo)
-  DECL_VERIFIER(ObjectTemplateInfo)
-
-  // Layout description.
-  DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kHeaderSize,
-                                TORQUE_GENERATED_OBJECT_TEMPLATE_INFO_FIELDS)
 
   // Starting from given object template's constructor walk up the inheritance
   // chain till a function template that has an instance template is found.
   inline ObjectTemplateInfo GetParent(Isolate* isolate);
 
  private:
-  class IsImmutablePrototype : public BitField<bool, 0, 1> {};
-  class EmbedderFieldCount
-      : public BitField<int, IsImmutablePrototype::kNext, 29> {};
+  using IsImmutablePrototype = BitField<bool, 0, 1>;
+  using EmbedderFieldCount = IsImmutablePrototype::Next<int, 29>;
 
-  OBJECT_CONSTRUCTORS(ObjectTemplateInfo, TemplateInfo);
+  TQ_OBJECT_CONSTRUCTORS(ObjectTemplateInfo)
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 5a72dd6532a39c..3b3506fbb9178a 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -417,9 +417,6 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
     case HEAP_NUMBER_TYPE:
       WriteHeapNumber(HeapNumber::cast(*object));
       return ThrowIfOutOfMemory();
-    case MUTABLE_HEAP_NUMBER_TYPE:
-      WriteMutableHeapNumber(MutableHeapNumber::cast(*object));
-      return ThrowIfOutOfMemory();
     case BIGINT_TYPE:
       WriteBigInt(BigInt::cast(*object));
       return ThrowIfOutOfMemory();
@@ -485,11 +482,6 @@ void ValueSerializer::WriteHeapNumber(HeapNumber number) {
   WriteDouble(number.value());
 }
 
-void ValueSerializer::WriteMutableHeapNumber(MutableHeapNumber number) {
-  WriteTag(SerializationTag::kDouble);
-  WriteDouble(number.value());
-}
-
 void ValueSerializer::WriteBigInt(BigInt bigint) {
   WriteTag(SerializationTag::kBigInt);
   WriteBigIntContents(bigint);
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index 9e381d7e76df13..cc9bc1caea84b1 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -30,7 +30,6 @@ class JSMap;
 class JSPrimitiveWrapper;
 class JSRegExp;
 class JSSet;
-class MutableHeapNumber;
 class Object;
 class Oddball;
 class Smi;
@@ -111,7 +110,6 @@ class ValueSerializer {
   void WriteOddball(Oddball oddball);
   void WriteSmi(Smi smi);
   void WriteHeapNumber(HeapNumber number);
-  void WriteMutableHeapNumber(MutableHeapNumber number);
   void WriteBigInt(BigInt bigint);
   void WriteString(Handle<String> string);
   Maybe<bool> WriteJSReceiver(Handle<JSReceiver> receiver)
diff --git a/deps/v8/src/parsing/expression-scope-reparenter.cc b/deps/v8/src/parsing/expression-scope-reparenter.cc
index 78167a06e7c697..3f62616ebd33e2 100644
--- a/deps/v8/src/parsing/expression-scope-reparenter.cc
+++ b/deps/v8/src/parsing/expression-scope-reparenter.cc
@@ -105,7 +105,7 @@ void ReparentExpressionScope(uintptr_t stack_limit, Expression* expr,
   // sloppy eval.
   DCHECK(scope->is_block_scope());
   DCHECK(scope->is_declaration_scope());
-  DCHECK(scope->AsDeclarationScope()->calls_sloppy_eval());
+  DCHECK(scope->AsDeclarationScope()->sloppy_eval_can_extend_vars());
   DCHECK(scope->outer_scope()->is_function_scope());
 
   Reparenter r(stack_limit, expr, scope);
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
index 5a6ef376a8a52c..43bf754150fc92 100644
--- a/deps/v8/src/parsing/expression-scope.h
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -5,6 +5,8 @@
 #ifndef V8_PARSING_EXPRESSION_SCOPE_H_
 #define V8_PARSING_EXPRESSION_SCOPE_H_
 
+#include <utility>
+
 #include "src/ast/scopes.h"
 #include "src/common/message-template.h"
 #include "src/objects/function-kind.h"
@@ -82,6 +84,12 @@ class ExpressionScope {
     AsExpressionParsingScope()->ClearExpressionError();
   }
 
+  void ValidateAsExpression() {
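+    // Validate the pending parse as an expression and drop any accumulated
+    // pattern error: e.g. in `a.b = c` the assignment target is a property
+    // access, which is a valid expression but never a valid binding pattern.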
+    if (!CanBeExpression()) return;
+    AsExpressionParsingScope()->ValidateExpression();
+    AsExpressionParsingScope()->ClearPatternError();
+  }
+
   // Record async arrow parameters errors in all ambiguous async arrow scopes in
   // the chain up to the first unambiguous scope.
   void RecordAsyncArrowParametersError(const Scanner::Location& loc,
@@ -173,6 +181,18 @@ class ExpressionScope {
     return IsInRange(type_, kParameterDeclaration, kLexicalDeclaration);
   }
 
+  int SetInitializers(int variable_index, int peek_position) {
+    if (CanBeExpression()) {
+      return AsExpressionParsingScope()->SetInitializers(variable_index,
+                                                         peek_position);
+    }
+    return variable_index;
+  }
+
+  bool has_possible_arrow_parameter_in_scope_chain() const {
+    return has_possible_arrow_parameter_in_scope_chain_;
+  }
+
  protected:
   enum ScopeType : uint8_t {
     // Expression or assignment target.
@@ -201,7 +221,11 @@ class ExpressionScope {
         type_(type),
         has_possible_parameter_in_scope_chain_(
             CanBeParameterDeclaration() ||
-            (parent_ && parent_->has_possible_parameter_in_scope_chain_)) {
+            (parent_ && parent_->has_possible_parameter_in_scope_chain_)),
+        has_possible_arrow_parameter_in_scope_chain_(
+            CanBeArrowParameterDeclaration() ||
+            (parent_ &&
+             parent_->has_possible_arrow_parameter_in_scope_chain_)) {
     parser->expression_scope_ = this;
   }
 
@@ -265,6 +289,10 @@ class ExpressionScope {
     return IsInRange(type_, kMaybeArrowParameterDeclaration,
                      kParameterDeclaration);
   }
+  bool CanBeArrowParameterDeclaration() const {
+    return IsInRange(type_, kMaybeArrowParameterDeclaration,
+                     kMaybeAsyncArrowParameterDeclaration);
+  }
   bool IsCertainlyParameterDeclaration() const {
     return type_ == kParameterDeclaration;
   }
@@ -273,6 +301,7 @@ class ExpressionScope {
   ExpressionScope<Types>* parent_;
   ScopeType type_;
   bool has_possible_parameter_in_scope_chain_;
+  bool has_possible_arrow_parameter_in_scope_chain_;
 
   DISALLOW_COPY_AND_ASSIGN(ExpressionScope);
 };
@@ -458,8 +487,8 @@ class ExpressionParsingScope : public ExpressionScope<Types> {
       ExpressionScopeT::Report(Scanner::Location(begin, end),
                                MessageTemplate::kInvalidDestructuringTarget);
     }
-    for (VariableProxy* proxy : variable_list_) {
-      proxy->set_is_assigned();
+    for (auto& variable_initializer_pair : variable_list_) {
+      variable_initializer_pair.first->set_is_assigned();
     }
   }
 
@@ -471,18 +500,42 @@ class ExpressionParsingScope : public ExpressionScope<Types> {
     clear(kExpressionIndex);
   }
 
+  void ClearPatternError() {
+    DCHECK(verified_);
+#ifdef DEBUG
+    verified_ = false;
+#endif
+    clear(kPatternIndex);
+  }
+
   void TrackVariable(VariableProxy* variable) {
     if (!this->CanBeDeclaration()) {
       this->parser()->scope()->AddUnresolved(variable);
     }
-    variable_list_.Add(variable);
+    variable_list_.Add({variable, kNoSourcePosition});
   }
 
   void MarkIdentifierAsAssigned() {
     // It's possible we're parsing a syntax error. In that case it's not
     // guaranteed that there's a variable in the list.
     if (variable_list_.length() == 0) return;
-    variable_list_.at(variable_list_.length() - 1)->set_is_assigned();
+    variable_list_.at(variable_list_.length() - 1).first->set_is_assigned();
+  }
+
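+  // Tentatively record |position| as the initializer position for every
+  // variable tracked since |first_variable_index| that doesn't have one yet.
+  // E.g. while parsing `(a = 1, b = 2) => {}` this runs roughly after each
+  // comma, so each would-be arrow parameter picks up the position just past
+  // its initializer.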
+  int SetInitializers(int first_variable_index, int position) {
+    int len = variable_list_.length();
+    if (len == 0) return 0;
+
+    int end = len - 1;
+    // Loop backwards, aborting as soon as we find one that's already set, to
+    // avoid quadratic behavior on expressions like a,b,c,d,e,f,g (outside of
+    // an arrowhead).
+    // TODO(delphick): Look into removing this loop.
+    for (int i = end; i >= first_variable_index &&
+                      variable_list_.at(i).second == kNoSourcePosition;
+         --i) {
+      variable_list_.at(i).second = position;
+    }
+    return end;
   }
 
  protected:
@@ -496,7 +549,9 @@ class ExpressionParsingScope : public ExpressionScope<Types> {
 
   void ValidatePattern() { Validate(kPatternIndex); }
 
-  ScopedPtrList<VariableProxy>* variable_list() { return &variable_list_; }
+  ScopedList<std::pair<VariableProxy*, int>>* variable_list() {
+    return &variable_list_;
+  }
 
  private:
   friend class AccumulationScope<Types>;
@@ -542,7 +597,7 @@ class ExpressionParsingScope : public ExpressionScope<Types> {
   bool verified_ = false;
 #endif
 
-  ScopedPtrList<VariableProxy> variable_list_;
+  ScopedList<std::pair<VariableProxy*, int>> variable_list_;
   MessageTemplate messages_[kNumberOfErrors];
   Scanner::Location locations_[kNumberOfErrors];
   bool has_async_arrow_in_scope_chain_;
@@ -665,7 +720,8 @@ class ArrowHeadParsingScope : public ExpressionParsingScope<Types> {
     // references.
     this->parser()->next_arrow_function_info_.ClearStrictParameterError();
     ExpressionParsingScope<Types>::ValidateExpression();
-    for (VariableProxy* proxy : *this->variable_list()) {
+    for (auto& proxy_initializer_pair : *this->variable_list()) {
+      VariableProxy* proxy = proxy_initializer_pair.first;
       this->parser()->scope()->AddUnresolved(proxy);
     }
   }
@@ -683,21 +739,27 @@ class ArrowHeadParsingScope : public ExpressionParsingScope<Types> {
     VariableKind kind = PARAMETER_VARIABLE;
     VariableMode mode =
         has_simple_parameter_list_ ? VariableMode::kVar : VariableMode::kLet;
-    for (VariableProxy* proxy : *this->variable_list()) {
+    for (auto& proxy_initializer_pair : *this->variable_list()) {
+      VariableProxy* proxy = proxy_initializer_pair.first;
+      int initializer_position = proxy_initializer_pair.second;
+      // Default values for parameters will have been parsed as assignments so
+      // clear the is_assigned bit as they are not actually assignments.
+      proxy->clear_is_assigned();
       bool was_added;
-      this->parser()->DeclareAndBindVariable(
-          proxy, kind, mode, Variable::DefaultInitializationFlag(mode), result,
-          &was_added, proxy->position());
+      this->parser()->DeclareAndBindVariable(proxy, kind, mode, result,
+                                             &was_added, initializer_position);
       if (!was_added) {
         ExpressionScope<Types>::Report(proxy->location(),
                                        MessageTemplate::kParamDupe);
       }
     }
 
-    int initializer_position = this->parser()->end_position();
+#ifdef DEBUG
     for (auto declaration : *result->declarations()) {
-      declaration->var()->set_initializer_position(initializer_position);
+      DCHECK_NE(declaration->var()->initializer_position(), kNoSourcePosition);
     }
+#endif  // DEBUG
+
     if (uses_this_) result->UsesThis();
     return result;
   }
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 0ae09d9897aab4..e927c1a0d1aa69 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -28,6 +28,7 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
       stack_limit_(0),
       hash_seed_(0),
       function_kind_(FunctionKind::kNormalFunction),
+      function_syntax_kind_(FunctionSyntaxKind::kDeclaration),
       script_id_(-1),
       start_position_(0),
       end_position_(0),
@@ -62,7 +63,8 @@ ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
   set_allow_natives_syntax(FLAG_allow_natives_syntax);
   set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
   set_allow_harmony_import_meta(FLAG_harmony_import_meta);
-  set_allow_harmony_numeric_separator(FLAG_harmony_numeric_separator);
+  set_allow_harmony_optional_chaining(FLAG_harmony_optional_chaining);
+  set_allow_harmony_nullish(FLAG_harmony_nullish);
   set_allow_harmony_private_methods(FLAG_harmony_private_methods);
 }
 
@@ -74,15 +76,13 @@ ParseInfo::ParseInfo(Isolate* isolate)
 
 template <typename T>
 void ParseInfo::SetFunctionInfo(T function) {
-  set_is_named_expression(function->is_named_expression());
   set_language_mode(function->language_mode());
   set_function_kind(function->kind());
-  set_declaration(function->is_declaration());
+  set_function_syntax_kind(function->syntax_kind());
   set_requires_instance_members_initializer(
       function->requires_instance_members_initializer());
   set_toplevel(function->is_toplevel());
   set_is_oneshot_iife(function->is_oneshot_iife());
-  set_wrapped_as_function(function->is_wrapped());
 }
 
 ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
@@ -220,7 +220,9 @@ void ParseInfo::SetScriptForToplevelCompile(Isolate* isolate,
   set_toplevel();
   set_collect_type_profile(isolate->is_collecting_type_profile() &&
                            script->IsUserJavaScript());
-  set_wrapped_as_function(script->is_wrapped());
+  if (script->is_wrapped()) {
+    set_function_syntax_kind(FunctionSyntaxKind::kWrapped);
+  }
 }
 
 void ParseInfo::set_script(Handle<Script> script) {
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 7b74e7aa908a95..8afb12415513bb 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -13,6 +13,7 @@
 #include "src/common/globals.h"
 #include "src/handles/handles.h"
 #include "src/objects/function-kind.h"
+#include "src/objects/function-syntax-kind.h"
 #include "src/objects/script.h"
 #include "src/parsing/pending-compilation-error-handler.h"
 #include "src/parsing/preparse-data.h"
@@ -75,8 +76,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
   FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
   FLAG_ACCESSOR(kModule, is_module, set_module)
   FLAG_ACCESSOR(kAllowLazyParsing, allow_lazy_parsing, set_allow_lazy_parsing)
-  FLAG_ACCESSOR(kIsNamedExpression, is_named_expression,
-                set_is_named_expression)
   FLAG_ACCESSOR(kLazyCompile, lazy_compile, set_lazy_compile)
   FLAG_ACCESSOR(kCollectTypeProfile, collect_type_profile,
                 set_collect_type_profile)
@@ -88,10 +87,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
                 set_block_coverage_enabled)
   FLAG_ACCESSOR(kOnBackgroundThread, on_background_thread,
                 set_on_background_thread)
-  FLAG_ACCESSOR(kWrappedAsFunction, is_wrapped_as_function,
-                set_wrapped_as_function)
   FLAG_ACCESSOR(kAllowEvalCache, allow_eval_cache, set_allow_eval_cache)
-  FLAG_ACCESSOR(kIsDeclaration, is_declaration, set_declaration)
   FLAG_ACCESSOR(kRequiresInstanceMembersInitializer,
                 requires_instance_members_initializer,
                 set_requires_instance_members_initializer)
@@ -105,13 +101,16 @@ class V8_EXPORT_PRIVATE ParseInfo {
                 set_allow_harmony_dynamic_import)
   FLAG_ACCESSOR(kAllowHarmonyImportMeta, allow_harmony_import_meta,
                 set_allow_harmony_import_meta)
-  FLAG_ACCESSOR(kAllowHarmonyNumericSeparator, allow_harmony_numeric_separator,
-                set_allow_harmony_numeric_separator)
+  FLAG_ACCESSOR(kAllowHarmonyOptionalChaining, allow_harmony_optional_chaining,
+                set_allow_harmony_optional_chaining)
   FLAG_ACCESSOR(kAllowHarmonyPrivateMethods, allow_harmony_private_methods,
                 set_allow_harmony_private_methods)
   FLAG_ACCESSOR(kIsOneshotIIFE, is_oneshot_iife, set_is_oneshot_iife)
   FLAG_ACCESSOR(kCollectSourcePositions, collect_source_positions,
                 set_collect_source_positions)
+  FLAG_ACCESSOR(kAllowHarmonyNullish, allow_harmony_nullish,
+                set_allow_harmony_nullish)
+
 #undef FLAG_ACCESSOR
 
   void set_parse_restriction(ParseRestriction restriction) {
@@ -189,6 +188,17 @@ class V8_EXPORT_PRIVATE ParseInfo {
     function_kind_ = function_kind;
   }
 
+  FunctionSyntaxKind function_syntax_kind() const {
+    return function_syntax_kind_;
+  }
+  void set_function_syntax_kind(FunctionSyntaxKind function_syntax_kind) {
+    function_syntax_kind_ = function_syntax_kind;
+  }
+
+  bool is_wrapped_as_function() const {
+    return function_syntax_kind() == FunctionSyntaxKind::kWrapped;
+  }
+
   int max_function_literal_id() const { return max_function_literal_id_; }
   void set_max_function_literal_id(int max_function_literal_id) {
     max_function_literal_id_ = max_function_literal_id;
@@ -277,7 +287,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
   void SetFunctionInfo(T function);
 
   // Various configuration flags for parsing.
-  enum Flag {
+  enum Flag : uint32_t {
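+    // Backed by uint32_t (as is the flags_ field below) so that all 32 bit
+    // positions are usable without relying on signed-int shifts.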
     // ---------- Input flags ---------------------------
     kToplevel = 1 << 0,
     kEager = 1 << 1,
@@ -287,40 +297,39 @@ class V8_EXPORT_PRIVATE ParseInfo {
     kParseRestriction = 1 << 5,
     kModule = 1 << 6,
     kAllowLazyParsing = 1 << 7,
-    kIsNamedExpression = 1 << 8,
-    kLazyCompile = 1 << 9,
-    kCollectTypeProfile = 1 << 10,
-    kCoverageEnabled = 1 << 11,
-    kBlockCoverageEnabled = 1 << 12,
-    kIsAsmWasmBroken = 1 << 13,
-    kOnBackgroundThread = 1 << 14,
-    kWrappedAsFunction = 1 << 15,  // Implicitly wrapped as function.
-    kAllowEvalCache = 1 << 16,
-    kIsDeclaration = 1 << 17,
-    kRequiresInstanceMembersInitializer = 1 << 18,
-    kContainsAsmModule = 1 << 19,
-    kMightAlwaysOpt = 1 << 20,
-    kAllowLazyCompile = 1 << 21,
-    kAllowNativeSyntax = 1 << 22,
-    kAllowHarmonyPublicFields = 1 << 23,
-    kAllowHarmonyStaticFields = 1 << 24,
-    kAllowHarmonyDynamicImport = 1 << 25,
-    kAllowHarmonyImportMeta = 1 << 26,
-    kAllowHarmonyNumericSeparator = 1 << 27,
-    kAllowHarmonyPrivateFields = 1 << 28,
-    kAllowHarmonyPrivateMethods = 1 << 29,
-    kIsOneshotIIFE = 1 << 30,
-    kCollectSourcePositions = 1 << 31,
+    kLazyCompile = 1 << 8,
+    kCollectTypeProfile = 1 << 9,
+    kCoverageEnabled = 1 << 10,
+    kBlockCoverageEnabled = 1 << 11,
+    kIsAsmWasmBroken = 1 << 12,
+    kOnBackgroundThread = 1 << 13,
+    kAllowEvalCache = 1 << 14,
+    kRequiresInstanceMembersInitializer = 1 << 15,
+    kContainsAsmModule = 1 << 16,
+    kMightAlwaysOpt = 1 << 17,
+    kAllowLazyCompile = 1 << 18,
+    kAllowNativeSyntax = 1 << 19,
+    kAllowHarmonyPublicFields = 1 << 20,
+    kAllowHarmonyStaticFields = 1 << 21,
+    kAllowHarmonyDynamicImport = 1 << 22,
+    kAllowHarmonyImportMeta = 1 << 23,
+    kAllowHarmonyOptionalChaining = 1 << 24,
+    kAllowHarmonyPrivateFields = 1 << 25,
+    kAllowHarmonyPrivateMethods = 1 << 26,
+    kIsOneshotIIFE = 1 << 27,
+    kCollectSourcePositions = 1 << 28,
+    kAllowHarmonyNullish = 1 << 29,
   };
 
   //------------- Inputs to parsing and scope analysis -----------------------
   std::unique_ptr<Zone> zone_;
-  unsigned flags_;
+  uint32_t flags_;
   v8::Extension* extension_;
   DeclarationScope* script_scope_;
   uintptr_t stack_limit_;
   uint64_t hash_seed_;
   FunctionKind function_kind_;
+  FunctionSyntaxKind function_syntax_kind_;
   int script_id_;
   int start_position_;
   int end_position_;
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 2dfb0d2461a280..f43496b429cd5f 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -6,6 +6,7 @@
 #define V8_PARSING_PARSER_BASE_H_
 
 #include <stdint.h>
+#include <utility>
 #include <vector>
 
 #include "src/ast/ast-source-ranges.h"
@@ -284,11 +285,21 @@ class ParserBase {
 #undef ALLOW_ACCESSORS
 
   V8_INLINE bool has_error() const { return scanner()->has_parser_error(); }
-  bool allow_harmony_numeric_separator() const {
-    return scanner()->allow_harmony_numeric_separator();
+
+  bool allow_harmony_optional_chaining() const {
+    return scanner()->allow_harmony_optional_chaining();
+  }
+
+  void set_allow_harmony_optional_chaining(bool allow) {
+    scanner()->set_allow_harmony_optional_chaining(allow);
+  }
+
+  bool allow_harmony_nullish() const {
+    return scanner()->allow_harmony_nullish();
   }
-  void set_allow_harmony_numeric_separator(bool allow) {
-    scanner()->set_allow_harmony_numeric_separator(allow);
+
+  void set_allow_harmony_nullish(bool allow) {
+    scanner()->set_allow_harmony_nullish(allow);
   }
 
   uintptr_t stack_limit() const { return stack_limit_; }
@@ -624,9 +635,17 @@ class ParserBase {
     }
   }
 
-  RequiresBrandCheckFlag RequiresBrandCheck(ClassLiteralProperty::Kind kind) {
-    return kind == ClassLiteralProperty::Kind::FIELD ? kNoBrandCheck
-                                                     : kRequiresBrandCheck;
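+  // Maps a class-literal property kind to the VariableMode used for its
+  // private name: e.g. `#m() {}` declares kPrivateMethod, while a lone
+  // `get #x() {}` declares kPrivateGetterOnly.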
+  VariableMode GetVariableMode(ClassLiteralProperty::Kind kind) {
+    switch (kind) {
+      case ClassLiteralProperty::Kind::FIELD:
+        return VariableMode::kConst;
+      case ClassLiteralProperty::Kind::METHOD:
+        return VariableMode::kPrivateMethod;
+      case ClassLiteralProperty::Kind::GETTER:
+        return VariableMode::kPrivateGetterOnly;
+      case ClassLiteralProperty::Kind::SETTER:
+        return VariableMode::kPrivateSetterOnly;
+    }
   }
 
   const AstRawString* ClassFieldVariableName(AstValueFactory* ast_value_factory,
@@ -1019,7 +1038,8 @@ class ParserBase {
   ExpressionT ParseAssignmentExpressionCoverGrammar();
 
   ExpressionT ParseArrowParametersWithRest(ExpressionListT* list,
-                                           AccumulationScope* scope);
+                                           AccumulationScope* scope,
+                                           int seen_variables);
 
   ExpressionT ParseArrayLiteral();
 
@@ -1047,6 +1067,8 @@ class ParserBase {
   ExpressionT ParseYieldExpression();
   V8_INLINE ExpressionT ParseConditionalExpression();
   ExpressionT ParseConditionalContinuation(ExpressionT expression, int pos);
+  ExpressionT ParseLogicalExpression();
+  ExpressionT ParseCoalesceExpression(ExpressionT expression);
   ExpressionT ParseBinaryContinuation(ExpressionT x, int prec, int prec1);
   V8_INLINE ExpressionT ParseBinaryExpression(int prec);
   ExpressionT ParseUnaryOrPrefixExpression();
@@ -1106,7 +1128,7 @@ class ParserBase {
   void ParseFunctionBody(StatementListT* body, IdentifierT function_name,
                          int pos, const FormalParametersT& parameters,
                          FunctionKind kind,
-                         FunctionLiteral::FunctionType function_type,
+                         FunctionSyntaxKind function_syntax_kind,
                          FunctionBodyType body_type);
 
   // Check if the scope has conflicting var/let declarations from different
@@ -1271,18 +1293,7 @@ class ParserBase {
                                            Scope* scope) {
     if (impl()->IsIdentifier(expression) &&
         impl()->IsEval(impl()->AsIdentifier(expression))) {
-      scope->RecordInnerScopeEvalCall();
       function_state_->RecordFunctionOrEvalCall();
-      if (is_sloppy(scope->language_mode())) {
-        // For sloppy scopes we also have to record the call at function level,
-        // in case it includes declarations that will be hoisted.
-        scope->GetDeclarationScope()->RecordEvalCall();
-      }
-
-      // This call is only necessary to track evals that may be
-      // inside arrow function parameter lists. In that case,
-      // Scope::Snapshot::Reparent will move this bit down into
-      // the arrow function's scope.
       scope->RecordEvalCall();
 
       return Call::IS_POSSIBLY_EVAL;
@@ -1320,6 +1331,11 @@ class ParserBase {
     return expression_scope_;
   }
 
+  bool MaybeParsingArrowhead() const {
+    return expression_scope_ != nullptr &&
+           expression_scope_->has_possible_arrow_parameter_in_scope_chain();
+  }
+
   class AcceptINScope final {
    public:
     AcceptINScope(ParserBase* parser, bool accept_IN)
@@ -1365,7 +1381,9 @@ class ParserBase {
   };
 
   std::vector<void*>* pointer_buffer() { return &pointer_buffer_; }
-  std::vector<void*>* variable_buffer() { return &variable_buffer_; }
+  std::vector<std::pair<VariableProxy*, int>>* variable_buffer() {
+    return &variable_buffer_;
+  }
 
   // Parser base's protected field members.
 
@@ -1390,7 +1408,7 @@ class ParserBase {
   ExpressionScope* expression_scope_;
 
   std::vector<void*> pointer_buffer_;
-  std::vector<void*> variable_buffer_;
+  std::vector<std::pair<VariableProxy*, int>> variable_buffer_;
 
   Scanner* scanner_;
 
@@ -1688,6 +1706,7 @@ ParserBase<Impl>::ParsePrimaryExpression() {
       ClassifyParameter(name, beg_pos, end_position());
       ExpressionT result =
           impl()->ExpressionFromIdentifier(name, beg_pos, InferName::kNo);
+      parsing_scope.SetInitializers(0, peek_position());
       next_arrow_function_info_.scope = parsing_scope.ValidateAndCreateScope();
       return result;
     }
@@ -1825,9 +1844,11 @@ ParserBase<Impl>::ParseExpressionCoverGrammar() {
   ExpressionListT list(pointer_buffer());
   ExpressionT expression;
   AccumulationScope accumulation_scope(expression_scope());
+  int variable_index = 0;
   while (true) {
     if (V8_UNLIKELY(peek() == Token::ELLIPSIS)) {
-      return ParseArrowParametersWithRest(&list, &accumulation_scope);
+      return ParseArrowParametersWithRest(&list, &accumulation_scope,
+                                          variable_index);
     }
 
     int expr_pos = peek_position();
@@ -1836,6 +1857,9 @@ ParserBase<Impl>::ParseExpressionCoverGrammar() {
     ClassifyArrowParameter(&accumulation_scope, expr_pos, expression);
     list.Add(expression);
 
+    variable_index =
+        expression_scope()->SetInitializers(variable_index, peek_position());
+
     if (!Check(Token::COMMA)) break;
 
     if (peek() == Token::RPAREN && PeekAhead() == Token::ARROW) {
@@ -1863,7 +1887,7 @@ template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
 ParserBase<Impl>::ParseArrowParametersWithRest(
     typename ParserBase<Impl>::ExpressionListT* list,
-    AccumulationScope* accumulation_scope) {
+    AccumulationScope* accumulation_scope, int seen_variables) {
   Consume(Token::ELLIPSIS);
 
   Scanner::Location ellipsis = scanner()->location();
@@ -1885,6 +1909,8 @@ ParserBase<Impl>::ParseArrowParametersWithRest(
     return impl()->FailureExpression();
   }
 
+  expression_scope()->SetInitializers(seen_variables, peek_position());
+
   // 'x, y, ...z' is valid in CoverParenthesizedExpressionAndArrowParameterList
   // only as the formal parameters of '(x, y, ...z) => foo'; it is not itself
   // a valid expression.
@@ -2204,7 +2230,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassInfo* class_info,
 
       ExpressionT value = impl()->ParseFunctionLiteral(
           prop_info->name, scanner()->location(), kSkipFunctionNameCheck, kind,
-          name_token_position, FunctionLiteral::kAccessorOrMethod,
+          name_token_position, FunctionSyntaxKind::kAccessorOrMethod,
           language_mode(), nullptr);
 
       ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
@@ -2236,7 +2262,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassInfo* class_info,
 
       FunctionLiteralT value = impl()->ParseFunctionLiteral(
           prop_info->name, scanner()->location(), kSkipFunctionNameCheck, kind,
-          name_token_position, FunctionLiteral::kAccessorOrMethod,
+          name_token_position, FunctionSyntaxKind::kAccessorOrMethod,
           language_mode(), nullptr);
 
       ClassLiteralProperty::Kind property_kind =
@@ -2417,8 +2443,8 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
 
       ExpressionT value = impl()->ParseFunctionLiteral(
           name, scanner()->location(), kSkipFunctionNameCheck, kind,
-          next_loc.beg_pos, FunctionLiteral::kAccessorOrMethod, language_mode(),
-          nullptr);
+          next_loc.beg_pos, FunctionSyntaxKind::kAccessorOrMethod,
+          language_mode(), nullptr);
 
       ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
           name_expression, value, ObjectLiteralProperty::COMPUTED,
@@ -2449,8 +2475,8 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
 
       FunctionLiteralT value = impl()->ParseFunctionLiteral(
           name, scanner()->location(), kSkipFunctionNameCheck, kind,
-          next_loc.beg_pos, FunctionLiteral::kAccessorOrMethod, language_mode(),
-          nullptr);
+          next_loc.beg_pos, FunctionSyntaxKind::kAccessorOrMethod,
+          language_mode(), nullptr);
 
       ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
           name_expression, value,
@@ -2545,6 +2571,7 @@ void ParserBase<Impl>::ParseArguments(
   Consume(Token::LPAREN);
   AccumulationScope accumulation_scope(expression_scope());
 
+  int variable_index = 0;
   while (peek() != Token::RPAREN) {
     int start_pos = peek_position();
     bool is_spread = Check(Token::ELLIPSIS);
@@ -2572,6 +2599,10 @@ void ParserBase<Impl>::ParseArguments(
       argument = factory()->NewSpread(argument, start_pos, expr_pos);
     }
     args->Add(argument);
+
+    variable_index =
+        expression_scope()->SetInitializers(variable_index, peek_position());
+
     if (!Check(Token::COMMA)) break;
   }
 
@@ -2650,6 +2681,7 @@ ParserBase<Impl>::ParseAssignmentExpressionCoverGrammar() {
     expression_scope()->RecordDeclarationError(
         Scanner::Location(lhs_beg_pos, end_position()),
         MessageTemplate::kInvalidPropertyBindingPattern);
+    expression_scope()->ValidateAsExpression();
   } else if (expression->IsPattern() && op == Token::ASSIGN) {
     // Destructuring assignmment.
     if (expression->is_parenthesized()) {
@@ -2777,17 +2809,70 @@ template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
 ParserBase<Impl>::ParseConditionalExpression() {
   // ConditionalExpression ::
-  //   LogicalOrExpression
-  //   LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
+  //   LogicalExpression
+  //   LogicalExpression '?' AssignmentExpression ':' AssignmentExpression
+  //
   int pos = peek_position();
-  // We start using the binary expression parser for prec >= 4 only!
-  ExpressionT expression = ParseBinaryExpression(4);
+  ExpressionT expression = ParseLogicalExpression();
   return peek() == Token::CONDITIONAL
              ? ParseConditionalContinuation(expression, pos)
              : expression;
 }
 
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseLogicalExpression() {
+  // LogicalExpression ::
+  //   LogicalORExpression
+  //   CoalesceExpression
+
+  // Both LogicalORExpression and CoalesceExpression start with BitwiseOR.
+  // Parse binary expressions with precedence >= 6 (BitwiseOR) first.
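+  // Note that `??` does not associate directly with `||` or `&&`: per the
+  // proposal, `a || b ?? c` and `a ?? b || c` are SyntaxErrors unless
+  // parenthesized, e.g. `(a || b) ?? c`.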
+  ExpressionT expression = ParseBinaryExpression(6);
+  if (peek() == Token::AND || peek() == Token::OR) {
+    // LogicalORExpression: pick up parsing where we left off.
+    int prec1 = Token::Precedence(peek(), accept_IN_);
+    expression = ParseBinaryContinuation(expression, 4, prec1);
+  } else if (V8_UNLIKELY(peek() == Token::NULLISH)) {
+    expression = ParseCoalesceExpression(expression);
+  }
+  return expression;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseCoalesceExpression(ExpressionT expression) {
+  // CoalesceExpression ::
+  //   CoalesceExpressionHead ?? BitwiseORExpression
+  //
+  //   CoalesceExpressionHead ::
+  //     CoalesceExpression
+  //     BitwiseORExpression
+
+  // We create a binary operation for the first nullish; any further nullish
+  // operands are collapsed into a single n-ary expression.
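+  // For example, `a ?? b` stays a BinaryOperation, while `a ?? b ?? c`
+  // collapses into a single NaryOperation over three operands.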
+  bool first_nullish = true;
+  while (peek() == Token::NULLISH) {
+    SourceRange right_range;
+    SourceRangeScope right_range_scope(scanner(), &right_range);
+    Consume(Token::NULLISH);
+    int pos = peek_position();
+
+    // Parse BitwiseOR or higher.
+    ExpressionT y = ParseBinaryExpression(6);
+    if (first_nullish) {
+      expression =
+          factory()->NewBinaryOperation(Token::NULLISH, expression, y, pos);
+      impl()->RecordBinaryOperationSourceRange(expression, right_range);
+      first_nullish = false;
+    } else {
+      impl()->CollapseNaryExpression(&expression, y, Token::NULLISH, pos,
+                                     right_range);
+    }
+  }
+  return expression;
+}
+
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
 ParserBase<Impl>::ParseConditionalContinuation(ExpressionT expression,
@@ -3059,7 +3144,7 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
     }
 
     if (has_spread) {
-      result = impl()->SpreadCall(result, args, pos, Call::NOT_EVAL);
+      result = impl()->SpreadCall(result, args, pos, Call::NOT_EVAL, false);
     } else {
       result = factory()->NewCall(result, args, pos, Call::NOT_EVAL);
     }
@@ -3070,25 +3155,42 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
     if (!Token::IsPropertyOrCall(peek())) return result;
   }
 
+  bool optional_chaining = false;
+  bool is_optional = false;
   do {
     switch (peek()) {
+      case Token::QUESTION_PERIOD: {
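+        // Consume `?.` and re-dispatch on the next token: `a?.[i]` parses as
+        // a keyed property, `a?.(...)` as a call, and a plain name like
+        // `a?.b` falls through to the default case below; `a?..b` hits the
+        // PERIOD case and is reported as a syntax error.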
+        if (is_optional) {
+          ReportUnexpectedToken(peek());
+          return impl()->FailureExpression();
+        }
+        Consume(Token::QUESTION_PERIOD);
+        is_optional = true;
+        optional_chaining = true;
+        continue;
+      }
+
       /* Property */
       case Token::LBRACK: {
         Consume(Token::LBRACK);
         int pos = position();
         AcceptINScope scope(this, true);
         ExpressionT index = ParseExpressionCoverGrammar();
-        result = factory()->NewProperty(result, index, pos);
+        result = factory()->NewProperty(result, index, pos, is_optional);
         Expect(Token::RBRACK);
         break;
       }
 
       /* Property */
       case Token::PERIOD: {
+        if (is_optional) {
+          ReportUnexpectedToken(Next());
+          return impl()->FailureExpression();
+        }
         Consume(Token::PERIOD);
         int pos = position();
         ExpressionT key = ParsePropertyOrPrivatePropertyName();
-        result = factory()->NewProperty(result, key, pos);
+        result = factory()->NewProperty(result, key, pos, is_optional);
         break;
       }
 
@@ -3133,22 +3235,39 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
             CheckPossibleEvalCall(result, scope());
 
         if (has_spread) {
-          result = impl()->SpreadCall(result, args, pos, is_possibly_eval);
+          result = impl()->SpreadCall(result, args, pos, is_possibly_eval,
+                                      is_optional);
         } else {
-          result = factory()->NewCall(result, args, pos, is_possibly_eval);
+          result = factory()->NewCall(result, args, pos, is_possibly_eval,
+                                      is_optional);
         }
 
         fni_.RemoveLastFunction();
         break;
       }
 
-      /* Call */
       default:
+        /* Optional Property */
+        if (is_optional) {
+          DCHECK_EQ(scanner()->current_token(), Token::QUESTION_PERIOD);
+          int pos = position();
+          ExpressionT key = ParsePropertyOrPrivatePropertyName();
+          result = factory()->NewProperty(result, key, pos, is_optional);
+          break;
+        }
+        if (optional_chaining) {
+          impl()->ReportMessageAt(scanner()->peek_location(),
+                                  MessageTemplate::kOptionalChainingNoTemplate);
+          return impl()->FailureExpression();
+        }
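+        // e.g. a?.b`foo` is disallowed: tagged templates may not appear
+        // anywhere in an optional chain.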
+        /* Tagged Template */
         DCHECK(Token::IsTemplate(peek()));
         result = ParseTemplateLiteral(result, position(), true);
         break;
     }
-  } while (Token::IsPropertyOrCall(peek()));
+    is_optional = false;
+  } while (is_optional || Token::IsPropertyOrCall(peek()));
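+  // If any link used `?.`, wrap the whole continuation in an OptionalChain
+  // node so that short-circuiting covers the entire chain, e.g. `a?.b.c`
+  // evaluates to undefined without reading `.c` when `a` is nullish.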
+  if (optional_chaining) return factory()->NewOptionalChain(result);
   return result;
 }
 
@@ -3210,6 +3329,13 @@ ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression() {
     // The expression can still continue with . or [ after the arguments.
     return ParseMemberExpressionContinuation(result);
   }
+
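+  // An optional chain may not be the callee of `new`: e.g. `new foo?.bar()`
+  // is a SyntaxError.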
+  if (peek() == Token::QUESTION_PERIOD) {
+    impl()->ReportMessageAt(scanner()->peek_location(),
+                            MessageTemplate::kOptionalChainingNoNew);
+    return impl()->FailureExpression();
+  }
+
   // NewExpression without arguments.
   ExpressionListT args(pointer_buffer());
   return factory()->NewCallNew(result, args, new_pos);
@@ -3227,8 +3353,8 @@ ParserBase<Impl>::ParseFunctionExpression() {
   IdentifierT name = impl()->NullIdentifier();
   bool is_strict_reserved_name = Token::IsStrictReservedWord(peek());
   Scanner::Location function_name_location = Scanner::Location::invalid();
-  FunctionLiteral::FunctionType function_type =
-      FunctionLiteral::kAnonymousExpression;
+  FunctionSyntaxKind function_syntax_kind =
+      FunctionSyntaxKind::kAnonymousExpression;
   if (impl()->ParsingDynamicFunctionDeclaration()) {
     // We don't want dynamic functions to actually declare their name
     // "anonymous". We just want that name in the toString().
@@ -3239,14 +3365,14 @@ ParserBase<Impl>::ParseFunctionExpression() {
   } else if (peek_any_identifier()) {
     name = ParseIdentifier(function_kind);
     function_name_location = scanner()->location();
-    function_type = FunctionLiteral::kNamedExpression;
+    function_syntax_kind = FunctionSyntaxKind::kNamedExpression;
   }
   FunctionLiteralT result = impl()->ParseFunctionLiteral(
       name, function_name_location,
       is_strict_reserved_name ? kFunctionNameIsStrictReserved
                               : kFunctionNameValidityUnknown,
-      function_kind, function_token_position, function_type, language_mode(),
-      nullptr);
+      function_kind, function_token_position, function_syntax_kind,
+      language_mode(), nullptr);
   // TODO(verwaest): FailureFunctionLiteral?
   if (impl()->IsNull(result)) return impl()->FailureExpression();
   return result;
@@ -3332,6 +3458,11 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
         impl()->ReportMessage(MessageTemplate::kUnexpectedPrivateField);
         return impl()->FailureExpression();
       }
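+      // An optional chain may not start from `super`: e.g. `super?.x` is a
+      // SyntaxError.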
+      if (peek() == Token::QUESTION_PERIOD) {
+        Consume(Token::QUESTION_PERIOD);
+        impl()->ReportMessage(MessageTemplate::kOptionalChainingNoSuper);
+        return impl()->FailureExpression();
+      }
       scope->RecordSuperPropertyUsage();
       UseThis();
       return impl()->NewSuperPropertyReference(pos);
@@ -3342,7 +3473,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
       // TODO(rossberg): This might not be the correct FunctionState for the
       // method here.
       expression_scope()->RecordThisUse();
-      UseThis()->SetMaybeAssigned();
+      UseThis();
       return impl()->NewSuperCallReference(pos);
     }
   }
@@ -3749,7 +3880,7 @@ ParserBase<Impl>::ParseHoistableDeclaration(
 
   FunctionLiteralT function = impl()->ParseFunctionLiteral(
       name, scanner()->location(), name_validity, function_kind, pos,
-      FunctionLiteral::kDeclaration, language_mode(), nullptr);
+      FunctionSyntaxKind::kDeclaration, language_mode(), nullptr);
 
   // In ES6, a function behaves as a lexical binding, except in
   // a script scope, or the initial scope of eval or another function.
@@ -3859,7 +3990,7 @@ template <typename Impl>
 void ParserBase<Impl>::ParseFunctionBody(
     StatementListT* body, IdentifierT function_name, int pos,
     const FormalParametersT& parameters, FunctionKind kind,
-    FunctionLiteral::FunctionType function_type, FunctionBodyType body_type) {
+    FunctionSyntaxKind function_syntax_kind, FunctionBodyType body_type) {
   FunctionBodyParsingScope body_parsing_scope(impl());
 
   if (IsResumableFunction(kind)) impl()->PrepareGeneratorVariables();
@@ -3902,9 +4033,9 @@ void ParserBase<Impl>::ParseFunctionBody(
       DCHECK_EQ(FunctionBodyType::kBlock, body_type);
       // If we are parsing the source as if it is wrapped in a function, the
       // source ends without a closing brace.
-      Token::Value closing_token = function_type == FunctionLiteral::kWrapped
-                                       ? Token::EOS
-                                       : Token::RBRACE;
+      Token::Value closing_token =
+          function_syntax_kind == FunctionSyntaxKind::kWrapped ? Token::EOS
+                                                               : Token::RBRACE;
 
       if (IsAsyncGeneratorFunction(kind)) {
         impl()->ParseAndRewriteAsyncGeneratorFunctionBody(pos, kind,
@@ -3977,7 +4108,8 @@ void ParserBase<Impl>::ParseFunctionBody(
     function_scope->DeclareArguments(ast_value_factory());
   }
 
-  impl()->DeclareFunctionNameVar(function_name, function_type, function_scope);
+  impl()->DeclareFunctionNameVar(function_name, function_syntax_kind,
+                                 function_scope);
 
   inner_body.MergeInto(body);
 }
@@ -4104,7 +4236,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
         int dummy_function_length = -1;
         DCHECK_NE(kind & FunctionKind::kArrowFunction, 0);
         bool did_preparse_successfully = impl()->SkipFunction(
-            nullptr, kind, FunctionLiteral::kAnonymousExpression,
+            nullptr, kind, FunctionSyntaxKind::kAnonymousExpression,
             formal_parameters.scope, &dummy_num_parameters,
             &dummy_function_length, &produced_preparse_data);
 
@@ -4140,7 +4272,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
           AcceptINScope scope(this, true);
           ParseFunctionBody(&body, impl()->NullIdentifier(), kNoSourcePosition,
                             parameters, kind,
-                            FunctionLiteral::kAnonymousExpression,
+                            FunctionSyntaxKind::kAnonymousExpression,
                             FunctionBodyType::kBlock);
           CHECK(has_error());
           return impl()->FailureExpression();
@@ -4150,7 +4282,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
         AcceptINScope scope(this, true);
         ParseFunctionBody(&body, impl()->NullIdentifier(), kNoSourcePosition,
                           formal_parameters, kind,
-                          FunctionLiteral::kAnonymousExpression,
+                          FunctionSyntaxKind::kAnonymousExpression,
                           FunctionBodyType::kBlock);
         expected_property_count = function_state.expected_property_count();
       }
@@ -4159,7 +4291,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
       has_braces = false;
       ParseFunctionBody(&body, impl()->NullIdentifier(), kNoSourcePosition,
                         formal_parameters, kind,
-                        FunctionLiteral::kAnonymousExpression,
+                        FunctionSyntaxKind::kAnonymousExpression,
                         FunctionBodyType::kExpression);
       expected_property_count = function_state.expected_property_count();
     }
@@ -4179,7 +4311,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
       expected_property_count, formal_parameters.num_parameters(),
       formal_parameters.function_length,
       FunctionLiteral::kNoDuplicateParameters,
-      FunctionLiteral::kAnonymousExpression, eager_compile_hint,
+      FunctionSyntaxKind::kAnonymousExpression, eager_compile_hint,
       formal_parameters.scope->start_position(), has_braces,
       function_literal_id, produced_preparse_data);
 
@@ -4343,7 +4475,7 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral() {
   int pos = position();
   Consume(Token::FUNCTION);
   IdentifierT name = impl()->NullIdentifier();
-  FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
+  FunctionSyntaxKind syntax_kind = FunctionSyntaxKind::kAnonymousExpression;
 
   ParseFunctionFlags flags = ParseFunctionFlag::kIsAsync;
   if (Check(Token::MUL)) flags |= ParseFunctionFlag::kIsGenerator;
@@ -4361,14 +4493,14 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral() {
                    scanner()->CurrentSymbol(ast_value_factory()) ==
                        ast_value_factory()->anonymous_string());
   } else if (peek_any_identifier()) {
-    type = FunctionLiteral::kNamedExpression;
+    syntax_kind = FunctionSyntaxKind::kNamedExpression;
     name = ParseIdentifier(kind);
   }
   FunctionLiteralT result = impl()->ParseFunctionLiteral(
       name, scanner()->location(),
       is_strict_reserved ? kFunctionNameIsStrictReserved
                          : kFunctionNameValidityUnknown,
-      kind, pos, type, language_mode(), nullptr);
+      kind, pos, syntax_kind, language_mode(), nullptr);
   if (impl()->IsNull(result)) return impl()->FailureExpression();
   return result;
 }
@@ -5742,6 +5874,7 @@ typename ParserBase<Impl>::ForStatementT ParserBase<Impl>::ParseStandardForLoop(
     int stmt_pos, ZonePtrList<const AstRawString>* labels,
     ZonePtrList<const AstRawString>* own_labels, ExpressionT* cond,
     StatementT* next, StatementT* body) {
+  CheckStackOverflow();
   ForStatementT loop = factory()->NewForStatement(labels, own_labels, stmt_pos);
   TargetT target(this, loop);
 
@@ -5928,15 +6061,14 @@ void ParserBase<Impl>::CheckClassMethodName(IdentifierT name,
 
   AstValueFactory* avf = ast_value_factory();
 
-  if (is_static) {
+  if (impl()->IdentifierEquals(name, avf->private_constructor_string())) {
+    ReportMessage(MessageTemplate::kConstructorIsPrivate);
+    return;
+  } else if (is_static) {
     if (impl()->IdentifierEquals(name, avf->prototype_string())) {
       ReportMessage(MessageTemplate::kStaticPrototype);
       return;
     }
-  } else if (impl()->IdentifierEquals(name,
-                                      avf->private_constructor_string())) {
-    ReportMessage(MessageTemplate::kConstructorIsPrivate);
-    return;
   } else if (impl()->IdentifierEquals(name, avf->constructor_string())) {
     if (flags != ParseFunctionFlag::kIsNormal || IsAccessor(type)) {
       MessageTemplate msg = (flags & ParseFunctionFlag::kIsGenerator) != 0
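
A note on the CheckStackOverflow() call added to ParseStandardForLoop above: a recursive-descent parser re-enters itself once per nested construct, so pathological input (thousands of nested loops) can exhaust the native stack unless each recursive entry point checks a limit. Below is a minimal sketch of the idea; V8's real check compares the current stack address against the isolate's stack limit, whereas this toy version merely counts depth.

// Toy recursive-descent fragment with a depth guard; illustrative only.
class ToyParser {
 public:
  bool ParseStatement() {
    if (++depth_ > kMaxDepth) {  // analogous to CheckStackOverflow()
      has_error_ = true;
      return false;
    }
    // ... a nested for-loop body would recurse into ParseStatement() here ...
    --depth_;
    return !has_error_;
  }

 private:
  static constexpr int kMaxDepth = 4096;  // arbitrary toy limit
  int depth_ = 0;
  bool has_error_ = false;
};
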
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 2a860da3d0ccfc..e1bebc71f04bad 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -78,8 +78,8 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
   FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
       name, function_scope, body, expected_property_count, parameter_count,
       parameter_count, FunctionLiteral::kNoDuplicateParameters,
-      FunctionLiteral::kAnonymousExpression, default_eager_compile_hint(), pos,
-      true, GetNextFunctionLiteralId());
+      FunctionSyntaxKind::kAnonymousExpression, default_eager_compile_hint(),
+      pos, true, GetNextFunctionLiteralId());
   return function_literal;
 }
 
@@ -424,7 +424,8 @@ Parser::Parser(ParseInfo* info)
   set_allow_natives(info->allow_natives_syntax());
   set_allow_harmony_dynamic_import(info->allow_harmony_dynamic_import());
   set_allow_harmony_import_meta(info->allow_harmony_import_meta());
-  set_allow_harmony_numeric_separator(info->allow_harmony_numeric_separator());
+  set_allow_harmony_nullish(info->allow_harmony_nullish());
+  set_allow_harmony_optional_chaining(info->allow_harmony_optional_chaining());
   set_allow_harmony_private_methods(info->allow_harmony_private_methods());
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
@@ -676,7 +677,7 @@ void Parser::ParseWrapped(Isolate* isolate, ParseInfo* info,
 
   FunctionLiteral* function_literal = ParseFunctionLiteral(
       function_name, location, kSkipFunctionNameCheck, kNormalFunction,
-      kNoSourcePosition, FunctionLiteral::kWrapped, LanguageMode::kSloppy,
+      kNoSourcePosition, FunctionSyntaxKind::kWrapped, LanguageMode::kSloppy,
       arguments_for_wrapped_function);
 
   Statement* return_statement = factory()->NewReturnStatement(
@@ -728,20 +729,6 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
   return result;
 }
 
-static FunctionLiteral::FunctionType ComputeFunctionType(ParseInfo* info) {
-  if (info->is_wrapped_as_function()) {
-    return FunctionLiteral::kWrapped;
-  } else if (info->is_declaration()) {
-    return FunctionLiteral::kDeclaration;
-  } else if (info->is_named_expression()) {
-    return FunctionLiteral::kNamedExpression;
-  } else if (IsConciseMethod(info->function_kind()) ||
-             IsAccessorFunction(info->function_kind())) {
-    return FunctionLiteral::kAccessorOrMethod;
-  }
-  return FunctionLiteral::kAnonymousExpression;
-}
-
 FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
                                          const AstRawString* raw_name) {
   DCHECK_EQ(parsing_on_main_thread_, isolate != nullptr);
@@ -770,8 +757,10 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
     BlockState block_state(&scope_, outer);
     DCHECK(is_sloppy(outer->language_mode()) ||
            is_strict(info->language_mode()));
-    FunctionLiteral::FunctionType function_type = ComputeFunctionType(info);
     FunctionKind kind = info->function_kind();
+    DCHECK_IMPLIES(
+        IsConciseMethod(kind) || IsAccessorFunction(kind),
+        info->function_syntax_kind() == FunctionSyntaxKind::kAccessorOrMethod);
 
     if (IsArrowFunction(kind)) {
       if (IsAsyncFunction(kind)) {
@@ -857,8 +846,8 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
               : nullptr;
       result = ParseFunctionLiteral(
           raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
-          kNoSourcePosition, function_type, info->language_mode(),
-          arguments_for_wrapped_function);
+          kNoSourcePosition, info->function_syntax_kind(),
+          info->language_mode(), arguments_for_wrapped_function);
     }
 
     if (has_error()) return nullptr;
@@ -1377,11 +1366,12 @@ VariableProxy* Parser::DeclareBoundVariable(const AstRawString* name,
 }
 
 void Parser::DeclareAndBindVariable(VariableProxy* proxy, VariableKind kind,
-                                    VariableMode mode, InitializationFlag init,
-                                    Scope* scope, bool* was_added, int begin,
-                                    int end) {
-  Variable* var = DeclareVariable(proxy->raw_name(), kind, mode, init, scope,
-                                  was_added, begin, end);
+                                    VariableMode mode, Scope* scope,
+                                    bool* was_added, int initializer_position) {
+  Variable* var = DeclareVariable(
+      proxy->raw_name(), kind, mode, Variable::DefaultInitializationFlag(mode),
+      scope, was_added, proxy->position(), kNoSourcePosition);
+  var->set_initializer_position(initializer_position);
   proxy->BindTo(var);
 }
 
@@ -1791,9 +1781,9 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
 }
 
 void Parser::DeclareFunctionNameVar(const AstRawString* function_name,
-                                    FunctionLiteral::FunctionType function_type,
+                                    FunctionSyntaxKind function_syntax_kind,
                                     DeclarationScope* function_scope) {
-  if (function_type == FunctionLiteral::kNamedExpression &&
+  if (function_syntax_kind == FunctionSyntaxKind::kNamedExpression &&
       function_scope->LookupLocal(function_name) == nullptr) {
     DCHECK_EQ(function_scope, scope());
     function_scope->DeclareFunctionVar(function_name);
@@ -2238,7 +2228,7 @@ void Parser::PrepareGeneratorVariables() {
 FunctionLiteral* Parser::ParseFunctionLiteral(
     const AstRawString* function_name, Scanner::Location function_name_location,
     FunctionNameValidity function_name_validity, FunctionKind kind,
-    int function_token_pos, FunctionLiteral::FunctionType function_type,
+    int function_token_pos, FunctionSyntaxKind function_syntax_kind,
     LanguageMode language_mode,
     ZonePtrList<const AstRawString>* arguments_for_wrapped_function) {
   // Function ::
@@ -2250,7 +2240,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
   // Setter ::
   //   '(' PropertySetParameterList ')' '{' FunctionBody '}'
 
-  bool is_wrapped = function_type == FunctionLiteral::kWrapped;
+  bool is_wrapped = function_syntax_kind == FunctionSyntaxKind::kWrapped;
   DCHECK_EQ(is_wrapped, arguments_for_wrapped_function != nullptr);
 
   int pos = function_token_pos == kNoSourcePosition ? peek_position()
@@ -2385,15 +2375,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
   // try to lazy parse in the first place, we'll have to parse eagerly.
   bool did_preparse_successfully =
       should_preparse &&
-      SkipFunction(function_name, kind, function_type, scope, &num_parameters,
-                   &function_length, &produced_preparse_data);
+      SkipFunction(function_name, kind, function_syntax_kind, scope,
+                   &num_parameters, &function_length, &produced_preparse_data);
 
   if (!did_preparse_successfully) {
     // If skipping aborted, it rewound the scanner until before the LPAREN.
     // Consume it in that case.
     if (should_preparse) Consume(Token::LPAREN);
     should_post_parallel_task = false;
-    ParseFunction(&body, function_name, pos, kind, function_type, scope,
+    ParseFunction(&body, function_name, pos, kind, function_syntax_kind, scope,
                   &num_parameters, &function_length, &has_duplicate_parameters,
                   &expected_property_count, &suspend_count,
                   arguments_for_wrapped_function);
@@ -2439,8 +2429,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
   // Note that the FunctionLiteral needs to be created in the main Zone again.
   FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
       function_name, scope, body, expected_property_count, num_parameters,
-      function_length, duplicate_parameters, function_type, eager_compile_hint,
-      pos, true, function_literal_id, produced_preparse_data);
+      function_length, duplicate_parameters, function_syntax_kind,
+      eager_compile_hint, pos, true, function_literal_id,
+      produced_preparse_data);
   function_literal->set_function_token_position(function_token_pos);
   function_literal->set_suspend_count(suspend_count);
 
@@ -2458,7 +2449,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
 }
 
 bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
-                          FunctionLiteral::FunctionType function_type,
+                          FunctionSyntaxKind function_syntax_kind,
                           DeclarationScope* function_scope, int* num_parameters,
                           int* function_length,
                           ProducedPreparseData** produced_preparse_data) {
@@ -2513,7 +2504,7 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
 
   PreParser::PreParseResult result = reusable_preparser()->PreParseFunction(
-      function_name, kind, function_type, function_scope, use_counts_,
+      function_name, kind, function_syntax_kind, function_scope, use_counts_,
       produced_preparse_data, this->script_id());
 
   if (result == PreParser::kPreParseStackOverflow) {
@@ -2555,7 +2546,7 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
       closest_class_scope->MigrateUnresolvedPrivateNameTail(
           factory(), unresolved_private_tail);
     }
-    function_scope->AnalyzePartially(this, factory());
+    function_scope->AnalyzePartially(this, factory(), MaybeParsingArrowhead());
   }
 
   return true;
@@ -2583,12 +2574,11 @@ Block* Parser::BuildParameterInitializationBlock(
                                     initial_value, kNoSourcePosition);
     }
 
-    Scope* param_scope = scope();
+    DeclarationScope* param_scope = scope()->AsDeclarationScope();
     ScopedPtrList<Statement>* param_init_statements = &init_statements;
 
     base::Optional<ScopedPtrList<Statement>> non_simple_param_init_statements;
-    if (!parameter->is_simple() &&
-        scope()->AsDeclarationScope()->calls_sloppy_eval()) {
+    if (!parameter->is_simple() && param_scope->sloppy_eval_can_extend_vars()) {
       param_scope = NewVarblockScope();
       param_scope->set_start_position(parameter->pattern->position());
       param_scope->set_end_position(parameter->initializer_end_position);
@@ -2613,7 +2603,7 @@ Block* Parser::BuildParameterInitializationBlock(
           factory()->NewBlock(true, *non_simple_param_init_statements);
       non_simple_param_init_statements.reset();
       param_block->set_scope(param_scope);
-      param_scope = param_scope->FinalizeBlockScope();
+      param_scope = param_scope->FinalizeBlockScope()->AsDeclarationScope();
       init_statements.Add(param_block);
     }
     ++index;
@@ -2678,7 +2668,7 @@ Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
 
 void Parser::ParseFunction(
     ScopedPtrList<Statement>* body, const AstRawString* function_name, int pos,
-    FunctionKind kind, FunctionLiteral::FunctionType function_type,
+    FunctionKind kind, FunctionSyntaxKind function_syntax_kind,
     DeclarationScope* function_scope, int* num_parameters, int* function_length,
     bool* has_duplicate_parameters, int* expected_property_count,
     int* suspend_count,
@@ -2687,7 +2677,7 @@ void Parser::ParseFunction(
 
   FunctionState function_state(&function_state_, &scope_, function_scope);
 
-  bool is_wrapped = function_type == FunctionLiteral::kWrapped;
+  bool is_wrapped = function_syntax_kind == FunctionSyntaxKind::kWrapped;
 
   int expected_parameters_end_pos = parameters_end_pos_;
   if (expected_parameters_end_pos != kNoSourcePosition) {
@@ -2749,8 +2739,8 @@ void Parser::ParseFunction(
   *function_length = formals.function_length;
 
   AcceptINScope scope(this, true);
-  ParseFunctionBody(body, function_name, pos, formals, kind, function_type,
-                    FunctionBodyType::kBlock);
+  ParseFunctionBody(body, function_name, pos, formals, kind,
+                    function_syntax_kind, FunctionBodyType::kBlock);
 
   *has_duplicate_parameters = formals.has_duplicate();
 
@@ -2781,15 +2771,15 @@ Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name) {
   return proxy->var();
 }
 
-Variable* Parser::CreatePrivateNameVariable(
-    ClassScope* scope, RequiresBrandCheckFlag requires_brand_check,
-    const AstRawString* name) {
+Variable* Parser::CreatePrivateNameVariable(ClassScope* scope,
+                                            VariableMode mode,
+                                            const AstRawString* name) {
   DCHECK_NOT_NULL(name);
   int begin = position();
   int end = end_position();
   bool was_added = false;
-  Variable* var =
-      scope->DeclarePrivateName(name, requires_brand_check, &was_added);
+  DCHECK(IsConstVariableMode(mode));
+  Variable* var = scope->DeclarePrivateName(name, mode, &was_added);
   if (!was_added) {
     Scanner::Location loc(begin, end);
     ReportMessageAt(loc, MessageTemplate::kVarRedeclaration, var->raw_name());
@@ -2824,14 +2814,8 @@ void Parser::DeclarePrivateClassMember(ClassScope* scope,
                                        ClassLiteralProperty* property,
                                        ClassLiteralProperty::Kind kind,
                                        bool is_static, ClassInfo* class_info) {
-  DCHECK_IMPLIES(kind == ClassLiteralProperty::Kind::METHOD,
+  DCHECK_IMPLIES(kind != ClassLiteralProperty::Kind::FIELD,
                  allow_harmony_private_methods());
-  // TODO(joyee): We do not support private accessors yet (which allow
-  // declaring the same private name twice). Make them noops.
-  if (kind != ClassLiteralProperty::Kind::FIELD &&
-      kind != ClassLiteralProperty::Kind::METHOD) {
-    return;
-  }
 
   if (kind == ClassLiteralProperty::Kind::FIELD) {
     if (is_static) {
@@ -2842,7 +2826,7 @@ void Parser::DeclarePrivateClassMember(ClassScope* scope,
   }
 
   Variable* private_name_var =
-      CreatePrivateNameVariable(scope, RequiresBrandCheck(kind), property_name);
+      CreatePrivateNameVariable(scope, GetVariableMode(kind), property_name);
   int pos = property->value()->position();
   if (pos == kNoSourcePosition) {
     pos = property->key()->position();
@@ -2886,7 +2870,7 @@ FunctionLiteral* Parser::CreateInitializerFunction(
   FunctionLiteral* result = factory()->NewFunctionLiteral(
       ast_value_factory()->GetOneByteString(name), scope, statements, 0, 0, 0,
       FunctionLiteral::kNoDuplicateParameters,
-      FunctionLiteral::kAnonymousExpression,
+      FunctionSyntaxKind::kAccessorOrMethod,
       FunctionLiteral::kShouldEagerCompile, scope->start_position(), false,
       GetNextFunctionLiteralId());
 
@@ -3168,10 +3152,12 @@ ArrayLiteral* Parser::ArrayLiteralFromListWithSpread(
 
 Expression* Parser::SpreadCall(Expression* function,
                                const ScopedPtrList<Expression>& args_list,
-                               int pos, Call::PossiblyEval is_possibly_eval) {
+                               int pos, Call::PossiblyEval is_possibly_eval,
+                               bool optional_chain) {
   // Handle this case in BytecodeGenerator.
   if (OnlyLastArgIsSpread(args_list) || function->IsSuperCallReference()) {
-    return factory()->NewCall(function, args_list, pos);
+    return factory()->NewCall(function, args_list, pos, Call::NOT_EVAL,
+                              optional_chain);
   }
 
   ScopedPtrList<Expression> args(pointer_buffer());
@@ -3186,8 +3172,9 @@ Expression* Parser::SpreadCall(Expression* function,
       VariableProxy* obj = factory()->NewVariableProxy(temp);
       Assignment* assign_obj = factory()->NewAssignment(
           Token::ASSIGN, obj, function->AsProperty()->obj(), kNoSourcePosition);
-      function = factory()->NewProperty(
-          assign_obj, function->AsProperty()->key(), kNoSourcePosition);
+      function =
+          factory()->NewProperty(assign_obj, function->AsProperty()->key(),
+                                 kNoSourcePosition, optional_chain);
       args.Add(function);
       obj = factory()->NewVariableProxy(temp);
       args.Add(obj);
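
An aside on the private-name changes in parser.cc above: replacing RequiresBrandCheckFlag with a VariableMode gives the class scope enough information to tell private fields, methods, and accessors apart when checking redeclarations; per the TODO removed above, private accessors legitimately declare the same name twice (a getter plus a setter). A hedged sketch of that merge rule follows; the mode names mirror V8's kPrivateGetterOnly and friends, but the logic is illustrative, not V8's implementation.

// Illustrative only: a getter and a setter may share one private name;
// any other duplicate is a redeclaration error.
enum class Mode { Method, GetterOnly, SetterOnly, GetterAndSetter };

bool TryRedeclare(Mode existing, Mode incoming, Mode* merged) {
  if ((existing == Mode::GetterOnly && incoming == Mode::SetterOnly) ||
      (existing == Mode::SetterOnly && incoming == Mode::GetterOnly)) {
    *merged = Mode::GetterAndSetter;  // get #x plus set #x is legal
    return true;
  }
  return false;
}
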
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index b7fb19c26fe366..8170dbb9207aa2 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -165,9 +165,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
   friend class i::VariableDeclarationParsingScope<ParserTypes<Parser>>;
   friend class i::ParameterDeclarationParsingScope<ParserTypes<Parser>>;
   friend class i::ArrowHeadParsingScope<ParserTypes<Parser>>;
-  friend bool v8::internal::parsing::ParseProgram(ParseInfo*, Isolate*);
+  friend bool v8::internal::parsing::ParseProgram(
+      ParseInfo*, Isolate*, parsing::ReportErrorsAndStatisticsMode stats_mode);
   friend bool v8::internal::parsing::ParseFunction(
-      ParseInfo*, Handle<SharedFunctionInfo> shared_info, Isolate*);
+      ParseInfo*, Handle<SharedFunctionInfo> shared_info, Isolate*,
+      parsing::ReportErrorsAndStatisticsMode stats_mode);
 
   bool AllowsLazyParsingWithoutUnresolvedVariables() const {
     return scope()->AllowsLazyParsingWithoutUnresolvedVariables(
@@ -289,7 +291,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
   void ParseAndRewriteAsyncGeneratorFunctionBody(
       int pos, FunctionKind kind, ScopedPtrList<Statement>* body);
   void DeclareFunctionNameVar(const AstRawString* function_name,
-                              FunctionLiteral::FunctionType function_type,
+                              FunctionSyntaxKind function_syntax_kind,
                               DeclarationScope* function_scope);
 
   Statement* DeclareFunction(const AstRawString* variable_name,
@@ -297,9 +299,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
                              VariableKind kind, int beg_pos, int end_pos,
                              ZonePtrList<const AstRawString>* names);
   Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name);
-  Variable* CreatePrivateNameVariable(
-      ClassScope* scope, RequiresBrandCheckFlag requires_brand_check,
-      const AstRawString* name);
+  Variable* CreatePrivateNameVariable(ClassScope* scope, VariableMode mode,
+                                      const AstRawString* name);
   FunctionLiteral* CreateInitializerFunction(
       const char* name, DeclarationScope* scope,
       ZonePtrList<ClassLiteral::Property>* fields);
@@ -365,7 +366,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
   FunctionLiteral* ParseFunctionLiteral(
       const AstRawString* name, Scanner::Location function_name_location,
       FunctionNameValidity function_name_validity, FunctionKind kind,
-      int function_token_position, FunctionLiteral::FunctionType type,
+      int function_token_position, FunctionSyntaxKind type,
       LanguageMode language_mode,
       ZonePtrList<const AstRawString>* arguments_for_wrapped_function);
 
@@ -387,9 +388,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
   VariableProxy* DeclareBoundVariable(const AstRawString* name,
                                       VariableMode mode, int pos);
   void DeclareAndBindVariable(VariableProxy* proxy, VariableKind kind,
-                              VariableMode mode, InitializationFlag init,
-                              Scope* declaration_scope, bool* was_added,
-                              int begin, int end = kNoSourcePosition);
+                              VariableMode mode, Scope* declaration_scope,
+                              bool* was_added, int initializer_position);
   V8_WARN_UNUSED_RESULT
   Variable* DeclareVariable(const AstRawString* name, VariableKind kind,
                             VariableMode mode, InitializationFlag init,
@@ -418,7 +418,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
   // parsing or could not identify an error correctly, meaning the caller needs
   // to fully reparse. In this case it resets the scanner and preparser state.
   bool SkipFunction(const AstRawString* function_name, FunctionKind kind,
-                    FunctionLiteral::FunctionType function_type,
+                    FunctionSyntaxKind function_syntax_kind,
                     DeclarationScope* function_scope, int* num_parameters,
                     int* function_length,
                     ProducedPreparseData** produced_preparsed_scope_data);
@@ -429,7 +429,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
 
   void ParseFunction(
       ScopedPtrList<Statement>* body, const AstRawString* function_name,
-      int pos, FunctionKind kind, FunctionLiteral::FunctionType function_type,
+      int pos, FunctionKind kind, FunctionSyntaxKind function_syntax_kind,
       DeclarationScope* function_scope, int* num_parameters,
       int* function_length, bool* has_duplicate_parameters,
       int* expected_property_count, int* suspend_count,
@@ -485,7 +485,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
       const ScopedPtrList<Expression>& list);
   Expression* SpreadCall(Expression* function,
                          const ScopedPtrList<Expression>& args, int pos,
-                         Call::PossiblyEval is_possibly_eval);
+                         Call::PossiblyEval is_possibly_eval,
+                         bool optional_chain);
   Expression* SpreadCallNew(Expression* function,
                             const ScopedPtrList<Expression>& args, int pos);
   Expression* RewriteSuperCall(Expression* call_expression);
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index af4cb9b5eed814..95cc1867874655 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -18,7 +18,8 @@ namespace v8 {
 namespace internal {
 namespace parsing {
 
-bool ParseProgram(ParseInfo* info, Isolate* isolate) {
+bool ParseProgram(ParseInfo* info, Isolate* isolate,
+                  ReportErrorsAndStatisticsMode mode) {
   DCHECK(info->is_toplevel());
   DCHECK_NULL(info->literal());
 
@@ -39,21 +40,25 @@ bool ParseProgram(ParseInfo* info, Isolate* isolate) {
 
   result = parser.ParseProgram(isolate, info);
   info->set_literal(result);
-  if (result == nullptr) {
-    info->pending_error_handler()->ReportErrors(isolate, info->script(),
-                                                info->ast_value_factory());
-  } else {
+  if (result) {
     info->set_language_mode(info->literal()->language_mode());
     if (info->is_eval()) {
       info->set_allow_eval_cache(parser.allow_eval_cache());
     }
   }
-  parser.UpdateStatistics(isolate, info->script());
+
+  if (mode == ReportErrorsAndStatisticsMode::kYes) {
+    if (result == nullptr) {
+      info->pending_error_handler()->ReportErrors(isolate, info->script(),
+                                                  info->ast_value_factory());
+    }
+    parser.UpdateStatistics(isolate, info->script());
+  }
   return (result != nullptr);
 }
 
 bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
-                   Isolate* isolate) {
+                   Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
   DCHECK(!info->is_toplevel());
   DCHECK(!shared_info.is_null());
   DCHECK_NULL(info->literal());
@@ -76,24 +81,28 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
 
   result = parser.ParseFunction(isolate, info, shared_info);
   info->set_literal(result);
-  if (result == nullptr) {
-    info->pending_error_handler()->ReportErrors(isolate, info->script(),
-                                                info->ast_value_factory());
-  } else {
+  if (result) {
     info->ast_value_factory()->Internalize(isolate);
     if (info->is_eval()) {
       info->set_allow_eval_cache(parser.allow_eval_cache());
     }
   }
-  parser.UpdateStatistics(isolate, info->script());
+
+  if (mode == ReportErrorsAndStatisticsMode::kYes) {
+    if (result == nullptr) {
+      info->pending_error_handler()->ReportErrors(isolate, info->script(),
+                                                  info->ast_value_factory());
+    }
+    parser.UpdateStatistics(isolate, info->script());
+  }
   return (result != nullptr);
 }
 
 bool ParseAny(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
-              Isolate* isolate) {
+              Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
   DCHECK(!shared_info.is_null());
-  return info->is_toplevel() ? ParseProgram(info, isolate)
-                             : ParseFunction(info, shared_info, isolate);
+  return info->is_toplevel() ? ParseProgram(info, isolate, mode)
+                             : ParseFunction(info, shared_info, isolate, mode);
 }
 
 }  // namespace parsing
diff --git a/deps/v8/src/parsing/parsing.h b/deps/v8/src/parsing/parsing.h
index 5f19500a192a80..bdf309d60b27c0 100644
--- a/deps/v8/src/parsing/parsing.h
+++ b/deps/v8/src/parsing/parsing.h
@@ -15,23 +15,27 @@ class SharedFunctionInfo;
 
 namespace parsing {
 
+enum class ReportErrorsAndStatisticsMode { kYes, kNo };
+
 // Parses the top-level source code represented by the parse info and sets its
 // function literal.  Returns false (and deallocates any allocated AST
 // nodes) if parsing failed.
-V8_EXPORT_PRIVATE bool ParseProgram(ParseInfo* info, Isolate* isolate);
+V8_EXPORT_PRIVATE bool ParseProgram(
+    ParseInfo* info, Isolate* isolate,
+    ReportErrorsAndStatisticsMode mode = ReportErrorsAndStatisticsMode::kYes);
 
 // Like ParseProgram but for an individual function which already has an
 // allocated shared function info.
-V8_EXPORT_PRIVATE bool ParseFunction(ParseInfo* info,
-                                     Handle<SharedFunctionInfo> shared_info,
-                                     Isolate* isolate);
+V8_EXPORT_PRIVATE bool ParseFunction(
+    ParseInfo* info, Handle<SharedFunctionInfo> shared_info, Isolate* isolate,
+    ReportErrorsAndStatisticsMode mode = ReportErrorsAndStatisticsMode::kYes);
 
 // If you don't know whether info->is_toplevel() is true or not, use this method
 // to dispatch to either of the above functions. Prefer to use the above methods
 // whenever possible.
-V8_EXPORT_PRIVATE bool ParseAny(ParseInfo* info,
-                                Handle<SharedFunctionInfo> shared_info,
-                                Isolate* isolate);
+V8_EXPORT_PRIVATE bool ParseAny(
+    ParseInfo* info, Handle<SharedFunctionInfo> shared_info, Isolate* isolate,
+    ReportErrorsAndStatisticsMode mode = ReportErrorsAndStatisticsMode::kYes);
 
 }  // namespace parsing
 }  // namespace internal
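
The parsing.cc restructuring above and the header defaults here keep existing callers unchanged (ReportErrorsAndStatisticsMode defaults to kYes) while letting a new caller parse speculatively and defer error reporting. A sketch of such a caller; the wrapper function is hypothetical, but the calls match the signatures in this patch.

bool TrySpeculativeParse(ParseInfo* info, Isolate* isolate) {
  if (parsing::ParseProgram(info, isolate,
                            parsing::ReportErrorsAndStatisticsMode::kNo)) {
    return true;
  }
  // The parse failed; errors were buffered rather than reported. Surface
  // them now (a real caller might instead retry with different options).
  info->pending_error_handler()->ReportErrors(isolate, info->script(),
                                              info->ast_value_factory());
  return false;
}
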
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index ea5e70a3c160fa..8743732ea2cedd 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -21,22 +21,19 @@ namespace internal {
 
 namespace {
 
-class ScopeCallsSloppyEvalField : public BitField8<bool, 0, 1> {};
-class InnerScopeCallsEvalField
-    : public BitField8<bool, ScopeCallsSloppyEvalField::kNext, 1> {};
-
-class VariableMaybeAssignedField : public BitField8<bool, 0, 1> {};
-class VariableContextAllocatedField
-    : public BitField8<bool, VariableMaybeAssignedField::kNext, 1> {};
-
-class HasDataField : public BitField<bool, 0, 1> {};
-class LengthEqualsParametersField
-    : public BitField<bool, HasDataField::kNext, 1> {};
-class NumberOfParametersField
-    : public BitField<uint16_t, LengthEqualsParametersField::kNext, 16> {};
-
-class LanguageField : public BitField8<LanguageMode, 0, 1> {};
-class UsesSuperField : public BitField8<bool, LanguageField::kNext, 1> {};
+using ScopeSloppyEvalCanExtendVarsField = BitField8<bool, 0, 1>;
+using InnerScopeCallsEvalField =
+    ScopeSloppyEvalCanExtendVarsField::Next<bool, 1>;
+
+using VariableMaybeAssignedField = BitField8<bool, 0, 1>;
+using VariableContextAllocatedField = VariableMaybeAssignedField::Next<bool, 1>;
+
+using HasDataField = BitField<bool, 0, 1>;
+using LengthEqualsParametersField = HasDataField::Next<bool, 1>;
+using NumberOfParametersField = LengthEqualsParametersField::Next<uint16_t, 16>;
+
+using LanguageField = BitField8<LanguageMode, 0, 1>;
+using UsesSuperField = LanguageField::Next<bool, 1>;
 STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
 
 }  // namespace
@@ -266,7 +263,7 @@ bool PreparseDataBuilder::ScopeNeedsData(Scope* scope) {
   }
   if (!scope->is_hidden()) {
     for (Variable* var : *scope->locals()) {
-      if (IsDeclaredVariableMode(var->mode())) return true;
+      if (IsSerializableVariableMode(var->mode())) return true;
     }
   }
   for (Scope* inner = scope->inner_scope(); inner != nullptr;
@@ -356,9 +353,9 @@ void PreparseDataBuilder::SaveDataForScope(Scope* scope) {
 #endif
 
   uint8_t eval =
-      ScopeCallsSloppyEvalField::encode(
+      ScopeSloppyEvalCanExtendVarsField::encode(
           scope->is_declaration_scope() &&
-          scope->AsDeclarationScope()->calls_sloppy_eval()) |
+          scope->AsDeclarationScope()->sloppy_eval_can_extend_vars()) |
       InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval());
   byte_data_.Reserve(kUint8Size);
   byte_data_.WriteUint8(eval);
@@ -369,7 +366,7 @@ void PreparseDataBuilder::SaveDataForScope(Scope* scope) {
   }
 
   for (Variable* var : *scope->locals()) {
-    if (IsDeclaredVariableMode(var->mode())) SaveDataForVariable(var);
+    if (IsSerializableVariableMode(var->mode())) SaveDataForVariable(var);
   }
 
   SaveDataForInnerScopes(scope);
@@ -603,7 +600,7 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(Scope* scope) {
 
   CHECK(scope_data_->HasRemainingBytes(ByteData::kUint8Size));
   uint32_t eval = scope_data_->ReadUint8();
-  if (ScopeCallsSloppyEvalField::decode(eval)) scope->RecordEvalCall();
+  if (ScopeSloppyEvalCanExtendVarsField::decode(eval)) scope->RecordEvalCall();
   if (InnerScopeCallsEvalField::decode(eval)) scope->RecordInnerScopeEvalCall();
 
   if (scope->is_function_scope()) {
@@ -612,7 +609,7 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(Scope* scope) {
   }
 
   for (Variable* var : *scope->locals()) {
-    if (IsDeclaredVariableMode(var->mode())) RestoreDataForVariable(var);
+    if (IsSerializableVariableMode(var->mode())) RestoreDataForVariable(var);
   }
 
   RestoreDataForInnerScopes(scope);
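
The using-declarations above switch from the subclass-per-field style to BitField::Next<> chaining, where each field derives its shift from the end of the previous one, so inserting a field renumbers everything after it automatically. A self-contained sketch of the pattern (not V8's actual template, which lives in V8's utility headers):

#include <cstdint>

template <class T, int kShift, int kSize, class U = uint32_t>
struct BitField {
  static constexpr U kMask = ((U{1} << kSize) - 1) << kShift;
  static constexpr int kNext = kShift + kSize;
  // Chain the following field directly off this one.
  template <class T2, int kSize2>
  using Next = BitField<T2, kNext, kSize2, U>;

  static constexpr U encode(T value) {
    return static_cast<U>(value) << kShift;
  }
  static constexpr T decode(U packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

using HasDataField = BitField<bool, 0, 1>;
using LengthEqualsParametersField = HasDataField::Next<bool, 1>;
using NumberOfParametersField = LengthEqualsParametersField::Next<uint16_t, 16>;

static_assert(NumberOfParametersField::kNext == 18, "fields pack tightly");
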
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index a078d792953c24..67ee1930accaca 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -107,9 +107,9 @@ void PreParserFormalParameters::ValidateStrictMode(PreParser* preparser) const {
 
 PreParser::PreParseResult PreParser::PreParseFunction(
     const AstRawString* function_name, FunctionKind kind,
-    FunctionLiteral::FunctionType function_type,
-    DeclarationScope* function_scope, int* use_counts,
-    ProducedPreparseData** produced_preparse_data, int script_id) {
+    FunctionSyntaxKind function_syntax_kind, DeclarationScope* function_scope,
+    int* use_counts, ProducedPreparseData** produced_preparse_data,
+    int script_id) {
   DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
   use_counts_ = use_counts;
   set_script_id(script_id);
@@ -229,7 +229,8 @@ PreParser::PreParseResult PreParser::PreParseFunction(
       // arguments'.
       function_scope->DeclareArguments(ast_value_factory());
 
-      DeclareFunctionNameVar(function_name, function_type, function_scope);
+      DeclareFunctionNameVar(function_name, function_syntax_kind,
+                             function_scope);
 
       if (preparse_data_builder_->HasData()) {
         *produced_preparse_data =
@@ -267,12 +268,12 @@ PreParser::PreParseResult PreParser::PreParseFunction(
 PreParser::Expression PreParser::ParseFunctionLiteral(
     Identifier function_name, Scanner::Location function_name_location,
     FunctionNameValidity function_name_validity, FunctionKind kind,
-    int function_token_pos, FunctionLiteral::FunctionType function_type,
+    int function_token_pos, FunctionSyntaxKind function_syntax_kind,
     LanguageMode language_mode,
     ZonePtrList<const AstRawString>* arguments_for_wrapped_function) {
   // Wrapped functions are not parsed in the preparser.
   DCHECK_NULL(arguments_for_wrapped_function);
-  DCHECK_NE(FunctionLiteral::kWrapped, function_type);
+  DCHECK_NE(FunctionSyntaxKind::kWrapped, function_syntax_kind);
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
   const RuntimeCallCounterId counters[2] = {
@@ -323,8 +324,8 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
     int pos = function_token_pos == kNoSourcePosition ? peek_position()
                                                       : function_token_pos;
     AcceptINScope scope(this, true);
-    ParseFunctionBody(&body, function_name, pos, formals, kind, function_type,
-                      FunctionBodyType::kBlock);
+    ParseFunctionBody(&body, function_name, pos, formals, kind,
+                      function_syntax_kind, FunctionBodyType::kBlock);
 
     // Parsing the body may change the language mode in our scope.
     language_mode = function_scope->language_mode();
@@ -385,7 +386,7 @@ PreParserBlock PreParser::BuildParameterInitializationBlock(
     const PreParserFormalParameters& parameters) {
   DCHECK(!parameters.is_simple);
   DCHECK(scope()->is_function_scope());
-  if (scope()->AsDeclarationScope()->calls_sloppy_eval() &&
+  if (scope()->AsDeclarationScope()->sloppy_eval_can_extend_vars() &&
       preparse_data_builder_ != nullptr) {
     // We cannot replicate the Scope structure constructed by the Parser,
     // because we've lost information whether each individual parameter was
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 33c312f392115e..d7c2a92dfa09e5 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -351,16 +351,14 @@ class PreParserExpression {
   // Expression nodes may be represented as multiple Types, not exclusively
   // through kExpression.
   // TODO(caitp, adamk): clean up PreParserExpression bitfields.
-  using IsParenthesizedField = BitField<bool, TypeField::kNext, 1>;
+  using IsParenthesizedField = TypeField::Next<bool, 1>;
 
   // The rest of the bits are interpreted depending on the value
   // of the Type field, so they can share the storage.
-  using ExpressionTypeField =
-      BitField<ExpressionType, IsParenthesizedField::kNext, 4>;
+  using ExpressionTypeField = IsParenthesizedField::Next<ExpressionType, 4>;
   using IdentifierTypeField =
-      BitField<PreParserIdentifier::Type, IsParenthesizedField::kNext, 8>;
-  using HasCoverInitializedNameField =
-      BitField<bool, IsParenthesizedField::kNext, 1>;
+      IsParenthesizedField::Next<PreParserIdentifier::Type, 8>;
+  using HasCoverInitializedNameField = IsParenthesizedField::Next<bool, 1>;
 
   uint32_t code_;
   friend class PreParser;
@@ -567,8 +565,13 @@ class PreParserFactory {
     return PreParserExpression::Default();
   }
 
+  PreParserExpression NewOptionalChain(const PreParserExpression& expr) {
+    return PreParserExpression::Default();
+  }
+
   PreParserExpression NewProperty(const PreParserExpression& obj,
-                                  const PreParserExpression& key, int pos) {
+                                  const PreParserExpression& key, int pos,
+                                  bool optional_chain = false) {
     if (key.IsIdentifier() && key.AsIdentifier().IsPrivateName()) {
       if (obj.IsThis()) {
         return PreParserExpression::ThisPrivateReference();
@@ -627,9 +630,10 @@ class PreParserFactory {
                                         int pos) {
     return PreParserExpression::Default();
   }
-  PreParserExpression NewCall(
-      PreParserExpression expression, const PreParserExpressionList& arguments,
-      int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
+  PreParserExpression NewCall(PreParserExpression expression,
+                              const PreParserExpressionList& arguments, int pos,
+                              Call::PossiblyEval possibly_eval = Call::NOT_EVAL,
+                              bool optional_chain = false) {
     if (possibly_eval == Call::IS_POSSIBLY_EVAL) {
       DCHECK(expression.IsIdentifier() && expression.AsIdentifier().IsEval());
       return PreParserExpression::CallEval();
@@ -661,7 +665,7 @@ class PreParserFactory {
       const PreParserScopedStatementList& body, int expected_property_count,
       int parameter_count, int function_length,
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
-      FunctionLiteral::FunctionType function_type,
+      FunctionSyntaxKind function_syntax_kind,
       FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
       bool has_braces, int function_literal_id,
       ProducedPreparseData* produced_preparse_data = nullptr) {
@@ -966,9 +970,9 @@ class PreParser : public ParserBase<PreParser> {
   // the final '}'.
   PreParseResult PreParseFunction(
       const AstRawString* function_name, FunctionKind kind,
-      FunctionLiteral::FunctionType function_type,
-      DeclarationScope* function_scope, int* use_counts,
-      ProducedPreparseData** produced_preparser_scope_data, int script_id);
+      FunctionSyntaxKind function_syntax_kind, DeclarationScope* function_scope,
+      int* use_counts, ProducedPreparseData** produced_preparser_scope_data,
+      int script_id);
 
   PreparseDataBuilder* preparse_data_builder() const {
     return preparse_data_builder_;
@@ -1008,7 +1012,7 @@ class PreParser : public ParserBase<PreParser> {
   }
 
   V8_INLINE bool SkipFunction(const AstRawString* name, FunctionKind kind,
-                              FunctionLiteral::FunctionType function_type,
+                              FunctionSyntaxKind function_syntax_kind,
                               DeclarationScope* function_scope,
                               int* num_parameters, int* function_length,
                               ProducedPreparseData** produced_preparse_data) {
@@ -1018,7 +1022,7 @@ class PreParser : public ParserBase<PreParser> {
   Expression ParseFunctionLiteral(
       Identifier name, Scanner::Location function_name_location,
       FunctionNameValidity function_name_validity, FunctionKind kind,
-      int function_token_pos, FunctionLiteral::FunctionType function_type,
+      int function_token_pos, FunctionSyntaxKind function_syntax_kind,
       LanguageMode language_mode,
       ZonePtrList<const AstRawString>* arguments_for_wrapped_function);
 
@@ -1054,7 +1058,8 @@ class PreParser : public ParserBase<PreParser> {
   V8_INLINE PreParserExpression SpreadCall(const PreParserExpression& function,
                                            const PreParserExpressionList& args,
                                            int pos,
-                                           Call::PossiblyEval possibly_eval);
+                                           Call::PossiblyEval possibly_eval,
+                                           bool optional_chain);
   V8_INLINE PreParserExpression
   SpreadCallNew(const PreParserExpression& function,
                 const PreParserExpressionList& args, int pos);
@@ -1093,23 +1098,26 @@ class PreParser : public ParserBase<PreParser> {
   }
 
   void DeclareAndBindVariable(const VariableProxy* proxy, VariableKind kind,
-                              VariableMode mode, InitializationFlag init,
-                              Scope* scope, bool* was_added, int position) {
-    DeclareVariableName(proxy->raw_name(), mode, scope, was_added, position,
-                        kind);
+                              VariableMode mode, Scope* scope, bool* was_added,
+                              int initializer_position) {
+    Variable* var = DeclareVariableName(proxy->raw_name(), mode, scope,
+                                        was_added, proxy->position(), kind);
+    var->set_initializer_position(initializer_position);
     // Don't bother actually binding the proxy.
   }
 
-  Variable* DeclarePrivateVariableName(
-      const AstRawString* name, ClassScope* scope,
-      RequiresBrandCheckFlag requires_brand_check, bool* was_added) {
-    return scope->DeclarePrivateName(name, requires_brand_check, was_added);
+  Variable* DeclarePrivateVariableName(const AstRawString* name,
+                                       ClassScope* scope, VariableMode mode,
+                                       bool* was_added) {
+    DCHECK(IsConstVariableMode(mode));
+    return scope->DeclarePrivateName(name, mode, was_added);
   }
 
   Variable* DeclareVariableName(const AstRawString* name, VariableMode mode,
                                 Scope* scope, bool* was_added,
                                 int position = kNoSourcePosition,
                                 VariableKind kind = NORMAL_VARIABLE) {
+    DCHECK(!IsPrivateMethodOrAccessorVariableMode(mode));
     Variable* var = scope->DeclareVariableName(name, mode, was_added, kind);
     if (var == nullptr) {
       ReportUnidentifiableError();
@@ -1156,11 +1164,10 @@ class PreParser : public ParserBase<PreParser> {
       int pos, FunctionKind kind, PreParserScopedStatementList* body) {
     ParseStatementList(body, Token::RBRACE);
   }
-  V8_INLINE void DeclareFunctionNameVar(
-      const AstRawString* function_name,
-      FunctionLiteral::FunctionType function_type,
-      DeclarationScope* function_scope) {
-    if (function_type == FunctionLiteral::kNamedExpression &&
+  V8_INLINE void DeclareFunctionNameVar(const AstRawString* function_name,
+                                        FunctionSyntaxKind function_syntax_kind,
+                                        DeclarationScope* function_scope) {
+    if (function_syntax_kind == FunctionSyntaxKind::kNamedExpression &&
         function_scope->LookupLocal(function_name) == nullptr) {
       DCHECK_EQ(function_scope, scope());
       function_scope->DeclareFunctionVar(function_name);
@@ -1169,9 +1176,9 @@ class PreParser : public ParserBase<PreParser> {
 
   V8_INLINE void DeclareFunctionNameVar(
       const PreParserIdentifier& function_name,
-      FunctionLiteral::FunctionType function_type,
+      FunctionSyntaxKind function_syntax_kind,
       DeclarationScope* function_scope) {
-    DeclareFunctionNameVar(function_name.string_, function_type,
+    DeclareFunctionNameVar(function_name.string_, function_syntax_kind,
                            function_scope);
   }
 
@@ -1249,16 +1256,10 @@ class PreParser : public ParserBase<PreParser> {
       ClassScope* scope, const PreParserIdentifier& property_name,
       const PreParserExpression& property, ClassLiteralProperty::Kind kind,
       bool is_static, ClassInfo* class_info) {
-    // TODO(joyee): We do not support private accessors yet (which allow
-    // declaring the same private name twice). Make them noops.
-    if (kind != ClassLiteralProperty::Kind::FIELD &&
-        kind != ClassLiteralProperty::Kind::METHOD) {
-      return;
-    }
     bool was_added;
 
     DeclarePrivateVariableName(property_name.string_, scope,
-                               RequiresBrandCheck(kind), &was_added);
+                               GetVariableMode(kind), &was_added);
     if (!was_added) {
       Scanner::Location loc(property.position(), property.position() + 1);
       ReportMessageAt(loc, MessageTemplate::kVarRedeclaration,
@@ -1697,8 +1698,9 @@ class PreParser : public ParserBase<PreParser> {
 PreParserExpression PreParser::SpreadCall(const PreParserExpression& function,
                                           const PreParserExpressionList& args,
                                           int pos,
-                                          Call::PossiblyEval possibly_eval) {
-  return factory()->NewCall(function, args, pos, possibly_eval);
+                                          Call::PossiblyEval possibly_eval,
+                                          bool optional_chain) {
+  return factory()->NewCall(function, args, pos, possibly_eval, optional_chain);
 }
 
 PreParserExpression PreParser::SpreadCallNew(
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 01ea0a0d02db62..0cd295fd2924a8 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -359,7 +359,7 @@ class RelocatingCharacterStream
   void UpdateBufferPointers() {
     DisallowHeapAllocation no_gc;
     Range<uint16_t> range =
-        byte_stream_.GetDataAt(0, runtime_call_stats(), &no_gc);
+        byte_stream_.GetDataAt(buffer_pos_, runtime_call_stats(), &no_gc);
     if (range.start != buffer_start_) {
       buffer_cursor_ = (buffer_cursor_ - buffer_start_) + range.start;
       buffer_start_ = range.start;
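
The one-line fix above makes RelocatingCharacterStream::UpdateBufferPointers re-fetch the character range at buffer_pos_ (the stream offset that buffer_start_ corresponds to) rather than at offset 0, so after the backing string moves during GC the rebased pointers still refer to the same characters. The rebasing idiom itself, as a small self-contained sketch:

#include <cstdint>

// Keep a cursor's logical offset stable when its backing buffer moves.
struct Cursor {
  const uint16_t* start;
  const uint16_t* pos;

  void Rebase(const uint16_t* new_start) {
    pos = new_start + (pos - start);  // preserve the offset (pos - start)
    start = new_start;
  }
};
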
diff --git a/deps/v8/src/parsing/scanner-inl.h b/deps/v8/src/parsing/scanner-inl.h
index ef5a8faf8526ec..b76076d92ffaaa 100644
--- a/deps/v8/src/parsing/scanner-inl.h
+++ b/deps/v8/src/parsing/scanner-inl.h
@@ -354,7 +354,6 @@ V8_INLINE Token::Value Scanner::ScanSingleToken() {
         case Token::RBRACE:
         case Token::LBRACK:
         case Token::RBRACK:
-        case Token::CONDITIONAL:
         case Token::COLON:
         case Token::SEMICOLON:
         case Token::COMMA:
@@ -363,6 +362,18 @@ V8_INLINE Token::Value Scanner::ScanSingleToken() {
           // One character tokens.
           return Select(token);
 
+        case Token::CONDITIONAL:
+          // ? ?. ??
+          Advance();
+          if (V8_UNLIKELY(allow_harmony_optional_chaining() && c0_ == '.')) {
+            Advance();
+            if (!IsDecimalDigit(c0_)) return Token::QUESTION_PERIOD;
+            PushBack('.');
+          } else if (V8_UNLIKELY(allow_harmony_nullish() && c0_ == '?')) {
+            return Select(Token::NULLISH);
+          }
+          return Token::CONDITIONAL;
+
         case Token::STRING:
           return ScanString();
 
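The new Token::CONDITIONAL case above packs three decisions into one scan of '?': '?.' becomes QUESTION_PERIOD only when the character after the '.' is not a decimal digit, so a conditional such as x ? .5 : y still lexes the fractional literal (hence the PushBack); '??' becomes NULLISH behind its flag; anything else stays a plain conditional. The same decision tree as a standalone sketch (hypothetical helper, not V8 code):

#include <cctype>
#include <string>

enum class Tok { Conditional, QuestionPeriod, Nullish };

// Scan the '?' at src[i]; advances i past whatever it consumes.
Tok ScanQuestion(const std::string& src, size_t& i) {
  ++i;  // consume '?'
  if (i < src.size() && src[i] == '.') {
    if (i + 1 >= src.size() ||
        !std::isdigit(static_cast<unsigned char>(src[i + 1]))) {
      ++i;  // consume '.'
      return Tok::QuestionPeriod;  // a?.b
    }
    return Tok::Conditional;  // a ? .5 : b -- leave '.' for the number scanner
  }
  if (i < src.size() && src[i] == '?') {
    ++i;  // consume the second '?'
    return Tok::Nullish;  // a ?? b
  }
  return Tok::Conditional;
}
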
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 2f74548020f19f..28e43747874605 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -92,7 +92,8 @@ bool Scanner::BookmarkScope::HasBeenApplied() const {
 Scanner::Scanner(Utf16CharacterStream* source, bool is_module)
     : source_(source),
       found_html_comment_(false),
-      allow_harmony_numeric_separator_(false),
+      allow_harmony_optional_chaining_(false),
+      allow_harmony_nullish_(false),
       is_module_(is_module),
       octal_pos_(Location::invalid()),
       octal_message_(MessageTemplate::kNone) {
@@ -628,13 +629,18 @@ bool Scanner::ScanDigitsWithNumericSeparators(bool (*predicate)(uc32 ch),
   return true;
 }
 
-bool Scanner::ScanDecimalDigits() {
-  if (allow_harmony_numeric_separator()) {
+bool Scanner::ScanDecimalDigits(bool allow_numeric_separator) {
+  if (allow_numeric_separator) {
     return ScanDigitsWithNumericSeparators(&IsDecimalDigit, false);
   }
   while (IsDecimalDigit(c0_)) {
     AddLiteralCharAdvance();
   }
+  if (c0_ == '_') {
+    ReportScannerError(Location(source_pos(), source_pos() + 1),
+                       MessageTemplate::kInvalidOrUnexpectedToken);
+    return false;
+  }
   return true;
 }
 
@@ -667,8 +673,8 @@ bool Scanner::ScanDecimalAsSmiWithNumericSeparators(uint64_t* value) {
   return true;
 }
 
-bool Scanner::ScanDecimalAsSmi(uint64_t* value) {
-  if (allow_harmony_numeric_separator()) {
+bool Scanner::ScanDecimalAsSmi(uint64_t* value, bool allow_numeric_separator) {
+  if (allow_numeric_separator) {
     return ScanDecimalAsSmiWithNumericSeparators(value);
   }
 
@@ -682,35 +688,11 @@ bool Scanner::ScanDecimalAsSmi(uint64_t* value) {
 }
 
 bool Scanner::ScanBinaryDigits() {
-  if (allow_harmony_numeric_separator()) {
-    return ScanDigitsWithNumericSeparators(&IsBinaryDigit, true);
-  }
-
-  // we must have at least one binary digit after 'b'/'B'
-  if (!IsBinaryDigit(c0_)) {
-    return false;
-  }
-
-  while (IsBinaryDigit(c0_)) {
-    AddLiteralCharAdvance();
-  }
-  return true;
+  return ScanDigitsWithNumericSeparators(&IsBinaryDigit, true);
 }
 
 bool Scanner::ScanOctalDigits() {
-  if (allow_harmony_numeric_separator()) {
-    return ScanDigitsWithNumericSeparators(&IsOctalDigit, true);
-  }
-
-  // we must have at least one octal digit after 'o'/'O'
-  if (!IsOctalDigit(c0_)) {
-    return false;
-  }
-
-  while (IsOctalDigit(c0_)) {
-    AddLiteralCharAdvance();
-  }
-  return true;
+  return ScanDigitsWithNumericSeparators(&IsOctalDigit, true);
 }
 
 bool Scanner::ScanImplicitOctalDigits(int start_pos,
@@ -734,26 +716,14 @@ bool Scanner::ScanImplicitOctalDigits(int start_pos,
 }
 
 bool Scanner::ScanHexDigits() {
-  if (allow_harmony_numeric_separator()) {
-    return ScanDigitsWithNumericSeparators(&IsHexDigit, true);
-  }
-
-  // we must have at least one hex digit after 'x'/'X'
-  if (!IsHexDigit(c0_)) {
-    return false;
-  }
-
-  while (IsHexDigit(c0_)) {
-    AddLiteralCharAdvance();
-  }
-  return true;
+  return ScanDigitsWithNumericSeparators(&IsHexDigit, true);
 }
 
 bool Scanner::ScanSignedInteger() {
   if (c0_ == '+' || c0_ == '-') AddLiteralCharAdvance();
   // we must have at least one decimal digit after 'e'/'E'
   if (!IsDecimalDigit(c0_)) return false;
-  return ScanDecimalDigits();
+  return ScanDecimalDigits(true);
 }
 
 Token::Value Scanner::ScanNumber(bool seen_period) {
@@ -767,11 +737,11 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
   if (seen_period) {
     // we have already seen a decimal point of the float
     AddLiteralChar('.');
-    if (allow_harmony_numeric_separator() && c0_ == '_') {
+    if (c0_ == '_') {
       return Token::ILLEGAL;
     }
     // we know we have at least one digit
-    if (!ScanDecimalDigits()) return Token::ILLEGAL;
+    if (!ScanDecimalDigits(true)) return Token::ILLEGAL;
   } else {
     // if the first character is '0' we must check for octals and hex
     if (c0_ == '0') {
@@ -801,7 +771,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
         }
       } else if (IsNonOctalDecimalDigit(c0_)) {
         kind = DECIMAL_WITH_LEADING_ZERO;
-      } else if (allow_harmony_numeric_separator() && c0_ == '_') {
+      } else if (c0_ == '_') {
         ReportScannerError(Location(source_pos(), source_pos() + 1),
                            MessageTemplate::kZeroDigitNumericSeparator);
         return Token::ILLEGAL;
@@ -810,11 +780,14 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
 
     // Parse decimal digits and allow trailing fractional part.
     if (IsDecimalNumberKind(kind)) {
+      bool allow_numeric_separator = kind != DECIMAL_WITH_LEADING_ZERO;
       // This is an optimization for parsing Decimal numbers as Smi's.
       if (at_start) {
         uint64_t value = 0;
         // scan subsequent decimal digits
-        if (!ScanDecimalAsSmi(&value)) return Token::ILLEGAL;
+        if (!ScanDecimalAsSmi(&value, allow_numeric_separator)) {
+          return Token::ILLEGAL;
+        }
 
         if (next().literal_chars.one_byte_literal().length() <= 10 &&
             value <= Smi::kMaxValue && c0_ != '.' && !IsIdentifierStart(c0_)) {
@@ -828,14 +801,16 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
         }
       }
 
-      if (!ScanDecimalDigits()) return Token::ILLEGAL;
+      if (!ScanDecimalDigits(allow_numeric_separator)) {
+        return Token::ILLEGAL;
+      }
       if (c0_ == '.') {
         seen_period = true;
         AddLiteralCharAdvance();
-        if (allow_harmony_numeric_separator() && c0_ == '_') {
+        if (c0_ == '_') {
           return Token::ILLEGAL;
         }
-        if (!ScanDecimalDigits()) return Token::ILLEGAL;
+        if (!ScanDecimalDigits(true)) return Token::ILLEGAL;
       }
     }
   }
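
With the scanner-level flag gone, the binary/octal/hex scanners above always take the numeric-separator path, decimal scanning threads an explicit allow_numeric_separator flag (disabled for numbers with a leading zero), and a trailing '_' is now rejected explicitly. The invariant being enforced, as a simplified self-contained model rather than V8's implementation: an underscore must sit between two digits, never leading, trailing, or doubled.

#include <cctype>
#include <string>

// Scan decimal digits with separators starting at src[i]; advances i.
bool ScanDigitsWithSeparators(const std::string& src, size_t& i) {
  bool prev_was_separator = false;
  bool seen_digit = false;
  while (i < src.size()) {
    char c = src[i];
    if (std::isdigit(static_cast<unsigned char>(c))) {
      seen_digit = true;
      prev_was_separator = false;
    } else if (c == '_') {
      if (!seen_digit || prev_was_separator) return false;  // _1, 1__2
      prev_was_separator = true;
    } else {
      break;
    }
    ++i;
  }
  return seen_digit && !prev_was_separator;  // also rejects trailing 1_
}
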
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index e2865bca1c02e4..c40d8f4ba390b8 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -406,13 +406,18 @@ class V8_EXPORT_PRIVATE Scanner {
 
   bool FoundHtmlComment() const { return found_html_comment_; }
 
-  bool allow_harmony_numeric_separator() const {
-    return allow_harmony_numeric_separator_;
+  bool allow_harmony_optional_chaining() const {
+    return allow_harmony_optional_chaining_;
   }
-  void set_allow_harmony_numeric_separator(bool allow) {
-    allow_harmony_numeric_separator_ = allow;
+
+  void set_allow_harmony_optional_chaining(bool allow) {
+    allow_harmony_optional_chaining_ = allow;
   }
 
+  bool allow_harmony_nullish() const { return allow_harmony_nullish_; }
+
+  void set_allow_harmony_nullish(bool allow) { allow_harmony_nullish_ = allow; }
+
   const Utf16CharacterStream* stream() const { return source_; }
 
   // If the next characters in the stream are "#!", the line is skipped.
@@ -646,9 +651,9 @@ class V8_EXPORT_PRIVATE Scanner {
 
   bool ScanDigitsWithNumericSeparators(bool (*predicate)(uc32 ch),
                                        bool is_check_first_digit);
-  bool ScanDecimalDigits();
+  bool ScanDecimalDigits(bool allow_numeric_separator);
   // Optimized function to scan decimal number as Smi.
-  bool ScanDecimalAsSmi(uint64_t* value);
+  bool ScanDecimalAsSmi(uint64_t* value, bool allow_numeric_separator);
   bool ScanDecimalAsSmiWithNumericSeparators(uint64_t* value);
   bool ScanHexDigits();
   bool ScanBinaryDigits();
@@ -721,7 +726,8 @@ class V8_EXPORT_PRIVATE Scanner {
   bool found_html_comment_;
 
   // Harmony flags to allow ESNext features.
-  bool allow_harmony_numeric_separator_;
+  bool allow_harmony_optional_chaining_;
+  bool allow_harmony_nullish_;
 
   const bool is_module_;
 
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index 197a26f3257c77..3f2e0ec870d01f 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -65,6 +65,7 @@ namespace internal {
   T(LBRACK, "[", 0)                                                \
   /* END Property */                                               \
   /* END Member */                                                 \
+  T(QUESTION_PERIOD, "?.", 0)                                      \
   T(LPAREN, "(", 0)                                                \
   /* END PropertyOrCall */                                         \
   T(RPAREN, ")", 0)                                                \
@@ -95,6 +96,7 @@ namespace internal {
   /* IsBinaryOp() relies on this block of enum values */           \
   /* being contiguous and sorted in the same order! */             \
   T(COMMA, ",", 1)                                                 \
+  T(NULLISH, "??", 3)                                              \
   T(OR, "||", 4)                                                   \
   T(AND, "&&", 5)                                                  \
                                                                    \
@@ -215,8 +217,8 @@ class V8_EXPORT_PRIVATE Token {
     return name_[token];
   }
 
-  class IsKeywordBits : public BitField8<bool, 0, 1> {};
-  class IsPropertyNameBits : public BitField8<bool, IsKeywordBits::kNext, 1> {};
+  using IsKeywordBits = BitField8<bool, 0, 1>;
+  using IsPropertyNameBits = IsKeywordBits::Next<bool, 1>;
 
   // Predicates
   static bool IsKeyword(Value token) {
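The token.h hunk above replaces BitField subclassing with alias templates; Next<> derives the following field's shift from the previous one's position and size, so adjacent fields pack without hand-written offsets. A reduced stand-in for that pattern (V8's real BitField8 carries more machinery):

#include <cstdint>

template <class T, int kShift, int kSize>
struct BitField8 {
  static constexpr uint8_t kMask =
      static_cast<uint8_t>(((1u << kSize) - 1u) << kShift);
  // The next field starts right after this one.
  template <class T2, int kSize2>
  using Next = BitField8<T2, kShift + kSize, kSize2>;
  static constexpr uint8_t encode(T value) {
    return static_cast<uint8_t>(static_cast<uint8_t>(value) << kShift);
  }
  static constexpr T decode(uint8_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

using IsKeywordBits = BitField8<bool, 0, 1>;
using IsPropertyNameBits = IsKeywordBits::Next<bool, 1>;  // occupies bit 1

static_assert(IsPropertyNameBits::kMask == 2, "fields pack adjacently");
static_assert(IsPropertyNameBits::decode(IsPropertyNameBits::encode(true)),
              "round-trips");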
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 495840fabf5b0b..4c35159b2e9d0a 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -47,21 +47,56 @@ class CpuSampler : public sampler::Sampler {
   SamplingEventsProcessor* processor_;
 };
 
-ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
-                                                 ProfileGenerator* generator)
+ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
+    : isolate_(isolate), listener_(listener) {
+  size_t profiler_count = isolate_->num_cpu_profilers();
+  profiler_count++;
+  isolate_->set_num_cpu_profilers(profiler_count);
+  isolate_->set_is_profiling(true);
+  isolate_->wasm_engine()->EnableCodeLogging(isolate_);
+
+  Logger* logger = isolate_->logger();
+  logger->AddCodeEventListener(listener_);
+  // Populate the ProfilerCodeObserver with the initial functions and
+  // callbacks on the heap.
+  DCHECK(isolate_->heap()->HasBeenSetUp());
+
+  if (!FLAG_prof_browser_mode) {
+    logger->LogCodeObjects();
+  }
+  logger->LogCompiledFunctions();
+  logger->LogAccessorCallbacks();
+}
+
+ProfilingScope::~ProfilingScope() {
+  isolate_->logger()->RemoveCodeEventListener(listener_);
+
+  size_t profiler_count = isolate_->num_cpu_profilers();
+  DCHECK_GT(profiler_count, 0);
+  profiler_count--;
+  isolate_->set_num_cpu_profilers(profiler_count);
+  if (profiler_count == 0) isolate_->set_is_profiling(false);
+}
+
+ProfilerEventsProcessor::ProfilerEventsProcessor(
+    Isolate* isolate, ProfileGenerator* generator,
+    ProfilerCodeObserver* code_observer)
     : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
       generator_(generator),
+      code_observer_(code_observer),
       running_(1),
       last_code_event_id_(0),
       last_processed_code_event_id_(0),
-      isolate_(isolate),
-      profiling_scope_(isolate) {}
-
-SamplingEventsProcessor::SamplingEventsProcessor(Isolate* isolate,
-                                                 ProfileGenerator* generator,
-                                                 base::TimeDelta period,
-                                                 bool use_precise_sampling)
-    : ProfilerEventsProcessor(isolate, generator),
+      isolate_(isolate) {
+  DCHECK(!code_observer_->processor());
+  code_observer_->set_processor(this);
+}
+
+SamplingEventsProcessor::SamplingEventsProcessor(
+    Isolate* isolate, ProfileGenerator* generator,
+    ProfilerCodeObserver* code_observer, base::TimeDelta period,
+    bool use_precise_sampling)
+    : ProfilerEventsProcessor(isolate, generator, code_observer),
       sampler_(new CpuSampler(isolate, this)),
       period_(period),
       use_precise_sampling_(use_precise_sampling) {
@@ -70,7 +105,10 @@ SamplingEventsProcessor::SamplingEventsProcessor(Isolate* isolate,
 
 SamplingEventsProcessor::~SamplingEventsProcessor() { sampler_->Stop(); }
 
-ProfilerEventsProcessor::~ProfilerEventsProcessor() = default;
+ProfilerEventsProcessor::~ProfilerEventsProcessor() {
+  DCHECK_EQ(code_observer_->processor(), this);
+  code_observer_->clear_processor();
+}
 
 void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
   event.generic.order = ++last_code_event_id_;
@@ -123,16 +161,13 @@ void ProfilerEventsProcessor::StopSynchronously() {
 bool ProfilerEventsProcessor::ProcessCodeEvent() {
   CodeEventsContainer record;
   if (events_buffer_.Dequeue(&record)) {
-    switch (record.generic.type) {
-#define PROFILER_TYPE_CASE(type, clss)                          \
-      case CodeEventRecord::type:                               \
-        record.clss##_.UpdateCodeMap(generator_->code_map());   \
-        break;
-
-      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)
-
-#undef PROFILER_TYPE_CASE
-      default: return true;  // Skip record.
+    if (record.generic.type == CodeEventRecord::NATIVE_CONTEXT_MOVE) {
+      NativeContextMoveEventRecord& nc_record =
+          record.NativeContextMoveEventRecord_;
+      generator_->UpdateNativeContextAddress(nc_record.from_address,
+                                             nc_record.to_address);
+    } else {
+      code_observer_->CodeEventHandlerInternal(record);
     }
     last_processed_code_event_id_ = record.generic.order;
     return true;
@@ -146,6 +181,7 @@ void ProfilerEventsProcessor::CodeEventHandler(
     case CodeEventRecord::CODE_CREATION:
     case CodeEventRecord::CODE_MOVE:
     case CodeEventRecord::CODE_DISABLE_OPT:
+    case CodeEventRecord::NATIVE_CONTEXT_MOVE:
       Enqueue(evt_rec);
       break;
     case CodeEventRecord::CODE_DEOPT: {
@@ -262,6 +298,62 @@ void* SamplingEventsProcessor::operator new(size_t size) {
 
 void SamplingEventsProcessor::operator delete(void* ptr) { AlignedFree(ptr); }
 
+ProfilerCodeObserver::ProfilerCodeObserver(Isolate* isolate)
+    : isolate_(isolate), processor_(nullptr) {
+  CreateEntriesForRuntimeCallStats();
+  LogBuiltins();
+}
+
+void ProfilerCodeObserver::CodeEventHandler(
+    const CodeEventsContainer& evt_rec) {
+  if (processor_) {
+    processor_->CodeEventHandler(evt_rec);
+    return;
+  }
+  CodeEventHandlerInternal(evt_rec);
+}
+
+void ProfilerCodeObserver::CodeEventHandlerInternal(
+    const CodeEventsContainer& evt_rec) {
+  CodeEventsContainer record = evt_rec;
+  switch (evt_rec.generic.type) {
+#define PROFILER_TYPE_CASE(type, clss)        \
+  case CodeEventRecord::type:                 \
+    record.clss##_.UpdateCodeMap(&code_map_); \
+    break;
+
+    CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)
+
+#undef PROFILER_TYPE_CASE
+    default:
+      break;
+  }
+}
+
+void ProfilerCodeObserver::CreateEntriesForRuntimeCallStats() {
+  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
+  for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
+    RuntimeCallCounter* counter = rcs->GetCounter(i);
+    DCHECK(counter->name());
+    auto entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
+                               "native V8Runtime");
+    code_map_.AddCode(reinterpret_cast<Address>(counter), entry, 1);
+  }
+}
+
+void ProfilerCodeObserver::LogBuiltins() {
+  Builtins* builtins = isolate_->builtins();
+  DCHECK(builtins->is_initialized());
+  for (int i = 0; i < Builtins::builtin_count; i++) {
+    CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
+    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
+    Builtins::Name id = static_cast<Builtins::Name>(i);
+    rec->instruction_start = builtins->builtin(id).InstructionStart();
+    rec->builtin_id = id;
+    CodeEventHandlerInternal(evt_rec);
+  }
+}
+
 int CpuProfiler::GetProfilesCount() {
   // The count of profiles doesn't depend on a security token.
   return static_cast<int>(profiles_->profiles()->size());
@@ -324,29 +416,37 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager)
 
 }  // namespace
 
-CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode)
-    : CpuProfiler(isolate, naming_mode, new CpuProfilesCollection(isolate),
-                  nullptr, nullptr) {}
+CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
+                         CpuProfilingLoggingMode logging_mode)
+    : CpuProfiler(isolate, naming_mode, logging_mode,
+                  new CpuProfilesCollection(isolate), nullptr, nullptr) {}
 
 CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
+                         CpuProfilingLoggingMode logging_mode,
                          CpuProfilesCollection* test_profiles,
                          ProfileGenerator* test_generator,
                          ProfilerEventsProcessor* test_processor)
     : isolate_(isolate),
       naming_mode_(naming_mode),
+      logging_mode_(logging_mode),
       base_sampling_interval_(base::TimeDelta::FromMicroseconds(
           FLAG_cpu_profiler_sampling_interval)),
       profiles_(test_profiles),
       generator_(test_generator),
       processor_(test_processor),
+      code_observer_(isolate),
       is_profiling_(false) {
   profiles_->set_cpu_profiler(this);
   GetProfilersManager()->AddProfiler(isolate, this);
+
+  if (logging_mode == kEagerLogging) EnableLogging();
 }
 
 CpuProfiler::~CpuProfiler() {
   DCHECK(!is_profiling_);
   GetProfilersManager()->RemoveProfiler(isolate_, this);
+
+  DisableLogging();
 }
 
 void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
@@ -362,20 +462,26 @@ void CpuProfiler::set_use_precise_sampling(bool value) {
 void CpuProfiler::ResetProfiles() {
   profiles_.reset(new CpuProfilesCollection(isolate_));
   profiles_->set_cpu_profiler(this);
-  profiler_listener_.reset();
   generator_.reset();
+  if (!profiling_scope_) profiler_listener_.reset();
 }
 
-void CpuProfiler::CreateEntriesForRuntimeCallStats() {
-  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
-  CodeMap* code_map = generator_->code_map();
-  for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
-    RuntimeCallCounter* counter = rcs->GetCounter(i);
-    DCHECK(counter->name());
-    auto entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
-                               "native V8Runtime");
-    code_map->AddCode(reinterpret_cast<Address>(counter), entry, 1);
+void CpuProfiler::EnableLogging() {
+  if (profiling_scope_) return;
+
+  if (!profiler_listener_) {
+    profiler_listener_.reset(
+        new ProfilerListener(isolate_, &code_observer_, naming_mode_));
   }
+  profiling_scope_.reset(
+      new ProfilingScope(isolate_, profiler_listener_.get()));
+}
+
+void CpuProfiler::DisableLogging() {
+  if (!profiling_scope_) return;
+
+  DCHECK(profiler_listener_);
+  profiling_scope_.reset();
 }
 
 base::TimeDelta CpuProfiler::ComputeSamplingInterval() const {
@@ -418,36 +524,23 @@ void CpuProfiler::StartProcessorIfNotStarted() {
     processor_->AddCurrentStack();
     return;
   }
-  isolate_->wasm_engine()->EnableCodeLogging(isolate_);
-  Logger* logger = isolate_->logger();
 
-  bool codemap_needs_initialization = false;
+  if (!profiling_scope_) {
+    DCHECK_EQ(logging_mode_, kLazyLogging);
+    EnableLogging();
+  }
+
   if (!generator_) {
-    generator_.reset(new ProfileGenerator(profiles_.get()));
-    codemap_needs_initialization = true;
-    CreateEntriesForRuntimeCallStats();
+    generator_.reset(
+        new ProfileGenerator(profiles_.get(), code_observer_.code_map()));
   }
+
   base::TimeDelta sampling_interval = ComputeSamplingInterval();
-  processor_.reset(new SamplingEventsProcessor(
-      isolate_, generator_.get(), sampling_interval, use_precise_sampling_));
-  if (profiler_listener_) {
-    profiler_listener_->set_observer(processor_.get());
-  } else {
-    profiler_listener_.reset(
-        new ProfilerListener(isolate_, processor_.get(), naming_mode_));
-  }
-  logger->AddCodeEventListener(profiler_listener_.get());
+  processor_.reset(
+      new SamplingEventsProcessor(isolate_, generator_.get(), &code_observer_,
+                                  sampling_interval, use_precise_sampling_));
   is_profiling_ = true;
-  // Enumerate stuff we already have in the heap.
-  DCHECK(isolate_->heap()->HasBeenSetUp());
-  if (codemap_needs_initialization) {
-    if (!FLAG_prof_browser_mode) {
-      logger->LogCodeObjects();
-    }
-    logger->LogCompiledFunctions();
-    logger->LogAccessorCallbacks();
-    LogBuiltins();
-  }
+
   // Enable stack sampling.
   processor_->AddCurrentStack();
   processor_->StartSynchronously();
@@ -471,26 +564,14 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
 }
 
 void CpuProfiler::StopProcessor() {
-  Logger* logger = isolate_->logger();
   is_profiling_ = false;
-  logger->RemoveCodeEventListener(profiler_listener_.get());
   processor_->StopSynchronously();
   processor_.reset();
-}
-
 
-void CpuProfiler::LogBuiltins() {
-  Builtins* builtins = isolate_->builtins();
-  DCHECK(builtins->is_initialized());
-  for (int i = 0; i < Builtins::builtin_count; i++) {
-    CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
-    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
-    Builtins::Name id = static_cast<Builtins::Name>(i);
-    rec->instruction_start = builtins->builtin(id).InstructionStart();
-    rec->builtin_id = id;
-    processor_->Enqueue(evt_rec);
+  DCHECK(profiling_scope_);
+  if (logging_mode_ == kLazyLogging) {
+    DisableLogging();
   }
 }
-
 }  // namespace internal
 }  // namespace v8
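Taken together, the cpu-profiler.cc hunks split logging lifetime from profiling lifetime: with kEagerLogging the listener attaches for the profiler's whole life, while kLazyLogging attaches it only while a profile is actually recording. A toy model of that lifecycle, with illustrative names rather than V8's:

#include <cassert>
#include <memory>

enum LoggingMode { kLazyLogging, kEagerLogging };

// Stands in for ProfilingScope: attach the listener on construction,
// detach on destruction.
struct Scope {};

class ToyProfiler {
 public:
  explicit ToyProfiler(LoggingMode mode) : mode_(mode) {
    if (mode_ == kEagerLogging) EnableLogging();
  }
  ~ToyProfiler() { DisableLogging(); }

  void StartProfiling() {
    if (!scope_) EnableLogging();  // the kLazyLogging path
  }
  void StopProfiling() {
    if (mode_ == kLazyLogging) DisableLogging();
  }
  bool logging() const { return scope_ != nullptr; }

 private:
  void EnableLogging() {
    if (!scope_) scope_ = std::make_unique<Scope>();
  }
  void DisableLogging() { scope_.reset(); }

  const LoggingMode mode_;
  std::unique_ptr<Scope> scope_;
};

int main() {
  ToyProfiler lazy(kLazyLogging);
  assert(!lazy.logging());  // nothing logged until a profile starts
  lazy.StartProfiling();
  assert(lazy.logging());
  lazy.StopProfiling();
  assert(!lazy.logging());
  ToyProfiler eager(kEagerLogging);
  assert(eager.logging());  // code events logged with no profile active
}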
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 96d53356e67ce5..093f28aba34209 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -13,6 +13,7 @@
 #include "src/base/platform/mutex.h"
 #include "src/base/platform/time.h"
 #include "src/execution/isolate.h"
+#include "src/handles/maybe-handles.h"
 #include "src/libsampler/sampler.h"
 #include "src/profiler/circular-queue.h"
 #include "src/profiler/profiler-listener.h"
@@ -30,21 +31,21 @@ class CpuProfile;
 class CpuProfilesCollection;
 class ProfileGenerator;
 
-#define CODE_EVENTS_TYPE_LIST(V)                         \
-  V(CODE_CREATION, CodeCreateEventRecord)                \
-  V(CODE_MOVE, CodeMoveEventRecord)                      \
-  V(CODE_DISABLE_OPT, CodeDisableOptEventRecord)         \
-  V(CODE_DEOPT, CodeDeoptEventRecord)                    \
+#define CODE_EVENTS_TYPE_LIST(V)                 \
+  V(CODE_CREATION, CodeCreateEventRecord)        \
+  V(CODE_MOVE, CodeMoveEventRecord)              \
+  V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
+  V(CODE_DEOPT, CodeDeoptEventRecord)            \
   V(REPORT_BUILTIN, ReportBuiltinEventRecord)
 
+#define VM_EVENTS_TYPE_LIST(V) \
+  CODE_EVENTS_TYPE_LIST(V)     \
+  V(NATIVE_CONTEXT_MOVE, NativeContextMoveEventRecord)
 
 class CodeEventRecord {
  public:
 #define DECLARE_TYPE(type, ignore) type,
-  enum Type {
-    NONE = 0,
-    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
-  };
+  enum Type { NONE = 0, VM_EVENTS_TYPE_LIST(DECLARE_TYPE) };
 #undef DECLARE_TYPE
 
   Type type;
@@ -102,6 +103,12 @@ class ReportBuiltinEventRecord : public CodeEventRecord {
   V8_INLINE void UpdateCodeMap(CodeMap* code_map);
 };
 
+// Signals that a native context's address has changed.
+class NativeContextMoveEventRecord : public CodeEventRecord {
+ public:
+  Address from_address;
+  Address to_address;
+};
 
 class TickSampleEventRecord {
  public:
@@ -124,33 +131,25 @@ class CodeEventsContainer {
   union  {
     CodeEventRecord generic;
 #define DECLARE_CLASS(ignore, type) type type##_;
-    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
+    VM_EVENTS_TYPE_LIST(DECLARE_CLASS)
 #undef DECLARE_CLASS
   };
 };
 
-// Maintains the number of active CPU profilers in an isolate.
+// Maintains the number of active CPU profilers in an isolate, and routes
+// logging to a given ProfilerListener.
 class ProfilingScope {
  public:
-  explicit ProfilingScope(Isolate* isolate) : isolate_(isolate) {
-    size_t profiler_count = isolate_->num_cpu_profilers();
-    profiler_count++;
-    isolate_->set_num_cpu_profilers(profiler_count);
-    isolate_->set_is_profiling(true);
-  }
-
-  ~ProfilingScope() {
-    size_t profiler_count = isolate_->num_cpu_profilers();
-    DCHECK_GT(profiler_count, 0);
-    profiler_count--;
-    isolate_->set_num_cpu_profilers(profiler_count);
-    if (profiler_count == 0) isolate_->set_is_profiling(false);
-  }
+  ProfilingScope(Isolate* isolate, ProfilerListener* listener);
+  ~ProfilingScope();
 
  private:
   Isolate* const isolate_;
+  ProfilerListener* const listener_;
 };
 
+class ProfilerCodeObserver;
+
 // This class implements both the profile events processor thread and
 // methods called by event producers: VM and stack sampler threads.
 class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
@@ -175,7 +174,8 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
   virtual void SetSamplingInterval(base::TimeDelta) {}
 
  protected:
-  ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator);
+  ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
+                          ProfilerCodeObserver* code_observer);
 
   // Called from events processing thread (Run() method.)
   bool ProcessCodeEvent();
@@ -188,6 +188,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
   virtual SampleProcessingResult ProcessOneSample() = 0;
 
   ProfileGenerator* generator_;
+  ProfilerCodeObserver* code_observer_;
   base::Atomic32 running_;
   base::ConditionVariable running_cond_;
   base::Mutex running_mutex_;
@@ -196,13 +197,13 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
   std::atomic<unsigned> last_code_event_id_;
   unsigned last_processed_code_event_id_;
   Isolate* isolate_;
-  ProfilingScope profiling_scope_;
 };
 
 class V8_EXPORT_PRIVATE SamplingEventsProcessor
     : public ProfilerEventsProcessor {
  public:
   SamplingEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
+                          ProfilerCodeObserver* code_observer,
                           base::TimeDelta period, bool use_precise_sampling);
   ~SamplingEventsProcessor() override;
 
@@ -241,11 +242,47 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
                                      // low sampling intervals on Windows.
 };
 
+// Builds and maintains a CodeMap tracking code objects on the VM heap. While
+// alive, logs generated code, callbacks, and builtins from the isolate.
+// Redirects events to the profiler events processor when present.
+class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
+ public:
+  explicit ProfilerCodeObserver(Isolate*);
+
+  void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
+
+  CodeMap* code_map() { return &code_map_; }
+
+ private:
+  friend class ProfilerEventsProcessor;
+
+  void CodeEventHandlerInternal(const CodeEventsContainer& evt_rec);
+
+  void CreateEntriesForRuntimeCallStats();
+  void LogBuiltins();
+
+  ProfilerEventsProcessor* processor() { return processor_; }
+
+  // Redirects code events to be enqueued on the given events processor.
+  void set_processor(ProfilerEventsProcessor* processor) {
+    processor_ = processor;
+  }
+
+  // Stops redirection of code events onto an events processor.
+  void clear_processor() { processor_ = nullptr; }
+
+  Isolate* const isolate_;
+  CodeMap code_map_;
+  ProfilerEventsProcessor* processor_;
+};
+
 class V8_EXPORT_PRIVATE CpuProfiler {
  public:
-  explicit CpuProfiler(Isolate* isolate, CpuProfilingNamingMode = kDebugNaming);
+  explicit CpuProfiler(Isolate* isolate, CpuProfilingNamingMode = kDebugNaming,
+                       CpuProfilingLoggingMode = kLazyLogging);
 
   CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
+              CpuProfilingLoggingMode logging_mode,
               CpuProfilesCollection* profiles, ProfileGenerator* test_generator,
               ProfilerEventsProcessor* test_processor);
 
@@ -255,6 +292,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
 
   using ProfilingMode = v8::CpuProfilingMode;
   using NamingMode = v8::CpuProfilingNamingMode;
+  using LoggingMode = v8::CpuProfilingLoggingMode;
 
   base::TimeDelta sampling_interval() const { return base_sampling_interval_; }
   void set_sampling_interval(base::TimeDelta value);
@@ -262,6 +300,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
   void CollectSample();
   void StartProfiling(const char* title, CpuProfilingOptions options = {});
   void StartProfiling(String title, CpuProfilingOptions options = {});
+
   CpuProfile* StopProfiling(const char* title);
   CpuProfile* StopProfiling(String title);
   int GetProfilesCount();
@@ -284,8 +323,9 @@ class V8_EXPORT_PRIVATE CpuProfiler {
   void StopProcessorIfLastProfile(const char* title);
   void StopProcessor();
   void ResetProfiles();
-  void LogBuiltins();
-  void CreateEntriesForRuntimeCallStats();
+
+  void EnableLogging();
+  void DisableLogging();
 
   // Computes a sampling interval sufficient to accommodate attached profiles.
   base::TimeDelta ComputeSamplingInterval() const;
@@ -295,6 +335,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
 
   Isolate* const isolate_;
   const NamingMode naming_mode_;
+  const LoggingMode logging_mode_;
   bool use_precise_sampling_ = true;
   // Sampling interval to which per-profile sampling intervals will be clamped
   // to a multiple of, or used as the default if unspecified.
@@ -303,6 +344,8 @@ class V8_EXPORT_PRIVATE CpuProfiler {
   std::unique_ptr<ProfileGenerator> generator_;
   std::unique_ptr<ProfilerEventsProcessor> processor_;
   std::unique_ptr<ProfilerListener> profiler_listener_;
+  std::unique_ptr<ProfilingScope> profiling_scope_;
+  ProfilerCodeObserver code_observer_;
   bool is_profiling_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
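The new ProfilerCodeObserver either forwards an event to the events processor, so it is serialized with tick samples on the profiler thread, or applies it to the CodeMap directly when no processor is attached. A reduced sketch of that routing, with hypothetical Event/Processor types:

struct Event {
  int id;
};

class Processor {
 public:
  void CodeEventHandler(const Event& e) {
    (void)e;  // the real code enqueues for the profiler thread
  }
};

class CodeObserver {
 public:
  void CodeEventHandler(const Event& e) {
    if (processor_) {
      processor_->CodeEventHandler(e);  // redirect while profiling is active
      return;
    }
    CodeEventHandlerInternal(e);  // otherwise apply straight to the code map
  }
  void set_processor(Processor* p) { processor_ = p; }
  void clear_processor() { processor_ = nullptr; }

 private:
  void CodeEventHandlerInternal(const Event& e) {
    (void)e;  // the real code updates code_map_
  }
  Processor* processor_ = nullptr;
};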
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 472dbdbb10b730..a498e8e21432b8 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -151,6 +151,17 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
   return ids_->FindEntry(HeapObject::cast(*obj).address());
 }
 
+SnapshotObjectId HeapProfiler::GetSnapshotObjectId(NativeObject obj) {
+  // Try to find id of regular native node first.
+  SnapshotObjectId id = ids_->FindEntry(reinterpret_cast<Address>(obj));
+  // In case no id has been found, check whether the native object has been
+  // merged into a V8 entry.
+  if (id == v8::HeapProfiler::kUnknownObjectId) {
+    id = ids_->FindMergedNativeEntry(obj);
+  }
+  return id;
+}
+
 void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
   base::MutexGuard guard(&profiler_mutex_);
   bool known_object = ids_->MoveObject(from, to, size);
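GetSnapshotObjectId(NativeObject) above is a two-step lookup: the regular address map first, then the merged-native-entry map, with kUnknownObjectId as the not-found sentinel. A self-contained equivalent using plain hash maps (the types stand in for V8's):

#include <cstdint>
#include <unordered_map>

using SnapshotObjectId = uint32_t;
// 0 is also the value v8::HeapProfiler::kUnknownObjectId expands to.
constexpr SnapshotObjectId kUnknownObjectId = 0;

struct IdMaps {
  std::unordered_map<const void*, SnapshotObjectId> entries;        // by address
  std::unordered_map<const void*, SnapshotObjectId> merged_native;  // merged nodes

  SnapshotObjectId Find(const void* obj) const {
    auto it = entries.find(obj);
    if (it != entries.end()) return it->second;
    auto merged = merged_native.find(obj);  // fall back to merged entries
    return merged != merged_native.end() ? merged->second : kUnknownObjectId;
  }
};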
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 940574282efd01..f7336eb6be50c8 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -52,6 +52,7 @@ class HeapProfiler : public HeapObjectAllocationTracker {
   int GetSnapshotsCount();
   HeapSnapshot* GetSnapshot(int index);
   SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
+  SnapshotObjectId GetSnapshotObjectId(NativeObject obj);
   void DeleteAllSnapshots();
   void RemoveSnapshot(HeapSnapshot* snapshot);
 
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index df941eda96a8d1..75b6aa7b77e1d1 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -352,7 +352,7 @@ void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
 SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
   base::HashMap::Entry* entry = entries_map_.Lookup(
       reinterpret_cast<void*>(addr), ComputeAddressHash(addr));
-  if (entry == nullptr) return 0;
+  if (entry == nullptr) return v8::HeapProfiler::kUnknownObjectId;
   int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
   EntryInfo& entry_info = entries_.at(entry_index);
   DCHECK(static_cast<uint32_t>(entries_.size()) > entries_map_.occupancy());
@@ -386,6 +386,25 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
   return id;
 }
 
+SnapshotObjectId HeapObjectsMap::FindMergedNativeEntry(NativeObject addr) {
+  auto it = merged_native_entries_map_.find(addr);
+  if (it == merged_native_entries_map_.end())
+    return v8::HeapProfiler::kUnknownObjectId;
+  return entries_[it->second].id;
+}
+
+void HeapObjectsMap::AddMergedNativeEntry(NativeObject addr,
+                                          Address canonical_addr) {
+  base::HashMap::Entry* entry =
+      entries_map_.Lookup(reinterpret_cast<void*>(canonical_addr),
+                          ComputeAddressHash(canonical_addr));
+  auto result = merged_native_entries_map_.insert(
+      {addr, reinterpret_cast<size_t>(entry->value)});
+  if (!result.second) {
+    result.first->second = reinterpret_cast<size_t>(entry->value);
+  }
+}
+
 void HeapObjectsMap::StopHeapObjectsTracking() { time_intervals_.clear(); }
 
 void HeapObjectsMap::UpdateHeapObjectsMap() {
@@ -465,9 +484,20 @@ SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream,
 void HeapObjectsMap::RemoveDeadEntries() {
   DCHECK(entries_.size() > 0 && entries_.at(0).id == 0 &&
          entries_.at(0).addr == kNullAddress);
+
+  // Build up temporary reverse map.
+  std::unordered_map<size_t, NativeObject> reverse_merged_native_entries_map;
+  for (const auto& it : merged_native_entries_map_) {
+    auto result =
+        reverse_merged_native_entries_map.emplace(it.second, it.first);
+    DCHECK(result.second);
+    USE(result);
+  }
+
   size_t first_free_entry = 1;
   for (size_t i = 1; i < entries_.size(); ++i) {
     EntryInfo& entry_info = entries_.at(i);
+    auto merged_reverse_it = reverse_merged_native_entries_map.find(i);
     if (entry_info.accessed) {
       if (first_free_entry != i) {
         entries_.at(first_free_entry) = entry_info;
@@ -478,11 +508,19 @@ void HeapObjectsMap::RemoveDeadEntries() {
                               ComputeAddressHash(entry_info.addr));
       DCHECK(entry);
       entry->value = reinterpret_cast<void*>(first_free_entry);
+      if (merged_reverse_it != reverse_merged_native_entries_map.end()) {
+        auto it = merged_native_entries_map_.find(merged_reverse_it->second);
+        DCHECK_NE(merged_native_entries_map_.end(), it);
+        it->second = first_free_entry;
+      }
       ++first_free_entry;
     } else {
       if (entry_info.addr) {
         entries_map_.Remove(reinterpret_cast<void*>(entry_info.addr),
                             ComputeAddressHash(entry_info.addr));
+        if (merged_reverse_it != reverse_merged_native_entries_map.end()) {
+          merged_native_entries_map_.erase(merged_reverse_it->second);
+        }
       }
     }
   }
@@ -1853,10 +1891,14 @@ HeapEntry* EmbedderGraphEntriesAllocator::AllocateEntry(HeapThing ptr) {
       reinterpret_cast<EmbedderGraphImpl::Node*>(ptr);
   DCHECK(node->IsEmbedderNode());
   size_t size = node->SizeInBytes();
-  return snapshot_->AddEntry(
-      EmbedderGraphNodeType(node), EmbedderGraphNodeName(names_, node),
-      static_cast<SnapshotObjectId>(reinterpret_cast<uintptr_t>(node) << 1),
-      static_cast<int>(size), 0);
+  Address lookup_address = reinterpret_cast<Address>(node->GetNativeObject());
+  SnapshotObjectId id =
+      (lookup_address) ? heap_object_map_->FindOrAddEntry(lookup_address, 0)
+                       : static_cast<SnapshotObjectId>(
+                             reinterpret_cast<uintptr_t>(node) << 1);
+  return snapshot_->AddEntry(EmbedderGraphNodeType(node),
+                             EmbedderGraphNodeName(names_, node), id,
+                             static_cast<int>(size), 0);
 }
 
 NativeObjectsExplorer::NativeObjectsExplorer(
@@ -1865,12 +1907,14 @@ NativeObjectsExplorer::NativeObjectsExplorer(
           Isolate::FromHeap(snapshot->profiler()->heap_object_map()->heap())),
       snapshot_(snapshot),
       names_(snapshot_->profiler()->names()),
+      heap_object_map_(snapshot_->profiler()->heap_object_map()),
       embedder_graph_entries_allocator_(
           new EmbedderGraphEntriesAllocator(snapshot)) {}
 
 HeapEntry* NativeObjectsExplorer::EntryForEmbedderGraphNode(
     EmbedderGraphImpl::Node* node) {
   EmbedderGraphImpl::Node* wrapper = node->WrapperNode();
+  NativeObject native_object = node->GetNativeObject();
   if (wrapper) {
     node = wrapper;
   }
@@ -1882,8 +1926,16 @@ HeapEntry* NativeObjectsExplorer::EntryForEmbedderGraphNode(
         static_cast<EmbedderGraphImpl::V8NodeImpl*>(node);
     Object object = v8_node->GetObject();
     if (object.IsSmi()) return nullptr;
-    return generator_->FindEntry(
+    HeapEntry* entry = generator_->FindEntry(
         reinterpret_cast<void*>(Object::cast(object).ptr()));
+    if (native_object) {
+      HeapObject heap_object = HeapObject::cast(object);
+      heap_object_map_->AddMergedNativeEntry(native_object,
+                                             heap_object.address());
+      DCHECK_EQ(entry->id(),
+                heap_object_map_->FindMergedNativeEntry(native_object));
+    }
+    return entry;
   }
 }
 
@@ -1945,13 +1997,13 @@ HeapSnapshotGenerator::HeapSnapshotGenerator(
 }
 
 namespace {
-class NullContextScope {
+class NullContextForSnapshotScope {
  public:
-  explicit NullContextScope(Isolate* isolate)
+  explicit NullContextForSnapshotScope(Isolate* isolate)
       : isolate_(isolate), prev_(isolate->context()) {
     isolate_->set_context(Context());
   }
-  ~NullContextScope() { isolate_->set_context(prev_); }
+  ~NullContextForSnapshotScope() { isolate_->set_context(prev_); }
 
  private:
   Isolate* isolate_;
@@ -1971,7 +2023,7 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
   heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
                                   GarbageCollectionReason::kHeapProfiler);
 
-  NullContextScope null_context_scope(Isolate::FromHeap(heap_));
+  NullContextForSnapshotScope null_context_scope(Isolate::FromHeap(heap_));
 
 #ifdef VERIFY_HEAP
   Heap* debug_heap = heap_;
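RemoveDeadEntries above compacts entries_ and must rewrite the indices stored in merged_native_entries_map_; building a temporary reverse map (index to native object) keeps each fixup O(1) during the single compaction pass. A reduced version of the same bookkeeping:

#include <cstddef>
#include <unordered_map>
#include <vector>

struct Entry {
  bool alive;
};

void Compact(std::vector<Entry>& entries,
             std::unordered_map<const void*, size_t>& native_to_index) {
  // Temporary reverse map, as in the hunk above.
  std::unordered_map<size_t, const void*> reverse;
  for (const auto& kv : native_to_index) reverse.emplace(kv.second, kv.first);

  size_t first_free = 0;
  for (size_t i = 0; i < entries.size(); ++i) {
    auto rev = reverse.find(i);
    if (entries[i].alive) {
      entries[first_free] = entries[i];
      // The entry moved; point its native mapping at the new slot.
      if (rev != reverse.end()) native_to_index[rev->second] = first_free;
      ++first_free;
    } else if (rev != reverse.end()) {
      native_to_index.erase(rev->second);  // entry died; drop its mapping
    }
  }
  entries.resize(first_free);
}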
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index d3d3330e27bb63..360ed1f009290f 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -82,8 +82,8 @@ class HeapGraphEdge {
   V8_INLINE HeapSnapshot* snapshot() const;
   int from_index() const { return FromIndexField::decode(bit_field_); }
 
-  class TypeField : public BitField<Type, 0, 3> {};
-  class FromIndexField : public BitField<int, 3, 29> {};
+  using TypeField = BitField<Type, 0, 3>;
+  using FromIndexField = BitField<int, 3, 29>;
   uint32_t bit_field_;
   HeapEntry* to_entry_;
   union {
@@ -249,6 +249,8 @@ class HeapObjectsMap {
   SnapshotObjectId FindOrAddEntry(Address addr,
                                   unsigned int size,
                                   bool accessed = true);
+  SnapshotObjectId FindMergedNativeEntry(NativeObject addr);
+  void AddMergedNativeEntry(NativeObject addr, Address canonical_addr);
   bool MoveObject(Address from, Address to, int size);
   void UpdateObjectSize(Address addr, int size);
   SnapshotObjectId last_assigned_id() const {
@@ -285,6 +287,8 @@ class HeapObjectsMap {
   base::HashMap entries_map_;
   std::vector<EntryInfo> entries_;
   std::vector<TimeInterval> time_intervals_;
+  // Map from NativeObject to EntryInfo index in entries_.
+  std::unordered_map<NativeObject, size_t> merged_native_entries_map_;
   Heap* heap_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
@@ -453,6 +457,7 @@ class NativeObjectsExplorer {
   Isolate* isolate_;
   HeapSnapshot* snapshot_;
   StringsStorage* names_;
+  HeapObjectsMap* heap_object_map_;
   std::unique_ptr<HeapEntriesAllocator> embedder_graph_entries_allocator_;
   // Used during references extraction.
   HeapSnapshotGenerator* generator_ = nullptr;
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 2d73c20a37fba5..bb5ef0da5b7fd4 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -28,7 +28,7 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
       instruction_start_(instruction_start) {}
 
 inline CodeEntry* ProfileGenerator::FindEntry(Address address) {
-  CodeEntry* entry = code_map_.FindEntry(address);
+  CodeEntry* entry = code_map_->FindEntry(address);
   if (entry) entry->mark_used();
   return entry;
 }
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index e869f657627674..f5f71846136543 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -19,6 +19,14 @@ void SourcePositionTable::SetPosition(int pc_offset, int line,
                                       int inlining_id) {
   DCHECK_GE(pc_offset, 0);
   DCHECK_GT(line, 0);  // The 1-based number of the source line.
+  // It's possible that we map multiple source positions to a pc_offset in
+  // optimized code. Usually these map to the same line, so there is no
+  // difference here as we only store line number and not line/col in the form
+  // of a script offset. Ignore any subsequent sets to the same offset.
+  if (!pc_offsets_to_lines_.empty() &&
+      pc_offsets_to_lines_.back().pc_offset == pc_offset) {
+    return;
+  }
   // Check that we are inserting in ascending order, so that the vector remains
   // sorted.
   DCHECK(pc_offsets_to_lines_.empty() ||
@@ -404,16 +412,18 @@ ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
 
 ProfileNode* ProfileTree::AddPathFromEnd(const ProfileStackTrace& path,
                                          int src_line, bool update_stats,
-                                         ProfilingMode mode) {
+                                         ProfilingMode mode,
+                                         ContextFilter* context_filter) {
   ProfileNode* node = root_;
   CodeEntry* last_entry = nullptr;
   int parent_line_number = v8::CpuProfileNode::kNoLineNumberInfo;
   for (auto it = path.rbegin(); it != path.rend(); ++it) {
-    if ((*it).code_entry == nullptr) continue;
-    last_entry = (*it).code_entry;
-    node = node->FindOrAddChild((*it).code_entry, parent_line_number);
+    if (it->entry.code_entry == nullptr) continue;
+    if (context_filter && !context_filter->Accept(*it)) continue;
+    last_entry = (*it).entry.code_entry;
+    node = node->FindOrAddChild((*it).entry.code_entry, parent_line_number);
     parent_line_number = mode == ProfilingMode::kCallerLineNumbers
-                             ? (*it).line_number
+                             ? (*it).entry.line_number
                              : v8::CpuProfileNode::kNoLineNumberInfo;
   }
   if (last_entry && last_entry->has_deopt_info()) {
@@ -428,7 +438,6 @@ ProfileNode* ProfileTree::AddPathFromEnd(const ProfileStackTrace& path,
   return node;
 }
 
-
 class Position {
  public:
   explicit Position(ProfileNode* node)
@@ -470,6 +479,21 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
   }
 }
 
+bool ContextFilter::Accept(const ProfileStackFrame& frame) {
+  // If a frame should always be included in profiles (e.g. metadata frames),
+  // skip the context check.
+  if (!frame.filterable) return true;
+
+  // Strip heap object tag from frame.
+  return (frame.native_context & ~kHeapObjectTag) == native_context_address_;
+}
+
+void ContextFilter::OnMoveEvent(Address from_address, Address to_address) {
+  if (native_context_address() != from_address) return;
+
+  set_native_context_address(to_address);
+}
+
 using v8::tracing::TracedValue;
 
 std::atomic<uint32_t> CpuProfile::last_id_;
@@ -488,6 +512,13 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
                    (start_time_ - base::TimeTicks()).InMicroseconds());
   TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                               "Profile", id_, "data", std::move(value));
+
+  if (options_.has_filter_context()) {
+    DisallowHeapAllocation no_gc;
+    i::Address raw_filter_context =
+        reinterpret_cast<i::Address>(options_.raw_filter_context());
+    context_filter_ = base::make_unique<ContextFilter>(raw_filter_context);
+  }
 }
 
 bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) {
@@ -512,11 +543,11 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
                          bool update_stats, base::TimeDelta sampling_interval) {
   if (!CheckSubsample(sampling_interval)) return;
 
-  ProfileNode* top_frame_node =
-      top_down_.AddPathFromEnd(path, src_line, update_stats, options_.mode());
+  ProfileNode* top_frame_node = top_down_.AddPathFromEnd(
+      path, src_line, update_stats, options_.mode(), context_filter_.get());
 
   bool should_record_sample =
-      !timestamp.IsNull() &&
+      !timestamp.IsNull() && timestamp >= start_time_ &&
       (options_.max_samples() == CpuProfilingOptions::kNoSampleLimit ||
        samples_.size() < options_.max_samples());
 
@@ -615,6 +646,8 @@ void CpuProfile::StreamPendingTraceEvents() {
 
 void CpuProfile::FinishProfile() {
   end_time_ = base::TimeTicks::HighResolutionNow();
+  // Stop tracking context movements after profiling stops.
+  context_filter_ = nullptr;
   StreamPendingTraceEvents();
   auto value = TracedValue::Create();
   value->SetDouble("endTime", (end_time_ - base::TimeTicks()).InMicroseconds());
@@ -825,8 +858,20 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
   current_profiles_semaphore_.Signal();
 }
 
-ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
-    : profiles_(profiles) {}
+void CpuProfilesCollection::UpdateNativeContextAddressForCurrentProfiles(
+    Address from, Address to) {
+  current_profiles_semaphore_.Wait();
+  for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
+    if (auto* context_filter = profile->context_filter()) {
+      context_filter->OnMoveEvent(from, to);
+    }
+  }
+  current_profiles_semaphore_.Signal();
+}
+
+ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles,
+                                   CodeMap* code_map)
+    : profiles_(profiles), code_map_(code_map) {}
 
 void ProfileGenerator::RecordTickSample(const TickSample& sample) {
   ProfileStackTrace stack_trace;
@@ -848,9 +893,11 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
       // Don't use PC when in external callback code, as it can point
       // inside a callback's code, and we will erroneously report
       // that a callback calls itself.
-      stack_trace.push_back(
-          {FindEntry(reinterpret_cast<Address>(sample.external_callback_entry)),
-           no_line_info});
+      stack_trace.push_back({{FindEntry(reinterpret_cast<Address>(
+                                  sample.external_callback_entry)),
+                              no_line_info},
+                             reinterpret_cast<Address>(sample.top_context),
+                             true});
     } else {
       Address attributed_pc = reinterpret_cast<Address>(sample.pc);
       CodeEntry* pc_entry = FindEntry(attributed_pc);
@@ -874,7 +921,9 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
           src_line = pc_entry->line_number();
         }
         src_line_not_found = false;
-        stack_trace.push_back({pc_entry, src_line});
+        stack_trace.push_back({{pc_entry, src_line},
+                               reinterpret_cast<Address>(sample.top_context),
+                               true});
 
         if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
             pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
@@ -886,7 +935,9 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
           // 'unresolved' entry.
           if (!sample.has_external_callback) {
             stack_trace.push_back(
-                {CodeEntry::unresolved_entry(), no_line_info});
+                {{CodeEntry::unresolved_entry(), no_line_info},
+                 kNullAddress,
+                 true});
           }
         }
       }
@@ -894,6 +945,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
 
     for (unsigned i = 0; i < sample.frames_count; ++i) {
       Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
+      Address native_context = reinterpret_cast<Address>(sample.contexts[i]);
       CodeEntry* entry = FindEntry(stack_pos);
       int line_number = no_line_info;
       if (entry) {
@@ -905,8 +957,13 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
             entry->GetInlineStack(pc_offset);
         if (inline_stack) {
           int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
-          stack_trace.insert(stack_trace.end(), inline_stack->begin(),
-                             inline_stack->end());
+          for (auto entry : *inline_stack) {
+            // Set the native context of inlined frames to be equal to that of
+            // their parent. This is safe, as functions cannot inline themselves
+            // into a parent from another native context.
+            stack_trace.push_back({entry, native_context, true});
+          }
+
           // This is a bit of a messy hack. The line number for the most-inlined
           // frame (the function at the end of the chain of function calls) has
           // the wrong line number in inline_stack. The actual line number in
@@ -916,7 +973,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
           // inlining_id.
           DCHECK(!inline_stack->empty());
           size_t index = stack_trace.size() - inline_stack->size();
-          stack_trace[index].line_number = most_inlined_frame_line_number;
+          stack_trace[index].entry.line_number = most_inlined_frame_line_number;
         }
         // Skip unresolved frames (e.g. internal frame) and get source line of
         // the first JS caller.
@@ -935,21 +992,22 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
         // so we use it instead of pushing entry to stack_trace.
         if (inline_stack) continue;
       }
-      stack_trace.push_back({entry, line_number});
+      stack_trace.push_back({{entry, line_number}, native_context, true});
     }
   }
 
   if (FLAG_prof_browser_mode) {
     bool no_symbolized_entries = true;
     for (auto e : stack_trace) {
-      if (e.code_entry != nullptr) {
+      if (e.entry.code_entry != nullptr) {
         no_symbolized_entries = false;
         break;
       }
     }
     // If no frames were symbolized, put the VM state entry in.
     if (no_symbolized_entries) {
-      stack_trace.push_back({EntryForVMState(sample.state), no_line_info});
+      stack_trace.push_back(
+          {{EntryForVMState(sample.state), no_line_info}, kNullAddress, false});
     }
   }
 
@@ -958,6 +1016,10 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
                                       sample.sampling_interval);
 }
 
+void ProfileGenerator::UpdateNativeContextAddress(Address from, Address to) {
+  profiles_->UpdateNativeContextAddressForCurrentProfiles(from, to);
+}
+
 CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
   switch (tag) {
     case GC:
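ContextFilter::Accept above compares a frame's native context against the target after clearing the heap-object tag bit that on-heap pointers carry (kHeapObjectTag is 1 in V8). A contained sketch; everything beyond the tag constant is illustrative:

#include <cstdint>

using Address = uintptr_t;
constexpr Address kHeapObjectTag = 1;

struct Frame {
  Address native_context;
  bool filterable;  // metadata frames bypass the filter
};

class ContextFilterSketch {
 public:
  explicit ContextFilterSketch(Address target) : target_(target) {}

  bool Accept(const Frame& frame) const {
    if (!frame.filterable) return true;
    // Strip the heap-object tag before comparing addresses.
    return (frame.native_context & ~kHeapObjectTag) == target_;
  }

  // Keep tracking the context if the GC moves it.
  void OnMoveEvent(Address from, Address to) {
    if (target_ == from) target_ = to;
  }

 private:
  Address target_;
};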
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index b0543c9d7944da..2f7273a08626fc 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -234,7 +234,36 @@ struct CodeEntryAndLineNumber {
   int line_number;
 };
 
-using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;
+struct ProfileStackFrame {
+  CodeEntryAndLineNumber entry;
+  Address native_context;
+  bool filterable;  // If true, the frame should be filtered by context (if a
+                    // filter is present).
+};
+
+typedef std::vector<ProfileStackFrame> ProfileStackTrace;
+
+// Filters stack frames from sources other than a target native context.
+class ContextFilter {
+ public:
+  explicit ContextFilter(Address native_context_address)
+      : native_context_address_(native_context_address) {}
+
+  // Returns true if the stack frame passes a context check.
+  bool Accept(const ProfileStackFrame&);
+
+  // Invoked when a native context has changed address.
+  void OnMoveEvent(Address from_address, Address to_address);
+
+  // Update the context's tracked address based on VM-thread events.
+  void set_native_context_address(Address address) {
+    native_context_address_ = address;
+  }
+  Address native_context_address() const { return native_context_address_; }
+
+ private:
+  Address native_context_address_;
+};
 
 class ProfileTree;
 
@@ -321,7 +350,8 @@ class V8_EXPORT_PRIVATE ProfileTree {
       const ProfileStackTrace& path,
       int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
       bool update_stats = true,
-      ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
+      ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers,
+      ContextFilter* context_filter = nullptr);
   ProfileNode* root() const { return root_; }
   unsigned next_node_id() { return next_node_id_++; }
   unsigned GetFunctionId(const ProfileNode* node);
@@ -389,6 +419,7 @@ class CpuProfile {
   base::TimeTicks start_time() const { return start_time_; }
   base::TimeTicks end_time() const { return end_time_; }
   CpuProfiler* cpu_profiler() const { return profiler_; }
+  ContextFilter* context_filter() const { return context_filter_.get(); }
 
   void UpdateTicksScale();
 
@@ -399,6 +430,7 @@ class CpuProfile {
 
   const char* title_;
   const CpuProfilingOptions options_;
+  std::unique_ptr<ContextFilter> context_filter_;
   base::TimeTicks start_time_;
   base::TimeTicks end_time_;
   std::deque<SampleInfo> samples_;
@@ -477,6 +509,9 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
                                 bool update_stats,
                                 base::TimeDelta sampling_interval);
 
+  // Called from profile generator thread.
+  void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to);
+
   // Limits the number of profiles that can be simultaneously collected.
   static const int kMaxSimultaneousProfiles = 100;
 
@@ -494,18 +529,20 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
 
 class V8_EXPORT_PRIVATE ProfileGenerator {
  public:
-  explicit ProfileGenerator(CpuProfilesCollection* profiles);
+  explicit ProfileGenerator(CpuProfilesCollection* profiles, CodeMap* code_map);
 
   void RecordTickSample(const TickSample& sample);
 
-  CodeMap* code_map() { return &code_map_; }
+  void UpdateNativeContextAddress(Address from, Address to);
+
+  CodeMap* code_map() { return code_map_; }
 
  private:
   CodeEntry* FindEntry(Address address);
   CodeEntry* EntryForVMState(StateTag tag);
 
   CpuProfilesCollection* profiles_;
-  CodeMap code_map_;
+  CodeMap* const code_map_;
 
   DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
 };
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 156c1b8bb01b1f..b00c1f5cfd7ec2 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -177,8 +177,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
           CodeEntry* cached_entry = GetOrInsertCachedEntry(
               &cached_inline_entries, std::move(inline_entry));
 
-          inline_stack.push_back(
-              CodeEntryAndLineNumber{cached_entry, line_number});
+          inline_stack.push_back({cached_entry, line_number});
         }
         DCHECK(!inline_stack.empty());
         inline_stacks.emplace(inlining_id, std::move(inline_stack));
@@ -280,6 +279,13 @@ void ProfilerListener::SetterCallbackEvent(Name name, Address entry_point) {
   DispatchCodeEvent(evt_rec);
 }
 
+void ProfilerListener::NativeContextMoveEvent(Address from, Address to) {
+  CodeEventsContainer evt_rec(CodeEventRecord::NATIVE_CONTEXT_MOVE);
+  evt_rec.NativeContextMoveEventRecord_.from_address = from;
+  evt_rec.NativeContextMoveEventRecord_.to_address = to;
+  DispatchCodeEvent(evt_rec);
+}
+
 Name ProfilerListener::InferScriptName(Name name, SharedFunctionInfo info) {
   if (name.IsString() && String::cast(name).length()) return name;
   if (!info.script().IsScript()) return name;
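NativeContextMoveEvent above packs the two addresses into a CodeEventsContainer and hands it to the dispatcher; ProcessCodeEvent later peels NATIVE_CONTEXT_MOVE off before the generic code-map cases. A toy version of that tagged-union plumbing:

#include <cstdint>

using Address = uintptr_t;

enum class RecordType { kNone, kCodeCreation, kNativeContextMove };

struct NativeContextMoveRecord {
  Address from_address;
  Address to_address;
};

struct EventContainer {
  RecordType type = RecordType::kNone;
  union {
    NativeContextMoveRecord native_context_move;
    // ...one member per record type in the real CodeEventsContainer...
  };
  EventContainer() : native_context_move{} {}
};

inline void Dispatch(const EventContainer&) { /* enqueue on the processor */ }

void EmitNativeContextMove(Address from, Address to) {
  EventContainer rec;
  rec.type = RecordType::kNativeContextMove;
  rec.native_context_move = {from, to};
  Dispatch(rec);
}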
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index 6ca4225e540343..85070d65aaf11d 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -55,6 +55,7 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
   void RegExpCodeCreateEvent(AbstractCode code, String source) override;
   void SetterCallbackEvent(Name name, Address entry_point) override;
   void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+  void NativeContextMoveEvent(Address from, Address to) override;
 
   const char* GetName(Name name) {
     return function_and_resource_names_.GetName(name);
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index b3ea07db34fac3..5c2f2d63ce3400 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -16,6 +16,7 @@
 #include "src/sanitizer/msan.h"
 
 namespace v8 {
+namespace internal {
 namespace {
 
 bool IsSamePage(i::Address ptr1, i::Address ptr2) {
@@ -78,11 +79,6 @@ bool IsNoFrameRegion(i::Address address) {
   return false;
 }
 
-}  // namespace
-
-namespace internal {
-namespace {
-
 #if defined(USE_SIMULATOR)
 class SimulatorHelper {
  public:
@@ -147,22 +143,56 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
 }
 #endif  // USE_SIMULATOR
 
+// Attempts to safely read the native context address out of the Context
+// object at the given address. Returns kNullAddress on failure, i.e. when
+// the context is in an inconsistent state and the read would be unsafe.
+Address ScrapeNativeContextAddress(Heap* heap, Address context_address) {
+  DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
+
+  if (!HAS_STRONG_HEAP_OBJECT_TAG(context_address)) return kNullAddress;
+
+  if (heap->memory_allocator()->IsOutsideAllocatedSpace(context_address))
+    return kNullAddress;
+
+  // Note that once a native context has been assigned to a context, the slot
+  // is no longer mutated except during pointer updates / evictions. Since
+  // pointer updates exclusively occur on the main thread, and we don't record
+  // TickSamples when the main thread's VM state is GC, the only other
+  // situation where the address here would be invalid is if it's being
+  // reassigned -- which isn't possible.
+  int native_context_offset =
+      i::Context::SlotOffset(i::Context::NATIVE_CONTEXT_INDEX);
+  i::Address native_context_slot_address =
+      context_address + native_context_offset;
+
+  // By the prior hypothesis, the indirect native context address should always
+  // be valid.
+  if (heap->memory_allocator()->IsOutsideAllocatedSpace(
+          native_context_slot_address)) {
+    DCHECK(false);
+    return kNullAddress;
+  }
+
+  i::ObjectSlot native_context_slot(native_context_slot_address);
+  i::Object native_context = native_context_slot.Relaxed_Load();
+
+  return native_context.ptr();
+}
+
 }  // namespace
-}  // namespace internal
 
-//
-// StackTracer implementation
-//
 DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
                                    const RegisterState& reg_state,
                                    RecordCEntryFrame record_c_entry_frame,
                                    bool update_stats,
-                                   bool use_simulator_reg_state) {
+                                   bool use_simulator_reg_state,
+                                   base::TimeDelta sampling_interval) {
   this->update_stats = update_stats;
   SampleInfo info;
   RegisterState regs = reg_state;
   if (!GetStackSample(v8_isolate, &regs, record_c_entry_frame, stack,
-                      kMaxFramesCount, &info, use_simulator_reg_state)) {
+                      kMaxFramesCount, &info, use_simulator_reg_state,
+                      contexts)) {
     // It is executing JS but failed to collect a stack trace.
     // Mark the sample as spoiled.
     pc = nullptr;
@@ -173,6 +203,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
   pc = regs.pc;
   frames_count = static_cast<unsigned>(info.frames_count);
   has_external_callback = info.external_callback_entry != nullptr;
+  top_context = info.top_context;
   if (has_external_callback) {
     external_callback_entry = info.external_callback_entry;
   } else if (frames_count) {
@@ -191,17 +222,20 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
   } else {
     tos = nullptr;
   }
+  this->sampling_interval = sampling_interval;
+  timestamp = base::TimeTicks::HighResolutionNow();
 }
 
 bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
                                 RecordCEntryFrame record_c_entry_frame,
                                 void** frames, size_t frames_limit,
                                 v8::SampleInfo* sample_info,
-                                bool use_simulator_reg_state) {
+                                bool use_simulator_reg_state, void** contexts) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
   sample_info->frames_count = 0;
   sample_info->vm_state = isolate->current_vm_state();
   sample_info->external_callback_entry = nullptr;
+  sample_info->top_context = nullptr;
   if (sample_info->vm_state == GC) return true;
 
   i::Address js_entry_sp = isolate->js_entry_sp();
@@ -229,7 +263,7 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
   i::ExternalCallbackScope* scope = isolate->external_callback_scope();
   i::Address handler = i::Isolate::handler(isolate->thread_local_top());
   // If there is a handler on top of the external callback scope then
-  // we have already entrered JavaScript again and the external callback
+  // we have already entered JavaScript again and the external callback
   // is not the top function.
   if (scope && scope->scope_address() < handler) {
     i::Address* external_callback_entry_ptr =
@@ -245,23 +279,62 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
                                reinterpret_cast<i::Address>(regs->sp),
                                reinterpret_cast<i::Address>(regs->lr),
                                js_entry_sp);
+
+  i::Address top_context_address = it.top_context_address();
+  if (top_context_address != i::kNullAddress) {
+    sample_info->top_context = reinterpret_cast<void*>(
+        i::ScrapeNativeContextAddress(isolate->heap(), top_context_address));
+  } else {
+    sample_info->top_context = nullptr;
+  }
+
   if (it.done()) return true;
 
   size_t i = 0;
   if (record_c_entry_frame == kIncludeCEntryFrame &&
       (it.top_frame_type() == internal::StackFrame::EXIT ||
        it.top_frame_type() == internal::StackFrame::BUILTIN_EXIT)) {
-    frames[i++] = reinterpret_cast<void*>(isolate->c_function());
+    frames[i] = reinterpret_cast<void*>(isolate->c_function());
+    if (contexts) contexts[i] = sample_info->top_context;
+    i++;
   }
+
+  // If we couldn't get a context address from the top frame due to execution
+  // being in a callback, borrow it from the next context on the stack.
+  bool borrows_top_context = it.top_frame_type() == i::StackFrame::EXIT ||
+                             it.top_frame_type() == i::StackFrame::BUILTIN_EXIT;
+
   i::RuntimeCallTimer* timer =
       isolate->counters()->runtime_call_stats()->current_timer();
   for (; !it.done() && i < frames_limit; it.Advance()) {
     while (timer && reinterpret_cast<i::Address>(timer) < it.frame()->fp() &&
            i < frames_limit) {
+      if (contexts) contexts[i] = nullptr;
       frames[i++] = reinterpret_cast<void*>(timer->counter());
       timer = timer->parent();
     }
     if (i == frames_limit) break;
+
+    // Attempt to read the native context associated with the frame from the
+    // heap for standard frames.
+    if (it.frame()->is_standard() && (contexts || borrows_top_context)) {
+      i::Address context_address = base::Memory<i::Address>(
+          it.frame()->fp() + i::StandardFrameConstants::kContextOffset);
+      i::Address native_context_address =
+          i::ScrapeNativeContextAddress(isolate->heap(), context_address);
+      if (contexts)
+        contexts[i] = reinterpret_cast<void*>(native_context_address);
+
+      if (borrows_top_context) {
+        DCHECK(!sample_info->top_context);
+        sample_info->top_context =
+            reinterpret_cast<void*>(native_context_address);
+      }
+    } else if (contexts) {
+      contexts[i] = nullptr;
+    }
+    borrows_top_context = false;
+
     if (it.frame()->is_interpreted()) {
       // For interpreted frames use the bytecode array pointer as the pc.
       i::InterpretedFrame* frame =
@@ -290,20 +363,6 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
   return true;
 }
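
The |contexts| out-parameter added above is filled in lock-step with |frames|. Below is a minimal sketch of a caller, assuming the V8-internal headers and a register state captured while the JS thread is paused (as the GetStackSample contract requires); the CollectSample wrapper and the AttributeFrameToContext sink are hypothetical names, not part of this patch.

#include "src/profiler/tick-sample.h"

namespace i = v8::internal;

// Hypothetical sink that charges a pc to its native context.
void AttributeFrameToContext(void* pc, void* native_context);

void CollectSample(i::Isolate* isolate, v8::RegisterState* regs) {
  void* frames[i::TickSample::kMaxFramesCount];
  void* contexts[i::TickSample::kMaxFramesCount];  // parallel to |frames|
  v8::SampleInfo info;
  if (!i::TickSample::GetStackSample(
          isolate, regs, i::TickSample::kIncludeCEntryFrame, frames,
          i::TickSample::kMaxFramesCount, &info,
          /*use_simulator_reg_state=*/true, contexts)) {
    return;
  }
  for (size_t n = 0; n < info.frames_count; n++) {
    // contexts[n] is the native context scraped for frames[n], or nullptr
    // for entries (e.g. runtime-call-stats timers) that have no JS context.
    AttributeFrameToContext(frames[n], contexts[n]);
  }
}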
 
-namespace internal {
-
-void TickSample::Init(Isolate* isolate, const v8::RegisterState& state,
-                      RecordCEntryFrame record_c_entry_frame, bool update_stats,
-                      bool use_simulator_reg_state,
-                      base::TimeDelta sampling_interval) {
-  v8::TickSample::Init(reinterpret_cast<v8::Isolate*>(isolate), state,
-                       record_c_entry_frame, update_stats,
-                       use_simulator_reg_state);
-  this->sampling_interval = sampling_interval;
-  if (pc == nullptr) return;
-  timestamp = base::TimeTicks::HighResolutionNow();
-}
-
 void TickSample::print() const {
   PrintF("TickSample: at %p\n", this);
   PrintF(" - state: %s\n", StateToString(state));
diff --git a/deps/v8/src/profiler/tick-sample.h b/deps/v8/src/profiler/tick-sample.h
index ba78c923c4c65d..37ae1e9d06e8ea 100644
--- a/deps/v8/src/profiler/tick-sample.h
+++ b/deps/v8/src/profiler/tick-sample.h
@@ -5,7 +5,7 @@
 #ifndef V8_PROFILER_TICK_SAMPLE_H_
 #define V8_PROFILER_TICK_SAMPLE_H_
 
-#include "include/v8-profiler.h"
+#include "include/v8.h"
 #include "src/base/platform/time.h"
 #include "src/common/globals.h"
 
@@ -14,15 +14,83 @@ namespace internal {
 
 class Isolate;
 
-struct TickSample : public v8::TickSample {
+// TickSample captures the information collected for each sample.
+struct V8_EXPORT TickSample {
+  // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
+  // include the runtime function we're calling. Externally exposed tick
+  // samples don't care.
+  enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
+
+  TickSample()
+      : state(OTHER),
+        pc(nullptr),
+        external_callback_entry(nullptr),
+        frames_count(0),
+        has_external_callback(false),
+        update_stats(true) {}
+
+  /**
+   * Initialize a tick sample from the isolate.
+   * \param isolate The isolate.
+   * \param state Register state.
+   * \param record_c_entry_frame Include or skip the runtime function.
+   * \param update_stats Whether to add the sample to the aggregated stats.
+   * \param use_simulator_reg_state When set to true and V8 is running under a
+   *                                simulator, the method will use the simulator
+   *                                register state rather than the one provided
+   *                                with |state| argument. Otherwise the method
+   *                                will use provided register |state| as is.
+   */
   void Init(Isolate* isolate, const v8::RegisterState& state,
             RecordCEntryFrame record_c_entry_frame, bool update_stats,
             bool use_simulator_reg_state = true,
             base::TimeDelta sampling_interval = base::TimeDelta());
-  base::TimeTicks timestamp;
-  base::TimeDelta sampling_interval;  // Sampling interval used to capture.
+  /**
+   * Get a call stack sample from the isolate.
+   * \param isolate The isolate.
+   * \param state Register state.
+   * \param record_c_entry_frame Include or skip the runtime function.
+   * \param frames Caller-allocated buffer to store stack frames.
+   * \param frames_limit Maximum number of frames to capture. The buffer must
+   *                     be large enough to hold the number of frames.
+   * \param sample_info The sample info is filled in by the function and
+   *                    provides the number of actually captured stack frames
+   *                    and the current VM state.
+   * \param use_simulator_reg_state When set to true and V8 is running under a
+   *                                simulator, the method will use the simulator
+   *                                register state rather than the one provided
+   *                                with |state| argument. Otherwise the method
+   *                                will use provided register |state| as is.
+   * \note GetStackSample is thread and signal safe and should only be called
+   *                      when the JS thread is paused or interrupted.
+   *                      Otherwise the behavior is undefined.
+   */
+  static bool GetStackSample(Isolate* isolate, v8::RegisterState* state,
+                             RecordCEntryFrame record_c_entry_frame,
+                             void** frames, size_t frames_limit,
+                             v8::SampleInfo* sample_info,
+                             bool use_simulator_reg_state = true,
+                             void** contexts = nullptr);
 
   void print() const;
+
+  StateTag state;  // The state of the VM.
+  void* pc;        // Instruction pointer.
+  union {
+    void* tos;  // Top stack value (*sp).
+    void* external_callback_entry;
+  };
+  static const unsigned kMaxFramesCountLog2 = 8;
+  static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
+  void* stack[kMaxFramesCount];     // Call stack.
+  void* contexts[kMaxFramesCount];  // Stack of associated native contexts.
+  void* top_context = nullptr;      // Address of the incumbent native context.
+  unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
+  bool has_external_callback : 1;
+  bool update_stats : 1;  // Whether the sample should update aggregated stats.
+
+  base::TimeTicks timestamp;
+  base::TimeDelta sampling_interval;  // Sampling interval used to capture.
 };
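
The frames_count bitfield is sized to exactly represent kMaxFramesCount. A self-contained illustration of that arithmetic, with the constants re-declared locally so it compiles on its own:

#include <cassert>

static const unsigned kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;  // 255

struct SampleModel {
  unsigned frames_count : kMaxFramesCountLog2;  // 8 bits: values 0..255
};

int main() {
  SampleModel s;
  s.frames_count = kMaxFramesCount;  // largest count the field can report
  assert(s.frames_count == 255);
  return 0;
}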
 
 }  // namespace internal
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 8b462cb03c9c07..2f81b6de86d931 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -40,6 +40,9 @@ namespace internal {
  * Each call to a public method should retain this convention.
  *
  * The stack will have the following structure:
+ *  - fp[56]  Address regexp     (address of the JSRegExp object; unused in
+ *                                native code, passed to match the signature
+ *                                of the interpreter)
  *  - fp[52]  Isolate* isolate   (address of the current isolate)
  *  - fp[48]  direct_call        (if 1, direct call from JavaScript code,
  *                                if 0, call through the runtime system).
@@ -83,7 +86,8 @@ namespace internal {
  *              int num_capture_registers,
  *              byte* stack_area_base,
  *              bool direct_call = false,
- *              Isolate* isolate);
+ *              Isolate* isolate,
+ *              Address regexp);
  * The call is performed by NativeRegExpMacroAssembler::Execute()
  * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
  */
@@ -172,15 +176,14 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
   BranchOrBacktrack(gt, on_greater);
 }
 
-
-void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
+void RegExpMacroAssemblerARM::CheckAtStart(int cp_offset, Label* on_at_start) {
   __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
-  __ add(r0, current_input_offset(), Operand(-char_size()));
+  __ add(r0, current_input_offset(),
+         Operand(-char_size() + cp_offset * char_size()));
   __ cmp(r0, r1);
   BranchOrBacktrack(eq, on_at_start);
 }
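
The new cp_offset parameter shifts the at-start comparison: the generated code asks whether the character cp_offset positions from the current one sits at string index 0. A worked model in plain ints, assuming the convention suggested by kStringStartMinusOne above (positions tracked as negative byte offsets from the end of the input), with char_size = 1 for one-byte strings:

#include <cassert>

int main() {
  const int char_size = 1;
  const int length = 10;                    // input string of 10 characters
  const int start_minus_one = -length - 1;  // sentinel: one before index 0
  // Current position at index 3, so the offset from the end is -(10 - 3).
  int current_input_offset = -(length - 3);
  // CheckAtStart(cp_offset): does index (3 + cp_offset) equal 0?
  auto at_start = [&](int cp_offset) {
    return current_input_offset - char_size + cp_offset * char_size ==
           start_minus_one;
  };
  assert(at_start(-3));  // index 3 + (-3) == 0, i.e. at start
  assert(!at_start(0));  // index 3 != 0
  return 0;
}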
 
-
 void RegExpMacroAssemblerARM::CheckNotAtStart(int cp_offset,
                                               Label* on_not_at_start) {
   __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
@@ -647,7 +650,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   Label stack_ok;
 
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ mov(r0, Operand(stack_limit));
   __ ldr(r0, MemOperand(r0));
   __ sub(r0, sp, r0, SetCC);
@@ -929,15 +932,19 @@ RegExpMacroAssembler::IrregexpImplementation
   return kARMImplementation;
 }
 
+void RegExpMacroAssemblerARM::LoadCurrentCharacterImpl(int cp_offset,
+                                                       Label* on_end_of_input,
+                                                       bool check_bounds,
+                                                       int characters,
+                                                       int eats_at_least) {
+  // It's possible to preload a small number of characters when each success
+  // path requires a large number of characters, but not the reverse.
+  DCHECK_GE(eats_at_least, characters);
 
-void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
-                                                   Label* on_end_of_input,
-                                                   bool check_bounds,
-                                                   int characters) {
   DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
   if (check_bounds) {
     if (cp_offset >= 0) {
-      CheckPosition(cp_offset + characters - 1, on_end_of_input);
+      CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
     } else {
       CheckPosition(cp_offset, on_end_of_input);
     }
@@ -945,7 +952,6 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
   LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
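
Why checking cp_offset + eats_at_least - 1 is sound: when every success path from this point consumes at least eats_at_least characters, a single bounds check against the furthest such character subsumes the check for the (fewer) characters actually preloaded here. A standalone model of that implication:

#include <cassert>

// True when the characters at [cp_offset, cp_offset + n) are in bounds.
static bool InBounds(int pos, int cp_offset, int n, int length) {
  return pos + cp_offset + n - 1 < length;
}

int main() {
  const int length = 16, pos = 10, cp_offset = 0;
  const int characters = 2, eats_at_least = 5;
  // The widened check implies the narrower one when
  // eats_at_least >= characters, since InBounds is harder to satisfy
  // as n grows.
  if (InBounds(pos, cp_offset, eats_at_least, length)) {
    assert(InBounds(pos, cp_offset, characters, length));
  }
  return 0;
}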
 
-
 void RegExpMacroAssemblerARM::PopCurrentPosition() {
   Pop(current_input_offset());
 }
@@ -1109,7 +1115,8 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
   return NativeRegExpMacroAssembler::CheckStackGuardState(
       frame_entry<Isolate*>(re_frame, kIsolate),
       frame_entry<int>(re_frame, kStartIndex),
-      frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
+      static_cast<RegExp::CallOrigin>(frame_entry<int>(re_frame, kDirectCall)),
+      return_address, re_code,
       frame_entry_address<Address>(re_frame, kInputString),
       frame_entry_address<const byte*>(re_frame, kInputStart),
       frame_entry_address<const byte*>(re_frame, kInputEnd));
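
The kDirectCall frame slot was previously collapsed to a bool with == 1; it is now reinterpreted as RegExp::CallOrigin so additional origins can be represented. A standalone model of the cast; the enumerator names and values here mirror the old 0/1 encoding and are an assumption, not the actual V8 declaration:

#include <cassert>

enum class CallOrigin : int { kFromRuntime = 0, kFromJs = 1 };  // assumed

int main() {
  int frame_slot = 1;  // value as stored in the generated frame
  CallOrigin origin = static_cast<CallOrigin>(frame_slot);
  assert(origin == CallOrigin::kFromJs);
  return 0;
}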
@@ -1193,7 +1200,7 @@ void RegExpMacroAssemblerARM::Pop(Register target) {
 void RegExpMacroAssemblerARM::CheckPreemption() {
   // Check for preemption.
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ mov(r0, Operand(stack_limit));
   __ ldr(r0, MemOperand(r0));
   __ cmp(sp, r0);
@@ -1203,7 +1210,7 @@ void RegExpMacroAssemblerARM::CheckPreemption() {
 
 void RegExpMacroAssemblerARM::CheckStackLimit() {
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(isolate());
+      ExternalReference::address_of_regexp_stack_limit_address(isolate());
   __ mov(r0, Operand(stack_limit));
   __ ldr(r0, MemOperand(r0));
   __ cmp(backtrack_stackpointer(), Operand(r0));
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 9e95f8e1f26c92..9b21c5a11c6e26 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
   virtual void AdvanceRegister(int reg, int by);
   virtual void Backtrack();
   virtual void Bind(Label* label);
-  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckAtStart(int cp_offset, Label* on_at_start);
   virtual void CheckCharacter(unsigned c, Label* on_equal);
   virtual void CheckCharacterAfterAnd(unsigned c,
                                       unsigned mask,
@@ -67,10 +67,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
   virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
   virtual void IfRegisterEqPos(int reg, Label* if_eq);
   virtual IrregexpImplementation Implementation();
-  virtual void LoadCurrentCharacter(int cp_offset,
-                                    Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1);
+  virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                        bool check_bounds, int characters,
+                                        int eats_at_least);
   virtual void PopCurrentPosition();
   virtual void PopRegister(int register_index);
   virtual void PushBacktrack(Label* label);
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index b299ad05356b22..9e00063487fb25 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -55,7 +55,10 @@ namespace internal {
  *              (as referred to in
  *              the code)
  *
- *  - fp[96]   isolate            Address of the current isolate.
+ *  - fp[104]  Address regexp      Address of the JSRegExp object. Unused in
+ *                                 native code, passed to match the signature
+ *                                 of the interpreter.
+ *  - fp[96]   isolate             Address of the current isolate.
  *  ^^^ sp when called ^^^
  *  - fp[88]    lr                 Return from the RegExp code.
  *  - fp[80]    r29                Old frame pointer (CalleeSaved).
@@ -93,7 +96,8 @@ namespace internal {
  *              int num_capture_registers,
  *              byte* stack_area_base,
  *              bool direct_call = false,
- *              Isolate* isolate);
+ *              Isolate* isolate,
+ *              Address regexp);
  * The call is performed by NativeRegExpMacroAssembler::Execute()
  * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
  */
@@ -201,14 +205,14 @@ void RegExpMacroAssemblerARM64::CheckCharacterGT(uc16 limit,
   CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
 }
 
-
-void RegExpMacroAssemblerARM64::CheckAtStart(Label* on_at_start) {
-  __ Add(w10, current_input_offset(), Operand(-char_size()));
+void RegExpMacroAssemblerARM64::CheckAtStart(int cp_offset,
+                                             Label* on_at_start) {
+  __ Add(w10, current_input_offset(),
+         Operand(-char_size() + cp_offset * char_size()));
   __ Cmp(w10, string_start_minus_one());
   BranchOrBacktrack(eq, on_at_start);
 }
 
-
 void RegExpMacroAssemblerARM64::CheckNotAtStart(int cp_offset,
                                                 Label* on_not_at_start) {
   __ Add(w10, current_input_offset(),
@@ -750,7 +754,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
   Label stack_ok;
 
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ Mov(x10, stack_limit);
   __ Ldr(x10, MemOperand(x10));
   __ Subs(x10, sp, x10);
@@ -1106,18 +1110,22 @@ RegExpMacroAssembler::IrregexpImplementation
   return kARM64Implementation;
 }
 
+void RegExpMacroAssemblerARM64::LoadCurrentCharacterImpl(int cp_offset,
+                                                         Label* on_end_of_input,
+                                                         bool check_bounds,
+                                                         int characters,
+                                                         int eats_at_least) {
+  // It's possible to preload a small number of characters when each success
+  // path requires a large number of characters, but not the reverse.
+  DCHECK_GE(eats_at_least, characters);
 
-void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
-                                                     Label* on_end_of_input,
-                                                     bool check_bounds,
-                                                     int characters) {
   // TODO(pielan): Make sure long strings are caught before this, and not
   // just asserted in debug mode.
   // Be sane! (And ensure that an int32_t can be used to index the string)
   DCHECK(cp_offset < (1<<30));
   if (check_bounds) {
     if (cp_offset >= 0) {
-      CheckPosition(cp_offset + characters - 1, on_end_of_input);
+      CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
     } else {
       CheckPosition(cp_offset, on_end_of_input);
     }
@@ -1125,7 +1133,6 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
   LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
 
-
 void RegExpMacroAssemblerARM64::PopCurrentPosition() {
   Pop(current_input_offset());
 }
@@ -1326,8 +1333,9 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(
   Code re_code = Code::cast(Object(raw_code));
   return NativeRegExpMacroAssembler::CheckStackGuardState(
       frame_entry<Isolate*>(re_frame, kIsolate), start_index,
-      frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
-      frame_entry_address<Address>(re_frame, kInput), input_start, input_end);
+      static_cast<RegExp::CallOrigin>(frame_entry<int>(re_frame, kDirectCall)),
+      return_address, re_code, frame_entry_address<Address>(re_frame, kInput),
+      input_start, input_end);
 }
 
 
@@ -1448,7 +1456,7 @@ void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
 void RegExpMacroAssemblerARM64::CheckPreemption() {
   // Check for preemption.
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ Mov(x10, stack_limit);
   __ Ldr(x10, MemOperand(x10));
   __ Cmp(sp, x10);
@@ -1458,7 +1466,7 @@ void RegExpMacroAssemblerARM64::CheckPreemption() {
 
 void RegExpMacroAssemblerARM64::CheckStackLimit() {
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(isolate());
+      ExternalReference::address_of_regexp_stack_limit_address(isolate());
   __ Mov(x10, stack_limit);
   __ Ldr(x10, MemOperand(x10));
   __ Cmp(backtrack_stackpointer(), x10);
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index ef83f9e43cc6a9..6154c6cf603ed2 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -24,7 +24,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
   virtual void AdvanceRegister(int reg, int by);
   virtual void Backtrack();
   virtual void Bind(Label* label);
-  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckAtStart(int cp_offset, Label* on_at_start);
   virtual void CheckCharacter(unsigned c, Label* on_equal);
   virtual void CheckCharacterAfterAnd(unsigned c,
                                       unsigned mask,
@@ -72,10 +72,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
   virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
   virtual void IfRegisterEqPos(int reg, Label* if_eq);
   virtual IrregexpImplementation Implementation();
-  virtual void LoadCurrentCharacter(int cp_offset,
-                                    Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1);
+  virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                        bool check_bounds, int characters,
+                                        int eats_at_least);
   virtual void PopCurrentPosition();
   virtual void PopRegister(int register_index);
   virtual void PushBacktrack(Label* label);
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index eb42c232159b93..5ee7b909883548 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -34,6 +34,9 @@ namespace internal {
  *
  * Each call to a public method should retain this convention.
  * The stack will have the following structure:
+ *       - Address regexp       (address of the JSRegExp object; unused in
+ *                               native code, passed to match the signature
+ *                               of the interpreter)
  *       - Isolate* isolate     (address of the current isolate)
  *       - direct_call          (if 1, direct call from JavaScript code, if 0
  *                               call through the runtime system)
@@ -73,7 +76,8 @@ namespace internal {
  *              int num_capture_registers,
  *              byte* stack_area_base,
  *              bool direct_call = false,
- *              Isolate* isolate);
+ *              Isolate* isolate,
+ *              Address regexp);
  */
 
 #define __ ACCESS_MASM(masm_)
@@ -161,14 +165,12 @@ void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
   BranchOrBacktrack(greater, on_greater);
 }
 
-
-void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
-  __ lea(eax, Operand(edi, -char_size()));
+void RegExpMacroAssemblerIA32::CheckAtStart(int cp_offset, Label* on_at_start) {
+  __ lea(eax, Operand(edi, -char_size() + cp_offset * char_size()));
   __ cmp(eax, Operand(ebp, kStringStartMinusOne));
   BranchOrBacktrack(equal, on_at_start);
 }
 
-
 void RegExpMacroAssemblerIA32::CheckNotAtStart(int cp_offset,
                                                Label* on_not_at_start) {
   __ lea(eax, Operand(edi, -char_size() + cp_offset * char_size()));
@@ -684,7 +686,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
   Label stack_ok;
 
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ mov(ecx, esp);
   __ sub(ecx, StaticVariable(stack_limit));
   // Handle it if the stack pointer is already below the stack limit.
@@ -971,15 +973,19 @@ RegExpMacroAssembler::IrregexpImplementation
   return kIA32Implementation;
 }
 
+void RegExpMacroAssemblerIA32::LoadCurrentCharacterImpl(int cp_offset,
+                                                        Label* on_end_of_input,
+                                                        bool check_bounds,
+                                                        int characters,
+                                                        int eats_at_least) {
+  // It's possible to preload a small number of characters when each success
+  // path requires a large number of characters, but not the reverse.
+  DCHECK_GE(eats_at_least, characters);
 
-void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
-                                                    Label* on_end_of_input,
-                                                    bool check_bounds,
-                                                    int characters) {
   DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
   if (check_bounds) {
     if (cp_offset >= 0) {
-      CheckPosition(cp_offset + characters - 1, on_end_of_input);
+      CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
     } else {
       CheckPosition(cp_offset, on_end_of_input);
     }
@@ -987,7 +993,6 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
   LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
 
-
 void RegExpMacroAssemblerIA32::PopCurrentPosition() {
   Pop(edi);
 }
@@ -1120,7 +1125,8 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
   return NativeRegExpMacroAssembler::CheckStackGuardState(
       frame_entry<Isolate*>(re_frame, kIsolate),
       frame_entry<int>(re_frame, kStartIndex),
-      frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
+      static_cast<RegExp::CallOrigin>(frame_entry<int>(re_frame, kDirectCall)),
+      return_address, re_code,
       frame_entry_address<Address>(re_frame, kInputString),
       frame_entry_address<const byte*>(re_frame, kInputStart),
       frame_entry_address<const byte*>(re_frame, kInputEnd));
@@ -1214,7 +1220,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
   // Check for preemption.
   Label no_preempt;
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ cmp(esp, StaticVariable(stack_limit));
   __ j(above, &no_preempt);
 
@@ -1227,7 +1233,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
 void RegExpMacroAssemblerIA32::CheckStackLimit() {
   Label no_stack_overflow;
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(isolate());
+      ExternalReference::address_of_regexp_stack_limit_address(isolate());
   __ cmp(backtrack_stackpointer(), StaticVariable(stack_limit));
   __ j(above, &no_stack_overflow);
 
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 914552cc934a4e..3464d81facd595 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
   virtual void AdvanceRegister(int reg, int by);
   virtual void Backtrack();
   virtual void Bind(Label* label);
-  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckAtStart(int cp_offset, Label* on_at_start);
   virtual void CheckCharacter(uint32_t c, Label* on_equal);
   virtual void CheckCharacterAfterAnd(uint32_t c,
                                       uint32_t mask,
@@ -66,10 +66,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
   virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
   virtual void IfRegisterEqPos(int reg, Label* if_eq);
   virtual IrregexpImplementation Implementation();
-  virtual void LoadCurrentCharacter(int cp_offset,
-                                    Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1);
+  virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                        bool check_bounds, int characters,
+                                        int eats_at_least);
   virtual void PopCurrentPosition();
   virtual void PopRegister(int register_index);
   virtual void PushBacktrack(Label* label);
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index e8104ced7e1c92..8d2800f004e909 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -178,9 +178,10 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
 }
 
 
-void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
+void RegExpMacroAssemblerMIPS::CheckAtStart(int cp_offset, Label* on_at_start) {
   __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
-  __ Addu(a0, current_input_offset(), Operand(-char_size()));
+  __ Addu(a0, current_input_offset(),
+          Operand(-char_size() + cp_offset * char_size()));
   BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
 }
 
@@ -647,7 +648,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
     Label stack_ok;
 
     ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(masm_->isolate());
+        ExternalReference::address_of_jslimit(masm_->isolate());
     __ li(a0, Operand(stack_limit));
     __ lw(a0, MemOperand(a0));
     __ Subu(a0, sp, a0);
@@ -946,15 +947,19 @@ RegExpMacroAssembler::IrregexpImplementation
   return kMIPSImplementation;
 }
 
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacterImpl(int cp_offset,
+                                                        Label* on_end_of_input,
+                                                        bool check_bounds,
+                                                        int characters,
+                                                        int eats_at_least) {
+  // It's possible to preload a small number of characters when each success
+  // path requires a large number of characters, but not the reverse.
+  DCHECK_GE(eats_at_least, characters);
 
-void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
-                                                    Label* on_end_of_input,
-                                                    bool check_bounds,
-                                                    int characters) {
   DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works).
   if (check_bounds) {
     if (cp_offset >= 0) {
-      CheckPosition(cp_offset + characters - 1, on_end_of_input);
+      CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
     } else {
       CheckPosition(cp_offset, on_end_of_input);
     }
@@ -962,7 +967,6 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
   LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
 
-
 void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
   Pop(current_input_offset());
 }
@@ -1176,7 +1180,8 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
   return NativeRegExpMacroAssembler::CheckStackGuardState(
       frame_entry<Isolate*>(re_frame, kIsolate),
       frame_entry<int>(re_frame, kStartIndex),
-      frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
+      static_cast<RegExp::CallOrigin>(frame_entry<int>(re_frame, kDirectCall)),
+      return_address, re_code,
       frame_entry_address<Address>(re_frame, kInputString),
       frame_entry_address<const byte*>(re_frame, kInputStart),
       frame_entry_address<const byte*>(re_frame, kInputEnd));
@@ -1267,7 +1272,7 @@ void RegExpMacroAssemblerMIPS::Pop(Register target) {
 void RegExpMacroAssemblerMIPS::CheckPreemption() {
   // Check for preemption.
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm_->isolate());
+      ExternalReference::address_of_jslimit(masm_->isolate());
   __ li(a0, Operand(stack_limit));
   __ lw(a0, MemOperand(a0));
   SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
@@ -1276,7 +1281,8 @@ void RegExpMacroAssemblerMIPS::CheckPreemption() {
 
 void RegExpMacroAssemblerMIPS::CheckStackLimit() {
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+      ExternalReference::address_of_regexp_stack_limit_address(
+          masm_->isolate());
 
   __ li(a0, Operand(stack_limit));
   __ lw(a0, MemOperand(a0));
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index b785910466e43f..084436bbbd455e 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
   virtual void AdvanceRegister(int reg, int by);
   virtual void Backtrack();
   virtual void Bind(Label* label);
-  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckAtStart(int cp_offset, Label* on_at_start);
   virtual void CheckCharacter(uint32_t c, Label* on_equal);
   virtual void CheckCharacterAfterAnd(uint32_t c,
                                       uint32_t mask,
@@ -67,10 +67,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
   virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
   virtual void IfRegisterEqPos(int reg, Label* if_eq);
   virtual IrregexpImplementation Implementation();
-  virtual void LoadCurrentCharacter(int cp_offset,
-                                    Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1);
+  virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                        bool check_bounds, int characters,
+                                        int eats_at_least);
   virtual void PopCurrentPosition();
   virtual void PopRegister(int register_index);
   virtual void PushBacktrack(Label* label);
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 239cc87ae88e6f..2d5402ebdbc298 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -214,9 +214,10 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
 }
 
 
-void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
+void RegExpMacroAssemblerMIPS::CheckAtStart(int cp_offset, Label* on_at_start) {
   __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
-  __ Daddu(a0, current_input_offset(), Operand(-char_size()));
+  __ Daddu(a0, current_input_offset(),
+           Operand(-char_size() + cp_offset * char_size()));
   BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
 }
 
@@ -683,7 +684,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
     Label stack_ok;
 
     ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(masm_->isolate());
+        ExternalReference::address_of_jslimit(masm_->isolate());
     __ li(a0, Operand(stack_limit));
     __ Ld(a0, MemOperand(a0));
     __ Dsubu(a0, sp, a0);
@@ -983,15 +984,19 @@ RegExpMacroAssembler::IrregexpImplementation
   return kMIPSImplementation;
 }
 
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacterImpl(int cp_offset,
+                                                        Label* on_end_of_input,
+                                                        bool check_bounds,
+                                                        int characters,
+                                                        int eats_at_least) {
+  // It's possible to preload a small number of characters when each success
+  // path requires a large number of characters, but not the reverse.
+  DCHECK_GE(eats_at_least, characters);
 
-void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
-                                                    Label* on_end_of_input,
-                                                    bool check_bounds,
-                                                    int characters) {
   DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works).
   if (check_bounds) {
     if (cp_offset >= 0) {
-      CheckPosition(cp_offset + characters - 1, on_end_of_input);
+      CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
     } else {
       CheckPosition(cp_offset, on_end_of_input);
     }
@@ -999,7 +1004,6 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
   LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
 
-
 void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
   Pop(current_input_offset());
 }
@@ -1213,7 +1217,9 @@ int64_t RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
   return NativeRegExpMacroAssembler::CheckStackGuardState(
       frame_entry<Isolate*>(re_frame, kIsolate),
       static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
-      frame_entry<int64_t>(re_frame, kDirectCall) == 1, return_address, re_code,
+      static_cast<RegExp::CallOrigin>(
+          frame_entry<int64_t>(re_frame, kDirectCall)),
+      return_address, re_code,
       frame_entry_address<Address>(re_frame, kInputString),
       frame_entry_address<const byte*>(re_frame, kInputStart),
       frame_entry_address<const byte*>(re_frame, kInputEnd));
@@ -1304,7 +1310,7 @@ void RegExpMacroAssemblerMIPS::Pop(Register target) {
 void RegExpMacroAssemblerMIPS::CheckPreemption() {
   // Check for preemption.
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm_->isolate());
+      ExternalReference::address_of_jslimit(masm_->isolate());
   __ li(a0, Operand(stack_limit));
   __ Ld(a0, MemOperand(a0));
   SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
@@ -1313,7 +1319,8 @@ void RegExpMacroAssemblerMIPS::CheckPreemption() {
 
 void RegExpMacroAssemblerMIPS::CheckStackLimit() {
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+      ExternalReference::address_of_regexp_stack_limit_address(
+          masm_->isolate());
 
   __ li(a0, Operand(stack_limit));
   __ Ld(a0, MemOperand(a0));
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index d24735d08ea168..9189a6a72d7141 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
   virtual void AdvanceRegister(int reg, int by);
   virtual void Backtrack();
   virtual void Bind(Label* label);
-  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckAtStart(int cp_offset, Label* on_at_start);
   virtual void CheckCharacter(uint32_t c, Label* on_equal);
   virtual void CheckCharacterAfterAnd(uint32_t c,
                                       uint32_t mask,
@@ -67,10 +67,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
   virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
   virtual void IfRegisterEqPos(int reg, Label* if_eq);
   virtual IrregexpImplementation Implementation();
-  virtual void LoadCurrentCharacter(int cp_offset,
-                                    Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1);
+  virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                        bool check_bounds, int characters,
+                                        int eats_at_least);
   virtual void PopCurrentPosition();
   virtual void PopRegister(int register_index);
   virtual void PushBacktrack(Label* label);
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index bce612e66f837a..13b5c85605e7a5 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -189,15 +189,14 @@ void RegExpMacroAssemblerPPC::CheckCharacterGT(uc16 limit, Label* on_greater) {
   BranchOrBacktrack(gt, on_greater);
 }
 
-
-void RegExpMacroAssemblerPPC::CheckAtStart(Label* on_at_start) {
+void RegExpMacroAssemblerPPC::CheckAtStart(int cp_offset, Label* on_at_start) {
   __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
-  __ addi(r3, current_input_offset(), Operand(-char_size()));
+  __ addi(r3, current_input_offset(),
+          Operand(-char_size() + cp_offset * char_size()));
   __ cmp(r3, r4);
   BranchOrBacktrack(eq, on_at_start);
 }
 
-
 void RegExpMacroAssemblerPPC::CheckNotAtStart(int cp_offset,
                                               Label* on_not_at_start) {
   __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
@@ -689,7 +688,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
     Label stack_ok;
 
     ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(isolate());
+        ExternalReference::address_of_jslimit(isolate());
     __ mov(r3, Operand(stack_limit));
     __ LoadP(r3, MemOperand(r3));
     __ sub(r3, sp, r3, LeaveOE, SetRC);
@@ -978,15 +977,19 @@ RegExpMacroAssemblerPPC::Implementation() {
   return kPPCImplementation;
 }
 
+void RegExpMacroAssemblerPPC::LoadCurrentCharacterImpl(int cp_offset,
+                                                       Label* on_end_of_input,
+                                                       bool check_bounds,
+                                                       int characters,
+                                                       int eats_at_least) {
+  // It's possible to preload a small number of characters when each success
+  // path requires a large number of characters, but not the reverse.
+  DCHECK_GE(eats_at_least, characters);
 
-void RegExpMacroAssemblerPPC::LoadCurrentCharacter(int cp_offset,
-                                                   Label* on_end_of_input,
-                                                   bool check_bounds,
-                                                   int characters) {
   DCHECK(cp_offset < (1 << 30));  // Be sane! (And ensure negation works)
   if (check_bounds) {
     if (cp_offset >= 0) {
-      CheckPosition(cp_offset + characters - 1, on_end_of_input);
+      CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
     } else {
       CheckPosition(cp_offset, on_end_of_input);
     }
@@ -994,7 +997,6 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacter(int cp_offset,
   LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
 
-
 void RegExpMacroAssemblerPPC::PopCurrentPosition() {
   Pop(current_input_offset());
 }
@@ -1177,8 +1179,10 @@ int RegExpMacroAssemblerPPC::CheckStackGuardState(Address* return_address,
   return NativeRegExpMacroAssembler::CheckStackGuardState(
       frame_entry<Isolate*>(re_frame, kIsolate),
       frame_entry<intptr_t>(re_frame, kStartIndex),
-      frame_entry<intptr_t>(re_frame, kDirectCall) == 1, return_address,
-      re_code, frame_entry_address<Address>(re_frame, kInputString),
+      static_cast<RegExp::CallOrigin>(
+          frame_entry<intptr_t>(re_frame, kDirectCall)),
+      return_address, re_code,
+      frame_entry_address<Address>(re_frame, kInputString),
       frame_entry_address<const byte*>(re_frame, kInputStart),
       frame_entry_address<const byte*>(re_frame, kInputEnd));
 }
@@ -1267,7 +1271,7 @@ void RegExpMacroAssemblerPPC::Pop(Register target) {
 void RegExpMacroAssemblerPPC::CheckPreemption() {
   // Check for preemption.
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ mov(r3, Operand(stack_limit));
   __ LoadP(r3, MemOperand(r3));
   __ cmpl(sp, r3);
@@ -1277,7 +1281,7 @@ void RegExpMacroAssemblerPPC::CheckPreemption() {
 
 void RegExpMacroAssemblerPPC::CheckStackLimit() {
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(isolate());
+      ExternalReference::address_of_regexp_stack_limit_address(isolate());
   __ mov(r3, Operand(stack_limit));
   __ LoadP(r3, MemOperand(r3));
   __ cmpl(backtrack_stackpointer(), r3);
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 418a01a9a48001..60236a4000f066 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
   virtual void AdvanceRegister(int reg, int by);
   virtual void Backtrack();
   virtual void Bind(Label* label);
-  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckAtStart(int cp_offset, Label* on_at_start);
   virtual void CheckCharacter(unsigned c, Label* on_equal);
   virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
                                       Label* on_equal);
@@ -59,9 +59,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
   virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
   virtual void IfRegisterEqPos(int reg, Label* if_eq);
   virtual IrregexpImplementation Implementation();
-  virtual void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1);
+  virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                        bool check_bounds, int characters,
+                                        int eats_at_least);
   virtual void PopCurrentPosition();
   virtual void PopRegister(int register_index);
   virtual void PushBacktrack(Label* label);
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index ee3b4015d5175a..85b144438ec34e 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -171,10 +171,19 @@ void RegExpBytecodeGenerator::CheckGreedyLoop(
   EmitOrLink(on_tos_equals_current_position);
 }
 
-void RegExpBytecodeGenerator::LoadCurrentCharacter(int cp_offset,
-                                                   Label* on_failure,
-                                                   bool check_bounds,
-                                                   int characters) {
+void RegExpBytecodeGenerator::LoadCurrentCharacterImpl(int cp_offset,
+                                                       Label* on_failure,
+                                                       bool check_bounds,
+                                                       int characters,
+                                                       int eats_at_least) {
+  DCHECK_GE(eats_at_least, characters);
+  if (eats_at_least > characters && check_bounds) {
+    DCHECK(is_uint24(cp_offset + eats_at_least));
+    Emit(BC_CHECK_CURRENT_POSITION, cp_offset + eats_at_least);
+    EmitOrLink(on_failure);
+    check_bounds = false;  // Load below doesn't need to check.
+  }
+
   DCHECK_LE(kMinCPOffset, cp_offset);
   DCHECK_GE(kMaxCPOffset, cp_offset);
   int bytecode;
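
When eats_at_least exceeds the characters loaded now, the generator emits one CHECK_CURRENT_POSITION for the furthest position and drops the per-load bounds check. A standalone model of that decision; the mnemonic strings are illustrative stand-ins for the bytecode the real code selects below:

#include <cassert>
#include <cstdio>

static void EmitLoad(int cp_offset, bool check_bounds, int characters,
                     int eats_at_least) {
  assert(eats_at_least >= characters);
  if (eats_at_least > characters && check_bounds) {
    // One wide check covers positions cp_offset..cp_offset+eats_at_least-1.
    std::printf("CHECK_CURRENT_POSITION %d -> on_failure\n",
                cp_offset + eats_at_least);
    check_bounds = false;  // the load below no longer needs its own check
  }
  std::printf("LOAD_CURRENT_CHAR%s %d\n", check_bounds ? "" : "_UNCHECKED",
              cp_offset);
}

int main() {
  EmitLoad(/*cp_offset=*/0, /*check_bounds=*/true, /*characters=*/1,
           /*eats_at_least=*/3);
  return 0;
}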
@@ -221,8 +230,8 @@ void RegExpBytecodeGenerator::CheckCharacter(uint32_t c, Label* on_equal) {
   EmitOrLink(on_equal);
 }
 
-void RegExpBytecodeGenerator::CheckAtStart(Label* on_at_start) {
-  Emit(BC_CHECK_AT_START, 0);
+void RegExpBytecodeGenerator::CheckAtStart(int cp_offset, Label* on_at_start) {
+  Emit(BC_CHECK_AT_START, cp_offset);
   EmitOrLink(on_at_start);
 }
 
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index b7207e977c8679..84b7ce361c8c2a 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -46,16 +46,16 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
   virtual void ReadCurrentPositionFromRegister(int reg);
   virtual void WriteStackPointerToRegister(int reg);
   virtual void ReadStackPointerFromRegister(int reg);
-  virtual void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1);
+  virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                        bool check_bounds, int characters,
+                                        int eats_at_least);
   virtual void CheckCharacter(unsigned c, Label* on_equal);
   virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
                                       Label* on_equal);
   virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
   virtual void CheckCharacterLT(uc16 limit, Label* on_less);
   virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
-  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckAtStart(int cp_offset, Label* on_at_start);
   virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
   virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
diff --git a/deps/v8/src/regexp/regexp-bytecodes.h b/deps/v8/src/regexp/regexp-bytecodes.h
index 8b1468c1bfa4b2..3dd7637b88c402 100644
--- a/deps/v8/src/regexp/regexp-bytecodes.h
+++ b/deps/v8/src/regexp/regexp-bytecodes.h
@@ -5,6 +5,8 @@
 #ifndef V8_REGEXP_REGEXP_BYTECODES_H_
 #define V8_REGEXP_REGEXP_BYTECODES_H_
 
+#include "src/base/macros.h"
+
 namespace v8 {
 namespace internal {
 
@@ -67,16 +69,43 @@ const int BYTECODE_SHIFT = 8;
   V(CHECK_NOT_AT_START, 48, 8) /* bc8 offset24 addr32 */                       \
   V(CHECK_GREEDY, 49, 8) /* bc8 pad24 addr32                           */      \
   V(ADVANCE_CP_AND_GOTO, 50, 8)           /* bc8 offset24 addr32 */            \
-  V(SET_CURRENT_POSITION_FROM_END, 51, 4) /* bc8 idx24 */
+  V(SET_CURRENT_POSITION_FROM_END, 51, 4) /* bc8 idx24 */                      \
+  V(CHECK_CURRENT_POSITION, 52, 8)        /* bc8 idx24 addr32 */
+
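
The bc8/idx24 notation in these layout comments means the 8-bit opcode and a 24-bit operand share the first 32-bit word (BYTECODE_SHIFT is 8). A standalone round-trip of that packing; the exact bit order is inferred from the shift constant and is an assumption here:

#include <cassert>
#include <cstdint>

const int BYTECODE_SHIFT = 8;

int main() {
  uint32_t bc = 52;                              // CHECK_CURRENT_POSITION
  uint32_t idx = 0x123456;                       // 24-bit operand
  uint32_t word = bc | (idx << BYTECODE_SHIFT);  // bc8 | idx24
  assert((word & 0xFF) == 52);                   // opcode recovered
  assert((word >> BYTECODE_SHIFT) == 0x123456);  // operand recovered
  return 0;
}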
+#define COUNT(...) +1
+static constexpr int kRegExpBytecodeCount = BYTECODE_ITERATOR(COUNT);
+#undef COUNT
+
+// Just making sure we assigned values above properly. They should be
+// contiguous, strictly increasing, and start at 0.
+// TODO(jgruber): Do not explicitly assign values, instead generate them
+// implicitly from the list order.
+STATIC_ASSERT(kRegExpBytecodeCount == 53);
 
-#define DECLARE_BYTECODES(name, code, length) static const int BC_##name = code;
+#define DECLARE_BYTECODES(name, code, length) \
+  static constexpr int BC_##name = code;
 BYTECODE_ITERATOR(DECLARE_BYTECODES)
 #undef DECLARE_BYTECODES
 
-#define DECLARE_BYTECODE_LENGTH(name, code, length) \
-  static const int BC_##name##_LENGTH = length;
-BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
+static constexpr int kRegExpBytecodeLengths[] = {
+#define DECLARE_BYTECODE_LENGTH(name, code, length) length,
+    BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
 #undef DECLARE_BYTECODE_LENGTH
+};
+
+inline constexpr int RegExpBytecodeLength(int bytecode) {
+  return kRegExpBytecodeLengths[bytecode];
+}
+
+static const char* const kRegExpBytecodeNames[] = {
+#define DECLARE_BYTECODE_NAME(name, ...) #name,
+    BYTECODE_ITERATOR(DECLARE_BYTECODE_NAME)
+#undef DECLARE_BYTECODE_NAME
+};
+
+inline const char* RegExpBytecodeName(int bytecode) {
+  return kRegExpBytecodeNames[bytecode];
+}
 
 }  // namespace internal
 }  // namespace v8
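
The COUNT define and the two lookup tables above are all stamped out of the same X-macro list. A compact standalone equivalent showing the pattern with a three-entry list:

#include <cstdio>

#define MY_LIST(V) V(FOO, 0, 4) V(BAR, 1, 4) V(BAZ, 2, 8)

#define COUNT(...) +1
static constexpr int kCount = MY_LIST(COUNT);  // expands to +1 +1 +1 == 3
#undef COUNT
static_assert(kCount == 3, "list should have three entries");

static constexpr int kLengths[] = {
#define LENGTH(name, code, length) length,
    MY_LIST(LENGTH)
#undef LENGTH
};

static const char* const kNames[] = {
#define NAME(name, ...) #name,
    MY_LIST(NAME)
#undef NAME
};

int main() {
  std::printf("%s has length %d\n", kNames[2], kLengths[2]);  // BAZ, 8
  return 0;
}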
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
index d12c35682e8a36..2d86d3ea9e75ec 100644
--- a/deps/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -1627,8 +1627,8 @@ RegExpNode* RegExpQuantifier::ToNode(int min, int max, bool is_greedy,
   bool needs_counter = has_min || has_max;
   int reg_ctr = needs_counter ? compiler->AllocateRegister()
                               : RegExpCompiler::kNoRegister;
-  LoopChoiceNode* center = new (zone)
-      LoopChoiceNode(body->min_match() == 0, compiler->read_backward(), zone);
+  LoopChoiceNode* center = new (zone) LoopChoiceNode(
+      body->min_match() == 0, compiler->read_backward(), min, zone);
   if (not_at_start && !compiler->read_backward()) center->set_not_at_start();
   RegExpNode* loop_return =
       needs_counter ? static_cast<RegExpNode*>(
@@ -1668,7 +1668,7 @@ RegExpNode* RegExpQuantifier::ToNode(int min, int max, bool is_greedy,
     center->AddLoopAlternative(body_alt);
   }
   if (needs_counter) {
-    return ActionNode::SetRegister(reg_ctr, 0, center);
+    return ActionNode::SetRegisterForLoop(reg_ctr, 0, center);
   } else {
     return center;
   }
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index c70bbc3e4a56d3..85da69f308bb67 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -4,13 +4,12 @@
 
 #include "src/regexp/regexp-compiler.h"
 
-#include "src/diagnostics/code-tracer.h"
+#include "src/base/safe_conversions.h"
 #include "src/execution/isolate.h"
 #include "src/objects/objects-inl.h"
 #include "src/regexp/regexp-macro-assembler-arch.h"
 #include "src/regexp/regexp-macro-assembler-tracer.h"
 #include "src/strings/unicode-inl.h"
-#include "src/utils/ostreams.h"
 #include "src/zone/zone-list-inl.h"
 
 #ifdef V8_INTL_SUPPORT
@@ -272,13 +271,7 @@ RegExpCompiler::CompilationResult RegExpCompiler::Assemble(
   Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
   isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
   work_list_ = nullptr;
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_code && !FLAG_regexp_interpret_all) {
-    CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
-    OFStream os(trace_scope.file());
-    Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
-  }
-#endif
+
 #ifdef DEBUG
   if (FLAG_trace_regexp_assembler) {
     delete macro_assembler_;
@@ -422,14 +415,14 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
          action = action->next()) {
       if (action->Mentions(reg)) {
         switch (action->action_type()) {
-          case ActionNode::SET_REGISTER: {
-            Trace::DeferredSetRegister* psr =
-                static_cast<Trace::DeferredSetRegister*>(action);
+          case ActionNode::SET_REGISTER_FOR_LOOP: {
+            Trace::DeferredSetRegisterForLoop* psr =
+                static_cast<Trace::DeferredSetRegisterForLoop*>(action);
             if (!absolute) {
               value += psr->value();
               absolute = true;
             }
-            // SET_REGISTER is currently only used for newly introduced loop
+            // SET_REGISTER_FOR_LOOP is only used for newly introduced loop
             // counters. They can have a significant previous value if they
             // occur in a loop. TODO(lrn): Propagate this information, so
             // we can set undo_action to IGNORE if we know there is no value to
@@ -634,9 +627,10 @@ void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) {
   guards_->Add(guard, zone);
 }
 
-ActionNode* ActionNode::SetRegister(int reg, int val, RegExpNode* on_success) {
+ActionNode* ActionNode::SetRegisterForLoop(int reg, int val,
+                                           RegExpNode* on_success) {
   ActionNode* result =
-      new (on_success->zone()) ActionNode(SET_REGISTER, on_success);
+      new (on_success->zone()) ActionNode(SET_REGISTER_FOR_LOOP, on_success);
   result->data_.u_store_register.reg = reg;
   result->data_.u_store_register.value = val;
   return result;
@@ -705,10 +699,6 @@ ActionNode* ActionNode::EmptyMatchCheck(int start_register,
 FOR_EACH_NODE_TYPE(DEFINE_ACCEPT)
 #undef DEFINE_ACCEPT
 
-void LoopChoiceNode::Accept(NodeVisitor* visitor) {
-  visitor->VisitLoopChoice(this);
-}
-
 // -------------------------------------------------------------------
 // Emit code.
 
@@ -1326,12 +1316,6 @@ bool RegExpNode::KeepRecursing(RegExpCompiler* compiler) {
          compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion;
 }
 
-int ActionNode::EatsAtLeast(int still_to_find, int budget, bool not_at_start) {
-  if (budget <= 0) return 0;
-  if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) return 0;  // Rewinds input!
-  return on_success()->EatsAtLeast(still_to_find, budget - 1, not_at_start);
-}
-
 void ActionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
                               BoyerMooreLookahead* bm, bool not_at_start) {
   if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) {
@@ -1344,16 +1328,16 @@ void ActionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
   SaveBMInfo(bm, not_at_start, offset);
 }
 
-int AssertionNode::EatsAtLeast(int still_to_find, int budget,
-                               bool not_at_start) {
-  if (budget <= 0) return 0;
-  // If we know we are not at the start and we are asked "how many characters
-  // will you match if you succeed?" then we can answer anything since false
-  // implies false.  So lets just return the max answer (still_to_find) since
-  // that won't prevent us from preloading a lot of characters for the other
-  // branches in the node graph.
-  if (assertion_type() == AT_START && not_at_start) return still_to_find;
-  return on_success()->EatsAtLeast(still_to_find, budget - 1, not_at_start);
+void ActionNode::GetQuickCheckDetails(QuickCheckDetails* details,
+                                      RegExpCompiler* compiler, int filled_in,
+                                      bool not_at_start) {
+  if (action_type_ == SET_REGISTER_FOR_LOOP) {
+    on_success()->GetQuickCheckDetailsFromLoopEntry(details, compiler,
+                                                    filled_in, not_at_start);
+  } else {
+    on_success()->GetQuickCheckDetails(details, compiler, filled_in,
+                                       not_at_start);
+  }
 }
 
 void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
@@ -1364,68 +1348,13 @@ void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
   SaveBMInfo(bm, not_at_start, offset);
 }
 
-int BackReferenceNode::EatsAtLeast(int still_to_find, int budget,
-                                   bool not_at_start) {
-  if (read_backward()) return 0;
-  if (budget <= 0) return 0;
-  return on_success()->EatsAtLeast(still_to_find, budget - 1, not_at_start);
-}
-
-int TextNode::EatsAtLeast(int still_to_find, int budget, bool not_at_start) {
-  if (read_backward()) return 0;
-  int answer = Length();
-  if (answer >= still_to_find) return answer;
-  if (budget <= 0) return answer;
-  // We are not at start after this node so we set the last argument to 'true'.
-  return answer +
-         on_success()->EatsAtLeast(still_to_find - answer, budget - 1, true);
-}
-
-int NegativeLookaroundChoiceNode::EatsAtLeast(int still_to_find, int budget,
-                                              bool not_at_start) {
-  if (budget <= 0) return 0;
-  // Alternative 0 is the negative lookahead, alternative 1 is what comes
-  // afterwards.
-  RegExpNode* node = alternatives_->at(1).node();
-  return node->EatsAtLeast(still_to_find, budget - 1, not_at_start);
-}
-
 void NegativeLookaroundChoiceNode::GetQuickCheckDetails(
     QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in,
     bool not_at_start) {
-  // Alternative 0 is the negative lookahead, alternative 1 is what comes
-  // afterwards.
-  RegExpNode* node = alternatives_->at(1).node();
+  RegExpNode* node = continue_node();
   return node->GetQuickCheckDetails(details, compiler, filled_in, not_at_start);
 }
 
-int ChoiceNode::EatsAtLeastHelper(int still_to_find, int budget,
-                                  RegExpNode* ignore_this_node,
-                                  bool not_at_start) {
-  if (budget <= 0) return 0;
-  int min = 100;
-  int choice_count = alternatives_->length();
-  budget = (budget - 1) / choice_count;
-  for (int i = 0; i < choice_count; i++) {
-    RegExpNode* node = alternatives_->at(i).node();
-    if (node == ignore_this_node) continue;
-    int node_eats_at_least =
-        node->EatsAtLeast(still_to_find, budget, not_at_start);
-    if (node_eats_at_least < min) min = node_eats_at_least;
-    if (min == 0) return 0;
-  }
-  return min;
-}
-
-int LoopChoiceNode::EatsAtLeast(int still_to_find, int budget,
-                                bool not_at_start) {
-  return EatsAtLeastHelper(still_to_find, budget - 1, loop_node_, not_at_start);
-}
-
-int ChoiceNode::EatsAtLeast(int still_to_find, int budget, bool not_at_start) {
-  return EatsAtLeastHelper(still_to_find, budget, nullptr, not_at_start);
-}
-
 // Takes the left-most 1-bit and smears it out, setting all bits to its right.
 static inline uint32_t SmearBitsRight(uint32_t v) {
   v |= v >> 1;
@@ -1459,12 +1388,78 @@ bool QuickCheckDetails::Rationalize(bool asc) {
   return found_useful_op;
 }
 
+int RegExpNode::EatsAtLeast(bool not_at_start) {
+  return not_at_start ? eats_at_least_.eats_at_least_from_not_start
+                      : eats_at_least_.eats_at_least_from_possibly_start;
+}
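
For orientation, a minimal sketch of the EatsAtLeastInfo shape these accessors imply; the real definition lives elsewhere in the regexp sources, so the constructor and the componentwise SetMin below are inferred from how this file uses them, not quoted from it.

#include <algorithm>
#include <cstdint>

struct EatsAtLeastInfoSketch {
  explicit EatsAtLeastInfoSketch(uint8_t eats = 0)
      : eats_at_least_from_possibly_start(eats),
        eats_at_least_from_not_start(eats) {}

  // Componentwise minimum; used when folding choice alternatives.
  void SetMin(const EatsAtLeastInfoSketch& other) {
    eats_at_least_from_possibly_start =
        std::min(eats_at_least_from_possibly_start,
                 other.eats_at_least_from_possibly_start);
    eats_at_least_from_not_start = std::min(
        eats_at_least_from_not_start, other.eats_at_least_from_not_start);
  }

  // Lower bound on characters consumed when the node may sit at the start
  // of the subject string.
  uint8_t eats_at_least_from_possibly_start;
  // Lower bound when the node is known not to be at the subject start.
  uint8_t eats_at_least_from_not_start;
};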
+
+EatsAtLeastInfo RegExpNode::EatsAtLeastFromLoopEntry() {
+  // SET_REGISTER_FOR_LOOP is only used to initialize loop counters, and it
+  // implies that the following node must be a LoopChoiceNode. If we need to
+  // set registers to constant values for other reasons, we could introduce a
+  // new action type SET_REGISTER that doesn't imply anything about its
+  // successor.
+  UNREACHABLE();
+}
+
+void RegExpNode::GetQuickCheckDetailsFromLoopEntry(QuickCheckDetails* details,
+                                                   RegExpCompiler* compiler,
+                                                   int characters_filled_in,
+                                                   bool not_at_start) {
+  // See comment in RegExpNode::EatsAtLeastFromLoopEntry.
+  UNREACHABLE();
+}
+
+EatsAtLeastInfo LoopChoiceNode::EatsAtLeastFromLoopEntry() {
+  DCHECK_EQ(alternatives_->length(), 2);  // There's just loop and continue.
+
+  if (read_backward()) {
+    // Can't do anything special for a backward loop, so return the basic values
+    // that we got during analysis.
+    return *eats_at_least_info();
+  }
+
+  // Figure out how much the loop body itself eats, not including anything in
+  // the continuation case. In general, the nodes in the loop body should report
+  // that they eat at least the number eaten by the continuation node, since any
+  // successful match in the loop body must also include the continuation node.
+  // However, in some cases involving positive lookaround, the loop body under-
+  // reports its appetite, so use saturated math here to avoid negative numbers.
+  uint8_t loop_body_from_not_start = base::saturated_cast<uint8_t>(
+      loop_node_->EatsAtLeast(true) - continue_node_->EatsAtLeast(true));
+  uint8_t loop_body_from_possibly_start = base::saturated_cast<uint8_t>(
+      loop_node_->EatsAtLeast(false) - continue_node_->EatsAtLeast(true));
+
+  // Limit the number of loop iterations to avoid overflow in subsequent steps.
+  int loop_iterations = base::saturated_cast<uint8_t>(min_loop_iterations());
+
+  EatsAtLeastInfo result;
+  result.eats_at_least_from_not_start =
+      base::saturated_cast<uint8_t>(loop_iterations * loop_body_from_not_start +
+                                    continue_node_->EatsAtLeast(true));
+  if (loop_iterations > 0 && loop_body_from_possibly_start > 0) {
+    // First loop iteration eats at least one, so all subsequent iterations
+    // and the after-loop chunk are guaranteed to not be at the start.
+    result.eats_at_least_from_possibly_start = base::saturated_cast<uint8_t>(
+        loop_body_from_possibly_start +
+        (loop_iterations - 1) * loop_body_from_not_start +
+        continue_node_->EatsAtLeast(true));
+  } else {
+    // Loop body might eat nothing, so only continue node contributes.
+    result.eats_at_least_from_possibly_start =
+        continue_node_->EatsAtLeast(false);
+  }
+  return result;
+}
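
To make the arithmetic above concrete, here is a worked example with illustrative numbers (pattern and values are mine, not taken from the source). For /(?:ab){2,}c/, the continue node eats 1 ('c'), the loop node eats 3 (one body pass plus the continuation), and min_loop_iterations() is 2:

#include <cstdint>

// Stand-in for base::saturated_cast<uint8_t>.
uint8_t SaturatedU8(int v) {
  return v < 0 ? 0 : v > 255 ? 255 : static_cast<uint8_t>(v);
}

int EatsAtLeastFromLoopEntrySketch() {
  const int continue_eats = 1;   // 'c'
  const int loop_eats = 3;       // 'ab' plus the continuation's 'c'
  const int min_iterations = 2;  // from the {2,} quantifier
  const uint8_t body = SaturatedU8(loop_eats - continue_eats);  // 2
  // Two guaranteed body passes plus the continuation: "ababc".
  return SaturatedU8(min_iterations * body + continue_eats);    // 5
}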
+
 bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
                                 Trace* bounds_check_trace, Trace* trace,
                                 bool preload_has_checked_bounds,
                                 Label* on_possible_success,
                                 QuickCheckDetails* details,
-                                bool fall_through_on_failure) {
+                                bool fall_through_on_failure,
+                                ChoiceNode* predecessor) {
+  DCHECK_NOT_NULL(predecessor);
   if (details->characters() == 0) return false;
   GetQuickCheckDetails(details, compiler, 0,
                        trace->at_start() == Trace::FALSE_VALUE);
@@ -1479,13 +1474,17 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
 
   if (trace->characters_preloaded() != details->characters()) {
     DCHECK(trace->cp_offset() == bounds_check_trace->cp_offset());
-    // We are attempting to preload the minimum number of characters
+    // The bounds check is performed using the minimum number of characters
     // any choice would eat, so if the bounds check fails, then none of the
     // choices can succeed, so we can just immediately backtrack, rather
-    // than go to the next choice.
+    // than go to the next choice. The number of characters preloaded may be
+    // less than the number used for the bounds check.
+    int eats_at_least = predecessor->EatsAtLeast(
+        bounds_check_trace->at_start() == Trace::FALSE_VALUE);
+    DCHECK_GE(eats_at_least, details->characters());
     assembler->LoadCurrentCharacter(
         trace->cp_offset(), bounds_check_trace->backtrack(),
-        !preload_has_checked_bounds, details->characters());
+        !preload_has_checked_bounds, details->characters(), eats_at_least);
   }
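
For intuition, a toy model of the load above (names and numbers are mine): if every alternative eats at least 4 characters but the quick check only examines 2, the bounds check can cover the larger count in one test, and a failure backtracks immediately instead of trying the next alternative.

#include <cassert>

// Toy model: the bounds check may cover more characters than the load
// actually examines.
bool QuickCheckBoundsSketch(int pos, int subject_length, int characters,
                            int eats_at_least) {
  assert(eats_at_least >= characters);  // Mirrors the DCHECK_GE above.
  // One test against the larger bound; if it fails, no alternative of the
  // predecessor choice can match here.
  return pos + eats_at_least <= subject_length;
}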
 
   bool need_mask = true;
@@ -1579,7 +1578,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
             // and the mask-compare will determine definitely whether we have
             // a match at this character position.
             pos->mask = char_mask;
-            pos->value = c;
+            pos->value = chars[0];
             pos->determines_perfectly = true;
           } else {
             uint32_t common_bits = char_mask;
@@ -1764,6 +1763,37 @@ class VisitMarker {
   NodeInfo* info_;
 };
 
+// Temporarily sets traversed_loop_initialization_node_.
+class LoopInitializationMarker {
+ public:
+  explicit LoopInitializationMarker(LoopChoiceNode* node) : node_(node) {
+    DCHECK(!node_->traversed_loop_initialization_node_);
+    node_->traversed_loop_initialization_node_ = true;
+  }
+  ~LoopInitializationMarker() {
+    DCHECK(node_->traversed_loop_initialization_node_);
+    node_->traversed_loop_initialization_node_ = false;
+  }
+
+ private:
+  LoopChoiceNode* node_;
+  DISALLOW_COPY_AND_ASSIGN(LoopInitializationMarker);
+};
+
+// Temporarily decrements min_loop_iterations_.
+class IterationDecrementer {
+ public:
+  explicit IterationDecrementer(LoopChoiceNode* node) : node_(node) {
+    DCHECK_GT(node_->min_loop_iterations_, 0);
+    --node_->min_loop_iterations_;
+  }
+  ~IterationDecrementer() { ++node_->min_loop_iterations_; }
+
+ private:
+  LoopChoiceNode* node_;
+  DISALLOW_COPY_AND_ASSIGN(IterationDecrementer);
+};
+
 RegExpNode* SeqRegExpNode::FilterOneByte(int depth) {
   if (info()->replacement_calculated) return replacement();
   if (depth < 0) return this;
@@ -1916,17 +1946,17 @@ RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth) {
   VisitMarker marker(info());
   // Alternative 0 is the negative lookahead, alternative 1 is what comes
   // afterwards.
-  RegExpNode* node = alternatives_->at(1).node();
+  RegExpNode* node = continue_node();
   RegExpNode* replacement = node->FilterOneByte(depth - 1);
   if (replacement == nullptr) return set_replacement(nullptr);
-  alternatives_->at(1).set_node(replacement);
+  alternatives_->at(kContinueIndex).set_node(replacement);
 
-  RegExpNode* neg_node = alternatives_->at(0).node();
+  RegExpNode* neg_node = lookaround_node();
   RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1);
   // If the negative lookahead is always going to fail then
   // we don't need to check it.
   if (neg_replacement == nullptr) return set_replacement(replacement);
-  alternatives_->at(0).set_node(neg_replacement);
+  alternatives_->at(kLookaroundIndex).set_node(neg_replacement);
   return set_replacement(this);
 }
 
@@ -1935,9 +1965,48 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
                                           int characters_filled_in,
                                           bool not_at_start) {
   if (body_can_be_zero_length_ || info()->visited) return;
-  VisitMarker marker(info());
-  return ChoiceNode::GetQuickCheckDetails(details, compiler,
-                                          characters_filled_in, not_at_start);
+  not_at_start = not_at_start || this->not_at_start();
+  DCHECK_EQ(alternatives_->length(), 2);  // There's just loop and continue.
+  if (traversed_loop_initialization_node_ && min_loop_iterations_ > 0 &&
+      loop_node_->EatsAtLeast(not_at_start) >
+          continue_node_->EatsAtLeast(true)) {
+    // Loop body is guaranteed to execute at least once, and consume characters
+    // when it does, meaning the only possible quick checks from this point
+    // begin with the loop body. We may recursively visit this LoopChoiceNode,
+    // but we temporarily decrease its minimum iteration counter so we know when
+    // to check the continue case.
+    IterationDecrementer next_iteration(this);
+    loop_node_->GetQuickCheckDetails(details, compiler, characters_filled_in,
+                                     not_at_start);
+  } else {
+    // Might not consume anything in the loop body, so treat it like a normal
+    // ChoiceNode (and don't recursively visit this node again).
+    VisitMarker marker(info());
+    ChoiceNode::GetQuickCheckDetails(details, compiler, characters_filled_in,
+                                     not_at_start);
+  }
+}
+
+void LoopChoiceNode::GetQuickCheckDetailsFromLoopEntry(
+    QuickCheckDetails* details, RegExpCompiler* compiler,
+    int characters_filled_in, bool not_at_start) {
+  if (traversed_loop_initialization_node_) {
+    // We already entered this loop once, exited via its continuation node, and
+    // followed an outer loop's back-edge to before the loop entry point. We
+    // could try to reset the minimum iteration count to its starting value at
+    // this point, but that seems like more trouble than it's worth. It's safe
+    // to keep going with the current (possibly reduced) minimum iteration
+    // count.
+    GetQuickCheckDetails(details, compiler, characters_filled_in, not_at_start);
+  } else {
+    // We are entering a loop via its counter initialization action, meaning we
+    // are guaranteed to run the loop body at least some minimum number of times
+    // before running the continuation node. Set a flag so that this node knows
+    // (now and any times we visit it again recursively) that it was entered
+    // from the top.
+    LoopInitializationMarker marker(this);
+    GetQuickCheckDetails(details, compiler, characters_filled_in, not_at_start);
+  }
 }
 
 void LoopChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
@@ -2014,12 +2083,7 @@ void EmitHat(RegExpCompiler* compiler, RegExpNode* on_success, Trace* trace) {
   if (may_be_at_or_before_subject_string_start) {
     // The start of input counts as a newline in this context, so skip to ok if
     // we are at the start.
-    // TODO(jgruber): It would be less awkward to use CheckAtStart here, but
-    // that currently does not support a non-zero cp_offset.
-    Label not_at_start;
-    assembler->CheckNotAtStart(new_trace.cp_offset(), &not_at_start);
-    assembler->GoTo(&ok);
-    assembler->Bind(&not_at_start);
+    assembler->CheckAtStart(new_trace.cp_offset(), &ok);
   }
 
   // If we've already checked that we are not at the start of input, it's okay
@@ -2049,9 +2113,8 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
   bool not_at_start = (trace->at_start() == Trace::FALSE_VALUE);
   BoyerMooreLookahead* lookahead = bm_info(not_at_start);
   if (lookahead == nullptr) {
-    int eats_at_least = Min(kMaxLookaheadForBoyerMoore,
-                            EatsAtLeast(kMaxLookaheadForBoyerMoore,
-                                        kRecursionBudget, not_at_start));
+    int eats_at_least =
+        Min(kMaxLookaheadForBoyerMoore, EatsAtLeast(not_at_start));
     if (eats_at_least >= 1) {
       BoyerMooreLookahead* bm =
           new (zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
@@ -2113,12 +2176,7 @@ void AssertionNode::BacktrackIfPrevious(
   if (may_be_at_or_before_subject_string_start) {
     // The start of input counts as a non-word character, so the question is
     // decided if we are at the start.
-    // TODO(jgruber): It would be less awkward to use CheckAtStart here, but
-    // that currently does not support a non-zero cp_offset.
-    Label not_at_start;
-    assembler->CheckNotAtStart(new_trace.cp_offset(), &not_at_start);
-    assembler->GoTo(non_word);
-    assembler->Bind(&not_at_start);
+    assembler->CheckAtStart(new_trace.cp_offset(), non_word);
   }
 
   // If we've already checked that we are not at the start of input, it's okay
@@ -2939,8 +2997,7 @@ void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler, Trace* current_trace,
   if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
     // Save some time by looking at most one machine word ahead.
     state->eats_at_least_ =
-        EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
-                    current_trace->at_start() == Trace::FALSE_VALUE);
+        EatsAtLeast(current_trace->at_start() == Trace::FALSE_VALUE);
   }
   state->preload_characters_ =
       CalculatePreloadCharacters(compiler, state->eats_at_least_);
@@ -3090,9 +3147,7 @@ int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
   // small alternation.
   BoyerMooreLookahead* bm = bm_info(false);
   if (bm == nullptr) {
-    eats_at_least =
-        Min(kMaxLookaheadForBoyerMoore,
-            EatsAtLeast(kMaxLookaheadForBoyerMoore, kRecursionBudget, false));
+    eats_at_least = Min(kMaxLookaheadForBoyerMoore, EatsAtLeast(false));
     if (eats_at_least >= 1) {
       bm = new (zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
       GuardedAlternative alt0 = alternatives_->at(0);
@@ -3144,7 +3199,7 @@ void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
         alternative.node()->EmitQuickCheck(
             compiler, trace, &new_trace, preload->preload_has_checked_bounds_,
             &alt_gen->possible_success, &alt_gen->quick_check_details,
-            fall_through_on_failure)) {
+            fall_through_on_failure, this)) {
       // Quick check was generated for this choice.
       preload->preload_is_current_ = true;
       preload->preload_has_checked_bounds_ = true;
@@ -3253,9 +3308,9 @@ void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
       on_success()->Emit(compiler, &new_trace);
       break;
     }
-    case SET_REGISTER: {
-      Trace::DeferredSetRegister new_set(data_.u_store_register.reg,
-                                         data_.u_store_register.value);
+    case SET_REGISTER_FOR_LOOP: {
+      Trace::DeferredSetRegisterForLoop new_set(data_.u_store_register.reg,
+                                                data_.u_store_register.value);
       Trace new_trace = *trace;
       new_trace.add_action(&new_set);
       on_success()->Emit(compiler, &new_trace);
@@ -3377,26 +3432,6 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
   on_success()->Emit(compiler, trace);
 }
 
-// -------------------------------------------------------------------
-// Analysis
-
-void Analysis::EnsureAnalyzed(RegExpNode* that) {
-  StackLimitCheck check(isolate());
-  if (check.HasOverflowed()) {
-    fail("Stack overflow");
-    return;
-  }
-  if (that->info()->been_analyzed || that->info()->being_analyzed) return;
-  that->info()->being_analyzed = true;
-  that->Accept(this);
-  that->info()->being_analyzed = false;
-  that->info()->been_analyzed = true;
-}
-
-void Analysis::VisitEnd(EndNode* that) {
-  // nothing to do
-}
-
 void TextNode::CalculateOffsets() {
   int element_count = elements()->length();
   // Set up the offsets of the elements relative to the start.  This is a fixed
@@ -3409,60 +3444,269 @@ void TextNode::CalculateOffsets() {
   }
 }
 
-void Analysis::VisitText(TextNode* that) {
-  that->MakeCaseIndependent(isolate(), is_one_byte_);
-  EnsureAnalyzed(that->on_success());
-  if (!has_failed()) {
-    that->CalculateOffsets();
-  }
-}
+namespace {
 
-void Analysis::VisitAction(ActionNode* that) {
-  RegExpNode* target = that->on_success();
-  EnsureAnalyzed(target);
-  if (!has_failed()) {
+// Assertion propagation moves information about assertions such as
+// \b to the affected nodes.  For instance, in /.\b./ the first '.' must be
+// told that whatever follows needs to know whether it matched a word or a
+// non-word, and the second '.' must be told to check whether it succeeds a
+// word or a non-word.  In this case the result will be something like:
+//
+//   +-------+        +------------+
+//   |   .   |        |      .     |
+//   +-------+  --->  +------------+
+//   | word? |        | check word |
+//   +-------+        +------------+
+class AssertionPropagator : public AllStatic {
+ public:
+  static void VisitText(TextNode* that) {}
+
+  static void VisitAction(ActionNode* that) {
     // If the next node is interested in what it follows then this node
     // has to be interested too so it can pass the information on.
-    that->info()->AddFromFollowing(target->info());
+    that->info()->AddFromFollowing(that->on_success()->info());
   }
-}
 
-void Analysis::VisitChoice(ChoiceNode* that) {
-  NodeInfo* info = that->info();
-  for (int i = 0; i < that->alternatives()->length(); i++) {
-    RegExpNode* node = that->alternatives()->at(i).node();
-    EnsureAnalyzed(node);
-    if (has_failed()) return;
+  static void VisitChoice(ChoiceNode* that, int i) {
     // Anything the following nodes need to know has to be known by
     // this node also, so it can pass it on.
-    info->AddFromFollowing(node->info());
+    that->info()->AddFromFollowing(that->alternatives()->at(i).node()->info());
   }
-}
 
-void Analysis::VisitLoopChoice(LoopChoiceNode* that) {
-  NodeInfo* info = that->info();
-  for (int i = 0; i < that->alternatives()->length(); i++) {
-    RegExpNode* node = that->alternatives()->at(i).node();
-    if (node != that->loop_node()) {
-      EnsureAnalyzed(node);
+  static void VisitLoopChoiceContinueNode(LoopChoiceNode* that) {
+    that->info()->AddFromFollowing(that->continue_node()->info());
+  }
+
+  static void VisitLoopChoiceLoopNode(LoopChoiceNode* that) {
+    that->info()->AddFromFollowing(that->loop_node()->info());
+  }
+
+  static void VisitNegativeLookaroundChoiceLookaroundNode(
+      NegativeLookaroundChoiceNode* that) {
+    VisitChoice(that, NegativeLookaroundChoiceNode::kLookaroundIndex);
+  }
+
+  static void VisitNegativeLookaroundChoiceContinueNode(
+      NegativeLookaroundChoiceNode* that) {
+    VisitChoice(that, NegativeLookaroundChoiceNode::kContinueIndex);
+  }
+
+  static void VisitBackReference(BackReferenceNode* that) {}
+
+  static void VisitAssertion(AssertionNode* that) {}
+};
+
+// Propagates information about the minimum size of successful matches from
+// successor nodes to their predecessors. Note that all eats_at_least values
+// are initialized to zero before analysis.
+class EatsAtLeastPropagator : public AllStatic {
+ public:
+  static void VisitText(TextNode* that) {
+    // The eats_at_least value is not used if reading backward.
+    if (!that->read_backward()) {
+      // We are not at the start after this node, and thus we can use the
+      // successor's eats_at_least_from_not_start value.
+      uint8_t eats_at_least = base::saturated_cast<uint8_t>(
+          that->Length() + that->on_success()
+                               ->eats_at_least_info()
+                               ->eats_at_least_from_not_start);
+      that->set_eats_at_least_info(EatsAtLeastInfo(eats_at_least));
+    }
+  }
+
+  static void VisitAction(ActionNode* that) {
+    // POSITIVE_SUBMATCH_SUCCESS rewinds input, so we must not consider
+    // successor nodes for eats_at_least. SET_REGISTER_FOR_LOOP indicates a loop
+    // entry point, which means the loop body will run at least the minimum
+    // number of times before the continuation case can run. Otherwise the
+    // current node eats at least as much as its successor.
+    switch (that->action_type()) {
+      case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
+        break;  // Was already initialized to zero.
+      case ActionNode::SET_REGISTER_FOR_LOOP:
+        that->set_eats_at_least_info(
+            that->on_success()->EatsAtLeastFromLoopEntry());
+        break;
+      default:
+        that->set_eats_at_least_info(*that->on_success()->eats_at_least_info());
+        break;
+    }
+  }
+
+  static void VisitChoice(ChoiceNode* that, int i) {
+    // The minimum possible match from a choice node is the minimum of its
+    // successors.
+    EatsAtLeastInfo eats_at_least =
+        i == 0 ? EatsAtLeastInfo(UINT8_MAX) : *that->eats_at_least_info();
+    eats_at_least.SetMin(
+        *that->alternatives()->at(i).node()->eats_at_least_info());
+    that->set_eats_at_least_info(eats_at_least);
+  }
+
+  static void VisitLoopChoiceContinueNode(LoopChoiceNode* that) {
+    that->set_eats_at_least_info(*that->continue_node()->eats_at_least_info());
+  }
+
+  static void VisitLoopChoiceLoopNode(LoopChoiceNode* that) {}
+
+  static void VisitNegativeLookaroundChoiceLookaroundNode(
+      NegativeLookaroundChoiceNode* that) {}
+
+  static void VisitNegativeLookaroundChoiceContinueNode(
+      NegativeLookaroundChoiceNode* that) {
+    that->set_eats_at_least_info(*that->continue_node()->eats_at_least_info());
+  }
+
+  static void VisitBackReference(BackReferenceNode* that) {
+    if (!that->read_backward()) {
+      that->set_eats_at_least_info(*that->on_success()->eats_at_least_info());
+    }
+  }
+
+  static void VisitAssertion(AssertionNode* that) {
+    EatsAtLeastInfo eats_at_least = *that->on_success()->eats_at_least_info();
+    if (that->assertion_type() == AssertionNode::AT_START) {
+      // If we know we are not at the start and we are asked "how many
+      // characters will you match if you succeed?" then we can answer anything
+      // since false implies false.  So let's just set the max answer
+      // (UINT8_MAX) since that won't prevent us from preloading a lot of
+      // characters for the other branches in the node graph.
+      eats_at_least.eats_at_least_from_not_start = UINT8_MAX;
+    }
+    that->set_eats_at_least_info(eats_at_least);
+  }
+};
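
Continuing the EatsAtLeastInfoSketch from the earlier snippet, the VisitChoice fold can be traced by hand for a choice over "foo" and "ba" (lengths 3 and 2; the example is mine):

#include <cstdint>

// Assumes EatsAtLeastInfoSketch from the sketch above.
EatsAtLeastInfoSketch ChoiceEatsAtLeastSketch() {
  EatsAtLeastInfoSketch result(UINT8_MAX);  // i == 0 starts from the maximum.
  result.SetMin(EatsAtLeastInfoSketch(3));  // alternative "foo" -> 3
  result.SetMin(EatsAtLeastInfoSketch(2));  // alternative "ba"  -> 2
  return result;  // Both fields are now 2: the choice eats at least 2.
}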
+
+}  // namespace
+
+// -------------------------------------------------------------------
+// Analysis
+
+// Iterates the node graph and provides the opportunity for propagators to set
+// values that depend on successor nodes.
+template <typename... Propagators>
+class Analysis : public NodeVisitor {
+ public:
+  Analysis(Isolate* isolate, bool is_one_byte)
+      : isolate_(isolate), is_one_byte_(is_one_byte), error_message_(nullptr) {}
+
+  void EnsureAnalyzed(RegExpNode* that) {
+    StackLimitCheck check(isolate());
+    if (check.HasOverflowed()) {
+      fail("Stack overflow");
+      return;
+    }
+    if (that->info()->been_analyzed || that->info()->being_analyzed) return;
+    that->info()->being_analyzed = true;
+    that->Accept(this);
+    that->info()->being_analyzed = false;
+    that->info()->been_analyzed = true;
+  }
+
+  bool has_failed() { return error_message_ != nullptr; }
+  const char* error_message() {
+    DCHECK(error_message_ != nullptr);
+    return error_message_;
+  }
+  void fail(const char* error_message) { error_message_ = error_message; }
+
+  Isolate* isolate() const { return isolate_; }
+
+  void VisitEnd(EndNode* that) override {
+    // nothing to do
+  }
+
+// Calls the given static function on each propagator, i.e. on each variadic
+// template argument.
+#define STATIC_FOR_EACH(expr)       \
+  do {                              \
+    int dummy[] = {((expr), 0)...}; \
+    USE(dummy);                     \
+  } while (false)
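
The macro relies on the C++11 idiom of expanding a parameter pack inside a braced array initializer; a minimal self-contained sketch of the same trick (all names are mine):

#include <iostream>

struct LoggerA { static void Visit(int x) { std::cout << "A:" << x << '\n'; } };
struct LoggerB { static void Visit(int x) { std::cout << "B:" << x << '\n'; } };

template <typename... Propagators>
void VisitAll(int x) {
  // Each initializer evaluates one call and yields 0; braced-init-lists are
  // evaluated left to right, so the calls run in template-argument order.
  int dummy[] = {(Propagators::Visit(x), 0)...};
  (void)dummy;  // Plays the role of USE(dummy) in the macro above.
}

// VisitAll<LoggerA, LoggerB>(42) prints "A:42" then "B:42".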
+
+  void VisitText(TextNode* that) override {
+    that->MakeCaseIndependent(isolate(), is_one_byte_);
+    EnsureAnalyzed(that->on_success());
+    if (has_failed()) return;
+    that->CalculateOffsets();
+    STATIC_FOR_EACH(Propagators::VisitText(that));
+  }
+
+  void VisitAction(ActionNode* that) override {
+    EnsureAnalyzed(that->on_success());
+    if (has_failed()) return;
+    STATIC_FOR_EACH(Propagators::VisitAction(that));
+  }
+
+  void VisitChoice(ChoiceNode* that) override {
+    for (int i = 0; i < that->alternatives()->length(); i++) {
+      EnsureAnalyzed(that->alternatives()->at(i).node());
       if (has_failed()) return;
-      info->AddFromFollowing(node->info());
+      STATIC_FOR_EACH(Propagators::VisitChoice(that, i));
     }
   }
-  // Check the loop last since it may need the value of this node
-  // to get a correct result.
-  EnsureAnalyzed(that->loop_node());
-  if (!has_failed()) {
-    info->AddFromFollowing(that->loop_node()->info());
+
+  void VisitLoopChoice(LoopChoiceNode* that) override {
+    DCHECK_EQ(that->alternatives()->length(), 2);  // Just loop and continue.
+
+    // First propagate all information from the continuation node.
+    EnsureAnalyzed(that->continue_node());
+    if (has_failed()) return;
+    STATIC_FOR_EACH(Propagators::VisitLoopChoiceContinueNode(that));
+
+    // Check the loop last since it may need the value of this node
+    // to get a correct result.
+    EnsureAnalyzed(that->loop_node());
+    if (has_failed()) return;
+    STATIC_FOR_EACH(Propagators::VisitLoopChoiceLoopNode(that));
+  }
+
+  void VisitNegativeLookaroundChoice(
+      NegativeLookaroundChoiceNode* that) override {
+    DCHECK_EQ(that->alternatives()->length(), 2);  // Lookaround and continue.
+
+    EnsureAnalyzed(that->lookaround_node());
+    if (has_failed()) return;
+    STATIC_FOR_EACH(
+        Propagators::VisitNegativeLookaroundChoiceLookaroundNode(that));
+
+    EnsureAnalyzed(that->continue_node());
+    if (has_failed()) return;
+    STATIC_FOR_EACH(
+        Propagators::VisitNegativeLookaroundChoiceContinueNode(that));
   }
-}
 
-void Analysis::VisitBackReference(BackReferenceNode* that) {
-  EnsureAnalyzed(that->on_success());
-}
+  void VisitBackReference(BackReferenceNode* that) override {
+    EnsureAnalyzed(that->on_success());
+    if (has_failed()) return;
+    STATIC_FOR_EACH(Propagators::VisitBackReference(that));
+  }
+
+  void VisitAssertion(AssertionNode* that) override {
+    EnsureAnalyzed(that->on_success());
+    if (has_failed()) return;
+    STATIC_FOR_EACH(Propagators::VisitAssertion(that));
+  }
+
+#undef STATIC_FOR_EACH
+
+ private:
+  Isolate* isolate_;
+  bool is_one_byte_;
+  const char* error_message_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
+};
 
-void Analysis::VisitAssertion(AssertionNode* that) {
-  EnsureAnalyzed(that->on_success());
+const char* AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
+                          RegExpNode* node) {
+  Analysis<AssertionPropagator, EatsAtLeastPropagator> analysis(isolate,
+                                                                is_one_byte);
+  DCHECK_EQ(node->info()->been_analyzed, false);
+  analysis.EnsureAnalyzed(node);
+  DCHECK_IMPLIES(analysis.has_failed(), analysis.error_message() != nullptr);
+  return analysis.has_failed() ? analysis.error_message() : nullptr;
 }
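
A hedged call-site sketch of the new entry point; CompileAfterAnalysis and ReportError are hypothetical caller names, while the nullptr-on-success contract is the one documented in the header change below:

bool CompileAfterAnalysis(Isolate* isolate, bool is_one_byte,
                          RegExpNode* node) {
  const char* error = AnalyzeRegExp(isolate, is_one_byte, node);
  if (error != nullptr) {
    ReportError(error);  // Hypothetical error path, e.g. "Stack overflow".
    return false;
  }
  // ... proceed to code generation over the now-analyzed node graph ...
  return true;
}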
 
 void BackReferenceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
diff --git a/deps/v8/src/regexp/regexp-compiler.h b/deps/v8/src/regexp/regexp-compiler.h
index 1b70abfd981003..2de221f35d237b 100644
--- a/deps/v8/src/regexp/regexp-compiler.h
+++ b/deps/v8/src/regexp/regexp-compiler.h
@@ -285,10 +285,11 @@ class Trace {
     void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
   };
 
-  class DeferredSetRegister : public DeferredAction {
+  class DeferredSetRegisterForLoop : public DeferredAction {
    public:
-    DeferredSetRegister(int reg, int value)
-        : DeferredAction(ActionNode::SET_REGISTER, reg), value_(value) {}
+    DeferredSetRegisterForLoop(int reg, int value)
+        : DeferredAction(ActionNode::SET_REGISTER_FOR_LOOP, reg),
+          value_(value) {}
     int value() { return value_; }
 
    private:
@@ -419,45 +420,13 @@ struct PreloadState {
   void init() { eats_at_least_ = kEatsAtLeastNotYetInitialized; }
 };
 
-// Assertion propagation moves information about assertions such as
-// \b to the affected nodes.  For instance, in /.\b./ information must
-// be propagated to the first '.' that whatever follows needs to know
-// if it matched a word or a non-word, and to the second '.' that it
-// has to check if it succeeds a word or non-word.  In this case the
-// result will be something like:
+// Analysis performs assertion propagation and computes eats_at_least_ values.
+// See the comments on AssertionPropagator and EatsAtLeastPropagator for more
+// details.
 //
-//   +-------+        +------------+
-//   |   .   |        |      .     |
-//   +-------+  --->  +------------+
-//   | word? |        | check word |
-//   +-------+        +------------+
-class Analysis : public NodeVisitor {
- public:
-  Analysis(Isolate* isolate, bool is_one_byte)
-      : isolate_(isolate), is_one_byte_(is_one_byte), error_message_(nullptr) {}
-  void EnsureAnalyzed(RegExpNode* node);
-
-#define DECLARE_VISIT(Type) void Visit##Type(Type##Node* that) override;
-  FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
-  void VisitLoopChoice(LoopChoiceNode* that) override;
-
-  bool has_failed() { return error_message_ != nullptr; }
-  const char* error_message() {
-    DCHECK(error_message_ != nullptr);
-    return error_message_;
-  }
-  void fail(const char* error_message) { error_message_ = error_message; }
-
-  Isolate* isolate() const { return isolate_; }
-
- private:
-  Isolate* isolate_;
-  bool is_one_byte_;
-  const char* error_message_;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
-};
+// This function returns nullptr on success or a null-terminated failure
+// message on failure.
+const char* AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpNode* node);
 
 class FrequencyCollator {
  public:
diff --git a/deps/v8/src/regexp/regexp-dotprinter.cc b/deps/v8/src/regexp/regexp-dotprinter.cc
index a6d72aaf5bb245..b6640626f2c4d8 100644
--- a/deps/v8/src/regexp/regexp-dotprinter.cc
+++ b/deps/v8/src/regexp/regexp-dotprinter.cc
@@ -114,6 +114,15 @@ void DotPrinterImpl::VisitChoice(ChoiceNode* that) {
   }
 }
 
+void DotPrinterImpl::VisitLoopChoice(LoopChoiceNode* that) {
+  VisitChoice(that);
+}
+
+void DotPrinterImpl::VisitNegativeLookaroundChoice(
+    NegativeLookaroundChoiceNode* that) {
+  VisitChoice(that);
+}
+
 void DotPrinterImpl::VisitText(TextNode* that) {
   Zone* zone = that->zone();
   os_ << "  n" << that << " [label=\"";
@@ -191,7 +200,7 @@ void DotPrinterImpl::VisitAssertion(AssertionNode* that) {
 void DotPrinterImpl::VisitAction(ActionNode* that) {
   os_ << "  n" << that << " [";
   switch (that->action_type_) {
-    case ActionNode::SET_REGISTER:
+    case ActionNode::SET_REGISTER_FOR_LOOP:
       os_ << "label=\"$" << that->data_.u_store_register.reg
           << ":=" << that->data_.u_store_register.value << "\", shape=octagon";
       break;
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index 881758861cd1e9..cf2fb55e4a861c 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -8,6 +8,7 @@
 
 #include "src/ast/ast.h"
 #include "src/base/small-vector.h"
+#include "src/objects/js-regexp-inl.h"
 #include "src/objects/objects-inl.h"
 #include "src/regexp/regexp-bytecodes.h"
 #include "src/regexp/regexp-macro-assembler.h"
@@ -19,12 +20,20 @@
 #include "unicode/uchar.h"
 #endif  // V8_INTL_SUPPORT
 
+// Use token threaded dispatch iff the compiler supports computed gotos and the
+// build argument v8_enable_regexp_interpreter_threaded_dispatch was set.
+#if V8_HAS_COMPUTED_GOTO && \
+    defined(V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH)
+#define V8_USE_COMPUTED_GOTO 1
+#endif  // V8_HAS_COMPUTED_GOTO && V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH
+
 namespace v8 {
 namespace internal {
 
-static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
-                                 int len, Vector<const uc16> subject,
-                                 bool unicode) {
+namespace {
+
+bool BackRefMatchesNoCase(Isolate* isolate, int from, int current, int len,
+                          Vector<const uc16> subject, bool unicode) {
   Address offset_a =
       reinterpret_cast<Address>(const_cast<uc16*>(&subject.at(from)));
   Address offset_b =
@@ -34,9 +43,8 @@ static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
              offset_a, offset_b, length, unicode ? nullptr : isolate) == 1;
 }
 
-static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
-                                 int len, Vector<const uint8_t> subject,
-                                 bool unicode) {
+bool BackRefMatchesNoCase(Isolate* isolate, int from, int current, int len,
+                          Vector<const uint8_t> subject, bool unicode) {
   // For Latin1 characters the unicode flag makes no difference.
   for (int i = 0; i < len; i++) {
     unsigned int old_char = subject[from++];
@@ -55,49 +63,48 @@ static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
   return true;
 }
 
+void DisassembleSingleBytecode(const byte* code_base, const byte* pc) {
+  PrintF("%s", RegExpBytecodeName(*pc));
+
+  // Args and the bytecode as hex.
+  for (int i = 0; i < RegExpBytecodeLength(*pc); i++) {
+    PrintF(", %02x", pc[i]);
+  }
+  PrintF(" ");
+
+  // Args as ASCII.
+  for (int i = 1; i < RegExpBytecodeLength(*pc); i++) {
+    unsigned char b = pc[i];
+    PrintF("%c", std::isprint(b) ? b : '.');
+  }
+  PrintF("\n");
+}
+
 #ifdef DEBUG
-static void TraceInterpreter(const byte* code_base, const byte* pc,
-                             int stack_depth, int current_position,
-                             uint32_t current_char, int bytecode_length,
-                             const char* bytecode_name) {
+void MaybeTraceInterpreter(const byte* code_base, const byte* pc,
+                           int stack_depth, int current_position,
+                           uint32_t current_char, int bytecode_length,
+                           const char* bytecode_name) {
   if (FLAG_trace_regexp_bytecodes) {
-    bool printable = (current_char < 127 && current_char >= 32);
+    const bool printable = std::isprint(current_char);
     const char* format =
         printable
-            ? "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = %s"
-            : "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = %s";
+            ? "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = "
+            : "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = ";
     PrintF(format, pc - code_base, stack_depth, current_position, current_char,
-           printable ? current_char : '.', bytecode_name);
-    for (int i = 0; i < bytecode_length; i++) {
-      printf(", %02x", pc[i]);
-    }
-    printf(" ");
-    for (int i = 1; i < bytecode_length; i++) {
-      unsigned char b = pc[i];
-      if (b < 127 && b >= 32) {
-        printf("%c", b);
-      } else {
-        printf(".");
-      }
-    }
-    printf("\n");
+           printable ? current_char : '.');
+
+    DisassembleSingleBytecode(code_base, pc);
   }
 }
+#endif  // DEBUG
 
-#define BYTECODE(name)                                             \
-  case BC_##name:                                                  \
-    TraceInterpreter(code_base, pc, backtrack_stack.sp(), current, \
-                     current_char, BC_##name##_LENGTH, #name);
-#else
-#define BYTECODE(name) case BC_##name:
-#endif
-
-static int32_t Load32Aligned(const byte* pc) {
+int32_t Load32Aligned(const byte* pc) {
   DCHECK_EQ(0, reinterpret_cast<intptr_t>(pc) & 3);
   return *reinterpret_cast<const int32_t*>(pc);
 }
 
-static int32_t Load16Aligned(const byte* pc) {
+int32_t Load16Aligned(const byte* pc) {
   DCHECK_EQ(0, reinterpret_cast<intptr_t>(pc) & 1);
   return *reinterpret_cast<const uint16_t*>(pc);
 }
@@ -139,9 +146,9 @@ class BacktrackStack {
   DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
 };
 
-namespace {
-
-IrregexpInterpreter::Result StackOverflow(Isolate* isolate) {
+IrregexpInterpreter::Result StackOverflow(Isolate* isolate,
+                                          RegExp::CallOrigin call_origin) {
+  CHECK(call_origin == RegExp::CallOrigin::kFromRuntime);
   // We abort interpreter execution after the stack overflow is thrown, and thus
   // allow allocation here despite the outer DisallowHeapAllocationScope.
   AllowHeapAllocation yes_gc;
@@ -149,72 +156,154 @@ IrregexpInterpreter::Result StackOverflow(Isolate* isolate) {
   return IrregexpInterpreter::EXCEPTION;
 }
 
-// Runs all pending interrupts. Callers must update unhandlified object
-// references after this function completes.
-IrregexpInterpreter::Result HandleInterrupts(Isolate* isolate,
-                                             Handle<String> subject_string) {
+template <typename Char>
+void UpdateCodeAndSubjectReferences(
+    Isolate* isolate, Handle<ByteArray> code_array,
+    Handle<String> subject_string, ByteArray* code_array_out,
+    const byte** code_base_out, const byte** pc_out, String* subject_string_out,
+    Vector<const Char>* subject_string_vector_out) {
   DisallowHeapAllocation no_gc;
 
-  StackLimitCheck check(isolate);
-  if (check.JsHasOverflowed()) {
-    return StackOverflow(isolate);  // A real stack overflow.
+  if (*code_base_out != code_array->GetDataStartAddress()) {
+    *code_array_out = *code_array;
+    const intptr_t pc_offset = *pc_out - *code_base_out;
+    DCHECK_GT(pc_offset, 0);
+    *code_base_out = code_array->GetDataStartAddress();
+    *pc_out = *code_base_out + pc_offset;
   }
 
-  // Handle interrupts if any exist.
-  if (check.InterruptRequested()) {
-    const bool was_one_byte =
-        String::IsOneByteRepresentationUnderneath(*subject_string);
+  DCHECK(subject_string->IsFlat());
+  *subject_string_out = *subject_string;
+  *subject_string_vector_out = subject_string->GetCharVector<Char>(no_gc);
+}
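
The pc fix-up above is plain pointer rebasing; the same arithmetic in a tiny self-contained form (names are mine):

#include <cstdint>

// If a GC moved the bytecode array, translate the old pc into the new
// allocation by preserving its offset from the array base.
const uint8_t* RebasePcSketch(const uint8_t* old_base, const uint8_t* old_pc,
                              const uint8_t* new_base) {
  const intptr_t pc_offset = old_pc - old_base;  // Mirrors pc_offset above.
  return new_base + pc_offset;
}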
 
-    Object result;
-    {
-      AllowHeapAllocation yes_gc;
-      result = isolate->stack_guard()->HandleInterrupts();
-    }
+// Runs all pending interrupts and updates unhandlified object references if
+// necessary.
+template <typename Char>
+IrregexpInterpreter::Result HandleInterrupts(
+    Isolate* isolate, RegExp::CallOrigin call_origin, ByteArray* code_array_out,
+    String* subject_string_out, const byte** code_base_out,
+    Vector<const Char>* subject_string_vector_out, const byte** pc_out) {
+  DisallowHeapAllocation no_gc;
 
-    if (result.IsException(isolate)) {
+  StackLimitCheck check(isolate);
+  bool js_has_overflowed = check.JsHasOverflowed();
+
+  if (call_origin == RegExp::CallOrigin::kFromJs) {
+    // Direct calls from JavaScript can be interrupted in two ways:
+    // 1. A real stack overflow, in which case we let the caller throw the
+    //    exception.
+    // 2. The stack guard was used to interrupt execution for another purpose,
+    //    forcing the call through the runtime system.
+    if (js_has_overflowed) {
       return IrregexpInterpreter::EXCEPTION;
-    }
-
-    // If we changed between a LATIN1 and a UC16 string, we need to restart
-    // regexp matching with the appropriate template instantiation of RawMatch.
-    if (String::IsOneByteRepresentationUnderneath(*subject_string) !=
-        was_one_byte) {
+    } else if (check.InterruptRequested()) {
       return IrregexpInterpreter::RETRY;
     }
+  } else {
+    DCHECK(call_origin == RegExp::CallOrigin::kFromRuntime);
+    // Prepare for possible GC.
+    HandleScope handles(isolate);
+    Handle<ByteArray> code_handle(*code_array_out, isolate);
+    Handle<String> subject_handle(*subject_string_out, isolate);
+
+    if (js_has_overflowed) {
+      return StackOverflow(isolate, call_origin);
+    } else if (check.InterruptRequested()) {
+      const bool was_one_byte =
+          String::IsOneByteRepresentationUnderneath(*subject_string_out);
+      Object result;
+      {
+        AllowHeapAllocation yes_gc;
+        result = isolate->stack_guard()->HandleInterrupts();
+      }
+      if (result.IsException(isolate)) {
+        return IrregexpInterpreter::EXCEPTION;
+      }
+
+      // If we changed between a LATIN1 and a UC16 string, we need to restart
+      // regexp matching with the appropriate template instantiation of
+      // RawMatch.
+      if (String::IsOneByteRepresentationUnderneath(*subject_handle) !=
+          was_one_byte) {
+        return IrregexpInterpreter::RETRY;
+      }
+
+      UpdateCodeAndSubjectReferences(
+          isolate, code_handle, subject_handle, code_array_out, code_base_out,
+          pc_out, subject_string_out, subject_string_vector_out);
+    }
   }
 
   return IrregexpInterpreter::SUCCESS;
 }
 
-template <typename Char>
-void UpdateCodeAndSubjectReferences(Isolate* isolate,
-                                    Handle<ByteArray> code_array,
-                                    Handle<String> subject_string,
-                                    const byte** code_base_out,
-                                    const byte** pc_out,
-                                    Vector<const Char>* subject_string_out) {
-  DisallowHeapAllocation no_gc;
+// If computed gotos are supported by the compiler, we can take the addresses
+// of labels directly in C/C++. Every bytecode handler has its own label, and
+// we store the addresses in a dispatch table indexed by bytecode. To execute
+// the next handler we simply jump (goto) directly to its address.
+#if V8_USE_COMPUTED_GOTO
+#define BC_LABEL(name) BC_##name:
+#define DECODE()                                                   \
+  do {                                                             \
+    next_insn = Load32Aligned(next_pc);                            \
+    next_handler_addr = dispatch_table[next_insn & BYTECODE_MASK]; \
+  } while (false)
+#define DISPATCH()  \
+  pc = next_pc;     \
+  insn = next_insn; \
+  goto* next_handler_addr
+// Without computed goto support, we fall back to a simple switch-based
+// dispatch (a large switch statement inside a loop, with a case for every
+// bytecode).
+#else  // V8_USE_COMPUTED_GOTO
+#define BC_LABEL(name) case BC_##name:
+#define DECODE() next_insn = Load32Aligned(next_pc)
+#define DISPATCH()  \
+  pc = next_pc;     \
+  insn = next_insn; \
+  break
+#endif  // V8_USE_COMPUTED_GOTO
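
As a standalone illustration of the dispatch pattern these macros implement, here is a minimal token-threaded interpreter using the GNU &&label / goto* extensions (the toy bytecodes and names are mine, not V8's):

#include <cstdint>

enum ToyBytecode : uint8_t { TOY_INC, TOY_DEC, TOY_HALT };

int RunToy(const uint8_t* pc) {
  // One label per bytecode; the table is indexed by the opcode itself.
  static const void* const dispatch_table[] = {&&inc, &&dec, &&halt};
  int acc = 0;
  goto* dispatch_table[*pc];
inc:
  ++acc;
  goto* dispatch_table[*++pc];
dec:
  --acc;
  goto* dispatch_table[*++pc];
halt:
  return acc;
}

// const uint8_t program[] = {TOY_INC, TOY_INC, TOY_DEC, TOY_HALT};
// RunToy(program) == 1.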
+
+// ADVANCE/SET_PC_FROM_OFFSET are separated from DISPATCH, because ideally some
+// instructions can be executed between ADVANCE/SET_PC_FROM_OFFSET and DISPATCH.
+// We want those two macros as far apart as possible, because the goto in
+// DISPATCH is dependent on a memory load in ADVANCE/SET_PC_FROM_OFFSET. If we
+// don't hit the cache and have to fetch the next handler address from physical
+// memory, instructions between ADVANCE/SET_PC_FROM_OFFSET and DISPATCH can
+// potentially be executed unconditionally, reducing memory stall.
+#define ADVANCE(name)                             \
+  next_pc = pc + RegExpBytecodeLength(BC_##name); \
+  DECODE()
+#define SET_PC_FROM_OFFSET(offset) \
+  next_pc = code_base + offset;    \
+  DECODE()
 
-  if (*code_base_out != code_array->GetDataStartAddress()) {
-    const intptr_t pc_offset = *pc_out - *code_base_out;
-    DCHECK_GT(pc_offset, 0);
-    *code_base_out = code_array->GetDataStartAddress();
-    *pc_out = *code_base_out + pc_offset;
-  }
-
-  DCHECK(subject_string->IsFlat());
-  *subject_string_out = subject_string->GetCharVector<Char>(no_gc);
-}
+#ifdef DEBUG
+#define BYTECODE(name)                                                \
+  BC_LABEL(name)                                                      \
+  MaybeTraceInterpreter(code_base, pc, backtrack_stack.sp(), current, \
+                        current_char, RegExpBytecodeLength(BC_##name), #name);
+#else
+#define BYTECODE(name) BC_LABEL(name)
+#endif  // DEBUG
 
 template <typename Char>
-IrregexpInterpreter::Result RawMatch(Isolate* isolate,
-                                     Handle<ByteArray> code_array,
-                                     Handle<String> subject_string,
+IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
+                                     String subject_string,
                                      Vector<const Char> subject, int* registers,
-                                     int current, uint32_t current_char) {
+                                     int current, uint32_t current_char,
+                                     RegExp::CallOrigin call_origin) {
   DisallowHeapAllocation no_gc;
 
-  const byte* pc = code_array->GetDataStartAddress();
+#if V8_USE_COMPUTED_GOTO
+#define DECLARE_DISPATCH_TABLE_ENTRY(name, code, length) &&BC_##name,
+  static const void* const dispatch_table[] = {
+      BYTECODE_ITERATOR(DECLARE_DISPATCH_TABLE_ENTRY)};
+#undef DECLARE_DISPATCH_TABLE_ENTRY
+#endif
+
+  const byte* pc = code_array.GetDataStartAddress();
   const byte* code_base = pc;
 
   BacktrackStack backtrack_stack;
@@ -224,457 +313,572 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
     PrintF("\n\nStart bytecode interpreter\n\n");
   }
 #endif
+
   while (true) {
-    const int32_t insn = Load32Aligned(pc);
+    const byte* next_pc = pc;
+    int32_t insn;
+    int32_t next_insn;
+#if V8_USE_COMPUTED_GOTO
+    const void* next_handler_addr;
+    DECODE();
+    DISPATCH();
+#else
+    insn = Load32Aligned(pc);
     switch (insn & BYTECODE_MASK) {
-      BYTECODE(BREAK) { UNREACHABLE(); }
-      BYTECODE(PUSH_CP) {
-        backtrack_stack.push(current);
-        pc += BC_PUSH_CP_LENGTH;
-        break;
-      }
-      BYTECODE(PUSH_BT) {
-        backtrack_stack.push(Load32Aligned(pc + 4));
-        pc += BC_PUSH_BT_LENGTH;
-        break;
-      }
-      BYTECODE(PUSH_REGISTER) {
-        backtrack_stack.push(registers[insn >> BYTECODE_SHIFT]);
-        pc += BC_PUSH_REGISTER_LENGTH;
-        break;
-      }
-      BYTECODE(SET_REGISTER) {
-        registers[insn >> BYTECODE_SHIFT] = Load32Aligned(pc + 4);
-        pc += BC_SET_REGISTER_LENGTH;
-        break;
-      }
-      BYTECODE(ADVANCE_REGISTER) {
-        registers[insn >> BYTECODE_SHIFT] += Load32Aligned(pc + 4);
-        pc += BC_ADVANCE_REGISTER_LENGTH;
-        break;
-      }
-      BYTECODE(SET_REGISTER_TO_CP) {
-        registers[insn >> BYTECODE_SHIFT] = current + Load32Aligned(pc + 4);
-        pc += BC_SET_REGISTER_TO_CP_LENGTH;
-        break;
-      }
-      BYTECODE(SET_CP_TO_REGISTER) {
-        current = registers[insn >> BYTECODE_SHIFT];
-        pc += BC_SET_CP_TO_REGISTER_LENGTH;
-        break;
-      }
-      BYTECODE(SET_REGISTER_TO_SP) {
-        registers[insn >> BYTECODE_SHIFT] = backtrack_stack.sp();
-        pc += BC_SET_REGISTER_TO_SP_LENGTH;
-        break;
-      }
-      BYTECODE(SET_SP_TO_REGISTER) {
-        backtrack_stack.set_sp(registers[insn >> BYTECODE_SHIFT]);
-        pc += BC_SET_SP_TO_REGISTER_LENGTH;
-        break;
-      }
-      BYTECODE(POP_CP) {
-        current = backtrack_stack.pop();
-        pc += BC_POP_CP_LENGTH;
-        break;
-      }
-      BYTECODE(POP_BT) {
-        IrregexpInterpreter::Result return_code =
-            HandleInterrupts(isolate, subject_string);
-        if (return_code != IrregexpInterpreter::SUCCESS) return return_code;
-
-        UpdateCodeAndSubjectReferences(isolate, code_array, subject_string,
-                                       &code_base, &pc, &subject);
-
-        pc = code_base + backtrack_stack.pop();
-        break;
-      }
-      BYTECODE(POP_REGISTER) {
-        registers[insn >> BYTECODE_SHIFT] = backtrack_stack.pop();
-        pc += BC_POP_REGISTER_LENGTH;
-        break;
-      }
-      BYTECODE(FAIL) { return IrregexpInterpreter::FAILURE; }
-      BYTECODE(SUCCEED) { return IrregexpInterpreter::SUCCESS; }
-      BYTECODE(ADVANCE_CP) {
-        current += insn >> BYTECODE_SHIFT;
-        pc += BC_ADVANCE_CP_LENGTH;
-        break;
-      }
-      BYTECODE(GOTO) {
-        pc = code_base + Load32Aligned(pc + 4);
-        break;
-      }
-      BYTECODE(ADVANCE_CP_AND_GOTO) {
-        current += insn >> BYTECODE_SHIFT;
-        pc = code_base + Load32Aligned(pc + 4);
-        break;
-      }
-      BYTECODE(CHECK_GREEDY) {
-        if (current == backtrack_stack.peek()) {
-          backtrack_stack.pop();
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          pc += BC_CHECK_GREEDY_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(LOAD_CURRENT_CHAR) {
-        int pos = current + (insn >> BYTECODE_SHIFT);
-        if (pos >= subject.length() || pos < 0) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          current_char = subject[pos];
-          pc += BC_LOAD_CURRENT_CHAR_LENGTH;
-        }
-        break;
+#endif  // V8_USE_COMPUTED_GOTO
+    BYTECODE(BREAK) { UNREACHABLE(); }
+    BYTECODE(PUSH_CP) {
+      ADVANCE(PUSH_CP);
+      backtrack_stack.push(current);
+      DISPATCH();
+    }
+    BYTECODE(PUSH_BT) {
+      ADVANCE(PUSH_BT);
+      backtrack_stack.push(Load32Aligned(pc + 4));
+      DISPATCH();
+    }
+    BYTECODE(PUSH_REGISTER) {
+      ADVANCE(PUSH_REGISTER);
+      backtrack_stack.push(registers[insn >> BYTECODE_SHIFT]);
+      DISPATCH();
+    }
+    BYTECODE(SET_REGISTER) {
+      ADVANCE(SET_REGISTER);
+      registers[insn >> BYTECODE_SHIFT] = Load32Aligned(pc + 4);
+      DISPATCH();
+    }
+    BYTECODE(ADVANCE_REGISTER) {
+      ADVANCE(ADVANCE_REGISTER);
+      registers[insn >> BYTECODE_SHIFT] += Load32Aligned(pc + 4);
+      DISPATCH();
+    }
+    BYTECODE(SET_REGISTER_TO_CP) {
+      ADVANCE(SET_REGISTER_TO_CP);
+      registers[insn >> BYTECODE_SHIFT] = current + Load32Aligned(pc + 4);
+      DISPATCH();
+    }
+    BYTECODE(SET_CP_TO_REGISTER) {
+      ADVANCE(SET_CP_TO_REGISTER);
+      current = registers[insn >> BYTECODE_SHIFT];
+      DISPATCH();
+    }
+    BYTECODE(SET_REGISTER_TO_SP) {
+      ADVANCE(SET_REGISTER_TO_SP);
+      registers[insn >> BYTECODE_SHIFT] = backtrack_stack.sp();
+      DISPATCH();
+    }
+    BYTECODE(SET_SP_TO_REGISTER) {
+      ADVANCE(SET_SP_TO_REGISTER);
+      backtrack_stack.set_sp(registers[insn >> BYTECODE_SHIFT]);
+      DISPATCH();
+    }
+    BYTECODE(POP_CP) {
+      ADVANCE(POP_CP);
+      current = backtrack_stack.pop();
+      DISPATCH();
+    }
+    BYTECODE(POP_BT) {
+      IrregexpInterpreter::Result return_code =
+          HandleInterrupts(isolate, call_origin, &code_array, &subject_string,
+                           &code_base, &subject, &pc);
+      if (return_code != IrregexpInterpreter::SUCCESS) return return_code;
+
+      SET_PC_FROM_OFFSET(backtrack_stack.pop());
+      DISPATCH();
+    }
+    BYTECODE(POP_REGISTER) {
+      ADVANCE(POP_REGISTER);
+      registers[insn >> BYTECODE_SHIFT] = backtrack_stack.pop();
+      DISPATCH();
+    }
+    BYTECODE(FAIL) { return IrregexpInterpreter::FAILURE; }
+    BYTECODE(SUCCEED) { return IrregexpInterpreter::SUCCESS; }
+    BYTECODE(ADVANCE_CP) {
+      ADVANCE(ADVANCE_CP);
+      current += insn >> BYTECODE_SHIFT;
+      DISPATCH();
+    }
+    BYTECODE(GOTO) {
+      SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      DISPATCH();
+    }
+    BYTECODE(ADVANCE_CP_AND_GOTO) {
+      SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      current += insn >> BYTECODE_SHIFT;
+      DISPATCH();
+    }
+    BYTECODE(CHECK_GREEDY) {
+      if (current == backtrack_stack.peek()) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+        backtrack_stack.pop();
+      } else {
+        ADVANCE(CHECK_GREEDY);
       }
-      BYTECODE(LOAD_CURRENT_CHAR_UNCHECKED) {
-        int pos = current + (insn >> BYTECODE_SHIFT);
+      DISPATCH();
+    }
+    BYTECODE(LOAD_CURRENT_CHAR) {
+      int pos = current + (insn >> BYTECODE_SHIFT);
+      if (pos >= subject.length() || pos < 0) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(LOAD_CURRENT_CHAR);
         current_char = subject[pos];
-        pc += BC_LOAD_CURRENT_CHAR_UNCHECKED_LENGTH;
-        break;
-      }
-      BYTECODE(LOAD_2_CURRENT_CHARS) {
-        int pos = current + (insn >> BYTECODE_SHIFT);
-        if (pos + 2 > subject.length() || pos < 0) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          Char next = subject[pos + 1];
-          current_char =
-              (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
-          pc += BC_LOAD_2_CURRENT_CHARS_LENGTH;
-        }
-        break;
       }
-      BYTECODE(LOAD_2_CURRENT_CHARS_UNCHECKED) {
-        int pos = current + (insn >> BYTECODE_SHIFT);
+      DISPATCH();
+    }
+    BYTECODE(LOAD_CURRENT_CHAR_UNCHECKED) {
+      ADVANCE(LOAD_CURRENT_CHAR_UNCHECKED);
+      int pos = current + (insn >> BYTECODE_SHIFT);
+      current_char = subject[pos];
+      DISPATCH();
+    }
+    BYTECODE(LOAD_2_CURRENT_CHARS) {
+      int pos = current + (insn >> BYTECODE_SHIFT);
+      if (pos + 2 > subject.length() || pos < 0) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(LOAD_2_CURRENT_CHARS);
         Char next = subject[pos + 1];
         current_char = (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
-        pc += BC_LOAD_2_CURRENT_CHARS_UNCHECKED_LENGTH;
-        break;
-      }
-      BYTECODE(LOAD_4_CURRENT_CHARS) {
-        DCHECK_EQ(1, sizeof(Char));
-        int pos = current + (insn >> BYTECODE_SHIFT);
-        if (pos + 4 > subject.length() || pos < 0) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          Char next1 = subject[pos + 1];
-          Char next2 = subject[pos + 2];
-          Char next3 = subject[pos + 3];
-          current_char =
-              (subject[pos] | (next1 << 8) | (next2 << 16) | (next3 << 24));
-          pc += BC_LOAD_4_CURRENT_CHARS_LENGTH;
-        }
-        break;
       }
-      BYTECODE(LOAD_4_CURRENT_CHARS_UNCHECKED) {
-        DCHECK_EQ(1, sizeof(Char));
-        int pos = current + (insn >> BYTECODE_SHIFT);
+      DISPATCH();
+    }
+    BYTECODE(LOAD_2_CURRENT_CHARS_UNCHECKED) {
+      ADVANCE(LOAD_2_CURRENT_CHARS_UNCHECKED);
+      int pos = current + (insn >> BYTECODE_SHIFT);
+      Char next = subject[pos + 1];
+      current_char = (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
+      DISPATCH();
+    }
+    BYTECODE(LOAD_4_CURRENT_CHARS) {
+      DCHECK_EQ(1, sizeof(Char));
+      int pos = current + (insn >> BYTECODE_SHIFT);
+      if (pos + 4 > subject.length() || pos < 0) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(LOAD_4_CURRENT_CHARS);
         Char next1 = subject[pos + 1];
         Char next2 = subject[pos + 2];
         Char next3 = subject[pos + 3];
         current_char =
             (subject[pos] | (next1 << 8) | (next2 << 16) | (next3 << 24));
-        pc += BC_LOAD_4_CURRENT_CHARS_UNCHECKED_LENGTH;
-        break;
-      }
-      BYTECODE(CHECK_4_CHARS) {
-        uint32_t c = Load32Aligned(pc + 4);
-        if (c == current_char) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_CHECK_4_CHARS_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(CHECK_CHAR) {
-        uint32_t c = (insn >> BYTECODE_SHIFT);
-        if (c == current_char) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          pc += BC_CHECK_CHAR_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(CHECK_NOT_4_CHARS) {
-        uint32_t c = Load32Aligned(pc + 4);
-        if (c != current_char) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_CHECK_NOT_4_CHARS_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(CHECK_NOT_CHAR) {
-        uint32_t c = (insn >> BYTECODE_SHIFT);
-        if (c != current_char) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          pc += BC_CHECK_NOT_CHAR_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(AND_CHECK_4_CHARS) {
-        uint32_t c = Load32Aligned(pc + 4);
-        if (c == (current_char & Load32Aligned(pc + 8))) {
-          pc = code_base + Load32Aligned(pc + 12);
-        } else {
-          pc += BC_AND_CHECK_4_CHARS_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(AND_CHECK_CHAR) {
-        uint32_t c = (insn >> BYTECODE_SHIFT);
-        if (c == (current_char & Load32Aligned(pc + 4))) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_AND_CHECK_CHAR_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(AND_CHECK_NOT_4_CHARS) {
-        uint32_t c = Load32Aligned(pc + 4);
-        if (c != (current_char & Load32Aligned(pc + 8))) {
-          pc = code_base + Load32Aligned(pc + 12);
-        } else {
-          pc += BC_AND_CHECK_NOT_4_CHARS_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(AND_CHECK_NOT_CHAR) {
-        uint32_t c = (insn >> BYTECODE_SHIFT);
-        if (c != (current_char & Load32Aligned(pc + 4))) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_AND_CHECK_NOT_CHAR_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(MINUS_AND_CHECK_NOT_CHAR) {
-        uint32_t c = (insn >> BYTECODE_SHIFT);
-        uint32_t minus = Load16Aligned(pc + 4);
-        uint32_t mask = Load16Aligned(pc + 6);
-        if (c != ((current_char - minus) & mask)) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_MINUS_AND_CHECK_NOT_CHAR_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(CHECK_CHAR_IN_RANGE) {
-        uint32_t from = Load16Aligned(pc + 4);
-        uint32_t to = Load16Aligned(pc + 6);
-        if (from <= current_char && current_char <= to) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_CHECK_CHAR_IN_RANGE_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(CHECK_CHAR_NOT_IN_RANGE) {
-        uint32_t from = Load16Aligned(pc + 4);
-        uint32_t to = Load16Aligned(pc + 6);
-        if (from > current_char || current_char > to) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_CHECK_CHAR_NOT_IN_RANGE_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(CHECK_BIT_IN_TABLE) {
-        int mask = RegExpMacroAssembler::kTableMask;
-        byte b = pc[8 + ((current_char & mask) >> kBitsPerByteLog2)];
-        int bit = (current_char & (kBitsPerByte - 1));
-        if ((b & (1 << bit)) != 0) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          pc += BC_CHECK_BIT_IN_TABLE_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(CHECK_LT) {
-        uint32_t limit = (insn >> BYTECODE_SHIFT);
-        if (current_char < limit) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          pc += BC_CHECK_LT_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(CHECK_GT) {
-        uint32_t limit = (insn >> BYTECODE_SHIFT);
-        if (current_char > limit) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          pc += BC_CHECK_GT_LENGTH;
-        }
-        break;
       }
-      BYTECODE(CHECK_REGISTER_LT) {
-        if (registers[insn >> BYTECODE_SHIFT] < Load32Aligned(pc + 4)) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_CHECK_REGISTER_LT_LENGTH;
-        }
-        break;
+      DISPATCH();
+    }
+    BYTECODE(LOAD_4_CURRENT_CHARS_UNCHECKED) {
+      ADVANCE(LOAD_4_CURRENT_CHARS_UNCHECKED);
+      DCHECK_EQ(1, sizeof(Char));
+      int pos = current + (insn >> BYTECODE_SHIFT);
+      Char next1 = subject[pos + 1];
+      Char next2 = subject[pos + 2];
+      Char next3 = subject[pos + 3];
+      current_char =
+          (subject[pos] | (next1 << 8) | (next2 << 16) | (next3 << 24));
+      DISPATCH();
+    }
+    BYTECODE(CHECK_4_CHARS) {
+      uint32_t c = Load32Aligned(pc + 4);
+      if (c == current_char) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      } else {
+        ADVANCE(CHECK_4_CHARS);
       }
-      BYTECODE(CHECK_REGISTER_GE) {
-        if (registers[insn >> BYTECODE_SHIFT] >= Load32Aligned(pc + 4)) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_CHECK_REGISTER_GE_LENGTH;
-        }
-        break;
+      DISPATCH();
+    }
+    BYTECODE(CHECK_CHAR) {
+      uint32_t c = (insn >> BYTECODE_SHIFT);
+      if (c == current_char) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(CHECK_CHAR);
       }
-      BYTECODE(CHECK_REGISTER_EQ_POS) {
-        if (registers[insn >> BYTECODE_SHIFT] == current) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          pc += BC_CHECK_REGISTER_EQ_POS_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(CHECK_NOT_REGS_EQUAL) {
-        if (registers[insn >> BYTECODE_SHIFT] ==
-            registers[Load32Aligned(pc + 4)]) {
-          pc += BC_CHECK_NOT_REGS_EQUAL_LENGTH;
-        } else {
-          pc = code_base + Load32Aligned(pc + 8);
-        }
-        break;
-      }
-      BYTECODE(CHECK_NOT_BACK_REF) {
-        int from = registers[insn >> BYTECODE_SHIFT];
-        int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
-        if (from >= 0 && len > 0) {
-          if (current + len > subject.length() ||
-              CompareChars(&subject[from], &subject[current], len) != 0) {
-            pc = code_base + Load32Aligned(pc + 4);
-            break;
-          }
-          current += len;
-        }
-        pc += BC_CHECK_NOT_BACK_REF_LENGTH;
-        break;
-      }
-      BYTECODE(CHECK_NOT_BACK_REF_BACKWARD) {
-        int from = registers[insn >> BYTECODE_SHIFT];
-        int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
-        if (from >= 0 && len > 0) {
-          if (current - len < 0 ||
-              CompareChars(&subject[from], &subject[current - len], len) != 0) {
-            pc = code_base + Load32Aligned(pc + 4);
-            break;
-          }
-          current -= len;
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_4_CHARS) {
+      uint32_t c = Load32Aligned(pc + 4);
+      if (c != current_char) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      } else {
+        ADVANCE(CHECK_NOT_4_CHARS);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_CHAR) {
+      uint32_t c = (insn >> BYTECODE_SHIFT);
+      if (c != current_char) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(CHECK_NOT_CHAR);
+      }
+      DISPATCH();
+    }
+    BYTECODE(AND_CHECK_4_CHARS) {
+      uint32_t c = Load32Aligned(pc + 4);
+      if (c == (current_char & Load32Aligned(pc + 8))) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 12));
+      } else {
+        ADVANCE(AND_CHECK_4_CHARS);
+      }
+      DISPATCH();
+    }
+    BYTECODE(AND_CHECK_CHAR) {
+      uint32_t c = (insn >> BYTECODE_SHIFT);
+      if (c == (current_char & Load32Aligned(pc + 4))) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      } else {
+        ADVANCE(AND_CHECK_CHAR);
+      }
+      DISPATCH();
+    }
+    BYTECODE(AND_CHECK_NOT_4_CHARS) {
+      uint32_t c = Load32Aligned(pc + 4);
+      if (c != (current_char & Load32Aligned(pc + 8))) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 12));
+      } else {
+        ADVANCE(AND_CHECK_NOT_4_CHARS);
+      }
+      DISPATCH();
+    }
+    BYTECODE(AND_CHECK_NOT_CHAR) {
+      uint32_t c = (insn >> BYTECODE_SHIFT);
+      if (c != (current_char & Load32Aligned(pc + 4))) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      } else {
+        ADVANCE(AND_CHECK_NOT_CHAR);
+      }
+      DISPATCH();
+    }
+    BYTECODE(MINUS_AND_CHECK_NOT_CHAR) {
+      uint32_t c = (insn >> BYTECODE_SHIFT);
+      uint32_t minus = Load16Aligned(pc + 4);
+      uint32_t mask = Load16Aligned(pc + 6);
+      if (c != ((current_char - minus) & mask)) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      } else {
+        ADVANCE(MINUS_AND_CHECK_NOT_CHAR);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_CHAR_IN_RANGE) {
+      uint32_t from = Load16Aligned(pc + 4);
+      uint32_t to = Load16Aligned(pc + 6);
+      if (from <= current_char && current_char <= to) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      } else {
+        ADVANCE(CHECK_CHAR_IN_RANGE);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_CHAR_NOT_IN_RANGE) {
+      uint32_t from = Load16Aligned(pc + 4);
+      uint32_t to = Load16Aligned(pc + 6);
+      if (from > current_char || current_char > to) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      } else {
+        ADVANCE(CHECK_CHAR_NOT_IN_RANGE);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_BIT_IN_TABLE) {
+      int mask = RegExpMacroAssembler::kTableMask;
+      byte b = pc[8 + ((current_char & mask) >> kBitsPerByteLog2)];
+      int bit = (current_char & (kBitsPerByte - 1));
+      if ((b & (1 << bit)) != 0) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(CHECK_BIT_IN_TABLE);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_LT) {
+      uint32_t limit = (insn >> BYTECODE_SHIFT);
+      if (current_char < limit) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(CHECK_LT);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_GT) {
+      uint32_t limit = (insn >> BYTECODE_SHIFT);
+      if (current_char > limit) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(CHECK_GT);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_REGISTER_LT) {
+      if (registers[insn >> BYTECODE_SHIFT] < Load32Aligned(pc + 4)) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      } else {
+        ADVANCE(CHECK_REGISTER_LT);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_REGISTER_GE) {
+      if (registers[insn >> BYTECODE_SHIFT] >= Load32Aligned(pc + 4)) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      } else {
+        ADVANCE(CHECK_REGISTER_GE);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_REGISTER_EQ_POS) {
+      if (registers[insn >> BYTECODE_SHIFT] == current) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(CHECK_REGISTER_EQ_POS);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_REGS_EQUAL) {
+      if (registers[insn >> BYTECODE_SHIFT] ==
+          registers[Load32Aligned(pc + 4)]) {
+        ADVANCE(CHECK_NOT_REGS_EQUAL);
+      } else {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_BACK_REF) {
+      int from = registers[insn >> BYTECODE_SHIFT];
+      int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+      if (from >= 0 && len > 0) {
+        if (current + len > subject.length() ||
+            CompareChars(&subject[from], &subject[current], len) != 0) {
+          SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+          DISPATCH();
         }
-        pc += BC_CHECK_NOT_BACK_REF_BACKWARD_LENGTH;
-        break;
-      }
-      BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE)
-      V8_FALLTHROUGH;
-      BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
-        bool unicode =
-            (insn & BYTECODE_MASK) == BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE;
-        int from = registers[insn >> BYTECODE_SHIFT];
-        int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
-        if (from >= 0 && len > 0) {
-          if (current + len > subject.length() ||
-              !BackRefMatchesNoCase(isolate, from, current, len, subject,
-                                    unicode)) {
-            pc = code_base + Load32Aligned(pc + 4);
-            break;
-          }
-          current += len;
+        current += len;
+      }
+      ADVANCE(CHECK_NOT_BACK_REF);
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_BACK_REF_BACKWARD) {
+      int from = registers[insn >> BYTECODE_SHIFT];
+      int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+      if (from >= 0 && len > 0) {
+        if (current - len < 0 ||
+            CompareChars(&subject[from], &subject[current - len], len) != 0) {
+          SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+          DISPATCH();
         }
-        pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
-        break;
-      }
-      BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD)
-      V8_FALLTHROUGH;
-      BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
-        bool unicode = (insn & BYTECODE_MASK) ==
-                       BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD;
-        int from = registers[insn >> BYTECODE_SHIFT];
-        int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
-        if (from >= 0 && len > 0) {
-          if (current - len < 0 ||
-              !BackRefMatchesNoCase(isolate, from, current - len, len, subject,
-                                    unicode)) {
-            pc = code_base + Load32Aligned(pc + 4);
-            break;
-          }
-          current -= len;
+        current -= len;
+      }
+      ADVANCE(CHECK_NOT_BACK_REF_BACKWARD);
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE) {
+      int from = registers[insn >> BYTECODE_SHIFT];
+      int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+      if (from >= 0 && len > 0) {
+        if (current + len > subject.length() ||
+            !BackRefMatchesNoCase(isolate, from, current, len, subject, true)) {
+          SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+          DISPATCH();
         }
-        pc += BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD_LENGTH;
-        break;
-      }
-      BYTECODE(CHECK_AT_START) {
-        if (current == 0) {
-          pc = code_base + Load32Aligned(pc + 4);
-        } else {
-          pc += BC_CHECK_AT_START_LENGTH;
+        current += len;
+      }
+      ADVANCE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE);
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
+      int from = registers[insn >> BYTECODE_SHIFT];
+      int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+      if (from >= 0 && len > 0) {
+        if (current + len > subject.length() ||
+            !BackRefMatchesNoCase(isolate, from, current, len, subject,
+                                  false)) {
+          SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+          DISPATCH();
         }
-        break;
+        current += len;
       }
-      BYTECODE(CHECK_NOT_AT_START) {
-        if (current + (insn >> BYTECODE_SHIFT) == 0) {
-          pc += BC_CHECK_NOT_AT_START_LENGTH;
-        } else {
-          pc = code_base + Load32Aligned(pc + 4);
+      ADVANCE(CHECK_NOT_BACK_REF_NO_CASE);
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD) {
+      int from = registers[insn >> BYTECODE_SHIFT];
+      int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+      if (from >= 0 && len > 0) {
+        if (current - len < 0 ||
+            !BackRefMatchesNoCase(isolate, from, current - len, len, subject,
+                                  true)) {
+          SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+          DISPATCH();
         }
-        break;
+        current -= len;
       }
-      BYTECODE(SET_CURRENT_POSITION_FROM_END) {
-        int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
-        if (subject.length() - current > by) {
-          current = subject.length() - by;
-          current_char = subject[current - 1];
+      ADVANCE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD);
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
+      int from = registers[insn >> BYTECODE_SHIFT];
+      int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+      if (from >= 0 && len > 0) {
+        if (current - len < 0 ||
+            !BackRefMatchesNoCase(isolate, from, current - len, len, subject,
+                                  false)) {
+          SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+          DISPATCH();
         }
-        pc += BC_SET_CURRENT_POSITION_FROM_END_LENGTH;
-        break;
+        current -= len;
       }
+      ADVANCE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD);
+      DISPATCH();
+    }
+    BYTECODE(CHECK_AT_START) {
+      if (current + (insn >> BYTECODE_SHIFT) == 0) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(CHECK_AT_START);
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_NOT_AT_START) {
+      if (current + (insn >> BYTECODE_SHIFT) == 0) {
+        ADVANCE(CHECK_NOT_AT_START);
+      } else {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      }
+      DISPATCH();
+    }
+    BYTECODE(SET_CURRENT_POSITION_FROM_END) {
+      ADVANCE(SET_CURRENT_POSITION_FROM_END);
+      int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
+      if (subject.length() - current > by) {
+        current = subject.length() - by;
+        current_char = subject[current - 1];
+      }
+      DISPATCH();
+    }
+    BYTECODE(CHECK_CURRENT_POSITION) {
+      int pos = current + (insn >> BYTECODE_SHIFT);
+      if (pos > subject.length() || pos < 0) {
+        SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+      } else {
+        ADVANCE(CHECK_CURRENT_POSITION);
+      }
+      DISPATCH();
+    }
+#if V8_USE_COMPUTED_GOTO
+// Lint tooling mishandles both !V8_USE_COMPUTED_GOTO and #ifndef
+// V8_USE_COMPUTED_GOTO here, so use an empty #if branch instead.
+#else
       default:
         UNREACHABLE();
-        break;
     }
+#endif  // V8_USE_COMPUTED_GOTO
   }
 }
 
 #undef BYTECODE
+#undef DISPATCH
+#undef DECODE
+#undef SET_PC_FROM_OFFSET
+#undef ADVANCE
+#undef BC_LABEL
+#undef V8_USE_COMPUTED_GOTO
 
 }  // namespace
 
+// static
+void IrregexpInterpreter::Disassemble(ByteArray byte_array,
+                                      const std::string& pattern) {
+  DisallowHeapAllocation no_gc;
+
+  PrintF("[generated bytecode for regexp pattern: '%s']\n", pattern.c_str());
+
+  const byte* const code_base = byte_array.GetDataStartAddress();
+  const int byte_array_length = byte_array.length();
+  ptrdiff_t offset = 0;
+
+  while (offset < byte_array_length) {
+    const byte* const pc = code_base + offset;
+    PrintF("%p  %4" V8PRIxPTRDIFF "  ", pc, offset);
+    DisassembleSingleBytecode(code_base, pc);
+    offset += RegExpBytecodeLength(*pc);
+  }
+}
+
 // static
 IrregexpInterpreter::Result IrregexpInterpreter::Match(
-    Isolate* isolate, Handle<ByteArray> code_array,
-    Handle<String> subject_string, int* registers, int start_position) {
-  DCHECK(subject_string->IsFlat());
+    Isolate* isolate, JSRegExp regexp, String subject_string, int* registers,
+    int registers_length, int start_position, RegExp::CallOrigin call_origin) {
+  if (FLAG_regexp_tier_up) {
+    regexp.MarkTierUpForNextExec();
+  }
+
+  bool is_one_byte = String::IsOneByteRepresentationUnderneath(subject_string);
+  ByteArray code_array = ByteArray::cast(regexp.Bytecode(is_one_byte));
 
-  // Note: Heap allocation *is* allowed in two situations:
+  return MatchInternal(isolate, code_array, subject_string, registers,
+                       registers_length, start_position, call_origin);
+}
+
+IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
+    Isolate* isolate, ByteArray code_array, String subject_string,
+    int* registers, int registers_length, int start_position,
+    RegExp::CallOrigin call_origin) {
+  DCHECK(subject_string.IsFlat());
+
+  // Note: Heap allocation *is* allowed in two situations when called from
+  // the runtime:
   // 1. When creating & throwing a stack overflow exception. The interpreter
   //    aborts afterwards, and thus possible-moved objects are never used.
   // 2. When handling interrupts. We manually relocate unhandlified references
   //    after interrupts have run.
   DisallowHeapAllocation no_gc;
 
+  // Reset registers to -1 (=undefined). This is necessary because registers
+  // are only written when a capture group matches; resetting them clears out
+  // results left over from previous matches. Filling every byte with 0xFF
+  // via memset yields the two's-complement value -1 in each int slot.
+  memset(registers, -1, sizeof(registers[0]) * registers_length);
+
   uc16 previous_char = '\n';
-  String::FlatContent subject_content = subject_string->GetFlatContent(no_gc);
+  String::FlatContent subject_content = subject_string.GetFlatContent(no_gc);
   if (subject_content.IsOneByte()) {
     Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
     if (start_position != 0) previous_char = subject_vector[start_position - 1];
     return RawMatch(isolate, code_array, subject_string, subject_vector,
-                    registers, start_position, previous_char);
+                    registers, start_position, previous_char, call_origin);
   } else {
     DCHECK(subject_content.IsTwoByte());
     Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
     if (start_position != 0) previous_char = subject_vector[start_position - 1];
     return RawMatch(isolate, code_array, subject_string, subject_vector,
-                    registers, start_position, previous_char);
+                    registers, start_position, previous_char, call_origin);
   }
 }
 
+// This method is called through an external reference from the
+// RegExpExecInternal builtin.
+IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs(
+    Address subject, int32_t start_position, Address, Address, int* registers,
+    int32_t registers_length, Address, RegExp::CallOrigin call_origin,
+    Isolate* isolate, Address regexp) {
+  DCHECK_NOT_NULL(isolate);
+  DCHECK_NOT_NULL(registers);
+  DCHECK(call_origin == RegExp::CallOrigin::kFromJs);
+
+  DisallowHeapAllocation no_gc;
+  DisallowJavascriptExecution no_js(isolate);
+
+  String subject_string = String::cast(Object(subject));
+  JSRegExp regexp_obj = JSRegExp::cast(Object(regexp));
+
+  return Match(isolate, regexp_obj, subject_string, registers, registers_length,
+               start_position, call_origin);
+}
+
+IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromRuntime(
+    Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject_string,
+    int* registers, int registers_length, int start_position) {
+  return Match(isolate, *regexp, *subject_string, registers, registers_length,
+               start_position, RegExp::CallOrigin::kFromRuntime);
+}
+
 }  // namespace internal
 }  // namespace v8
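
Note on the dispatch rewrite above: every Irregexp instruction packs its opcode and primary argument into a single aligned 32-bit word (handlers decode it as `insn & BYTECODE_MASK` and `insn >> BYTECODE_SHIFT`), and the BYTECODE/ADVANCE/DISPATCH macros expand either to a token-threaded computed-goto loop (GCC/Clang labels-as-values) or to a conventional switch. Note also that interrupts are serviced only in POP_BT, i.e. on backtracks, which bounds how long the loop can run between checks. The standalone sketch below shows the two ideas for a toy two-instruction machine; the mask, shift, and opcode values are made up, and the real encodings and the V8_USE_COMPUTED_GOTO gate live in the V8 sources.

    #include <cstdint>
    #include <cstdio>

    // Illustrative constants only; V8 defines the real mask and shift.
    constexpr uint32_t kBytecodeMask = 0xFF;
    constexpr int kBytecodeShift = 8;

    // Opcodes for a two-instruction toy machine.
    enum : uint32_t { kAdvance = 0, kHalt = 1 };

    int Run(const uint32_t* pc) {
      // Token-threaded dispatch (GCC/Clang labels-as-values): each handler
      // jumps directly to the next handler instead of looping over a switch.
      static const void* const kDispatchTable[] = {&&ADVANCE, &&HALT};
      int position = 0;
      goto *kDispatchTable[*pc & kBytecodeMask];

    ADVANCE: {
      uint32_t insn = *pc++;
      position += insn >> kBytecodeShift;  // payload rides in the upper bits
      goto *kDispatchTable[*pc & kBytecodeMask];
    }
    HALT:
      return position;
    }

    int main() {
      const uint32_t program[] = {(3u << kBytecodeShift) | kAdvance, kHalt};
      std::printf("%d\n", Run(program));  // prints 3
      return 0;
    }
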
diff --git a/deps/v8/src/regexp/regexp-interpreter.h b/deps/v8/src/regexp/regexp-interpreter.h
index ad27dcd2961176..fbc5a3b29069df 100644
--- a/deps/v8/src/regexp/regexp-interpreter.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -12,7 +12,7 @@
 namespace v8 {
 namespace internal {
 
-class V8_EXPORT_PRIVATE IrregexpInterpreter {
+class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
  public:
   enum Result {
     FAILURE = RegExp::kInternalRegExpFailure,
@@ -21,10 +21,37 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter {
     RETRY = RegExp::kInternalRegExpRetry,
   };
 
-  // The caller is responsible for initializing registers before each call.
-  static Result Match(Isolate* isolate, Handle<ByteArray> code_array,
-                      Handle<String> subject_string, int* registers,
-                      int start_position);
+  // If a stack overflow occurs, a stack overflow exception is created and
+  // EXCEPTION is returned.
+  static Result MatchForCallFromRuntime(Isolate* isolate,
+                                        Handle<JSRegExp> regexp,
+                                        Handle<String> subject_string,
+                                        int* registers, int registers_length,
+                                        int start_position);
+
+  // If a stack overflow occurs, EXCEPTION is returned. The caller is
+  // responsible for creating the exception.
+  // The input_start, input_end and backtrack_stack arguments are unused;
+  // they are only passed to match the signature of the native irregexp code.
+  static Result MatchForCallFromJs(Address subject, int32_t start_position,
+                                   Address input_start, Address input_end,
+                                   int* registers, int32_t registers_length,
+                                   Address backtrack_stack,
+                                   RegExp::CallOrigin call_origin,
+                                   Isolate* isolate, Address regexp);
+
+  static Result MatchInternal(Isolate* isolate, ByteArray code_array,
+                              String subject_string, int* registers,
+                              int registers_length, int start_position,
+                              RegExp::CallOrigin call_origin);
+
+  static void Disassemble(ByteArray byte_array, const std::string& pattern);
+
+ private:
+  static Result Match(Isolate* isolate, JSRegExp regexp, String subject_string,
+                      int* registers, int registers_length, int start_position,
+                      RegExp::CallOrigin call_origin);
 };
 
 }  // namespace internal
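
The header now exposes two tiers of entry points: MatchForCallFromRuntime takes handles and may allocate (for the stack overflow exception), while MatchForCallFromJs takes raw Addresses, never allocates, and leaves exception creation to its caller. Both funnel into the private Match. The Result values reuse RegExp's internal sentinels; the sketch below mirrors their assumed numeric values (-2, -1, 0, 1, not confirmed here) to show how a caller branches on them.

    #include <cstdio>

    // Assumed mirror of IrregexpInterpreter::Result; the authoritative
    // values come from the RegExp::kInternalRegExp* constants.
    enum class Result : int {
      kRetry = -2,      // redo the call through the runtime (interrupt pending)
      kException = -1,  // an exception must be raised (or is already pending)
      kFailure = 0,     // executed to completion, no match
      kSuccess = 1,     // match found, capture registers are valid
    };

    void HandleResult(Result r) {
      switch (r) {
        case Result::kRetry:     std::puts("re-enter via runtime"); break;
        case Result::kException: std::puts("propagate exception");  break;
        case Result::kFailure:   std::puts("no match");             break;
        case Result::kSuccess:   std::puts("match");                break;
      }
    }
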
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index db9c5af5695141..5dca04a18cc277 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -162,24 +162,19 @@ void RegExpMacroAssemblerTracer::ReadStackPointerFromRegister(int reg) {
   assembler_->ReadStackPointerFromRegister(reg);
 }
 
-
-void RegExpMacroAssemblerTracer::LoadCurrentCharacter(int cp_offset,
-                                                      Label* on_end_of_input,
-                                                      bool check_bounds,
-                                                      int characters) {
+void RegExpMacroAssemblerTracer::LoadCurrentCharacterImpl(
+    int cp_offset, Label* on_end_of_input, bool check_bounds, int characters,
+    int eats_at_least) {
   const char* check_msg = check_bounds ? "" : " (unchecked)";
-  PrintF(" LoadCurrentCharacter(cp_offset=%d, label[%08x]%s (%d chars));\n",
-         cp_offset,
-         LabelToInt(on_end_of_input),
-         check_msg,
-         characters);
-  assembler_->LoadCurrentCharacter(cp_offset,
-                                   on_end_of_input,
-                                   check_bounds,
-                                   characters);
+  PrintF(
+      " LoadCurrentCharacter(cp_offset=%d, label[%08x]%s (%d chars) (eats at "
+      "least %d));\n",
+      cp_offset, LabelToInt(on_end_of_input), check_msg, characters,
+      eats_at_least);
+  assembler_->LoadCurrentCharacter(cp_offset, on_end_of_input, check_bounds,
+                                   characters, eats_at_least);
 }
 
-
 class PrintablePrinter {
  public:
   explicit PrintablePrinter(uc16 character) : character_(character) { }
@@ -232,13 +227,13 @@ void RegExpMacroAssemblerTracer::CheckCharacter(unsigned c, Label* on_equal) {
   assembler_->CheckCharacter(c, on_equal);
 }
 
-
-void RegExpMacroAssemblerTracer::CheckAtStart(Label* on_at_start) {
-  PrintF(" CheckAtStart(label[%08x]);\n", LabelToInt(on_at_start));
-  assembler_->CheckAtStart(on_at_start);
+void RegExpMacroAssemblerTracer::CheckAtStart(int cp_offset,
+                                              Label* on_at_start) {
+  PrintF(" CheckAtStart(cp_offset=%d, label[%08x]);\n", cp_offset,
+         LabelToInt(on_at_start));
+  assembler_->CheckAtStart(cp_offset, on_at_start);
 }
 
-
 void RegExpMacroAssemblerTracer::CheckNotAtStart(int cp_offset,
                                                  Label* on_not_at_start) {
   PrintF(" CheckNotAtStart(cp_offset=%d, label[%08x]);\n", cp_offset,
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
index d0b68bd59d5c26..2a44146e73895b 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
@@ -22,13 +22,13 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
   void AdvanceRegister(int reg, int by) override;  // r[reg] += by.
   void Backtrack() override;
   void Bind(Label* label) override;
-  void CheckAtStart(Label* on_at_start) override;
   void CheckCharacter(unsigned c, Label* on_equal) override;
   void CheckCharacterAfterAnd(unsigned c, unsigned and_with,
                               Label* on_equal) override;
   void CheckCharacterGT(uc16 limit, Label* on_greater) override;
   void CheckCharacterLT(uc16 limit, Label* on_less) override;
   void CheckGreedyLoop(Label* on_tos_equals_current_position) override;
+  void CheckAtStart(int cp_offset, Label* on_at_start) override;
   void CheckNotAtStart(int cp_offset, Label* on_not_at_start) override;
   void CheckNotBackReference(int start_reg, bool read_backward,
                              Label* on_no_match) override;
@@ -53,9 +53,9 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
   void IfRegisterLT(int reg, int comparand, Label* if_lt) override;
   void IfRegisterEqPos(int reg, Label* if_eq) override;
   IrregexpImplementation Implementation() override;
-  void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
-                            bool check_bounds = true,
-                            int characters = 1) override;
+  void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                bool check_bounds, int characters,
+                                int eats_at_least) override;
   void PopCurrentPosition() override;
   void PopRegister(int register_index) override;
   void PushBacktrack(Label* label) override;
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 68fa16db6118a2..96fb53d2a0464c 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -85,6 +85,20 @@ void RegExpMacroAssembler::CheckPosition(int cp_offset,
   LoadCurrentCharacter(cp_offset, on_outside_input, true);
 }
 
+void RegExpMacroAssembler::LoadCurrentCharacter(int cp_offset,
+                                                Label* on_end_of_input,
+                                                bool check_bounds,
+                                                int characters,
+                                                int eats_at_least) {
+  // By default, eats_at_least = characters.
+  if (eats_at_least == kUseCharactersValue) {
+    eats_at_least = characters;
+  }
+
+  LoadCurrentCharacterImpl(cp_offset, on_end_of_input, check_bounds, characters,
+                           eats_at_least);
+}
+
 bool RegExpMacroAssembler::CheckSpecialCharacterClass(uc16 type,
                                                       Label* on_no_match) {
   return false;
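
LoadCurrentCharacter is now a non-virtual base-class method that resolves the kUseCharactersValue sentinel (eats_at_least defaults to characters) and then calls the pure-virtual LoadCurrentCharacterImpl. Splitting the two keeps default-argument logic out of virtual dispatch, where per-override defaults are a classic footgun. A sketch of the idiom, with illustrative class names:

    #include <cstdio>

    class MacroAssemblerBase {
     public:
      static constexpr int kUseCharactersValue = -1;
      virtual ~MacroAssemblerBase() = default;

      // Non-virtual entry point: defaults are resolved exactly once, here.
      void LoadCurrentCharacter(int cp_offset, int characters = 1,
                                int eats_at_least = kUseCharactersValue) {
        if (eats_at_least == kUseCharactersValue) eats_at_least = characters;
        LoadCurrentCharacterImpl(cp_offset, characters, eats_at_least);
      }

     protected:
      // Overriders never see the sentinel, only fully resolved values.
      virtual void LoadCurrentCharacterImpl(int cp_offset, int characters,
                                            int eats_at_least) = 0;
    };

    class PrintingAssembler final : public MacroAssemblerBase {
     protected:
      void LoadCurrentCharacterImpl(int cp_offset, int characters,
                                    int eats_at_least) override {
        std::printf("load cp_offset=%d chars=%d eats>=%d\n", cp_offset,
                    characters, eats_at_least);
      }
    };

    int main() {
      PrintingAssembler masm;
      masm.LoadCurrentCharacter(0);        // eats>=1 (defaults to characters)
      masm.LoadCurrentCharacter(0, 2, 5);  // explicit eats_at_least
      return 0;
    }
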
@@ -129,32 +143,46 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
   }
 }
 
+// This method may only be called after an interrupt.
 int NativeRegExpMacroAssembler::CheckStackGuardState(
-    Isolate* isolate, int start_index, bool is_direct_call,
+    Isolate* isolate, int start_index, RegExp::CallOrigin call_origin,
     Address* return_address, Code re_code, Address* subject,
     const byte** input_start, const byte** input_end) {
   DisallowHeapAllocation no_gc;
 
   DCHECK(re_code.raw_instruction_start() <= *return_address);
   DCHECK(*return_address <= re_code.raw_instruction_end());
-  int return_value = 0;
-  // Prepare for possible GC.
-  HandleScope handles(isolate);
-  Handle<Code> code_handle(re_code, isolate);
-  Handle<String> subject_handle(String::cast(Object(*subject)), isolate);
-  bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject_handle);
-
   StackLimitCheck check(isolate);
   bool js_has_overflowed = check.JsHasOverflowed();
 
-  if (is_direct_call) {
+  if (call_origin == RegExp::CallOrigin::kFromJs) {
     // Direct calls from JavaScript can be interrupted in two ways:
     // 1. A real stack overflow, in which case we let the caller throw the
     //    exception.
     // 2. The stack guard was used to interrupt execution for another purpose,
     //    forcing the call through the runtime system.
-    return_value = js_has_overflowed ? EXCEPTION : RETRY;
-  } else if (js_has_overflowed) {
+
+    // Bug(v8:9540) Investigate why this method is called from JS although no
+    // stack overflow or interrupt is pending on ARM64. We return 0 in this
+    // case to continue execution normally.
+    if (js_has_overflowed) {
+      return EXCEPTION;
+    } else if (check.InterruptRequested()) {
+      return RETRY;
+    } else {
+      return 0;
+    }
+  }
+  DCHECK(call_origin == RegExp::CallOrigin::kFromRuntime);
+
+  // Prepare for possible GC.
+  HandleScope handles(isolate);
+  Handle<Code> code_handle(re_code, isolate);
+  Handle<String> subject_handle(String::cast(Object(*subject)), isolate);
+  bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject_handle);
+  int return_value = 0;
+
+  if (js_has_overflowed) {
     AllowHeapAllocation yes_gc;
     isolate->StackOverflow();
     return_value = EXCEPTION;
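
For JS-origin calls the rewritten CheckStackGuardState distinguishes three outcomes before any handles are created: a genuine overflow yields EXCEPTION (the generated code's caller throws), a pending interrupt yields RETRY (the match is redone through the runtime, where GC-safe handling is possible), and anything else yields 0 to resume in place (the v8:9540 case). Condensed as a hypothetical helper, with the sentinel values assumed to be -1 and -2:

    // js_overflowed / interrupt_requested stand in for
    // StackLimitCheck::JsHasOverflowed() and
    // StackLimitCheck::InterruptRequested().
    int CheckStackGuardFromJs(bool js_overflowed, bool interrupt_requested) {
      constexpr int kException = -1;  // assumed Result sentinel
      constexpr int kRetry = -2;      // assumed Result sentinel
      if (js_overflowed) return kException;    // caller raises the throw
      if (interrupt_requested) return kRetry;  // redo the call via runtime
      return 0;                                // spurious: resume execution
    }
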
@@ -191,7 +219,7 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
 }
 
 // Returns a {Result} sentinel, or the number of successful matches.
-int NativeRegExpMacroAssembler::Match(Handle<Code> regexp_code,
+int NativeRegExpMacroAssembler::Match(Handle<JSRegExp> regexp,
                                       Handle<String> subject,
                                       int* offsets_vector,
                                       int offsets_vector_length,
@@ -234,31 +262,36 @@ int NativeRegExpMacroAssembler::Match(Handle<Code> regexp_code,
       StringCharacterPosition(subject_ptr, start_offset + slice_offset, no_gc);
   int byte_length = char_length << char_size_shift;
   const byte* input_end = input_start + byte_length;
-  return Execute(*regexp_code, *subject, start_offset, input_start, input_end,
-                 offsets_vector, offsets_vector_length, isolate);
+  return Execute(*subject, start_offset, input_start, input_end, offsets_vector,
+                 offsets_vector_length, isolate, *regexp);
 }
 
 // Returns a {Result} sentinel, or the number of successful matches.
+// TODO(pthier): The JSRegExp object is passed to native irregexp code to match
+// the signature of the interpreter. We should get rid of JS objects passed to
+// internal methods.
 int NativeRegExpMacroAssembler::Execute(
-    Code code,
     String input,  // This needs to be the unpacked (sliced, cons) string.
     int start_offset, const byte* input_start, const byte* input_end,
-    int* output, int output_size, Isolate* isolate) {
+    int* output, int output_size, Isolate* isolate, JSRegExp regexp) {
   // Ensure that the minimum stack has been allocated.
   RegExpStackScope stack_scope(isolate);
   Address stack_base = stack_scope.stack()->stack_base();
 
-  int direct_call = 0;
+  bool is_one_byte = String::IsOneByteRepresentationUnderneath(input);
+  Code code = Code::cast(regexp.Code(is_one_byte));
+  RegExp::CallOrigin call_origin = RegExp::CallOrigin::kFromRuntime;
 
   using RegexpMatcherSig = int(
       Address input_string, int start_offset,  // NOLINT(readability/casting)
       const byte* input_start, const byte* input_end, int* output,
-      int output_size, Address stack_base, int direct_call, Isolate* isolate);
+      int output_size, Address stack_base, int call_origin, Isolate* isolate,
+      Address regexp);
 
   auto fn = GeneratedCode<RegexpMatcherSig>::FromCode(code);
-  int result =
-      fn.CallIrregexp(input.ptr(), start_offset, input_start, input_end, output,
-                      output_size, stack_base, direct_call, isolate);
+  int result = fn.CallIrregexp(input.ptr(), start_offset, input_start,
+                               input_end, output, output_size, stack_base,
+                               call_origin, isolate, regexp.ptr());
   DCHECK(result >= RETRY);
 
   if (result == EXCEPTION && !isolate->has_pending_exception()) {
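
Execute's generated-code call now carries ten arguments, the last being the JSRegExp itself so that the native and interpreter entry points stay interchangeable (see MatchForCallFromJs). The return is either a Result sentinel or, per the comment above, a non-negative match count; DCHECK(result >= RETRY) works because RETRY is assumed to be the smallest sentinel. A plain-types sketch of the convention:

    #include <cstdint>

    using Address = uintptr_t;

    // Mirrors RegexpMatcherSig above with V8 types reduced to plain ones.
    using RegexpMatcherFn = int (*)(Address input_string, int start_offset,
                                    const uint8_t* input_start,
                                    const uint8_t* input_end, int* output,
                                    int output_size, Address stack_base,
                                    int call_origin, Address isolate,
                                    Address regexp);

    constexpr int kRetry = -2;  // assumed smallest Result sentinel

    // Any value below the smallest sentinel can only be a corrupted return;
    // legitimate values are the sentinels or a non-negative match count.
    bool IsPlausibleResult(int result) { return result >= kRetry; }
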
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index b55ac13590da96..ccf19d3fb689e5 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -36,6 +36,8 @@ class RegExpMacroAssembler {
   static const int kTableSize = 1 << kTableSizeBits;
   static const int kTableMask = kTableSize - 1;
 
+  static constexpr int kUseCharactersValue = -1;
+
   enum IrregexpImplementation {
     kIA32Implementation,
     kARMImplementation,
@@ -69,7 +71,6 @@ class RegExpMacroAssembler {
   // stack by an earlier PushBacktrack(Label*).
   virtual void Backtrack() = 0;
   virtual void Bind(Label* label) = 0;
-  virtual void CheckAtStart(Label* on_at_start) = 0;
   // Dispatch after looking the current character up in a 2-bits-per-entry
   // map.  The destinations vector has up to 4 labels.
   virtual void CheckCharacter(unsigned c, Label* on_equal) = 0;
@@ -81,6 +82,7 @@ class RegExpMacroAssembler {
   virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
   virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
   virtual void CheckGreedyLoop(Label* on_tos_equals_current_position) = 0;
+  virtual void CheckAtStart(int cp_offset, Label* on_at_start) = 0;
   virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start) = 0;
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match) = 0;
@@ -133,10 +135,12 @@ class RegExpMacroAssembler {
   // label if it is.
   virtual void IfRegisterEqPos(int reg, Label* if_eq) = 0;
   virtual IrregexpImplementation Implementation() = 0;
-  virtual void LoadCurrentCharacter(int cp_offset,
-                                    Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1) = 0;
+  V8_EXPORT_PRIVATE void LoadCurrentCharacter(
+      int cp_offset, Label* on_end_of_input, bool check_bounds = true,
+      int characters = 1, int eats_at_least = kUseCharactersValue);
+  virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                        bool check_bounds, int characters,
+                                        int eats_at_least) = 0;
   virtual void PopCurrentPosition() = 0;
   virtual void PopRegister(int register_index) = 0;
   // Pushes the label on the backtrack stack, so that a following Backtrack
@@ -219,7 +223,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
   bool CanReadUnaligned() override;
 
   // Returns a {Result} sentinel, or the number of successful matches.
-  static int Match(Handle<Code> regexp, Handle<String> subject,
+  static int Match(Handle<JSRegExp> regexp, Handle<String> subject,
                    int* offsets_vector, int offsets_vector_length,
                    int previous_index, Isolate* isolate);
 
@@ -235,9 +239,9 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
       String subject, int start_index, const DisallowHeapAllocation& no_gc);
 
   static int CheckStackGuardState(Isolate* isolate, int start_index,
-                                  bool is_direct_call, Address* return_address,
-                                  Code re_code, Address* subject,
-                                  const byte** input_start,
+                                  RegExp::CallOrigin call_origin,
+                                  Address* return_address, Code re_code,
+                                  Address* subject, const byte** input_start,
                                   const byte** input_end);
 
   // Byte map of one byte characters with a 0xff if the character is a word
@@ -250,11 +254,11 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
   }
 
   // Returns a {Result} sentinel, or the number of successful matches.
-  V8_EXPORT_PRIVATE static int Execute(Code code, String input,
-                                       int start_offset,
+  V8_EXPORT_PRIVATE static int Execute(String input, int start_offset,
                                        const byte* input_start,
                                        const byte* input_end, int* output,
-                                       int output_size, Isolate* isolate);
+                                       int output_size, Isolate* isolate,
+                                       JSRegExp regexp);
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/regexp/regexp-nodes.h b/deps/v8/src/regexp/regexp-nodes.h
index 4c13b74926d263..d618c9bb276310 100644
--- a/deps/v8/src/regexp/regexp-nodes.h
+++ b/deps/v8/src/regexp/regexp-nodes.h
@@ -20,11 +20,14 @@ class QuickCheckDetails;
 class RegExpCompiler;
 class Trace;
 struct PreloadState;
+class ChoiceNode;
 
 #define FOR_EACH_NODE_TYPE(VISIT) \
   VISIT(End)                      \
   VISIT(Action)                   \
   VISIT(Choice)                   \
+  VISIT(LoopChoice)               \
+  VISIT(NegativeLookaroundChoice) \
   VISIT(BackReference)            \
   VISIT(Assertion)                \
   VISIT(Text)
@@ -90,6 +93,34 @@ struct NodeInfo final {
   bool replacement_calculated : 1;
 };
 
+struct EatsAtLeastInfo final {
+  EatsAtLeastInfo() : EatsAtLeastInfo(0) {}
+  explicit EatsAtLeastInfo(uint8_t eats)
+      : eats_at_least_from_possibly_start(eats),
+        eats_at_least_from_not_start(eats) {}
+  void SetMin(const EatsAtLeastInfo& other) {
+    if (other.eats_at_least_from_possibly_start <
+        eats_at_least_from_possibly_start) {
+      eats_at_least_from_possibly_start =
+          other.eats_at_least_from_possibly_start;
+    }
+    if (other.eats_at_least_from_not_start < eats_at_least_from_not_start) {
+      eats_at_least_from_not_start = other.eats_at_least_from_not_start;
+    }
+  }
+
+  // Any successful match starting from the current node will consume at least
+  // this many characters. This does not necessarily mean that there is a
+  // possible match with exactly this many characters, but we generally try to
+  // get this number as high as possible to allow for early exit on failure.
+  uint8_t eats_at_least_from_possibly_start;
+
+  // Like eats_at_least_from_possibly_start, but with the additional assumption
+  // that start-of-string assertions (^) can't match. This value is greater than
+  // or equal to eats_at_least_from_possibly_start.
+  uint8_t eats_at_least_from_not_start;
+};
+
 class RegExpNode : public ZoneObject {
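
EatsAtLeastInfo keeps two minima because anchored branches die once we know we are not at the start of input. For a pattern like /^a|bcd/ (illustrative), the merged minimum over both alternatives is min(1, 3) = 1 from a possibly-start position, but rises to 3 from a known not-start position, where the ^ branch can be ignored. One way to model that merge with the struct above, using 255 as a stand-in for "unmatchable" (the saturation point of the uint8_t fields):

    #include <cassert>
    #include <cstdint>

    // Local mirror of the struct above, trimmed to the merge logic.
    struct EatsAtLeastInfo {
      uint8_t eats_at_least_from_possibly_start;
      uint8_t eats_at_least_from_not_start;
      void SetMin(const EatsAtLeastInfo& other) {
        if (other.eats_at_least_from_possibly_start <
            eats_at_least_from_possibly_start) {
          eats_at_least_from_possibly_start =
              other.eats_at_least_from_possibly_start;
        }
        if (other.eats_at_least_from_not_start < eats_at_least_from_not_start) {
          eats_at_least_from_not_start = other.eats_at_least_from_not_start;
        }
      }
    };

    int main() {
      // Branch "^a": eats 1 where ^ can match; modeled as unmatchable (255)
      // when not at start, so it never lowers that side of the merge.
      EatsAtLeastInfo anchored{1, 255};
      // Branch "bcd": eats 3 regardless of position.
      EatsAtLeastInfo unanchored{3, 3};

      EatsAtLeastInfo merged = anchored;
      merged.SetMin(unanchored);
      assert(merged.eats_at_least_from_possibly_start == 1);  // min(1, 3)
      assert(merged.eats_at_least_from_not_start == 3);       // min(255, 3)
      return 0;
    }
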
  public:
   explicit RegExpNode(Zone* zone)
@@ -104,13 +135,20 @@ class RegExpNode : public ZoneObject {
   // Generates a goto to this node or actually generates the code at this point.
   virtual void Emit(RegExpCompiler* compiler, Trace* trace) = 0;
   // How many characters must this node consume at a minimum in order to
-  // succeed.  If we have found at least 'still_to_find' characters that
-  // must be consumed there is no need to ask any following nodes whether
-  // they are sure to eat any more characters.  The not_at_start argument is
-  // used to indicate that we know we are not at the start of the input.  In
-  // this case anchored branches will always fail and can be ignored when
-  // determining how many characters are consumed on success.
-  virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start) = 0;
+  // succeed.  The not_at_start argument is used to indicate that we know we are
+  // not at the start of the input.  In this case anchored branches will always
+  // fail and can be ignored when determining how many characters are consumed
+  // on success.  If this node has not been analyzed yet, EatsAtLeast returns 0.
+  int EatsAtLeast(bool not_at_start);
+  // Returns how many characters this node must consume in order to succeed,
+  // given that this is a LoopChoiceNode whose counter register is in a
+  // newly-initialized state at the current position in the generated code. For
+  // example, consider /a{6,8}/. Absent any extra information, the
+  // LoopChoiceNode for the repetition must report that it consumes at least
+  // zero characters, because it may have already looped several times. However,
+  // with a newly-initialized counter, it can report that it consumes at least
+  // six characters.
+  virtual EatsAtLeastInfo EatsAtLeastFromLoopEntry();
   // Emits some quick code that checks whether the preloaded characters match.
   // Falls through on certain failure, jumps to the label on possible success.
   // If the node cannot make a quick check it does nothing and returns false.
@@ -118,7 +156,7 @@ class RegExpNode : public ZoneObject {
                       Trace* trace, bool preload_has_checked_bounds,
                       Label* on_possible_success,
                       QuickCheckDetails* details_return,
-                      bool fall_through_on_failure);
+                      bool fall_through_on_failure, ChoiceNode* predecessor);
   // For a given number of characters this returns a mask and a value.  The
   // next n characters are anded with the mask and compared with the value.
   // A comparison failure indicates the node cannot match the next n characters.
@@ -127,6 +165,17 @@ class RegExpNode : public ZoneObject {
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
                                     bool not_at_start) = 0;
+  // Fills in quick check details for this node, given that this is a
+  // LoopChoiceNode whose counter register is in a newly-initialized state at
+  // the current position in the generated code. For example, consider /a{6,8}/.
+  // Absent any extra information, the LoopChoiceNode for the repetition cannot
+  // generate any useful quick check because a match might be the (empty)
+  // continuation node. However, with a newly-initialized counter, it can
+  // generate a quick check for several 'a' characters at once.
+  virtual void GetQuickCheckDetailsFromLoopEntry(QuickCheckDetails* details,
+                                                 RegExpCompiler* compiler,
+                                                 int characters_filled_in,
+                                                 bool not_at_start);
   static const int kNodeIsTooComplexForGreedyLoops = kMinInt;
   virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
   // Only returns the successor for a text node of length 1 that matches any
@@ -183,6 +232,10 @@ class RegExpNode : public ZoneObject {
   void set_on_work_list(bool value) { on_work_list_ = value; }
 
   NodeInfo* info() { return &info_; }
+  const EatsAtLeastInfo* eats_at_least_info() const { return &eats_at_least_; }
+  void set_eats_at_least_info(const EatsAtLeastInfo& eats_at_least) {
+    eats_at_least_ = eats_at_least;
+  }
 
   BoyerMooreLookahead* bm_info(bool not_at_start) {
     return bm_info_[not_at_start ? 1 : 0];
@@ -205,6 +258,11 @@ class RegExpNode : public ZoneObject {
   Label label_;
   bool on_work_list_;
   NodeInfo info_;
+
+  // Saved values for EatsAtLeast results, to avoid recomputation. Filled in
+  // during analysis (valid if info_.been_analyzed is true).
+  EatsAtLeastInfo eats_at_least_;
+
   // This variable keeps track of how many times code has been generated for
   // this node (in different traces).  We don't keep track of where the
   // generated code is located unless the code is generated at the start of
@@ -239,7 +297,7 @@ class SeqRegExpNode : public RegExpNode {
 class ActionNode : public SeqRegExpNode {
  public:
   enum ActionType {
-    SET_REGISTER,
+    SET_REGISTER_FOR_LOOP,
     INCREMENT_REGISTER,
     STORE_POSITION,
     BEGIN_SUBMATCH,
@@ -247,7 +305,8 @@ class ActionNode : public SeqRegExpNode {
     EMPTY_MATCH_CHECK,
     CLEAR_CAPTURES
   };
-  static ActionNode* SetRegister(int reg, int val, RegExpNode* on_success);
+  static ActionNode* SetRegisterForLoop(int reg, int val,
+                                        RegExpNode* on_success);
   static ActionNode* IncrementRegister(int reg, RegExpNode* on_success);
   static ActionNode* StorePosition(int reg, bool is_capture,
                                    RegExpNode* on_success);
@@ -265,13 +324,9 @@ class ActionNode : public SeqRegExpNode {
                                      RegExpNode* on_success);
   void Accept(NodeVisitor* visitor) override;
   void Emit(RegExpCompiler* compiler, Trace* trace) override;
-  int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
   void GetQuickCheckDetails(QuickCheckDetails* details,
                             RegExpCompiler* compiler, int filled_in,
-                            bool not_at_start) override {
-    return on_success()->GetQuickCheckDetails(details, compiler, filled_in,
-                                              not_at_start);
-  }
+                            bool not_at_start) override;
   void FillInBMInfo(Isolate* isolate, int offset, int budget,
                     BoyerMooreLookahead* bm, bool not_at_start) override;
   ActionType action_type() { return action_type_; }
@@ -342,7 +397,6 @@ class TextNode : public SeqRegExpNode {
                                           JSRegExp::Flags flags);
   void Accept(NodeVisitor* visitor) override;
   void Emit(RegExpCompiler* compiler, Trace* trace) override;
-  int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
   void GetQuickCheckDetails(QuickCheckDetails* details,
                             RegExpCompiler* compiler, int characters_filled_in,
                             bool not_at_start) override;
@@ -356,6 +410,7 @@ class TextNode : public SeqRegExpNode {
                     BoyerMooreLookahead* bm, bool not_at_start) override;
   void CalculateOffsets();
   RegExpNode* FilterOneByte(int depth) override;
+  int Length();
 
  private:
   enum TextEmitPassType {
@@ -371,7 +426,6 @@ class TextNode : public SeqRegExpNode {
   void TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
                     bool preloaded, Trace* trace, bool first_element_checked,
                     int* checked_up_to);
-  int Length();
   ZoneList<TextElement>* elms_;
   bool read_backward_;
 };
@@ -402,7 +456,6 @@ class AssertionNode : public SeqRegExpNode {
   }
   void Accept(NodeVisitor* visitor) override;
   void Emit(RegExpCompiler* compiler, Trace* trace) override;
-  int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
   void GetQuickCheckDetails(QuickCheckDetails* details,
                             RegExpCompiler* compiler, int filled_in,
                             bool not_at_start) override;
@@ -434,8 +487,6 @@ class BackReferenceNode : public SeqRegExpNode {
   int end_register() { return end_reg_; }
   bool read_backward() { return read_backward_; }
   void Emit(RegExpCompiler* compiler, Trace* trace) override;
-  int EatsAtLeast(int still_to_find, int recursion_depth,
-                  bool not_at_start) override;
   void GetQuickCheckDetails(QuickCheckDetails* details,
                             RegExpCompiler* compiler, int characters_filled_in,
                             bool not_at_start) override {
@@ -457,10 +508,6 @@ class EndNode : public RegExpNode {
   EndNode(Action action, Zone* zone) : RegExpNode(zone), action_(action) {}
   void Accept(NodeVisitor* visitor) override;
   void Emit(RegExpCompiler* compiler, Trace* trace) override;
-  int EatsAtLeast(int still_to_find, int recursion_depth,
-                  bool not_at_start) override {
-    return 0;
-  }
   void GetQuickCheckDetails(QuickCheckDetails* details,
                             RegExpCompiler* compiler, int characters_filled_in,
                             bool not_at_start) override {
@@ -540,9 +587,6 @@ class ChoiceNode : public RegExpNode {
   }
   ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
   void Emit(RegExpCompiler* compiler, Trace* trace) override;
-  int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
-  int EatsAtLeastHelper(int still_to_find, int budget,
-                        RegExpNode* ignore_this_node, bool not_at_start);
   void GetQuickCheckDetails(QuickCheckDetails* details,
                             RegExpCompiler* compiler, int characters_filled_in,
                             bool not_at_start) override;
@@ -564,6 +608,7 @@ class ChoiceNode : public RegExpNode {
   ZoneList<GuardedAlternative>* alternatives_;
 
  private:
+  template <typename...>
   friend class Analysis;
 
   void GenerateGuard(RegExpMacroAssembler* macro_assembler, Guard* guard,
@@ -601,16 +646,23 @@ class NegativeLookaroundChoiceNode : public ChoiceNode {
     AddAlternative(this_must_fail);
     AddAlternative(then_do_this);
   }
-  int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
   void GetQuickCheckDetails(QuickCheckDetails* details,
                             RegExpCompiler* compiler, int characters_filled_in,
                             bool not_at_start) override;
   void FillInBMInfo(Isolate* isolate, int offset, int budget,
                     BoyerMooreLookahead* bm, bool not_at_start) override {
-    alternatives_->at(1).node()->FillInBMInfo(isolate, offset, budget - 1, bm,
-                                              not_at_start);
+    continue_node()->FillInBMInfo(isolate, offset, budget - 1, bm,
+                                  not_at_start);
     if (offset == 0) set_bm_info(not_at_start, bm);
   }
+  static constexpr int kLookaroundIndex = 0;
+  static constexpr int kContinueIndex = 1;
+  RegExpNode* lookaround_node() {
+    return alternatives()->at(kLookaroundIndex).node();
+  }
+  RegExpNode* continue_node() {
+    return alternatives()->at(kContinueIndex).node();
+  }
   // For a negative lookahead we don't emit the quick check for the
   // alternative that is expected to fail.  This is because quick check code
   // starts by loading enough characters for the alternative that takes fewest
@@ -619,29 +671,38 @@ class NegativeLookaroundChoiceNode : public ChoiceNode {
   bool try_to_emit_quick_check_for_alternative(bool is_first) override {
     return !is_first;
   }
+  void Accept(NodeVisitor* visitor) override;
   RegExpNode* FilterOneByte(int depth) override;
 };
 
 class LoopChoiceNode : public ChoiceNode {
  public:
-  LoopChoiceNode(bool body_can_be_zero_length, bool read_backward, Zone* zone)
+  LoopChoiceNode(bool body_can_be_zero_length, bool read_backward,
+                 int min_loop_iterations, Zone* zone)
       : ChoiceNode(2, zone),
         loop_node_(nullptr),
         continue_node_(nullptr),
         body_can_be_zero_length_(body_can_be_zero_length),
-        read_backward_(read_backward) {}
+        read_backward_(read_backward),
+        traversed_loop_initialization_node_(false),
+        min_loop_iterations_(min_loop_iterations) {}
   void AddLoopAlternative(GuardedAlternative alt);
   void AddContinueAlternative(GuardedAlternative alt);
   void Emit(RegExpCompiler* compiler, Trace* trace) override;
-  int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
   void GetQuickCheckDetails(QuickCheckDetails* details,
                             RegExpCompiler* compiler, int characters_filled_in,
                             bool not_at_start) override;
+  void GetQuickCheckDetailsFromLoopEntry(QuickCheckDetails* details,
+                                         RegExpCompiler* compiler,
+                                         int characters_filled_in,
+                                         bool not_at_start) override;
   void FillInBMInfo(Isolate* isolate, int offset, int budget,
                     BoyerMooreLookahead* bm, bool not_at_start) override;
+  EatsAtLeastInfo EatsAtLeastFromLoopEntry() override;
   RegExpNode* loop_node() { return loop_node_; }
   RegExpNode* continue_node() { return continue_node_; }
   bool body_can_be_zero_length() { return body_can_be_zero_length_; }
+  int min_loop_iterations() const { return min_loop_iterations_; }
   bool read_backward() override { return read_backward_; }
   void Accept(NodeVisitor* visitor) override;
   RegExpNode* FilterOneByte(int depth) override;
@@ -658,6 +719,22 @@ class LoopChoiceNode : public ChoiceNode {
   RegExpNode* continue_node_;
   bool body_can_be_zero_length_;
   bool read_backward_;
+
+  // Temporary marker set only while generating quick check details. Represents
+  // whether GetQuickCheckDetails traversed the initialization node for this
+  // loop's counter. If so, we may be able to generate stricter quick checks
+  // because we know the loop node must match at least min_loop_iterations_
+  // times before the continuation node can match.
+  bool traversed_loop_initialization_node_;
+
+  // The minimum number of times the loop_node_ must match before the
+  // continue_node_ might be considered. This value can be temporarily decreased
+  // while generating quick check details, to represent the remaining iterations
+  // after the completed portion of the quick check details.
+  int min_loop_iterations_;
+
+  friend class IterationDecrementer;
+  friend class LoopInitializationMarker;
 };
 
 class NodeVisitor {
@@ -666,7 +743,6 @@ class NodeVisitor {
 #define DECLARE_VISIT(Type) virtual void Visit##Type(Type##Node* that) = 0;
   FOR_EACH_NODE_TYPE(DECLARE_VISIT)
 #undef DECLARE_VISIT
-  virtual void VisitLoopChoice(LoopChoiceNode* that) { VisitChoice(that); }
 };
 
 }  // namespace internal
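
The min_loop_iterations bound added above lets the analysis assume the loop body must match several times before the continuation becomes reachable, which tightens the "eats at least" estimate used by quick checks. Below is a minimal standalone sketch of that arithmetic, with illustrative names: LoopSummary is not a V8 type, and this EatsAtLeastFromLoopEntry only mirrors the intent of the method declared in the hunk above.

#include <cstdio>

struct LoopSummary {
  int body_eats_at_least;      // min chars one loop-body iteration consumes
  int continue_eats_at_least;  // min chars consumed after the loop exits
  int min_loop_iterations;     // e.g. 3 for /a{3,}b/
};

// Lower bound on characters consumed from loop entry: the body must run
// min_loop_iterations times before the continuation is even considered.
int EatsAtLeastFromLoopEntry(const LoopSummary& s) {
  return s.min_loop_iterations * s.body_eats_at_least +
         s.continue_eats_at_least;
}

int main() {
  // For /a{3,}b/: body eats 1 ('a'), continuation eats 1 ('b'), 3 iterations.
  LoopSummary s{1, 1, 3};
  std::printf("eats at least %d chars\n", EatsAtLeastFromLoopEntry(s));  // 4
}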
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 3647680969a9d9..d6e421cafa3f89 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -692,7 +692,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
     }
   }
   if (subexpr_type == CAPTURE) {
-    if (captures_started_ >= kMaxCaptures) {
+    if (captures_started_ >= JSRegExp::kMaxCaptures) {
       ReportError(CStrVector("Too many captures"));
       return nullptr;
     }
@@ -800,7 +800,7 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
     uc32 c = current();
     if (IsDecimalDigit(c)) {
       value = 10 * value + (c - '0');
-      if (value > kMaxCaptures) {
+      if (value > JSRegExp::kMaxCaptures) {
         Reset(start);
         return false;
       }
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index 36cec7e984eb32..cc1948b101f8c8 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -221,7 +221,6 @@ class V8_EXPORT_PRIVATE RegExpParser {
 
   static bool IsSyntaxCharacterOrSlash(uc32 c);
 
-  static const int kMaxCaptures = 1 << 16;
   static const uc32 kEndMarker = (1 << 21);
 
  private:
diff --git a/deps/v8/src/regexp/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index 3885fd8e8d283c..a6a128841fe7e0 100644
--- a/deps/v8/src/regexp/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -72,12 +72,12 @@ Address RegExpStack::EnsureCapacity(size_t size) {
       DeleteArray(thread_local_.memory_);
     }
     thread_local_.memory_ = new_memory;
+    thread_local_.memory_top_ = new_memory + size;
     thread_local_.memory_size_ = size;
     thread_local_.limit_ = reinterpret_cast<Address>(new_memory) +
                            kStackLimitSlack * kSystemPointerSize;
   }
-  return reinterpret_cast<Address>(thread_local_.memory_) +
-         thread_local_.memory_size_;
+  return reinterpret_cast<Address>(thread_local_.memory_top_);
 }
 
 
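
The EnsureCapacity change above keeps a cached memory_top_ pointer in sync with memory_ and memory_size_, so reading the stack base becomes a single load instead of a pointer-plus-size computation. A standalone sketch of the invariant follows, assuming nothing beyond what the hunk shows; ThreadLocalSketch is illustrative, not V8's struct.

#include <cassert>
#include <cstddef>
#include <cstdint>

struct ThreadLocalSketch {
  uint8_t* memory = nullptr;
  uint8_t* memory_top = nullptr;  // invariant: memory + memory_size
  size_t memory_size = 0;

  void SetMemory(uint8_t* new_memory, size_t size) {
    memory = new_memory;
    memory_top = new_memory + size;  // updated together, as in EnsureCapacity
    memory_size = size;
  }

  uint8_t* stack_base() const {
    // Mirrors the DCHECK_EQ added to RegExpStack::stack_base().
    assert(memory_top == memory + memory_size);
    return memory_top;  // one load; no recomputation
  }
};

int main() {
  static uint8_t buffer[256];
  ThreadLocalSketch tl;
  tl.SetMemory(buffer, sizeof(buffer));
  return tl.stack_base() == buffer + sizeof(buffer) ? 0 : 1;
}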
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index b1d45717602f71..7ecaa40b819dbb 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -46,8 +46,9 @@ class RegExpStack {
   // Gives the top of the memory used as stack.
   Address stack_base() {
     DCHECK_NE(0, thread_local_.memory_size_);
-    return reinterpret_cast<Address>(thread_local_.memory_) +
-           thread_local_.memory_size_;
+    DCHECK_EQ(thread_local_.memory_top_,
+              thread_local_.memory_ + thread_local_.memory_size_);
+    return reinterpret_cast<Address>(thread_local_.memory_top_);
   }
 
   // The total size of the memory allocated for the stack.
@@ -58,7 +59,7 @@ class RegExpStack {
   // There is only a limited number of locations below the stack limit,
   // so users of the stack should check the stack limit during any
   // sequence of pushes longer than this.
-  Address* limit_address() { return &(thread_local_.limit_); }
+  Address* limit_address_address() { return &(thread_local_.limit_); }
 
   // Ensures that there is a memory area with at least the specified size.
   // If passing zero, the default/minimum size buffer is allocated.
@@ -89,12 +90,15 @@ class RegExpStack {
   // Structure holding the allocated memory, size and limit.
   struct ThreadLocal {
     ThreadLocal() { Clear(); }
-    // If memory_size_ > 0 then memory_ must be non-nullptr.
+    // If memory_size_ > 0, then memory_ and memory_top_ must be non-nullptr,
+    // and memory_top_ == memory_ + memory_size_.
     byte* memory_;
+    byte* memory_top_;
     size_t memory_size_;
     Address limit_;
     void Clear() {
       memory_ = nullptr;
+      memory_top_ = nullptr;
       memory_size_ = 0;
       limit_ = kMemoryTop;
     }
@@ -102,7 +106,7 @@ class RegExpStack {
   };
 
   // Address of allocated memory.
-  Address memory_address() {
+  Address memory_address_address() {
     return reinterpret_cast<Address>(&thread_local_.memory_);
   }
 
@@ -111,6 +115,11 @@ class RegExpStack {
     return reinterpret_cast<Address>(&thread_local_.memory_size_);
   }
 
+  // Address of top of memory used as stack.
+  Address memory_top_address_address() {
+    return reinterpret_cast<Address>(&thread_local_.memory_top_);
+  }
+
   // Resets the buffer if it has grown beyond the default/minimum size.
   // After this, the buffer is either the default size, or it is empty, so
   // you have to call EnsureCapacity before using it again.
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index ad50270fdcdac0..c9194d5170c675 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -5,6 +5,7 @@
 #include "src/regexp/regexp-utils.h"
 
 #include "src/execution/isolate.h"
+#include "src/execution/protectors-inl.h"
 #include "src/heap/factory.h"
 #include "src/objects/js-regexp-inl.h"
 #include "src/objects/objects-inl.h"
@@ -179,7 +180,14 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
     return false;
   }
 
-  if (!isolate->IsRegExpSpeciesLookupChainIntact(isolate->native_context())) {
+  // Note: Unlike the more involved check in CSA (see BranchIfFastRegExp), this
+  // does not go on to check the actual value of the exec property. This would
+  // not be valid since this method is called from places that access the flags
+  // property. Similar spots in CSA would use BranchIfFastRegExp_Strict in this
+  // case.
+
+  if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
+          recv.GetCreationContext())) {
     return false;
   }
 
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
index 4b8714c55fa693..19f1f240399823 100644
--- a/deps/v8/src/regexp/regexp-utils.h
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -38,6 +38,9 @@ class RegExpUtils : public AllStatic {
   // Checks whether the given object is an unmodified JSRegExp instance.
   // Neither the object's map, nor its prototype's map, nor any relevant
   // method on the prototype may be modified.
+  //
+  // Note: This check is limited and may only be used in situations where the
+  // only relevant property is 'exec'.
   static bool IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj);
 
   // ES#sec-advancestringindex
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index 15b0321c4643f8..e0bc4b8e32347a 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -5,6 +5,7 @@
 #include "src/regexp/regexp.h"
 
 #include "src/codegen/compilation-cache.h"
+#include "src/diagnostics/code-tracer.h"
 #include "src/heap/heap-inl.h"
 #include "src/objects/js-regexp-inl.h"
 #include "src/regexp/regexp-bytecode-generator.h"
@@ -14,6 +15,7 @@
 #include "src/regexp/regexp-macro-assembler-arch.h"
 #include "src/regexp/regexp-parser.h"
 #include "src/strings/string-search.h"
+#include "src/utils/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -298,29 +300,72 @@ Handle<Object> RegExpImpl::AtomExec(Isolate* isolate, Handle<JSRegExp> re,
 bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
                                         Handle<String> sample_subject,
                                         bool is_one_byte) {
-  Object compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
-  if (compiled_code != Smi::FromInt(JSRegExp::kUninitializedValue)) {
-    DCHECK(FLAG_regexp_interpret_all ? compiled_code.IsByteArray()
-                                     : compiled_code.IsCode());
+  Object compiled_code = re->Code(is_one_byte);
+  Object bytecode = re->Bytecode(is_one_byte);
+  bool needs_initial_compilation =
+      compiled_code == Smi::FromInt(JSRegExp::kUninitializedValue);
+  // Recompilation is needed on the first execution of the regexp after the
+  // decision to tier up has been made. If the tier-up strategy is not in
+  // use, this value is always false.
+  bool needs_tier_up_compilation =
+      re->MarkedForTierUp() && bytecode.IsByteArray();
+
+  if (FLAG_trace_regexp_tier_up && needs_tier_up_compilation) {
+    PrintF("JSRegExp object %p needs tier-up compilation\n",
+           reinterpret_cast<void*>(re->ptr()));
+  }
+
+  if (!needs_initial_compilation && !needs_tier_up_compilation) {
+    DCHECK(compiled_code.IsCode());
+    DCHECK_IMPLIES(FLAG_regexp_interpret_all, bytecode.IsByteArray());
     return true;
   }
+
+  DCHECK_IMPLIES(needs_tier_up_compilation, bytecode.IsByteArray());
+
   return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
 }
 
+#ifdef DEBUG
+namespace {
+
+bool RegExpCodeIsValidForPreCompilation(Handle<JSRegExp> re, bool is_one_byte) {
+  Object entry = re->Code(is_one_byte);
+  Object bytecode = re->Bytecode(is_one_byte);
+  // If we're not using the tier-up strategy, entry can only be a smi
+  // representing an uncompiled regexp here. If we're using the tier-up
+  // strategy, entry can either still be a smi representing an uncompiled
+  // regexp (when compiling before the tier-up), or it can contain a
+  // trampoline to the regexp interpreter, with compiled bytecode in the
+  // bytecode field (when recompiling after the tier-up). If the tier-up was
+  // forced, which happens for global replaces, entry is a smi representing
+  // an uncompiled regexp, even though we're "recompiling" after the tier-up.
+  if (re->ShouldProduceBytecode()) {
+    DCHECK(entry.IsSmi());
+    DCHECK(bytecode.IsSmi());
+    int entry_value = Smi::ToInt(entry);
+    int bytecode_value = Smi::ToInt(bytecode);
+    DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value);
+    DCHECK_EQ(JSRegExp::kUninitializedValue, bytecode_value);
+  } else {
+    DCHECK(entry.IsSmi() || (entry.IsCode() && bytecode.IsByteArray()));
+  }
+
+  return true;
+}
+
+}  // namespace
+#endif
+
 bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
                                  Handle<String> sample_subject,
                                  bool is_one_byte) {
   // Compile the RegExp.
   Zone zone(isolate->allocator(), ZONE_NAME);
   PostponeInterruptsScope postpone(isolate);
-#ifdef DEBUG
-  Object entry = re->DataAt(JSRegExp::code_index(is_one_byte));
-  // When arriving here entry can only be a smi representing an uncompiled
-  // regexp.
-  DCHECK(entry.IsSmi());
-  int entry_value = Smi::ToInt(entry);
-  DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value);
-#endif
+
+  DCHECK(RegExpCodeIsValidForPreCompilation(re, is_one_byte));
 
   JSRegExp::Flags flags = re->GetFlags();
 
@@ -335,6 +380,14 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
     USE(ThrowRegExpException(isolate, re, pattern, compile_data.error));
     return false;
   }
+  // The compilation target is kBytecode if we're interpreting all regexp
+  // objects, or if we're using the tier-up strategy but the tier-up hasn't
+  // happened yet. It is kNative if we're using the tier-up strategy and need
+  // to recompile to tier up, or if we're producing native code for all
+  // regexp objects.
+  compile_data.compilation_target = re->ShouldProduceBytecode()
+                                        ? RegExpCompilationTarget::kBytecode
+                                        : RegExpCompilationTarget::kNative;
   const bool compilation_succeeded =
       Compile(isolate, &zone, &compile_data, flags, pattern, sample_subject,
               is_one_byte);
@@ -346,13 +399,37 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
 
   Handle<FixedArray> data =
       Handle<FixedArray>(FixedArray::cast(re->data()), isolate);
-  data->set(JSRegExp::code_index(is_one_byte), compile_data.code);
+  if (compile_data.compilation_target == RegExpCompilationTarget::kNative) {
+    data->set(JSRegExp::code_index(is_one_byte), compile_data.code);
+    // Reset bytecode to uninitialized. When tier-up is enabled, this is how
+    // we know that tier-up has already happened.
+    data->set(JSRegExp::bytecode_index(is_one_byte),
+              Smi::FromInt(JSRegExp::kUninitializedValue));
+  } else {
+    DCHECK_EQ(compile_data.compilation_target,
+              RegExpCompilationTarget::kBytecode);
+    // Store code generated by compiler in bytecode and trampoline to
+    // interpreter in code.
+    data->set(JSRegExp::bytecode_index(is_one_byte), compile_data.code);
+    Handle<Code> trampoline =
+        BUILTIN_CODE(isolate, RegExpInterpreterTrampoline);
+    data->set(JSRegExp::code_index(is_one_byte), *trampoline);
+  }
   SetIrregexpCaptureNameMap(*data, compile_data.capture_name_map);
   int register_max = IrregexpMaxRegisterCount(*data);
   if (compile_data.register_count > register_max) {
     SetIrregexpMaxRegisterCount(*data, compile_data.register_count);
   }
 
+  if (FLAG_trace_regexp_tier_up) {
+    PrintF("JSRegExp object %p %s size: %d\n",
+           reinterpret_cast<void*>(re->ptr()),
+           re->ShouldProduceBytecode() ? "bytecode" : "native code",
+           re->ShouldProduceBytecode()
+               ? IrregexpByteCode(*data, is_one_byte).Size()
+               : IrregexpNativeCode(*data, is_one_byte).Size());
+  }
+
   return true;
 }
 
@@ -382,7 +459,7 @@ int RegExpImpl::IrregexpNumberOfRegisters(FixedArray re) {
 }
 
 ByteArray RegExpImpl::IrregexpByteCode(FixedArray re, bool is_one_byte) {
-  return ByteArray::cast(re.get(JSRegExp::code_index(is_one_byte)));
+  return ByteArray::cast(re.get(JSRegExp::bytecode_index(is_one_byte)));
 }
 
 Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
@@ -411,7 +488,7 @@ int RegExp::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
 
   DisallowHeapAllocation no_gc;
   FixedArray data = FixedArray::cast(regexp->data());
-  if (FLAG_regexp_interpret_all) {
+  if (regexp->ShouldProduceBytecode()) {
     // Byte-code regexp needs space allocated for all its registers.
     // The result captures are copied to the start of the registers array
     // if the match succeeds.  This way those registers are not clobbered
@@ -436,16 +513,15 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
 
   bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
 
-  if (!FLAG_regexp_interpret_all) {
+  if (!regexp->ShouldProduceBytecode()) {
     DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
     do {
       EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
-      Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
       // The stack is used to allocate registers for the compiled regexp code.
       // This means that in case of failure, the output registers array is left
       // untouched and contains the capture results from the previous successful
       // match.  We can use that to set the last match info lazily.
-      int res = NativeRegExpMacroAssembler::Match(code, subject, output,
+      int res = NativeRegExpMacroAssembler::Match(regexp, subject, output,
                                                   output_size, index, isolate);
       if (res != NativeRegExpMacroAssembler::RETRY) {
         DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION ||
@@ -464,12 +540,11 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
       // the, potentially, different subject (the string can switch between
       // being internal and external, and even between being Latin1 and UC16,
       // but the characters are always the same).
-      RegExp::IrregexpPrepare(isolate, regexp, subject);
       is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
     } while (true);
     UNREACHABLE();
   } else {
-    DCHECK(FLAG_regexp_interpret_all);
+    DCHECK(regexp->ShouldProduceBytecode());
     DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
     // We must have done EnsureCompiledIrregexp, so we can get the number of
     // registers.
@@ -478,17 +553,10 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
     int32_t* raw_output = &output[number_of_capture_registers];
 
     do {
-      // We do not touch the actual capture result registers until we know there
-      // has been a match so that we can use those capture results to set the
-      // last match info.
-      for (int i = number_of_capture_registers - 1; i >= 0; i--) {
-        raw_output[i] = -1;
-      }
-      Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte),
-                                   isolate);
-
-      IrregexpInterpreter::Result result = IrregexpInterpreter::Match(
-          isolate, byte_codes, subject, raw_output, index);
+      IrregexpInterpreter::Result result =
+          IrregexpInterpreter::MatchForCallFromRuntime(
+              isolate, regexp, subject, raw_output, number_of_capture_registers,
+              index);
       DCHECK_IMPLIES(result == IrregexpInterpreter::EXCEPTION,
                      isolate->has_pending_exception());
 
@@ -504,6 +572,10 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
         case IrregexpInterpreter::RETRY:
           // The string has changed representation, and we must restart the
           // match.
+          // We need to reset the tier-up state to start over with compilation.
+          if (FLAG_regexp_tier_up) {
+            regexp->ResetTierUp();
+          }
           is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
           EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
           break;
@@ -520,14 +592,15 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
 
   subject = String::Flatten(isolate, subject);
 
-  // Prepare space for the return values.
 #ifdef DEBUG
-  if (FLAG_regexp_interpret_all && FLAG_trace_regexp_bytecodes) {
+  if (FLAG_trace_regexp_bytecodes && regexp->ShouldProduceBytecode()) {
     String pattern = regexp->Pattern();
     PrintF("\n\nRegexp match:   /%s/\n\n", pattern.ToCString().get());
     PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
   }
 #endif
+
+  // Prepare space for the return values.
   int required_registers = RegExp::IrregexpPrepare(isolate, regexp, subject);
   if (required_registers < 0) {
     // Compiling failed with an exception.
@@ -547,6 +620,7 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
   int res =
       RegExpImpl::IrregexpExecRaw(isolate, regexp, subject, previous_index,
                                   output_registers, required_registers);
+
   if (res == RegExp::RE_SUCCESS) {
     int capture_count =
         IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
@@ -706,17 +780,14 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
 
   if (node == nullptr) node = new (zone) EndNode(EndNode::BACKTRACK, zone);
   data->node = node;
-  Analysis analysis(isolate, is_one_byte);
-  analysis.EnsureAnalyzed(node);
-  if (analysis.has_failed()) {
-    data->error =
-        isolate->factory()->NewStringFromAsciiChecked(analysis.error_message());
+  if (const char* error_message = AnalyzeRegExp(isolate, is_one_byte, node)) {
+    data->error = isolate->factory()->NewStringFromAsciiChecked(error_message);
     return false;
   }
 
   // Create the correct assembler for the architecture.
   std::unique_ptr<RegExpMacroAssembler> macro_assembler;
-  if (!FLAG_regexp_interpret_all) {
+  if (data->compilation_target == RegExpCompilationTarget::kNative) {
     // Native regexp implementation.
     DCHECK(!FLAG_jitless);
 
@@ -752,8 +823,7 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
 #error "Unsupported architecture"
 #endif
   } else {
-    DCHECK(FLAG_regexp_interpret_all);
-
+    DCHECK_EQ(data->compilation_target, RegExpCompilationTarget::kBytecode);
     // Interpreted regexp implementation.
     macro_assembler.reset(new RegExpBytecodeGenerator(isolate, zone));
   }
@@ -781,6 +851,26 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
   RegExpCompiler::CompilationResult result = compiler.Assemble(
       isolate, macro_assembler.get(), node, data->capture_count, pattern);
 
+  // Code / bytecode printing.
+  {
+#ifdef ENABLE_DISASSEMBLER
+    if (FLAG_print_regexp_code &&
+        data->compilation_target == RegExpCompilationTarget::kNative) {
+      CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+      OFStream os(trace_scope.file());
+      Handle<Code> c(Code::cast(result.code), isolate);
+      auto pattern_cstring = pattern->ToCString();
+      c->Disassemble(pattern_cstring.get(), os);
+    }
+#endif
+    if (FLAG_print_regexp_bytecode &&
+        data->compilation_target == RegExpCompilationTarget::kBytecode) {
+      Handle<ByteArray> bytecode(ByteArray::cast(result.code), isolate);
+      auto pattern_cstring = pattern->ToCString();
+      IrregexpInterpreter::Disassemble(*bytecode, pattern_cstring.get());
+    }
+  }
+
   if (FLAG_correctness_fuzzer_suppressions &&
       strncmp(result.error_message, "Stack overflow", 15) == 0) {
     FATAL("Aborting on stack overflow");
@@ -790,6 +880,7 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
     data->error =
         isolate->factory()->NewStringFromAsciiChecked(result.error_message);
   }
+
   data->code = result.code;
   data->register_count = result.num_registers;
 
@@ -803,7 +894,7 @@ RegExpGlobalCache::RegExpGlobalCache(Handle<JSRegExp> regexp,
       regexp_(regexp),
       subject_(subject),
       isolate_(isolate) {
-  bool interpreted = FLAG_regexp_interpret_all;
+  bool interpreted = regexp->ShouldProduceBytecode();
 
   if (regexp_->TypeTag() == JSRegExp::ATOM) {
     static const int kAtomRegistersPerMatch = 2;
@@ -868,6 +959,7 @@ int RegExpGlobalCache::AdvanceZeroLength(int last_index) {
 
 int32_t* RegExpGlobalCache::FetchNext() {
   current_match_index_++;
+
   if (current_match_index_ >= num_matches_) {
     // Current batch of results exhausted.
     // Fail if last batch was not even fully filled.
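
Taken together, the regexp.cc changes implement a two-stage pipeline: a pattern first compiles to bytecode, and once the JSRegExp is marked for tier-up it is recompiled to native code on its next execution. The following hand-written sketch captures that decision logic under the tier-up strategy; the predicates only approximate re->MarkedForTierUp() and re->ShouldProduceBytecode() and are not V8's definitions.

#include <cstdio>

enum class Target { kBytecode, kNative };

struct RegExpSketch {
  bool compiled = false;            // any code produced yet?
  bool has_bytecode = false;        // bytecode from the first-stage compile
  bool marked_for_tier_up = false;  // set by the tier-up heuristic
};

// Approximates EnsureCompiledIrregexp's decision above.
bool NeedsCompilation(const RegExpSketch& re) {
  bool initial = !re.compiled;
  // Recompile on the first execution after the tier-up decision.
  bool tier_up = re.marked_for_tier_up && re.has_bytecode;
  return initial || tier_up;
}

// Approximates the compilation_target selection in CompileIrregexp,
// assuming the tier-up strategy is active.
Target ChooseTarget(const RegExpSketch& re, bool interpret_all) {
  if (interpret_all) return Target::kBytecode;
  return re.marked_for_tier_up ? Target::kNative : Target::kBytecode;
}

int main() {
  RegExpSketch re;
  std::printf("first compile -> %s\n",
              ChooseTarget(re, false) == Target::kBytecode ? "bytecode"
                                                           : "native");
  re.compiled = re.has_bytecode = re.marked_for_tier_up = true;
  std::printf("needs recompile: %d -> %s\n", NeedsCompilation(re),
              ChooseTarget(re, false) == Target::kNative ? "native"
                                                         : "bytecode");
}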
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index 0f3ed463dae746..8ccc9789a30ab4 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -13,6 +13,8 @@ namespace internal {
 class RegExpNode;
 class RegExpTree;
 
+enum class RegExpCompilationTarget : int { kBytecode, kNative };
+
 // TODO(jgruber): Consider splitting between ParseData and CompileData.
 struct RegExpCompileData {
   // The parsed AST as produced by the RegExpParser.
@@ -21,8 +23,8 @@ struct RegExpCompileData {
   // The compiled Node graph as produced by RegExpTree::ToNode methods.
   RegExpNode* node = nullptr;
 
-  // The generated code as produced by the compiler. Either a Code object (for
-  // irregexp native code) or a ByteArray (for irregexp bytecode).
+  // Either the generated code as produced by the compiler or a trampoline
+  // to the interpreter.
   Object code;
 
   // True, iff the pattern is a 'simple' atom with zero captures. In other
@@ -46,12 +48,20 @@ struct RegExpCompileData {
 
   // The number of registers used by the generated code.
   int register_count = 0;
+
+  // The compilation target (bytecode or native code).
+  RegExpCompilationTarget compilation_target;
 };
 
 class RegExp final : public AllStatic {
  public:
   // Whether the irregexp engine generates native code or interpreter bytecode.
-  static bool GeneratesNativeCode() { return !FLAG_regexp_interpret_all; }
+  static bool CanGenerateNativeCode() {
+    return !FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
+  }
+  static bool CanGenerateBytecode() {
+    return FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
+  }
 
   // Parses the RegExp pattern and prepares the JSRegExp object with
   // generic data and choice of implementation - as well as what
@@ -61,6 +71,11 @@ class RegExp final : public AllStatic {
       Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
       JSRegExp::Flags flags);
 
+  enum CallOrigin : int {
+    kFromRuntime = 0,
+    kFromJs = 1,
+  };
+
   // See ECMA-262 section 15.10.6.2.
   // This function calls the garbage collector if necessary.
   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Exec(
@@ -73,7 +88,7 @@ class RegExp final : public AllStatic {
   static constexpr int kInternalRegExpException = -1;
   static constexpr int kInternalRegExpRetry = -2;
 
-  enum IrregexpResult {
+  enum IrregexpResult : int32_t {
     RE_FAILURE = kInternalRegExpFailure,
     RE_SUCCESS = kInternalRegExpSuccess,
     RE_EXCEPTION = kInternalRegExpException,
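
The two predicates above replace the single GeneratesNativeCode() because, with FLAG_regexp_tier_up set, both code kinds can be produced over a regexp's lifetime. This tiny sketch just enumerates the flag combinations, mirroring the expressions in the hunk; Flags is an illustrative stand-in for V8's globals.

#include <cstdio>

struct Flags {
  bool regexp_interpret_all;
  bool regexp_tier_up;
};

bool CanGenerateNativeCode(Flags f) {
  return !f.regexp_interpret_all || f.regexp_tier_up;
}
bool CanGenerateBytecode(Flags f) {
  return f.regexp_interpret_all || f.regexp_tier_up;
}

int main() {
  for (bool interp : {false, true}) {
    for (bool tier : {false, true}) {
      Flags f{interp, tier};
      // With tier-up enabled, both predicates hold at once.
      std::printf("interpret_all=%d tier_up=%d -> native=%d bytecode=%d\n",
                  interp, tier, CanGenerateNativeCode(f),
                  CanGenerateBytecode(f));
    }
  }
}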
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 5ebdd6ce151f40..d4144e7e6409ab 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -178,9 +178,10 @@ void RegExpMacroAssemblerS390::CheckCharacterGT(uc16 limit, Label* on_greater) {
   BranchOrBacktrack(gt, on_greater);
 }
 
-void RegExpMacroAssemblerS390::CheckAtStart(Label* on_at_start) {
+void RegExpMacroAssemblerS390::CheckAtStart(int cp_offset, Label* on_at_start) {
   __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
-  __ AddP(r2, current_input_offset(), Operand(-char_size()));
+  __ AddP(r2, current_input_offset(),
+          Operand(-char_size() + cp_offset * char_size()));
   __ CmpP(r2, r3);
   BranchOrBacktrack(eq, on_at_start);
 }
@@ -663,7 +664,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
   Label stack_ok;
 
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ mov(r2, Operand(stack_limit));
   __ LoadP(r2, MemOperand(r2));
   __ SubP(r2, sp, r2);
@@ -965,14 +966,19 @@ RegExpMacroAssemblerS390::Implementation() {
   return kS390Implementation;
 }
 
-void RegExpMacroAssemblerS390::LoadCurrentCharacter(int cp_offset,
-                                                    Label* on_end_of_input,
-                                                    bool check_bounds,
-                                                    int characters) {
+void RegExpMacroAssemblerS390::LoadCurrentCharacterImpl(int cp_offset,
+                                                        Label* on_end_of_input,
+                                                        bool check_bounds,
+                                                        int characters,
+                                                        int eats_at_least) {
+  // It's possible to preload a small number of characters when each success
+  // path requires a large number of characters, but not the reverse.
+  DCHECK_GE(eats_at_least, characters);
+
   DCHECK(cp_offset < (1 << 30));  // Be sane! (And ensure negation works)
   if (check_bounds) {
     if (cp_offset >= 0) {
-      CheckPosition(cp_offset + characters - 1, on_end_of_input);
+      CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
     } else {
       CheckPosition(cp_offset, on_end_of_input);
     }
@@ -1120,8 +1126,10 @@ int RegExpMacroAssemblerS390::CheckStackGuardState(Address* return_address,
   return NativeRegExpMacroAssembler::CheckStackGuardState(
       frame_entry<Isolate*>(re_frame, kIsolate),
       frame_entry<intptr_t>(re_frame, kStartIndex),
-      frame_entry<intptr_t>(re_frame, kDirectCall) == 1, return_address,
-      re_code, frame_entry_address<Address>(re_frame, kInputString),
+      static_cast<RegExp::CallOrigin>(
+          frame_entry<intptr_t>(re_frame, kDirectCall)),
+      return_address, re_code,
+      frame_entry_address<Address>(re_frame, kInputString),
       frame_entry_address<const byte*>(re_frame, kInputStart),
       frame_entry_address<const byte*>(re_frame, kInputEnd));
 }
@@ -1206,7 +1214,7 @@ void RegExpMacroAssemblerS390::Pop(Register target) {
 void RegExpMacroAssemblerS390::CheckPreemption() {
   // Check for preemption.
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ mov(r2, Operand(stack_limit));
   __ CmpLogicalP(sp, MemOperand(r2));
   SafeCall(&check_preempt_label_, le);
@@ -1214,7 +1222,7 @@ void RegExpMacroAssemblerS390::CheckPreemption() {
 
 void RegExpMacroAssemblerS390::CheckStackLimit() {
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(isolate());
+      ExternalReference::address_of_regexp_stack_limit_address(isolate());
   __ mov(r2, Operand(stack_limit));
   __ CmpLogicalP(backtrack_stackpointer(), MemOperand(r2));
   SafeCall(&stack_overflow_label_, le);
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 636ba76079ea4b..3a6a915263c64c 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
   virtual void AdvanceRegister(int reg, int by);
   virtual void Backtrack();
   virtual void Bind(Label* label);
-  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckAtStart(int cp_offset, Label* on_at_start);
   virtual void CheckCharacter(unsigned c, Label* on_equal);
   virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
                                       Label* on_equal);
@@ -59,9 +59,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
   virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
   virtual void IfRegisterEqPos(int reg, Label* if_eq);
   virtual IrregexpImplementation Implementation();
-  virtual void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1);
+  virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                        bool check_bounds, int characters,
+                                        int eats_at_least);
   virtual void PopCurrentPosition();
   virtual void PopRegister(int register_index);
   virtual void PushBacktrack(Label* label);
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 798484d52f7f9e..42ba13c4ee850b 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -48,6 +48,8 @@ namespace internal {
  *
  * The stack will have the following content, in some order, indexable from the
  * frame pointer (see, e.g., kStackHighEnd):
+ *    - Address regexp       (address of the JSRegExp object; unused in native
+ *                            code, passed to match signature of interpreter)
  *    - Isolate* isolate     (address of the current isolate)
  *    - direct_call          (if 1, direct call from JavaScript code, if 0 call
  *                            through the runtime system)
@@ -75,9 +77,8 @@ namespace internal {
  * "character -1" in the string (i.e., char_size() bytes before the first
  * character of the string).  The remaining registers starts out uninitialized.
  *
- * The first seven values must be provided by the calling code by
- * calling the code's entry address cast to a function pointer with the
- * following signature:
+ * The argument values must be provided by the calling code by calling the
+ * code's entry address cast to a function pointer with the following signature:
  * int (*match)(String input_string,
  *              int start_index,
  *              Address start,
@@ -86,7 +87,8 @@ namespace internal {
  *              int num_capture_registers,
  *              byte* stack_area_base,
  *              bool direct_call = false,
- *              Isolate* isolate);
+ *              Isolate* isolate,
+ *              Address regexp);
  */
 
 #define __ ACCESS_MASM((&masm_))
@@ -172,14 +174,12 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
   BranchOrBacktrack(greater, on_greater);
 }
 
-
-void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
-  __ leaq(rax, Operand(rdi, -char_size()));
+void RegExpMacroAssemblerX64::CheckAtStart(int cp_offset, Label* on_at_start) {
+  __ leaq(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
   __ cmpq(rax, Operand(rbp, kStringStartMinusOne));
   BranchOrBacktrack(equal, on_at_start);
 }
 
-
 void RegExpMacroAssemblerX64::CheckNotAtStart(int cp_offset,
                                               Label* on_not_at_start) {
   __ leaq(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
@@ -721,7 +721,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
   Label stack_ok;
 
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ movq(rcx, rsp);
   __ Move(kScratchRegister, stack_limit);
   __ subq(rcx, Operand(kScratchRegister, 0));
@@ -1035,15 +1035,19 @@ RegExpMacroAssembler::IrregexpImplementation
   return kX64Implementation;
 }
 
+void RegExpMacroAssemblerX64::LoadCurrentCharacterImpl(int cp_offset,
+                                                       Label* on_end_of_input,
+                                                       bool check_bounds,
+                                                       int characters,
+                                                       int eats_at_least) {
+  // It's possible to preload a small number of characters when each success
+  // path requires a large number of characters, but not the reverse.
+  DCHECK_GE(eats_at_least, characters);
 
-void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
-                                                   Label* on_end_of_input,
-                                                   bool check_bounds,
-                                                   int characters) {
   DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
   if (check_bounds) {
     if (cp_offset >= 0) {
-      CheckPosition(cp_offset + characters - 1, on_end_of_input);
+      CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
     } else {
       CheckPosition(cp_offset, on_end_of_input);
     }
@@ -1051,7 +1055,6 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
   LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
 
-
 void RegExpMacroAssemblerX64::PopCurrentPosition() {
   Pop(rdi);
 }
@@ -1198,7 +1201,8 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
   return NativeRegExpMacroAssembler::CheckStackGuardState(
       frame_entry<Isolate*>(re_frame, kIsolate),
       frame_entry<int>(re_frame, kStartIndex),
-      frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
+      static_cast<RegExp::CallOrigin>(frame_entry<int>(re_frame, kDirectCall)),
+      return_address, re_code,
       frame_entry_address<Address>(re_frame, kInputString),
       frame_entry_address<const byte*>(re_frame, kInputStart),
       frame_entry_address<const byte*>(re_frame, kInputEnd));
@@ -1318,7 +1322,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
   // Check for preemption.
   Label no_preempt;
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
+      ExternalReference::address_of_jslimit(isolate());
   __ load_rax(stack_limit);
   __ cmpq(rsp, rax);
   __ j(above, &no_preempt);
@@ -1332,7 +1336,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
 void RegExpMacroAssemblerX64::CheckStackLimit() {
   Label no_stack_overflow;
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(isolate());
+      ExternalReference::address_of_regexp_stack_limit_address(isolate());
   __ load_rax(stack_limit);
   __ cmpq(backtrack_stackpointer(), rax);
   __ j(above, &no_stack_overflow);
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 59b80ef8027154..9d011dcd467df8 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -24,7 +24,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
   void AdvanceRegister(int reg, int by) override;
   void Backtrack() override;
   void Bind(Label* label) override;
-  void CheckAtStart(Label* on_at_start) override;
+  void CheckAtStart(int cp_offset, Label* on_at_start) override;
   void CheckCharacter(uint32_t c, Label* on_equal) override;
   void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
                               Label* on_equal) override;
@@ -60,9 +60,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
   void IfRegisterLT(int reg, int comparand, Label* if_lt) override;
   void IfRegisterEqPos(int reg, Label* if_eq) override;
   IrregexpImplementation Implementation() override;
-  void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
-                            bool check_bounds = true,
-                            int characters = 1) override;
+  void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+                                bool check_bounds, int characters,
+                                int eats_at_least) override;
   void PopCurrentPosition() override;
   void PopRegister(int register_index) override;
   void PushBacktrack(Label* label) override;
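
The LoadCurrentCharacterImpl signatures above thread an eats_at_least value (a lower bound on the characters any success path consumes) into the bounds check, so a subject that is too short for any match fails before characters are loaded. Here is a standalone sketch of that check, assuming only what the s390/x64 hunks show; CheckLoad is an illustrative name, not a V8 function.

#include <cassert>

// Returns true if the load may proceed, false if no match can fit.
bool CheckLoad(int cp_offset, int characters, int eats_at_least,
               int current_pos, int subject_length) {
  // Preloading few characters while each path needs many is fine; the
  // reverse is not, hence the DCHECK_GE in the assemblers above.
  assert(eats_at_least >= characters);
  if (cp_offset >= 0) {
    // Check the last position any successful match must touch, which may
    // lie beyond the characters actually preloaded here.
    return current_pos + cp_offset + eats_at_least - 1 < subject_length;
  }
  return current_pos + cp_offset >= 0;  // reading backwards
}

int main() {
  // A pattern that eats at least 3 chars fails up front on a 2-char subject,
  // even though only 1 character would have been preloaded.
  return CheckLoad(0, 1, 3, 0, 2) ? 1 : 0;
}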
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index e6bcd94c019872..c82ec6d04f8e33 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -97,7 +97,6 @@ class Symbol;
   V(Map, global_dictionary_map, GlobalDictionaryMap)                           \
   V(Map, many_closures_cell_map, ManyClosuresCellMap)                          \
   V(Map, module_info_map, ModuleInfoMap)                                       \
-  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
   V(Map, name_dictionary_map, NameDictionaryMap)                               \
   V(Map, no_closures_cell_map, NoClosuresCellMap)                              \
   V(Map, number_dictionary_map, NumberDictionaryMap)                           \
@@ -199,6 +198,9 @@ class Symbol;
     TrampolineTrivialCodeDataContainer)                                        \
   V(CodeDataContainer, trampoline_promise_rejection_code_data_container,       \
     TrampolinePromiseRejectionCodeDataContainer)                               \
+  /* Canonical scope infos */                                                  \
+  V(ScopeInfo, global_this_binding_scope_info, GlobalThisBindingScopeInfo)     \
+  V(ScopeInfo, empty_function_scope_info, EmptyFunctionScopeInfo)              \
   /* Hash seed */                                                              \
   V(ByteArray, hash_seed, HashSeed)
 
@@ -270,8 +272,6 @@ class Symbol;
 
 // Entries in this list are limited to Smis and are not visited during GC.
 #define SMI_ROOT_LIST(V)                                                       \
-  V(Smi, stack_limit, StackLimit)                                              \
-  V(Smi, real_stack_limit, RealStackLimit)                                     \
   V(Smi, last_script_id, LastScriptId)                                         \
   V(Smi, last_debugging_id, LastDebuggingId)                                   \
   /* To distinguish the function templates, so that we can find them in the */ \
diff --git a/deps/v8/src/runtime/OWNERS b/deps/v8/src/runtime/OWNERS
index 450423f87850ba..f52e1c9ca8effc 100644
--- a/deps/v8/src/runtime/OWNERS
+++ b/deps/v8/src/runtime/OWNERS
@@ -1,3 +1,3 @@
-file://COMMON_OWNERS
+file:../../COMMON_OWNERS
 
 # COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index f35e72a666fd56..6190b16cff124b 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -272,7 +272,8 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
 
   // If the receiver is not a special receiver type, and the length is a valid
   // element index, perform fast operation tailored to specific ElementsKinds.
-  if (!object->map().IsSpecialReceiverMap() && len < kMaxUInt32 &&
+  if (!object->map().IsSpecialReceiverMap() &&
+      len <= JSObject::kMaxElementCount &&
       JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
     Handle<JSObject> obj = Handle<JSObject>::cast(object);
     ElementsAccessor* elements = obj->GetElementsAccessor();
@@ -283,8 +284,10 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
     return *isolate->factory()->ToBoolean(result.FromJust());
   }
 
-  // Otherwise, perform slow lookups for special receiver types
+  // Otherwise, perform slow lookups for special receiver types.
   for (; index < len; ++index) {
+    HandleScope iteration_hs(isolate);
+
     // Let elementK be the result of ? Get(O, ! ToString(k)).
     Handle<Object> element_k;
     {
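
The HandleScope added inside the loop above bounds handle growth: every ? Get(O, k) allocates handles, and without a per-iteration scope they would accumulate for the entire (possibly huge) len. A minimal sketch of the pattern follows, with stand-in types; IsolateSketch and HandleScopeSketch are illustrative, not V8's classes.

#include <cstddef>
#include <vector>

struct IsolateSketch {
  std::vector<void*> handles;
  size_t Mark() const { return handles.size(); }
  void Release(size_t mark) { handles.resize(mark); }
};

// RAII scope: handles created after construction die at destruction,
// in the spirit of v8::internal::HandleScope.
struct HandleScopeSketch {
  IsolateSketch* isolate;
  size_t mark;
  explicit HandleScopeSketch(IsolateSketch* i) : isolate(i), mark(i->Mark()) {}
  ~HandleScopeSketch() { isolate->Release(mark); }
};

void SlowIncludesLoop(IsolateSketch* isolate, int len) {
  for (int index = 0; index < len; ++index) {
    HandleScopeSketch iteration_hs(isolate);  // as in the hunk above
    isolate->handles.push_back(nullptr);      // handles from Get(O, k)
  }  // released each iteration instead of accumulating len handles
}

int main() {
  IsolateSketch isolate;
  SlowIncludesLoop(&isolate, 1000000);
  return isolate.handles.empty() ? 0 : 1;  // memory stays flat
}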
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 6e7b9874589773..a4ea0b22ed0909 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -25,7 +25,12 @@ RUNTIME_FUNCTION(Runtime_SetGrow) {
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()), isolate);
-  table = OrderedHashSet::EnsureGrowable(isolate, table);
+  MaybeHandle<OrderedHashSet> table_candidate =
+      OrderedHashSet::EnsureGrowable(isolate, table);
+  if (!table_candidate.ToHandle(&table)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kValueOutOfRange));
+  }
   holder->set_table(*table);
   return ReadOnlyRoots(isolate).undefined_value();
 }
@@ -56,7 +61,12 @@ RUNTIME_FUNCTION(Runtime_MapGrow) {
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()), isolate);
-  table = OrderedHashMap::EnsureGrowable(isolate, table);
+  MaybeHandle<OrderedHashMap> table_candidate =
+      OrderedHashMap::EnsureGrowable(isolate, table);
+  if (!table_candidate.ToHandle(&table)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kValueOutOfRange));
+  }
   holder->set_table(*table);
   return ReadOnlyRoots(isolate).undefined_value();
 }
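
Both runtime functions above switch EnsureGrowable to a MaybeHandle so that an oversized collection surfaces as a catchable RangeError rather than an unchecked failure. A sketch of the same shape in standalone C++, where std::optional plays the role of MaybeHandle and the capacity limit is purely illustrative:

#include <optional>
#include <stdexcept>
#include <utility>
#include <vector>

using Table = std::vector<int>;

// Stand-in for OrderedHashSet::EnsureGrowable: empty optional == failure.
std::optional<Table> EnsureGrowable(Table table) {
  const size_t kMaxCapacity = 1u << 20;  // illustrative limit
  if (table.capacity() >= kMaxCapacity) return std::nullopt;
  table.reserve(table.capacity() ? table.capacity() * 2 : 4);
  return table;
}

void SetGrow(Table& table) {
  auto candidate = EnsureGrowable(std::move(table));
  if (!candidate) {
    // Corresponds to THROW_NEW_ERROR_RETURN_FAILURE(kValueOutOfRange).
    throw std::range_error("Value out of range");
  }
  table = std::move(*candidate);
}

int main() {
  Table t;
  SetGrow(t);  // grows; throws std::range_error once the limit is hit
}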
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 19c6f8bff54bef..4364c55775e4b8 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -266,7 +266,26 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
       }
 
       DCHECK(result->is_turbofanned());
-      if (!function->HasOptimizedCode()) {
+      if (function->feedback_vector().invocation_count() <= 1 &&
+          function->HasOptimizationMarker()) {
+        // With lazy feedback allocation we may not have feedback for the
+        // initial part of the function that was executed before we allocated a
+        // feedback vector. Reset any optimization markers for such functions.
+        //
+        // TODO(mythria): Instead of resetting the optimization marker here we
+        // should only mark a function for optimization if it has sufficient
+        // feedback. We cannot do this currently since we OSR only after we mark
+        // a function for optimization. We should instead change it to be based
+        // based on number of ticks.
+        DCHECK(!function->IsInOptimizationQueue());
+        function->ClearOptimizationMarker();
+      }
+      // TODO(mythria): Once we have an OSR code cache we may not need to mark
+      // the function for non-concurrent compilation. We could arm the loops
+      // early so the second execution uses the already compiled OSR code and
+      // the optimization occurs concurrently off the main thread.
+      if (!function->HasOptimizedCode() &&
+          function->feedback_vector().invocation_count() > 1) {
         // If we're not already optimized, set to optimize non-concurrently on
         // the next call, otherwise we'd run unoptimized once more and
         // potentially compile for OSR again.
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 94320740afcd1c..0fbea6a193a1c8 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -115,10 +115,17 @@ RUNTIME_FUNCTION(Runtime_DebugBreakAtEntry) {
   DCHECK(function->shared().HasDebugInfo());
   DCHECK(function->shared().GetDebugInfo().BreakAtEntry());
 
-  // Get the top-most JavaScript frame.
+  // Get the top-most JavaScript frame. This is the debug target function.
   JavaScriptFrameIterator it(isolate);
   DCHECK_EQ(*function, it.frame()->function());
-  isolate->debug()->Break(it.frame(), function);
+  // Check whether the next JS frame is closer than the last API entry.
+  // If so, the call to the debug target came from JavaScript; otherwise it
+  // came from the API. Do not break in the latter case.
+  it.Advance();
+  if (!it.done() &&
+      it.frame()->fp() < isolate->thread_local_top()->last_api_entry_) {
+    isolate->debug()->Break(it.frame(), function);
+  }
 
   return ReadOnlyRoots(isolate).undefined_value();
 }
@@ -688,7 +695,7 @@ RUNTIME_FUNCTION(Runtime_DebugCollectCoverage) {
   int num_scripts = static_cast<int>(coverage->size());
   // Prepare property keys.
   Handle<FixedArray> scripts_array = factory->NewFixedArray(num_scripts);
-  Handle<String> script_string = factory->NewStringFromStaticChars("script");
+  Handle<String> script_string = factory->script_string();
   for (int i = 0; i < num_scripts; i++) {
     const auto& script_data = coverage->at(i);
     HandleScope inner_scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 4b8a0e38a1e2c7..80f9baa48d3850 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -13,6 +13,8 @@
 #include "src/execution/arguments-inl.h"
 #include "src/execution/frames-inl.h"
 #include "src/execution/isolate-inl.h"
+#include "src/execution/messages.h"
+#include "src/handles/maybe-handles.h"
 #include "src/init/bootstrapper.h"
 #include "src/logging/counters.h"
 #include "src/numbers/conversions.h"
@@ -322,7 +324,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
   double_align = false;
 
   return *isolate->factory()->NewFillerObject(size, double_align,
-                                              AllocationType::kYoung);
+                                              AllocationType::kYoung,
+                                              AllocationOrigin::kGeneratedCode);
 }
 
 RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
@@ -339,7 +342,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
     CHECK(size <= kMaxRegularHeapObjectSize);
   }
   return *isolate->factory()->NewFillerObject(size, double_align,
-                                              AllocationType::kOld);
+                                              AllocationType::kOld,
+                                              AllocationOrigin::kGeneratedCode);
 }
 
 RUNTIME_FUNCTION(Runtime_AllocateByteArray) {
@@ -372,228 +376,35 @@ RUNTIME_FUNCTION(Runtime_AllocateSeqTwoByteString) {
   return *result;
 }
 
-namespace {
-
-bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
-  JavaScriptFrameIterator it(isolate);
-  if (!it.done()) {
-    // Compute the location from the function and the relocation info of the
-    // baseline code. For optimized code this will use the deoptimization
-    // information to get canonical location information.
-    std::vector<FrameSummary> frames;
-    it.frame()->Summarize(&frames);
-    auto& summary = frames.back().AsJavaScript();
-    Handle<SharedFunctionInfo> shared(summary.function()->shared(), isolate);
-    Handle<Object> script(shared->script(), isolate);
-    SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared);
-    int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
-    if (script->IsScript() &&
-        !(Handle<Script>::cast(script)->source().IsUndefined(isolate))) {
-      Handle<Script> casted_script = Handle<Script>::cast(script);
-      *target = MessageLocation(casted_script, pos, pos + 1, shared);
-      return true;
-    }
-  }
-  return false;
-}
-
-Handle<String> BuildDefaultCallSite(Isolate* isolate, Handle<Object> object) {
-  IncrementalStringBuilder builder(isolate);
-
-  builder.AppendString(Object::TypeOf(isolate, object));
-  if (object->IsString()) {
-    builder.AppendCString(" \"");
-    builder.AppendString(Handle<String>::cast(object));
-    builder.AppendCString("\"");
-  } else if (object->IsNull(isolate)) {
-    builder.AppendCString(" ");
-    builder.AppendString(isolate->factory()->null_string());
-  } else if (object->IsTrue(isolate)) {
-    builder.AppendCString(" ");
-    builder.AppendString(isolate->factory()->true_string());
-  } else if (object->IsFalse(isolate)) {
-    builder.AppendCString(" ");
-    builder.AppendString(isolate->factory()->false_string());
-  } else if (object->IsNumber()) {
-    builder.AppendCString(" ");
-    builder.AppendString(isolate->factory()->NumberToString(object));
-  }
-
-  return builder.Finish().ToHandleChecked();
-}
-
-Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
-                              CallPrinter::ErrorHint* hint) {
-  MessageLocation location;
-  if (ComputeLocation(isolate, &location)) {
-    ParseInfo info(isolate, location.shared());
-    if (parsing::ParseAny(&info, location.shared(), isolate)) {
-      info.ast_value_factory()->Internalize(isolate);
-      CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
-      Handle<String> str = printer.Print(info.literal(), location.start_pos());
-      *hint = printer.GetErrorHint();
-      if (str->length() > 0) return str;
-    } else {
-      isolate->clear_pending_exception();
-    }
-  }
-  return BuildDefaultCallSite(isolate, object);
-}
-
-MessageTemplate UpdateErrorTemplate(CallPrinter::ErrorHint hint,
-                                    MessageTemplate default_id) {
-  switch (hint) {
-    case CallPrinter::ErrorHint::kNormalIterator:
-      return MessageTemplate::kNotIterable;
-
-    case CallPrinter::ErrorHint::kCallAndNormalIterator:
-      return MessageTemplate::kNotCallableOrIterable;
-
-    case CallPrinter::ErrorHint::kAsyncIterator:
-      return MessageTemplate::kNotAsyncIterable;
-
-    case CallPrinter::ErrorHint::kCallAndAsyncIterator:
-      return MessageTemplate::kNotCallableOrAsyncIterable;
-
-    case CallPrinter::ErrorHint::kNone:
-      return default_id;
-  }
-  return default_id;
-}
-
-}  // namespace
-
-MaybeHandle<Object> Runtime::ThrowIteratorError(Isolate* isolate,
-                                                Handle<Object> object) {
-  CallPrinter::ErrorHint hint = CallPrinter::kNone;
-  Handle<String> callsite = RenderCallSite(isolate, object, &hint);
-  MessageTemplate id = MessageTemplate::kNotIterableNoSymbolLoad;
-
-  if (hint == CallPrinter::kNone) {
-    Handle<Symbol> iterator_symbol = isolate->factory()->iterator_symbol();
-    THROW_NEW_ERROR(isolate, NewTypeError(id, callsite, iterator_symbol),
-                    Object);
-  }
-
-  id = UpdateErrorTemplate(hint, id);
-  THROW_NEW_ERROR(isolate, NewTypeError(id, callsite), Object);
-}
-
 RUNTIME_FUNCTION(Runtime_ThrowIteratorError) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  RETURN_RESULT_OR_FAILURE(isolate,
-                           Runtime::ThrowIteratorError(isolate, object));
+  return isolate->Throw(*ErrorUtils::NewIteratorError(isolate, object));
 }
 
 RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  CallPrinter::ErrorHint hint = CallPrinter::kNone;
-  Handle<String> callsite = RenderCallSite(isolate, object, &hint);
-  MessageTemplate id = MessageTemplate::kCalledNonCallable;
-  id = UpdateErrorTemplate(hint, id);
-  THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(id, callsite));
+  return isolate->Throw(
+      *ErrorUtils::NewCalledNonCallableError(isolate, object));
 }
 
 RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  CallPrinter::ErrorHint hint = CallPrinter::kNone;
-  Handle<String> callsite = RenderCallSite(isolate, object, &hint);
-  MessageTemplate id = MessageTemplate::kNotConstructor;
-  THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(id, callsite));
+  return isolate->Throw(
+      *ErrorUtils::NewConstructedNonConstructable(isolate, object));
 }
 
-namespace {
-
-// Helper visitor for ThrowPatternAssignmentNonCoercible which finds an
-// object literal (representing a destructuring assignment) at a given source
-// position.
-class PatternFinder final : public AstTraversalVisitor<PatternFinder> {
- public:
-  PatternFinder(Isolate* isolate, Expression* root, int position)
-      : AstTraversalVisitor(isolate, root),
-        position_(position),
-        object_literal_(nullptr) {}
-
-  ObjectLiteral* object_literal() const { return object_literal_; }
-
- private:
-  // This is required so that the overriden Visit* methods can be
-  // called by the base class (template).
-  friend class AstTraversalVisitor<PatternFinder>;
-
-  void VisitObjectLiteral(ObjectLiteral* lit) {
-    // TODO(leszeks): This could be smarter in only traversing object literals
-    // that are known to be a destructuring pattern. We could then also
-    // potentially find the corresponding assignment value and report that too.
-    if (lit->position() == position_) {
-      object_literal_ = lit;
-      return;
-    }
-    AstTraversalVisitor::VisitObjectLiteral(lit);
-  }
-
-  int position_;
-  ObjectLiteral* object_literal_;
-};
-
-}  // namespace
-
 RUNTIME_FUNCTION(Runtime_ThrowPatternAssignmentNonCoercible) {
   HandleScope scope(isolate);
-  DCHECK_EQ(0, args.length());
-
-  // Find the object literal representing the destructuring assignment, so that
-  // we can try to attribute the error to a property name on it rather than to
-  // the literal itself.
-  MaybeHandle<String> maybe_property_name;
-  MessageLocation location;
-  if (ComputeLocation(isolate, &location)) {
-    ParseInfo info(isolate, location.shared());
-    if (parsing::ParseAny(&info, location.shared(), isolate)) {
-      info.ast_value_factory()->Internalize(isolate);
-
-      PatternFinder finder(isolate, info.literal(), location.start_pos());
-      finder.Run();
-      if (finder.object_literal()) {
-        for (ObjectLiteralProperty* pattern_property :
-             *finder.object_literal()->properties()) {
-          Expression* key = pattern_property->key();
-          if (key->IsPropertyName()) {
-            int pos = key->position();
-            maybe_property_name =
-                key->AsLiteral()->AsRawPropertyName()->string();
-            // Change the message location to point at the property name.
-            location = MessageLocation(location.script(), pos, pos + 1,
-                                       location.shared());
-            break;
-          }
-        }
-      }
-    } else {
-      isolate->clear_pending_exception();
-    }
-  }
-
-  // Create a "non-coercible" type error with a property name if one is
-  // available, otherwise create a generic one.
-  Handle<Object> error;
-  Handle<String> property_name;
-  if (maybe_property_name.ToHandle(&property_name)) {
-    error = isolate->factory()->NewTypeError(
-        MessageTemplate::kNonCoercibleWithProperty, property_name);
-  } else {
-    error = isolate->factory()->NewTypeError(MessageTemplate::kNonCoercible);
-  }
-
-  // Explicitly pass the calculated location, as we may have updated it to match
-  // the property name.
-  return isolate->Throw(*error, &location);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  return ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, object,
+                                                  MaybeHandle<Object>());
 }
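
The intrinsic now receives the non-coercible value as its single argument and reuses ErrorUtils::ThrowLoadFromNullOrUndefined, rather than re-parsing the source to locate the destructuring pattern. A minimal sketch of JS that reaches this path (message wording is illustrative):

try {
  ({ a } = null);  // TypeError: destructuring a null value
} catch (e) {}
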
 
 RUNTIME_FUNCTION(Runtime_ThrowConstructorReturnedNonObject) {
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 0c7a28c279fa2d..0ffc6e932ef471 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -113,9 +113,12 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
           copy->map(isolate).instance_descriptors(isolate), isolate);
       int limit = copy->map(isolate).NumberOfOwnDescriptors();
       for (int i = 0; i < limit; i++) {
-        DCHECK_EQ(kField, descriptors->GetDetails(i).location());
-        DCHECK_EQ(kData, descriptors->GetDetails(i).kind());
-        FieldIndex index = FieldIndex::ForDescriptor(copy->map(isolate), i);
+        PropertyDetails details = descriptors->GetDetails(i);
+        DCHECK_EQ(kField, details.location());
+        DCHECK_EQ(kData, details.kind());
+        FieldIndex index = FieldIndex::ForPropertyIndex(
+            copy->map(isolate), details.field_index(),
+            details.representation());
         if (copy->IsUnboxedDoubleField(isolate, index)) continue;
         Object raw = copy->RawFastPropertyAt(isolate, index);
         if (raw.IsJSObject(isolate)) {
@@ -123,11 +126,9 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
           ASSIGN_RETURN_ON_EXCEPTION(
               isolate, value, VisitElementOrProperty(copy, value), JSObject);
           if (copying) copy->FastPropertyAtPut(index, *value);
-        } else if (copying && raw.IsMutableHeapNumber(isolate)) {
-          DCHECK(descriptors->GetDetails(i).representation().IsDouble());
-          uint64_t double_value = MutableHeapNumber::cast(raw).value_as_bits();
-          auto value =
-              isolate->factory()->NewMutableHeapNumberFromBits(double_value);
+        } else if (copying && details.representation().IsDouble()) {
+          uint64_t double_value = HeapNumber::cast(raw).value_as_bits();
+          auto value = isolate->factory()->NewHeapNumberFromBits(double_value);
           copy->FastPropertyAtPut(index, *value);
         }
       }
@@ -154,8 +155,10 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
     case PACKED_ELEMENTS:
     case PACKED_FROZEN_ELEMENTS:
     case PACKED_SEALED_ELEMENTS:
+    case PACKED_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_FROZEN_ELEMENTS:
     case HOLEY_SEALED_ELEMENTS:
+    case HOLEY_NONEXTENSIBLE_ELEMENTS:
     case HOLEY_ELEMENTS: {
       Handle<FixedArray> elements(FixedArray::cast(copy->elements(isolate)),
                                   isolate);
@@ -652,15 +655,16 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
     DCHECK(maybe_vector->IsFeedbackVector());
     vector = Handle<FeedbackVector>::cast(maybe_vector);
   }
-  Handle<Object> boilerplate;
   if (vector.is_null()) {
+    Handle<JSRegExp> new_regexp;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, boilerplate,
+        isolate, new_regexp,
         JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags)));
-    return *JSRegExp::Copy(Handle<JSRegExp>::cast(boilerplate));
+    return *new_regexp;
   }
 
   // Check if boilerplate exists. If not, create it first.
+  Handle<JSRegExp> boilerplate;
   Handle<Object> literal_site(vector->Get(literal_slot)->cast<Object>(),
                               isolate);
   if (!HasBoilerplate(literal_site)) {
@@ -673,7 +677,7 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
     }
     vector->Set(literal_slot, *boilerplate);
   }
-  return *JSRegExp::Copy(Handle<JSRegExp>::cast(boilerplate));
+  return *JSRegExp::Copy(boilerplate);
 }
 
 }  // namespace internal
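
With a feedback vector present, the boilerplate JSRegExp is cached in the literal slot and copied on every evaluation; without one, a fresh JSRegExp is now returned directly instead of being copied. A minimal sketch of the JS-observable behavior in both paths:

function f() { return /abc/g; }
f() === f();  // false: each evaluation yields a distinct regexp object
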
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 25bd07b53524b4..310cdaab4208f5 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -2,10 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/ast/prettyprinter.h"
 #include "src/common/message-template.h"
 #include "src/debug/debug.h"
 #include "src/execution/arguments-inl.h"
 #include "src/execution/isolate-inl.h"
+#include "src/execution/messages.h"
+#include "src/handles/maybe-handles.h"
 #include "src/heap/heap-inl.h"  // For ToBoolean. TODO(jkummerow): Drop.
 #include "src/init/bootstrapper.h"
 #include "src/logging/counters.h"
@@ -24,13 +27,8 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
                                                Handle<Object> key,
                                                bool* is_found_out) {
   if (object->IsNullOrUndefined(isolate)) {
-    if (*key == ReadOnlyRoots(isolate).iterator_symbol()) {
-      return Runtime::ThrowIteratorError(isolate, object);
-    }
-    THROW_NEW_ERROR(
-        isolate,
-        NewTypeError(MessageTemplate::kNonObjectPropertyLoad, key, object),
-        Object);
+    ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, object, key);
+    return MaybeHandle<Object>();
   }
 
   bool success = false;
@@ -46,7 +44,7 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
     Handle<Object> name_string(Symbol::cast(*key).name(), isolate);
     DCHECK(name_string->IsString());
     THROW_NEW_ERROR(isolate,
-                    NewTypeError(MessageTemplate::kInvalidPrivateFieldRead,
+                    NewTypeError(MessageTemplate::kInvalidPrivateMemberRead,
                                  name_string, object),
                     Object);
   }
@@ -413,7 +411,7 @@ MaybeHandle<Object> Runtime::SetObjectProperty(
     Handle<Object> name_string(Symbol::cast(*key).name(), isolate);
     DCHECK(name_string->IsString());
     THROW_NEW_ERROR(isolate,
-                    NewTypeError(MessageTemplate::kInvalidPrivateFieldWrite,
+                    NewTypeError(MessageTemplate::kInvalidPrivateMemberWrite,
                                  name_string, object),
                     Object);
   }
@@ -778,7 +776,6 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
   return isolate->heap()->ToBoolean(maybe.FromJust());
 }
 
-
 RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -795,7 +792,6 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
   return *isolate->factory()->NewJSArrayWithElements(keys);
 }
 
-
 RUNTIME_FUNCTION(Runtime_ToFastProperties) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -807,14 +803,12 @@ RUNTIME_FUNCTION(Runtime_ToFastProperties) {
   return *object;
 }
 
-
 RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
   HandleScope scope(isolate);
   DCHECK_EQ(0, args.length());
   return *isolate->factory()->NewHeapNumber(0);
 }
 
-
 RUNTIME_FUNCTION(Runtime_NewObject) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -845,7 +839,6 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
   return ReadOnlyRoots(isolate).undefined_value();
 }
 
-
 RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -862,12 +855,10 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
   return *object;
 }
 
-
 static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
   return obj->IsNullOrUndefined(isolate) || obj->IsCallable();
 }
 
-
 // Implements part of 8.12.9 DefineOwnProperty.
 // There are 3 cases that lead here:
 // Step 4b - define a new accessor property.
@@ -891,7 +882,6 @@ RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
   return ReadOnlyRoots(isolate).undefined_value();
 }
 
-
 RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
   HandleScope scope(isolate);
   DCHECK_EQ(6, args.length());
@@ -914,8 +904,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
         nexus.ConfigureMegamorphic(PROPERTY);
       }
     } else if (nexus.ic_state() == MONOMORPHIC) {
-      if (nexus.GetFirstMap() != object->map() ||
-          nexus.GetFeedbackExtra() != MaybeObject::FromObject(*name)) {
+      if (nexus.GetFirstMap() != object->map() || nexus.GetName() != *name) {
         nexus.ConfigureMegamorphic(PROPERTY);
       }
     }
@@ -984,7 +973,6 @@ RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
       IsFastPackedElementsKind(obj.map().elements_kind()));
 }
 
-
 RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(1, args.length());
@@ -992,7 +980,6 @@ RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
   return isolate->heap()->ToBoolean(obj.IsJSReceiver());
 }
 
-
 RUNTIME_FUNCTION(Runtime_ClassOf) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(1, args.length());
@@ -1069,9 +1056,9 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
   DCHECK_LE(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, source, 0);
 
-  // 2. If source is undefined or null, let keys be an empty List.
-  if (source->IsUndefined(isolate) || source->IsNull(isolate)) {
-    return ReadOnlyRoots(isolate).undefined_value();
+  // If source is undefined or null, throw a non-coercible error.
+  if (source->IsNullOrUndefined(isolate)) {
+    return ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, source);
   }
 
   ScopedVector<Handle<Object>> excluded_properties(args.length() - 1);
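
A minimal sketch of the JS that reaches this path: object rest destructuring from null or undefined, which now throws the same non-coercible TypeError as other loads from null instead of silently yielding undefined:

try {
  const { a, ...rest } = null;  // TypeError via ThrowLoadFromNullOrUndefined
} catch (e) {}
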
@@ -1141,7 +1128,6 @@ RUNTIME_FUNCTION(Runtime_ToNumeric) {
   RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumeric(isolate, input));
 }
 
-
 RUNTIME_FUNCTION(Runtime_ToLength) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -1175,7 +1161,6 @@ RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
   return isolate->heap()->ToBoolean(result.FromJust());
 }
 
-
 // ES6 section 7.4.7 CreateIterResultObject ( value, done )
 RUNTIME_FUNCTION(Runtime_CreateIterResultObject) {
   HandleScope scope(isolate);
@@ -1217,6 +1202,32 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
   return *desc.ToPropertyDescriptorObject(isolate);
 }
 
+RUNTIME_FUNCTION(Runtime_LoadPrivateSetter) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(args.length(), 1);
+  CONVERT_ARG_HANDLE_CHECKED(AccessorPair, pair, 0);
+  DCHECK(pair->setter().IsJSFunction());
+  return pair->setter();
+}
+
+RUNTIME_FUNCTION(Runtime_LoadPrivateGetter) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(args.length(), 1);
+  CONVERT_ARG_HANDLE_CHECKED(AccessorPair, pair, 0);
+  DCHECK(pair->getter().IsJSFunction());
+  return pair->getter();
+}
+
+RUNTIME_FUNCTION(Runtime_CreatePrivateAccessors) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(args.length(), 2);
+  DCHECK(args[0].IsNull() || args[0].IsJSFunction());
+  DCHECK(args[1].IsNull() || args[1].IsJSFunction());
+  Handle<AccessorPair> pair = isolate->factory()->NewAccessorPair();
+  pair->SetComponents(args[0], args[1]);
+  return *pair;
+}
+
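
These intrinsics back private accessors from the class methods proposal: CreatePrivateAccessors builds the AccessorPair, and LoadPrivateGetter/LoadPrivateSetter fetch its components at use sites. A minimal sketch of the JS feature they support, assuming a V8 build with the proposal enabled (e.g. via --harmony-private-methods at the time of this change):

class Counter {
  #n = 0;
  get #value() { return this.#n; }  // the pair's getter
  set #value(v) { this.#n = v; }    // the pair's setter
  increment() { this.#value = this.#value + 1; return this.#value; }
}
new Counter().increment();  // 1
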
 RUNTIME_FUNCTION(Runtime_AddPrivateBrand) {
   HandleScope scope(isolate);
   DCHECK_EQ(args.length(), 2);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 76056a7823973b..e197e16e11282b 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -613,6 +613,20 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
 
   JSRegExp::Type typeTag = regexp->TypeTag();
   if (typeTag == JSRegExp::IRREGEXP) {
+    // Force tier-up to native code for global replaces. Global replace is
+    // implemented differently for native code and for bytecode: the native
+    // code expects an array in which to store all the matches, while the
+    // bytecode matches one at a time, so it is easier to tier up to native
+    // code from the start.
+    if (FLAG_regexp_tier_up) {
+      regexp->MarkTierUpForNextExec();
+      if (FLAG_trace_regexp_tier_up) {
+        PrintF(
+            "Forcing tier-up of JSRegExp object %p in "
+            "StringReplaceGlobalRegExpWithString\n",
+            reinterpret_cast<void*>(regexp->ptr()));
+      }
+    }
     // Ensure the RegExp is compiled so we can access the capture-name map.
     if (RegExp::IrregexpPrepare(isolate, regexp, subject) == -1) {
       DCHECK(isolate->has_pending_exception());
@@ -1085,6 +1099,19 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
   DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
   DCHECK(subject->IsFlat());
 
+  // Force tier-up to native code for global replaces. Global replace is
+  // implemented differently for native code and for bytecode: the native code
+  // expects an array in which to store all the matches, while the bytecode
+  // matches one at a time, so it is easier to tier up to native code from the
+  // start.
+  if (FLAG_regexp_tier_up && regexp->TypeTag() == JSRegExp::IRREGEXP) {
+    regexp->MarkTierUpForNextExec();
+    if (FLAG_trace_regexp_tier_up) {
+      PrintF("Forcing tier-up of JSRegExp object %p in SearchRegExpMultiple\n",
+             reinterpret_cast<void*>(regexp->ptr()));
+    }
+  }
+
   int capture_count = regexp->CaptureCount();
   int subject_length = subject->length();
 
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index f67b6922bf6400..812b09b1f0640c 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -678,9 +678,6 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
       FindNameClash(isolate, scope_info, global_object, script_context_table);
   if (isolate->has_pending_exception()) return name_clash_result;
 
-  // We do not need script contexts here during bootstrap.
-  DCHECK(!isolate->bootstrapper()->IsActive());
-
   Handle<Context> result =
       isolate->factory()->NewScriptContext(native_context, scope_info);
 
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index f0caaaa14cf336..a766dd5db29260 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -17,6 +17,7 @@
 #include "src/execution/arguments-inl.h"
 #include "src/execution/frames-inl.h"
 #include "src/execution/isolate-inl.h"
+#include "src/execution/protectors-inl.h"
 #include "src/execution/runtime-profiler.h"
 #include "src/heap/heap-inl.h"  // For ToBoolean. TODO(jkummerow): Drop.
 #include "src/heap/heap-write-barrier-inl.h"
@@ -24,6 +25,7 @@
 #include "src/logging/counters.h"
 #include "src/objects/heap-object-inl.h"
 #include "src/objects/js-array-inl.h"
+#include "src/objects/js-regexp-inl.h"
 #include "src/objects/smi.h"
 #include "src/snapshot/natives.h"
 #include "src/trap-handler/trap-handler.h"
@@ -310,13 +312,19 @@ namespace {
 
 bool EnsureFeedbackVector(Handle<JSFunction> function) {
   // Check function allows lazy compilation.
-  if (!function->shared().allows_lazy_compilation()) {
-    return false;
-  }
+  if (!function->shared().allows_lazy_compilation()) return false;
+
+  if (function->has_feedback_vector()) return true;
 
   // If function isn't compiled, compile it now.
   IsCompiledScope is_compiled_scope(function->shared().is_compiled_scope());
-  if (!is_compiled_scope.is_compiled() &&
+  // If the JSFunction isn't compiled but has an initialized feedback cell
+  // array, there is no need to compile it: the CompileLazy builtin handles
+  // these cases by installing code from the SFI. Compiling here could also
+  // trigger an extra optimization if FLAG_always_opt is set.
+  bool needs_compilation =
+      !function->is_compiled() && !function->has_closure_feedback_cell_array();
+  if (needs_compilation &&
       !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
                          &is_compiled_scope)) {
     return false;
@@ -343,12 +351,24 @@ RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
 
 RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
   HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
+  DCHECK(args.length() == 1 || args.length() == 2);
   if (!args[0].IsJSFunction()) {
     return ReadOnlyRoots(isolate).undefined_value();
   }
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
 
+  bool allow_heuristic_optimization = false;
+  if (args.length() == 2) {
+    CONVERT_ARG_HANDLE_CHECKED(Object, sync_object, 1);
+    if (!sync_object->IsString())
+      return ReadOnlyRoots(isolate).undefined_value();
+    Handle<String> sync = Handle<String>::cast(sync_object);
+    if (sync->IsOneByteEqualTo(
+            StaticCharVector("allow heuristic optimization"))) {
+      allow_heuristic_optimization = true;
+    }
+  }
+
   if (!EnsureFeedbackVector(function)) {
     return ReadOnlyRoots(isolate).undefined_value();
   }
@@ -369,7 +389,8 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
   // Hold onto the bytecode array between marking and optimization to ensure
   // it's not flushed.
   if (FLAG_testing_d8_test_runner) {
-    PendingOptimizationTable::PreparedForOptimization(isolate, function);
+    PendingOptimizationTable::PreparedForOptimization(
+        isolate, function, allow_heuristic_optimization);
   }
 
   return ReadOnlyRoots(isolate).undefined_value();
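
A minimal d8 sketch of the new two-argument form, assuming --allow-natives-syntax; the second argument must be exactly the string checked above for heuristic optimization to remain allowed:

function add(a, b) { return a + b; }
%PrepareFunctionForOptimization(add, "allow heuristic optimization");
add(1, 2);
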
@@ -502,7 +523,11 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
   }
 
   if (function->IsOptimized()) {
-    status |= static_cast<int>(OptimizationStatus::kOptimized);
+    if (function->code().marked_for_deoptimization()) {
+      status |= static_cast<int>(OptimizationStatus::kMarkedForDeoptimization);
+    } else {
+      status |= static_cast<int>(OptimizationStatus::kOptimized);
+    }
     if (function->code().is_turbofanned()) {
       status |= static_cast<int>(OptimizationStatus::kTurboFanned);
     }
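
A minimal d8 sketch of reading the new bit, assuming --allow-natives-syntax; 1 << 13 mirrors OptimizationStatus::kMarkedForDeoptimization defined later in this patch:

function f(x) { return x + 1; }
%PrepareFunctionForOptimization(f);
f(1);
%OptimizeFunctionOnNextCall(f);
f(2);
const status = %GetOptimizationStatus(f);
const markedForDeopt = (status & (1 << 13)) !== 0;
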
@@ -1024,6 +1049,24 @@ RUNTIME_FUNCTION(Runtime_SetWasmThreadsEnabled) {
   return ReadOnlyRoots(isolate).undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_RegexpHasBytecode) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_BOOLEAN_ARG_CHECKED(is_latin1, 1);
+  bool is_irregexp_bytecode = regexp.Bytecode(is_latin1).IsByteArray();
+  return isolate->heap()->ToBoolean(is_irregexp_bytecode);
+}
+
+RUNTIME_FUNCTION(Runtime_RegexpHasNativeCode) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_BOOLEAN_ARG_CHECKED(is_latin1, 1);
+  bool is_irregexp_native_code = regexp.Code(is_latin1).IsCode();
+  return isolate->heap()->ToBoolean(is_irregexp_native_code);
+}
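
A minimal d8 sketch of these test-only intrinsics, assuming --allow-natives-syntax; the boolean argument selects the one-byte (latin1) versus two-byte variant:

const re = /ab+c/;
re.exec("abbc");                 // compiles the one-byte (latin1) variant
%RegexpHasBytecode(re, true);    // true while the regexp is on the bytecode tier
%RegexpHasNativeCode(re, true);  // true once it has tiered up to native code
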
+
 #define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name)      \
   RUNTIME_FUNCTION(Runtime_Has##Name) {                 \
     CONVERT_ARG_CHECKED(JSObject, obj, 0);              \
@@ -1057,7 +1100,8 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
 RUNTIME_FUNCTION(Runtime_ArraySpeciesProtector) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(0, args.length());
-  return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
+  return isolate->heap()->ToBoolean(
+      Protectors::IsArraySpeciesLookupChainIntact(isolate));
 }
 
 RUNTIME_FUNCTION(Runtime_MapIteratorProtector) {
@@ -1299,6 +1343,7 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
     void RegExpCodeCreateEvent(AbstractCode code, String source) final {}
     void CodeMoveEvent(AbstractCode from, AbstractCode to) final {}
     void SharedFunctionInfoMoveEvent(Address from, Address to) final {}
+    void NativeContextMoveEvent(Address from, Address to) final {}
     void CodeMovingGCEvent() final {}
     void CodeDisableOptEvent(AbstractCode code,
                              SharedFunctionInfo shared) final {}
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 65acb296cccf2b..57e59c07be76be 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -430,8 +430,8 @@ RUNTIME_FUNCTION(Runtime_WasmRefFunc) {
   isolate->set_context(instance->native_context());
   CONVERT_UINT32_ARG_CHECKED(function_index, 0);
 
-  Handle<WasmExportedFunction> function =
-      WasmInstanceObject::GetOrCreateWasmExportedFunction(isolate, instance,
+  Handle<WasmExternalFunction> function =
+      WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
                                                           function_index);
 
   return *function;
@@ -568,5 +568,24 @@ RUNTIME_FUNCTION(Runtime_WasmTableFill) {
   }
   return ReadOnlyRoots(isolate).undefined_value();
 }
+
+RUNTIME_FUNCTION(Runtime_WasmNewMultiReturnFixedArray) {
+  DCHECK_EQ(1, args.length());
+  HandleScope scope(isolate);
+  CONVERT_INT32_ARG_CHECKED(size, 0);
+  Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(size);
+  return *fixed_array;
+}
+
+RUNTIME_FUNCTION(Runtime_WasmNewMultiReturnJSArray) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  DCHECK(!isolate->context().is_null());
+  CONVERT_ARG_CHECKED(FixedArray, fixed_array, 0);
+  Handle<FixedArray> fixed_array_handle(fixed_array, isolate);
+  Handle<JSArray> array = isolate->factory()->NewJSArrayWithElements(
+      fixed_array_handle, PACKED_ELEMENTS);
+  return *array;
+}
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/runtime/runtime-weak-refs.cc b/deps/v8/src/runtime/runtime-weak-refs.cc
deleted file mode 100644
index 2720176c30baed..00000000000000
--- a/deps/v8/src/runtime/runtime-weak-refs.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "include/v8.h"
-#include "src/api/api.h"
-#include "src/execution/arguments-inl.h"
-#include "src/execution/execution.h"
-#include "src/handles/handles-inl.h"
-#include "src/logging/counters.h"
-#include "src/objects/js-weak-refs-inl.h"
-#include "src/objects/objects-inl.h"
-#include "src/runtime/runtime-utils.h"
-
-namespace v8 {
-namespace internal {
-
-RUNTIME_FUNCTION(Runtime_FinalizationGroupCleanupJob) {
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSFinalizationGroup, finalization_group, 0);
-  finalization_group->set_scheduled_for_cleanup(false);
-
-  Handle<Object> cleanup(finalization_group->cleanup(), isolate);
-  JSFinalizationGroup::Cleanup(isolate, finalization_group, cleanup);
-  return ReadOnlyRoots(isolate).undefined_value();
-}
-
-}  // namespace internal
-}  // namespace v8
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index ad49a0299cd860..09840802935309 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -107,9 +107,12 @@ bool Runtime::NeedsExactContext(FunctionId id) {
       return false;
     case Runtime::kAddPrivateField:
     case Runtime::kAddPrivateBrand:
+    case Runtime::kCreatePrivateAccessors:
     case Runtime::kCopyDataProperties:
     case Runtime::kCreateDataProperty:
     case Runtime::kCreatePrivateNameSymbol:
+    case Runtime::kLoadPrivateGetter:
+    case Runtime::kLoadPrivateSetter:
     case Runtime::kReThrow:
     case Runtime::kThrow:
     case Runtime::kThrowApplyNonFunction:
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 92ca9f31426207..d705b05752c3d8 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -37,17 +37,17 @@ namespace internal {
 // inline), use the F macro below. To declare the runtime version and the inline
 // version simultaneously, use the I macro below.
 
-#define FOR_EACH_INTRINSIC_ARRAY(F, I)    \
-  F(ArrayIncludes_Slow, 3, 1)             \
-  F(ArrayIndexOf, 3, 1)                   \
-  F(ArrayIsArray, 1, 1)                   \
-  F(ArraySpeciesConstructor, 1, 1)        \
-  F(GrowArrayElements, 2, 1)              \
-  I(IsArray, 1, 1)                        \
-  F(NewArray, -1 /* >= 3 */, 1)           \
-  F(NormalizeElements, 1, 1)              \
-  F(TransitionElementsKind, 2, 1)         \
-  F(TransitionElementsKindWithKind, 2, 1) \
+#define FOR_EACH_INTRINSIC_ARRAY(F, I) \
+  F(ArrayIncludes_Slow, 3, 1)          \
+  F(ArrayIndexOf, 3, 1)                \
+  F(ArrayIsArray, 1, 1)                \
+  F(ArraySpeciesConstructor, 1, 1)     \
+  F(GrowArrayElements, 2, 1)           \
+  I(IsArray, 1, 1)                     \
+  F(NewArray, -1 /* >= 3 */, 1)        \
+  F(NormalizeElements, 1, 1)           \
+  F(TransitionElementsKind, 2, 1)      \
+  F(TransitionElementsKindWithKind, 2, 1)
 
 #define FOR_EACH_INTRINSIC_ATOMICS(F, I) \
   F(AtomicsLoad64, 2, 1)                 \
@@ -236,7 +236,7 @@ namespace internal {
   F(ThrowIteratorError, 1, 1)                        \
   F(ThrowIteratorResultNotAnObject, 1, 1)            \
   F(ThrowNotConstructor, 1, 1)                       \
-  F(ThrowPatternAssignmentNonCoercible, 0, 1)        \
+  F(ThrowPatternAssignmentNonCoercible, 1, 1)        \
   F(ThrowRangeError, -1 /* >= 1 */, 1)               \
   F(ThrowReferenceError, 1, 1)                       \
   F(ThrowAccessedUninitializedVariable, 1, 1)        \
@@ -247,8 +247,7 @@ namespace internal {
   F(ThrowTypeError, -1 /* >= 1 */, 1)                \
   F(ThrowTypeErrorIfStrict, -1 /* >= 1 */, 1)        \
   F(Typeof, 1, 1)                                    \
-  F(UnwindAndFindExceptionHandler, 0, 1)             \
-  F(FinalizationGroupCleanupJob, 1, 1)
+  F(UnwindAndFindExceptionHandler, 0, 1)
 
 #define FOR_EACH_INTRINSIC_LITERALS(F, I)           \
   F(CreateArrayLiteral, 4, 1)                       \
@@ -285,6 +284,7 @@ namespace internal {
   F(CopyDataPropertiesWithExcludedProperties, -1 /* >= 1 */, 1) \
   I(CreateDataProperty, 3, 1)                                   \
   I(CreateIterResultObject, 2, 1)                               \
+  F(CreatePrivateAccessors, 2, 1)                               \
   F(DefineAccessorPropertyUnchecked, 5, 1)                      \
   F(DefineDataPropertyInLiteral, 6, 1)                          \
   F(DefineGetterPropertyUnchecked, 4, 1)                        \
@@ -305,6 +305,8 @@ namespace internal {
   F(JSReceiverGetPrototypeOf, 1, 1)                             \
   F(JSReceiverSetPrototypeOfDontThrow, 2, 1)                    \
   F(JSReceiverSetPrototypeOfThrow, 2, 1)                        \
+  F(LoadPrivateGetter, 1, 1)                                    \
+  F(LoadPrivateSetter, 1, 1)                                    \
   F(NewObject, 2, 1)                                            \
   F(ObjectCreate, 2, 1)                                         \
   F(ObjectEntries, 1, 1)                                        \
@@ -495,12 +497,14 @@ namespace internal {
   F(IsThreadInWasm, 0, 1)                     \
   F(IsWasmCode, 1, 1)                         \
   F(IsWasmTrapHandlerEnabled, 0, 1)           \
+  F(RegexpHasBytecode, 2, 1)                  \
+  F(RegexpHasNativeCode, 2, 1)                \
   F(MapIteratorProtector, 0, 1)               \
   F(NeverOptimizeFunction, 1, 1)              \
   F(NotifyContextDisposed, 0, 1)              \
   F(OptimizeFunctionOnNextCall, -1, 1)        \
   F(OptimizeOsr, -1, 1)                       \
-  F(PrepareFunctionForOptimization, 1, 1)     \
+  F(PrepareFunctionForOptimization, -1, 1)    \
   F(PrintWithNameForAssert, 2, 1)             \
   F(RedirectToWasmInterpreter, 2, 1)          \
   F(RunningInSimulator, 0, 1)                 \
@@ -530,28 +534,30 @@ namespace internal {
   F(TypedArraySet, 2, 1)                    \
   F(TypedArraySortFast, 1, 1)
 
-#define FOR_EACH_INTRINSIC_WASM(F, I) \
-  F(ThrowWasmError, 1, 1)             \
-  F(ThrowWasmStackOverflow, 0, 1)     \
-  F(WasmI32AtomicWait, 4, 1)          \
-  F(WasmI64AtomicWait, 5, 1)          \
-  F(WasmAtomicNotify, 3, 1)           \
-  F(WasmExceptionGetValues, 1, 1)     \
-  F(WasmExceptionGetTag, 1, 1)        \
-  F(WasmMemoryGrow, 2, 1)             \
-  F(WasmRunInterpreter, 2, 1)         \
-  F(WasmStackGuard, 0, 1)             \
-  F(WasmThrowCreate, 2, 1)            \
-  F(WasmThrowTypeError, 0, 1)         \
-  F(WasmRefFunc, 1, 1)                \
-  F(WasmFunctionTableGet, 3, 1)       \
-  F(WasmFunctionTableSet, 4, 1)       \
-  F(WasmTableInit, 5, 1)              \
-  F(WasmTableCopy, 5, 1)              \
-  F(WasmTableGrow, 3, 1)              \
-  F(WasmTableFill, 4, 1)              \
-  F(WasmIsValidFuncRefValue, 1, 1)    \
-  F(WasmCompileLazy, 2, 1)
+#define FOR_EACH_INTRINSIC_WASM(F, I)   \
+  F(ThrowWasmError, 1, 1)               \
+  F(ThrowWasmStackOverflow, 0, 1)       \
+  F(WasmI32AtomicWait, 4, 1)            \
+  F(WasmI64AtomicWait, 5, 1)            \
+  F(WasmAtomicNotify, 3, 1)             \
+  F(WasmExceptionGetValues, 1, 1)       \
+  F(WasmExceptionGetTag, 1, 1)          \
+  F(WasmMemoryGrow, 2, 1)               \
+  F(WasmRunInterpreter, 2, 1)           \
+  F(WasmStackGuard, 0, 1)               \
+  F(WasmThrowCreate, 2, 1)              \
+  F(WasmThrowTypeError, 0, 1)           \
+  F(WasmRefFunc, 1, 1)                  \
+  F(WasmFunctionTableGet, 3, 1)         \
+  F(WasmFunctionTableSet, 4, 1)         \
+  F(WasmTableInit, 5, 1)                \
+  F(WasmTableCopy, 5, 1)                \
+  F(WasmTableGrow, 3, 1)                \
+  F(WasmTableFill, 4, 1)                \
+  F(WasmIsValidFuncRefValue, 1, 1)      \
+  F(WasmCompileLazy, 2, 1)              \
+  F(WasmNewMultiReturnFixedArray, 1, 1) \
+  F(WasmNewMultiReturnJSArray, 1, 1)
 
 #define FOR_EACH_INTRINSIC_RETURN_PAIR_IMPL(F, I) \
   F(DebugBreakOnBytecode, 1, 2)                   \
@@ -728,7 +734,6 @@ class Runtime : public AllStatic {
       Isolate* isolate, Handle<Object> object);
 };
 
-
 class RuntimeState {
  public:
 #ifndef V8_INTL_SUPPORT
@@ -769,11 +774,11 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, Runtime::FunctionId);
 //---------------------------------------------------------------------------
 // Constants used by interface to runtime functions.
 
-class AllocateDoubleAlignFlag : public BitField<bool, 0, 1> {};
+using AllocateDoubleAlignFlag = BitField<bool, 0, 1>;
 
-class AllowLargeObjectAllocationFlag : public BitField<bool, 1, 1> {};
+using AllowLargeObjectAllocationFlag = BitField<bool, 1, 1>;
 
-class DeclareGlobalsEvalFlag : public BitField<bool, 0, 1> {};
+using DeclareGlobalsEvalFlag = BitField<bool, 0, 1>;
 
 // A set of bits returned by Runtime_GetOptimizationStatus.
 // These bits must be in sync with bits defined in test/mjsunit/mjsunit.js
@@ -791,6 +796,7 @@ enum class OptimizationStatus {
   kIsExecuting = 1 << 10,
   kTopmostFrameIsTurboFanned = 1 << 11,
   kLiteMode = 1 << 12,
+  kMarkedForDeoptimization = 1 << 13,
 };
 
 }  // namespace internal
diff --git a/deps/v8/src/sanitizer/OWNERS b/deps/v8/src/sanitizer/OWNERS
index 25abe6c3b11dfb..96c9d10c122abe 100644
--- a/deps/v8/src/sanitizer/OWNERS
+++ b/deps/v8/src/sanitizer/OWNERS
@@ -1,3 +1,3 @@
-file://INFRA_OWNERS
+file:../../INFRA_OWNERS
 
 clemensh@chromium.org
diff --git a/deps/v8/src/sanitizer/lsan-page-allocator.cc b/deps/v8/src/sanitizer/lsan-page-allocator.cc
index 68b1f130b136d9..8f7f6f4666b400 100644
--- a/deps/v8/src/sanitizer/lsan-page-allocator.cc
+++ b/deps/v8/src/sanitizer/lsan-page-allocator.cc
@@ -20,11 +20,10 @@ LsanPageAllocator::LsanPageAllocator(v8::PageAllocator* page_allocator)
   DCHECK_NOT_NULL(page_allocator);
 }
 
-void* LsanPageAllocator::AllocatePages(void* address, size_t size,
+void* LsanPageAllocator::AllocatePages(void* hint, size_t size,
                                        size_t alignment,
                                        PageAllocator::Permission access) {
-  void* result =
-      page_allocator_->AllocatePages(address, size, alignment, access);
+  void* result = page_allocator_->AllocatePages(hint, size, alignment, access);
 #if defined(LEAK_SANITIZER)
   if (result != nullptr) {
     __lsan_register_root_region(result, size);
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index d7e208eac51f61..774c2f3789e27e 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -312,6 +312,8 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
                                              log_code_creation);
 #endif  // V8_TARGET_ARCH_ARM
 
+  bool needs_source_positions = isolate->NeedsSourcePositionsForProfiling();
+
   if (log_code_creation || FLAG_log_function_events) {
     Handle<Script> script(Script::cast(result->script()), isolate);
     Handle<String> name(script->name().IsString()
@@ -328,22 +330,29 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
     if (log_code_creation) {
       Script::InitLineEnds(script);
 
-      DisallowHeapAllocation no_gc;
       SharedFunctionInfo::ScriptIterator iter(isolate, *script);
-      for (i::SharedFunctionInfo info = iter.Next(); !info.is_null();
+      for (SharedFunctionInfo info = iter.Next(); !info.is_null();
            info = iter.Next()) {
         if (info.is_compiled()) {
-          int line_num = script->GetLineNumber(info.StartPosition()) + 1;
-          int column_num = script->GetColumnNumber(info.StartPosition()) + 1;
+          Handle<SharedFunctionInfo> shared_info(info, isolate);
+          if (needs_source_positions) {
+            SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate,
+                                                               shared_info);
+          }
+          DisallowHeapAllocation no_gc;
+          int line_num =
+              script->GetLineNumber(shared_info->StartPosition()) + 1;
+          int column_num =
+              script->GetColumnNumber(shared_info->StartPosition()) + 1;
           PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
-                                           info.abstract_code(), info, *name,
-                                           line_num, column_num));
+                                           info.abstract_code(), *shared_info,
+                                           *name, line_num, column_num));
         }
       }
     }
   }
 
-  if (isolate->NeedsSourcePositionsForProfiling()) {
+  if (needs_source_positions) {
     Handle<Script> script(Script::cast(result->script()), isolate);
     Script::InitLineEnds(script);
   }
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 0474d3babe0ff0..2f6d17d6b2bd9d 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -55,20 +55,35 @@ Code InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
 void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
                                                        uint8_t** data,
                                                        uint32_t* size) {
+  // Create the embedded blob from scratch using the current Isolate's heap.
   EmbeddedData d = EmbeddedData::FromIsolate(isolate);
 
+  // Allocate the backing store that will contain the embedded blob in this
+  // Isolate. The backing store is on the native heap, *not* on V8's garbage-
+  // collected heap.
   v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
-  const uint32_t page_size =
+  const uint32_t alignment =
       static_cast<uint32_t>(page_allocator->AllocatePageSize());
-  const uint32_t allocated_size = RoundUp(d.size(), page_size);
+
+  void* const requested_allocation_address =
+      AlignedAddress(isolate->heap()->GetRandomMmapAddr(), alignment);
+  const uint32_t allocation_size = RoundUp(d.size(), alignment);
 
   uint8_t* allocated_bytes = static_cast<uint8_t*>(
-      AllocatePages(page_allocator, isolate->heap()->GetRandomMmapAddr(),
-                    allocated_size, page_size, PageAllocator::kReadWrite));
+      AllocatePages(page_allocator, requested_allocation_address,
+                    allocation_size, alignment, PageAllocator::kReadWrite));
   CHECK_NOT_NULL(allocated_bytes);
 
+  // Copy the embedded blob into the newly allocated backing store. Switch
+  // permissions to read-execute since builtin code is immutable from now on
+  // and must be executable in case any JS execution is triggered.
+  //
+  // Once this backing store is set as the current_embedded_blob, V8 cannot tell
+  // the difference between a 'real' embedded build (where the blob is embedded
+  // in the binary) and what we are currently setting up here (where the blob is
+  // on the native heap).
   std::memcpy(allocated_bytes, d.data(), d.size());
-  CHECK(SetPermissions(page_allocator, allocated_bytes, allocated_size,
+  CHECK(SetPermissions(page_allocator, allocated_bytes, allocation_size,
                        PageAllocator::kReadExecute));
 
   *data = allocated_bytes;
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
index 4703ef48224f9f..5f57993fc3255a 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
@@ -92,7 +92,7 @@ void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
     w->Newline();
   }
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   {
     i::EmbeddedVector<char, kTemporaryStringLength> unwind_info_symbol;
     i::SNPrintF(unwind_info_symbol, "%s_Builtins_UnwindInfo",
@@ -102,7 +102,7 @@ void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
                            EmbeddedBlobDataSymbol().c_str(), blob,
                            reinterpret_cast<const void*>(&unwind_infos_[0]));
   }
-#endif
+#endif  // V8_OS_WIN64
 
   w->FileEpilogue();
 }
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.h b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
index c26465ae6a7d26..e487b9be9bcc77 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.h
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
@@ -13,9 +13,9 @@
 #include "src/snapshot/embedded/embedded-data.h"
 #include "src/snapshot/embedded/platform-embedded-file-writer-base.h"
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
 #include "src/diagnostics/unwinding-info-win64.h"
-#endif
+#endif  // V8_OS_WIN64
 
 namespace v8 {
 namespace internal {
@@ -35,11 +35,11 @@ class EmbeddedFileWriterInterface {
   // compiled builtin Code objects with trampolines.
   virtual void PrepareBuiltinSourcePositionMap(Builtins* builtins) = 0;
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   virtual void SetBuiltinUnwindData(
       int builtin_index,
       const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) = 0;
-#endif
+#endif  // V8_OS_WIN64
 };
 
 // Generates the embedded.S file which is later compiled into the final v8
@@ -59,14 +59,14 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
 
   void PrepareBuiltinSourcePositionMap(Builtins* builtins) override;
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   void SetBuiltinUnwindData(
       int builtin_index,
       const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) override {
     DCHECK_LT(builtin_index, Builtins::builtin_count);
     unwind_infos_[builtin_index] = unwinding_info;
   }
-#endif
+#endif  // V8_OS_WIN64
 
   void SetEmbeddedFile(const char* embedded_src_path) {
     embedded_src_path_ = embedded_src_path;
@@ -172,9 +172,6 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
                          const i::EmbeddedData* blob) const;
 
 #if defined(V8_OS_WIN_X64)
-  std::string BuiltinsUnwindInfoLabel() const;
-  void WriteUnwindInfo(PlatformEmbeddedFileWriterBase* w,
-                       const i::EmbeddedData* blob) const;
   void WriteUnwindInfoEntry(PlatformEmbeddedFileWriterBase* w,
                             uint64_t rva_start, uint64_t rva_end) const;
 #endif
@@ -194,9 +191,9 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
  private:
   std::vector<byte> source_positions_[Builtins::builtin_count];
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   win64_unwindinfo::BuiltinUnwindInfo unwind_infos_[Builtins::builtin_count];
-#endif
+#endif  // V8_OS_WIN64
 
   std::map<const char*, int> external_filenames_;
   std::vector<const char*> external_filenames_by_index_;
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index d0758cb42c0770..9a9a26fbd0abef 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -6,14 +6,14 @@
 
 #include <algorithm>
 
-#include "src/common/globals.h"  // For V8_OS_WIN_X64.
+#include "src/common/globals.h"  // For V8_OS_WIN64
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
 #include "src/builtins/builtins.h"
 #include "src/diagnostics/unwinding-info-win64.h"
-#include "src/objects/objects-inl.h"
 #include "src/snapshot/embedded/embedded-data.h"
-#endif
+#include "src/snapshot/embedded/embedded-file-writer.h"
+#endif  // V8_OS_WIN64
 
 namespace v8 {
 namespace internal {
@@ -214,20 +214,118 @@ void EmitUnwindData(PlatformEmbeddedFileWriterWin* w,
   w->EndPdataSection();
   w->Newline();
 }
-#endif  // defined(V8_OS_WIN_X64)
+
+#elif defined(V8_OS_WIN_ARM64)
+
+void EmitUnwindData(PlatformEmbeddedFileWriterWin* w,
+                    const char* unwind_info_symbol,
+                    const char* embedded_blob_data_symbol,
+                    const EmbeddedData* blob,
+                    const win64_unwindinfo::BuiltinUnwindInfo* unwind_infos) {
+  DCHECK(win64_unwindinfo::CanEmitUnwindInfoForBuiltins());
+
+  // Fairly arbitrary but should fit all symbol names.
+  static constexpr int kTemporaryStringLength = 256;
+  i::EmbeddedVector<char, kTemporaryStringLength> unwind_info_full_symbol;
+
+  // Emit a RUNTIME_FUNCTION (PDATA) entry for each builtin function, as
+  // documented here:
+  // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling.
+  w->Comment(
+      "pdata for all the code in the embedded blob (structs of type "
+      "RUNTIME_FUNCTION).");
+  w->Comment("    BeginAddress");
+  w->Comment("    UnwindInfoAddress");
+  w->StartPdataSection();
+  std::vector<int> code_chunks;
+  std::vector<int> fp_adjustments;
+
+  for (int i = 0; i < Builtins::builtin_count; i++) {
+    if (!blob->ContainsBuiltin(i)) continue;
+    if (unwind_infos[i].is_leaf_function()) continue;
+
+    uint64_t builtin_start_offset = blob->InstructionStartOfBuiltin(i) -
+                                    reinterpret_cast<Address>(blob->data());
+    uint32_t builtin_size = blob->InstructionSizeOfBuiltin(i);
+
+    const std::vector<int>& xdata_desc = unwind_infos[i].fp_offsets();
+    const std::vector<int>& xdata_fp_adjustments =
+        unwind_infos[i].fp_adjustments();
+    DCHECK_EQ(xdata_desc.size(), xdata_fp_adjustments.size());
+
+    for (size_t j = 0; j < xdata_desc.size(); j++) {
+      int chunk_start = xdata_desc[j];
+      int chunk_end =
+          (j < xdata_desc.size() - 1) ? xdata_desc[j + 1] : builtin_size;
+      int chunk_len = ::RoundUp(chunk_end - chunk_start, kInstrSize);
+
+      while (chunk_len > 0) {
+        int allowed_chunk_len =
+            std::min(chunk_len, win64_unwindinfo::kMaxFunctionLength);
+        chunk_len -= win64_unwindinfo::kMaxFunctionLength;
+
+        // Record the chunk length and fp_adjustment for emitting UNWIND_INFO
+        // later.
+        code_chunks.push_back(allowed_chunk_len);
+        fp_adjustments.push_back(xdata_fp_adjustments[j]);
+        i::SNPrintF(unwind_info_full_symbol, "%s_%u", unwind_info_symbol,
+                    static_cast<uint32_t>(code_chunks.size()));
+        w->DeclareRvaToSymbol(embedded_blob_data_symbol,
+                              builtin_start_offset + chunk_start);
+        w->DeclareRvaToSymbol(unwind_info_full_symbol.begin());
+      }
+    }
+  }
+  w->EndPdataSection();
+  w->Newline();
+
+  // Emit the UNWIND_INFO (XDATA) structs, which contain the unwinding
+  // information.
+  w->DeclareExternalFunction(CRASH_HANDLER_FUNCTION_NAME_STRING);
+  w->StartXdataSection();
+  {
+    for (size_t i = 0; i < code_chunks.size(); i++) {
+      i::SNPrintF(unwind_info_full_symbol, "%s_%u", unwind_info_symbol,
+                  static_cast<uint32_t>(i + 1));
+      w->DeclareLabel(unwind_info_full_symbol.begin());
+      std::vector<uint8_t> xdata =
+          win64_unwindinfo::GetUnwindInfoForBuiltinFunction(code_chunks[i],
+                                                            fp_adjustments[i]);
+
+      w->IndentedDataDirective(kByte);
+      for (size_t j = 0; j < xdata.size(); j++) {
+        if (j > 0) fprintf(w->fp(), ",");
+        w->HexLiteral(xdata[j]);
+      }
+      w->Newline();
+      w->DeclareRvaToSymbol(CRASH_HANDLER_FUNCTION_NAME_STRING);
+    }
+  }
+  w->EndXdataSection();
+  w->Newline();
+}
+
+#endif  // V8_OS_WIN_X64
 
 }  // namespace
 
 void PlatformEmbeddedFileWriterWin::MaybeEmitUnwindData(
     const char* unwind_info_symbol, const char* embedded_blob_data_symbol,
     const EmbeddedData* blob, const void* unwind_infos) {
-#if defined(V8_OS_WIN_X64)
+// Windows ARM64 supports cross-compilation, which could require unwind info
+// for the host OS. Ignore this case because it is only needed at build time.
+#if defined(V8_OS_WIN_ARM64)
+  if (target_arch_ != EmbeddedTargetArch::kArm64) {
+    return;
+  }
+#endif  // V8_OS_WIN_ARM64
+
+#if defined(V8_OS_WIN64)
   if (win64_unwindinfo::CanEmitUnwindInfoForBuiltins()) {
     EmitUnwindData(this, unwind_info_symbol, embedded_blob_data_symbol, blob,
                    reinterpret_cast<const win64_unwindinfo::BuiltinUnwindInfo*>(
                        unwind_infos));
   }
-#endif  // defined(V8_OS_WIN_X64)
+#endif  // V8_OS_WIN64
 }
 
 // Windows, MSVC, not arm/arm64.
@@ -545,6 +643,7 @@ void PlatformEmbeddedFileWriterWin::DeclareFunctionBegin(const char* name) {
   if (target_arch_ == EmbeddedTargetArch::kArm64) {
     // Windows ARM64 assembly is in GAS syntax, but ".type" is invalid directive
     // in PE/COFF for Windows.
+    DeclareSymbolGlobal(name);
   } else {
     // The directives for inserting debugging information on Windows come
     // from the PE (Portable Executable) and COFF (Common Object File Format)
@@ -570,11 +669,7 @@ void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
   // Replace any Windows style paths (backslashes) with forward
   // slashes.
   std::string fixed_filename(filename);
-  for (auto& c : fixed_filename) {
-    if (c == '\\') {
-      c = '/';
-    }
-  }
+  std::replace(fixed_filename.begin(), fixed_filename.end(), '\\', '/');
   fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
 }
 
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index c81e9a1e2154c1..215db9d9a56119 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -154,12 +154,10 @@ class SerializerReference {
   }
 
  private:
-  class SpaceBits : public BitField<SnapshotSpace, 0, kSpaceTagSize> {};
-  class ChunkIndexBits
-      : public BitField<uint32_t, SpaceBits::kNext, 32 - kSpaceTagSize> {};
-  class SpecialValueTypeBits
-      : public BitField<SpecialValueType, SpaceBits::kNext,
-                        32 - kSpaceTagSize> {};
+  using SpaceBits = BitField<SnapshotSpace, 0, kSpaceTagSize>;
+  using ChunkIndexBits = SpaceBits::Next<uint32_t, 32 - kSpaceTagSize>;
+  using SpecialValueTypeBits =
+      SpaceBits::Next<SpecialValueType, 32 - kSpaceTagSize>;
 
   // We use two fields to store a reference.
   // In case of a normal back reference, the bitfield_ stores the space and
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index c845a089a390a6..2a30fefe33b0fc 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -34,8 +34,8 @@ class ExternalReferenceEncoder {
     uint32_t index() const { return Index::decode(value_); }
 
    private:
-    class Index : public BitField<uint32_t, 0, 31> {};
-    class IsFromAPI : public BitField<bool, 31, 1> {};
+    using Index = BitField<uint32_t, 0, 31>;
+    using IsFromAPI = BitField<bool, 31, 1>;
     uint32_t value_;
   };
 
@@ -328,8 +328,8 @@ class SerializedData {
 
   uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }
 
-  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
-  class IsLastChunkBits : public BitField<bool, 31, 1> {};
+  using ChunkSizeBits = BitField<uint32_t, 0, 31>;
+  using IsLastChunkBits = BitField<bool, 31, 1>;
 
   static constexpr uint32_t kMagicNumberOffset = 0;
   static constexpr uint32_t kMagicNumber =
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index f7e1e86b845ba5..4a4da9f755484b 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -8,7 +8,6 @@
 #include "src/snapshot/partial-serializer.h"
 #include "src/snapshot/startup-serializer.h"
 
-#include "src/objects/objects-inl.h"
 #include "src/utils/utils.h"
 
 namespace v8 {
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 6ad2efc18b8438..4e4bc9eefc02c0 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -143,13 +143,9 @@ void StartupSerializer::SerializeStrongReferences() {
   // No active or weak handles.
   CHECK(isolate->handle_scope_implementer()->blocks()->empty());
 
-  // Visit smi roots.
-  // Clear the stack limits to make the snapshot reproducible.
-  // Reset it again afterwards.
-  isolate->heap()->ClearStackLimits();
+  // Visit smi roots and immortal immovables first to make sure they end up in
+  // the first page.
   isolate->heap()->IterateSmiRoots(this);
-  isolate->heap()->SetStackLimits();
-  // First visit immortal immovables to make sure they end up in the first page.
   isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
 }
 
diff --git a/deps/v8/src/strings/char-predicates-inl.h b/deps/v8/src/strings/char-predicates-inl.h
index 3b9b13aba56de6..8ff9f0d21f7470 100644
--- a/deps/v8/src/strings/char-predicates-inl.h
+++ b/deps/v8/src/strings/char-predicates-inl.h
@@ -81,14 +81,14 @@ enum AsciiCharFlags {
   kIsWhiteSpaceOrLineTerminator = 1 << 3
 };
 constexpr uint8_t BuildAsciiCharFlags(uc32 c) {
-  // clang-format off
-  return
-    (IsAsciiIdentifier(c) || c == '\\') ? (
-      kIsIdentifierPart | (!IsDecimalDigit(c) ? kIsIdentifierStart : 0)) : 0 |
-    (c == ' ' || c == '\t' || c == '\v' || c == '\f') ?
-      kIsWhiteSpace | kIsWhiteSpaceOrLineTerminator : 0 |
-    (c == '\r' || c == '\n') ? kIsWhiteSpaceOrLineTerminator : 0;
-  // clang-format on
+  return ((IsAsciiIdentifier(c) || c == '\\')
+              ? (kIsIdentifierPart |
+                 (!IsDecimalDigit(c) ? kIsIdentifierStart : 0))
+              : 0) |
+         ((c == ' ' || c == '\t' || c == '\v' || c == '\f')
+              ? kIsWhiteSpace | kIsWhiteSpaceOrLineTerminator
+              : 0) |
+         ((c == '\r' || c == '\n') ? kIsWhiteSpaceOrLineTerminator : 0);
 }
 const constexpr uint8_t kAsciiCharFlags[128] = {
 #define BUILD_CHAR_FLAGS(N) BuildAsciiCharFlags(N),
diff --git a/deps/v8/src/strings/unicode.h b/deps/v8/src/strings/unicode.h
index bd94300e349c5c..48483af9e172bf 100644
--- a/deps/v8/src/strings/unicode.h
+++ b/deps/v8/src/strings/unicode.h
@@ -51,8 +51,8 @@ class Predicate {
     bool value() const { return ValueField::decode(bit_field_); }
 
    private:
-    class CodePointField : public v8::internal::BitField<uchar, 0, 21> {};
-    class ValueField : public v8::internal::BitField<bool, 21, 1> {};
+    using CodePointField = v8::internal::BitField<uchar, 0, 21>;
+    using ValueField = v8::internal::BitField<bool, 21, 1>;
 
     uint32_t bit_field_;
   };
diff --git a/deps/v8/src/third_party/valgrind/OWNERS b/deps/v8/src/third_party/valgrind/OWNERS
index 852d438bb0a884..cb9c7e9c3887f1 100644
--- a/deps/v8/src/third_party/valgrind/OWNERS
+++ b/deps/v8/src/third_party/valgrind/OWNERS
@@ -1 +1 @@
-file://COMMON_OWNERS
+file:../../../COMMON_OWNERS
diff --git a/deps/v8/src/third_party/vtune/OWNERS b/deps/v8/src/third_party/vtune/OWNERS
index 852d438bb0a884..cb9c7e9c3887f1 100644
--- a/deps/v8/src/third_party/vtune/OWNERS
+++ b/deps/v8/src/third_party/vtune/OWNERS
@@ -1 +1 @@
-file://COMMON_OWNERS
+file:../../../COMMON_OWNERS
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 23de1210658b99..5ce25cf13ab473 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -46,8 +46,7 @@ namespace torque {
 #define AST_TYPE_EXPRESSION_NODE_KIND_LIST(V) \
   V(BasicTypeExpression)                      \
   V(FunctionTypeExpression)                   \
-  V(UnionTypeExpression)                      \
-  V(ReferenceTypeExpression)
+  V(UnionTypeExpression)
 
 #define AST_STATEMENT_NODE_KIND_LIST(V) \
   V(BlockStatement)                     \
@@ -72,20 +71,17 @@ namespace torque {
 
 #define AST_DECLARATION_NODE_KIND_LIST(V) \
   AST_TYPE_DECLARATION_NODE_KIND_LIST(V)  \
-  V(StandardDeclaration)                  \
   V(GenericDeclaration)                   \
   V(SpecializationDeclaration)            \
   V(ExternConstDeclaration)               \
   V(NamespaceDeclaration)                 \
   V(ConstDeclaration)                     \
-  V(CppIncludeDeclaration)
-
-#define AST_CALLABLE_NODE_KIND_LIST(V) \
-  V(TorqueMacroDeclaration)            \
-  V(TorqueBuiltinDeclaration)          \
-  V(ExternalMacroDeclaration)          \
-  V(ExternalBuiltinDeclaration)        \
-  V(ExternalRuntimeDeclaration)        \
+  V(CppIncludeDeclaration)                \
+  V(TorqueMacroDeclaration)               \
+  V(TorqueBuiltinDeclaration)             \
+  V(ExternalMacroDeclaration)             \
+  V(ExternalBuiltinDeclaration)           \
+  V(ExternalRuntimeDeclaration)           \
   V(IntrinsicDeclaration)
 
 #define AST_NODE_KIND_LIST(V)           \
@@ -93,7 +89,6 @@ namespace torque {
   AST_TYPE_EXPRESSION_NODE_KIND_LIST(V) \
   AST_STATEMENT_NODE_KIND_LIST(V)       \
   AST_DECLARATION_NODE_KIND_LIST(V)     \
-  AST_CALLABLE_NODE_KIND_LIST(V)        \
   V(Identifier)                         \
   V(LabelBlock)
 
@@ -118,16 +113,16 @@ struct AstNodeClassCheck {
 };
 
 // Boilerplate for most derived classes.
-#define DEFINE_AST_NODE_LEAF_BOILERPLATE(T)                        \
-  static const Kind kKind = Kind::k##T;                            \
-  static T* cast(AstNode* node) {                                  \
-    if (node->kind != kKind) return nullptr;                       \
-    return static_cast<T*>(node);                                  \
-  }                                                                \
-  static T* DynamicCast(AstNode* node) {                           \
-    if (!node) return nullptr;                                     \
-    if (!AstNodeClassCheck::IsInstanceOf<T>(node)) return nullptr; \
-    return static_cast<T*>(node);                                  \
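+// Note: cast() now DCHECKs that the node has the expected kind instead of
+// returning nullptr on a mismatch; callers that may legitimately fail should
+// use DynamicCast().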
+#define DEFINE_AST_NODE_LEAF_BOILERPLATE(T)  \
+  static const Kind kKind = Kind::k##T;      \
+  static T* cast(AstNode* node) {            \
+    DCHECK_EQ(node->kind, kKind);            \
+    return static_cast<T*>(node);            \
+  }                                          \
+  static T* DynamicCast(AstNode* node) {     \
+    if (!node) return nullptr;               \
+    if (node->kind != kKind) return nullptr; \
+    return static_cast<T*>(node);            \
   }
 
 // Boilerplate for classes with subclasses.
@@ -221,6 +216,10 @@ struct Identifier : AstNode {
   std::string value;
 };
 
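+// Prints the identifier's text, so an Identifier* can be streamed directly
+// into diagnostics.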
+inline std::ostream& operator<<(std::ostream& os, Identifier* id) {
+  return os << id->value;
+}
+
 struct IdentifierPtrValueEq {
   bool operator()(const Identifier* a, const Identifier* b) {
     return a->value < b->value;
@@ -252,11 +251,11 @@ struct IdentifierExpression : LocationExpression {
 
 struct IntrinsicCallExpression : Expression {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(IntrinsicCallExpression)
-  IntrinsicCallExpression(SourcePosition pos, std::string name,
+  IntrinsicCallExpression(SourcePosition pos, Identifier* name,
                           std::vector<TypeExpression*> generic_arguments,
                           std::vector<Expression*> arguments)
       : Expression(kKind, pos),
-        name(std::move(name)),
+        name(name),
         generic_arguments(std::move(generic_arguments)),
         arguments(std::move(arguments)) {}
 
@@ -267,7 +266,7 @@ struct IntrinsicCallExpression : Expression {
     callback(this);
   }
 
-  std::string name;
+  Identifier* name;
   std::vector<TypeExpression*> generic_arguments;
   std::vector<Expression*> arguments;
 };
@@ -619,13 +618,6 @@ struct UnionTypeExpression : TypeExpression {
   TypeExpression* b;
 };
 
-struct ReferenceTypeExpression : TypeExpression {
-  DEFINE_AST_NODE_LEAF_BOILERPLATE(ReferenceTypeExpression)
-  ReferenceTypeExpression(SourcePosition pos, TypeExpression* referenced_type)
-      : TypeExpression(kKind, pos), referenced_type(referenced_type) {}
-  TypeExpression* referenced_type;
-};
-
 struct ExpressionStatement : Statement {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(ExpressionStatement)
   ExpressionStatement(SourcePosition pos, Expression* expression)
@@ -849,10 +841,15 @@ struct ConditionalAnnotation {
   ConditionalAnnotationType type;
 };
 
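+// A parsed annotation: its name plus an optional string parameter.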
+struct Annotation {
+  Identifier* name;
+  base::Optional<std::string> param;
+};
+
 struct ClassFieldExpression {
   NameAndTypeExpression name_and_type;
   base::Optional<std::string> index;
-  base::Optional<ConditionalAnnotation> conditional;
+  std::vector<ConditionalAnnotation> conditions;
   bool weak;
   bool const_qualified;
   bool generate_verify;
@@ -865,34 +862,33 @@ struct LabelAndTypes {
 
 using LabelAndTypesVector = std::vector<LabelAndTypes>;
 
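+// Common base for all callable declarations (macros, builtins, runtime
+// functions, intrinsics). The signature data previously held in a separate
+// CallableNodeSignature now lives directly on the declaration.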
-struct CallableNodeSignature {
+struct CallableDeclaration : Declaration {
+  CallableDeclaration(AstNode::Kind kind, SourcePosition pos,
+                      bool transitioning, Identifier* name,
+                      ParameterList parameters, TypeExpression* return_type,
+                      LabelAndTypesVector labels)
+      : Declaration(kind, pos),
+        transitioning(transitioning),
+        name(name),
+        parameters(std::move(parameters)),
+        return_type(return_type),
+        labels(std::move(labels)) {}
+  DEFINE_AST_NODE_INNER_BOILERPLATE(CallableDeclaration)
+  bool transitioning;
+  Identifier* name;
   ParameterList parameters;
   TypeExpression* return_type;
   LabelAndTypesVector labels;
 };
 
-struct CallableNode : AstNode {
-  CallableNode(AstNode::Kind kind, SourcePosition pos, bool transitioning,
-               std::string name, ParameterList parameters,
-               TypeExpression* return_type, const LabelAndTypesVector& labels)
-      : AstNode(kind, pos),
-        transitioning(transitioning),
-        name(std::move(name)),
-        signature(new CallableNodeSignature{parameters, return_type, labels}) {}
-  DEFINE_AST_NODE_INNER_BOILERPLATE(CallableNode)
-  bool transitioning;
-  std::string name;
-  std::unique_ptr<CallableNodeSignature> signature;
-};
-
-struct MacroDeclaration : CallableNode {
+struct MacroDeclaration : CallableDeclaration {
   DEFINE_AST_NODE_INNER_BOILERPLATE(MacroDeclaration)
   MacroDeclaration(AstNode::Kind kind, SourcePosition pos, bool transitioning,
-                   std::string name, base::Optional<std::string> op,
+                   Identifier* name, base::Optional<std::string> op,
                    ParameterList parameters, TypeExpression* return_type,
                    const LabelAndTypesVector& labels)
-      : CallableNode(kind, pos, transitioning, std::move(name),
-                     std::move(parameters), return_type, labels),
+      : CallableDeclaration(kind, pos, transitioning, name,
+                            std::move(parameters), return_type, labels),
         op(std::move(op)) {
     if (parameters.implicit_kind == ImplicitKind::kJSImplicit) {
       Error("Cannot use \"js-implicit\" with macros, use \"implicit\" instead.")
@@ -906,23 +902,22 @@ struct ExternalMacroDeclaration : MacroDeclaration {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalMacroDeclaration)
   ExternalMacroDeclaration(SourcePosition pos, bool transitioning,
                            std::string external_assembler_name,
-                           std::string name, base::Optional<std::string> op,
+                           Identifier* name, base::Optional<std::string> op,
                            ParameterList parameters,
                            TypeExpression* return_type,
                            const LabelAndTypesVector& labels)
-      : MacroDeclaration(kKind, pos, transitioning, std::move(name),
-                         std::move(op), std::move(parameters), return_type,
-                         labels),
+      : MacroDeclaration(kKind, pos, transitioning, name, std::move(op),
+                         std::move(parameters), return_type, labels),
         external_assembler_name(std::move(external_assembler_name)) {}
   std::string external_assembler_name;
 };
 
-struct IntrinsicDeclaration : CallableNode {
+struct IntrinsicDeclaration : CallableDeclaration {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(IntrinsicDeclaration)
-  IntrinsicDeclaration(SourcePosition pos, std::string name,
+  IntrinsicDeclaration(SourcePosition pos, Identifier* name,
                        ParameterList parameters, TypeExpression* return_type)
-      : CallableNode(kKind, pos, false, std::move(name), std::move(parameters),
-                     return_type, {}) {
+      : CallableDeclaration(kKind, pos, false, name, std::move(parameters),
+                            return_type, {}) {
     if (parameters.implicit_kind != ImplicitKind::kNoImplicit) {
       Error("Intinsics cannot have implicit parameters.");
     }
@@ -932,24 +927,26 @@ struct IntrinsicDeclaration : CallableNode {
 struct TorqueMacroDeclaration : MacroDeclaration {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(TorqueMacroDeclaration)
   TorqueMacroDeclaration(SourcePosition pos, bool transitioning,
-                         std::string name, base::Optional<std::string> op,
+                         Identifier* name, base::Optional<std::string> op,
                          ParameterList parameters, TypeExpression* return_type,
-                         const LabelAndTypesVector& labels, bool export_to_csa)
-      : MacroDeclaration(kKind, pos, transitioning, std::move(name),
-                         std::move(op), std::move(parameters), return_type,
-                         labels),
-        export_to_csa(export_to_csa) {}
+                         const LabelAndTypesVector& labels, bool export_to_csa,
+                         base::Optional<Statement*> body)
+      : MacroDeclaration(kKind, pos, transitioning, name, std::move(op),
+                         std::move(parameters), return_type, labels),
+        export_to_csa(export_to_csa),
+        body(body) {}
   bool export_to_csa;
+  base::Optional<Statement*> body;
 };
 
-struct BuiltinDeclaration : CallableNode {
+struct BuiltinDeclaration : CallableDeclaration {
   DEFINE_AST_NODE_INNER_BOILERPLATE(BuiltinDeclaration)
   BuiltinDeclaration(AstNode::Kind kind, SourcePosition pos,
                      bool javascript_linkage, bool transitioning,
-                     std::string name, ParameterList parameters,
+                     Identifier* name, ParameterList parameters,
                      TypeExpression* return_type)
-      : CallableNode(kind, pos, transitioning, std::move(name),
-                     std::move(parameters), return_type, {}),
+      : CallableDeclaration(kind, pos, transitioning, name,
+                            std::move(parameters), return_type, {}),
         javascript_linkage(javascript_linkage) {
     if (parameters.implicit_kind == ImplicitKind::kJSImplicit &&
         !javascript_linkage) {
@@ -971,32 +968,33 @@ struct BuiltinDeclaration : CallableNode {
 struct ExternalBuiltinDeclaration : BuiltinDeclaration {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalBuiltinDeclaration)
   ExternalBuiltinDeclaration(SourcePosition pos, bool transitioning,
-                             bool javascript_linkage, std::string name,
+                             bool javascript_linkage, Identifier* name,
                              ParameterList parameters,
                              TypeExpression* return_type)
-      : BuiltinDeclaration(kKind, pos, javascript_linkage, transitioning,
-                           std::move(name), std::move(parameters),
-                           return_type) {}
+      : BuiltinDeclaration(kKind, pos, javascript_linkage, transitioning, name,
+                           std::move(parameters), return_type) {}
 };
 
 struct TorqueBuiltinDeclaration : BuiltinDeclaration {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(TorqueBuiltinDeclaration)
   TorqueBuiltinDeclaration(SourcePosition pos, bool transitioning,
-                           bool javascript_linkage, std::string name,
+                           bool javascript_linkage, Identifier* name,
                            ParameterList parameters,
-                           TypeExpression* return_type)
-      : BuiltinDeclaration(kKind, pos, javascript_linkage, transitioning,
-                           std::move(name), std::move(parameters),
-                           return_type) {}
+                           TypeExpression* return_type,
+                           base::Optional<Statement*> body)
+      : BuiltinDeclaration(kKind, pos, javascript_linkage, transitioning, name,
+                           std::move(parameters), return_type),
+        body(body) {}
+  base::Optional<Statement*> body;
 };
 
-struct ExternalRuntimeDeclaration : CallableNode {
+struct ExternalRuntimeDeclaration : CallableDeclaration {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalRuntimeDeclaration)
   ExternalRuntimeDeclaration(SourcePosition pos, bool transitioning,
-                             std::string name, ParameterList parameters,
+                             Identifier* name, ParameterList parameters,
                              TypeExpression* return_type)
-      : CallableNode(kKind, pos, transitioning, name, parameters, return_type,
-                     {}) {}
+      : CallableDeclaration(kKind, pos, transitioning, name, parameters,
+                            return_type, {}) {}
 };
 
 struct ConstDeclaration : Declaration {
@@ -1012,47 +1010,32 @@ struct ConstDeclaration : Declaration {
   Expression* expression;
 };
 
-struct StandardDeclaration : Declaration {
-  DEFINE_AST_NODE_LEAF_BOILERPLATE(StandardDeclaration)
-  StandardDeclaration(SourcePosition pos, CallableNode* callable,
-                      base::Optional<Statement*> body)
-      : Declaration(kKind, pos), callable(callable), body(body) {}
-  CallableNode* callable;
-  base::Optional<Statement*> body;
-};
-
 struct GenericDeclaration : Declaration {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(GenericDeclaration)
-  GenericDeclaration(SourcePosition pos, CallableNode* callable,
+  GenericDeclaration(SourcePosition pos,
                      std::vector<Identifier*> generic_parameters,
-                     base::Optional<Statement*> body = base::nullopt)
+                     CallableDeclaration* declaration)
       : Declaration(kKind, pos),
-        callable(callable),
         generic_parameters(std::move(generic_parameters)),
-        body(body) {}
-  CallableNode* callable;
+        declaration(declaration) {}
   std::vector<Identifier*> generic_parameters;
-  base::Optional<Statement*> body;
+  CallableDeclaration* declaration;
 };
 
-struct SpecializationDeclaration : Declaration {
+struct SpecializationDeclaration : CallableDeclaration {
   DEFINE_AST_NODE_LEAF_BOILERPLATE(SpecializationDeclaration)
-  SpecializationDeclaration(SourcePosition pos, Identifier* name,
+  SpecializationDeclaration(SourcePosition pos, bool transitioning,
+                            Identifier* name,
                             std::vector<TypeExpression*> generic_parameters,
                             ParameterList parameters,
                             TypeExpression* return_type,
-                            LabelAndTypesVector labels, Statement* b)
-      : Declaration(kKind, pos),
-        name(name),
-        external(false),
+                            LabelAndTypesVector labels, Statement* body)
+      : CallableDeclaration(kKind, pos, transitioning, name,
+                            std::move(parameters), return_type,
+                            std::move(labels)),
         generic_parameters(std::move(generic_parameters)),
-        signature(new CallableNodeSignature{std::move(parameters), return_type,
-                                            std::move(labels)}),
-        body(b) {}
-  Identifier* name;
-  bool external;
+        body(body) {}
   std::vector<TypeExpression*> generic_parameters;
-  std::unique_ptr<CallableNodeSignature> signature;
   Statement* body;
 };
 
diff --git a/deps/v8/src/torque/class-debug-reader-generator.cc b/deps/v8/src/torque/class-debug-reader-generator.cc
new file mode 100644
index 00000000000000..6abdffcc91feb5
--- /dev/null
+++ b/deps/v8/src/torque/class-debug-reader-generator.cc
@@ -0,0 +1,222 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/flags/flags.h"
+#include "src/torque/implementation-visitor.h"
+#include "src/torque/type-oracle.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+namespace {
+void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
+                              std::ostream& cc_contents, std::ostream& visitor,
+                              std::unordered_set<const ClassType*>* done) {
+  // Make sure each class only gets generated once.
+  if (!type.IsExtern() || !done->insert(&type).second) return;
+  const ClassType* super_type = type.GetSuperClass();
+
+  // We must emit the classes in dependency order. If the super class hasn't
+  // been emitted yet, go handle it first.
+  if (super_type != nullptr) {
+    GenerateClassDebugReader(*super_type, h_contents, cc_contents, visitor,
+                             done);
+  }
+
+  const std::string name = type.name();
+  const std::string super_name =
+      super_type == nullptr ? "Object" : super_type->name();
+  h_contents << "\nclass Tq" << name << " : public Tq" << super_name << " {\n";
+  h_contents << " public:\n";
+  h_contents << "  inline Tq" << name << "(uintptr_t address) : Tq"
+             << super_name << "(address) {}\n";
+  h_contents << "  std::vector<std::unique_ptr<ObjectProperty>> "
+                "GetProperties(d::MemoryAccessor accessor) const override;\n";
+  h_contents << "  const char* GetName() const override;\n";
+  h_contents << "  void Visit(TqObjectVisitor* visitor) const override;\n";
+
+  cc_contents << "\nconst char* Tq" << name << "::GetName() const {\n";
+  cc_contents << "  return \"v8::internal::" << name << "\";\n";
+  cc_contents << "}\n";
+
+  cc_contents << "\nvoid Tq" << name
+              << "::Visit(TqObjectVisitor* visitor) const {\n";
+  cc_contents << "  visitor->Visit" << name << "(this);\n";
+  cc_contents << "}\n";
+
+  visitor << "  virtual void Visit" << name << "(const Tq" << name
+          << "* object) {\n";
+  visitor << "    Visit" << super_name << "(object);\n";
+  visitor << "  }\n";
+
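+  // Accumulates the generated body of GetProperties() while the loop below
+  // emits the per-field getter definitions.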
+  std::stringstream get_props_impl;
+
+  for (const Field& field : type.fields()) {
+    const Type* field_type = field.name_and_type.type;
+    if (field_type == TypeOracle::GetVoidType()) continue;
+    const std::string& field_name = field.name_and_type.name;
+    bool is_field_tagged = field_type->IsSubtypeOf(TypeOracle::GetTaggedType());
+    base::Optional<const ClassType*> field_class_type =
+        field_type->ClassSupertype();
+    size_t field_size = 0;
+    std::string field_size_string;
+    std::tie(field_size, field_size_string) = field.GetFieldSizeInformation();
+
+    std::string field_value_type;
+    std::string field_value_type_compressed;
+    std::string field_cc_type;
+    std::string field_cc_type_compressed;
+    if (is_field_tagged) {
+      field_value_type = "uintptr_t";
+      field_value_type_compressed = "i::Tagged_t";
+      field_cc_type = "v8::internal::" + (field_class_type.has_value()
+                                              ? (*field_class_type)->name()
+                                              : "Object");
+      field_cc_type_compressed =
+          COMPRESS_POINTERS_BOOL ? "v8::internal::TaggedValue" : field_cc_type;
+    } else {
+      const Type* constexpr_version = field_type->ConstexprVersion();
+      if (constexpr_version == nullptr) {
+        Error("Type '", field_type->ToString(),
+              "' requires a constexpr representation");
+        continue;
+      }
+      field_cc_type = constexpr_version->GetGeneratedTypeName();
+      field_cc_type_compressed = field_cc_type;
+      // Note that we need constexpr names to resolve correctly in the global
+      // namespace, because we're passing them as strings to a debugging
+      // extension. We can verify this during build of the debug helper, because
+      // we use this type for a local variable below, and generate this code in
+      // a disjoint namespace. However, we can't emit a useful error at this
+      // point. Instead we'll emit a comment that might be helpful.
+      field_value_type =
+          field_cc_type +
+          " /*Failing? Ensure constexpr type name is fully qualified and "
+          "necessary #includes are in debug-helper-internal.h*/";
+      field_value_type_compressed = field_value_type;
+    }
+
+    const std::string field_getter =
+        "Get" + CamelifyString(field_name) + "Value";
+    const std::string address_getter =
+        "Get" + CamelifyString(field_name) + "Address";
+
+    std::string indexed_field_info;
+    std::string index_param;
+    std::string index_offset;
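+    // Indexed (array-like) fields take an extra offset parameter on their
+    // getter and report their element count and validity with the property.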
+    if (field.index) {
+      const Type* index_type = (*field.index)->name_and_type.type;
+      std::string index_type_name;
+      std::string index_value;
+      if (index_type == TypeOracle::GetSmiType()) {
+        index_type_name = "uintptr_t";
+        index_value =
+            "i::PlatformSmiTagging::SmiToInt(indexed_field_count.value)";
+      } else if (!index_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+        const Type* constexpr_index = index_type->ConstexprVersion();
+        if (constexpr_index == nullptr) {
+          Error("Type '", index_type->ToString(),
+                "' requires a constexpr representation");
+          continue;
+        }
+        index_type_name = constexpr_index->GetGeneratedTypeName();
+        index_value = "indexed_field_count.value";
+      } else {
+        Error("Unsupported index type: ", index_type);
+        continue;
+      }
+      get_props_impl << "  Value<" << index_type_name
+                     << "> indexed_field_count = Get"
+                     << CamelifyString((*field.index)->name_and_type.name)
+                     << "Value(accessor);\n";
+      indexed_field_info =
+          ", " + index_value + ", GetArrayKind(indexed_field_count.validity)";
+      index_param = ", size_t offset";
+      index_offset = " + offset * sizeof(value)";
+    }
+    get_props_impl
+        << "  result.push_back(v8::base::make_unique<ObjectProperty>(\""
+        << field_name << "\", \"" << field_cc_type_compressed << "\", \""
+        << field_cc_type << "\", " << address_getter << "()"
+        << indexed_field_info << "));\n";
+
+    h_contents << "  uintptr_t " << address_getter << "() const;\n";
+    h_contents << "  Value<" << field_value_type << "> " << field_getter
+               << "(d::MemoryAccessor accessor" << index_param << ") const;\n";
+    cc_contents << "\nuintptr_t Tq" << name << "::" << address_getter
+                << "() const {\n";
+    cc_contents << "  return address_ - i::kHeapObjectTag + " << field.offset
+                << ";\n";
+    cc_contents << "}\n";
+    cc_contents << "\nValue<" << field_value_type << "> Tq" << name
+                << "::" << field_getter << "(d::MemoryAccessor accessor"
+                << index_param << ") const {\n";
+    cc_contents << "  " << field_value_type_compressed << " value{};\n";
+    cc_contents << "  d::MemoryAccessResult validity = accessor("
+                << address_getter << "()" << index_offset
+                << ", reinterpret_cast<uint8_t*>(&value), sizeof(value));\n";
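+    // Tagged fields may be pointer-compressed in the debuggee, so the
+    // generated getter decompresses them relative to the object's address;
+    // untagged values are returned as-is.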
+    cc_contents << "  return {validity, "
+                << (is_field_tagged ? "Decompress(value, address_)" : "value")
+                << "};\n";
+    cc_contents << "}\n";
+  }
+
+  h_contents << "};\n";
+
+  cc_contents << "\nstd::vector<std::unique_ptr<ObjectProperty>> Tq" << name
+              << "::GetProperties(d::MemoryAccessor accessor) const {\n";
+  cc_contents << "  std::vector<std::unique_ptr<ObjectProperty>> result = Tq"
+              << super_name << "::GetProperties(accessor);\n";
+  cc_contents << get_props_impl.str();
+  cc_contents << "  return result;\n";
+  cc_contents << "}\n";
+}
+}  // namespace
+
+void ImplementationVisitor::GenerateClassDebugReaders(
+    const std::string& output_directory) {
+  const std::string file_name = "class-debug-readers-tq";
+  std::stringstream h_contents;
+  std::stringstream cc_contents;
+  h_contents << "// Provides the ability to read object properties in\n";
+  h_contents << "// postmortem or remote scenarios, where the debuggee's\n";
+  h_contents << "// memory is not part of the current process's address\n";
+  h_contents << "// space and must be read using a callback function.\n\n";
+  {
+    IncludeGuardScope include_guard(h_contents, file_name + ".h");
+
+    h_contents << "#include <cstdint>\n";
+    h_contents << "#include <vector>\n";
+    h_contents
+        << "\n#include \"tools/debug_helper/debug-helper-internal.h\"\n\n";
+
+    cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
+    cc_contents << "#include \"include/v8-internal.h\"\n\n";
+    cc_contents << "namespace i = v8::internal;\n\n";
+
+    NamespaceScope h_namespaces(h_contents, {"v8_debug_helper_internal"});
+    NamespaceScope cc_namespaces(cc_contents, {"v8_debug_helper_internal"});
+
+    std::stringstream visitor;
+    visitor << "\nclass TqObjectVisitor {\n";
+    visitor << " public:\n";
+    visitor << "  virtual void VisitObject(const TqObject* object) {}\n";
+
+    std::unordered_set<const ClassType*> done;
+    for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+      const ClassType* type = ClassType::DynamicCast(alias->type());
+      GenerateClassDebugReader(*type, h_contents, cc_contents, visitor, &done);
+    }
+
+    visitor << "};\n";
+    h_contents << visitor.str();
+  }
+  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
+  WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
+}
+
+}  // namespace torque
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 4ad3a6ec3c883f..efbbf9588ee8b0 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -28,6 +28,7 @@ static const char* const JS_FUNCTION_TYPE_STRING = "JSFunction";
 static const char* const MAP_TYPE_STRING = "Map";
 static const char* const OBJECT_TYPE_STRING = "Object";
 static const char* const HEAP_OBJECT_TYPE_STRING = "HeapObject";
+static const char* const JSANY_TYPE_STRING = "JSAny";
 static const char* const JSOBJECT_TYPE_STRING = "JSObject";
 static const char* const SMI_TYPE_STRING = "Smi";
 static const char* const TAGGED_TYPE_STRING = "Tagged";
@@ -49,6 +50,10 @@ static const char* const FLOAT64_TYPE_STRING = "float64";
 static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
 static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
 static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
+static const char* const TORQUE_INTERNAL_NAMESPACE_STRING = "torque_internal";
+static const char* const REFERENCE_TYPE_STRING = "Reference";
+static const char* const SLICE_TYPE_STRING = "Slice";
+static const char* const STRUCT_NAMESPACE_STRING = "_struct";
 
 inline bool IsConstexprName(const std::string& name) {
   return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
diff --git a/deps/v8/src/torque/contextual.h b/deps/v8/src/torque/contextual.h
index 92d2bdf3d706b1..d7764bfa68db5f 100644
--- a/deps/v8/src/torque/contextual.h
+++ b/deps/v8/src/torque/contextual.h
@@ -15,7 +15,7 @@ namespace internal {
 namespace torque {
 
 template <class Variable>
-V8_EXPORT_PRIVATE typename Variable::VariableType*& ContextualVariableTop();
+V8_EXPORT_PRIVATE typename Variable::Scope*& ContextualVariableTop();
 
 // {ContextualVariable} provides a clean alternative to a global variable.
 // The contextual variable is mutable, and supports managing the value of
@@ -30,8 +30,6 @@ V8_EXPORT_PRIVATE typename Variable::VariableType*& ContextualVariableTop();
 template <class Derived, class VarType>
 class ContextualVariable {
  public:
-  using VariableType = VarType;
-
   // A {Scope} contains a new object of type {VarType} and gives
   // ContextualVariable::Get() access to it. Upon destruction, the contextual
   // variable is restored to the state before the {Scope} was created. Scopes
@@ -41,18 +39,20 @@ class ContextualVariable {
    public:
     template <class... Args>
     explicit Scope(Args&&... args)
-        : current_(std::forward<Args>(args)...), previous_(Top()) {
-      Top() = &current_;
+        : value_(std::forward<Args>(args)...), previous_(Top()) {
+      Top() = this;
     }
     ~Scope() {
       // Ensure stack discipline.
-      DCHECK_EQ(&current_, Top());
+      DCHECK_EQ(this, Top());
       Top() = previous_;
     }
 
+    VarType& Value() { return value_; }
+
    private:
-    VarType current_;
-    VarType* previous_;
+    VarType value_;
+    Scope* previous_;
 
     static_assert(std::is_base_of<ContextualVariable, Derived>::value,
                   "Curiously Recurring Template Pattern");
@@ -65,13 +65,13 @@ class ContextualVariable {
   // for this contextual variable.
   static VarType& Get() {
     DCHECK_NOT_NULL(Top());
-    return *Top();
+    return Top()->Value();
   }
 
  private:
   template <class T>
-  friend typename T::VariableType*& ContextualVariableTop();
-  static VarType*& Top() { return ContextualVariableTop<Derived>(); }
+  friend typename T::Scope*& ContextualVariableTop();
+  static Scope*& Top() { return ContextualVariableTop<Derived>(); }
 
   static bool HasScope() { return Top() != nullptr; }
   friend class MessageBuilder;
@@ -82,11 +82,11 @@ class ContextualVariable {
   struct VarName                                  \
       : v8::internal::torque::ContextualVariable<VarName, __VA_ARGS__> {}
 
-#define DEFINE_CONTEXTUAL_VARIABLE(VarName)                                    \
-  template <>                                                                  \
-  V8_EXPORT_PRIVATE VarName::VariableType*& ContextualVariableTop<VarName>() { \
-    static thread_local VarName::VariableType* top = nullptr;                  \
-    return top;                                                                \
+#define DEFINE_CONTEXTUAL_VARIABLE(VarName)                             \
+  template <>                                                           \
+  V8_EXPORT_PRIVATE VarName::Scope*& ContextualVariableTop<VarName>() { \
+    static thread_local VarName::Scope* top = nullptr;                  \
+    return top;                                                         \
   }
 
 // By inheriting from {ContextualClass} a class can become a contextual variable
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index cd7ff22c484ed7..0c49033955b68a 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -56,14 +56,10 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
 }
 
 void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
-  std::string file = SourceFileMap::AbsolutePath(pos.source);
+  const std::string& file = SourceFileMap::AbsolutePath(pos.source);
   if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
     // Lines in Torque SourcePositions are zero-based, while the
     // CodeStubAssembler and downwind systems are one-based.
-    for (auto& c : file) {
-      if (c == '\\')
-        c = '/';
-    }
     out_ << "    ca_.SetSourcePosition(\"" << file << "\", "
          << (pos.start.line + 1) << ");\n";
     previous_position_ = pos;
@@ -309,8 +305,7 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
   std::string catch_name =
       PreCallableExceptionPreparation(instruction.catch_block);
   out_ << "    ";
-  bool needs_flattening =
-      return_type->IsStructType() || return_type->IsReferenceType();
+  bool needs_flattening = return_type->IsStructType();
   if (needs_flattening) {
     out_ << "std::tie(";
     PrintCommaSeparatedList(out_, results);
@@ -713,8 +708,13 @@ void CSAGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
 void CSAGenerator::EmitInstruction(
     const CreateFieldReferenceInstruction& instruction,
     Stack<std::string>* stack) {
-  const Field& field =
-      instruction.class_type->LookupField(instruction.field_name);
+  base::Optional<const ClassType*> class_type =
+      instruction.type->ClassSupertype();
+  if (!class_type.has_value()) {
+    ReportError("Cannot create field reference of type ", instruction.type,
+                " which does not inherit from a class type");
+  }
+  const Field& field = class_type.value()->LookupField(instruction.field_name);
   std::string offset_name = FreshNodeName();
   stack->Push(offset_name);
 
@@ -770,11 +770,6 @@ void CSAGenerator::EmitCSAValue(VisitResult result,
                    out);
     }
     out << "}";
-  } else if (result.type()->IsReferenceType()) {
-    DCHECK_EQ(2, result.stack_range().Size());
-    size_t offset = result.stack_range().begin().offset;
-    out << "CodeStubAssembler::Reference{" << values.Peek(BottomOffset{offset})
-        << ", " << values.Peek(BottomOffset{offset + 1}) << "}";
   } else {
     DCHECK_EQ(1, result.stack_range().Size());
     out << "compiler::TNode<" << result.type()->GetGeneratedTNodeTypeName()
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index 1fd07d5b0dbde9..5bcaff016d9287 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -7,6 +7,7 @@
 
 #include "src/torque/declarable.h"
 #include "src/torque/global-context.h"
+#include "src/torque/type-inference.h"
 #include "src/torque/type-visitor.h"
 
 namespace v8 {
@@ -58,67 +59,38 @@ std::ostream& operator<<(std::ostream& os, const RuntimeFunction& b) {
 std::ostream& operator<<(std::ostream& os, const Generic& g) {
   os << "generic " << g.name() << "<";
   PrintCommaSeparatedList(
-      os, g.declaration()->generic_parameters,
+      os, g.generic_parameters(),
       [](const Identifier* identifier) { return identifier->value; });
   os << ">";
 
   return os;
 }
 
-namespace {
-base::Optional<const Type*> InferTypeArgument(const std::string& to_infer,
-                                              TypeExpression* parameter,
-                                              const Type* argument) {
-  BasicTypeExpression* basic = BasicTypeExpression::DynamicCast(parameter);
-  if (basic && basic->namespace_qualification.empty() && !basic->is_constexpr &&
-      basic->name == to_infer) {
-    return argument;
-  }
-  auto* ref = ReferenceTypeExpression::DynamicCast(parameter);
-  if (ref && argument->IsReferenceType()) {
-    return InferTypeArgument(to_infer, ref->referenced_type,
-                             ReferenceType::cast(argument)->referenced_type());
-  }
-  return base::nullopt;
-}
-
-base::Optional<const Type*> InferTypeArgument(
-    const std::string& to_infer, const std::vector<TypeExpression*>& parameters,
+TypeArgumentInference Generic::InferSpecializationTypes(
+    const TypeVector& explicit_specialization_types,
     const TypeVector& arguments) {
-  for (size_t i = 0; i < arguments.size() && i < parameters.size(); ++i) {
-    if (base::Optional<const Type*> inferred =
-            InferTypeArgument(to_infer, parameters[i], arguments[i])) {
-      return *inferred;
-    }
-  }
-  return base::nullopt;
+  size_t implicit_count = declaration()->parameters.implicit_count;
+  const std::vector<TypeExpression*>& parameters =
+      declaration()->parameters.types;
+  std::vector<TypeExpression*> explicit_parameters(
+      parameters.begin() + implicit_count, parameters.end());
+
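+  // Match the call's arguments against the explicit (non-implicit)
+  // parameters only, resolving type expressions in the scope enclosing the
+  // generic.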
+  CurrentScope::Scope generic_scope(ParentScope());
+  TypeArgumentInference inference(generic_parameters(),
+                                  explicit_specialization_types,
+                                  explicit_parameters, arguments);
+  return inference;
 }
 
-}  // namespace
-
-base::Optional<TypeVector> Generic::InferSpecializationTypes(
-    const TypeVector& explicit_specialization_types,
-    const TypeVector& arguments) {
-  TypeVector result = explicit_specialization_types;
-  size_t type_parameter_count = declaration()->generic_parameters.size();
-  if (explicit_specialization_types.size() > type_parameter_count) {
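+// Returns the body for Torque-defined macros and builtins; extern
+// declarations carry no body, so everything else yields nullopt.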
+base::Optional<Statement*> Generic::CallableBody() {
+  if (auto* decl = TorqueMacroDeclaration::DynamicCast(declaration())) {
+    return decl->body;
+  } else if (auto* decl =
+                 TorqueBuiltinDeclaration::DynamicCast(declaration())) {
+    return decl->body;
+  } else {
     return base::nullopt;
   }
-  for (size_t i = explicit_specialization_types.size();
-       i < type_parameter_count; ++i) {
-    const std::string type_name = declaration()->generic_parameters[i]->value;
-    size_t implicit_count =
-        declaration()->callable->signature->parameters.implicit_count;
-    const std::vector<TypeExpression*>& parameters =
-        declaration()->callable->signature->parameters.types;
-    std::vector<TypeExpression*> explicit_parameters(
-        parameters.begin() + implicit_count, parameters.end());
-    base::Optional<const Type*> inferred =
-        InferTypeArgument(type_name, explicit_parameters, arguments);
-    if (!inferred) return base::nullopt;
-    result.push_back(*inferred);
-  }
-  return result;
 }
 
 bool Namespace::IsDefaultNamespace() const {
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index cf6fd2554b38da..502180953d292c 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -21,6 +21,7 @@ namespace torque {
 
 class Scope;
 class Namespace;
+class TypeArgumentInference;
 
 DECLARE_CONTEXTUAL_VARIABLE(CurrentScope, Scope*);
 
@@ -261,6 +262,7 @@ class Callable : public Scope {
   const std::string& ExternalName() const { return external_name_; }
   const std::string& ReadableName() const { return readable_name_; }
   const Signature& signature() const { return signature_; }
+  bool IsTransitioning() const { return signature().transitioning; }
   const NameVector& parameter_names() const {
     return signature_.parameter_names;
   }
@@ -269,7 +271,6 @@ class Callable : public Scope {
   }
   void IncrementReturns() { ++returns_; }
   bool HasReturns() const { return returns_; }
-  bool IsTransitioning() const { return transitioning_; }
   base::Optional<Statement*> body() const { return body_; }
   bool IsExternal() const { return !body_.has_value(); }
   virtual bool ShouldBeInlined() const { return false; }
@@ -277,14 +278,13 @@ class Callable : public Scope {
 
  protected:
   Callable(Declarable::Kind kind, std::string external_name,
-           std::string readable_name, Signature signature, bool transitioning,
+           std::string readable_name, Signature signature,
            base::Optional<Statement*> body)
       : Scope(kind),
         external_name_(std::move(external_name)),
 
         readable_name_(std::move(readable_name)),
         signature_(std::move(signature)),
-        transitioning_(transitioning),
         returns_(0),
         body_(body) {
     DCHECK(!body || *body);
@@ -294,7 +294,6 @@ class Callable : public Scope {
   std::string external_name_;
   std::string readable_name_;
   Signature signature_;
-  bool transitioning_;
   size_t returns_;
   base::Optional<Statement*> body_;
 };
@@ -320,9 +319,9 @@ class Macro : public Callable {
  protected:
   Macro(Declarable::Kind kind, std::string external_name,
         std::string readable_name, const Signature& signature,
-        bool transitioning, base::Optional<Statement*> body)
+        base::Optional<Statement*> body)
       : Callable(kind, std::move(external_name), std::move(readable_name),
-                 signature, transitioning, body),
+                 signature, body),
         used_(false) {
     if (signature.parameter_types.var_args) {
       ReportError("Varargs are not supported for macros.");
@@ -344,9 +343,9 @@ class ExternMacro : public Macro {
  private:
   friend class Declarations;
   ExternMacro(const std::string& name, std::string external_assembler_name,
-              Signature signature, bool transitioning)
+              Signature signature)
       : Macro(Declarable::kExternMacro, name, name, std::move(signature),
-              transitioning, base::nullopt),
+              base::nullopt),
         external_assembler_name_(std::move(external_assembler_name)) {}
 
   std::string external_assembler_name_;
@@ -360,10 +359,10 @@ class TorqueMacro : public Macro {
  protected:
   TorqueMacro(Declarable::Kind kind, std::string external_name,
               std::string readable_name, const Signature& signature,
-              bool transitioning, base::Optional<Statement*> body,
-              bool is_user_defined, bool exported_to_csa)
+              base::Optional<Statement*> body, bool is_user_defined,
+              bool exported_to_csa)
       : Macro(kind, std::move(external_name), std::move(readable_name),
-              signature, transitioning, body),
+              signature, body),
         exported_to_csa_(exported_to_csa) {
     SetIsUserDefined(is_user_defined);
   }
@@ -371,12 +370,11 @@ class TorqueMacro : public Macro {
  private:
   friend class Declarations;
   TorqueMacro(std::string external_name, std::string readable_name,
-              const Signature& signature, bool transitioning,
-              base::Optional<Statement*> body, bool is_user_defined,
-              bool exported_to_csa)
+              const Signature& signature, base::Optional<Statement*> body,
+              bool is_user_defined, bool exported_to_csa)
       : TorqueMacro(Declarable::kTorqueMacro, std::move(external_name),
-                    std::move(readable_name), signature, transitioning, body,
-                    is_user_defined, exported_to_csa) {}
+                    std::move(readable_name), signature, body, is_user_defined,
+                    exported_to_csa) {}
 
   bool exported_to_csa_ = false;
 };
@@ -395,11 +393,9 @@ class Method : public TorqueMacro {
  private:
   friend class Declarations;
   Method(AggregateType* aggregate_type, std::string external_name,
-         std::string readable_name, const Signature& signature,
-         bool transitioning, Statement* body)
+         std::string readable_name, const Signature& signature, Statement* body)
       : TorqueMacro(Declarable::kMethod, std::move(external_name),
-                    std::move(readable_name), signature, transitioning, body,
-                    true, false),
+                    std::move(readable_name), signature, body, true, false),
         aggregate_type_(aggregate_type) {}
   AggregateType* aggregate_type_;
 };
@@ -416,10 +412,10 @@ class Builtin : public Callable {
  private:
   friend class Declarations;
   Builtin(std::string external_name, std::string readable_name,
-          Builtin::Kind kind, const Signature& signature, bool transitioning,
+          Builtin::Kind kind, const Signature& signature,
           base::Optional<Statement*> body)
       : Callable(Declarable::kBuiltin, std::move(external_name),
-                 std::move(readable_name), signature, transitioning, body),
+                 std::move(readable_name), signature, body),
         kind_(kind) {}
 
   Kind kind_;
@@ -431,10 +427,9 @@ class RuntimeFunction : public Callable {
 
  private:
   friend class Declarations;
-  RuntimeFunction(const std::string& name, const Signature& signature,
-                  bool transitioning)
+  RuntimeFunction(const std::string& name, const Signature& signature)
       : Callable(Declarable::kRuntimeFunction, name, name, signature,
-                 transitioning, base::nullopt) {}
+                 base::nullopt) {}
 };
 
 class Intrinsic : public Callable {
@@ -444,8 +439,7 @@ class Intrinsic : public Callable {
  private:
   friend class Declarations;
   Intrinsic(std::string name, const Signature& signature)
-      : Callable(Declarable::kIntrinsic, name, name, signature, false,
-                 base::nullopt) {
+      : Callable(Declarable::kIntrinsic, name, name, signature, base::nullopt) {
     if (signature.parameter_types.var_args) {
       ReportError("Varargs are not supported for intrinsics.");
     }
@@ -483,33 +477,32 @@ class Generic : public Declarable {
   DECLARE_DECLARABLE_BOILERPLATE(Generic, generic)
 
   const std::string& name() const { return name_; }
-  GenericDeclaration* declaration() const { return declaration_; }
+  CallableDeclaration* declaration() const {
+    return generic_declaration_->declaration;
+  }
   const std::vector<Identifier*> generic_parameters() const {
-    return declaration()->generic_parameters;
+    return generic_declaration_->generic_parameters;
   }
   SpecializationMap<Callable>& specializations() { return specializations_; }
 
-  base::Optional<TypeVector> InferSpecializationTypes(
+  base::Optional<Statement*> CallableBody();
+
+  TypeArgumentInference InferSpecializationTypes(
       const TypeVector& explicit_specialization_types,
       const TypeVector& arguments);
 
  private:
   friend class Declarations;
-  Generic(const std::string& name, GenericDeclaration* declaration)
+  Generic(const std::string& name, GenericDeclaration* generic_declaration)
       : Declarable(Declarable::kGeneric),
         name_(name),
-        declaration_(declaration) {}
+        generic_declaration_(generic_declaration) {}
 
   std::string name_;
-  GenericDeclaration* declaration_;
+  GenericDeclaration* generic_declaration_;
   SpecializationMap<Callable> specializations_;
 };
 
-struct SpecializationKey {
-  Generic* generic;
-  TypeVector specialized_types;
-};
-
 class GenericStructType : public Declarable {
  public:
   DECLARE_DECLARABLE_BOILERPLATE(GenericStructType, generic_type)
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index e0e996f33b9a6d..f762337463cedd 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -53,26 +53,13 @@ void DeclarationVisitor::Visit(Declaration* decl) {
   }
 }
 
-void DeclarationVisitor::Visit(CallableNode* decl, const Signature& signature,
-                               base::Optional<Statement*> body) {
-  switch (decl->kind) {
-#define ENUM_ITEM(name)        \
-  case AstNode::Kind::k##name: \
-    return Visit(name::cast(decl), signature, body);
-    AST_CALLABLE_NODE_KIND_LIST(ENUM_ITEM)
-#undef ENUM_ITEM
-    default:
-      UNIMPLEMENTED();
-  }
-}
-
 Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
                                            std::string external_name,
                                            std::string readable_name,
                                            Signature signature,
                                            base::Optional<Statement*> body) {
   const bool javascript = decl->javascript_linkage;
-  const bool varargs = decl->signature->parameters.has_varargs;
+  const bool varargs = decl->parameters.has_varargs;
   Builtin::Kind kind = !javascript ? Builtin::kStub
                                    : varargs ? Builtin::kVarArgsJavaScript
                                              : Builtin::kFixedArgsJavaScript;
@@ -82,6 +69,21 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
           " to be a JavaScript builtin");
   }
 
+  if (javascript) {
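+    // Builtins with JavaScript linkage exchange values directly with JS, so
+    // the return type and all explicit parameters must be JSAny.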
+    if (!signature.return_type->IsSubtypeOf(TypeOracle::GetJSAnyType())) {
+      Error("Return type of JavaScript-linkage builtins has to be JSAny.")
+          .Position(decl->return_type->pos);
+    }
+    for (size_t i = signature.implicit_count;
+         i < signature.parameter_types.types.size(); ++i) {
+      const Type* parameter_type = signature.parameter_types.types[i];
+      if (parameter_type != TypeOracle::GetJSAnyType()) {
+        Error("Parameters of JavaScript-linkage builtins have to be JSAny.")
+            .Position(decl->parameters.types[i]->pos);
+      }
+    }
+  }
+
   for (size_t i = 0; i < signature.types().size(); ++i) {
     if (const StructType* type =
             StructType::DynamicCast(signature.types()[i])) {
@@ -111,14 +113,20 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
           struct_type->name());
   }
 
-  return Declarations::CreateBuiltin(
-      std::move(external_name), std::move(readable_name), kind,
-      std::move(signature), decl->transitioning, body);
+  return Declarations::CreateBuiltin(std::move(external_name),
+                                     std::move(readable_name), kind,
+                                     std::move(signature), body);
+}
+
+void DeclarationVisitor::Visit(ExternalBuiltinDeclaration* decl) {
+  Declarations::Declare(
+      decl->name->value,
+      CreateBuiltin(decl, decl->name->value, decl->name->value,
+                    TypeVisitor::MakeSignature(decl), base::nullopt));
 }
 
-void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl,
-                               const Signature& signature,
-                               base::Optional<Statement*> body) {
+void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl) {
+  Signature signature = TypeVisitor::MakeSignature(decl);
   if (signature.parameter_types.types.size() == 0 ||
       !(signature.parameter_types.types[0] == TypeOracle::GetContextType())) {
     ReportError(
@@ -142,39 +150,34 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl,
     }
   }
 
-  Declarations::DeclareRuntimeFunction(decl->name, signature,
-                                       decl->transitioning);
+  Declarations::DeclareRuntimeFunction(decl->name->value, signature);
 }
 
-void DeclarationVisitor::Visit(ExternalMacroDeclaration* decl,
-                               const Signature& signature,
-                               base::Optional<Statement*> body) {
-  Declarations::DeclareMacro(decl->name, true, decl->external_assembler_name,
-                             signature, decl->transitioning, body, decl->op);
+void DeclarationVisitor::Visit(ExternalMacroDeclaration* decl) {
+  Declarations::DeclareMacro(
+      decl->name->value, true, decl->external_assembler_name,
+      TypeVisitor::MakeSignature(decl), base::nullopt, decl->op);
 }
 
-void DeclarationVisitor::Visit(TorqueBuiltinDeclaration* decl,
-                               const Signature& signature,
-                               base::Optional<Statement*> body) {
+void DeclarationVisitor::Visit(TorqueBuiltinDeclaration* decl) {
   Declarations::Declare(
-      decl->name, CreateBuiltin(decl, decl->name, decl->name, signature, body));
+      decl->name->value,
+      CreateBuiltin(decl, decl->name->value, decl->name->value,
+                    TypeVisitor::MakeSignature(decl), decl->body));
 }
 
-void DeclarationVisitor::Visit(TorqueMacroDeclaration* decl,
-                               const Signature& signature,
-                               base::Optional<Statement*> body) {
+void DeclarationVisitor::Visit(TorqueMacroDeclaration* decl) {
   Macro* macro = Declarations::DeclareMacro(
-      decl->name, decl->export_to_csa, base::nullopt, signature,
-      decl->transitioning, body, decl->op);
+      decl->name->value, decl->export_to_csa, base::nullopt,
+      TypeVisitor::MakeSignature(decl), decl->body, decl->op);
   // TODO(szuend): Set identifier_position to decl->name->pos once all callable
   // names are changed from std::string to Identifier*.
   macro->SetPosition(decl->pos);
 }
 
-void DeclarationVisitor::Visit(IntrinsicDeclaration* decl,
-                               const Signature& signature,
-                               base::Optional<Statement*> body) {
-  Declarations::DeclareIntrinsic(decl->name, signature);
+void DeclarationVisitor::Visit(IntrinsicDeclaration* decl) {
+  Declarations::DeclareIntrinsic(decl->name->value,
+                                 TypeVisitor::MakeSignature(decl));
 }
 
 void DeclarationVisitor::Visit(ConstDeclaration* decl) {
@@ -182,30 +185,16 @@ void DeclarationVisitor::Visit(ConstDeclaration* decl) {
       decl->name, TypeVisitor::ComputeType(decl->type), decl->expression);
 }
 
-void DeclarationVisitor::Visit(StandardDeclaration* decl) {
-  Signature signature =
-      TypeVisitor::MakeSignature(decl->callable->signature.get());
-  Visit(decl->callable, signature, decl->body);
-}
-
 void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
-  if ((decl->body != nullptr) == decl->external) {
-    std::stringstream stream;
-    stream << "specialization of " << decl->name
-           << " must either be marked 'extern' or have a body";
-    ReportError(stream.str());
-  }
-
   std::vector<Generic*> generic_list =
       Declarations::LookupGeneric(decl->name->value);
   // Find the matching generic specialization based on the concrete parameter
   // list.
   Generic* matching_generic = nullptr;
-  Signature signature_with_types =
-      TypeVisitor::MakeSignature(decl->signature.get());
+  Signature signature_with_types = TypeVisitor::MakeSignature(decl);
   for (Generic* generic : generic_list) {
     Signature generic_signature_with_types =
-        MakeSpecializedSignature(SpecializationKey{
+        MakeSpecializedSignature(SpecializationKey<Generic>{
             generic, TypeVisitor::ComputeTypeVector(decl->generic_parameters)});
     if (signature_with_types.HasSameTypesAs(generic_signature_with_types,
                                             ParameterMode::kIgnoreImplicit)) {
@@ -233,7 +222,7 @@ void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
     stream << "\ncandidates are:";
     for (Generic* generic : generic_list) {
       stream << "\n  "
-             << MakeSpecializedSignature(SpecializationKey{
+             << MakeSpecializedSignature(SpecializationKey<Generic>{
                     generic,
                     TypeVisitor::ComputeTypeVector(decl->generic_parameters)});
     }
@@ -245,10 +234,12 @@ void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
                                       matching_generic->IdentifierPosition());
   }
 
-  Specialize(SpecializationKey{matching_generic, TypeVisitor::ComputeTypeVector(
-                                                     decl->generic_parameters)},
-             matching_generic->declaration()->callable, decl->signature.get(),
-             decl->body, decl->pos);
+  CallableDeclaration* generic_declaration = matching_generic->declaration();
+
+  Specialize(SpecializationKey<Generic>{matching_generic,
+                                        TypeVisitor::ComputeTypeVector(
+                                            decl->generic_parameters)},
+             generic_declaration, decl, decl->body, decl->pos);
 }
 
 void DeclarationVisitor::Visit(ExternConstDeclaration* decl) {
@@ -267,10 +258,11 @@ void DeclarationVisitor::Visit(CppIncludeDeclaration* decl) {
   GlobalContext::AddCppInclude(decl->include_path);
 }
 
-void DeclarationVisitor::DeclareSpecializedTypes(const SpecializationKey& key) {
+void DeclarationVisitor::DeclareSpecializedTypes(
+    const SpecializationKey<Generic>& key) {
   size_t i = 0;
   const std::size_t generic_parameter_count =
-      key.generic->declaration()->generic_parameters.size();
+      key.generic->generic_parameters().size();
   if (generic_parameter_count != key.specialized_types.size()) {
     std::stringstream stream;
     stream << "Wrong generic argument count for specialization of \""
@@ -280,37 +272,35 @@ void DeclarationVisitor::DeclareSpecializedTypes(const SpecializationKey& key) {
   }
 
   for (auto type : key.specialized_types) {
-    Identifier* generic_type_name =
-        key.generic->declaration()->generic_parameters[i++];
+    Identifier* generic_type_name = key.generic->generic_parameters()[i++];
     TypeAlias* alias = Declarations::DeclareType(generic_type_name, type);
     alias->SetIsUserDefined(false);
   }
 }
 
 Signature DeclarationVisitor::MakeSpecializedSignature(
-    const SpecializationKey& key) {
+    const SpecializationKey<Generic>& key) {
   CurrentScope::Scope generic_scope(key.generic->ParentScope());
   // Create a temporary fake-namespace just to temporarily declare the
   // specialization aliases for the generic types to create a signature.
   Namespace tmp_namespace("_tmp");
   CurrentScope::Scope tmp_namespace_scope(&tmp_namespace);
   DeclareSpecializedTypes(key);
-  return TypeVisitor::MakeSignature(
-      key.generic->declaration()->callable->signature.get());
+  return TypeVisitor::MakeSignature(key.generic->declaration());
 }
 
-Callable* DeclarationVisitor::SpecializeImplicit(const SpecializationKey& key) {
-  if (!key.generic->declaration()->body &&
-      IntrinsicDeclaration::DynamicCast(key.generic->declaration()->callable) ==
-          nullptr) {
+Callable* DeclarationVisitor::SpecializeImplicit(
+    const SpecializationKey<Generic>& key) {
+  base::Optional<Statement*> body = key.generic->CallableBody();
+  if (!body && IntrinsicDeclaration::DynamicCast(key.generic->declaration()) ==
+                   nullptr) {
     ReportError("missing specialization of ", key.generic->name(),
                 " with types <", key.specialized_types, "> declared at ",
                 key.generic->Position());
   }
   CurrentScope::Scope generic_scope(key.generic->ParentScope());
-  Callable* result = Specialize(key, key.generic->declaration()->callable,
-                                base::nullopt, key.generic->declaration()->body,
-                                CurrentSourcePosition::Get());
+  Callable* result = Specialize(key, key.generic->declaration(), base::nullopt,
+                                body, CurrentSourcePosition::Get());
   result->SetIsUserDefined(false);
   CurrentScope::Scope callable_scope(result);
   DeclareSpecializedTypes(key);
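
For orientation: DeclareSpecializedTypes pairs each generic parameter of the matched generic with the corresponding concrete type argument from the SpecializationKey and declares a type alias for it, which is what lets MakeSpecializedSignature resolve the parameter names inside its temporary namespace. A minimal standalone sketch of that pairing, with plain strings standing in for Torque's Identifier/Type/TypeAlias machinery (all names here are illustrative, not V8 API):

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    // Pair each generic parameter name with its concrete type argument,
    // the way DeclareSpecializedTypes does before a signature is built.
    std::map<std::string, std::string> DeclareAliases(
        const std::vector<std::string>& generic_parameters,
        const std::vector<std::string>& specialized_types) {
      assert(generic_parameters.size() == specialized_types.size());
      std::map<std::string, std::string> aliases;
      for (size_t i = 0; i < generic_parameters.size(); ++i) {
        aliases[generic_parameters[i]] = specialized_types[i];
      }
      return aliases;
    }

    // E.g. a generic declared with parameters {"T"} and specialized with
    // {"Smi"} yields the alias map {T -> Smi}.
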
@@ -318,12 +308,11 @@ Callable* DeclarationVisitor::SpecializeImplicit(const SpecializationKey& key) {
 }
 
 Callable* DeclarationVisitor::Specialize(
-    const SpecializationKey& key, CallableNode* declaration,
-    base::Optional<const CallableNodeSignature*> signature,
+    const SpecializationKey<Generic>& key, CallableDeclaration* declaration,
+    base::Optional<const SpecializationDeclaration*> explicit_specialization,
     base::Optional<Statement*> body, SourcePosition position) {
   CurrentSourcePosition::Scope pos_scope(position);
-  size_t generic_parameter_count =
-      key.generic->declaration()->generic_parameters.size();
+  size_t generic_parameter_count = key.generic->generic_parameters().size();
   if (generic_parameter_count != key.specialized_types.size()) {
     std::stringstream stream;
     stream << "number of template parameters ("
@@ -338,13 +327,15 @@ Callable* DeclarationVisitor::Specialize(
                 " with types <", key.specialized_types, ">");
   }
 
-  Signature type_signature = signature ? TypeVisitor::MakeSignature(*signature)
-                                       : MakeSpecializedSignature(key);
+  Signature type_signature =
+      explicit_specialization
+          ? TypeVisitor::MakeSignature(*explicit_specialization)
+          : MakeSpecializedSignature(key);
 
   std::string generated_name = Declarations::GetGeneratedCallableName(
-      declaration->name, key.specialized_types);
+      declaration->name->value, key.specialized_types);
   std::stringstream readable_name;
-  readable_name << declaration->name << "<";
+  readable_name << declaration->name->value << "<";
   bool first = true;
   for (const Type* t : key.specialized_types) {
     if (!first) readable_name << ", ";
@@ -354,11 +345,12 @@ Callable* DeclarationVisitor::Specialize(
   readable_name << ">";
   Callable* callable;
   if (MacroDeclaration::DynamicCast(declaration) != nullptr) {
-    callable = Declarations::CreateTorqueMacro(
-        generated_name, readable_name.str(), false, type_signature,
-        declaration->transitioning, *body, true);
+    callable =
+        Declarations::CreateTorqueMacro(generated_name, readable_name.str(),
+                                        false, type_signature, *body, true);
   } else if (IntrinsicDeclaration::DynamicCast(declaration) != nullptr) {
-    callable = Declarations::CreateIntrinsic(declaration->name, type_signature);
+    callable =
+        Declarations::CreateIntrinsic(declaration->name->value, type_signature);
   } else {
     BuiltinDeclaration* builtin = BuiltinDeclaration::cast(declaration);
     callable = CreateBuiltin(builtin, generated_name, readable_name.str(),
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index dbd28f4b873afa..3a5201e24ac6d7 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -45,7 +45,7 @@ class PredeclarationVisitor {
     }
   }
   static void Predeclare(GenericDeclaration* decl) {
-    Declarations::DeclareGeneric(decl->callable->name, decl);
+    Declarations::DeclareGeneric(decl->declaration->name->value, decl);
   }
 };
 
@@ -76,30 +76,15 @@ class DeclarationVisitor {
                                 std::string external_name,
                                 std::string readable_name, Signature signature,
                                 base::Optional<Statement*> body);
-  static void Visit(ExternalBuiltinDeclaration* decl,
-                    const Signature& signature,
-                    base::Optional<Statement*> body) {
-    Declarations::Declare(
-        decl->name,
-        CreateBuiltin(decl, decl->name, decl->name, signature, base::nullopt));
-  }
-
-  static void Visit(ExternalRuntimeDeclaration* decl, const Signature& sig,
-                    base::Optional<Statement*> body);
-  static void Visit(ExternalMacroDeclaration* decl, const Signature& sig,
-                    base::Optional<Statement*> body);
-  static void Visit(TorqueBuiltinDeclaration* decl, const Signature& signature,
-                    base::Optional<Statement*> body);
-  static void Visit(TorqueMacroDeclaration* decl, const Signature& signature,
-                    base::Optional<Statement*> body);
-  static void Visit(IntrinsicDeclaration* decl, const Signature& signature,
-                    base::Optional<Statement*> body);
 
-  static void Visit(CallableNode* decl, const Signature& signature,
-                    base::Optional<Statement*> body);
+  static void Visit(ExternalBuiltinDeclaration* decl);
+  static void Visit(ExternalRuntimeDeclaration* decl);
+  static void Visit(ExternalMacroDeclaration* decl);
+  static void Visit(TorqueBuiltinDeclaration* decl);
+  static void Visit(TorqueMacroDeclaration* decl);
+  static void Visit(IntrinsicDeclaration* decl);
 
   static void Visit(ConstDeclaration* decl);
-  static void Visit(StandardDeclaration* decl);
   static void Visit(GenericDeclaration* decl) {
     // The PredeclarationVisitor already handled this case.
   }
@@ -107,15 +92,16 @@ class DeclarationVisitor {
   static void Visit(ExternConstDeclaration* decl);
   static void Visit(CppIncludeDeclaration* decl);
 
-  static Signature MakeSpecializedSignature(const SpecializationKey& key);
-  static Callable* SpecializeImplicit(const SpecializationKey& key);
+  static Signature MakeSpecializedSignature(
+      const SpecializationKey<Generic>& key);
+  static Callable* SpecializeImplicit(const SpecializationKey<Generic>& key);
   static Callable* Specialize(
-      const SpecializationKey& key, CallableNode* declaration,
-      base::Optional<const CallableNodeSignature*> signature,
+      const SpecializationKey<Generic>& key, CallableDeclaration* declaration,
+      base::Optional<const SpecializationDeclaration*> explicit_specialization,
       base::Optional<Statement*> body, SourcePosition position);
 
  private:
-  static void DeclareSpecializedTypes(const SpecializationKey& key);
+  static void DeclareSpecializedTypes(const SpecializationKey<Generic>& key);
 };
 
 }  // namespace torque
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index 73d46d69985517..ed4ad230444666 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -11,9 +11,6 @@
 namespace v8 {
 namespace internal {
 namespace torque {
-
-DEFINE_CONTEXTUAL_VARIABLE(GlobalContext)
-
 namespace {
 
 template <class T>
@@ -139,6 +136,13 @@ GenericStructType* Declarations::LookupUniqueGenericStructType(
                       "generic struct");
 }
 
+base::Optional<GenericStructType*> Declarations::TryLookupGenericStructType(
+    const QualifiedName& name) {
+  std::vector<GenericStructType*> results = TryLookup<GenericStructType>(name);
+  if (results.empty()) return base::nullopt;
+  return EnsureUnique(results, name.name, "generic struct");
+}
+
 Namespace* Declarations::DeclareNamespace(const std::string& name) {
   return Declare(name, std::unique_ptr<Namespace>(new Namespace(name)));
 }
@@ -158,43 +162,44 @@ const TypeAlias* Declarations::PredeclareTypeAlias(const Identifier* name,
   return Declare(name->value, std::move(alias_ptr));
 }
 
-TorqueMacro* Declarations::CreateTorqueMacro(
-    std::string external_name, std::string readable_name, bool exported_to_csa,
-    Signature signature, bool transitioning, base::Optional<Statement*> body,
-    bool is_user_defined) {
+TorqueMacro* Declarations::CreateTorqueMacro(std::string external_name,
+                                             std::string readable_name,
+                                             bool exported_to_csa,
+                                             Signature signature,
+                                             base::Optional<Statement*> body,
+                                             bool is_user_defined) {
   // TODO(tebbi): Switch to more predictable names to improve incremental
   // compilation.
   external_name += "_" + std::to_string(GlobalContext::FreshId());
   return RegisterDeclarable(std::unique_ptr<TorqueMacro>(new TorqueMacro(
       std::move(external_name), std::move(readable_name), std::move(signature),
-      transitioning, body, is_user_defined, exported_to_csa)));
+      body, is_user_defined, exported_to_csa)));
 }
 
 ExternMacro* Declarations::CreateExternMacro(
-    std::string name, std::string external_assembler_name, Signature signature,
-    bool transitioning) {
+    std::string name, std::string external_assembler_name,
+    Signature signature) {
   return RegisterDeclarable(std::unique_ptr<ExternMacro>(
       new ExternMacro(std::move(name), std::move(external_assembler_name),
-                      std::move(signature), transitioning)));
+                      std::move(signature))));
 }
 
 Macro* Declarations::DeclareMacro(
     const std::string& name, bool accessible_from_csa,
     base::Optional<std::string> external_assembler_name,
-    const Signature& signature, bool transitioning,
-    base::Optional<Statement*> body, base::Optional<std::string> op,
-    bool is_user_defined) {
+    const Signature& signature, base::Optional<Statement*> body,
+    base::Optional<std::string> op, bool is_user_defined) {
   if (TryLookupMacro(name, signature.GetExplicitTypes())) {
     ReportError("cannot redeclare macro ", name,
                 " with identical explicit parameters");
   }
   Macro* macro;
   if (external_assembler_name) {
-    macro = CreateExternMacro(name, std::move(*external_assembler_name),
-                              signature, transitioning);
+    macro =
+        CreateExternMacro(name, std::move(*external_assembler_name), signature);
   } else {
-    macro = CreateTorqueMacro(name, name, accessible_from_csa, signature,
-                              transitioning, body, is_user_defined);
+    macro = CreateTorqueMacro(name, name, accessible_from_csa, signature, body,
+                              is_user_defined);
   }
   Declare(name, macro);
   if (op) {
@@ -209,11 +214,11 @@ Macro* Declarations::DeclareMacro(
 
 Method* Declarations::CreateMethod(AggregateType* container_type,
                                    const std::string& name, Signature signature,
-                                   bool transitioning, Statement* body) {
+                                   Statement* body) {
   std::string generated_name{container_type->GetGeneratedMethodName(name)};
   Method* result = RegisterDeclarable(std::unique_ptr<Method>(
       new Method(container_type, container_type->GetGeneratedMethodName(name),
-                 name, std::move(signature), transitioning, body)));
+                 name, std::move(signature), body)));
   container_type->RegisterMethod(result);
   return result;
 }
@@ -235,29 +240,27 @@ Intrinsic* Declarations::DeclareIntrinsic(const std::string& name,
 Builtin* Declarations::CreateBuiltin(std::string external_name,
                                      std::string readable_name,
                                      Builtin::Kind kind, Signature signature,
-                                     bool transitioning,
                                      base::Optional<Statement*> body) {
   return RegisterDeclarable(std::unique_ptr<Builtin>(
       new Builtin(std::move(external_name), std::move(readable_name), kind,
-                  std::move(signature), transitioning, body)));
+                  std::move(signature), body)));
 }
 
 Builtin* Declarations::DeclareBuiltin(const std::string& name,
                                       Builtin::Kind kind,
                                       const Signature& signature,
-                                      bool transitioning,
                                       base::Optional<Statement*> body) {
   CheckAlreadyDeclared<Builtin>(name, "builtin");
-  return Declare(
-      name, CreateBuiltin(name, name, kind, signature, transitioning, body));
+  return Declare(name, CreateBuiltin(name, name, kind, signature, body));
 }
 
 RuntimeFunction* Declarations::DeclareRuntimeFunction(
-    const std::string& name, const Signature& signature, bool transitioning) {
+    const std::string& name, const Signature& signature) {
   CheckAlreadyDeclared<RuntimeFunction>(name, "runtime function");
-  return Declare(name,
-                 RegisterDeclarable(std::unique_ptr<RuntimeFunction>(
-                     new RuntimeFunction(name, signature, transitioning))));
+  return Declare(name, RegisterDeclarable(std::unique_ptr<RuntimeFunction>(
+                           new RuntimeFunction(name, signature))));
 }
 
 void Declarations::DeclareExternConstant(Identifier* name, const Type* type,
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 00e0facefef4dc..240680fa1e1bde 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -15,8 +15,6 @@ namespace internal {
 namespace torque {
 
 static constexpr const char* const kFromConstexprMacroName = "FromConstexpr";
-static constexpr const char* kTrueLabelName = "__True";
-static constexpr const char* kFalseLabelName = "__False";
 static constexpr const char* kMacroEndLabelName = "__macro_end";
 static constexpr const char* kBreakLabelName = "__break";
 static constexpr const char* kContinueLabelName = "__continue";
@@ -78,6 +76,8 @@ class Declarations {
 
   static GenericStructType* LookupUniqueGenericStructType(
       const QualifiedName& name);
+  static base::Optional<GenericStructType*> TryLookupGenericStructType(
+      const QualifiedName& name);
 
   static Namespace* DeclareNamespace(const std::string& name);
   static TypeAlias* DeclareType(const Identifier* name, const Type* type);
@@ -88,23 +88,21 @@ class Declarations {
   static TorqueMacro* CreateTorqueMacro(std::string external_name,
                                         std::string readable_name,
                                         bool exported_to_csa,
-                                        Signature signature, bool transitioning,
+                                        Signature signature,
                                         base::Optional<Statement*> body,
                                         bool is_user_defined);
   static ExternMacro* CreateExternMacro(std::string name,
                                         std::string external_assembler_name,
-                                        Signature signature,
-                                        bool transitioning);
+                                        Signature signature);
   static Macro* DeclareMacro(
       const std::string& name, bool accessible_from_csa,
       base::Optional<std::string> external_assembler_name,
-      const Signature& signature, bool transitioning,
-      base::Optional<Statement*> body, base::Optional<std::string> op = {},
-      bool is_user_defined = true);
+      const Signature& signature, base::Optional<Statement*> body,
+      base::Optional<std::string> op = {}, bool is_user_defined = true);
 
   static Method* CreateMethod(AggregateType* class_type,
                               const std::string& name, Signature signature,
-                              bool transitioning, Statement* body);
+                              Statement* body);
 
   static Intrinsic* CreateIntrinsic(const std::string& name,
                                     const Signature& signature);
@@ -114,15 +112,14 @@ class Declarations {
 
   static Builtin* CreateBuiltin(std::string external_name,
                                 std::string readable_name, Builtin::Kind kind,
-                                Signature signature, bool transitioning,
+                                Signature signature,
                                 base::Optional<Statement*> body);
   static Builtin* DeclareBuiltin(const std::string& name, Builtin::Kind kind,
-                                 const Signature& signature, bool transitioning,
+                                 const Signature& signature,
                                  base::Optional<Statement*> body);
 
   static RuntimeFunction* DeclareRuntimeFunction(const std::string& name,
-                                                 const Signature& signature,
-                                                 bool transitioning);
+                                                 const Signature& signature);
 
   static void DeclareExternConstant(Identifier* name, const Type* type,
                                     std::string value);
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index d3d0c89c42c828..9f7ba6a7aee6a5 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -56,8 +56,8 @@ enum class ParseResultHolderBase::TypeId {
   kImplicitParameters,
   kOptionalImplicitParameters,
   kNameAndExpression,
-  kConditionalAnnotation,
-  kOptionalConditionalAnnotation,
+  kAnnotation,
+  kVectorOfAnnotation,
   kClassFieldExpression,
   kStructFieldExpression,
   kStdVectorOfNameAndTypeExpression,
diff --git a/deps/v8/src/torque/global-context.cc b/deps/v8/src/torque/global-context.cc
index f258f1847410d4..13503038c55810 100644
--- a/deps/v8/src/torque/global-context.cc
+++ b/deps/v8/src/torque/global-context.cc
@@ -8,6 +8,9 @@ namespace v8 {
 namespace internal {
 namespace torque {
 
+DEFINE_CONTEXTUAL_VARIABLE(GlobalContext)
+DEFINE_CONTEXTUAL_VARIABLE(TargetArchitecture)
+
 GlobalContext::GlobalContext(Ast ast)
     : collect_language_server_data_(false),
       force_assert_statements_(false),
@@ -19,6 +22,10 @@ GlobalContext::GlobalContext(Ast ast)
       RegisterDeclarable(base::make_unique<Namespace>(kBaseNamespaceName));
 }
 
+TargetArchitecture::TargetArchitecture(bool force_32bit)
+    : tagged_size_(force_32bit ? sizeof(int32_t) : kTaggedSize),
+      raw_ptr_size_(force_32bit ? sizeof(int32_t) : kSystemPointerSize) {}
+
 }  // namespace torque
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index e103a22575f0c9..e1106adbd1c117 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -7,6 +7,7 @@
 
 #include <map>
 
+#include "src/common/globals.h"
 #include "src/torque/ast.h"
 #include "src/torque/contextual.h"
 #include "src/torque/declarable.h"
@@ -91,6 +92,18 @@ T* RegisterDeclarable(std::unique_ptr<T> d) {
   return GlobalContext::Get().RegisterDeclarable(std::move(d));
 }
 
+class TargetArchitecture : public ContextualClass<TargetArchitecture> {
+ public:
+  explicit TargetArchitecture(bool force_32bit);
+
+  static int TaggedSize() { return Get().tagged_size_; }
+  static int RawPtrSize() { return Get().raw_ptr_size_; }
+
+ private:
+  const int tagged_size_;
+  const int raw_ptr_size_;
+};
+
 }  // namespace torque
 }  // namespace internal
 }  // namespace v8
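
The new TargetArchitecture contextual lets any later phase of the Torque compiler ask for the target's tagged and raw pointer sizes without threading a force-32bit flag through every call. A standalone sketch of the scope-installs-current-instance pattern that ContextualClass provides (sizes and names here are illustrative; V8's actual kTaggedSize also depends on pointer compression):

    #include <cassert>

    class TargetArch {
     public:
      explicit TargetArch(bool force_32bit)
          : tagged_size_(force_32bit ? 4 : 8),
            raw_ptr_size_(force_32bit ? 4 : 8),
            previous_(current_) {
        current_ = this;  // install as the current instance for this scope
      }
      ~TargetArch() { current_ = previous_; }  // restore the outer instance

      static int TaggedSize() { assert(current_); return current_->tagged_size_; }
      static int RawPtrSize() { assert(current_); return current_->raw_ptr_size_; }

     private:
      const int tagged_size_;
      const int raw_ptr_size_;
      TargetArch* const previous_;
      static thread_local TargetArch* current_;
    };

    thread_local TargetArch* TargetArch::current_ = nullptr;

    int main() {
      TargetArch scope(/*force_32bit=*/true);
      assert(TargetArch::TaggedSize() == 4);  // 32-bit target sizes
      assert(TargetArch::RawPtrSize() == 4);
    }
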
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index a0aeeee81b5782..8f36afd020fe32 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -10,6 +10,7 @@
 #include "src/torque/implementation-visitor.h"
 #include "src/torque/parameter-difference.h"
 #include "src/torque/server-data.h"
+#include "src/torque/type-inference.h"
 #include "src/torque/type-visitor.h"
 
 namespace v8 {
@@ -107,7 +108,8 @@ void ImplementationVisitor::EndCSAFiles() {
 }
 
 void ImplementationVisitor::Visit(NamespaceConstant* decl) {
-  Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(), {}};
+  Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(),
+                      {}, false};
 
   BindingsManagersScope bindings_managers_scope;
 
@@ -466,13 +468,13 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
                     : "UncheckedCast<Object>(Parameter(Descriptor::kReceiver))")
             << ";\n";
         source_out() << "USE(" << generated_name << ");\n";
-        expected_type = TypeOracle::GetObjectType();
+        expected_type = TypeOracle::GetJSAnyType();
       } else if (param_name == "newTarget") {
         source_out() << "  TNode<Object> " << generated_name
                      << " = UncheckedCast<Object>(Parameter("
                      << "Descriptor::kJSNewTarget));\n";
         source_out() << "USE(" << generated_name << ");\n";
-        expected_type = TypeOracle::GetObjectType();
+        expected_type = TypeOracle::GetJSAnyType();
       } else if (param_name == "target") {
         source_out() << "  TNode<JSFunction> " << generated_name
                      << " = UncheckedCast<JSFunction>(Parameter("
@@ -646,24 +648,8 @@ VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
 }
 
 VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
-  VisitResult left_result;
-  {
-    Block* false_block = assembler().NewBlock(assembler().CurrentStack());
-    Binding<LocalLabel> false_binding{&LabelBindingsManager::Get(),
-                                      kFalseLabelName, LocalLabel{false_block}};
-    left_result = Visit(expr->left);
-    if (left_result.type()->IsBool()) {
-      Block* true_block = LookupSimpleLabel(kTrueLabelName);
-      assembler().Branch(true_block, false_block);
-      assembler().Bind(false_block);
-    } else if (left_result.type()->IsNever()) {
-      assembler().Bind(false_block);
-    } else if (!left_result.type()->IsConstexprBool()) {
-      ReportError(
-          "expected type bool, constexpr bool, or never on left-hand side of "
-          "operator ||");
-    }
-  }
+  StackScope outer_scope(this);
+  VisitResult left_result = Visit(expr->left);
 
   if (left_result.type()->IsConstexprBool()) {
     VisitResult right_result = Visit(expr->right);
@@ -677,38 +663,34 @@ VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
                            " || " + right_result.constexpr_value() + ")");
   }
 
-  VisitResult right_result = Visit(expr->right);
-  if (right_result.type()->IsBool()) {
-    Block* true_block = LookupSimpleLabel(kTrueLabelName);
-    Block* false_block = LookupSimpleLabel(kFalseLabelName);
-    assembler().Branch(true_block, false_block);
-    return VisitResult::NeverResult();
-  } else if (!right_result.type()->IsNever()) {
-    ReportError(
-        "expected type bool or never on right-hand side of operator ||");
+  Block* true_block = assembler().NewBlock();
+  Block* false_block = assembler().NewBlock();
+  Block* done_block = assembler().NewBlock();
+
+  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
+  GenerateBranch(left_result, true_block, false_block);
+
+  assembler().Bind(true_block);
+  VisitResult true_result = GenerateBoolConstant(true);
+  assembler().Goto(done_block);
+
+  assembler().Bind(false_block);
+  VisitResult false_result;
+  {
+    StackScope false_block_scope(this);
+    false_result = false_block_scope.Yield(
+        GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
   }
-  return right_result;
+  assembler().Goto(done_block);
+
+  assembler().Bind(done_block);
+  DCHECK_EQ(true_result, false_result);
+  return outer_scope.Yield(true_result);
 }
 
 VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
-  VisitResult left_result;
-  {
-    Block* true_block = assembler().NewBlock(assembler().CurrentStack());
-    Binding<LocalLabel> false_binding{&LabelBindingsManager::Get(),
-                                      kTrueLabelName, LocalLabel{true_block}};
-    left_result = Visit(expr->left);
-    if (left_result.type()->IsBool()) {
-      Block* false_block = LookupSimpleLabel(kFalseLabelName);
-      assembler().Branch(true_block, false_block);
-      assembler().Bind(true_block);
-    } else if (left_result.type()->IsNever()) {
-      assembler().Bind(true_block);
-    } else if (!left_result.type()->IsConstexprBool()) {
-      ReportError(
-          "expected type bool, constexpr bool, or never on left-hand side of "
-          "operator &&");
-    }
-  }
+  StackScope outer_scope(this);
+  VisitResult left_result = Visit(expr->left);
 
   if (left_result.type()->IsConstexprBool()) {
     VisitResult right_result = Visit(expr->right);
@@ -722,17 +704,29 @@ VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
                            " && " + right_result.constexpr_value() + ")");
   }
 
-  VisitResult right_result = Visit(expr->right);
-  if (right_result.type()->IsBool()) {
-    Block* true_block = LookupSimpleLabel(kTrueLabelName);
-    Block* false_block = LookupSimpleLabel(kFalseLabelName);
-    assembler().Branch(true_block, false_block);
-    return VisitResult::NeverResult();
-  } else if (!right_result.type()->IsNever()) {
-    ReportError(
-        "expected type bool or never on right-hand side of operator &&");
+  Block* true_block = assembler().NewBlock();
+  Block* false_block = assembler().NewBlock();
+  Block* done_block = assembler().NewBlock();
+
+  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
+  GenerateBranch(left_result, true_block, false_block);
+
+  assembler().Bind(true_block);
+  VisitResult true_result;
+  {
+    StackScope true_block_scope(this);
+    true_result = true_block_scope.Yield(
+        GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
   }
-  return right_result;
+  assembler().Goto(done_block);
+
+  assembler().Bind(false_block);
+  VisitResult false_result = GenerateBoolConstant(false);
+  assembler().Goto(done_block);
+
+  assembler().Bind(done_block);
+  DCHECK_EQ(true_result, false_result);
+  return outer_scope.Yield(true_result);
 }
 
 VisitResult ImplementationVisitor::Visit(IncrementDecrementExpression* expr) {
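
The rewritten || and && drop the old __True/__False label protocol: each operand is implicitly converted to bool, and both arms meet in a done block carrying a single bool result. A standalone sketch of the control flow the new lowering produces for `a || b`, with plain C++ standing in for CSA blocks (names are illustrative):

    #include <functional>

    // Branch on the left operand; materialize `true` in the true block,
    // evaluate the right operand in the false block, merge in done_block.
    bool LowerLogicalOr(const std::function<bool()>& left,
                        const std::function<bool()>& right) {
      bool phi;         // the single value done_block yields
      if (left()) {
        phi = true;     // true_block: GenerateBoolConstant(true)
      } else {
        phi = right();  // false_block: convert the rhs to bool
      }
      return phi;       // done_block: one bool for the outer stack scope
    }

The lowering of && is symmetric, with the constant false materialized in the false block.
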
@@ -1110,29 +1104,6 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
   return TypeOracle::GetNeverType();
 }
 
-VisitResult ImplementationVisitor::TemporaryUninitializedStruct(
-    const StructType* struct_type, const std::string& reason) {
-  StackRange range = assembler().TopRange(0);
-  for (const Field& f : struct_type->fields()) {
-    if (const StructType* struct_type =
-            StructType::DynamicCast(f.name_and_type.type)) {
-      range.Extend(
-          TemporaryUninitializedStruct(struct_type, reason).stack_range());
-    } else {
-      std::string descriptor = "uninitialized field '" + f.name_and_type.name +
-                               "' declared at " + PositionAsString(f.pos) +
-                               " (" + reason + ")";
-      TypeVector lowered_types = LowerType(f.name_and_type.type);
-      for (const Type* type : lowered_types) {
-        assembler().Emit(PushUninitializedInstruction{
-            TypeOracle::GetTopType(descriptor, type)});
-      }
-      range.Extend(assembler().TopRange(lowered_types.size()));
-    }
-  }
-  return VisitResult(struct_type, range);
-}
-
 VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
   size_t parameter_count = expr->label_block->parameters.names.size();
   std::vector<VisitResult> parameters;
@@ -1211,15 +1182,38 @@ VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
   return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
 }
 
+void ImplementationVisitor::CheckInitializersWellformed(
+    const std::string& aggregate_name,
+    const std::vector<Field>& aggregate_fields,
+    const std::vector<NameAndExpression>& initializers,
+    bool ignore_first_field) {
+  size_t fields_offset = ignore_first_field ? 1 : 0;
+  size_t fields_size = aggregate_fields.size() - fields_offset;
+  for (size_t i = 0; i < std::min(fields_size, initializers.size()); i++) {
+    const std::string& field_name =
+        aggregate_fields[i + fields_offset].name_and_type.name;
+    Identifier* found_name = initializers[i].name;
+    if (field_name != found_name->value) {
+      Error("Expected field name \"", field_name, "\" instead of \"",
+            found_name->value, "\"")
+          .Position(found_name->pos)
+          .Throw();
+    }
+  }
+  if (fields_size != initializers.size()) {
+    ReportError("expected ", fields_size, " initializers for ", aggregate_name,
+                " found ", initializers.size());
+  }
+}
+
 InitializerResults ImplementationVisitor::VisitInitializerResults(
-    const AggregateType* current_aggregate,
+    const ClassType* class_type,
     const std::vector<NameAndExpression>& initializers) {
   InitializerResults result;
   for (const NameAndExpression& initializer : initializers) {
     result.names.push_back(initializer.name);
     Expression* e = initializer.expression;
-    const Field& field =
-        current_aggregate->LookupField(initializer.name->value);
+    const Field& field = class_type->LookupField(initializer.name->value);
     auto field_index = field.index;
     if (SpreadExpression* s = SpreadExpression::DynamicCast(e)) {
       if (!field_index) {
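
CheckInitializersWellformed centralizes what the old InitializeAggregateHelper verified field by field: initializer names must match the fields positionally (optionally skipping the implicit first field), and the counts must agree. A standalone sketch with plain strings in place of Field and NameAndExpression (hypothetical helper, not V8 API):

    #include <algorithm>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Positional name check plus count check, mirroring the new helper.
    void CheckInitializers(const std::vector<std::string>& fields,
                           const std::vector<std::string>& initializers,
                           bool ignore_first_field) {
      size_t offset = ignore_first_field ? 1 : 0;  // e.g. the implicit map field
      size_t expected = fields.size() - offset;
      for (size_t i = 0; i < std::min(expected, initializers.size()); i++) {
        if (fields[i + offset] != initializers[i]) {
          throw std::runtime_error("expected field \"" + fields[i + offset] +
                                   "\" instead of \"" + initializers[i] + "\"");
        }
      }
      if (expected != initializers.size()) {
        throw std::runtime_error("wrong number of initializers");
      }
    }

    // CheckInitializers({"map", "x", "y"}, {"x", "y"}, true) passes;
    // reordering or omitting an initializer throws.
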
@@ -1238,54 +1232,30 @@ InitializerResults ImplementationVisitor::VisitInitializerResults(
   return result;
 }
 
-size_t ImplementationVisitor::InitializeAggregateHelper(
-    const AggregateType* aggregate_type, VisitResult allocate_result,
+void ImplementationVisitor::InitializeClass(
+    const ClassType* class_type, VisitResult allocate_result,
     const InitializerResults& initializer_results) {
-  const ClassType* current_class = ClassType::DynamicCast(aggregate_type);
-  size_t current = 0;
-  if (current_class) {
-    const ClassType* super = current_class->GetSuperClass();
-    if (super) {
-      current = InitializeAggregateHelper(super, allocate_result,
-                                          initializer_results);
-    }
+  if (const ClassType* super = class_type->GetSuperClass()) {
+    InitializeClass(super, allocate_result, initializer_results);
   }
 
-  for (Field f : aggregate_type->fields()) {
-    if (current == initializer_results.field_value_map.size()) {
-      ReportError("insufficient number of initializers for ",
-                  aggregate_type->name());
-    }
+  for (Field f : class_type->fields()) {
     VisitResult current_value =
         initializer_results.field_value_map.at(f.name_and_type.name);
-    Identifier* fieldname = initializer_results.names[current];
-    if (fieldname->value != f.name_and_type.name) {
-      CurrentSourcePosition::Scope scope(fieldname->pos);
-      ReportError("Expected fieldname \"", f.name_and_type.name,
-                  "\" instead of \"", fieldname->value, "\"");
-    }
-    if (aggregate_type->IsClassType()) {
-      if (f.index) {
-        InitializeFieldFromSpread(allocate_result, f, initializer_results);
-      } else {
-        allocate_result.SetType(aggregate_type);
-        GenerateCopy(allocate_result);
-        assembler().Emit(CreateFieldReferenceInstruction{
-            ClassType::cast(aggregate_type), f.name_and_type.name});
-        VisitResult heap_reference(
-            TypeOracle::GetReferenceType(f.name_and_type.type),
-            assembler().TopRange(2));
-        GenerateAssignToLocation(
-            LocationReference::HeapReference(heap_reference), current_value);
-      }
+    if (f.index) {
+      InitializeFieldFromSpread(allocate_result, f, initializer_results);
     } else {
-      LocationReference struct_field_ref = LocationReference::VariableAccess(
-          ProjectStructField(allocate_result, f.name_and_type.name));
-      GenerateAssignToLocation(struct_field_ref, current_value);
+      allocate_result.SetType(class_type);
+      GenerateCopy(allocate_result);
+      assembler().Emit(CreateFieldReferenceInstruction{
+          ClassType::cast(class_type), f.name_and_type.name});
+      VisitResult heap_reference(
+          TypeOracle::GetReferenceType(f.name_and_type.type),
+          assembler().TopRange(2));
+      GenerateAssignToLocation(LocationReference::HeapReference(heap_reference),
+                               current_value);
     }
-    ++current;
   }
-  return current;
 }
 
 void ImplementationVisitor::InitializeFieldFromSpread(
@@ -1304,17 +1274,6 @@ void ImplementationVisitor::InitializeFieldFromSpread(
                {field.aggregate, index.type, iterator.type()});
 }
 
-void ImplementationVisitor::InitializeAggregate(
-    const AggregateType* aggregate_type, VisitResult allocate_result,
-    const InitializerResults& initializer_results) {
-  size_t consumed_initializers = InitializeAggregateHelper(
-      aggregate_type, allocate_result, initializer_results);
-  if (consumed_initializers != initializer_results.field_value_map.size()) {
-    ReportError("more initializers than fields present in ",
-                aggregate_type->name());
-  }
-}
-
 VisitResult ImplementationVisitor::AddVariableObjectSize(
     VisitResult object_size, const ClassType* current_class,
     const InitializerResults& initializer_results) {
@@ -1397,6 +1356,11 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
     initializer_results.field_value_map[map_field.name_and_type.name] =
         object_map;
   }
+
+  CheckInitializersWellformed(class_type->name(),
+                              class_type->ComputeAllFields(),
+                              expr->initializers, !class_type->IsExtern());
+
   Arguments size_arguments;
   size_arguments.parameters.push_back(object_map);
   VisitResult object_size = GenerateCall("%GetAllocationBaseSize",
@@ -1411,7 +1375,7 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
       GenerateCall("%Allocate", allocate_arguments, {class_type}, false);
   DCHECK(allocate_result.IsOnStack());
 
-  InitializeAggregate(class_type, allocate_result, initializer_results);
+  InitializeClass(class_type, allocate_result, initializer_results);
 
   return stack_scope.Yield(allocate_result);
 }
@@ -1582,7 +1546,9 @@ namespace {
 void FailCallableLookup(const std::string& reason, const QualifiedName& name,
                         const TypeVector& parameter_types,
                         const std::vector<Binding<LocalLabel>*>& labels,
-                        const std::vector<Signature>& candidates) {
+                        const std::vector<Signature>& candidates,
+                        const std::vector<std::tuple<Generic*, const char*>>&
+                            inapplicable_generics) {
   std::stringstream stream;
   stream << "\n" << reason << ": \n  " << name << "(" << parameter_types << ")";
   if (labels.size() != 0) {
@@ -1596,10 +1562,20 @@ void FailCallableLookup(const std::string& reason, const QualifiedName& name,
     stream << "\n  " << name;
     PrintSignature(stream, signature, false);
   }
+  if (inapplicable_generics.size() != 0) {
+    stream << "\nfailed to instantiate all of these generic declarations:";
+    for (auto& failure : inapplicable_generics) {
+      Generic* generic;
+      const char* reason;
+      std::tie(generic, reason) = failure;
+      stream << "\n  " << generic->name() << " defined at "
+             << generic->Position() << ":\n    " << reason << "\n";
+    }
+  }
   ReportError(stream.str());
 }
 
-Callable* GetOrCreateSpecialization(const SpecializationKey& key) {
+Callable* GetOrCreateSpecialization(const SpecializationKey<Generic>& key) {
   if (base::Optional<Callable*> specialization =
           key.generic->specializations().Get(key.specialized_types)) {
     return *specialization;
@@ -1655,16 +1631,20 @@ Callable* ImplementationVisitor::LookupCallable(
 
   std::vector<Declarable*> overloads;
   std::vector<Signature> overload_signatures;
+  std::vector<std::tuple<Generic*, const char*>> inapplicable_generics;
   for (auto* declarable : declaration_container) {
     if (Generic* generic = Generic::DynamicCast(declarable)) {
-      base::Optional<TypeVector> inferred_specialization_types =
-          generic->InferSpecializationTypes(specialization_types,
-                                            parameter_types);
-      if (!inferred_specialization_types) continue;
+      TypeArgumentInference inference = generic->InferSpecializationTypes(
+          specialization_types, parameter_types);
+      if (inference.HasFailed()) {
+        inapplicable_generics.push_back(
+            std::make_tuple(generic, inference.GetFailureReason()));
+        continue;
+      }
       overloads.push_back(generic);
       overload_signatures.push_back(
           DeclarationVisitor::MakeSpecializedSignature(
-              SpecializationKey{generic, *inferred_specialization_types}));
+              SpecializationKey<Generic>{generic, inference.GetResult()}));
     } else if (Callable* callable = Callable::DynamicCast(declarable)) {
       overloads.push_back(callable);
       overload_signatures.push_back(callable->signature());
@@ -1674,16 +1654,12 @@ Callable* ImplementationVisitor::LookupCallable(
   std::vector<size_t> candidates;
   for (size_t i = 0; i < overloads.size(); ++i) {
     const Signature& signature = overload_signatures[i];
-    bool try_bool_context = labels.size() == 0 &&
-                            signature.return_type == TypeOracle::GetNeverType();
-    if (IsCompatibleSignature(signature, parameter_types, labels.size()) ||
-        (try_bool_context &&
-         IsCompatibleSignature(signature, parameter_types, 2))) {
+    if (IsCompatibleSignature(signature, parameter_types, labels.size())) {
       candidates.push_back(i);
     }
   }
 
-  if (overloads.empty()) {
+  if (overloads.empty() && inapplicable_generics.empty()) {
     if (silence_errors) return nullptr;
     std::stringstream stream;
     stream << "no matching declaration found for " << name;
@@ -1691,7 +1667,8 @@ Callable* ImplementationVisitor::LookupCallable(
   } else if (candidates.empty()) {
     if (silence_errors) return nullptr;
     FailCallableLookup("cannot find suitable callable with name", name,
-                       parameter_types, labels, overload_signatures);
+                       parameter_types, labels, overload_signatures,
+                       inapplicable_generics);
   }
 
   auto is_better_candidate = [&](size_t a, size_t b) {
@@ -1712,14 +1689,15 @@ Callable* ImplementationVisitor::LookupCallable(
         candidate_signatures.push_back(overload_signatures[i]);
       }
       FailCallableLookup("ambiguous callable ", name, parameter_types, labels,
-                         candidate_signatures);
+                         candidate_signatures, inapplicable_generics);
     }
   }
 
   if (Generic* generic = Generic::DynamicCast(overloads[best])) {
+    TypeArgumentInference inference = generic->InferSpecializationTypes(
+        specialization_types, parameter_types);
     result = GetOrCreateSpecialization(
-        SpecializationKey{generic, *generic->InferSpecializationTypes(
-                                       specialization_types, parameter_types)});
+        SpecializationKey<Generic>{generic, inference.GetResult()});
   } else {
     result = Callable::cast(overloads[best]);
   }
@@ -1749,14 +1727,13 @@ Callable* ImplementationVisitor::LookupCallable(
 }
 
 Method* ImplementationVisitor::LookupMethod(
-    const std::string& name, LocationReference this_reference,
+    const std::string& name, const AggregateType* receiver_type,
     const Arguments& arguments, const TypeVector& specialization_types) {
   TypeVector types(arguments.parameters.ComputeTypeVector());
-  types.insert(types.begin(), this_reference.ReferencedType());
-  return Method::cast(LookupCallable(
-      {{}, name},
-      AggregateType::cast(this_reference.ReferencedType())->Methods(name),
-      types, arguments.labels, specialization_types));
+  types.insert(types.begin(), receiver_type);
+  return Method::cast(LookupCallable({{}, name}, receiver_type->Methods(name),
+                                     types, arguments.labels,
+                                     specialization_types));
 }
 
 const Type* ImplementationVisitor::GetCommonType(const Type* left,
@@ -1783,24 +1760,36 @@ VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
 
 VisitResult ImplementationVisitor::Visit(StructExpression* expr) {
   StackScope stack_scope(this);
-  const Type* raw_type = TypeVisitor::ComputeType(expr->type);
-  if (!raw_type->IsStructType()) {
-    ReportError(*raw_type, " is not a struct but used like one");
-  }
 
-  const StructType* struct_type = StructType::cast(raw_type);
+  auto& initializers = expr->initializers;
+  std::vector<VisitResult> values;
+  std::vector<const Type*> term_argument_types;
+  values.reserve(initializers.size());
+  term_argument_types.reserve(initializers.size());
 
-  InitializerResults initialization_results =
-      ImplementationVisitor::VisitInitializerResults(struct_type,
-                                                     expr->initializers);
+  // Compute values and types of all initializer arguments
+  for (const NameAndExpression& initializer : initializers) {
+    VisitResult value = Visit(initializer.expression);
+    values.push_back(value);
+    term_argument_types.push_back(value.type());
+  }
 
-  // Push uninitialized 'this'
-  VisitResult result = TemporaryUninitializedStruct(
-      struct_type, "it's not initialized in the struct " + struct_type->name());
+  // Compute and check struct type from given struct name and argument types
+  const StructType* struct_type = TypeVisitor::ComputeTypeForStructExpression(
+      expr->type, term_argument_types);
+  CheckInitializersWellformed(struct_type->name(), struct_type->fields(),
+                              initializers);
 
-  InitializeAggregate(struct_type, result, initialization_results);
+  // Implicitly convert values and thereby build the struct on the stack
+  StackRange struct_range = assembler().TopRange(0);
+  auto& fields = struct_type->fields();
+  for (size_t i = 0; i < values.size(); i++) {
+    values[i] =
+        GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
+    struct_range.Extend(values[i].stack_range());
+  }
 
-  return stack_scope.Yield(result);
+  return stack_scope.Yield(VisitResult(struct_type, struct_range));
 }
 
 LocationReference ImplementationVisitor::GetLocationReference(
@@ -1865,7 +1854,33 @@ LocationReference ImplementationVisitor::GetLocationReference(
         LanguageServerData::AddDefinition(expr->field->pos, field.pos);
       }
       if (field.index) {
-        return LocationReference::IndexedFieldAccess(object_result, fieldname);
+        assembler().Emit(
+            CreateFieldReferenceInstruction{object_result.type(), fieldname});
+        // Fetch the length from the object
+        {
+          StackScope length_scope(this);
+          // Get a reference to the length
+          const Field* index_field = field.index.value();
+          GenerateCopy(object_result);
+          assembler().Emit(CreateFieldReferenceInstruction{
+              object_result.type(), index_field->name_and_type.name});
+          VisitResult length_reference(
+              TypeOracle::GetReferenceType(index_field->name_and_type.type),
+              assembler().TopRange(2));
+
+          // Load the length from the reference and convert it to intptr
+          VisitResult length = GenerateFetchFromLocation(
+              LocationReference::HeapReference(length_reference));
+          VisitResult converted_length =
+              GenerateCall("Convert", {{length}, {}},
+                           {TypeOracle::GetIntPtrType(), length.type()}, false);
+          DCHECK_EQ(converted_length.stack_range().Size(), 1);
+          length_scope.Yield(converted_length);
+        }
+        const Type* slice_type =
+            TypeOracle::GetSliceType(field.name_and_type.type);
+        return LocationReference::HeapSlice(
+            VisitResult(slice_type, assembler().TopRange(3)));
       } else {
         assembler().Emit(
             CreateFieldReferenceInstruction{*class_type, fieldname});
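
Access to an indexed field now yields a heap slice in place of the removed IndexedFieldAccess reference: three stack slots holding the object, the byte offset of the first element, and the length converted to intptr. A sketch of that layout and of the bounds-checked indexing an AtIndex method performs (simplified stand-in types, not V8's actual slice representation):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // The three slots the visitor leaves on the stack for a heap slice.
    struct HeapSlice {
      uintptr_t object;  // the holding heap object
      size_t offset;     // byte offset of the first element
      intptr_t length;   // element count, loaded from the index field
    };

    // slice[i] becomes a heap reference only after a bounds check.
    size_t ElementOffset(const HeapSlice& s, intptr_t i, size_t element_size) {
      assert(0 <= i && i < s.length);  // what an AtIndex method enforces
      return s.offset + static_cast<size_t>(i) * element_size;
    }
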
@@ -1883,8 +1898,13 @@ LocationReference ImplementationVisitor::GetLocationReference(
     ElementAccessExpression* expr) {
   LocationReference reference = GetLocationReference(expr->array);
   VisitResult index = Visit(expr->index);
-  if (reference.IsIndexedFieldAccess()) {
-    return LocationReference::IndexedFieldIndexedAccess(reference, index);
+  if (reference.IsHeapSlice()) {
+    Arguments arguments{{index}, {}};
+    const AggregateType* slice_type =
+        AggregateType::cast(reference.heap_slice().type());
+    Method* method = LookupMethod("AtIndex", slice_type, arguments, {});
+    return LocationReference::HeapReference(
+        GenerateCall(method, reference, arguments, {}, false));
   } else {
     return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
                                           index);
@@ -1927,8 +1947,9 @@ LocationReference ImplementationVisitor::GetLocationReference(
   }
   if (expr->generic_arguments.size() != 0) {
     Generic* generic = Declarations::LookupUniqueGeneric(name);
-    Callable* specialization = GetOrCreateSpecialization(SpecializationKey{
-        generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
+    Callable* specialization =
+        GetOrCreateSpecialization(SpecializationKey<Generic>{
+            generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
     if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
       DCHECK(!builtin->IsExternal());
       return LocationReference::Temporary(GetBuiltinCode(builtin),
@@ -1963,8 +1984,8 @@ LocationReference ImplementationVisitor::GetLocationReference(
 LocationReference ImplementationVisitor::GetLocationReference(
     DereferenceExpression* expr) {
   VisitResult ref = Visit(expr->reference);
-  const ReferenceType* type = ReferenceType::DynamicCast(ref.type());
-  if (!type) {
+  if (!StructType::MatchUnaryGeneric(ref.type(),
+                                     TypeOracle::GetReferenceGeneric())) {
     ReportError("Operator * expects a reference but found a value of type ",
                 *ref.type());
   }
@@ -1983,7 +2004,7 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
     DCHECK_EQ(1, LoweredSlotCount(reference.ReferencedType()));
     return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
   } else {
-    if (reference.IsIndexedFieldAccess()) {
+    if (reference.IsHeapSlice()) {
       ReportError(
           "fetching a value directly from an indexed field isn't allowed");
     }
@@ -2011,12 +2032,19 @@ void ImplementationVisitor::GenerateAssignToLocation(
     if (reference.binding()) {
       (*reference.binding())->SetWritten();
     }
-  } else if (reference.IsIndexedFieldAccess()) {
+  } else if (reference.IsHeapSlice()) {
     ReportError("assigning a value directly to an indexed field isn't allowed");
   } else if (reference.IsHeapReference()) {
     const Type* referenced_type = reference.ReferencedType();
     GenerateCopy(reference.heap_reference());
-    GenerateImplicitConvert(referenced_type, assignment_value);
+    VisitResult converted_assignment_value =
+        GenerateImplicitConvert(referenced_type, assignment_value);
+    if (referenced_type == TypeOracle::GetFloat64Type()) {
+      VisitResult silenced_float_value =
+          GenerateCall("Float64SilenceNaN", {{assignment_value}, {}});
+      assembler().Poke(converted_assignment_value.stack_range(),
+                       silenced_float_value.stack_range(), referenced_type);
+    }
     assembler().Emit(StoreReferenceInstruction{referenced_type});
   } else {
     DCHECK(reference.IsTemporary());
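
The new float64 store path quiets NaNs before they reach the heap: the converted value is overwritten with the result of Float64SilenceNaN, so arbitrary signaling-NaN payloads are never stored. A standalone sketch of the underlying bit manipulation for IEEE 754 binary64 (this mirrors the idea, not V8's exact implementation):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Force the quiet bit (the top fraction bit, bit 51) so every stored
    // NaN carries a quiet encoding.
    double SilenceNaN(double v) {
      if (!std::isnan(v)) return v;
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      bits |= uint64_t{1} << 51;
      std::memcpy(&v, &bits, sizeof bits);
      return v;
    }
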
@@ -2097,27 +2125,20 @@ VisitResult ImplementationVisitor::GenerateCall(
     Callable* callable, base::Optional<LocationReference> this_reference,
     Arguments arguments, const TypeVector& specialization_types,
     bool is_tailcall) {
-  // Operators used in a branching context can also be function calls that never
-  // return but have a True and False label
-  if (arguments.labels.size() == 0 &&
-      callable->signature().labels.size() == 2) {
-    base::Optional<Binding<LocalLabel>*> true_label =
-        TryLookupLabel(kTrueLabelName);
-    base::Optional<Binding<LocalLabel>*> false_label =
-        TryLookupLabel(kFalseLabelName);
-    if (!true_label || !false_label) {
-      ReportError(
-          callable->ReadableName(),
-          " does not return a value, but has to be called in a branching "
-          "context (e.g., conditional or if-condition). You can fix this by "
-          "adding \"? true : false\".");
+  const Type* return_type = callable->signature().return_type;
+
+  if (is_tailcall) {
+    if (Builtin* builtin = Builtin::DynamicCast(CurrentCallable::Get())) {
+      const Type* outer_return_type = builtin->signature().return_type;
+      if (!return_type->IsSubtypeOf(outer_return_type)) {
+        Error("Cannot tailcall, type of result is ", *return_type,
+              " but should be a subtype of ", *outer_return_type, ".");
+      }
+    } else {
+      Error("Tail calls are only allowed from builtins");
     }
-    arguments.labels.push_back(*true_label);
-    arguments.labels.push_back(*false_label);
   }
 
-  const Type* return_type = callable->signature().return_type;
-
   std::vector<VisitResult> converted_arguments;
   StackRange argument_range = assembler().TopRange(0);
   std::vector<std::string> constexpr_arguments;
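
GenerateCall now validates tail calls up front instead of wiring implicit True/False labels: only a builtin may tail-call, and the callee's return type must be a subtype of the caller's declared return type. A compact sketch of that rule over a toy subtype chain (types reduced to a parent pointer; illustrative only):

    // Walk the supertype chain of the callee's return type looking for
    // the caller's declared return type.
    struct Type {
      const Type* supertype;  // nullptr at the top of the lattice
    };

    bool IsSubtypeOf(const Type* t, const Type* super) {
      for (; t != nullptr; t = t->supertype) {
        if (t == super) return true;
      }
      return false;
    }

    // A tail call is rejected unless IsSubtypeOf(callee_return,
    // caller_return) holds; non-builtin callers are rejected outright.
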
@@ -2142,8 +2163,8 @@ VisitResult ImplementationVisitor::GenerateCall(
   if (this_reference) {
     DCHECK(callable->IsMethod());
     Method* method = Method::cast(callable);
-    // By now, the this reference should either be a variable or
-    // a temporary, in both cases the fetch of the VisitResult should succeed.
+    // By now, the this reference should be a variable, a temporary, or a
+    // slice. In any of these cases the fetch of the VisitResult should
+    // succeed.
     VisitResult this_value = this_reference->GetVisitResult();
     if (method->ShouldBeInlined()) {
       if (!this_value.type()->IsSubtypeOf(method->aggregate_type())) {
@@ -2280,9 +2301,10 @@ VisitResult ImplementationVisitor::GenerateCall(
         size_t j = 0;
         for (auto t : callable->signature().labels[i].types) {
           const Type* parameter_type = label->parameter_types[j];
-          if (parameter_type != t) {
-            ReportError("mismatch of label parameters (expected ", *t, " got ",
-                        parameter_type, " for parameter ", i + 1, ")");
+          if (!t->IsSubtypeOf(parameter_type)) {
+            ReportError("mismatch of label parameters (label expects ",
+                        *parameter_type, " but macro produces ", *t,
+                        " for parameter ", i + 1, ")");
           }
           j++;
         }
@@ -2360,6 +2382,7 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
     if (auto* loc_expr = LocationExpression::DynamicCast(expr->arguments[0])) {
       LocationReference ref = GetLocationReference(loc_expr);
       if (ref.IsHeapReference()) return scope.Yield(ref.heap_reference());
+      if (ref.IsHeapSlice()) return scope.Yield(ref.heap_slice());
     }
     ReportError("Unable to create a heap reference.");
   }
@@ -2413,7 +2436,7 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
   DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
   QualifiedName qualified_name = QualifiedName(method_name);
   Callable* callable = nullptr;
-  callable = LookupMethod(method_name, target, arguments, {});
+  callable = LookupMethod(method_name, target_type, arguments, {});
   if (GlobalContext::collect_language_server_data()) {
     LanguageServerData::AddDefinition(expr->method->name->pos,
                                       callable->IdentifierPosition());
@@ -2429,7 +2452,7 @@ VisitResult ImplementationVisitor::Visit(IntrinsicCallExpression* expr) {
   for (Expression* arg : expr->arguments)
     arguments.parameters.push_back(Visit(arg));
   return scope.Yield(
-      GenerateCall(expr->name, arguments, specialization_types, false));
+      GenerateCall(expr->name->value, arguments, specialization_types, false));
 }
 
 void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
@@ -2440,32 +2463,20 @@ void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
   assembler().Branch(true_block, false_block);
 }
 
-void ImplementationVisitor::GenerateExpressionBranch(
-    VisitResultGenerator generator, Block* true_block, Block* false_block) {
-  // Conditional expressions can either explicitly return a bit
-  // type, or they can be backed by macros that don't return but
-  // take a true and false label. By declaring the labels before
-  // visiting the conditional expression, those label-based
-  // macro conditionals will be able to find them through normal
-  // label lookups.
-  Binding<LocalLabel> true_binding{&LabelBindingsManager::Get(), kTrueLabelName,
-                                   LocalLabel{true_block}};
-  Binding<LocalLabel> false_binding{&LabelBindingsManager::Get(),
-                                    kFalseLabelName, LocalLabel{false_block}};
-  StackScope stack_scope(this);
-  VisitResult expression_result = generator();
-  if (!expression_result.type()->IsNever()) {
-    expression_result = stack_scope.Yield(
-        GenerateImplicitConvert(TypeOracle::GetBoolType(), expression_result));
-    GenerateBranch(expression_result, true_block, false_block);
-  }
+VisitResult ImplementationVisitor::GenerateBoolConstant(bool constant) {
+  return GenerateImplicitConvert(TypeOracle::GetBoolType(),
+                                 VisitResult(TypeOracle::GetConstexprBoolType(),
+                                             constant ? "true" : "false"));
 }
 
 void ImplementationVisitor::GenerateExpressionBranch(Expression* expression,
                                                      Block* true_block,
                                                      Block* false_block) {
-  GenerateExpressionBranch([&]() { return this->Visit(expression); },
-                           true_block, false_block);
+  StackScope stack_scope(this);
+  VisitResult expression_result = this->Visit(expression);
+  expression_result = stack_scope.Yield(
+      GenerateImplicitConvert(TypeOracle::GetBoolType(), expression_result));
+  GenerateBranch(expression_result, true_block, false_block);
 }
 
 VisitResult ImplementationVisitor::GenerateImplicitConvert(
@@ -2530,10 +2541,6 @@ StackRange ImplementationVisitor::LowerParameter(
       range.Extend(parameter_range);
     }
     return range;
-  } else if (type->IsReferenceType()) {
-    lowered_parameters->Push(parameter_name + ".object");
-    lowered_parameters->Push(parameter_name + ".offset");
-    return lowered_parameters->TopRange(2);
   } else {
     lowered_parameters->Push(parameter_name);
     return lowered_parameters->TopRange(1);
@@ -2663,70 +2670,6 @@ void ImplementationVisitor::Visit(Declarable* declarable) {
   }
 }
 
-namespace {
-class IfDefScope {
- public:
-  IfDefScope(std::ostream& os, std::string d) : os_(os), d_(std::move(d)) {
-    os_ << "#ifdef " << d_ << "\n";
-  }
-  ~IfDefScope() { os_ << "#endif  // " << d_ << "\n"; }
-
- private:
-  std::ostream& os_;
-  std::string d_;
-};
-
-class NamespaceScope {
- public:
-  NamespaceScope(std::ostream& os,
-                 std::initializer_list<std::string> namespaces)
-      : os_(os), d_(std::move(namespaces)) {
-    for (const std::string& s : d_) {
-      os_ << "namespace " << s << " {\n";
-    }
-  }
-  ~NamespaceScope() {
-    for (auto i = d_.rbegin(); i != d_.rend(); ++i) {
-      os_ << "}  // namespace " << *i << "\n";
-    }
-  }
-
- private:
-  std::ostream& os_;
-  std::vector<std::string> d_;
-};
-
-class IncludeGuardScope {
- public:
-  IncludeGuardScope(std::ostream& os, std::string file_name)
-      : os_(os),
-        d_("V8_GEN_TORQUE_GENERATED_" + CapifyStringWithUnderscores(file_name) +
-           "_") {
-    os_ << "#ifndef " << d_ << "\n";
-    os_ << "#define " << d_ << "\n\n";
-  }
-  ~IncludeGuardScope() { os_ << "#endif  // " << d_ << "\n"; }
-
- private:
-  std::ostream& os_;
-  std::string d_;
-};
-
-class IncludeObjectMacrosScope {
- public:
-  explicit IncludeObjectMacrosScope(std::ostream& os) : os_(os) {
-    os_ << "\n// Has to be the last include (doesn't have include guards):\n"
-           "#include \"src/objects/object-macros.h\"\n";
-  }
-  ~IncludeObjectMacrosScope() {
-    os_ << "\n#include \"src/objects/object-macros-undef.h\"\n";
-  }
-
- private:
-  std::ostream& os_;
-};
-}  // namespace
-
 void ImplementationVisitor::GenerateBuiltinDefinitions(
     const std::string& output_directory) {
   std::stringstream new_contents_stream;
@@ -2741,7 +2684,7 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(
     for (auto& declarable : GlobalContext::AllDeclarables()) {
       Builtin* builtin = Builtin::DynamicCast(declarable.get());
       if (!builtin || builtin->IsExternal()) continue;
-      int firstParameterIndex = 1;
+      size_t firstParameterIndex = 1;
       bool declareParameters = true;
       if (builtin->IsStub()) {
         new_contents_stream << "TFS(" << builtin->ExternalName();
@@ -2752,24 +2695,22 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(
               << ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
           declareParameters = false;
         } else {
-          assert(builtin->IsFixedArgsJavaScript());
+          DCHECK(builtin->IsFixedArgsJavaScript());
           // FixedArg javascript builtins need to offer the parameter
           // count.
-          int size = static_cast<int>(builtin->parameter_names().size());
-          assert(size >= 1);
-          new_contents_stream << ", " << (std::max(size - 2, 0));
+          int parameter_count =
+              static_cast<int>(builtin->signature().ExplicitCount());
+          new_contents_stream << ", " << parameter_count;
           // And the receiver is explicitly declared.
           new_contents_stream << ", kReceiver";
-          firstParameterIndex = 2;
+          firstParameterIndex = builtin->signature().implicit_count;
         }
       }
       if (declareParameters) {
-        int index = 0;
-        for (const auto& parameter : builtin->parameter_names()) {
-          if (index >= firstParameterIndex) {
-            new_contents_stream << ", k" << CamelifyString(parameter->value);
-          }
-          index++;
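+        // Declare a k<Name> constant for each remaining parameter, starting
+        // at firstParameterIndex.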
+        for (size_t i = firstParameterIndex;
+             i < builtin->parameter_names().size(); ++i) {
+          Identifier* parameter = builtin->parameter_names()[i];
+          new_contents_stream << ", k" << CamelifyString(parameter->value);
         }
       }
       new_contents_stream << ") \\\n";
@@ -2834,15 +2775,31 @@ class FieldOffsetsGenerator {
  public:
   explicit FieldOffsetsGenerator(const ClassType* type) : type_(type) {}
 
-  virtual void WriteField(const Field& f) = 0;
+  virtual void WriteField(const Field& f, const std::string& size_string) = 0;
   virtual void WriteMarker(const std::string& marker) = 0;
+  virtual void BeginPrivateOffsets() = 0;
 
   virtual ~FieldOffsetsGenerator() { CHECK(is_finished_); }
 
   void RecordOffsetFor(const Field& f) {
     CHECK(!is_finished_);
     UpdateSection(f);
-    WriteField(f);
+    // We don't know statically how much space an indexed field takes, so
+    // report it as zero.
+    std::string size_string = "0";
+    if (!f.index.has_value()) {
+      size_t field_size;
+      std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+    }
+    WriteField(f, size_string);
+
+    // Offsets for anything after an indexed field are likely to cause
+    // confusion, because the indexed field itself takes up a variable amount of
+    // space. We could choose not to emit them at all, but that might allow an
+    // inherited kSize to be accessible (and wrong), so we emit them as
+    // private.
+    if (f.index.has_value()) {
+      BeginPrivateOffsets();
+    }
   }
 
   void Finish() {
@@ -2923,17 +2880,16 @@ class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
     out_ << "TORQUE_GENERATED_" << CapifyStringWithUnderscores(type_->name())
          << "_FIELDS(V) \\\n";
   }
-  virtual void WriteField(const Field& f) {
-    size_t field_size;
-    std::string size_string;
-    std::string machine_type;
-    std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+  void WriteField(const Field& f, const std::string& size_string) override {
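+    // Emits one entry of the field-offset list macro per field, e.g.
+    // "V(kBarOffset, kTaggedSize) \" (names illustrative).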
     out_ << "V(k" << CamelifyString(f.name_and_type.name) << "Offset, "
          << size_string << ") \\\n";
   }
-  virtual void WriteMarker(const std::string& marker) {
+  void WriteMarker(const std::string& marker) override {
     out_ << "V(" << marker << ", 0) \\\n";
   }
+  void BeginPrivateOffsets() override {
+    // Can't do anything meaningful here in the macro generator.
+  }
 
  private:
   std::ostream& out_;
@@ -3025,13 +2981,15 @@ void ImplementationVisitor::GenerateClassFieldOffsets(
 
       // TODO(danno): Remove this once all classes use ClassFieldOffsetGenerator
       // to generate field offsets without the use of macros.
-      MacroFieldOffsetsGenerator g(header, type);
-      for (auto f : type->fields()) {
-        CurrentSourcePosition::Scope scope(f.pos);
-        g.RecordOffsetFor(f);
+      if (!type->GenerateCppClassDefinitions()) {
+        MacroFieldOffsetsGenerator g(header, type);
+        for (auto f : type->fields()) {
+          CurrentSourcePosition::Scope scope(f.pos);
+          g.RecordOffsetFor(f);
+        }
+        g.Finish();
+        header << "\n";
       }
-      g.Finish();
-      header << "\n";
     }
   }
   const std::string output_header_path = output_directory + "/" + file_name;
@@ -3046,11 +3004,7 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
       : FieldOffsetsGenerator(type),
         hdr_(header),
         previous_field_end_("P::kHeaderSize") {}
-  virtual void WriteField(const Field& f) {
-    size_t field_size;
-    std::string size_string;
-    std::string machine_type;
-    std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+  void WriteField(const Field& f, const std::string& size_string) override {
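+    // For a first field "bar" with size string "kTaggedSize" this emits,
+    // e.g. (names illustrative):
+    //   static constexpr int kBarOffset = P::kHeaderSize;
+    //   static constexpr int kBarOffsetEnd = kBarOffset + kTaggedSize - 1;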
     std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
     std::string field_end = field + "End";
     hdr_ << "  static constexpr int " << field << " = " << previous_field_end_
@@ -3059,10 +3013,15 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
          << size_string << " - 1;\n";
     previous_field_end_ = field_end + " + 1";
   }
-  virtual void WriteMarker(const std::string& marker) {
+  void WriteMarker(const std::string& marker) override {
     hdr_ << "  static constexpr int " << marker << " = " << previous_field_end_
          << ";\n";
   }
+  void BeginPrivateOffsets() override {
+    // The following section must re-establish public mode (currently done by
+    // GenerateClassConstructors).
+    hdr_ << " private:\n";
+  }
 
  private:
   std::ostream& hdr_;
@@ -3109,8 +3068,6 @@ class CppClassGenerator {
 };
 
 void CppClassGenerator::GenerateClass() {
-  hdr_ << "class " << name_ << ";\n\n";
-
   hdr_ << template_decl() << "\n";
   hdr_ << "class " << gen_name_ << " : public P {\n";
   hdr_ << "  static_assert(std::is_same<" << name_ << ", D>::value,\n"
@@ -3118,7 +3075,7 @@ void CppClassGenerator::GenerateClass() {
   hdr_ << "  static_assert(std::is_same<" << super_->name() << ", P>::value,\n"
        << "    \"Pass in " << super_->name()
        << " as second template parameter for " << gen_name_ << ".\");\n";
-  hdr_ << "public: \n";
+  hdr_ << " public: \n";
   hdr_ << "  using Super = P;\n";
   for (const Field& f : type_->fields()) {
     GenerateFieldAccessor(f);
@@ -3170,7 +3127,7 @@ void CppClassGenerator::GenerateClassCasts() {
 }
 
 void CppClassGenerator::GenerateClassConstructors() {
-  hdr_ << "public:\n";
+  hdr_ << " public:\n";
   hdr_ << "  template <class DAlias = D>\n";
   hdr_ << "  constexpr " << gen_name_ << "() : P() {\n";
   hdr_ << "    static_assert(std::is_base_of<" << gen_name_ << ", \n";
@@ -3282,9 +3239,9 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
   DCHECK(field_type->IsSubtypeOf(TypeOracle::GetObjectType()));
   const std::string& name = f.name_and_type.name;
   const std::string offset = "k" + CamelifyString(name) + "Offset";
-  const ClassType* class_type = ClassType::DynamicCast(field_type);
+  base::Optional<const ClassType*> class_type = field_type->ClassSupertype();
 
-  std::string type = class_type ? class_type->name() : "Object";
+  std::string type = class_type ? (*class_type)->name() : "Object";
 
   // Generate declarations in header.
   if (!class_type && field_type != TypeOracle::GetObjectType()) {
@@ -3356,20 +3313,42 @@ void ImplementationVisitor::GenerateClassDefinitions(
 
     IncludeGuardScope inline_header_guard(inline_header, basename + "-inl.h");
     inline_header << "#include \"torque-generated/class-definitions-tq.h\"\n\n";
-    inline_header << "#include \"src/objects/objects-inl.h\"\n\n";
+    inline_header << "#include \"src/objects/js-promise.h\"\n";
+    inline_header << "#include \"src/objects/module.h\"\n";
+    inline_header << "#include \"src/objects/objects-inl.h\"\n";
+    inline_header << "#include \"src/objects/script.h\"\n\n";
     IncludeObjectMacrosScope inline_header_macros(inline_header);
     NamespaceScope inline_header_namespaces(inline_header, {"v8", "internal"});
 
     implementation
         << "#include \"torque-generated/class-definitions-tq.h\"\n\n";
     implementation << "#include \"torque-generated/class-verifiers-tq.h\"\n\n";
-    implementation << "#include \"src/objects/struct-inl.h\"\n\n";
+    implementation << "#include \"src/objects/arguments-inl.h\"\n";
+    implementation << "#include \"src/objects/js-collection-inl.h\"\n";
+    implementation << "#include \"src/objects/embedder-data-array-inl.h\"\n";
+    implementation << "#include \"src/objects/js-generator-inl.h\"\n";
+    implementation << "#include \"src/objects/js-regexp-inl.h\"\n";
+    implementation
+        << "#include \"src/objects/js-regexp-string-iterator-inl.h\"\n";
+    implementation << "#include \"src/objects/literal-objects-inl.h\"\n";
+    implementation << "#include \"src/objects/microtask-inl.h\"\n";
+    implementation << "#include \"src/objects/module-inl.h\"\n";
+    implementation << "#include \"src/objects/promise-inl.h\"\n";
+    implementation << "#include \"src/objects/stack-frame-info-inl.h\"\n";
+    implementation << "#include \"src/objects/struct-inl.h\"\n";
+    implementation << "#include \"src/objects/template-objects-inl.h\"\n\n";
     implementation
         << "#include "
            "\"torque-generated/internal-class-definitions-tq-inl.h\"\n\n";
     NamespaceScope implementation_namespaces(implementation,
                                              {"v8", "internal"});
 
+    // Generate forward declarations for every class.
+    for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+      const ClassType* type = ClassType::DynamicCast(alias->type());
+      header << "class " << type->name() << ";\n";
+    }
+
     for (const TypeAlias* alias : GlobalContext::GetClasses()) {
       const ClassType* type = ClassType::DynamicCast(alias->type());
       if (type->GenerateCppClassDefinitions()) {
@@ -3724,6 +3703,16 @@ void ReportAllUnusedMacros() {
     if (macro->IsTorqueMacro() && TorqueMacro::cast(macro)->IsExportedToCSA()) {
       continue;
     }
+    // TODO(gsps): Mark methods of generic structs as used if they are used in
+    // any instantiation.
+    if (Method* method = Method::DynamicCast(macro)) {
+      if (StructType* struct_type =
+              StructType::DynamicCast(method->aggregate_type())) {
+        if (struct_type->GetSpecializedFrom().has_value()) {
+          continue;
+        }
+      }
+    }
 
     std::vector<std::string> ignored_prefixes = {"Convert<", "Cast<",
                                                  "FromConstexpr<"};
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index a572ebb93637e2..eb1a6c4452fde4 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -12,6 +12,7 @@
 #include "src/torque/cfg.h"
 #include "src/torque/declarations.h"
 #include "src/torque/global-context.h"
+#include "src/torque/type-oracle.h"
 #include "src/torque/types.h"
 #include "src/torque/utils.h"
 
@@ -52,10 +53,20 @@ class LocationReference {
   // pointer.
   static LocationReference HeapReference(VisitResult heap_reference) {
     LocationReference result;
-    DCHECK(heap_reference.type()->IsReferenceType());
+    DCHECK(StructType::MatchUnaryGeneric(heap_reference.type(),
+                                         TypeOracle::GetReferenceGeneric()));
     result.heap_reference_ = std::move(heap_reference);
     return result;
   }
+  // A reference to an array on the heap. That is, a tagged value, an offset to
+  // encode an inner pointer, and the number of elements.
+  static LocationReference HeapSlice(VisitResult heap_slice) {
+    LocationReference result;
+    DCHECK(StructType::MatchUnaryGeneric(heap_slice.type(),
+                                         TypeOracle::GetSliceGeneric()));
+    result.heap_slice_ = std::move(heap_slice);
+    return result;
+  }
   static LocationReference ArrayAccess(VisitResult base, VisitResult offset) {
     LocationReference result;
     result.eval_function_ = std::string{"[]"};
@@ -69,26 +80,6 @@ class LocationReference {
     result.eval_function_ = "." + fieldname;
     result.assign_function_ = "." + fieldname + "=";
     result.call_arguments_ = {object};
-    result.index_field_ = base::nullopt;
-    return result;
-  }
-  static LocationReference IndexedFieldIndexedAccess(
-      const LocationReference& indexed_field, VisitResult index) {
-    LocationReference result;
-    DCHECK(indexed_field.IsIndexedFieldAccess());
-    std::string fieldname = *indexed_field.index_field_;
-    result.eval_function_ = "." + fieldname + "[]";
-    result.assign_function_ = "." + fieldname + "[]=";
-    result.call_arguments_ = indexed_field.call_arguments_;
-    result.call_arguments_.push_back(index);
-    result.index_field_ = fieldname;
-    return result;
-  }
-  static LocationReference IndexedFieldAccess(VisitResult object,
-                                              std::string fieldname) {
-    LocationReference result;
-    result.call_arguments_ = {object};
-    result.index_field_ = fieldname;
     return result;
   }
 
@@ -109,16 +100,26 @@ class LocationReference {
     DCHECK(IsHeapReference());
     return *heap_reference_;
   }
+  bool IsHeapSlice() const { return heap_slice_.has_value(); }
+  const VisitResult& heap_slice() const {
+    DCHECK(IsHeapSlice());
+    return *heap_slice_;
+  }
 
   const Type* ReferencedType() const {
     if (IsHeapReference()) {
-      return ReferenceType::cast(heap_reference().type())->referenced_type();
+      return *StructType::MatchUnaryGeneric(heap_reference().type(),
+                                            TypeOracle::GetReferenceGeneric());
+    } else if (IsHeapSlice()) {
+      return *StructType::MatchUnaryGeneric(heap_slice().type(),
+                                            TypeOracle::GetSliceGeneric());
     }
     return GetVisitResult().type();
   }
 
   const VisitResult& GetVisitResult() const {
     if (IsVariableAccess()) return variable();
+    if (IsHeapSlice()) return heap_slice();
     DCHECK(IsTemporary());
     return temporary();
   }
@@ -129,13 +130,6 @@ class LocationReference {
     return *temporary_description_;
   }
 
-  bool IsArrayField() const { return index_field_.has_value(); }
-  bool IsIndexedFieldAccess() const {
-    return IsArrayField() && !IsCallAccess();
-  }
-  bool IsIndexedFieldIndexedAccess() const {
-    return IsArrayField() && IsCallAccess();
-  }
   bool IsCallAccess() const {
     bool is_call_access = eval_function_.has_value();
     DCHECK_EQ(is_call_access, assign_function_.has_value());
@@ -163,10 +157,10 @@ class LocationReference {
   base::Optional<VisitResult> temporary_;
   base::Optional<std::string> temporary_description_;
   base::Optional<VisitResult> heap_reference_;
+  base::Optional<VisitResult> heap_slice_;
   base::Optional<std::string> eval_function_;
   base::Optional<std::string> assign_function_;
   VisitResultVector call_arguments_;
-  base::Optional<std::string> index_field_;
   base::Optional<Binding<LocalValue>*> binding_;
 
   LocationReference() = default;
@@ -354,6 +348,7 @@ class ImplementationVisitor {
   void GenerateClassDefinitions(const std::string& output_directory);
   void GenerateInstanceTypes(const std::string& output_directory);
   void GenerateClassVerifiers(const std::string& output_directory);
+  void GenerateClassDebugReaders(const std::string& output_directory);
   void GenerateExportedMacrosAssembler(const std::string& output_directory);
   void GenerateCSATypes(const std::string& output_directory);
   void GenerateCppForInternalClasses(const std::string& output_directory);
@@ -361,27 +356,26 @@ class ImplementationVisitor {
   VisitResult Visit(Expression* expr);
   const Type* Visit(Statement* stmt);
 
+  void CheckInitializersWellformed(
+      const std::string& aggregate_name,
+      const std::vector<Field>& aggregate_fields,
+      const std::vector<NameAndExpression>& initializers,
+      bool ignore_first_field = false);
+
   InitializerResults VisitInitializerResults(
-      const AggregateType* aggregate,
+      const ClassType* class_type,
       const std::vector<NameAndExpression>& expressions);
 
   void InitializeFieldFromSpread(VisitResult object, const Field& field,
                                  const InitializerResults& initializer_results);
 
-  size_t InitializeAggregateHelper(
-      const AggregateType* aggregate_type, VisitResult allocate_result,
-      const InitializerResults& initializer_results);
-
   VisitResult AddVariableObjectSize(
       VisitResult object_size, const ClassType* current_class,
       const InitializerResults& initializer_results);
 
-  void InitializeAggregate(const AggregateType* aggregate_type,
-                           VisitResult allocate_result,
-                           const InitializerResults& initializer_results);
+  void InitializeClass(const ClassType* class_type, VisitResult allocate_result,
+                       const InitializerResults& initializer_results);
 
-  VisitResult TemporaryUninitializedStruct(const StructType* struct_type,
-                                           const std::string& reason);
   VisitResult Visit(StructExpression* decl);
 
   LocationReference GetLocationReference(Expression* location);
@@ -570,7 +564,8 @@ class ImplementationVisitor {
                            const Arguments& arguments,
                            const TypeVector& specialization_types);
 
-  Method* LookupMethod(const std::string& name, LocationReference target,
+  Method* LookupMethod(const std::string& name,
+                       const AggregateType* receiver_type,
                        const Arguments& arguments,
                        const TypeVector& specialization_types);
 
@@ -608,9 +603,8 @@ class ImplementationVisitor {
   void GenerateBranch(const VisitResult& condition, Block* true_block,
                       Block* false_block);
 
-  using VisitResultGenerator = std::function<VisitResult()>;
-  void GenerateExpressionBranch(VisitResultGenerator, Block* true_block,
-                                Block* false_block);
+  VisitResult GenerateBoolConstant(bool constant);
+
   void GenerateExpressionBranch(Expression* expression, Block* true_block,
                                 Block* false_block);
 
diff --git a/deps/v8/src/torque/instructions.cc b/deps/v8/src/torque/instructions.cc
index 36a22ee8fa1c94..5bc2149f414b91 100644
--- a/deps/v8/src/torque/instructions.cc
+++ b/deps/v8/src/torque/instructions.cc
@@ -133,7 +133,7 @@ void CallCsaMacroInstruction::TypeInstruction(Stack<const Type*>* stack,
 
   if (catch_block) {
     Stack<const Type*> catch_stack = *stack;
-    catch_stack.Push(TypeOracle::GetObjectType());
+    catch_stack.Push(TypeOracle::GetJSAnyType());
     (*catch_block)->SetInputTypes(catch_stack);
   }
 
@@ -170,7 +170,7 @@ void CallCsaMacroAndBranchInstruction::TypeInstruction(
 
   if (catch_block) {
     Stack<const Type*> catch_stack = *stack;
-    catch_stack.Push(TypeOracle::GetObjectType());
+    catch_stack.Push(TypeOracle::GetJSAnyType());
     (*catch_block)->SetInputTypes(catch_stack);
   }
 
@@ -201,7 +201,7 @@ void CallBuiltinInstruction::TypeInstruction(Stack<const Type*>* stack,
 
   if (catch_block) {
     Stack<const Type*> catch_stack = *stack;
-    catch_stack.Push(TypeOracle::GetObjectType());
+    catch_stack.Push(TypeOracle::GetJSAnyType());
     (*catch_block)->SetInputTypes(catch_stack);
   }
 
@@ -236,7 +236,7 @@ void CallRuntimeInstruction::TypeInstruction(Stack<const Type*>* stack,
 
   if (catch_block) {
     Stack<const Type*> catch_stack = *stack;
-    catch_stack.Push(TypeOracle::GetObjectType());
+    catch_stack.Push(TypeOracle::GetJSAnyType());
     (*catch_block)->SetInputTypes(catch_stack);
   }
 
@@ -292,15 +292,14 @@ void UnsafeCastInstruction::TypeInstruction(Stack<const Type*>* stack,
 
 void CreateFieldReferenceInstruction::TypeInstruction(
     Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
-  ExpectSubtype(stack->Pop(), class_type);
-  stack->Push(TypeOracle::GetHeapObjectType());
+  ExpectSubtype(stack->Top(), type);
   stack->Push(TypeOracle::GetIntPtrType());
 }
 
 void LoadReferenceInstruction::TypeInstruction(Stack<const Type*>* stack,
                                                ControlFlowGraph* cfg) const {
   ExpectType(TypeOracle::GetIntPtrType(), stack->Pop());
-  ExpectType(TypeOracle::GetHeapObjectType(), stack->Pop());
+  ExpectSubtype(stack->Pop(), TypeOracle::GetHeapObjectType());
   DCHECK_EQ(std::vector<const Type*>{type}, LowerType(type));
   stack->Push(type);
 }
@@ -309,7 +308,7 @@ void StoreReferenceInstruction::TypeInstruction(Stack<const Type*>* stack,
                                                 ControlFlowGraph* cfg) const {
   ExpectSubtype(stack->Pop(), type);
   ExpectType(TypeOracle::GetIntPtrType(), stack->Pop());
-  ExpectType(TypeOracle::GetHeapObjectType(), stack->Pop());
+  ExpectSubtype(stack->Pop(), TypeOracle::GetHeapObjectType());
 }
 
 bool CallRuntimeInstruction::IsBlockTerminator() const {
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index 3136b583217458..fe3b26b86ff3d4 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -206,10 +206,9 @@ struct NamespaceConstantInstruction : InstructionBase {
 
 struct CreateFieldReferenceInstruction : InstructionBase {
   TORQUE_INSTRUCTION_BOILERPLATE()
-  CreateFieldReferenceInstruction(const ClassType* class_type,
-                                  std::string field_name)
-      : class_type(class_type), field_name(std::move(field_name)) {}
-  const ClassType* class_type;
+  CreateFieldReferenceInstruction(const Type* type, std::string field_name)
+      : type(type), field_name(std::move(field_name)) {}
+  const Type* type;
   std::string field_name;
 };
 
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index a3da95c7471afd..3968b001fb42f7 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -53,6 +53,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
   if (options.force_assert_statements) {
     GlobalContext::SetForceAssertStatements();
   }
+  TargetArchitecture::Scope target_architecture(options.force_32bit_output);
   TypeOracle::Scope type_oracle;
 
   // Two-step process of predeclaration + resolution allows to resolve type
@@ -83,6 +84,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
   implementation_visitor.GeneratePrintDefinitions(output_directory);
   implementation_visitor.GenerateClassDefinitions(output_directory);
   implementation_visitor.GenerateClassVerifiers(output_directory);
+  implementation_visitor.GenerateClassDebugReaders(output_directory);
   implementation_visitor.GenerateExportedMacrosAssembler(output_directory);
   implementation_visitor.GenerateCSATypes(output_directory);
   implementation_visitor.GenerateInstanceTypes(output_directory);
diff --git a/deps/v8/src/torque/torque-compiler.h b/deps/v8/src/torque/torque-compiler.h
index 32680986fd1425..df81d60d3eee8f 100644
--- a/deps/v8/src/torque/torque-compiler.h
+++ b/deps/v8/src/torque/torque-compiler.h
@@ -24,6 +24,12 @@ struct TorqueCompilerOptions {
   // language server support for statements inside asserts, this flag
   // can force generate them.
   bool force_assert_statements = false;
+
+  // Forge (Google3) can only run 64-bit executables. As Torque runs as part
+  // of the build process, we need a "cross-compile" mode when we target 32-bit
+  // architectures. Note that this is not needed in Chromium/V8 land, since we
+  // always build with the same bit width as the target architecture.
+  bool force_32bit_output = false;
 };
 
 struct TorqueCompilerResult {
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 0a371b79f94353..d9973dde3c82d1 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -108,13 +108,12 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
     ParseResultHolder<NameAndExpression>::id =
         ParseResultTypeId::kNameAndExpression;
 template <>
-V8_EXPORT_PRIVATE const ParseResultTypeId
-    ParseResultHolder<ConditionalAnnotation>::id =
-        ParseResultTypeId::kConditionalAnnotation;
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<Annotation>::id =
+    ParseResultTypeId::kAnnotation;
 template <>
 V8_EXPORT_PRIVATE const ParseResultTypeId
-    ParseResultHolder<base::Optional<ConditionalAnnotation>>::id =
-        ParseResultTypeId::kOptionalConditionalAnnotation;
+    ParseResultHolder<std::vector<Annotation>>::id =
+        ParseResultTypeId::kVectorOfAnnotation;
 template <>
 V8_EXPORT_PRIVATE const ParseResultTypeId
     ParseResultHolder<ClassFieldExpression>::id =
@@ -360,7 +359,7 @@ base::Optional<ParseResult> MakeBinaryOperator(
 
 base::Optional<ParseResult> MakeIntrinsicCallExpression(
     ParseResultIterator* child_results) {
-  auto callee = child_results->NextAs<std::string>();
+  auto callee = child_results->NextAs<Identifier*>();
   auto generic_arguments =
       child_results->NextAs<std::vector<TypeExpression*>>();
   auto args = child_results->NextAs<std::vector<Expression*>>();
@@ -472,29 +471,27 @@ base::Optional<ParseResult> MakeExternalMacro(
   auto operator_name = child_results->NextAs<base::Optional<std::string>>();
   auto external_assembler_name =
       child_results->NextAs<base::Optional<std::string>>();
-  auto name = child_results->NextAs<std::string>();
+  auto name = child_results->NextAs<Identifier*>();
   auto generic_parameters = child_results->NextAs<GenericParameters>();
   LintGenericParameters(generic_parameters);
 
   auto args = child_results->NextAs<ParameterList>();
   auto return_type = child_results->NextAs<TypeExpression*>();
   auto labels = child_results->NextAs<LabelAndTypesVector>();
-  MacroDeclaration* macro = MakeNode<ExternalMacroDeclaration>(
+
+  Declaration* result = MakeNode<ExternalMacroDeclaration>(
       transitioning,
       external_assembler_name ? *external_assembler_name : "CodeStubAssembler",
       name, operator_name, args, return_type, labels);
-  Declaration* result;
-  if (generic_parameters.empty()) {
-    result = MakeNode<StandardDeclaration>(macro, base::nullopt);
-  } else {
-    result = MakeNode<GenericDeclaration>(macro, generic_parameters);
+  if (!generic_parameters.empty()) {
+    Error("External builtins cannot be generic.");
   }
   return ParseResult{result};
 }
 
 base::Optional<ParseResult> MakeIntrinsicDeclaration(
     ParseResultIterator* child_results) {
-  auto name = child_results->NextAs<std::string>();
+  auto name = child_results->NextAs<Identifier*>();
   auto generic_parameters = child_results->NextAs<GenericParameters>();
   LintGenericParameters(generic_parameters);
 
@@ -502,19 +499,17 @@ base::Optional<ParseResult> MakeIntrinsicDeclaration(
   auto return_type = child_results->NextAs<TypeExpression*>();
   auto body = child_results->NextAs<base::Optional<Statement*>>();
   LabelAndTypesVector labels;
-  CallableNode* callable = nullptr;
+  CallableDeclaration* declaration;
   if (body) {
-    callable = MakeNode<TorqueMacroDeclaration>(
+    declaration = MakeNode<TorqueMacroDeclaration>(
         false, name, base::Optional<std::string>{}, args, return_type, labels,
-        false);
+        false, body);
   } else {
-    callable = MakeNode<IntrinsicDeclaration>(name, args, return_type);
+    declaration = MakeNode<IntrinsicDeclaration>(name, args, return_type);
   }
-  Declaration* result;
-  if (generic_parameters.empty()) {
-    result = MakeNode<StandardDeclaration>(callable, body);
-  } else {
-    result = MakeNode<GenericDeclaration>(callable, generic_parameters, body);
+  Declaration* result = declaration;
+  if (!generic_parameters.empty()) {
+    result = MakeNode<GenericDeclaration>(generic_parameters, declaration);
   }
   return ParseResult{result};
 }
@@ -524,8 +519,8 @@ base::Optional<ParseResult> MakeTorqueMacroDeclaration(
   auto export_to_csa = child_results->NextAs<bool>();
   auto transitioning = child_results->NextAs<bool>();
   auto operator_name = child_results->NextAs<base::Optional<std::string>>();
-  auto name = child_results->NextAs<std::string>();
-  if (!IsUpperCamelCase(name)) {
+  auto name = child_results->NextAs<Identifier*>();
+  if (!IsUpperCamelCase(name->value)) {
     NamingConventionError("Macro", name, "UpperCamelCase");
   }
 
@@ -536,16 +531,15 @@ base::Optional<ParseResult> MakeTorqueMacroDeclaration(
   auto return_type = child_results->NextAs<TypeExpression*>();
   auto labels = child_results->NextAs<LabelAndTypesVector>();
   auto body = child_results->NextAs<base::Optional<Statement*>>();
-  MacroDeclaration* macro =
-      MakeNode<TorqueMacroDeclaration>(transitioning, name, operator_name, args,
-                                       return_type, labels, export_to_csa);
-  Declaration* result;
+  CallableDeclaration* declaration = MakeNode<TorqueMacroDeclaration>(
+      transitioning, name, operator_name, args, return_type, labels,
+      export_to_csa, body);
+  Declaration* result = declaration;
   if (generic_parameters.empty()) {
     if (!body) ReportError("A non-generic declaration needs a body.");
-    result = MakeNode<StandardDeclaration>(macro, *body);
   } else {
     if (export_to_csa) ReportError("Cannot export generics to CSA.");
-    result = MakeNode<GenericDeclaration>(macro, generic_parameters, body);
+    result = MakeNode<GenericDeclaration>(generic_parameters, declaration);
   }
   return ParseResult{result};
 }
@@ -554,8 +548,8 @@ base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
     ParseResultIterator* child_results) {
   auto transitioning = child_results->NextAs<bool>();
   auto javascript_linkage = child_results->NextAs<bool>();
-  auto name = child_results->NextAs<std::string>();
-  if (!IsUpperCamelCase(name)) {
+  auto name = child_results->NextAs<Identifier*>();
+  if (!IsUpperCamelCase(name->value)) {
     NamingConventionError("Builtin", name, "UpperCamelCase");
   }
 
@@ -565,14 +559,13 @@ base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
   auto args = child_results->NextAs<ParameterList>();
   auto return_type = child_results->NextAs<TypeExpression*>();
   auto body = child_results->NextAs<base::Optional<Statement*>>();
-  BuiltinDeclaration* builtin = MakeNode<TorqueBuiltinDeclaration>(
-      transitioning, javascript_linkage, name, args, return_type);
-  Declaration* result;
+  CallableDeclaration* declaration = MakeNode<TorqueBuiltinDeclaration>(
+      transitioning, javascript_linkage, name, args, return_type, body);
+  Declaration* result = declaration;
   if (generic_parameters.empty()) {
     if (!body) ReportError("A non-generic declaration needs a body.");
-    result = MakeNode<StandardDeclaration>(builtin, *body);
   } else {
-    result = MakeNode<GenericDeclaration>(builtin, generic_parameters, body);
+    result = MakeNode<GenericDeclaration>(generic_parameters, declaration);
   }
   return ParseResult{result};
 }
@@ -649,8 +642,8 @@ base::Optional<ParseResult> MakeMethodDeclaration(
     ParseResultIterator* child_results) {
   auto transitioning = child_results->NextAs<bool>();
   auto operator_name = child_results->NextAs<base::Optional<std::string>>();
-  auto name = child_results->NextAs<std::string>();
-  if (!IsUpperCamelCase(name)) {
+  auto name = child_results->NextAs<Identifier*>();
+  if (!IsUpperCamelCase(name->value)) {
     NamingConventionError("Method", name, "UpperCamelCase");
   }
 
@@ -658,39 +651,66 @@ base::Optional<ParseResult> MakeMethodDeclaration(
   auto return_type = child_results->NextAs<TypeExpression*>();
   auto labels = child_results->NextAs<LabelAndTypesVector>();
   auto body = child_results->NextAs<Statement*>();
-  MacroDeclaration* macro = MakeNode<TorqueMacroDeclaration>(
-      transitioning, name, operator_name, args, return_type, labels, false);
-  Declaration* result = MakeNode<StandardDeclaration>(macro, body);
+  Declaration* result =
+      MakeNode<TorqueMacroDeclaration>(transitioning, name, operator_name, args,
+                                       return_type, labels, false, body);
   return ParseResult{result};
 }
 
 class AnnotationSet {
  public:
   AnnotationSet(ParseResultIterator* iter,
-                const std::set<std::string>& allowed) {
-    auto list = iter->NextAs<std::vector<Identifier*>>();
-    for (const Identifier* i : list) {
-      if (allowed.find(i->value) == allowed.end()) {
-        Lint("Annotation ", i->value, " is not allowed here").Position(i->pos);
-      }
-      if (!set_.insert(i->value).second) {
-        Lint("Duplicate annotation ", i->value).Position(i->pos);
+                const std::set<std::string>& allowed_without_param,
+                const std::set<std::string>& allowed_with_param) {
+    auto list = iter->NextAs<std::vector<Annotation>>();
+    for (const Annotation& a : list) {
+      if (a.param.has_value()) {
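+        // Parameterized annotations such as "@if(Flag)" may be repeated;
+        // every parameter seen for a name is collected.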
+        if (allowed_with_param.find(a.name->value) ==
+            allowed_with_param.end()) {
+          const char* error_message =
+              allowed_without_param.find(a.name->value) ==
+                      allowed_without_param.end()
+                  ? " is not allowed here"
+                  : " cannot have parameter here";
+          Lint("Annotation ", a.name->value, error_message)
+              .Position(a.name->pos);
+        }
+        map_[a.name->value].push_back(*a.param);
+      } else {
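+        // Parameterless annotations may appear at most once per declaration.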
+        if (allowed_without_param.find(a.name->value) ==
+            allowed_without_param.end()) {
+          const char* error_message =
+              allowed_with_param.find(a.name->value) == allowed_with_param.end()
+                  ? " is not allowed here"
+                  : " requires a parameter here";
+          Lint("Annotation ", a.name->value, error_message)
+              .Position(a.name->pos);
+        }
+        if (!set_.insert(a.name->value).second) {
+          Lint("Duplicate annotation ", a.name->value).Position(a.name->pos);
+        }
       }
     }
   }
 
   bool Contains(const std::string& s) { return set_.find(s) != set_.end(); }
+  const std::vector<std::string>& GetParams(const std::string& s) {
+    return map_[s];
+  }
 
  private:
   std::set<std::string> set_;
+  std::map<std::string, std::vector<std::string>> map_;
 };
 
 base::Optional<ParseResult> MakeClassDeclaration(
     ParseResultIterator* child_results) {
   AnnotationSet annotations(
-      child_results, {"@generatePrint", "@noVerifier", "@abstract",
-                      "@dirtyInstantiatedAbstractClass",
-                      "@hasSameInstanceTypeAsParent", "@generateCppClass"});
+      child_results,
+      {"@generatePrint", "@noVerifier", "@abstract",
+       "@dirtyInstantiatedAbstractClass", "@hasSameInstanceTypeAsParent",
+       "@generateCppClass"},
+      {});
   ClassFlags flags = ClassFlag::kNone;
   bool generate_print = annotations.Contains("@generatePrint");
   if (generate_print) flags |= ClassFlag::kGeneratePrint;
@@ -726,15 +746,18 @@ base::Optional<ParseResult> MakeClassDeclaration(
 
   // Filter to only include fields that should be present based on decoration.
   std::vector<ClassFieldExpression> fields;
-  std::copy_if(fields_raw.begin(), fields_raw.end(), std::back_inserter(fields),
-               [](const ClassFieldExpression& exp) {
-                 if (!exp.conditional.has_value()) return true;
-                 const ConditionalAnnotation& conditional = *exp.conditional;
-                 return conditional.type == ConditionalAnnotationType::kPositive
-                            ? BuildFlags::GetFlag(conditional.condition, "@if")
-                            : !BuildFlags::GetFlag(conditional.condition,
-                                                   "@ifnot");
-               });
+  std::copy_if(
+      fields_raw.begin(), fields_raw.end(), std::back_inserter(fields),
+      [](const ClassFieldExpression& exp) {
+        for (const ConditionalAnnotation& condition : exp.conditions) {
+          if (condition.type == ConditionalAnnotationType::kPositive
+                  ? !BuildFlags::GetFlag(condition.condition, "@if")
+                  : BuildFlags::GetFlag(condition.condition, "@ifnot")) {
+            return false;
+          }
+        }
+        return true;
+      });
 
   Declaration* result = MakeNode<ClassDeclaration>(
       name, flags, std::move(extends), std::move(generates), std::move(methods),
@@ -756,6 +779,7 @@ base::Optional<ParseResult> MakeNamespaceDeclaration(
 
 base::Optional<ParseResult> MakeSpecializationDeclaration(
     ParseResultIterator* child_results) {
+  auto transitioning = child_results->NextAs<bool>();
   auto name = child_results->NextAs<Identifier*>();
   auto generic_parameters =
       child_results->NextAs<std::vector<TypeExpression*>>();
@@ -765,8 +789,8 @@ base::Optional<ParseResult> MakeSpecializationDeclaration(
   auto body = child_results->NextAs<Statement*>();
   CheckNotDeferredStatement(body);
   Declaration* result = MakeNode<SpecializationDeclaration>(
-      std::move(name), std::move(generic_parameters), std::move(parameters),
-      return_type, std::move(labels), body);
+      transitioning, std::move(name), std::move(generic_parameters),
+      std::move(parameters), return_type, std::move(labels), body);
   return ParseResult{result};
 }
 
@@ -817,19 +841,16 @@ base::Optional<ParseResult> MakeExternalBuiltin(
     ParseResultIterator* child_results) {
   auto transitioning = child_results->NextAs<bool>();
   auto js_linkage = child_results->NextAs<bool>();
-  auto name = child_results->NextAs<std::string>();
+  auto name = child_results->NextAs<Identifier*>();
   auto generic_parameters = child_results->NextAs<GenericParameters>();
   LintGenericParameters(generic_parameters);
 
   auto args = child_results->NextAs<ParameterList>();
   auto return_type = child_results->NextAs<TypeExpression*>();
-  BuiltinDeclaration* builtin = MakeNode<ExternalBuiltinDeclaration>(
+  Declaration* result = MakeNode<ExternalBuiltinDeclaration>(
       transitioning, js_linkage, name, args, return_type);
-  Declaration* result;
-  if (generic_parameters.empty()) {
-    result = MakeNode<StandardDeclaration>(builtin, base::nullopt);
-  } else {
-    result = MakeNode<GenericDeclaration>(builtin, generic_parameters);
+  if (!generic_parameters.empty()) {
+    Error("External builtins cannot be generic.");
   }
   return ParseResult{result};
 }
@@ -837,12 +858,11 @@ base::Optional<ParseResult> MakeExternalBuiltin(
 base::Optional<ParseResult> MakeExternalRuntime(
     ParseResultIterator* child_results) {
   auto transitioning = child_results->NextAs<bool>();
-  auto name = child_results->NextAs<std::string>();
+  auto name = child_results->NextAs<Identifier*>();
   auto args = child_results->NextAs<ParameterList>();
   auto return_type = child_results->NextAs<TypeExpression*>();
-  ExternalRuntimeDeclaration* runtime = MakeNode<ExternalRuntimeDeclaration>(
+  Declaration* result = MakeNode<ExternalRuntimeDeclaration>(
       transitioning, name, args, return_type);
-  Declaration* result = MakeNode<StandardDeclaration>(runtime, base::nullopt);
   return ParseResult{result};
 }
 
@@ -879,7 +899,11 @@ base::Optional<ParseResult> MakeFunctionTypeExpression(
 base::Optional<ParseResult> MakeReferenceTypeExpression(
     ParseResultIterator* child_results) {
   auto referenced_type = child_results->NextAs<TypeExpression*>();
-  TypeExpression* result = MakeNode<ReferenceTypeExpression>(referenced_type);
+  std::vector<std::string> namespace_qualification{
+      TORQUE_INTERNAL_NAMESPACE_STRING};
+  std::vector<TypeExpression*> generic_arguments{referenced_type};
+  TypeExpression* result = MakeNode<BasicTypeExpression>(
+      namespace_qualification, REFERENCE_TYPE_STRING, generic_arguments);
   return ParseResult{result};
 }
 
@@ -1141,7 +1165,7 @@ base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
   ParameterList parameters;
   parameters.names.push_back(MakeNode<Identifier>(variable));
   parameters.types.push_back(MakeNode<BasicTypeExpression>(
-      std::vector<std::string>{}, "Object", std::vector<TypeExpression*>{}));
+      std::vector<std::string>{}, "JSAny", std::vector<TypeExpression*>{}));
   parameters.has_varargs = false;
   LabelBlock* result = MakeNode<LabelBlock>(
       MakeNode<Identifier>(kCatchLabelName), std::move(parameters), body);
@@ -1327,22 +1351,22 @@ base::Optional<ParseResult> MakeNameAndExpressionFromExpression(
   ReportError("Constructor parameters need to be named.");
 }
 
-base::Optional<ParseResult> MakeConditionalAnnotation(
-    ParseResultIterator* child_results) {
-  auto type_str = child_results->NextAs<Identifier*>()->value;
-  DCHECK(type_str == "@if" || type_str == "@ifnot");
-  ConditionalAnnotationType type = type_str == "@if"
-                                       ? ConditionalAnnotationType::kPositive
-                                       : ConditionalAnnotationType::kNegative;
-  auto condition = child_results->NextAs<std::string>();
-  return ParseResult{ConditionalAnnotation{condition, type}};
+base::Optional<ParseResult> MakeAnnotation(ParseResultIterator* child_results) {
+  return ParseResult{
+      Annotation{child_results->NextAs<Identifier*>(),
+                 child_results->NextAs<base::Optional<std::string>>()}};
 }
 
 base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
-  auto conditional =
-      child_results->NextAs<base::Optional<ConditionalAnnotation>>();
-  AnnotationSet annotations(child_results, {"@noVerifier"});
+  AnnotationSet annotations(child_results, {"@noVerifier"}, {"@if", "@ifnot"});
   bool generate_verify = !annotations.Contains("@noVerifier");
+  std::vector<ConditionalAnnotation> conditions;
+  for (const std::string& condition : annotations.GetParams("@if")) {
+    conditions.push_back({condition, ConditionalAnnotationType::kPositive});
+  }
+  for (const std::string& condition : annotations.GetParams("@ifnot")) {
+    conditions.push_back({condition, ConditionalAnnotationType::kNegative});
+  }
   auto weak = child_results->NextAs<bool>();
   auto const_qualified = child_results->NextAs<bool>();
   auto name = child_results->NextAs<Identifier*>();
@@ -1350,7 +1374,7 @@ base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
   auto type = child_results->NextAs<TypeExpression*>();
   return ParseResult{ClassFieldExpression{{name, type},
                                           index,
-                                          conditional,
+                                          std::move(conditions),
                                           weak,
                                           const_qualified,
                                           generate_verify}};
@@ -1479,15 +1503,12 @@ struct TorqueGrammar : Grammar {
   Symbol name = {Rule({&identifier}, MakeIdentifier)};
 
   // Result: Identifier*
-  Symbol annotation = {
+  Symbol annotationName = {
       Rule({Pattern(MatchAnnotation)}, MakeIdentifierFromMatchedInput)};
 
-  // Result: std::vector<Identifier*>
-  Symbol* annotations = List<Identifier*>(&annotation);
-
-  // Result: std::string
+  // Result: Identifier*
   Symbol intrinsicName = {
-      Rule({Pattern(MatchIntrinsicName)}, YieldMatchedInput)};
+      Rule({Pattern(MatchIntrinsicName)}, MakeIdentifierFromMatchedInput)};
 
   // Result: std::string
   Symbol stringLiteral = {
@@ -1501,6 +1522,22 @@ struct TorqueGrammar : Grammar {
       Rule({Pattern(MatchDecimalLiteral)}, YieldMatchedInput),
       Rule({Pattern(MatchHexLiteral)}, YieldMatchedInput)};
 
+  // Result: std::string
+  Symbol annotationParameter = {Rule({&identifier}), Rule({&decimalLiteral}),
+                                Rule({&externalString})};
+
+  // Result: std::string
+  Symbol annotationParameters = {
+      Rule({Token("("), &annotationParameter, Token(")")})};
+
+  // Result: Annotation
+  Symbol annotation = {
+      Rule({&annotationName, Optional<std::string>(&annotationParameters)},
+           MakeAnnotation)};
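+  // For example, "@if(SOME_FLAG)" parses as an Annotation named "@if" with
+  // parameter "SOME_FLAG" (flag name illustrative).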
+
+  // Result: std::vector<Annotation>
+  Symbol* annotations = List<Annotation>(&annotation);
+
   // Result: TypeList
   Symbol* typeList = List<TypeExpression*>(&type, Token(","));
 
@@ -1578,14 +1615,8 @@ struct TorqueGrammar : Grammar {
   Symbol* optionalArraySpecifier =
       Optional<std::string>(Sequence({Token("["), &identifier, Token("]")}));
 
-  // Result: ConditionalAnnotation
-  Symbol conditionalAnnotation = {
-      Rule({OneOf({"@if", "@ifnot"}), Token("("), &identifier, Token(")")},
-           MakeConditionalAnnotation)};
-
   Symbol classField = {
-      Rule({Optional<ConditionalAnnotation>(&conditionalAnnotation),
-            annotations, CheckIf(Token("weak")), CheckIf(Token("const")), &name,
+      Rule({annotations, CheckIf(Token("weak")), CheckIf(Token("const")), &name,
             optionalArraySpecifier, Token(":"), &type, Token(";")},
            MakeClassField)};
 
@@ -1857,8 +1888,8 @@ struct TorqueGrammar : Grammar {
   Symbol method = {Rule(
       {CheckIf(Token("transitioning")),
        Optional<std::string>(Sequence({Token("operator"), &externalString})),
-       &identifier, &parameterListNoVararg, &optionalReturnType,
-       optionalLabelList, &block},
+       &name, &parameterListNoVararg, &optionalReturnType, optionalLabelList,
+       &block},
       MakeMethodDeclaration)};
 
   // Result: std::vector<Declaration*>
@@ -1900,34 +1931,34 @@ struct TorqueGrammar : Grammar {
             Optional<std::string>(
                 Sequence({Token("operator"), &externalString})),
             Token("macro"),
-            Optional<std::string>(Sequence({&identifier, Token("::")})),
-            &identifier, TryOrDefault<GenericParameters>(&genericParameters),
+            Optional<std::string>(Sequence({&identifier, Token("::")})), &name,
+            TryOrDefault<GenericParameters>(&genericParameters),
             &typeListMaybeVarArgs, &optionalReturnType, optionalLabelList,
             Token(";")},
            AsSingletonVector<Declaration*, MakeExternalMacro>()),
       Rule({Token("extern"), CheckIf(Token("transitioning")),
-            CheckIf(Token("javascript")), Token("builtin"), &identifier,
+            CheckIf(Token("javascript")), Token("builtin"), &name,
             TryOrDefault<GenericParameters>(&genericParameters),
             &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
            AsSingletonVector<Declaration*, MakeExternalBuiltin>()),
-      Rule(
-          {Token("extern"), CheckIf(Token("transitioning")), Token("runtime"),
-           &identifier, &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
-          AsSingletonVector<Declaration*, MakeExternalRuntime>()),
+      Rule({Token("extern"), CheckIf(Token("transitioning")), Token("runtime"),
+            &name, &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
+           AsSingletonVector<Declaration*, MakeExternalRuntime>()),
       Rule({CheckIf(Token("@export")), CheckIf(Token("transitioning")),
             Optional<std::string>(
                 Sequence({Token("operator"), &externalString})),
-            Token("macro"), &identifier,
+            Token("macro"), &name,
             TryOrDefault<GenericParameters>(&genericParameters),
             &parameterListNoVararg, &optionalReturnType, optionalLabelList,
             &optionalBody},
            AsSingletonVector<Declaration*, MakeTorqueMacroDeclaration>()),
       Rule({CheckIf(Token("transitioning")), CheckIf(Token("javascript")),
-            Token("builtin"), &identifier,
+            Token("builtin"), &name,
             TryOrDefault<GenericParameters>(&genericParameters),
             &parameterListAllowVararg, &optionalReturnType, &optionalBody},
            AsSingletonVector<Declaration*, MakeTorqueBuiltinDeclaration>()),
-      Rule({&name, &genericSpecializationTypeList, &parameterListAllowVararg,
+      Rule({CheckIf(Token("transitioning")), &name,
+            &genericSpecializationTypeList, &parameterListAllowVararg,
             &optionalReturnType, optionalLabelList, &block},
            AsSingletonVector<Declaration*, MakeSpecializationDeclaration>()),
       Rule({Token("#include"), &externalString},
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index e759ce613c86af..ad7551f8aa4e6c 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -19,19 +19,24 @@ std::string ErrorPrefixFor(TorqueMessage::Kind kind) {
 }
 
 int WrappedMain(int argc, const char** argv) {
-  std::string output_directory;
-  std::string v8_root;
+  TorqueCompilerOptions options;
+  options.collect_language_server_data = false;
+  options.force_assert_statements = false;
+
   std::vector<std::string> files;
 
   for (int i = 1; i < argc; ++i) {
     // Check for options
-    if (std::string(argv[i]) == "-o") {
-      output_directory = argv[++i];
-    } else if (std::string(argv[i]) == "-v8-root") {
-      v8_root = std::string(argv[++i]);
+    std::string argument(argv[i]);
+    if (argument == "-o") {
+      options.output_directory = argv[++i];
+    } else if (argument == "-v8-root") {
+      options.v8_root = std::string(argv[++i]);
+    } else if (argument == "-m32") {
+      options.force_32bit_output = true;
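+      // See TorqueCompilerOptions::force_32bit_output for the rationale.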
     } else {
       // Otherwise it's a .tq file. Remember it for compilation.
-      files.emplace_back(argv[i]);
+      files.emplace_back(std::move(argument));
       if (!StringEndsWith(files.back(), ".tq")) {
         std::cerr << "Unexpected command-line argument \"" << files.back()
                   << "\", expected a .tq file.\n";
@@ -40,12 +45,6 @@ int WrappedMain(int argc, const char** argv) {
     }
   }
 
-  TorqueCompilerOptions options;
-  options.output_directory = std::move(output_directory);
-  options.v8_root = std::move(v8_root);
-  options.collect_language_server_data = false;
-  options.force_assert_statements = false;
-
   TorqueCompilerResult result = CompileTorque(files, options);
 
   // PositionAsString requires the SourceFileMap to be set to
diff --git a/deps/v8/src/torque/type-inference.cc b/deps/v8/src/torque/type-inference.cc
new file mode 100644
index 00000000000000..abd875f4f6c943
--- /dev/null
+++ b/deps/v8/src/torque/type-inference.cc
@@ -0,0 +1,121 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/type-inference.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+TypeArgumentInference::TypeArgumentInference(
+    const NameVector& type_parameters,
+    const TypeVector& explicit_type_arguments,
+    const std::vector<TypeExpression*>& term_parameters,
+    const TypeVector& term_argument_types)
+    : num_explicit_(explicit_type_arguments.size()),
+      type_parameter_from_name_(type_parameters.size()),
+      inferred_(type_parameters.size()) {
+  if (num_explicit_ > type_parameters.size()) {
+    Fail("more explicit type arguments than expected");
+    return;
+  }
+  if (term_parameters.size() != term_argument_types.size()) {
+    Fail("number of term parameters does not match number of term arguments!");
+    return;
+  }
+
+  for (size_t i = 0; i < type_parameters.size(); i++) {
+    type_parameter_from_name_[type_parameters[i]->value] = i;
+  }
+  for (size_t i = 0; i < num_explicit_; i++) {
+    inferred_[i] = {explicit_type_arguments[i]};
+  }
+
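+  // Match each term parameter's type expression against the corresponding
+  // argument type to infer the remaining type parameters.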
+  for (size_t i = 0; i < term_parameters.size(); i++) {
+    Match(term_parameters[i], term_argument_types[i]);
+    if (HasFailed()) return;
+  }
+
+  for (size_t i = 0; i < type_parameters.size(); i++) {
+    if (!inferred_[i]) {
+      Fail("failed to infer arguments for all type parameters");
+      return;
+    }
+  }
+}
+
+TypeVector TypeArgumentInference::GetResult() const {
+  CHECK(!HasFailed());
+  TypeVector result(inferred_.size());
+  std::transform(
+      inferred_.begin(), inferred_.end(), result.begin(),
+      [](base::Optional<const Type*> maybe_type) { return *maybe_type; });
+  return result;
+}
+
+void TypeArgumentInference::Match(TypeExpression* parameter,
+                                  const Type* argument_type) {
+  if (BasicTypeExpression* basic =
+          BasicTypeExpression::DynamicCast(parameter)) {
+    // If the parameter refers to one of the type parameters, record the
+    // argument type as the inferred type argument.
+    if (basic->namespace_qualification.empty() && !basic->is_constexpr) {
+      auto result = type_parameter_from_name_.find(basic->name);
+      if (result != type_parameter_from_name_.end()) {
+        size_t type_parameter_index = result->second;
+        if (type_parameter_index < num_explicit_) {
+          return;
+        }
+        base::Optional<const Type*>& maybe_inferred =
+            inferred_[type_parameter_index];
+        if (maybe_inferred && *maybe_inferred != argument_type) {
+          Fail("found conflicting types for generic parameter");
+        } else {
+          inferred_[type_parameter_index] = {argument_type};
+        }
+        return;
+      }
+    }
+    // Try to recurse in case of generic types
+    if (!basic->generic_arguments.empty()) {
+      auto* argument_struct_type = StructType::DynamicCast(argument_type);
+      if (argument_struct_type) {
+        MatchGeneric(basic, argument_struct_type);
+      }
+    }
+    // NOTE: We could also check whether ground parameter types match the
+    // argument types, but we are only interested in inferring type arguments
+    // here.
+  } else {
+    // TODO(gsps): Perform inference on function and union types
+  }
+}
+
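+// Matches a generic struct pattern such as Slice<T> against a concrete
+// instantiation of the same generic and recurses into its type arguments.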
+void TypeArgumentInference::MatchGeneric(BasicTypeExpression* parameter,
+                                         const StructType* argument_type) {
+  QualifiedName qualified_name{parameter->namespace_qualification,
+                               parameter->name};
+  GenericStructType* generic_struct =
+      Declarations::LookupUniqueGenericStructType(qualified_name);
+  auto& specialized_from = argument_type->GetSpecializedFrom();
+  if (!specialized_from || specialized_from->generic != generic_struct) {
+    return Fail("found conflicting generic type constructors");
+  }
+  auto& parameters = parameter->generic_arguments;
+  auto& argument_types = specialized_from->specialized_types;
+  if (parameters.size() != argument_types.size()) {
+    Error(
+        "cannot infer types from generic-struct-typed parameter with "
+        "incompatible number of arguments")
+        .Position(parameter->pos)
+        .Throw();
+  }
+  for (size_t i = 0; i < parameters.size(); i++) {
+    Match(parameters[i], argument_types[i]);
+    if (HasFailed()) return;
+  }
+}
+
+}  // namespace torque
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/torque/type-inference.h b/deps/v8/src/torque/type-inference.h
new file mode 100644
index 00000000000000..671d68cce5dda1
--- /dev/null
+++ b/deps/v8/src/torque/type-inference.h
@@ -0,0 +1,84 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_TYPE_INFERENCE_H_
+#define V8_TORQUE_TYPE_INFERENCE_H_
+
+#include <string>
+#include <unordered_map>
+
+#include "src/base/optional.h"
+#include "src/torque/ast.h"
+#include "src/torque/declarations.h"
+#include "src/torque/types.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+// Type argument inference computes a potential instantiation of a generic
+// callable given some concrete argument types. As an example, consider the
+// generic macro
+//
+//   macro Pick<T: type>(x: T, y: T): T
+//
+// along with a given call site, such as
+//
+//   Pick(1, 2);
+//
+// The inference proceeds by matching the term argument types (`constexpr
+// int31` in the case of `1` and `2`) against the formal parameter types (`T`
+// in both cases). During this matching we discover that `T` must equal
+// `constexpr int31`.
+//
+// The inference will not perform any comprehensive type checking of its own,
+// but *does* fail if type parameters cannot be soundly instantiated given the
+// call site. For instance, for the following call site
+//
+//   const aSmi: Smi = ...;
+//   Pick(1, aSmi);  // inference fails
+//
+// inference would fail, since `constexpr int31` is distinct from `Smi`. To
+// allow for implicit conversions to be tried in a separate step after type
+// argument inference, a number of type arguments may be given explicitly:
+//
+//   Pick<Smi>(1, aSmi);  // inference succeeds (doing nothing)
+//
+// In the above case the inference simply ignores inconsistent constraints on
+// `T`. Similarly, we ignore all constraints arising from formal parameters
+// that are function- or union-typed.
+//
+// Finally, note that term parameters are passed as type expressions, since
+// we have no way of expressing a reference to a type parameter as a Type.
+// These type expressions are resolved during matching, so
+// TypeArgumentInference should be instantiated in the appropriate scope.
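+//
+// A typical use, sketched against the interface declared below (the error
+// handling is illustrative):
+//
+//   TypeArgumentInference inference(type_parameters, explicit_type_arguments,
+//                                   term_parameters, term_argument_types);
+//   if (inference.HasFailed()) {
+//     ReportError("cannot infer type arguments: ",
+//                 inference.GetFailureReason());
+//   }
+//   TypeVector type_arguments = inference.GetResult();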
+class TypeArgumentInference {
+ public:
+  TypeArgumentInference(const NameVector& type_parameters,
+                        const TypeVector& explicit_type_arguments,
+                        const std::vector<TypeExpression*>& term_parameters,
+                        const TypeVector& term_argument_types);
+
+  bool HasFailed() const { return failure_reason_.has_value(); }
+  const char* GetFailureReason() { return *failure_reason_; }
+  TypeVector GetResult() const;
+
+ private:
+  void Fail(const char* reason) { failure_reason_ = {reason}; }
+
+  void Match(TypeExpression* parameter, const Type* argument_type);
+  void MatchGeneric(BasicTypeExpression* parameter,
+                    const StructType* argument_type);
+
+  size_t num_explicit_;
+  std::unordered_map<std::string, size_t> type_parameter_from_name_;
+  std::vector<base::Optional<const Type*>> inferred_;
+  base::Optional<const char*> failure_reason_;
+};
+
+}  // namespace torque
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TORQUE_TYPE_INFERENCE_H_
diff --git a/deps/v8/src/torque/type-oracle.cc b/deps/v8/src/torque/type-oracle.cc
index 47331543fcce3d..c7e11c2165caf5 100644
--- a/deps/v8/src/torque/type-oracle.cc
+++ b/deps/v8/src/torque/type-oracle.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/torque/type-oracle.h"
+#include "src/torque/type-visitor.h"
 
 namespace v8 {
 namespace internal {
@@ -23,6 +24,28 @@ void TypeOracle::FinalizeAggregateTypes() {
   }
 }
 
+// static
+const StructType* TypeOracle::GetGenericStructTypeInstance(
+    GenericStructType* generic_struct, TypeVector arg_types) {
+  auto& params = generic_struct->generic_parameters();
+  auto& specializations = generic_struct->specializations();
+
+  if (params.size() != arg_types.size()) {
+    ReportError("Generic struct takes ", params.size(), " parameters, but ",
+                arg_types.size(), " were given");
+  }
+
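+  // Specializations are memoized so that instantiating a generic struct twice
+  // with the same type arguments yields the same (pointer-identical) type.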
+  if (auto specialization = specializations.Get(arg_types)) {
+    return *specialization;
+  } else {
+    CurrentScope::Scope generic_scope(generic_struct->ParentScope());
+    auto struct_type = TypeVisitor::ComputeType(generic_struct->declaration(),
+                                                {{generic_struct, arg_types}});
+    specializations.Add(arg_types, struct_type);
+    return struct_type;
+  }
+}
+
 }  // namespace torque
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index 405cb41e75e6f0..643c78c0306577 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -30,9 +30,13 @@ class TypeOracle : public ContextualClass<TypeOracle> {
     return result;
   }
 
-  static StructType* GetStructType(const std::string& name) {
-    StructType* result = new StructType(CurrentNamespace(), name);
+  static StructType* GetStructType(
+      const StructDeclaration* decl,
+      StructType::MaybeSpecializationKey specialized_from) {
+    Namespace* nspace = new Namespace(STRUCT_NAMESPACE_STRING);
+    StructType* result = new StructType(nspace, decl, specialized_from);
     Get().aggregate_types_.push_back(std::unique_ptr<StructType>(result));
+    Get().struct_namespaces_.push_back(std::unique_ptr<Namespace>(nspace));
     return result;
   }
 
@@ -60,8 +64,26 @@ class TypeOracle : public ContextualClass<TypeOracle> {
     return result;
   }
 
-  static const ReferenceType* GetReferenceType(const Type* referenced_type) {
-    return Get().reference_types_.Add(ReferenceType(referenced_type));
+  static const StructType* GetGenericStructTypeInstance(
+      GenericStructType* generic_struct, TypeVector arg_types);
+
+  static GenericStructType* GetReferenceGeneric() {
+    return Declarations::LookupUniqueGenericStructType(QualifiedName(
+        {TORQUE_INTERNAL_NAMESPACE_STRING}, REFERENCE_TYPE_STRING));
+  }
+
+  static GenericStructType* GetSliceGeneric() {
+    return Declarations::LookupUniqueGenericStructType(
+        QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, SLICE_TYPE_STRING));
+  }
+
+  static const StructType* GetReferenceType(const Type* referenced_type) {
+    return GetGenericStructTypeInstance(GetReferenceGeneric(),
+                                        {referenced_type});
+  }
+
+  static const StructType* GetSliceType(const Type* referenced_type) {
+    return GetGenericStructTypeInstance(GetSliceGeneric(), {referenced_type});
   }
 
   static const std::vector<const BuiltinPointerType*>&
@@ -131,6 +153,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
     return Get().GetBuiltinType(HEAP_OBJECT_TYPE_STRING);
   }
 
+  static const Type* GetJSAnyType() {
+    return Get().GetBuiltinType(JSANY_TYPE_STRING);
+  }
+
   static const Type* GetJSObjectType() {
     return Get().GetBuiltinType(JSOBJECT_TYPE_STRING);
   }
@@ -245,10 +271,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
   Deduplicator<BuiltinPointerType> function_pointer_types_;
   std::vector<const BuiltinPointerType*> all_builtin_pointer_types_;
   Deduplicator<UnionType> union_types_;
-  Deduplicator<ReferenceType> reference_types_;
   std::vector<std::unique_ptr<Type>> nominal_types_;
   std::vector<std::unique_ptr<AggregateType>> aggregate_types_;
   std::vector<std::unique_ptr<Type>> top_types_;
+  std::vector<std::unique_ptr<Namespace>> struct_namespaces_;
 };
 
 }  // namespace torque
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index 37be0df0062d90..9b5c96ee40196e 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -8,6 +8,7 @@
 #include "src/torque/declarable.h"
 #include "src/torque/global-context.h"
 #include "src/torque/server-data.h"
+#include "src/torque/type-inference.h"
 #include "src/torque/type-oracle.h"
 
 namespace v8 {
@@ -91,49 +92,47 @@ void DeclareMethods(AggregateType* container_type,
                     const std::vector<Declaration*>& methods) {
   for (auto declaration : methods) {
     CurrentSourcePosition::Scope pos_scope(declaration->pos);
-    StandardDeclaration* standard_declaration =
-        StandardDeclaration::DynamicCast(declaration);
-    DCHECK(standard_declaration);
     TorqueMacroDeclaration* method =
-        TorqueMacroDeclaration::DynamicCast(standard_declaration->callable);
-    Signature signature = TypeVisitor::MakeSignature(method->signature.get());
+        TorqueMacroDeclaration::DynamicCast(declaration);
+    Signature signature = TypeVisitor::MakeSignature(method);
     signature.parameter_names.insert(
         signature.parameter_names.begin() + signature.implicit_count,
         MakeNode<Identifier>(kThisParameterName));
-    Statement* body = *(standard_declaration->body);
-    std::string method_name(method->name);
+    Statement* body = *(method->body);
+    const std::string& method_name(method->name->value);
     signature.parameter_types.types.insert(
         signature.parameter_types.types.begin() + signature.implicit_count,
         container_type);
-    Declarations::CreateMethod(container_type, method_name, signature, false,
-                               body);
+    Declarations::CreateMethod(container_type, method_name, signature, body);
   }
 }
 
-namespace {
-std::string ComputeStructName(StructDeclaration* decl) {
-  TypeVector args;
-  if (decl->IsGeneric()) {
-    args.resize(decl->generic_parameters.size());
-    std::transform(
-        decl->generic_parameters.begin(), decl->generic_parameters.end(),
-        args.begin(), [](Identifier* parameter) {
-          return Declarations::LookupTypeAlias(QualifiedName(parameter->value))
-              ->type();
-        });
+const StructType* TypeVisitor::ComputeType(
+    StructDeclaration* decl,
+    StructType::MaybeSpecializationKey specialized_from) {
+  StructType* struct_type = TypeOracle::GetStructType(decl, specialized_from);
+  CurrentScope::Scope struct_namespace_scope(struct_type->nspace());
+  CurrentSourcePosition::Scope position_activator(decl->pos);
+
+  if (specialized_from) {
+    auto& params = specialized_from->generic->generic_parameters();
+    auto arg_types_iterator = specialized_from->specialized_types.begin();
+    for (auto param : params) {
+      TypeAlias* alias = Declarations::DeclareType(param, *arg_types_iterator);
+      alias->SetIsUserDefined(false);
+      arg_types_iterator++;
+    }
   }
-  return StructType::ComputeName(decl->name->value, args);
-}
-}  // namespace
 
-const StructType* TypeVisitor::ComputeType(StructDeclaration* decl) {
-  CurrentSourcePosition::Scope position_activator(decl->pos);
-  StructType* struct_type = TypeOracle::GetStructType(ComputeStructName(decl));
   size_t offset = 0;
   for (auto& field : decl->fields) {
     CurrentSourcePosition::Scope position_activator(
         field.name_and_type.type->pos);
     const Type* field_type = TypeVisitor::ComputeType(field.name_and_type.type);
+    if (field_type->IsConstexpr()) {
+      ReportError("struct field \"", field.name_and_type.name->value,
+                  "\" carries constexpr type \"", *field_type, "\"");
+    }
     struct_type->RegisterField({field.name_and_type.name->pos,
                                 struct_type,
                                 base::nullopt,
@@ -144,7 +143,6 @@ const StructType* TypeVisitor::ComputeType(StructDeclaration* decl) {
                                 false});
     offset += LoweredSlotCount(field_type);
   }
-  DeclareMethods(struct_type, decl->methods);
   return struct_type;
 }
 
@@ -214,34 +212,8 @@ const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
     } else {
       auto* generic_struct =
           Declarations::LookupUniqueGenericStructType(qualified_name);
-      auto& params = generic_struct->generic_parameters();
-      auto& specializations = generic_struct->specializations();
-      if (params.size() != args.size()) {
-        ReportError("Generic struct takes ", params.size(),
-                    " parameters, but only ", args.size(), " were given");
-      }
-
-      std::vector<const Type*> arg_types = ComputeTypeVector(args);
-      if (auto specialization = specializations.Get(arg_types)) {
-        type = *specialization;
-      } else {
-        CurrentScope::Scope generic_scope(generic_struct->ParentScope());
-        // Create a temporary fake-namespace just to temporarily declare the
-        // specialization aliases for the generic types to create a signature.
-        Namespace tmp_namespace("_tmp");
-        CurrentScope::Scope tmp_namespace_scope(&tmp_namespace);
-        auto arg_types_iterator = arg_types.begin();
-        for (auto param : params) {
-          TypeAlias* alias =
-              Declarations::DeclareType(param, *arg_types_iterator);
-          alias->SetIsUserDefined(false);
-          arg_types_iterator++;
-        }
-
-        auto struct_type = ComputeType(generic_struct->declaration());
-        specializations.Add(arg_types, struct_type);
-        type = struct_type;
-      }
+      type = TypeOracle::GetGenericStructTypeInstance(generic_struct,
+                                                      ComputeTypeVector(args));
       pos = generic_struct->declaration()->name->pos;
     }
 
@@ -254,10 +226,6 @@ const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
                  UnionTypeExpression::DynamicCast(type_expression)) {
     return TypeOracle::GetUnionType(ComputeType(union_type->a),
                                     ComputeType(union_type->b));
-  } else if (auto* reference_type =
-                 ReferenceTypeExpression::DynamicCast(type_expression)) {
-    return TypeOracle::GetReferenceType(
-        ComputeType(reference_type->referenced_type));
   } else {
     auto* function_type_exp = FunctionTypeExpression::cast(type_expression);
     TypeVector argument_types;
@@ -269,22 +237,23 @@ const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
   }
 }
 
-Signature TypeVisitor::MakeSignature(const CallableNodeSignature* signature) {
+Signature TypeVisitor::MakeSignature(const CallableDeclaration* declaration) {
   LabelDeclarationVector definition_vector;
-  for (const auto& label : signature->labels) {
+  for (const auto& label : declaration->labels) {
     LabelDeclaration def = {label.name, ComputeTypeVector(label.types)};
     definition_vector.push_back(def);
   }
   base::Optional<std::string> arguments_variable;
-  if (signature->parameters.has_varargs)
-    arguments_variable = signature->parameters.arguments_variable;
-  Signature result{signature->parameters.names,
+  if (declaration->parameters.has_varargs)
+    arguments_variable = declaration->parameters.arguments_variable;
+  Signature result{declaration->parameters.names,
                    arguments_variable,
-                   {ComputeTypeVector(signature->parameters.types),
-                    signature->parameters.has_varargs},
-                   signature->parameters.implicit_count,
-                   ComputeType(signature->return_type),
-                   definition_vector};
+                   {ComputeTypeVector(declaration->parameters.types),
+                    declaration->parameters.has_varargs},
+                   declaration->parameters.implicit_count,
+                   ComputeType(declaration->return_type),
+                   definition_vector,
+                   declaration->transitioning};
   return result;
 }
 
@@ -345,7 +314,8 @@ void TypeVisitor::VisitClassFieldsAndMethods(
       std::string machine_type;
       std::tie(field_size, size_string) = field.GetFieldSizeInformation();
       // Our allocations don't support alignments beyond kTaggedSize.
-      size_t alignment = std::min(size_t{kTaggedSize}, field_size);
+      size_t alignment = std::min(
+          static_cast<size_t>(TargetArchitecture::TaggedSize()), field_size);
       if (alignment > 0 && class_offset % alignment != 0) {
         ReportError("field ", field_expression.name_and_type.name,
                     " at offset ", class_offset, " is not ", alignment,
@@ -359,6 +329,60 @@ void TypeVisitor::VisitClassFieldsAndMethods(
   DeclareMethods(class_type, class_declaration->methods);
 }
 
+void TypeVisitor::VisitStructMethods(
+    StructType* struct_type, const StructDeclaration* struct_declaration) {
+  DeclareMethods(struct_type, struct_declaration->methods);
+}
+
+const StructType* TypeVisitor::ComputeTypeForStructExpression(
+    TypeExpression* type_expression,
+    const std::vector<const Type*>& term_argument_types) {
+  auto* basic = BasicTypeExpression::DynamicCast(type_expression);
+  if (!basic) {
+    ReportError("expected basic type expression referring to struct");
+  }
+
+  QualifiedName qualified_name{basic->namespace_qualification, basic->name};
+  base::Optional<GenericStructType*> maybe_generic_struct =
+      Declarations::TryLookupGenericStructType(qualified_name);
+
+  // Compute the types of non-generic structs as usual.
+  if (!maybe_generic_struct) {
+    const Type* type = ComputeType(type_expression);
+    const StructType* struct_type = StructType::DynamicCast(type);
+    if (!struct_type) {
+      ReportError(*type, " is not a struct, but used like one");
+    }
+    return struct_type;
+  }
+
+  auto generic_struct = *maybe_generic_struct;
+  auto explicit_type_arguments = ComputeTypeVector(basic->generic_arguments);
+
+  std::vector<TypeExpression*> term_parameters;
+  auto& fields = generic_struct->declaration()->fields;
+  term_parameters.reserve(fields.size());
+  for (auto& field : fields) {
+    term_parameters.push_back(field.name_and_type.type);
+  }
+
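+  // Example (illustrative): given
+  //   struct Box<T: type> { value: T; }
+  // the expression Box{0} infers T from the initializer's type, whereas
+  // Box<Smi>{x} takes T = Smi from the explicit type argument.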
+  CurrentScope::Scope generic_scope(generic_struct->ParentScope());
+  TypeArgumentInference inference(
+      generic_struct->declaration()->generic_parameters,
+      explicit_type_arguments, term_parameters, term_argument_types);
+
+  if (inference.HasFailed()) {
+    ReportError("failed to infer type arguments for struct ", basic->name,
+                " initialization: ", inference.GetFailureReason());
+  }
+  if (GlobalContext::collect_language_server_data()) {
+    LanguageServerData::AddDefinition(type_expression->pos,
+                                      generic_struct->declaration()->name->pos);
+  }
+  return TypeOracle::GetGenericStructTypeInstance(generic_struct,
+                                                  inference.GetResult());
+}
+
 }  // namespace torque
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/torque/type-visitor.h b/deps/v8/src/torque/type-visitor.h
index 93de02b860b774..cafd752cbf4958 100644
--- a/deps/v8/src/torque/type-visitor.h
+++ b/deps/v8/src/torque/type-visitor.h
@@ -27,14 +27,22 @@ class TypeVisitor {
   static const Type* ComputeType(TypeExpression* type_expression);
   static void VisitClassFieldsAndMethods(
       ClassType* class_type, const ClassDeclaration* class_declaration);
-  static Signature MakeSignature(const CallableNodeSignature* signature);
+  static void VisitStructMethods(StructType* struct_type,
+                                 const StructDeclaration* struct_declaration);
+  static Signature MakeSignature(const CallableDeclaration* declaration);
+  static const StructType* ComputeTypeForStructExpression(
+      TypeExpression* type_expression,
+      const std::vector<const Type*>& term_argument_types);
 
  private:
   friend class TypeAlias;
+  friend class TypeOracle;
   static const Type* ComputeType(TypeDeclaration* decl);
   static const AbstractType* ComputeType(AbstractTypeDeclaration* decl);
   static const Type* ComputeType(TypeAliasDeclaration* decl);
-  static const StructType* ComputeType(StructDeclaration* decl);
+  static const StructType* ComputeType(
+      StructDeclaration* decl,
+      StructType::MaybeSpecializationKey specialized_from = base::nullopt);
   static const ClassType* ComputeType(ClassDeclaration* decl);
 };
 
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 37a328b1dc3c9a..fe792401f6cefc 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -4,9 +4,9 @@
 
 #include <iostream>
 
-#include "src/common/globals.h"
 #include "src/torque/ast.h"
 #include "src/torque/declarable.h"
+#include "src/torque/global-context.h"
 #include "src/torque/type-oracle.h"
 #include "src/torque/type-visitor.h"
 #include "src/torque/types.h"
@@ -263,7 +263,7 @@ const Field& AggregateType::LookupFieldInternal(const std::string& name) const {
       return parent_class->LookupField(name);
     }
   }
-  ReportError("no field ", name, " found");
+  ReportError("no field ", name, " found in ", this->ToString());
 }
 
 const Field& AggregateType::LookupField(const std::string& name) const {
@@ -276,13 +276,14 @@ std::string StructType::GetGeneratedTypeNameImpl() const {
 }
 
 // static
-std::string StructType::ComputeName(const std::string& basename,
-                                    const std::vector<const Type*>& args) {
-  if (args.size() == 0) return basename;
+std::string StructType::ComputeName(
+    const std::string& basename,
+    StructType::MaybeSpecializationKey specialized_from) {
+  if (!specialized_from) return basename;
   std::stringstream s;
   s << basename << "<";
   bool first = true;
-  for (auto t : args) {
+  for (auto t : specialized_from->specialized_types) {
     if (!first) {
       s << ", ";
     }
@@ -293,6 +294,43 @@ std::string StructType::ComputeName(const std::string& basename,
   return s.str();
 }
 
+std::string StructType::MangledName() const {
+  std::stringstream result;
+  // TODO(gsps): Add 'ST' as a prefix once we can control the generated type
+  // name from Torque code.
+  result << decl_->name->value;
+  if (specialized_from_) {
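+    // Append each argument's mangled name, length-prefixed so that the
+    // concatenation parses unambiguously (e.g. "Box" followed by an argument
+    // whose mangling is "Smi" becomes "Box3Smi" -- names are illustrative).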
+    for (const Type* t : specialized_from_->specialized_types) {
+      std::string arg_type_string = t->MangledName();
+      result << arg_type_string.size() << arg_type_string;
+    }
+  }
+  return result.str();
+}
+
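+// If {type} is a specialization of {generic} with exactly one type argument,
+// returns that argument; e.g. (illustratively) matching a Reference<T> against
+// the Reference generic yields T. Returns nullopt otherwise.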
+// static
+base::Optional<const Type*> StructType::MatchUnaryGeneric(
+    const Type* type, GenericStructType* generic) {
+  if (auto* struct_type = StructType::DynamicCast(type)) {
+    return MatchUnaryGeneric(struct_type, generic);
+  }
+  return base::nullopt;
+}
+
+// static
+base::Optional<const Type*> StructType::MatchUnaryGeneric(
+    const StructType* type, GenericStructType* generic) {
+  DCHECK_EQ(generic->generic_parameters().size(), 1);
+  if (!type->specialized_from_) {
+    return base::nullopt;
+  }
+  auto& key = type->specialized_from_.value();
+  if (key.generic != generic || key.specialized_types.size() != 1) {
+    return base::nullopt;
+  }
+  return {key.specialized_types[0]};
+}
+
 std::vector<Method*> AggregateType::Methods(const std::string& name) const {
   if (!is_finalized_) Finalize();
   std::vector<Method*> result;
@@ -307,6 +345,17 @@ std::string StructType::ToExplicitString() const {
   return result.str();
 }
 
+void StructType::Finalize() const {
+  if (is_finalized_) return;
+  {
+    CurrentScope::Scope scope_activator(nspace());
+    CurrentSourcePosition::Scope position_activator(decl_->pos);
+    TypeVisitor::VisitStructMethods(const_cast<StructType*>(this), decl_);
+  }
+  is_finalized_ = true;
+  CheckForDuplicateFields();
+}
+
 constexpr ClassFlags ClassType::kInternalFlags;
 
 ClassType::ClassType(const Type* parent, Namespace* nspace,
@@ -380,6 +429,17 @@ void ClassType::Finalize() const {
   CheckForDuplicateFields();
 }
 
+std::vector<Field> ClassType::ComputeAllFields() const {
+  std::vector<Field> all_fields;
+  const ClassType* super_class = this->GetSuperClass();
+  if (super_class) {
+    all_fields = super_class->ComputeAllFields();
+  }
+  const std::vector<Field>& fields = this->fields();
+  all_fields.insert(all_fields.end(), fields.begin(), fields.end());
+  return all_fields;
+}
+
 void ClassType::GenerateAccessors() {
   // For each field, construct AST snippets that implement a CSA accessor
   // function and define a corresponding '.field' operator. The
@@ -404,8 +464,7 @@ void ClassType::GenerateAccessors() {
         MakeNode<ReturnStatement>(MakeNode<FieldAccessExpression>(
             parameter, MakeNode<Identifier>(field.name_and_type.name)));
     Declarations::DeclareMacro(load_macro_name, true, base::nullopt,
-                               load_signature, false, load_body, base::nullopt,
-                               false);
+                               load_signature, load_body, base::nullopt);
 
     // Store accessor
     IdentifierExpression* value = MakeNode<IdentifierExpression>(
@@ -425,8 +484,8 @@ void ClassType::GenerateAccessors() {
                 parameter, MakeNode<Identifier>(field.name_and_type.name)),
             value));
     Declarations::DeclareMacro(store_macro_name, true, base::nullopt,
-                               store_signature, false, store_body,
-                               base::nullopt, false);
+                               store_signature, store_body, base::nullopt,
+                               false);
   }
 }
 
@@ -560,9 +619,6 @@ void AppendLoweredTypes(const Type* type, std::vector<const Type*>* result) {
     for (const Field& field : s->fields()) {
       AppendLoweredTypes(field.name_and_type.type, result);
     }
-  } else if (type->IsReferenceType()) {
-    result->push_back(TypeOracle::GetHeapObjectType());
-    result->push_back(TypeOracle::GetIntPtrType());
   } else {
     result->push_back(type);
   }
@@ -606,10 +662,10 @@ std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const {
   const Type* field_type = this->name_and_type.type;
   size_t field_size = 0;
   if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
-    field_size = kTaggedSize;
+    field_size = TargetArchitecture::TaggedSize();
     size_string = "kTaggedSize";
   } else if (field_type->IsSubtypeOf(TypeOracle::GetRawPtrType())) {
-    field_size = kSystemPointerSize;
+    field_size = TargetArchitecture::RawPtrSize();
     size_string = "kSystemPointerSize";
   } else if (field_type->IsSubtypeOf(TypeOracle::GetVoidType())) {
     field_size = 0;
@@ -636,10 +692,10 @@ std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const {
     field_size = kDoubleSize;
     size_string = "kDoubleSize";
   } else if (field_type->IsSubtypeOf(TypeOracle::GetIntPtrType())) {
-    field_size = kIntptrSize;
+    field_size = TargetArchitecture::RawPtrSize();
     size_string = "kIntptrSize";
   } else if (field_type->IsSubtypeOf(TypeOracle::GetUIntPtrType())) {
-    field_size = kIntptrSize;
+    field_size = TargetArchitecture::RawPtrSize();
     size_string = "kIntptrSize";
   } else {
     ReportError("fields of type ", *field_type, " are not (yet) supported");
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index f6180c4250eaaa..d2198d50c33afc 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -25,6 +25,7 @@ class AggregateType;
 struct Identifier;
 class Macro;
 class Method;
+class GenericStructType;
 class StructType;
 class ClassType;
 class Value;
@@ -36,7 +37,6 @@ class TypeBase {
     kTopType,
     kAbstractType,
     kBuiltinPointerType,
-    kReferenceType,
     kUnionType,
     kStructType,
     kClassType
@@ -47,7 +47,6 @@ class TypeBase {
   bool IsBuiltinPointerType() const {
     return kind() == Kind::kBuiltinPointerType;
   }
-  bool IsReferenceType() const { return kind() == Kind::kReferenceType; }
   bool IsUnionType() const { return kind() == Kind::kUnionType; }
   bool IsStructType() const { return kind() == Kind::kStructType; }
   bool IsClassType() const { return kind() == Kind::kClassType; }
@@ -143,6 +142,12 @@ struct NameAndType {
 
 std::ostream& operator<<(std::ostream& os, const NameAndType& name_and_type);
 
+template <typename T>
+struct SpecializationKey {
+  T* generic;
+  TypeVector specialized_types;
+};
+
 struct Field {
   // TODO(danno): This likely should be refactored, the handling of the types
   // using the universal grab-bag utility with std::tie, as well as the
@@ -298,43 +303,6 @@ class V8_EXPORT_PRIVATE BuiltinPointerType final : public Type {
   const size_t function_pointer_type_id_;
 };
 
-class ReferenceType final : public Type {
- public:
-  DECLARE_TYPE_BOILERPLATE(ReferenceType)
-  std::string MangledName() const override {
-    return "RT" + referenced_type_->MangledName();
-  }
-  std::string ToExplicitString() const override {
-    std::string s = referenced_type_->ToString();
-    if (s.find(' ') != std::string::npos) {
-      s = "(" + s + ")";
-    }
-    return "&" + s;
-  }
-  std::string GetGeneratedTypeNameImpl() const override {
-    return "CodeStubAssembler::Reference";
-  }
-  std::string GetGeneratedTNodeTypeNameImpl() const override { UNREACHABLE(); }
-
-  const Type* referenced_type() const { return referenced_type_; }
-
-  friend size_t hash_value(const ReferenceType& p) {
-    return base::hash_combine(static_cast<size_t>(Kind::kReferenceType),
-                              p.referenced_type_);
-  }
-  bool operator==(const ReferenceType& other) const {
-    return referenced_type_ == other.referenced_type_;
-  }
-
- private:
-  friend class TypeOracle;
-  explicit ReferenceType(const Type* referenced_type)
-      : Type(Kind::kReferenceType, nullptr),
-        referenced_type_(referenced_type) {}
-
-  const Type* const referenced_type_;
-};
-
 bool operator<(const Type& a, const Type& b);
 struct TypeLess {
   bool operator()(const Type* const a, const Type* const b) const {
@@ -500,32 +468,38 @@ class AggregateType : public Type {
 class StructType final : public AggregateType {
  public:
   DECLARE_TYPE_BOILERPLATE(StructType)
+
+  using MaybeSpecializationKey =
+      base::Optional<SpecializationKey<GenericStructType>>;
+
   std::string ToExplicitString() const override;
   std::string GetGeneratedTypeNameImpl() const override;
-  std::string MangledName() const override {
-    // TODO(gsps): Generate more readable mangled names
-    std::string str(name());
-    std::replace(str.begin(), str.end(), ',', '_');
-    std::replace(str.begin(), str.end(), ' ', '_');
-    std::replace(str.begin(), str.end(), '<', '_');
-    std::replace(str.begin(), str.end(), '>', '_');
-    return str;
+  std::string MangledName() const override;
+  const MaybeSpecializationKey& GetSpecializedFrom() const {
+    return specialized_from_;
   }
 
-  static std::string ComputeName(const std::string& basename,
-                                 const std::vector<const Type*>& args);
+  static base::Optional<const Type*> MatchUnaryGeneric(
+      const Type* type, GenericStructType* generic);
+  static base::Optional<const Type*> MatchUnaryGeneric(
+      const StructType* type, GenericStructType* generic);
 
  private:
   friend class TypeOracle;
-  StructType(Namespace* nspace, const std::string& name)
-      : AggregateType(Kind::kStructType, nullptr, nspace, name) {}
+  StructType(Namespace* nspace, const StructDeclaration* decl,
+             MaybeSpecializationKey specialized_from = base::nullopt)
+      : AggregateType(Kind::kStructType, nullptr, nspace,
+                      ComputeName(decl->name->value, specialized_from)),
+        decl_(decl),
+        specialized_from_(specialized_from) {}
 
-  void Finalize() const override {
-    is_finalized_ = true;
-    CheckForDuplicateFields();
-  }
+  void Finalize() const override;
+
+  static std::string ComputeName(const std::string& basename,
+                                 MaybeSpecializationKey specialized_from);
 
-  const std::string& GetStructName() const { return name(); }
+  const StructDeclaration* decl_;
+  MaybeSpecializationKey specialized_from_;
 };
 
 class TypeAlias;
@@ -573,6 +547,8 @@ class ClassType final : public AggregateType {
   }
   void Finalize() const override;
 
+  std::vector<Field> ComputeAllFields() const;
+
  private:
   friend class TypeOracle;
   friend class TypeVisitor;
@@ -668,22 +644,25 @@ using NameVector = std::vector<Identifier*>;
 
 struct Signature {
   Signature(NameVector n, base::Optional<std::string> arguments_variable,
-            ParameterTypes p, size_t i, const Type* r, LabelDeclarationVector l)
+            ParameterTypes p, size_t i, const Type* r, LabelDeclarationVector l,
+            bool transitioning)
       : parameter_names(std::move(n)),
         arguments_variable(arguments_variable),
         parameter_types(std::move(p)),
         implicit_count(i),
         return_type(r),
-        labels(std::move(l)) {}
-  Signature() : implicit_count(0), return_type(nullptr) {}
+        labels(std::move(l)),
+        transitioning(transitioning) {}
+  Signature() = default;
   const TypeVector& types() const { return parameter_types.types; }
   NameVector parameter_names;
   base::Optional<std::string> arguments_variable;
   ParameterTypes parameter_types;
-  size_t implicit_count;
+  size_t implicit_count = 0;
   size_t ExplicitCount() const { return types().size() - implicit_count; }
   const Type* return_type;
   LabelDeclarationVector labels;
+  bool transitioning = false;
   bool HasSameTypesAs(
       const Signature& other,
       ParameterMode mode = ParameterMode::kProcessImplicit) const;
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 244d1587dbc55c..38862b31b0efad 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -168,9 +168,9 @@ bool IsKeywordLikeName(const std::string& s) {
 // naming convention and are those exempt from the normal type convention.
 bool IsMachineType(const std::string& s) {
   static const char* const machine_types[]{
-      "void",    "never",   "int8",    "uint8",  "int16",  "uint16",
-      "int31",   "uint31",  "int32",   "uint32", "int64",  "intptr",
-      "uintptr", "float32", "float64", "bool",   "string", "bint"};
+      "void",    "never", "int8",   "uint8", "int16",  "uint16",  "int31",
+      "uint31",  "int32", "uint32", "int64", "intptr", "uintptr", "float32",
+      "float64", "bool",  "string", "bint",  "char8",  "char16"};
 
   return std::find(std::begin(machine_types), std::end(machine_types), s) !=
          std::end(machine_types);
@@ -292,6 +292,42 @@ void ReplaceFileContentsIfDifferent(const std::string& file_path,
   }
 }
 
+IfDefScope::IfDefScope(std::ostream& os, std::string d)
+    : os_(os), d_(std::move(d)) {
+  os_ << "#ifdef " << d_ << "\n";
+}
+IfDefScope::~IfDefScope() { os_ << "#endif  // " << d_ << "\n"; }
+
+NamespaceScope::NamespaceScope(std::ostream& os,
+                               std::initializer_list<std::string> namespaces)
+    : os_(os), d_(std::move(namespaces)) {
+  for (const std::string& s : d_) {
+    os_ << "namespace " << s << " {\n";
+  }
+}
+NamespaceScope::~NamespaceScope() {
+  for (auto i = d_.rbegin(); i != d_.rend(); ++i) {
+    os_ << "}  // namespace " << *i << "\n";
+  }
+}
+
+IncludeGuardScope::IncludeGuardScope(std::ostream& os, std::string file_name)
+    : os_(os),
+      d_("V8_GEN_TORQUE_GENERATED_" + CapifyStringWithUnderscores(file_name) +
+         "_") {
+  os_ << "#ifndef " << d_ << "\n";
+  os_ << "#define " << d_ << "\n\n";
+}
+IncludeGuardScope::~IncludeGuardScope() { os_ << "#endif  // " << d_ << "\n"; }
+
+IncludeObjectMacrosScope::IncludeObjectMacrosScope(std::ostream& os) : os_(os) {
+  os_ << "\n// Has to be the last include (doesn't have include guards):\n"
+         "#include \"src/objects/object-macros.h\"\n";
+}
+IncludeObjectMacrosScope::~IncludeObjectMacrosScope() {
+  os_ << "\n#include \"src/objects/object-macros-undef.h\"\n";
+}
+
 }  // namespace torque
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index fb4ad59f99dcaa..5f44dedea9ec6c 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -292,7 +292,7 @@ T* CheckNotNull(T* x) {
 }
 
 template <class T>
-inline std::ostream& operator<<(std::ostream& os, Stack<T>& t) {
+inline std::ostream& operator<<(std::ostream& os, const Stack<T>& t) {
   os << "Stack{";
   PrintCommaSeparatedList(os, t);
   os << "}";
@@ -356,6 +356,54 @@ inline bool StringEndsWith(const std::string& s, const std::string& suffix) {
   return s.substr(s.size() - suffix.size()) == suffix;
 }
 
+class IfDefScope {
+ public:
+  IfDefScope(std::ostream& os, std::string d);
+  ~IfDefScope();
+
+ private:
+  IfDefScope(const IfDefScope&) = delete;
+  IfDefScope& operator=(const IfDefScope&) = delete;
+  std::ostream& os_;
+  std::string d_;
+};
+
+class NamespaceScope {
+ public:
+  NamespaceScope(std::ostream& os,
+                 std::initializer_list<std::string> namespaces);
+  ~NamespaceScope();
+
+ private:
+  NamespaceScope(const NamespaceScope&) = delete;
+  NamespaceScope& operator=(const NamespaceScope&) = delete;
+  std::ostream& os_;
+  std::vector<std::string> d_;
+};
+
+class IncludeGuardScope {
+ public:
+  IncludeGuardScope(std::ostream& os, std::string file_name);
+  ~IncludeGuardScope();
+
+ private:
+  IncludeGuardScope(const IncludeGuardScope&) = delete;
+  IncludeGuardScope& operator=(const IncludeGuardScope&) = delete;
+  std::ostream& os_;
+  std::string d_;
+};
+
+class IncludeObjectMacrosScope {
+ public:
+  explicit IncludeObjectMacrosScope(std::ostream& os);
+  ~IncludeObjectMacrosScope();
+
+ private:
+  IncludeObjectMacrosScope(const IncludeObjectMacrosScope&) = delete;
+  IncludeObjectMacrosScope& operator=(const IncludeObjectMacrosScope&) = delete;
+  std::ostream& os_;
+};
+
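+// A minimal usage sketch (illustrative): each scope emits its closing tokens
+// when it is destroyed, so nested scopes close in reverse construction order.
+//
+//   std::stringstream header;
+//   {
+//     IncludeGuardScope include_guard(header, "example.h");
+//     NamespaceScope namespaces(header, {"v8", "internal"});
+//     header << "class Example;\n";
+//   }  // Emits the namespace closers, then the "#endif".
+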
 }  // namespace torque
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/utils/OWNERS b/deps/v8/src/utils/OWNERS
index 3f9de7e204c675..47506200728b99 100644
--- a/deps/v8/src/utils/OWNERS
+++ b/deps/v8/src/utils/OWNERS
@@ -1,3 +1,3 @@
-file://COMMON_OWNERS
+file:../../COMMON_OWNERS
 
 # COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index af32e90088ea03..f44b3c42ea9dcf 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -161,15 +161,14 @@ void* GetRandomMmapAddr() {
   return GetPlatformPageAllocator()->GetRandomMmapAddr();
 }
 
-void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
-                    size_t size, size_t alignment,
-                    PageAllocator::Permission access) {
+void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
+                    size_t alignment, PageAllocator::Permission access) {
   DCHECK_NOT_NULL(page_allocator);
-  DCHECK_EQ(address, AlignedAddress(address, alignment));
+  DCHECK_EQ(hint, AlignedAddress(hint, alignment));
   DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
   void* result = nullptr;
   for (int i = 0; i < kAllocationTries; ++i) {
-    result = page_allocator->AllocatePages(address, size, alignment, access);
+    result = page_allocator->AllocatePages(hint, size, alignment, access);
     if (result != nullptr) break;
     size_t request_size = size + alignment - page_allocator->AllocatePageSize();
     if (!OnCriticalMemoryPressure(request_size)) break;
@@ -198,16 +197,6 @@ bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
   return page_allocator->SetPermissions(address, size, access);
 }
 
-byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
-                   size_t* allocated) {
-  DCHECK_NOT_NULL(page_allocator);
-  size_t page_size = page_allocator->AllocatePageSize();
-  void* result = AllocatePages(page_allocator, address, page_size, page_size,
-                               PageAllocator::kReadWrite);
-  if (result != nullptr) *allocated = page_size;
-  return static_cast<byte*>(result);
-}
-
 bool OnCriticalMemoryPressure(size_t length) {
   // TODO(bbudge) Rework retry logic once embedders implement the more
   // informative overload.
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index 2f7074acb00060..4cb244172c5ba9 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -89,7 +89,7 @@ V8_EXPORT_PRIVATE v8::PageAllocator* SetPlatformPageAllocatorForTesting(
     v8::PageAllocator* page_allocator);
 
 // Gets the page granularity for AllocatePages and FreePages. Addresses returned
-// by AllocatePages and AllocatePage are aligned to this size.
+// by AllocatePages are aligned to this size.
 V8_EXPORT_PRIVATE size_t AllocatePageSize();
 
 // Gets the granularity at which the permissions and release calls can be made.
@@ -142,13 +142,6 @@ inline bool SetPermissions(v8::PageAllocator* page_allocator, Address address,
                         access);
 }
 
-// Convenience function that allocates a single system page with read and write
-// permissions. |address| is a hint. Returns the base address of the memory and
-// the page size via |allocated| on success. Returns nullptr on failure.
-V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT byte* AllocatePage(v8::PageAllocator* page_allocator,
-                                         void* address, size_t* allocated);
-
 // Function that may release reserved memory regions to allow failed allocations
 // to succeed. |length| is the amount of memory needed. Returns |true| if memory
 // could be released, false otherwise.
diff --git a/deps/v8/src/utils/utils.h b/deps/v8/src/utils/utils.h
index 20d85aae106a30..27d3d5ef217bdc 100644
--- a/deps/v8/src/utils/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -20,10 +20,13 @@
 #include "src/base/platform/platform.h"
 #include "src/base/v8-fallthrough.h"
 #include "src/common/globals.h"
-#include "src/third_party/siphash/halfsiphash.h"
 #include "src/utils/allocation.h"
 #include "src/utils/vector.h"
 
+#if defined(V8_USE_SIPHASH)
+#include "src/third_party/siphash/halfsiphash.h"
+#endif
+
 #if defined(V8_OS_AIX)
 #include <fenv.h>  // NOLINT(build/c++11)
 #endif
@@ -302,29 +305,36 @@ T SaturateSub(T a, T b) {
 // ----------------------------------------------------------------------------
 // BitField is a helper template for encoding and decoding bitfields with
 // unsigned content.
+// Instantiate BitFields via 'using', which is cheaper than deriving a new
+// class:
+// using MyBitField = BitField<int, 4, 2, MyEnum>;
+// The BitField class is final to enforce this style over derivation.
 
 template <class T, int shift, int size, class U = uint32_t>
-class BitField {
+class BitField final {
  public:
   STATIC_ASSERT(std::is_unsigned<U>::value);
   STATIC_ASSERT(shift < 8 * sizeof(U));  // Otherwise shifts by {shift} are UB.
   STATIC_ASSERT(size < 8 * sizeof(U));   // Otherwise shifts by {size} are UB.
   STATIC_ASSERT(shift + size <= 8 * sizeof(U));
+  STATIC_ASSERT(size > 0);
 
   using FieldType = T;
 
   // A type U mask of bit field.  To use all bits of a type U of x bits
   // in a bitfield without compiler warnings we have to compute 2^x
   // without using a shift count of x in the computation.
-  static constexpr U kShift = shift;
-  static constexpr U kSize = size;
+  static constexpr int kShift = shift;
+  static constexpr int kSize = size;
   static constexpr U kMask = ((U{1} << kShift) << kSize) - (U{1} << kShift);
-  static constexpr U kNext = kShift + kSize;
+  static constexpr int kLastUsedBit = kShift + kSize - 1;
   static constexpr U kNumValues = U{1} << kSize;
 
   // Value for the field with all bits set.
   static constexpr T kMax = static_cast<T>(kNumValues - 1);
 
+  template <class T2, int size2>
+  using Next = BitField<T2, kShift + kSize, size2, U>;
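+  // Successive fields can be chained without manual shift bookkeeping, e.g.
+  // (illustrative):
+  //   using FirstField = BitField<int, 0, 3>;
+  //   using SecondField = FirstField::Next<bool, 1>;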
+
   // Tells whether the provided value fits into the bit field.
   static constexpr bool is_valid(T value) {
     return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
@@ -892,7 +902,8 @@ class BailoutId {
 
 // Our version of printf().
 V8_EXPORT_PRIVATE void PRINTF_FORMAT(1, 2) PrintF(const char* format, ...);
-void PRINTF_FORMAT(2, 3) PrintF(FILE* out, const char* format, ...);
+V8_EXPORT_PRIVATE void PRINTF_FORMAT(2, 3)
+    PrintF(FILE* out, const char* format, ...);
 
 // Prepends the current process ID to the output.
 void PRINTF_FORMAT(1, 2) PrintPID(const char* format, ...);
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 57a157d3a7f607..dc68267825cde1 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -210,7 +210,9 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
 
 void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                      uint32_t offset) {
-  LoadFromInstance(dst, offset, kTaggedSize);
+  DCHECK_LE(offset, kMaxInt);
+  Ldr(dst, liftoff::GetInstanceOperand());
+  LoadTaggedPointerField(dst, MemOperand(dst, offset));
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 7a87ae1a956a4d..02de06763c10cd 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -1606,32 +1606,35 @@ class LiftoffCompiler {
 
   void GenerateRuntimeCall(Runtime::FunctionId runtime_function, int num_args,
                            Register* args) {
-    auto call_descriptor = compiler::Linkage::GetRuntimeCallDescriptor(
-        compilation_zone_, runtime_function, num_args,
-        compiler::Operator::kNoProperties, compiler::CallDescriptor::kNoFlags);
     // Currently, only one argument is supported. More arguments require some
     // caution for the parallel register moves (reuse StackTransferRecipe).
     DCHECK_EQ(1, num_args);
+#ifdef DEBUG
+    auto call_descriptor = compiler::Linkage::GetRuntimeCallDescriptor(
+        compilation_zone_, runtime_function, num_args,
+        compiler::Operator::kNoProperties, compiler::CallDescriptor::kNoFlags);
     constexpr size_t kInputShift = 1;  // Input 0 is the call target.
     compiler::LinkageLocation param_loc =
         call_descriptor->GetInputLocation(kInputShift);
-    if (param_loc.IsRegister()) {
-      Register reg = Register::from_code(param_loc.AsRegister());
-      __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]),
-              LiftoffAssembler::kWasmIntPtr);
-    } else {
-      DCHECK(param_loc.IsCallerFrameSlot());
-      LiftoffStackSlots stack_slots(&asm_);
-      stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
-                                                 LiftoffRegister(args[0])));
-      stack_slots.Construct();
-    }
+    // Runtime calls take their arguments on the stack.
+    DCHECK(param_loc.IsCallerFrameSlot());
+#endif
+    LiftoffStackSlots stack_slots(&asm_);
+    stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
+                                               LiftoffRegister(args[0])));
+    stack_slots.Construct();
 
     // Set context to "no context" for the runtime call.
     __ TurboAssembler::Move(kContextRegister,
                             Smi::FromInt(Context::kNoContext));
     Register centry = kJavaScriptCallCodeStartRegister;
-    LOAD_TAGGED_PTR_INSTANCE_FIELD(centry, CEntryStub);
+    LOAD_INSTANCE_FIELD(centry, IsolateRoot, kSystemPointerSize);
+    // All cache registers are spilled and there are no register arguments.
+    LiftoffRegList pinned;
+    auto centry_id =
+        Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+    __ LoadTaggedPointer(centry, centry, no_reg,
+                         IsolateData::builtin_slot_offset(centry_id), pinned);
     __ CallRuntimeWithCEntry(runtime_function, centry);
     safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
   }
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 86bba189b837b8..e812dd7994fdf7 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -29,6 +29,9 @@
 #include "include/libplatform/libplatform.h"
 #include "src/api/api-inl.h"
 #include "src/compiler/wasm-compiler.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/managed.h"
+#include "src/objects/stack-frame-info-inl.h"
 #include "src/wasm/leb-helper.h"
 #include "src/wasm/module-instantiate.h"
 #include "src/wasm/wasm-arguments.h"
@@ -37,6 +40,10 @@
 #include "src/wasm/wasm-result.h"
 #include "src/wasm/wasm-serialization.h"
 
+#ifdef WASM_API_DEBUG
+#error "WASM_API_DEBUG is unsupported"
+#endif
+
 namespace wasm {
 
 namespace {
@@ -98,35 +105,36 @@ Name GetNameFromWireBytes(const i::wasm::WireBytesRef& ref,
                           const i::Vector<const uint8_t>& wire_bytes) {
   DCHECK_LE(ref.offset(), wire_bytes.length());
   DCHECK_LE(ref.end_offset(), wire_bytes.length());
+  if (ref.length() == 0) return Name::make();
   Name name = Name::make_uninitialized(ref.length());
   std::memcpy(name.get(), wire_bytes.begin() + ref.offset(), ref.length());
   return name;
 }
 
-own<FuncType*> FunctionSigToFuncType(const i::wasm::FunctionSig* sig) {
+own<FuncType> FunctionSigToFuncType(const i::wasm::FunctionSig* sig) {
   size_t param_count = sig->parameter_count();
-  vec<ValType*> params = vec<ValType*>::make_uninitialized(param_count);
+  ownvec<ValType> params = ownvec<ValType>::make_uninitialized(param_count);
   for (size_t i = 0; i < param_count; i++) {
     params[i] = ValType::make(V8ValueTypeToWasm(sig->GetParam(i)));
   }
   size_t return_count = sig->return_count();
-  vec<ValType*> results = vec<ValType*>::make_uninitialized(return_count);
+  ownvec<ValType> results = ownvec<ValType>::make_uninitialized(return_count);
   for (size_t i = 0; i < return_count; i++) {
     results[i] = ValType::make(V8ValueTypeToWasm(sig->GetReturn(i)));
   }
   return FuncType::make(std::move(params), std::move(results));
 }
 
-own<ExternType*> GetImportExportType(const i::wasm::WasmModule* module,
-                                     const i::wasm::ImportExportKindCode kind,
-                                     const uint32_t index) {
+own<ExternType> GetImportExportType(const i::wasm::WasmModule* module,
+                                    const i::wasm::ImportExportKindCode kind,
+                                    const uint32_t index) {
   switch (kind) {
     case i::wasm::kExternalFunction: {
       return FunctionSigToFuncType(module->functions[index].sig);
     }
     case i::wasm::kExternalTable: {
       const i::wasm::WasmTable& table = module->tables[index];
-      own<ValType*> elem = ValType::make(V8ValueTypeToWasm(table.type));
+      own<ValType> elem = ValType::make(V8ValueTypeToWasm(table.type));
       Limits limits(table.initial_size,
                     table.has_maximum_size ? table.maximum_size : -1);
       return TableType::make(std::move(elem), limits);
@@ -139,7 +147,7 @@ own<ExternType*> GetImportExportType(const i::wasm::WasmModule* module,
     }
     case i::wasm::kExternalGlobal: {
       const i::wasm::WasmGlobal& global = module->globals[index];
-      own<ValType*> content = ValType::make(V8ValueTypeToWasm(global.type));
+      own<ValType> content = ValType::make(V8ValueTypeToWasm(global.type));
       Mutability mutability = global.mutability ? VAR : CONST;
       return GlobalType::make(std::move(content), mutability);
     }
@@ -187,14 +195,6 @@ auto seal(const typename implement<C>::type* x) -> const C* {
   return reinterpret_cast<const C*>(x);
 }
 
-#ifdef DEBUG
-template <class T>
-void vec<T>::make_data() {}
-
-template <class T>
-void vec<T>::free_data() {}
-#endif
-
 ///////////////////////////////////////////////////////////////////////////////
 // Runtime Environment
 
@@ -214,8 +214,8 @@ Config::~Config() { impl(this)->~ConfigImpl(); }
 
 void Config::operator delete(void* p) { ::operator delete(p); }
 
-auto Config::make() -> own<Config*> {
-  return own<Config*>(seal<Config>(new (std::nothrow) ConfigImpl()));
+auto Config::make() -> own<Config> {
+  return own<Config>(seal<Config>(new (std::nothrow) ConfigImpl()));
 }
 
 // Engine
@@ -247,13 +247,13 @@ Engine::~Engine() { impl(this)->~EngineImpl(); }
 
 void Engine::operator delete(void* p) { ::operator delete(p); }
 
-auto Engine::make(own<Config*>&& config) -> own<Engine*> {
+auto Engine::make(own<Config>&& config) -> own<Engine> {
   i::FLAG_expose_gc = true;
   i::FLAG_experimental_wasm_anyref = true;
   i::FLAG_experimental_wasm_bigint = true;
   i::FLAG_experimental_wasm_mv = true;
   auto engine = new (std::nothrow) EngineImpl;
-  if (!engine) return own<Engine*>();
+  if (!engine) return own<Engine>();
   engine->platform = v8::platform::NewDefaultPlatform();
   v8::V8::InitializePlatform(engine->platform.get());
   v8::V8::Initialize();
@@ -273,6 +273,38 @@ StoreImpl::~StoreImpl() {
   delete create_params_.array_buffer_allocator;
 }
 
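+// Pairs an embedder-provided host-info pointer with its finalizer. Wrapped in
+// a Managed<ManagedData>, its destructor (and hence the finalizer) runs when
+// the wrapper is garbage-collected.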
+struct ManagedData {
+  ManagedData(void* info, void (*finalizer)(void*))
+      : info(info), finalizer(finalizer) {}
+
+  ~ManagedData() {
+    if (finalizer) (*finalizer)(info);
+  }
+
+  void* info;
+  void (*finalizer)(void*);
+};
+
+void StoreImpl::SetHostInfo(i::Handle<i::Object> object, void* info,
+                            void (*finalizer)(void*)) {
+  i::HandleScope scope(i_isolate());
+  // Ideally we would specify the total size kept alive by {info} here,
+  // but all we get from the embedder is a {void*}, so our best estimate
+  // is the size of the metadata.
+  size_t estimated_size = sizeof(ManagedData);
+  i::Handle<i::Object> wrapper = i::Managed<ManagedData>::FromRawPtr(
+      i_isolate(), estimated_size, new ManagedData(info, finalizer));
+  int32_t hash = object->GetOrCreateHash(i_isolate()).value();
+  i::JSWeakCollection::Set(host_info_map_, object, wrapper, hash);
+}
+
+void* StoreImpl::GetHostInfo(i::Handle<i::Object> key) {
+  i::Object raw =
+      i::EphemeronHashTable::cast(host_info_map_->table()).Lookup(key);
+  if (raw.IsTheHole(i_isolate())) return nullptr;
+  return i::Managed<ManagedData>::cast(raw).raw()->info;
+}
+
 template <>
 struct implement<Store> {
   using type = StoreImpl;
@@ -282,33 +314,40 @@ Store::~Store() { impl(this)->~StoreImpl(); }
 
 void Store::operator delete(void* p) { ::operator delete(p); }
 
-auto Store::make(Engine*) -> own<Store*> {
+auto Store::make(Engine*) -> own<Store> {
   auto store = make_own(new (std::nothrow) StoreImpl());
-  if (!store) return own<Store*>();
+  if (!store) return own<Store>();
 
   // Create isolate.
   store->create_params_.array_buffer_allocator =
       v8::ArrayBuffer::Allocator::NewDefaultAllocator();
-  auto isolate = v8::Isolate::New(store->create_params_);
-  if (!isolate) return own<Store*>();
+  v8::Isolate* isolate = v8::Isolate::New(store->create_params_);
+  if (!isolate) return own<Store>();
+  store->isolate_ = isolate;
+  isolate->SetData(0, store.get());
+  // We intentionally do not call isolate->Enter() here, because that would
+  // prevent embedders from using stores with overlapping but non-nested
+  // lifetimes. The consequence is that Isolate::Current() is dysfunctional
+  // and hence must not be called by anything reachable via this file.
 
   {
     v8::HandleScope handle_scope(isolate);
 
     // Create context.
-    auto context = v8::Context::New(isolate);
-    if (context.IsEmpty()) return own<Store*>();
-    v8::Context::Scope context_scope(context);
-
-    store->isolate_ = isolate;
+    v8::Local<v8::Context> context = v8::Context::New(isolate);
+    if (context.IsEmpty()) return own<Store>();
+    context->Enter();  // The Exit() call is in ~StoreImpl.
     store->context_ = v8::Eternal<v8::Context>(isolate, context);
+
+    // Create weak map for Refs with host info.
+    i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+    store->host_info_map_ = i_isolate->global_handles()->Create(
+        *i_isolate->factory()->NewJSWeakMap());
   }
-  // We intentionally do not call isolate->Enter() here, because that would
-  // prevent embedders from using stores with overlapping but non-nested
-  // lifetimes. The consequence is that Isolate::Current() is dysfunctional
-  // and hence must not be called by anything reachable via this file.
-  store->context()->Enter();
-  isolate->SetData(0, store.get());
+  // We want stack traces for traps.
+  constexpr int kStackLimit = 10;
+  isolate->SetCaptureStackTraceForUncaughtExceptions(true, kStackLimit,
+                                                     v8::StackTrace::kOverview);
 
   return make_own(seal<Store>(store.release()));
 }
@@ -329,21 +368,46 @@ struct implement<ValType> {
   using type = ValTypeImpl;
 };
 
-ValTypeImpl* valtypes[] = {
-    new ValTypeImpl(I32), new ValTypeImpl(I64),    new ValTypeImpl(F32),
-    new ValTypeImpl(F64), new ValTypeImpl(ANYREF), new ValTypeImpl(FUNCREF),
-};
+ValTypeImpl* valtype_i32 = new ValTypeImpl(I32);
+ValTypeImpl* valtype_i64 = new ValTypeImpl(I64);
+ValTypeImpl* valtype_f32 = new ValTypeImpl(F32);
+ValTypeImpl* valtype_f64 = new ValTypeImpl(F64);
+ValTypeImpl* valtype_anyref = new ValTypeImpl(ANYREF);
+ValTypeImpl* valtype_funcref = new ValTypeImpl(FUNCREF);
 
 ValType::~ValType() {}
 
 void ValType::operator delete(void*) {}
 
-auto ValType::make(ValKind k) -> own<ValType*> {
-  auto result = seal<ValType>(valtypes[k]);
-  return own<ValType*>(result);
+own<ValType> ValType::make(ValKind k) {
+  ValTypeImpl* valtype;
+  switch (k) {
+    case I32:
+      valtype = valtype_i32;
+      break;
+    case I64:
+      valtype = valtype_i64;
+      break;
+    case F32:
+      valtype = valtype_f32;
+      break;
+    case F64:
+      valtype = valtype_f64;
+      break;
+    case ANYREF:
+      valtype = valtype_anyref;
+      break;
+    case FUNCREF:
+      valtype = valtype_funcref;
+      break;
+    default:
+      // TODO(wasm+): support new value types
+      UNREACHABLE();
+  }
+  return own<ValType>(seal<ValType>(valtype));
 }
 
-auto ValType::copy() const -> own<ValType*> { return make(kind()); }
+auto ValType::copy() const -> own<ValType> { return make(kind()); }
 
 auto ValType::kind() const -> ValKind { return impl(this)->kind; }
 
@@ -365,7 +429,7 @@ ExternType::~ExternType() { impl(this)->~ExternTypeImpl(); }
 
 void ExternType::operator delete(void* p) { ::operator delete(p); }
 
-auto ExternType::copy() const -> own<ExternType*> {
+auto ExternType::copy() const -> own<ExternType> {
   switch (kind()) {
     case EXTERN_FUNC:
       return func()->copy();
@@ -383,11 +447,11 @@ auto ExternType::kind() const -> ExternKind { return impl(this)->kind; }
 // Function Types
 
 struct FuncTypeImpl : ExternTypeImpl {
-  vec<ValType*> params;
-  vec<ValType*> results;
+  ownvec<ValType> params;
+  ownvec<ValType> results;
 
-  FuncTypeImpl(vec<ValType*>& params,   // NOLINT(runtime/references)
-               vec<ValType*>& results)  // NOLINT(runtime/references)
+  FuncTypeImpl(ownvec<ValType>& params,   // NOLINT(runtime/references)
+               ownvec<ValType>& results)  // NOLINT(runtime/references)
       : ExternTypeImpl(EXTERN_FUNC),
         params(std::move(params)),
         results(std::move(results)) {}
@@ -402,23 +466,23 @@ struct implement<FuncType> {
 
 FuncType::~FuncType() {}
 
-auto FuncType::make(vec<ValType*>&& params, vec<ValType*>&& results)
-    -> own<FuncType*> {
+auto FuncType::make(ownvec<ValType>&& params, ownvec<ValType>&& results)
+    -> own<FuncType> {
   return params && results
-             ? own<FuncType*>(seal<FuncType>(new (std::nothrow)
-                                                 FuncTypeImpl(params, results)))
-             : own<FuncType*>();
+             ? own<FuncType>(seal<FuncType>(new (std::nothrow)
+                                                FuncTypeImpl(params, results)))
+             : own<FuncType>();
 }
 
-auto FuncType::copy() const -> own<FuncType*> {
-  return make(params().copy(), results().copy());
+auto FuncType::copy() const -> own<FuncType> {
+  return make(params().deep_copy(), results().deep_copy());
 }
 
-auto FuncType::params() const -> const vec<ValType*>& {
+auto FuncType::params() const -> const ownvec<ValType>& {
   return impl(this)->params;
 }
 
-auto FuncType::results() const -> const vec<ValType*>& {
+auto FuncType::results() const -> const ownvec<ValType>& {
   return impl(this)->results;
 }
 
@@ -437,10 +501,10 @@ auto ExternType::func() const -> const FuncType* {
 // Global Types
 
 struct GlobalTypeImpl : ExternTypeImpl {
-  own<ValType*> content;
+  own<ValType> content;
   Mutability mutability;
 
-  GlobalTypeImpl(own<ValType*>& content,  // NOLINT(runtime/references)
+  GlobalTypeImpl(own<ValType>& content,  // NOLINT(runtime/references)
                  Mutability mutability)
       : ExternTypeImpl(EXTERN_GLOBAL),
         content(std::move(content)),
@@ -456,14 +520,14 @@ struct implement<GlobalType> {
 
 GlobalType::~GlobalType() {}
 
-auto GlobalType::make(own<ValType*>&& content, Mutability mutability)
-    -> own<GlobalType*> {
-  return content ? own<GlobalType*>(seal<GlobalType>(
+auto GlobalType::make(own<ValType>&& content, Mutability mutability)
+    -> own<GlobalType> {
+  return content ? own<GlobalType>(seal<GlobalType>(
                        new (std::nothrow) GlobalTypeImpl(content, mutability)))
-                 : own<GlobalType*>();
+                 : own<GlobalType>();
 }
 
-auto GlobalType::copy() const -> own<GlobalType*> {
+auto GlobalType::copy() const -> own<GlobalType> {
   return make(content()->copy(), mutability());
 }
 
@@ -490,10 +554,10 @@ auto ExternType::global() const -> const GlobalType* {
 // Table Types
 
 struct TableTypeImpl : ExternTypeImpl {
-  own<ValType*> element;
+  own<ValType> element;
   Limits limits;
 
-  TableTypeImpl(own<ValType*>& element,  // NOLINT(runtime/references)
+  TableTypeImpl(own<ValType>& element,  // NOLINT(runtime/references)
                 Limits limits)
       : ExternTypeImpl(EXTERN_TABLE),
         element(std::move(element)),
@@ -509,14 +573,13 @@ struct implement<TableType> {
 
 TableType::~TableType() {}
 
-auto TableType::make(own<ValType*>&& element, Limits limits)
-    -> own<TableType*> {
-  return element ? own<TableType*>(seal<TableType>(
+auto TableType::make(own<ValType>&& element, Limits limits) -> own<TableType> {
+  return element ? own<TableType>(seal<TableType>(
                        new (std::nothrow) TableTypeImpl(element, limits)))
-                 : own<TableType*>();
+                 : own<TableType>();
 }
 
-auto TableType::copy() const -> own<TableType*> {
+auto TableType::copy() const -> own<TableType> {
   return make(element()->copy(), limits());
 }
 
@@ -556,12 +619,12 @@ struct implement<MemoryType> {
 
 MemoryType::~MemoryType() {}
 
-auto MemoryType::make(Limits limits) -> own<MemoryType*> {
-  return own<MemoryType*>(
+auto MemoryType::make(Limits limits) -> own<MemoryType> {
+  return own<MemoryType>(
       seal<MemoryType>(new (std::nothrow) MemoryTypeImpl(limits)));
 }
 
-auto MemoryType::copy() const -> own<MemoryType*> {
+auto MemoryType::copy() const -> own<MemoryType> {
   return MemoryType::make(limits());
 }
 
@@ -584,11 +647,11 @@ auto ExternType::memory() const -> const MemoryType* {
 struct ImportTypeImpl {
   Name module;
   Name name;
-  own<ExternType*> type;
+  own<ExternType> type;
 
-  ImportTypeImpl(Name& module,            // NOLINT(runtime/references)
-                 Name& name,              // NOLINT(runtime/references)
-                 own<ExternType*>& type)  // NOLINT(runtime/references)
+  ImportTypeImpl(Name& module,           // NOLINT(runtime/references)
+                 Name& name,             // NOLINT(runtime/references)
+                 own<ExternType>& type)  // NOLINT(runtime/references)
       : module(std::move(module)),
         name(std::move(name)),
         type(std::move(type)) {}
@@ -605,15 +668,15 @@ ImportType::~ImportType() { impl(this)->~ImportTypeImpl(); }
 
 void ImportType::operator delete(void* p) { ::operator delete(p); }
 
-auto ImportType::make(Name&& module, Name&& name, own<ExternType*>&& type)
-    -> own<ImportType*> {
+auto ImportType::make(Name&& module, Name&& name, own<ExternType>&& type)
+    -> own<ImportType> {
   return module && name && type
-             ? own<ImportType*>(seal<ImportType>(
+             ? own<ImportType>(seal<ImportType>(
                    new (std::nothrow) ImportTypeImpl(module, name, type)))
-             : own<ImportType*>();
+             : own<ImportType>();
 }
 
-auto ImportType::copy() const -> own<ImportType*> {
+auto ImportType::copy() const -> own<ImportType> {
   return make(module().copy(), name().copy(), type()->copy());
 }
 
@@ -629,10 +692,10 @@ auto ImportType::type() const -> const ExternType* {
 
 struct ExportTypeImpl {
   Name name;
-  own<ExternType*> type;
+  own<ExternType> type;
 
-  ExportTypeImpl(Name& name,              // NOLINT(runtime/references)
-                 own<ExternType*>& type)  // NOLINT(runtime/references)
+  ExportTypeImpl(Name& name,             // NOLINT(runtime/references)
+                 own<ExternType>& type)  // NOLINT(runtime/references)
       : name(std::move(name)), type(std::move(type)) {}
 
   ~ExportTypeImpl() {}
@@ -647,14 +710,13 @@ ExportType::~ExportType() { impl(this)->~ExportTypeImpl(); }
 
 void ExportType::operator delete(void* p) { ::operator delete(p); }
 
-auto ExportType::make(Name&& name, own<ExternType*>&& type)
-    -> own<ExportType*> {
-  return name && type ? own<ExportType*>(seal<ExportType>(
+auto ExportType::make(Name&& name, own<ExternType>&& type) -> own<ExportType> {
+  return name && type ? own<ExportType>(seal<ExportType>(
                             new (std::nothrow) ExportTypeImpl(name, type)))
-                      : own<ExportType*>();
+                      : own<ExportType>();
 }
 
-auto ExportType::copy() const -> own<ExportType*> {
+auto ExportType::copy() const -> own<ExportType> {
   return make(name().copy(), type()->copy());
 }
 
@@ -680,7 +742,7 @@ i::Handle<i::String> VecToString(i::Isolate* isolate,
 template <class Ref, class JSType>
 class RefImpl {
  public:
-  static own<Ref*> make(StoreImpl* store, i::Handle<JSType> obj) {
+  static own<Ref> make(StoreImpl* store, i::Handle<JSType> obj) {
     RefImpl* self = new (std::nothrow) RefImpl();
     if (!self) return nullptr;
     i::Isolate* isolate = store->i_isolate();
@@ -688,17 +750,9 @@ class RefImpl {
     return make_own(seal<Ref>(self));
   }
 
-  void Reset() {
-    i::GlobalHandles::Destroy(location());
-    if (host_data_) {
-      if (host_data_->finalizer) {
-        host_data_->finalizer(host_data_->info);
-      }
-      delete host_data_;
-    }
-  }
+  ~RefImpl() { i::GlobalHandles::Destroy(location()); }
 
-  own<Ref*> copy() const { return make(store(), v8_object()); }
+  own<Ref> copy() const { return make(store(), v8_object()); }
 
   StoreImpl* store() const { return StoreImpl::get(isolate()); }
 
@@ -706,41 +760,20 @@ class RefImpl {
 
   i::Handle<JSType> v8_object() const { return i::Handle<JSType>::cast(val_); }
 
-  void* get_host_info() const {
-    if (host_data_ == nullptr) return nullptr;
-    return host_data_->info;
-  }
+  void* get_host_info() const { return store()->GetHostInfo(v8_object()); }
 
   void set_host_info(void* info, void (*finalizer)(void*)) {
-    host_data_ = new HostData(location(), info, finalizer);
-    i::GlobalHandles::MakeWeak(host_data_->location, host_data_, &v8_finalizer,
-                               v8::WeakCallbackType::kParameter);
+    store()->SetHostInfo(v8_object(), info, finalizer);
   }
 
  private:
-  struct HostData {
-    HostData(i::Address* location, void* info, void (*finalizer)(void*))
-        : location(location), info(info), finalizer(finalizer) {}
-    i::Address* location;
-    void* info;
-    void (*finalizer)(void*);
-  };
-
   RefImpl() {}
 
-  static void v8_finalizer(const v8::WeakCallbackInfo<void>& info) {
-    HostData* data = reinterpret_cast<HostData*>(info.GetParameter());
-    i::GlobalHandles::Destroy(data->location);
-    if (data->finalizer) (*data->finalizer)(data->info);
-    delete data;
-  }
-
   i::Address* location() const {
     return reinterpret_cast<i::Address*>(val_.address());
   }
 
   i::Handle<i::JSReceiver> val_;
-  HostData* host_data_ = nullptr;
 };
 
 template <>
@@ -749,13 +782,17 @@ struct implement<Ref> {
 };
 
 Ref::~Ref() {
-  impl(this)->Reset();
   delete impl(this);
 }
 
 void Ref::operator delete(void* p) {}
 
-auto Ref::copy() const -> own<Ref*> { return impl(this)->copy(); }
+auto Ref::copy() const -> own<Ref> { return impl(this)->copy(); }
+
+auto Ref::same(const Ref* that) const -> bool {
+  i::HandleScope handle_scope(impl(this)->isolate());
+  return impl(this)->v8_object()->SameValue(*impl(that)->v8_object());
+}
 
 auto Ref::get_host_info() const -> void* { return impl(this)->get_host_info(); }
 
@@ -766,6 +803,52 @@ void Ref::set_host_info(void* info, void (*finalizer)(void*)) {
 ///////////////////////////////////////////////////////////////////////////////
 // Runtime Objects
 
+// Frames
+
+namespace {
+
+struct FrameImpl {
+  FrameImpl(own<Instance>&& instance, uint32_t func_index, size_t func_offset,
+            size_t module_offset)
+      : instance(std::move(instance)),
+        func_index(func_index),
+        func_offset(func_offset),
+        module_offset(module_offset) {}
+
+  ~FrameImpl() {}
+
+  own<Instance> instance;
+  uint32_t func_index;
+  size_t func_offset;
+  size_t module_offset;
+};
+
+}  // namespace
+
+template <>
+struct implement<Frame> {
+  using type = FrameImpl;
+};
+
+Frame::~Frame() { impl(this)->~FrameImpl(); }
+
+void Frame::operator delete(void* p) { ::operator delete(p); }
+
+own<Frame> Frame::copy() const {
+  auto self = impl(this);
+  return own<Frame>(seal<Frame>(
+      new (std::nothrow) FrameImpl(self->instance->copy(), self->func_index,
+                                   self->func_offset, self->module_offset)));
+}
+
+Instance* Frame::instance() const { return impl(this)->instance.get(); }
+
+uint32_t Frame::func_index() const { return impl(this)->func_index; }
+
+size_t Frame::func_offset() const { return impl(this)->func_offset; }
+
+size_t Frame::module_offset() const { return impl(this)->module_offset; }
+
 // Traps
 
 template <>
@@ -775,9 +858,9 @@ struct implement<Trap> {
 
 Trap::~Trap() {}
 
-auto Trap::copy() const -> own<Trap*> { return impl(this)->copy(); }
+auto Trap::copy() const -> own<Trap> { return impl(this)->copy(); }
 
-auto Trap::make(Store* store_abs, const Message& message) -> own<Trap*> {
+auto Trap::make(Store* store_abs, const Message& message) -> own<Trap> {
   auto store = impl(store_abs);
   i::Isolate* isolate = store->i_isolate();
   i::HandleScope handle_scope(isolate);
@@ -801,6 +884,58 @@ auto Trap::message() const -> Message {
   return vec<byte_t>::adopt(length, utf8.release());
 }
 
+namespace {
+
+own<Instance> GetInstance(StoreImpl* store,
+                          i::Handle<i::WasmInstanceObject> instance);
+
+own<Frame> CreateFrameFromInternal(i::Handle<i::FixedArray> frames, int index,
+                                   i::Isolate* isolate, StoreImpl* store) {
+  i::Handle<i::StackTraceFrame> frame(
+      i::StackTraceFrame::cast(frames->get(index)), isolate);
+  i::Handle<i::WasmInstanceObject> instance =
+      i::StackTraceFrame::GetWasmInstance(frame);
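+  // For wasm frames, the StackTraceFrame accessors repurpose the "line
+  // number" slot to carry the function index and the "column number" slot to
+  // carry the module-relative offset.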
+  uint32_t func_index = i::StackTraceFrame::GetLineNumber(frame);
+  size_t func_offset = i::StackTraceFrame::GetFunctionOffset(frame);
+  size_t module_offset = i::StackTraceFrame::GetColumnNumber(frame);
+  return own<Frame>(seal<Frame>(new (std::nothrow) FrameImpl(
+      GetInstance(store, instance), func_index, func_offset, module_offset)));
+}
+
+}  // namespace
+
+own<Frame> Trap::origin() const {
+  i::Isolate* isolate = impl(this)->isolate();
+  i::HandleScope handle_scope(isolate);
+
+  i::Handle<i::JSMessageObject> message =
+      isolate->CreateMessage(impl(this)->v8_object(), nullptr);
+  i::Handle<i::FixedArray> frames(i::FixedArray::cast(message->stack_frames()),
+                                  isolate);
+  if (frames->length() == 0) {
+    return own<Frame>();
+  }
+  return CreateFrameFromInternal(frames, 0, isolate, impl(this)->store());
+}
+
+ownvec<Frame> Trap::trace() const {
+  i::Isolate* isolate = impl(this)->isolate();
+  i::HandleScope handle_scope(isolate);
+
+  i::Handle<i::JSMessageObject> message =
+      isolate->CreateMessage(impl(this)->v8_object(), nullptr);
+  i::Handle<i::FixedArray> frames(i::FixedArray::cast(message->stack_frames()),
+                                  isolate);
+  int num_frames = frames->length();
+  // {num_frames} can be 0; the code below can handle that case.
+  ownvec<Frame> result = ownvec<Frame>::make_uninitialized(num_frames);
+  for (int i = 0; i < num_frames; i++) {
+    result[i] =
+        CreateFrameFromInternal(frames, i, isolate, impl(this)->store());
+  }
+  return result;
+}
+
 // Foreign Objects
 
 template <>
@@ -810,9 +945,9 @@ struct implement<Foreign> {
 
 Foreign::~Foreign() {}
 
-auto Foreign::copy() const -> own<Foreign*> { return impl(this)->copy(); }
+auto Foreign::copy() const -> own<Foreign> { return impl(this)->copy(); }
 
-auto Foreign::make(Store* store_abs) -> own<Foreign*> {
+auto Foreign::make(Store* store_abs) -> own<Foreign> {
   StoreImpl* store = impl(store_abs);
   i::Isolate* isolate = store->i_isolate();
   i::HandleScope handle_scope(isolate);
@@ -831,7 +966,7 @@ struct implement<Module> {
 
 Module::~Module() {}
 
-auto Module::copy() const -> own<Module*> { return impl(this)->copy(); }
+auto Module::copy() const -> own<Module> { return impl(this)->copy(); }
 
 auto Module::validate(Store* store_abs, const vec<byte_t>& binary) -> bool {
   i::wasm::ModuleWireBytes bytes(
@@ -841,66 +976,60 @@ auto Module::validate(Store* store_abs, const vec<byte_t>& binary) -> bool {
   return isolate->wasm_engine()->SyncValidate(isolate, features, bytes);
 }
 
-class NopErrorThrower : public i::wasm::ErrorThrower {
- public:
-  explicit NopErrorThrower(i::Isolate* isolate)
-      : i::wasm::ErrorThrower(isolate, "ignored") {}
-  ~NopErrorThrower() { Reset(); }
-};
-
-auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module*> {
+auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module> {
   StoreImpl* store = impl(store_abs);
   i::Isolate* isolate = store->i_isolate();
   i::HandleScope scope(isolate);
   i::wasm::ModuleWireBytes bytes(
       {reinterpret_cast<const uint8_t*>(binary.get()), binary.size()});
   i::wasm::WasmFeatures features = i::wasm::WasmFeaturesFromIsolate(isolate);
-  NopErrorThrower thrower(isolate);
+  i::wasm::ErrorThrower thrower(isolate, "ignored");
   i::Handle<i::WasmModuleObject> module;
   if (!isolate->wasm_engine()
            ->SyncCompile(isolate, features, &thrower, bytes)
            .ToHandle(&module)) {
+    thrower.Reset();  // The API provides no way to expose the error.
     return nullptr;
   }
   return implement<Module>::type::make(store, module);
 }
 
-auto Module::imports() const -> vec<ImportType*> {
+auto Module::imports() const -> ownvec<ImportType> {
   const i::wasm::NativeModule* native_module =
       impl(this)->v8_object()->native_module();
   const i::wasm::WasmModule* module = native_module->module();
   const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
   const std::vector<i::wasm::WasmImport>& import_table = module->import_table;
   size_t size = import_table.size();
-  vec<ImportType*> imports = vec<ImportType*>::make_uninitialized(size);
+  ownvec<ImportType> imports = ownvec<ImportType>::make_uninitialized(size);
   for (uint32_t i = 0; i < size; i++) {
     const i::wasm::WasmImport& imp = import_table[i];
     Name module_name = GetNameFromWireBytes(imp.module_name, wire_bytes);
     Name name = GetNameFromWireBytes(imp.field_name, wire_bytes);
-    own<ExternType*> type = GetImportExportType(module, imp.kind, imp.index);
+    own<ExternType> type = GetImportExportType(module, imp.kind, imp.index);
     imports[i] = ImportType::make(std::move(module_name), std::move(name),
                                   std::move(type));
   }
   return imports;
 }
 
-vec<ExportType*> ExportsImpl(i::Handle<i::WasmModuleObject> module_obj) {
+ownvec<ExportType> ExportsImpl(i::Handle<i::WasmModuleObject> module_obj) {
   const i::wasm::NativeModule* native_module = module_obj->native_module();
   const i::wasm::WasmModule* module = native_module->module();
   const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
   const std::vector<i::wasm::WasmExport>& export_table = module->export_table;
   size_t size = export_table.size();
-  vec<ExportType*> exports = vec<ExportType*>::make_uninitialized(size);
+  ownvec<ExportType> exports = ownvec<ExportType>::make_uninitialized(size);
   for (uint32_t i = 0; i < size; i++) {
     const i::wasm::WasmExport& exp = export_table[i];
     Name name = GetNameFromWireBytes(exp.name, wire_bytes);
-    own<ExternType*> type = GetImportExportType(module, exp.kind, exp.index);
+    own<ExternType> type = GetImportExportType(module, exp.kind, exp.index);
     exports[i] = ExportType::make(std::move(name), std::move(type));
   }
   return exports;
 }
 
-auto Module::exports() const -> vec<ExportType*> {
+auto Module::exports() const -> ownvec<ExportType> {
   return ExportsImpl(impl(this)->v8_object());
 }
 
@@ -923,11 +1052,11 @@ auto Module::serialize() const -> vec<byte_t> {
           {reinterpret_cast<uint8_t*>(ptr), serial_size})) {
     buffer.reset();
   }
-  return std::move(buffer);
+  return buffer;
 }
 
 auto Module::deserialize(Store* store_abs, const vec<byte_t>& serialized)
-    -> own<Module*> {
+    -> own<Module> {
   StoreImpl* store = impl(store_abs);
   i::Isolate* isolate = store->i_isolate();
   i::HandleScope handle_scope(isolate);
@@ -963,13 +1092,12 @@ void Shared<Module>::operator delete(void* p) {
   ::operator delete(p);
 }
 
-auto Module::share() const -> own<Shared<Module>*> {
+auto Module::share() const -> own<Shared<Module>> {
   auto shared = seal<Shared<Module>>(new vec<byte_t>(serialize()));
   return make_own(shared);
 }
 
-auto Module::obtain(Store* store, const Shared<Module>* shared)
-    -> own<Module*> {
+auto Module::obtain(Store* store, const Shared<Module>* shared) -> own<Module> {
   return Module::deserialize(store, *impl(shared));
 }
 
@@ -982,7 +1110,7 @@ struct implement<Extern> {
 
 Extern::~Extern() {}
 
-auto Extern::copy() const -> own<Extern*> { return impl(this)->copy(); }
+auto Extern::copy() const -> own<Extern> { return impl(this)->copy(); }
 
 auto Extern::kind() const -> ExternKind {
   i::Handle<i::JSReceiver> obj = impl(this)->v8_object();
@@ -995,7 +1123,7 @@ auto Extern::kind() const -> ExternKind {
   UNREACHABLE();
 }
 
-auto Extern::type() const -> own<ExternType*> {
+auto Extern::type() const -> own<ExternType> {
   switch (kind()) {
     case EXTERN_FUNC:
       return func()->type();
@@ -1053,11 +1181,11 @@ struct implement<Func> {
 
 Func::~Func() {}
 
-auto Func::copy() const -> own<Func*> { return impl(this)->copy(); }
+auto Func::copy() const -> own<Func> { return impl(this)->copy(); }
 
 struct FuncData {
   Store* store;
-  own<FuncType*> type;
+  own<FuncType> type;
   enum Kind { kCallback, kCallbackWithEnv } kind;
   union {
     Func::callback callback;
@@ -1077,8 +1205,7 @@ struct FuncData {
     if (finalizer) (*finalizer)(env);
   }
 
-  static i::Address v8_callback(void* data, i::Address argv);
-  static void finalize_func_data(void* data);
+  static i::Address v8_callback(i::Address host_data_foreign, i::Address argv);
 };
 
 namespace {
@@ -1111,11 +1238,11 @@ class SignatureHelper : public i::AllStatic {
     return sig;
   }
 
-  static own<FuncType*> Deserialize(i::PodArray<i::wasm::ValueType> sig) {
+  static own<FuncType> Deserialize(i::PodArray<i::wasm::ValueType> sig) {
     int result_arity = ResultArity(sig);
     int param_arity = sig.length() - result_arity - 1;
-    vec<ValType*> results = vec<ValType*>::make_uninitialized(result_arity);
-    vec<ValType*> params = vec<ValType*>::make_uninitialized(param_arity);
+    ownvec<ValType> results = ownvec<ValType>::make_uninitialized(result_arity);
+    ownvec<ValType> params = ownvec<ValType>::make_uninitialized(param_arity);
 
     int i = 0;
     for (; i < result_arity; ++i) {
@@ -1146,29 +1273,30 @@ class SignatureHelper : public i::AllStatic {
   }
 };
 
-auto make_func(Store* store_abs, FuncData* data) -> own<Func*> {
+auto make_func(Store* store_abs, FuncData* data) -> own<Func> {
   auto store = impl(store_abs);
   i::Isolate* isolate = store->i_isolate();
   i::HandleScope handle_scope(isolate);
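+  // Hand ownership of {data} to a Managed object: the GC deletes the FuncData
+  // when the function becomes unreachable, which replaces the explicit
+  // finalizer previously installed via set_host_info().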
+  i::Handle<i::Managed<FuncData>> embedder_data =
+      i::Managed<FuncData>::FromRawPtr(isolate, sizeof(FuncData), data);
   i::Handle<i::WasmCapiFunction> function = i::WasmCapiFunction::New(
-      isolate, reinterpret_cast<i::Address>(&FuncData::v8_callback), data,
-      SignatureHelper::Serialize(isolate, data->type.get()));
+      isolate, reinterpret_cast<i::Address>(&FuncData::v8_callback),
+      embedder_data, SignatureHelper::Serialize(isolate, data->type.get()));
   auto func = implement<Func>::type::make(store, function);
-  func->set_host_info(data, &FuncData::finalize_func_data);
   return func;
 }
 
 }  // namespace
 
 auto Func::make(Store* store, const FuncType* type, Func::callback callback)
-    -> own<Func*> {
+    -> own<Func> {
   auto data = new FuncData(store, type, FuncData::kCallback);
   data->callback = callback;
   return make_func(store, data);
 }
 
 auto Func::make(Store* store, const FuncType* type, callback_with_env callback,
-                void* env, void (*finalizer)(void*)) -> own<Func*> {
+                void* env, void (*finalizer)(void*)) -> own<Func> {
   auto data = new FuncData(store, type, FuncData::kCallbackWithEnv);
   data->callback_with_env = callback;
   data->env = env;
@@ -1176,7 +1304,7 @@ auto Func::make(Store* store, const FuncType* type, callback_with_env callback,
   return make_func(store, data);
 }
 
-auto Func::type() const -> own<FuncType*> {
+auto Func::type() const -> own<FuncType> {
   i::Handle<i::JSFunction> func = impl(this)->v8_object();
   if (i::WasmCapiFunction::IsWasmCapiFunction(*func)) {
     return SignatureHelper::Deserialize(SignatureHelper::GetSig(func));
@@ -1216,6 +1344,37 @@ auto Func::result_arity() const -> size_t {
 
 namespace {
 
+own<Ref> V8RefValueToWasm(StoreImpl* store, i::Handle<i::Object> value) {
+  if (value->IsNull(store->i_isolate())) return nullptr;
+  return implement<Ref>::type::make(store,
+                                    i::Handle<i::JSReceiver>::cast(value));
+}
+
+i::Handle<i::Object> WasmRefToV8(i::Isolate* isolate, const Ref* ref) {
+  if (ref == nullptr) return i::ReadOnlyRoots(isolate).null_value_handle();
+  return impl(ref)->v8_object();
+}
+
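+// The raw call target is a code address, which has to be disguised as a
+// tagged value before it can be cached in an object field: on 64-bit
+// platforms without pointer compression it fits in a pseudo-Smi (shifted by
+// the tag size, so the GC leaves it alone); otherwise it would not fit and is
+// boxed in a Foreign heap object instead.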
+i::Handle<i::Object> CallTargetForCaching(i::Isolate* isolate,
+                                          i::Address real_call_target) {
+  if (i::kTaggedSize == i::kInt32Size) {
+    return isolate->factory()->NewForeign(real_call_target);
+  } else {
+    // 64-bit uncompressed platform.
+    return i::handle(i::Smi((real_call_target << i::kSmiTagSize) | i::kSmiTag),
+                     isolate);
+  }
+}
+
+i::Address CallTargetFromCache(i::Object cached_call_target) {
+  if (i::kTaggedSize == i::kInt32Size) {
+    return i::Foreign::cast(cached_call_target).foreign_address();
+  } else {
+    // 64-bit uncompressed platform.
+    return cached_call_target.ptr() >> i::kSmiTagSize;
+  }
+}
+
 void PrepareFunctionData(i::Isolate* isolate,
                          i::Handle<i::WasmExportedFunctionData> function_data,
                          i::wasm::FunctionSig* sig) {
@@ -1228,16 +1387,16 @@ void PrepareFunctionData(i::Isolate* isolate,
   // Compute packed args size.
   function_data->set_packed_args_size(
       i::wasm::CWasmArgumentsPacker::TotalSize(sig));
-  // Get call target (function table offset). This is an Address, we store
-  // it as a pseudo-Smi by shifting it by one bit, so the GC leaves it alone.
-  i::Address call_target =
-      function_data->instance().GetCallTarget(function_data->function_index());
-  i::Smi smi_target((call_target << i::kSmiTagSize) | i::kSmiTag);
-  function_data->set_wasm_call_target(smi_target);
+  // Get call target (function table offset), and wrap it as a cacheable object
+  // (pseudo-Smi or Foreign, depending on platform).
+  i::Handle<i::Object> call_target = CallTargetForCaching(
+      isolate,
+      function_data->instance().GetCallTarget(function_data->function_index()));
+  function_data->set_wasm_call_target(*call_target);
 }
 
 void PushArgs(i::wasm::FunctionSig* sig, const Val args[],
-              i::wasm::CWasmArgumentsPacker* packer) {
+              i::wasm::CWasmArgumentsPacker* packer, StoreImpl* store) {
   for (size_t i = 0; i < sig->parameter_count(); i++) {
     i::wasm::ValueType type = sig->GetParam(i);
     switch (type) {
@@ -1255,7 +1414,7 @@ void PushArgs(i::wasm::FunctionSig* sig, const Val args[],
         break;
       case i::wasm::kWasmAnyRef:
       case i::wasm::kWasmFuncRef:
-        packer->Push(impl(args[i].ref())->v8_object()->ptr());
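+        // Refs travel through the packer as raw tagged pointers; a null Ref
+        // is mapped to the JS null value by WasmRefToV8().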
+        packer->Push(WasmRefToV8(store->i_isolate(), args[i].ref())->ptr());
         break;
       case i::wasm::kWasmExnRef:
         // TODO(jkummerow): Implement these.
@@ -1288,13 +1447,8 @@ void PopArgs(i::wasm::FunctionSig* sig, Val results[],
       case i::wasm::kWasmAnyRef:
       case i::wasm::kWasmFuncRef: {
         i::Address raw = packer->Pop<i::Address>();
-        if (raw == i::kNullAddress) {
-          results[i] = Val(nullptr);
-        } else {
-          i::JSReceiver raw_obj = i::JSReceiver::cast(i::Object(raw));
-          i::Handle<i::JSReceiver> obj(raw_obj, store->i_isolate());
-          results[i] = Val(implement<Ref>::type::make(store, obj));
-        }
+        i::Handle<i::Object> obj(i::Object(raw), store->i_isolate());
+        results[i] = Val(V8RefValueToWasm(store, obj));
         break;
       }
       case i::wasm::kWasmExnRef:
@@ -1307,9 +1461,9 @@ void PopArgs(i::wasm::FunctionSig* sig, Val results[],
   }
 }
 
-own<Trap*> CallWasmCapiFunction(i::WasmCapiFunctionData data, const Val args[],
-                                Val results[]) {
-  FuncData* func_data = reinterpret_cast<FuncData*>(data.embedder_data());
+own<Trap> CallWasmCapiFunction(i::WasmCapiFunctionData data, const Val args[],
+                               Val results[]) {
+  FuncData* func_data = i::Managed<FuncData>::cast(data.embedder_data()).raw();
   if (func_data->kind == FuncData::kCallback) {
     return (func_data->callback)(args, results);
   }
@@ -1317,9 +1471,28 @@ own<Trap*> CallWasmCapiFunction(i::WasmCapiFunctionData data, const Val args[],
   return (func_data->callback_with_env)(func_data->env, args, results);
 }
 
+i::Handle<i::JSReceiver> GetProperException(
+    i::Isolate* isolate, i::Handle<i::Object> maybe_exception) {
+  if (maybe_exception->IsJSReceiver()) {
+    return i::Handle<i::JSReceiver>::cast(maybe_exception);
+  }
+  i::MaybeHandle<i::String> maybe_string =
+      i::Object::ToString(isolate, maybe_exception);
+  i::Handle<i::String> string = isolate->factory()->empty_string();
+  if (!maybe_string.ToHandle(&string)) {
+    // If converting the {maybe_exception} to string threw another exception,
+    // just give up and leave {string} as the empty string.
+    isolate->clear_pending_exception();
+  }
+  // {NewError} cannot fail when its input is a plain String, so we always
+  // get an Error object here.
+  return i::Handle<i::JSReceiver>::cast(
+      isolate->factory()->NewError(isolate->error_function(), string));
+}
+
 }  // namespace
 
-auto Func::call(const Val args[], Val results[]) const -> own<Trap*> {
+auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
   auto func = impl(this);
   auto store = func->store();
   auto isolate = store->i_isolate();
@@ -1343,10 +1516,10 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap*> {
   i::Handle<i::Code> wrapper_code = i::Handle<i::Code>(
       i::Code::cast(function_data->c_wrapper_code()), isolate);
   i::Address call_target =
-      function_data->wasm_call_target().ptr() >> i::kSmiTagSize;
+      CallTargetFromCache(function_data->wasm_call_target());
 
   i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
-  PushArgs(sig, args, &packer);
+  PushArgs(sig, args, &packer, store);
 
   i::Handle<i::Object> object_ref = instance;
   if (function_index <
@@ -1377,28 +1550,24 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap*> {
   if (isolate->has_pending_exception()) {
     i::Handle<i::Object> exception(isolate->pending_exception(), isolate);
     isolate->clear_pending_exception();
-    if (!exception->IsJSReceiver()) {
-      i::MaybeHandle<i::String> maybe_string =
-          i::Object::ToString(isolate, exception);
-      i::Handle<i::String> string = maybe_string.is_null()
-                                        ? isolate->factory()->empty_string()
-                                        : maybe_string.ToHandleChecked();
-      exception =
-          isolate->factory()->NewError(isolate->error_function(), string);
-    }
-    return implement<Trap>::type::make(
-        store, i::Handle<i::JSReceiver>::cast(exception));
+    return implement<Trap>::type::make(store,
+                                       GetProperException(isolate, exception));
   }
 
   PopArgs(sig, results, &packer, store);
   return nullptr;
 }
 
-i::Address FuncData::v8_callback(void* data, i::Address argv) {
-  FuncData* self = reinterpret_cast<FuncData*>(data);
+i::Address FuncData::v8_callback(i::Address host_data_foreign,
+                                 i::Address argv) {
+  FuncData* self =
+      i::Managed<FuncData>::cast(i::Object(host_data_foreign))->raw();
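+  // {host_data_foreign} arrives as the tagged address of the Managed created
+  // in make_func() above; unwrap it to recover the raw FuncData.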
+  StoreImpl* store = impl(self->store);
+  i::Isolate* isolate = store->i_isolate();
+  i::HandleScope scope(isolate);
 
-  const vec<ValType*>& param_types = self->type->params();
-  const vec<ValType*>& result_types = self->type->results();
+  const ownvec<ValType>& param_types = self->type->params();
+  const ownvec<ValType>& result_types = self->type->results();
 
   int num_param_types = static_cast<int>(param_types.size());
   int num_result_types = static_cast<int>(result_types.size());
@@ -1428,19 +1597,14 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
       case FUNCREF: {
         i::Address raw = v8::base::ReadUnalignedValue<i::Address>(p);
         p += sizeof(raw);
-        if (raw == i::kNullAddress) {
-          params[i] = Val(nullptr);
-        } else {
-          i::JSReceiver raw_obj = i::JSReceiver::cast(i::Object(raw));
-          i::Handle<i::JSReceiver> obj(raw_obj, raw_obj.GetIsolate());
-          params[i] = Val(implement<Ref>::type::make(impl(self->store), obj));
-        }
+        i::Handle<i::Object> obj(i::Object(raw), isolate);
+        params[i] = Val(V8RefValueToWasm(store, obj));
         break;
       }
     }
   }
 
-  own<Trap*> trap;
+  own<Trap> trap;
   if (self->kind == kCallbackWithEnv) {
     trap = self->callback_with_env(self->env, params.get(), results.get());
   } else {
@@ -1448,7 +1612,6 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
   }
 
   if (trap) {
-    i::Isolate* isolate = impl(self->store)->i_isolate();
     isolate->Throw(*impl(trap.get())->v8_object());
     i::Object ex = isolate->pending_exception();
     isolate->clear_pending_exception();
@@ -1476,12 +1639,8 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
         break;
       case ANYREF:
       case FUNCREF: {
-        if (results[i].ref() == nullptr) {
-          v8::base::WriteUnalignedValue(p, i::kNullAddress);
-        } else {
-          v8::base::WriteUnalignedValue(
-              p, impl(results[i].ref())->v8_object()->ptr());
-        }
+        v8::base::WriteUnalignedValue(
+            p, WasmRefToV8(isolate, results[i].ref())->ptr());
         p += sizeof(i::Address);
         break;
       }
@@ -1490,10 +1649,6 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
   return i::kNullAddress;
 }
 
-void FuncData::finalize_func_data(void* data) {
-  delete reinterpret_cast<FuncData*>(data);
-}
-
 // Global Instances
 
 template <>
@@ -1503,10 +1658,10 @@ struct implement<Global> {
 
 Global::~Global() {}
 
-auto Global::copy() const -> own<Global*> { return impl(this)->copy(); }
+auto Global::copy() const -> own<Global> { return impl(this)->copy(); }
 
 auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
-    -> own<Global*> {
+    -> own<Global> {
   StoreImpl* store = impl(store_abs);
   i::Isolate* isolate = store->i_isolate();
   i::HandleScope handle_scope(isolate);
@@ -1528,7 +1683,7 @@ auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
   return global;
 }
 
-auto Global::type() const -> own<GlobalType*> {
+auto Global::type() const -> own<GlobalType> {
   i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
   ValKind kind = V8ValueTypeToWasm(v8_global->type());
   Mutability mutability = v8_global->is_mutable() ? VAR : CONST;
@@ -1546,15 +1701,11 @@ auto Global::get() const -> Val {
       return Val(v8_global->GetF32());
     case F64:
       return Val(v8_global->GetF64());
-    case ANYREF: {
-      i::Handle<i::JSReceiver> obj =
-          i::Handle<i::JSReceiver>::cast(v8_global->GetRef());
-      return Val(RefImpl<Ref, i::JSReceiver>::make(impl(this)->store(), obj));
-    }
+    case ANYREF:
     case FUNCREF: {
-      i::Handle<i::JSFunction> obj =
-          i::Handle<i::JSFunction>::cast(v8_global->GetRef());
-      return Val(implement<Func>::type::make(impl(this)->store(), obj));
+      StoreImpl* store = impl(this)->store();
+      i::HandleScope scope(store->i_isolate());
+      return Val(V8RefValueToWasm(store, v8_global->GetRef()));
     }
     default:
       // TODO(wasm+): support new value types
@@ -1574,10 +1725,12 @@ void Global::set(const Val& val) {
     case F64:
       return v8_global->SetF64(val.f64());
     case ANYREF:
-      return v8_global->SetAnyRef(impl(val.ref())->v8_object());
+      return v8_global->SetAnyRef(
+          WasmRefToV8(impl(this)->store()->i_isolate(), val.ref()));
     case FUNCREF: {
-      bool result = v8_global->SetFuncRef(impl(this)->store()->i_isolate(),
-                                          impl(val.ref())->v8_object());
+      i::Isolate* isolate = impl(this)->store()->i_isolate();
+      bool result =
+          v8_global->SetFuncRef(isolate, WasmRefToV8(isolate, val.ref()));
       DCHECK(result);
       USE(result);
       return;
@@ -1597,14 +1750,13 @@ struct implement<Table> {
 
 Table::~Table() {}
 
-auto Table::copy() const -> own<Table*> { return impl(this)->copy(); }
+auto Table::copy() const -> own<Table> { return impl(this)->copy(); }
 
 auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
-    -> own<Table*> {
+    -> own<Table> {
   StoreImpl* store = impl(store_abs);
   i::Isolate* isolate = store->i_isolate();
   i::HandleScope scope(isolate);
-  auto enabled_features = i::wasm::WasmFeaturesFromFlags();
 
   // Get "element".
   i::wasm::ValueType i_type;
@@ -1613,13 +1765,11 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
       i_type = i::wasm::kWasmFuncRef;
       break;
     case ANYREF:
-      if (enabled_features.anyref) {
-        i_type = i::wasm::kWasmAnyRef;
-        break;
-      }  // Else fall through.
-      V8_FALLTHROUGH;
+      DCHECK(i::wasm::WasmFeaturesFromFlags().anyref);  // See Engine::make().
+      i_type = i::wasm::kWasmAnyRef;
+      break;
     default:
-      UNREACHABLE();  // 'element' must be 'FUNCREF'.
+      UNREACHABLE();
       return nullptr;
   }
 
@@ -1652,42 +1802,44 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
   return implement<Table>::type::make(store, table_obj);
 }
 
-auto Table::type() const -> own<TableType*> {
+auto Table::type() const -> own<TableType> {
   i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
   uint32_t min = table->current_length();
   uint32_t max;
   if (!table->maximum_length().ToUint32(&max)) max = 0xFFFFFFFFu;
-  // TODO(wasm+): support new element types.
-  return TableType::make(ValType::make(FUNCREF), Limits(min, max));
+  ValKind kind;
+  switch (table->type()) {
+    case i::wasm::kWasmFuncRef:
+      kind = FUNCREF;
+      break;
+    case i::wasm::kWasmAnyRef:
+      kind = ANYREF;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return TableType::make(ValType::make(kind), Limits(min, max));
 }
 
-auto Table::get(size_t index) const -> own<Ref*> {
+auto Table::get(size_t index) const -> own<Ref> {
   i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
-  if (index >= table->current_length()) return own<Ref*>();
+  if (index >= table->current_length()) return own<Ref>();
   i::Isolate* isolate = table->GetIsolate();
   i::HandleScope handle_scope(isolate);
   i::Handle<i::Object> result =
       i::WasmTableObject::Get(isolate, table, static_cast<uint32_t>(index));
-  if (!result->IsJSFunction()) return own<Ref*>();
-  DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*result) ||
-         i::WasmCapiFunction::IsWasmCapiFunction(*result));
-  // TODO(wasm+): other references
-  return implement<Func>::type::make(impl(this)->store(),
-                                     i::Handle<i::JSFunction>::cast(result));
+  // TODO(jkummerow): If we support both JavaScript and the C-API at the same
+  // time, we need to handle Smis and other JS primitives here.
+  DCHECK(result->IsNull(isolate) || result->IsJSReceiver());
+  return V8RefValueToWasm(impl(this)->store(), result);
 }
 
 auto Table::set(size_t index, const Ref* ref) -> bool {
-  if (ref && !impl(ref)->v8_object()->IsFunction()) {
-    WASM_UNIMPLEMENTED("non-function table elements");
-  }
   i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
   if (index >= table->current_length()) return false;
   i::Isolate* isolate = table->GetIsolate();
   i::HandleScope handle_scope(isolate);
-  i::Handle<i::Object> obj =
-      ref ? i::Handle<i::Object>::cast(impl(ref)->v8_object())
-          : i::Handle<i::Object>::cast(
-                i::ReadOnlyRoots(isolate).null_value_handle());
+  i::Handle<i::Object> obj = WasmRefToV8(isolate, ref);
   i::WasmTableObject::Set(isolate, table, static_cast<uint32_t>(index), obj);
   return true;
 }
@@ -1701,10 +1853,7 @@ auto Table::grow(size_t delta, const Ref* ref) -> bool {
   i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
   i::Isolate* isolate = table->GetIsolate();
   i::HandleScope scope(isolate);
-  i::Handle<i::Object> init_value =
-      ref == nullptr
-          ? i::Handle<i::Object>::cast(isolate->factory()->null_value())
-          : i::Handle<i::Object>::cast(impl(ref)->v8_object());
+  i::Handle<i::Object> init_value = WasmRefToV8(isolate, ref);
   int result = i::WasmTableObject::Grow(
       isolate, table, static_cast<uint32_t>(delta), init_value);
   return result >= 0;
@@ -1719,9 +1868,9 @@ struct implement<Memory> {
 
 Memory::~Memory() {}
 
-auto Memory::copy() const -> own<Memory*> { return impl(this)->copy(); }
+auto Memory::copy() const -> own<Memory> { return impl(this)->copy(); }
 
-auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory*> {
+auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory> {
   StoreImpl* store = impl(store_abs);
   i::Isolate* isolate = store->i_isolate();
   i::HandleScope scope(isolate);
@@ -1738,12 +1887,12 @@ auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory*> {
   i::Handle<i::WasmMemoryObject> memory_obj;
   if (!i::WasmMemoryObject::New(isolate, minimum, maximum, is_shared)
            .ToHandle(&memory_obj)) {
-    return own<Memory*>();
+    return own<Memory>();
   }
   return implement<Memory>::type::make(store, memory_obj);
 }
 
-auto Memory::type() const -> own<MemoryType*> {
+auto Memory::type() const -> own<MemoryType> {
   i::Handle<i::WasmMemoryObject> memory = impl(this)->v8_object();
   uint32_t min = static_cast<uint32_t>(memory->array_buffer().byte_length() /
                                        i::wasm::kWasmPageSize);
@@ -1784,10 +1933,10 @@ struct implement<Instance> {
 
 Instance::~Instance() {}
 
-auto Instance::copy() const -> own<Instance*> { return impl(this)->copy(); }
+auto Instance::copy() const -> own<Instance> { return impl(this)->copy(); }
 
-auto Instance::make(Store* store_abs, const Module* module_abs,
-                    const Extern* const imports[]) -> own<Instance*> {
+own<Instance> Instance::make(Store* store_abs, const Module* module_abs,
+                             const Extern* const imports[], own<Trap>* trap) {
   StoreImpl* store = impl(store_abs);
   const implement<Module>::type* module = impl(module_abs);
   i::Isolate* isolate = store->i_isolate();
@@ -1795,11 +1944,12 @@ auto Instance::make(Store* store_abs, const Module* module_abs,
 
   DCHECK_EQ(module->v8_object()->GetIsolate(), isolate);
 
-  vec<ImportType*> import_types = module_abs->imports();
+  if (trap) *trap = nullptr;
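+  // Clear the optional out-parameter early so callers see a well-defined
+  // value even on the error-free path.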
+  ownvec<ImportType> import_types = module_abs->imports();
   i::Handle<i::JSObject> imports_obj =
       isolate->factory()->NewJSObject(isolate->object_function());
   for (size_t i = 0; i < import_types.size(); ++i) {
-    auto type = import_types[i];
+    ImportType* type = import_types[i].get();
     i::Handle<i::String> module_str = VecToString(isolate, type->module());
     i::Handle<i::String> name_str = VecToString(isolate, type->name());
 
@@ -1817,17 +1967,45 @@ auto Instance::make(Store* store_abs, const Module* module_abs,
     ignore(i::Object::SetProperty(isolate, module_obj, name_str,
                                   impl(imports[i])->v8_object()));
   }
+  i::wasm::ErrorThrower thrower(isolate, "instantiation");
+  i::MaybeHandle<i::WasmInstanceObject> instance_obj =
+      isolate->wasm_engine()->SyncInstantiate(
+          isolate, &thrower, module->v8_object(), imports_obj,
+          i::MaybeHandle<i::JSArrayBuffer>());
+  if (trap) {
+    if (thrower.error()) {
+      *trap = implement<Trap>::type::make(
+          store, GetProperException(isolate, thrower.Reify()));
+      DCHECK(!thrower.error());  // Reify() called Reset().
+      DCHECK(!isolate->has_pending_exception());  // Hasn't been thrown yet.
+      return own<Instance>();
+    } else if (isolate->has_pending_exception()) {
+      i::Handle<i::Object> maybe_exception(isolate->pending_exception(),
+                                           isolate);
+      *trap = implement<Trap>::type::make(
+          store, GetProperException(isolate, maybe_exception));
+      isolate->clear_pending_exception();
+      return own<Instance>();
+    }
+  } else if (instance_obj.is_null()) {
+    // If no {trap} output is specified, silently swallow all errors.
+    thrower.Reset();
+    isolate->clear_pending_exception();
+    return own<Instance>();
+  }
+  return implement<Instance>::type::make(store, instance_obj.ToHandleChecked());
+}
 
-  NopErrorThrower thrower(isolate);
-  i::Handle<i::WasmInstanceObject> instance_obj =
-      isolate->wasm_engine()
-          ->SyncInstantiate(isolate, &thrower, module->v8_object(), imports_obj,
-                            i::MaybeHandle<i::JSArrayBuffer>())
-          .ToHandleChecked();
-  return implement<Instance>::type::make(store, instance_obj);
+namespace {
+
+own<Instance> GetInstance(StoreImpl* store,
+                          i::Handle<i::WasmInstanceObject> instance) {
+  return implement<Instance>::type::make(store, instance);
 }
 
-auto Instance::exports() const -> vec<Extern*> {
+}  // namespace
+
+auto Instance::exports() const -> ownvec<Extern> {
   const implement<Instance>::type* instance = impl(this);
   StoreImpl* store = instance->store();
   i::Isolate* isolate = store->i_isolate();
@@ -1837,9 +2015,10 @@ auto Instance::exports() const -> vec<Extern*> {
                                             isolate);
   i::Handle<i::JSObject> exports_obj(instance_obj->exports_object(), isolate);
 
-  vec<ExportType*> export_types = ExportsImpl(module_obj);
-  vec<Extern*> exports = vec<Extern*>::make_uninitialized(export_types.size());
-  if (!exports) return vec<Extern*>::invalid();
+  ownvec<ExportType> export_types = ExportsImpl(module_obj);
+  ownvec<Extern> exports =
+      ownvec<Extern>::make_uninitialized(export_types.size());
+  if (!exports) return ownvec<Extern>::invalid();
 
   for (size_t i = 0; i < export_types.size(); ++i) {
     auto& name = export_types[i]->name();
@@ -1852,20 +2031,20 @@ auto Instance::exports() const -> vec<Extern*> {
     switch (type->kind()) {
       case EXTERN_FUNC: {
         DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*obj));
-        exports[i].reset(implement<Func>::type::make(
-            store, i::Handle<i::WasmExportedFunction>::cast(obj)));
+        exports[i] = implement<Func>::type::make(
+            store, i::Handle<i::WasmExportedFunction>::cast(obj));
       } break;
       case EXTERN_GLOBAL: {
-        exports[i].reset(implement<Global>::type::make(
-            store, i::Handle<i::WasmGlobalObject>::cast(obj)));
+        exports[i] = implement<Global>::type::make(
+            store, i::Handle<i::WasmGlobalObject>::cast(obj));
       } break;
       case EXTERN_TABLE: {
-        exports[i].reset(implement<Table>::type::make(
-            store, i::Handle<i::WasmTableObject>::cast(obj)));
+        exports[i] = implement<Table>::type::make(
+            store, i::Handle<i::WasmTableObject>::cast(obj));
       } break;
       case EXTERN_MEMORY: {
-        exports[i].reset(implement<Memory>::type::make(
-            store, i::Handle<i::WasmMemoryObject>::cast(obj)));
+        exports[i] = implement<Memory>::type::make(
+            store, i::Handle<i::WasmMemoryObject>::cast(obj));
       } break;
     }
   }
@@ -1898,152 +2077,151 @@ struct borrowed_vec {
 
 }  // extern "C++"
 
-#define WASM_DEFINE_OWN(name, Name)                                          \
-  struct wasm_##name##_t : Name {};                                          \
-                                                                             \
-  void wasm_##name##_delete(wasm_##name##_t* x) { delete x; }                \
-                                                                             \
-  extern "C++" inline auto hide(Name* x)->wasm_##name##_t* {                 \
-    return static_cast<wasm_##name##_t*>(x);                                 \
-  }                                                                          \
-  extern "C++" inline auto hide(const Name* x)->const wasm_##name##_t* {     \
-    return static_cast<const wasm_##name##_t*>(x);                           \
-  }                                                                          \
-  extern "C++" inline auto reveal(wasm_##name##_t* x)->Name* { return x; }   \
-  extern "C++" inline auto reveal(const wasm_##name##_t* x)->const Name* {   \
-    return x;                                                                \
-  }                                                                          \
-  extern "C++" inline auto get(wasm::own<Name*>& x)->wasm_##name##_t* {      \
-    return hide(x.get());                                                    \
-  }                                                                          \
-  extern "C++" inline auto get(const wasm::own<Name*>& x)                    \
-      ->const wasm_##name##_t* {                                             \
-    return hide(x.get());                                                    \
-  }                                                                          \
-  extern "C++" inline auto release(wasm::own<Name*>&& x)->wasm_##name##_t* { \
-    return hide(x.release());                                                \
-  }                                                                          \
-  extern "C++" inline auto adopt(wasm_##name##_t* x)->wasm::own<Name*> {     \
-    return make_own(x);                                                      \
+#define WASM_DEFINE_OWN(name, Name)                                            \
+  struct wasm_##name##_t : Name {};                                            \
+                                                                               \
+  void wasm_##name##_delete(wasm_##name##_t* x) { delete x; }                  \
+                                                                               \
+  extern "C++" inline auto hide_##name(Name* x)->wasm_##name##_t* {            \
+    return static_cast<wasm_##name##_t*>(x);                                   \
+  }                                                                            \
+  extern "C++" inline auto hide_##name(const Name* x)                          \
+      ->const wasm_##name##_t* {                                               \
+    return static_cast<const wasm_##name##_t*>(x);                             \
+  }                                                                            \
+  extern "C++" inline auto reveal_##name(wasm_##name##_t* x)->Name* {          \
+    return x;                                                                  \
+  }                                                                            \
+  extern "C++" inline auto reveal_##name(const wasm_##name##_t* x)             \
+      ->const Name* {                                                          \
+    return x;                                                                  \
+  }                                                                            \
+  extern "C++" inline auto get_##name(wasm::own<Name>& x)->wasm_##name##_t* {  \
+    return hide_##name(x.get());                                               \
+  }                                                                            \
+  extern "C++" inline auto get_##name(const wasm::own<Name>& x)                \
+      ->const wasm_##name##_t* {                                               \
+    return hide_##name(x.get());                                               \
+  }                                                                            \
+  extern "C++" inline auto release_##name(wasm::own<Name>&& x)                 \
+      ->wasm_##name##_t* {                                                     \
+    return hide_##name(x.release());                                           \
+  }                                                                            \
+  extern "C++" inline auto adopt_##name(wasm_##name##_t* x)->wasm::own<Name> { \
+    return make_own(x);                                                        \
   }
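+// Note: these helpers now carry a per-type suffix instead of relying on
+// overloading, presumably because own<T> being a genuine smart pointer would
+// make the former hide/get/release overload sets ambiguous.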
 
 // Vectors
 
-#define WASM_DEFINE_VEC_BASE(name, Name, ptr_or_none)                       \
-  extern "C++" inline auto hide(wasm::vec<Name ptr_or_none>& v)             \
-      ->wasm_##name##_vec_t* {                                              \
-    static_assert(sizeof(wasm_##name##_vec_t) == sizeof(wasm::vec<Name>),   \
-                  "C/C++ incompatibility");                                 \
-    return reinterpret_cast<wasm_##name##_vec_t*>(&v);                      \
-  }                                                                         \
-  extern "C++" inline auto hide(const wasm::vec<Name ptr_or_none>& v)       \
-      ->const wasm_##name##_vec_t* {                                        \
-    static_assert(sizeof(wasm_##name##_vec_t) == sizeof(wasm::vec<Name>),   \
-                  "C/C++ incompatibility");                                 \
-    return reinterpret_cast<const wasm_##name##_vec_t*>(&v);                \
-  }                                                                         \
-  extern "C++" inline auto hide(Name ptr_or_none* v)                        \
-      ->wasm_##name##_t ptr_or_none* {                                      \
-    static_assert(                                                          \
-        sizeof(wasm_##name##_t ptr_or_none) == sizeof(Name ptr_or_none),    \
-        "C/C++ incompatibility");                                           \
-    return reinterpret_cast<wasm_##name##_t ptr_or_none*>(v);               \
-  }                                                                         \
-  extern "C++" inline auto hide(Name ptr_or_none const* v)                  \
-      ->wasm_##name##_t ptr_or_none const* {                                \
-    static_assert(                                                          \
-        sizeof(wasm_##name##_t ptr_or_none) == sizeof(Name ptr_or_none),    \
-        "C/C++ incompatibility");                                           \
-    return reinterpret_cast<wasm_##name##_t ptr_or_none const*>(v);         \
-  }                                                                         \
-  extern "C++" inline auto reveal(wasm_##name##_t ptr_or_none* v)           \
-      ->Name ptr_or_none* {                                                 \
-    static_assert(                                                          \
-        sizeof(wasm_##name##_t ptr_or_none) == sizeof(Name ptr_or_none),    \
-        "C/C++ incompatibility");                                           \
-    return reinterpret_cast<Name ptr_or_none*>(v);                          \
-  }                                                                         \
-  extern "C++" inline auto reveal(wasm_##name##_t ptr_or_none const* v)     \
-      ->Name ptr_or_none const* {                                           \
-    static_assert(                                                          \
-        sizeof(wasm_##name##_t ptr_or_none) == sizeof(Name ptr_or_none),    \
-        "C/C++ incompatibility");                                           \
-    return reinterpret_cast<Name ptr_or_none const*>(v);                    \
-  }                                                                         \
-  extern "C++" inline auto get(wasm::vec<Name ptr_or_none>& v)              \
-      ->wasm_##name##_vec_t {                                               \
-    wasm_##name##_vec_t v2 = {v.size(), hide(v.get())};                     \
-    return v2;                                                              \
-  }                                                                         \
-  extern "C++" inline auto get(const wasm::vec<Name ptr_or_none>& v)        \
-      ->const wasm_##name##_vec_t {                                         \
-    wasm_##name##_vec_t v2 = {                                              \
-        v.size(), const_cast<wasm_##name##_t ptr_or_none*>(hide(v.get()))}; \
-    return v2;                                                              \
-  }                                                                         \
-  extern "C++" inline auto release(wasm::vec<Name ptr_or_none>&& v)         \
-      ->wasm_##name##_vec_t {                                               \
-    wasm_##name##_vec_t v2 = {v.size(), hide(v.release())};                 \
-    return v2;                                                              \
-  }                                                                         \
-  extern "C++" inline auto adopt(wasm_##name##_vec_t* v)                    \
-      ->wasm::vec<Name ptr_or_none> {                                       \
-    return wasm::vec<Name ptr_or_none>::adopt(v->size, reveal(v->data));    \
-  }                                                                         \
-  extern "C++" inline auto borrow(const wasm_##name##_vec_t* v)             \
-      ->borrowed_vec<Name ptr_or_none> {                                    \
-    return borrowed_vec<Name ptr_or_none>(                                  \
-        wasm::vec<Name ptr_or_none>::adopt(v->size, reveal(v->data)));      \
-  }                                                                         \
-                                                                            \
-  void wasm_##name##_vec_new_uninitialized(wasm_##name##_vec_t* out,        \
-                                           size_t size) {                   \
-    *out = release(wasm::vec<Name ptr_or_none>::make_uninitialized(size));  \
-  }                                                                         \
-  void wasm_##name##_vec_new_empty(wasm_##name##_vec_t* out) {              \
-    wasm_##name##_vec_new_uninitialized(out, 0);                            \
-  }                                                                         \
-                                                                            \
-  void wasm_##name##_vec_delete(wasm_##name##_vec_t* v) { adopt(v); }
+#define WASM_DEFINE_VEC_BASE(name, Name, vec, ptr_or_none)                     \
+  static_assert(sizeof(wasm_##name##_vec_t) == sizeof(vec<Name>),              \
+                "C/C++ incompatibility");                                      \
+  static_assert(                                                               \
+      sizeof(wasm_##name##_t ptr_or_none) == sizeof(vec<Name>::elem_type),     \
+      "C/C++ incompatibility");                                                \
+  extern "C++" inline auto hide_##name##_vec(vec<Name>& v)                     \
+      ->wasm_##name##_vec_t* {                                                 \
+    return reinterpret_cast<wasm_##name##_vec_t*>(&v);                         \
+  }                                                                            \
+  extern "C++" inline auto hide_##name##_vec(const vec<Name>& v)               \
+      ->const wasm_##name##_vec_t* {                                           \
+    return reinterpret_cast<const wasm_##name##_vec_t*>(&v);                   \
+  }                                                                            \
+  extern "C++" inline auto hide_##name##_vec(vec<Name>::elem_type* v)          \
+      ->wasm_##name##_t ptr_or_none* {                                         \
+    return reinterpret_cast<wasm_##name##_t ptr_or_none*>(v);                  \
+  }                                                                            \
+  extern "C++" inline auto hide_##name##_vec(const vec<Name>::elem_type* v)    \
+      ->wasm_##name##_t ptr_or_none const* {                                   \
+    return reinterpret_cast<wasm_##name##_t ptr_or_none const*>(v);            \
+  }                                                                            \
+  extern "C++" inline auto reveal_##name##_vec(wasm_##name##_t ptr_or_none* v) \
+      ->vec<Name>::elem_type* {                                                \
+    return reinterpret_cast<vec<Name>::elem_type*>(v);                         \
+  }                                                                            \
+  extern "C++" inline auto reveal_##name##_vec(                                \
+      wasm_##name##_t ptr_or_none const* v)                                    \
+      ->const vec<Name>::elem_type* {                                          \
+    return reinterpret_cast<const vec<Name>::elem_type*>(v);                   \
+  }                                                                            \
+  extern "C++" inline auto get_##name##_vec(vec<Name>& v)                      \
+      ->wasm_##name##_vec_t {                                                  \
+    wasm_##name##_vec_t v2 = {v.size(), hide_##name##_vec(v.get())};           \
+    return v2;                                                                 \
+  }                                                                            \
+  extern "C++" inline auto get_##name##_vec(const vec<Name>& v)                \
+      ->const wasm_##name##_vec_t {                                            \
+    wasm_##name##_vec_t v2 = {                                                 \
+        v.size(),                                                              \
+        const_cast<wasm_##name##_t ptr_or_none*>(hide_##name##_vec(v.get()))}; \
+    return v2;                                                                 \
+  }                                                                            \
+  extern "C++" inline auto release_##name##_vec(vec<Name>&& v)                 \
+      ->wasm_##name##_vec_t {                                                  \
+    wasm_##name##_vec_t v2 = {v.size(), hide_##name##_vec(v.release())};       \
+    return v2;                                                                 \
+  }                                                                            \
+  extern "C++" inline auto adopt_##name##_vec(wasm_##name##_vec_t* v)          \
+      ->vec<Name> {                                                            \
+    return vec<Name>::adopt(v->size, reveal_##name##_vec(v->data));            \
+  }                                                                            \
+  extern "C++" inline auto borrow_##name##_vec(const wasm_##name##_vec_t* v)   \
+      ->borrowed_vec<vec<Name>::elem_type> {                                   \
+    return borrowed_vec<vec<Name>::elem_type>(                                 \
+        vec<Name>::adopt(v->size, reveal_##name##_vec(v->data)));              \
+  }                                                                            \
+                                                                               \
+  void wasm_##name##_vec_new_uninitialized(wasm_##name##_vec_t* out,           \
+                                           size_t size) {                      \
+    *out = release_##name##_vec(vec<Name>::make_uninitialized(size));          \
+  }                                                                            \
+  void wasm_##name##_vec_new_empty(wasm_##name##_vec_t* out) {                 \
+    wasm_##name##_vec_new_uninitialized(out, 0);                               \
+  }                                                                            \
+                                                                               \
+  void wasm_##name##_vec_delete(wasm_##name##_vec_t* v) {                      \
+    adopt_##name##_vec(v);                                                     \
+  }
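For orientation, a minimal sketch of the C surface this macro generates once instantiated for byte vectors (illustrative only; assumes <string.h> for memset):

  wasm_byte_vec_t bytes;
  wasm_byte_vec_new_uninitialized(&bytes, 16);  // release_byte_vec(make_uninitialized(16))
  memset(bytes.data, 0, bytes.size);
  wasm_byte_vec_delete(&bytes);                 // adopt_byte_vec reclaims and frees the storage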
 
 // Vectors that do not own their elements
-#define WASM_DEFINE_VEC_PLAIN(name, Name, ptr_or_none)                    \
-  WASM_DEFINE_VEC_BASE(name, Name, ptr_or_none)                           \
-                                                                          \
-  void wasm_##name##_vec_new(wasm_##name##_vec_t* out, size_t size,       \
-                             wasm_##name##_t ptr_or_none const data[]) {  \
-    auto v2 = wasm::vec<Name ptr_or_none>::make_uninitialized(size);      \
-    if (v2.size() != 0) {                                                 \
-      memcpy(v2.get(), data, size * sizeof(wasm_##name##_t ptr_or_none)); \
-    }                                                                     \
-    *out = release(std::move(v2));                                        \
-  }                                                                       \
-                                                                          \
-  void wasm_##name##_vec_copy(wasm_##name##_vec_t* out,                   \
-                              wasm_##name##_vec_t* v) {                   \
-    wasm_##name##_vec_new(out, v->size, v->data);                         \
-  }
-
-// Vectors who own their elements
-#define WASM_DEFINE_VEC(name, Name, ptr_or_none)                         \
-  WASM_DEFINE_VEC_BASE(name, Name, ptr_or_none)                          \
-                                                                         \
-  void wasm_##name##_vec_new(wasm_##name##_vec_t* out, size_t size,      \
-                             wasm_##name##_t ptr_or_none const data[]) { \
-    auto v2 = wasm::vec<Name ptr_or_none>::make_uninitialized(size);     \
-    for (size_t i = 0; i < v2.size(); ++i) {                             \
-      v2[i] = adopt(data[i]);                                            \
-    }                                                                    \
-    *out = release(std::move(v2));                                       \
-  }                                                                      \
-                                                                         \
-  void wasm_##name##_vec_copy(wasm_##name##_vec_t* out,                  \
-                              wasm_##name##_vec_t* v) {                  \
-    auto v2 = wasm::vec<Name ptr_or_none>::make_uninitialized(v->size);  \
-    for (size_t i = 0; i < v2.size(); ++i) {                             \
-      v2[i] = adopt(wasm_##name##_copy(v->data[i]));                     \
-    }                                                                    \
-    *out = release(std::move(v2));                                       \
+#define WASM_DEFINE_VEC_PLAIN(name, Name)                           \
+  WASM_DEFINE_VEC_BASE(name, Name,                                  \
+                       wasm::vec, ) /* NOLINT(whitespace/parens) */ \
+                                                                    \
+  void wasm_##name##_vec_new(wasm_##name##_vec_t* out, size_t size, \
+                             const wasm_##name##_t data[]) {        \
+    auto v2 = wasm::vec<Name>::make_uninitialized(size);            \
+    if (v2.size() != 0) {                                           \
+      memcpy(v2.get(), data, size * sizeof(wasm_##name##_t));       \
+    }                                                               \
+    *out = release_##name##_vec(std::move(v2));                     \
+  }                                                                 \
+                                                                    \
+  void wasm_##name##_vec_copy(wasm_##name##_vec_t* out,             \
+                              wasm_##name##_vec_t* v) {             \
+    wasm_##name##_vec_new(out, v->size, v->data);                   \
+  }
+
+// Vectors that own their elements
+#define WASM_DEFINE_VEC_OWN(name, Name)                             \
+  WASM_DEFINE_VEC_BASE(name, Name, wasm::ownvec, *)                 \
+                                                                    \
+  void wasm_##name##_vec_new(wasm_##name##_vec_t* out, size_t size, \
+                             wasm_##name##_t* const data[]) {       \
+    auto v2 = wasm::ownvec<Name>::make_uninitialized(size);         \
+    for (size_t i = 0; i < v2.size(); ++i) {                        \
+      v2[i] = adopt_##name(data[i]);                                \
+    }                                                               \
+    *out = release_##name##_vec(std::move(v2));                     \
+  }                                                                 \
+                                                                    \
+  void wasm_##name##_vec_copy(wasm_##name##_vec_t* out,             \
+                              wasm_##name##_vec_t* v) {             \
+    auto v2 = wasm::ownvec<Name>::make_uninitialized(v->size);      \
+    for (size_t i = 0; i < v2.size(); ++i) {                        \
+      v2[i] = adopt_##name(wasm_##name##_copy(v->data[i]));         \
+    }                                                               \
+    *out = release_##name##_vec(std::move(v2));                     \
   }
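The two macros above differ only in element handling: the plain flavor copies raw element bytes with memcpy, while the owning flavor deep-copies each element through wasm_##name##_copy and takes ownership via adopt_##name. A sketch of the owning flavor, assuming params is an initialized wasm_valtype_vec_t:

  wasm_valtype_vec_t copy;
  wasm_valtype_vec_copy(&copy, &params);  // duplicates every wasm_valtype_t* element
  wasm_valtype_vec_delete(&copy);         // an owning vec also deletes its elements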
 
 extern "C++" {
@@ -2056,7 +2234,7 @@ inline auto is_empty(T* p) -> bool {
 // Byte vectors
 
 using byte = byte_t;
-WASM_DEFINE_VEC_PLAIN(byte, byte, )
+WASM_DEFINE_VEC_PLAIN(byte, byte)
 
 ///////////////////////////////////////////////////////////////////////////////
 // Runtime Environment
@@ -2065,16 +2243,20 @@ WASM_DEFINE_VEC_PLAIN(byte, byte, )
 
 WASM_DEFINE_OWN(config, wasm::Config)
 
-wasm_config_t* wasm_config_new() { return release(wasm::Config::make()); }
+wasm_config_t* wasm_config_new() {
+  return release_config(wasm::Config::make());
+}
 
 // Engine
 
 WASM_DEFINE_OWN(engine, wasm::Engine)
 
-wasm_engine_t* wasm_engine_new() { return release(wasm::Engine::make()); }
+wasm_engine_t* wasm_engine_new() {
+  return release_engine(wasm::Engine::make());
+}
 
 wasm_engine_t* wasm_engine_new_with_config(wasm_config_t* config) {
-  return release(wasm::Engine::make(adopt(config)));
+  return release_engine(wasm::Engine::make(adopt_config(config)));
 }
 
 // Stores
@@ -2082,7 +2264,7 @@ wasm_engine_t* wasm_engine_new_with_config(wasm_config_t* config) {
 WASM_DEFINE_OWN(store, wasm::Store)
 
 wasm_store_t* wasm_store_new(wasm_engine_t* engine) {
-  return release(wasm::Store::make(engine));
+  return release_store(wasm::Store::make(engine));
 }
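End to end, the bootstrap sequence on the C side remains the standard wasm.h one (sketch):

  wasm_engine_t* engine = wasm_engine_new();
  wasm_store_t* store = wasm_store_new(engine);
  /* ... create and instantiate modules against this store ... */
  wasm_store_delete(store);
  wasm_engine_delete(engine);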
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -2090,38 +2272,40 @@ wasm_store_t* wasm_store_new(wasm_engine_t* engine) {
 
 // Type attributes
 
-extern "C++" inline auto hide(wasm::Mutability mutability)
+extern "C++" inline auto hide_mutability(wasm::Mutability mutability)
     -> wasm_mutability_t {
   return static_cast<wasm_mutability_t>(mutability);
 }
 
-extern "C++" inline auto reveal(wasm_mutability_t mutability)
+extern "C++" inline auto reveal_mutability(wasm_mutability_t mutability)
     -> wasm::Mutability {
   return static_cast<wasm::Mutability>(mutability);
 }
 
-extern "C++" inline auto hide(const wasm::Limits& limits)
+extern "C++" inline auto hide_limits(const wasm::Limits& limits)
     -> const wasm_limits_t* {
   return reinterpret_cast<const wasm_limits_t*>(&limits);
 }
 
-extern "C++" inline auto reveal(wasm_limits_t limits) -> wasm::Limits {
+extern "C++" inline auto reveal_limits(wasm_limits_t limits) -> wasm::Limits {
   return wasm::Limits(limits.min, limits.max);
 }
 
-extern "C++" inline auto hide(wasm::ValKind kind) -> wasm_valkind_t {
+extern "C++" inline auto hide_valkind(wasm::ValKind kind) -> wasm_valkind_t {
   return static_cast<wasm_valkind_t>(kind);
 }
 
-extern "C++" inline auto reveal(wasm_valkind_t kind) -> wasm::ValKind {
+extern "C++" inline auto reveal_valkind(wasm_valkind_t kind) -> wasm::ValKind {
   return static_cast<wasm::ValKind>(kind);
 }
 
-extern "C++" inline auto hide(wasm::ExternKind kind) -> wasm_externkind_t {
+extern "C++" inline auto hide_externkind(wasm::ExternKind kind)
+    -> wasm_externkind_t {
   return static_cast<wasm_externkind_t>(kind);
 }
 
-extern "C++" inline auto reveal(wasm_externkind_t kind) -> wasm::ExternKind {
+extern "C++" inline auto reveal_externkind(wasm_externkind_t kind)
+    -> wasm::ExternKind {
   return static_cast<wasm::ExternKind>(kind);
 }
 
@@ -2129,10 +2313,10 @@ extern "C++" inline auto reveal(wasm_externkind_t kind) -> wasm::ExternKind {
 
 #define WASM_DEFINE_TYPE(name, Name)                        \
   WASM_DEFINE_OWN(name, Name)                               \
-  WASM_DEFINE_VEC(name, Name, *)                            \
+  WASM_DEFINE_VEC_OWN(name, Name)                           \
                                                             \
   wasm_##name##_t* wasm_##name##_copy(wasm_##name##_t* t) { \
-    return release(t->copy());                              \
+    return release_##name(t->copy());                       \
   }
 
 // Value Types
@@ -2140,11 +2324,11 @@ extern "C++" inline auto reveal(wasm_externkind_t kind) -> wasm::ExternKind {
 WASM_DEFINE_TYPE(valtype, wasm::ValType)
 
 wasm_valtype_t* wasm_valtype_new(wasm_valkind_t k) {
-  return release(wasm::ValType::make(reveal(k)));
+  return release_valtype(wasm::ValType::make(reveal_valkind(k)));
 }
 
 wasm_valkind_t wasm_valtype_kind(const wasm_valtype_t* t) {
-  return hide(t->kind());
+  return hide_valkind(t->kind());
 }
 
 // Function Types
@@ -2153,15 +2337,16 @@ WASM_DEFINE_TYPE(functype, wasm::FuncType)
 
 wasm_functype_t* wasm_functype_new(wasm_valtype_vec_t* params,
                                    wasm_valtype_vec_t* results) {
-  return release(wasm::FuncType::make(adopt(params), adopt(results)));
+  return release_functype(wasm::FuncType::make(adopt_valtype_vec(params),
+                                               adopt_valtype_vec(results)));
 }
 
 const wasm_valtype_vec_t* wasm_functype_params(const wasm_functype_t* ft) {
-  return hide(ft->params());
+  return hide_valtype_vec(ft->params());
 }
 
 const wasm_valtype_vec_t* wasm_functype_results(const wasm_functype_t* ft) {
-  return hide(ft->results());
+  return hide_valtype_vec(ft->results());
 }
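A typical C-side construction, assuming the standard WASM_I32 kind constant from wasm.h (sketch; wasm_functype_new adopts both vectors):

  wasm_valtype_t* ps[1] = {wasm_valtype_new(WASM_I32)};
  wasm_valtype_vec_t params, results;
  wasm_valtype_vec_new(&params, 1, ps);  // adopts the element via adopt_valtype
  wasm_valtype_vec_new_empty(&results);
  wasm_functype_t* ft = wasm_functype_new(&params, &results);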
 
 // Global Types
@@ -2170,15 +2355,16 @@ WASM_DEFINE_TYPE(globaltype, wasm::GlobalType)
 
 wasm_globaltype_t* wasm_globaltype_new(wasm_valtype_t* content,
                                        wasm_mutability_t mutability) {
-  return release(wasm::GlobalType::make(adopt(content), reveal(mutability)));
+  return release_globaltype(wasm::GlobalType::make(
+      adopt_valtype(content), reveal_mutability(mutability)));
 }
 
 const wasm_valtype_t* wasm_globaltype_content(const wasm_globaltype_t* gt) {
-  return hide(gt->content());
+  return hide_valtype(gt->content());
 }
 
 wasm_mutability_t wasm_globaltype_mutability(const wasm_globaltype_t* gt) {
-  return hide(gt->mutability());
+  return hide_mutability(gt->mutability());
 }
 
 // Table Types
@@ -2187,15 +2373,16 @@ WASM_DEFINE_TYPE(tabletype, wasm::TableType)
 
 wasm_tabletype_t* wasm_tabletype_new(wasm_valtype_t* element,
                                      const wasm_limits_t* limits) {
-  return release(wasm::TableType::make(adopt(element), reveal(*limits)));
+  return release_tabletype(
+      wasm::TableType::make(adopt_valtype(element), reveal_limits(*limits)));
 }
 
 const wasm_valtype_t* wasm_tabletype_element(const wasm_tabletype_t* tt) {
-  return hide(tt->element());
+  return hide_valtype(tt->element());
 }
 
 const wasm_limits_t* wasm_tabletype_limits(const wasm_tabletype_t* tt) {
-  return hide(tt->limits());
+  return hide_limits(tt->limits());
 }
 
 // Memory Types
@@ -2203,11 +2390,11 @@ const wasm_limits_t* wasm_tabletype_limits(const wasm_tabletype_t* tt) {
 WASM_DEFINE_TYPE(memorytype, wasm::MemoryType)
 
 wasm_memorytype_t* wasm_memorytype_new(const wasm_limits_t* limits) {
-  return release(wasm::MemoryType::make(reveal(*limits)));
+  return release_memorytype(wasm::MemoryType::make(reveal_limits(*limits)));
 }
 
 const wasm_limits_t* wasm_memorytype_limits(const wasm_memorytype_t* mt) {
-  return hide(mt->limits());
+  return hide_limits(mt->limits());
 }
 
 // Extern Types
@@ -2215,82 +2402,90 @@ const wasm_limits_t* wasm_memorytype_limits(const wasm_memorytype_t* mt) {
 WASM_DEFINE_TYPE(externtype, wasm::ExternType)
 
 wasm_externkind_t wasm_externtype_kind(const wasm_externtype_t* et) {
-  return hide(et->kind());
+  return hide_externkind(et->kind());
 }
 
 wasm_externtype_t* wasm_functype_as_externtype(wasm_functype_t* ft) {
-  return hide(static_cast<wasm::ExternType*>(ft));
+  return hide_externtype(static_cast<wasm::ExternType*>(ft));
 }
 wasm_externtype_t* wasm_globaltype_as_externtype(wasm_globaltype_t* gt) {
-  return hide(static_cast<wasm::ExternType*>(gt));
+  return hide_externtype(static_cast<wasm::ExternType*>(gt));
 }
 wasm_externtype_t* wasm_tabletype_as_externtype(wasm_tabletype_t* tt) {
-  return hide(static_cast<wasm::ExternType*>(tt));
+  return hide_externtype(static_cast<wasm::ExternType*>(tt));
 }
 wasm_externtype_t* wasm_memorytype_as_externtype(wasm_memorytype_t* mt) {
-  return hide(static_cast<wasm::ExternType*>(mt));
+  return hide_externtype(static_cast<wasm::ExternType*>(mt));
 }
 
 const wasm_externtype_t* wasm_functype_as_externtype_const(
     const wasm_functype_t* ft) {
-  return hide(static_cast<const wasm::ExternType*>(ft));
+  return hide_externtype(static_cast<const wasm::ExternType*>(ft));
 }
 const wasm_externtype_t* wasm_globaltype_as_externtype_const(
     const wasm_globaltype_t* gt) {
-  return hide(static_cast<const wasm::ExternType*>(gt));
+  return hide_externtype(static_cast<const wasm::ExternType*>(gt));
 }
 const wasm_externtype_t* wasm_tabletype_as_externtype_const(
     const wasm_tabletype_t* tt) {
-  return hide(static_cast<const wasm::ExternType*>(tt));
+  return hide_externtype(static_cast<const wasm::ExternType*>(tt));
 }
 const wasm_externtype_t* wasm_memorytype_as_externtype_const(
     const wasm_memorytype_t* mt) {
-  return hide(static_cast<const wasm::ExternType*>(mt));
+  return hide_externtype(static_cast<const wasm::ExternType*>(mt));
 }
 
 wasm_functype_t* wasm_externtype_as_functype(wasm_externtype_t* et) {
   return et->kind() == wasm::EXTERN_FUNC
-             ? hide(static_cast<wasm::FuncType*>(reveal(et)))
+             ? hide_functype(
+                   static_cast<wasm::FuncType*>(reveal_externtype(et)))
              : nullptr;
 }
 wasm_globaltype_t* wasm_externtype_as_globaltype(wasm_externtype_t* et) {
   return et->kind() == wasm::EXTERN_GLOBAL
-             ? hide(static_cast<wasm::GlobalType*>(reveal(et)))
+             ? hide_globaltype(
+                   static_cast<wasm::GlobalType*>(reveal_externtype(et)))
              : nullptr;
 }
 wasm_tabletype_t* wasm_externtype_as_tabletype(wasm_externtype_t* et) {
   return et->kind() == wasm::EXTERN_TABLE
-             ? hide(static_cast<wasm::TableType*>(reveal(et)))
+             ? hide_tabletype(
+                   static_cast<wasm::TableType*>(reveal_externtype(et)))
              : nullptr;
 }
 wasm_memorytype_t* wasm_externtype_as_memorytype(wasm_externtype_t* et) {
   return et->kind() == wasm::EXTERN_MEMORY
-             ? hide(static_cast<wasm::MemoryType*>(reveal(et)))
+             ? hide_memorytype(
+                   static_cast<wasm::MemoryType*>(reveal_externtype(et)))
              : nullptr;
 }
 
 const wasm_functype_t* wasm_externtype_as_functype_const(
     const wasm_externtype_t* et) {
   return et->kind() == wasm::EXTERN_FUNC
-             ? hide(static_cast<const wasm::FuncType*>(reveal(et)))
+             ? hide_functype(
+                   static_cast<const wasm::FuncType*>(reveal_externtype(et)))
              : nullptr;
 }
 const wasm_globaltype_t* wasm_externtype_as_globaltype_const(
     const wasm_externtype_t* et) {
   return et->kind() == wasm::EXTERN_GLOBAL
-             ? hide(static_cast<const wasm::GlobalType*>(reveal(et)))
+             ? hide_globaltype(
+                   static_cast<const wasm::GlobalType*>(reveal_externtype(et)))
              : nullptr;
 }
 const wasm_tabletype_t* wasm_externtype_as_tabletype_const(
     const wasm_externtype_t* et) {
   return et->kind() == wasm::EXTERN_TABLE
-             ? hide(static_cast<const wasm::TableType*>(reveal(et)))
+             ? hide_tabletype(
+                   static_cast<const wasm::TableType*>(reveal_externtype(et)))
              : nullptr;
 }
 const wasm_memorytype_t* wasm_externtype_as_memorytype_const(
     const wasm_externtype_t* et) {
   return et->kind() == wasm::EXTERN_MEMORY
-             ? hide(static_cast<const wasm::MemoryType*>(reveal(et)))
+             ? hide_memorytype(
+                   static_cast<const wasm::MemoryType*>(reveal_externtype(et)))
              : nullptr;
 }
 
@@ -2300,20 +2495,20 @@ WASM_DEFINE_TYPE(importtype, wasm::ImportType)
 
 wasm_importtype_t* wasm_importtype_new(wasm_name_t* module, wasm_name_t* name,
                                        wasm_externtype_t* type) {
-  return release(
-      wasm::ImportType::make(adopt(module), adopt(name), adopt(type)));
+  return release_importtype(wasm::ImportType::make(
+      adopt_byte_vec(module), adopt_byte_vec(name), adopt_externtype(type)));
 }
 
 const wasm_name_t* wasm_importtype_module(const wasm_importtype_t* it) {
-  return hide(it->module());
+  return hide_byte_vec(it->module());
 }
 
 const wasm_name_t* wasm_importtype_name(const wasm_importtype_t* it) {
-  return hide(it->name());
+  return hide_byte_vec(it->name());
 }
 
 const wasm_externtype_t* wasm_importtype_type(const wasm_importtype_t* it) {
-  return hide(it->type());
+  return hide_externtype(it->type());
 }
 
 // Export Types
@@ -2322,15 +2517,16 @@ WASM_DEFINE_TYPE(exporttype, wasm::ExportType)
 
 wasm_exporttype_t* wasm_exporttype_new(wasm_name_t* name,
                                        wasm_externtype_t* type) {
-  return release(wasm::ExportType::make(adopt(name), adopt(type)));
+  return release_exporttype(
+      wasm::ExportType::make(adopt_byte_vec(name), adopt_externtype(type)));
 }
 
 const wasm_name_t* wasm_exporttype_name(const wasm_exporttype_t* et) {
-  return hide(et->name());
+  return hide_byte_vec(et->name());
 }
 
 const wasm_externtype_t* wasm_exporttype_type(const wasm_exporttype_t* et) {
-  return hide(et->type());
+  return hide_externtype(et->type());
 }
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -2342,7 +2538,12 @@ const wasm_externtype_t* wasm_exporttype_type(const wasm_exporttype_t* et) {
   WASM_DEFINE_OWN(name, Name)                                        \
                                                                      \
   wasm_##name##_t* wasm_##name##_copy(const wasm_##name##_t* t) {    \
-    return release(t->copy());                                       \
+    return release_##name(t->copy());                                \
+  }                                                                  \
+                                                                     \
+  bool wasm_##name##_same(const wasm_##name##_t* t1,                 \
+                          const wasm_##name##_t* t2) {               \
+    return t1->same(t2);                                             \
   }                                                                  \
                                                                      \
   void* wasm_##name##_get_host_info(const wasm_##name##_t* r) {      \
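The newly generated wasm_##name##_same predicate exposes reference identity to C. For instance, with two hypothetical function refs f1 and f2:

  bool same = wasm_func_same(f1, f2);  // true iff both wrap the same runtime object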
@@ -2360,17 +2561,17 @@ const wasm_externtype_t* wasm_exporttype_type(const wasm_exporttype_t* et) {
   WASM_DEFINE_REF_BASE(name, Name)                                         \
                                                                            \
   wasm_ref_t* wasm_##name##_as_ref(wasm_##name##_t* r) {                   \
-    return hide(static_cast<wasm::Ref*>(reveal(r)));                       \
+    return hide_ref(static_cast<wasm::Ref*>(reveal_##name(r)));            \
   }                                                                        \
   wasm_##name##_t* wasm_ref_as_##name(wasm_ref_t* r) {                     \
-    return hide(static_cast<Name*>(reveal(r)));                            \
+    return hide_##name(static_cast<Name*>(reveal_ref(r)));                 \
   }                                                                        \
                                                                            \
   const wasm_ref_t* wasm_##name##_as_ref_const(const wasm_##name##_t* r) { \
-    return hide(static_cast<const wasm::Ref*>(reveal(r)));                 \
+    return hide_ref(static_cast<const wasm::Ref*>(reveal_##name(r)));      \
   }                                                                        \
   const wasm_##name##_t* wasm_ref_as_##name##_const(const wasm_ref_t* r) { \
-    return hide(static_cast<const Name*>(reveal(r)));                      \
+    return hide_##name(static_cast<const Name*>(reveal_ref(r)));           \
   }
 
 #define WASM_DEFINE_SHARABLE_REF(name, Name) \
@@ -2384,11 +2585,11 @@ WASM_DEFINE_REF_BASE(ref, wasm::Ref)
 extern "C++" {
 
 inline auto is_empty(wasm_val_t v) -> bool {
-  return !is_ref(reveal(v.kind)) || !v.of.ref;
+  return !is_ref(reveal_valkind(v.kind)) || !v.of.ref;
 }
 
-inline auto hide(wasm::Val v) -> wasm_val_t {
-  wasm_val_t v2 = {hide(v.kind()), {}};
+inline auto hide_val(wasm::Val v) -> wasm_val_t {
+  wasm_val_t v2 = {hide_valkind(v.kind()), {}};
   switch (v.kind()) {
     case wasm::I32:
       v2.of.i32 = v.i32();
@@ -2404,7 +2605,7 @@ inline auto hide(wasm::Val v) -> wasm_val_t {
       break;
     case wasm::ANYREF:
     case wasm::FUNCREF:
-      v2.of.ref = hide(v.ref());
+      v2.of.ref = hide_ref(v.ref());
       break;
     default:
       UNREACHABLE();
@@ -2412,8 +2613,8 @@ inline auto hide(wasm::Val v) -> wasm_val_t {
   return v2;
 }
 
-inline auto release(wasm::Val v) -> wasm_val_t {
-  wasm_val_t v2 = {hide(v.kind()), {}};
+inline auto release_val(wasm::Val v) -> wasm_val_t {
+  wasm_val_t v2 = {hide_valkind(v.kind()), {}};
   switch (v.kind()) {
     case wasm::I32:
       v2.of.i32 = v.i32();
@@ -2429,7 +2630,7 @@ inline auto release(wasm::Val v) -> wasm_val_t {
       break;
     case wasm::ANYREF:
     case wasm::FUNCREF:
-      v2.of.ref = release(v.release_ref());
+      v2.of.ref = release_ref(v.release_ref());
       break;
     default:
       UNREACHABLE();
@@ -2437,8 +2638,8 @@ inline auto release(wasm::Val v) -> wasm_val_t {
   return v2;
 }
 
-inline auto adopt(wasm_val_t v) -> wasm::Val {
-  switch (reveal(v.kind)) {
+inline auto adopt_val(wasm_val_t v) -> wasm::Val {
+  switch (reveal_valkind(v.kind)) {
     case wasm::I32:
       return wasm::Val(v.of.i32);
     case wasm::I64:
@@ -2449,7 +2650,7 @@ inline auto adopt(wasm_val_t v) -> wasm::Val {
       return wasm::Val(v.of.f64);
     case wasm::ANYREF:
     case wasm::FUNCREF:
-      return wasm::Val(adopt(v.of.ref));
+      return wasm::Val(adopt_ref(v.of.ref));
     default:
       UNREACHABLE();
   }
@@ -2460,13 +2661,13 @@ struct borrowed_val {
   explicit borrowed_val(wasm::Val&& v) : it(std::move(v)) {}
   borrowed_val(borrowed_val&& that) : it(std::move(that.it)) {}
   ~borrowed_val() {
-    if (it.is_ref()) it.release_ref();
+    if (it.is_ref()) it.release_ref().release();
   }
 };
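Note the destructor fix above: wasm::Val::release_ref() now returns a wasm::own<Ref>, so the trailing .release() detaches that owner without deleting the ref. Roughly:

  // own<Ref> r = it.release_ref();  // move the ref out of the borrowed Val
  // r.release();                    // forget it, don't delete: the C caller still owns it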
 
-inline auto borrow(const wasm_val_t* v) -> borrowed_val {
+inline auto borrow_val(const wasm_val_t* v) -> borrowed_val {
   wasm::Val v2;
-  switch (reveal(v->kind)) {
+  switch (reveal_valkind(v->kind)) {
     case wasm::I32:
       v2 = wasm::Val(v->of.i32);
       break;
@@ -2481,7 +2682,7 @@ inline auto borrow(const wasm_val_t* v) -> borrowed_val {
       break;
     case wasm::ANYREF:
     case wasm::FUNCREF:
-      v2 = wasm::Val(adopt(v->of.ref));
+      v2 = wasm::Val(adopt_ref(v->of.ref));
       break;
     default:
       UNREACHABLE();
@@ -2491,15 +2692,15 @@ inline auto borrow(const wasm_val_t* v) -> borrowed_val {
 
 }  // extern "C++"
 
-WASM_DEFINE_VEC_BASE(val, wasm::Val, )
+WASM_DEFINE_VEC_BASE(val, wasm::Val, wasm::vec, )
 
 void wasm_val_vec_new(wasm_val_vec_t* out, size_t size,
                       wasm_val_t const data[]) {
   auto v2 = wasm::vec<wasm::Val>::make_uninitialized(size);
   for (size_t i = 0; i < v2.size(); ++i) {
-    v2[i] = adopt(data[i]);
+    v2[i] = adopt_val(data[i]);
   }
-  *out = release(std::move(v2));
+  *out = release_val_vec(std::move(v2));
 }
 
 void wasm_val_vec_copy(wasm_val_vec_t* out, wasm_val_vec_t* v) {
@@ -2507,36 +2708,70 @@ void wasm_val_vec_copy(wasm_val_vec_t* out, wasm_val_vec_t* v) {
   for (size_t i = 0; i < v2.size(); ++i) {
     wasm_val_t val;
     wasm_val_copy(&v->data[i], &val);
-    v2[i] = adopt(val);
+    v2[i] = adopt_val(val);
   }
-  *out = release(std::move(v2));
+  *out = release_val_vec(std::move(v2));
 }
 
 void wasm_val_delete(wasm_val_t* v) {
-  if (is_ref(reveal(v->kind))) adopt(v->of.ref);
+  if (is_ref(reveal_valkind(v->kind))) {
+    adopt_ref(v->of.ref);
+  }
 }
 
 void wasm_val_copy(wasm_val_t* out, const wasm_val_t* v) {
   *out = *v;
-  if (is_ref(reveal(v->kind))) {
-    out->of.ref = release(v->of.ref->copy());
+  if (is_ref(reveal_valkind(v->kind))) {
+    out->of.ref = v->of.ref ? release_ref(v->of.ref->copy()) : nullptr;
   }
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // Runtime Objects
 
+// Frames
+
+WASM_DEFINE_OWN(frame, wasm::Frame)
+WASM_DEFINE_VEC_OWN(frame, wasm::Frame)
+
+wasm_frame_t* wasm_frame_copy(const wasm_frame_t* frame) {
+  return release_frame(frame->copy());
+}
+
+// Defined below, together with wasm_instance_t.
+wasm_instance_t* wasm_frame_instance(const wasm_frame_t* frame);
+
+uint32_t wasm_frame_func_index(const wasm_frame_t* frame) {
+  return reveal_frame(frame)->func_index();
+}
+
+size_t wasm_frame_func_offset(const wasm_frame_t* frame) {
+  return reveal_frame(frame)->func_offset();
+}
+
+size_t wasm_frame_module_offset(const wasm_frame_t* frame) {
+  return reveal_frame(frame)->module_offset();
+}
+
 // Traps
 
 WASM_DEFINE_REF(trap, wasm::Trap)
 
 wasm_trap_t* wasm_trap_new(wasm_store_t* store, const wasm_message_t* message) {
-  auto message_ = borrow(message);
-  return release(wasm::Trap::make(store, message_.it));
+  auto message_ = borrow_byte_vec(message);
+  return release_trap(wasm::Trap::make(store, message_.it));
 }
 
 void wasm_trap_message(const wasm_trap_t* trap, wasm_message_t* out) {
-  *out = release(reveal(trap)->message());
+  *out = release_byte_vec(reveal_trap(trap)->message());
+}
+
+wasm_frame_t* wasm_trap_origin(const wasm_trap_t* trap) {
+  return release_frame(reveal_trap(trap)->origin());
+}
+
+void wasm_trap_trace(const wasm_trap_t* trap, wasm_frame_vec_t* out) {
+  *out = release_frame_vec(reveal_trap(trap)->trace());
 }
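With frames exposed, a C embedder can now walk a trap's stack, e.g. (sketch; the result vec owns its frames):

  wasm_frame_vec_t frames;
  wasm_trap_trace(trap, &frames);
  for (size_t i = 0; i < frames.size; ++i) {
    uint32_t func = wasm_frame_func_index(frames.data[i]);
    size_t offset = wasm_frame_func_offset(frames.data[i]);
    /* report func / offset */
  }
  wasm_frame_vec_delete(&frames);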
 
 // Foreign Objects
@@ -2544,7 +2779,7 @@ void wasm_trap_message(const wasm_trap_t* trap, wasm_message_t* out) {
 WASM_DEFINE_REF(foreign, wasm::Foreign)
 
 wasm_foreign_t* wasm_foreign_new(wasm_store_t* store) {
-  return release(wasm::Foreign::make(store));
+  return release_foreign(wasm::Foreign::make(store));
 }
 
 // Modules
@@ -2552,43 +2787,43 @@ wasm_foreign_t* wasm_foreign_new(wasm_store_t* store) {
 WASM_DEFINE_SHARABLE_REF(module, wasm::Module)
 
 bool wasm_module_validate(wasm_store_t* store, const wasm_byte_vec_t* binary) {
-  auto binary_ = borrow(binary);
+  auto binary_ = borrow_byte_vec(binary);
   return wasm::Module::validate(store, binary_.it);
 }
 
 wasm_module_t* wasm_module_new(wasm_store_t* store,
                                const wasm_byte_vec_t* binary) {
-  auto binary_ = borrow(binary);
-  return release(wasm::Module::make(store, binary_.it));
+  auto binary_ = borrow_byte_vec(binary);
+  return release_module(wasm::Module::make(store, binary_.it));
 }
 
 void wasm_module_imports(const wasm_module_t* module,
                          wasm_importtype_vec_t* out) {
-  *out = release(reveal(module)->imports());
+  *out = release_importtype_vec(reveal_module(module)->imports());
 }
 
 void wasm_module_exports(const wasm_module_t* module,
                          wasm_exporttype_vec_t* out) {
-  *out = release(reveal(module)->exports());
+  *out = release_exporttype_vec(reveal_module(module)->exports());
 }
 
 void wasm_module_serialize(const wasm_module_t* module, wasm_byte_vec_t* out) {
-  *out = release(reveal(module)->serialize());
+  *out = release_byte_vec(reveal_module(module)->serialize());
 }
 
 wasm_module_t* wasm_module_deserialize(wasm_store_t* store,
                                        const wasm_byte_vec_t* binary) {
-  auto binary_ = borrow(binary);
-  return release(wasm::Module::deserialize(store, binary_.it));
+  auto binary_ = borrow_byte_vec(binary);
+  return release_module(wasm::Module::deserialize(store, binary_.it));
 }
 
 wasm_shared_module_t* wasm_module_share(const wasm_module_t* module) {
-  return release(reveal(module)->share());
+  return release_shared_module(reveal_module(module)->share());
 }
 
 wasm_module_t* wasm_module_obtain(wasm_store_t* store,
                                   const wasm_shared_module_t* shared) {
-  return release(wasm::Module::obtain(store, shared));
+  return release_module(wasm::Module::obtain(store, shared));
 }
 
 // Function Instances
@@ -2598,9 +2833,9 @@ WASM_DEFINE_REF(func, wasm::Func)
 extern "C++" {
 
 auto wasm_callback(void* env, const wasm::Val args[], wasm::Val results[])
-    -> wasm::own<wasm::Trap*> {
+    -> wasm::own<wasm::Trap> {
   auto f = reinterpret_cast<wasm_func_callback_t>(env);
-  return adopt(f(hide(args), hide(results)));
+  return adopt_trap(f(hide_val_vec(args), hide_val_vec(results)));
 }
 
 struct wasm_callback_env_t {
@@ -2610,9 +2845,10 @@ struct wasm_callback_env_t {
 };
 
 auto wasm_callback_with_env(void* env, const wasm::Val args[],
-                            wasm::Val results[]) -> wasm::own<wasm::Trap*> {
+                            wasm::Val results[]) -> wasm::own<wasm::Trap> {
   auto t = static_cast<wasm_callback_env_t*>(env);
-  return adopt(t->callback(t->env, hide(args), hide(results)));
+  return adopt_trap(
+      t->callback(t->env, hide_val_vec(args), hide_val_vec(results)));
 }
 
 void wasm_callback_env_finalizer(void* env) {
@@ -2625,8 +2861,8 @@ void wasm_callback_env_finalizer(void* env) {
 
 wasm_func_t* wasm_func_new(wasm_store_t* store, const wasm_functype_t* type,
                            wasm_func_callback_t callback) {
-  return release(wasm::Func::make(store, type, wasm_callback,
-                                  reinterpret_cast<void*>(callback)));
+  return release_func(wasm::Func::make(store, type, wasm_callback,
+                                       reinterpret_cast<void*>(callback)));
 }
 
 wasm_func_t* wasm_func_new_with_env(wasm_store_t* store,
@@ -2634,12 +2870,12 @@ wasm_func_t* wasm_func_new_with_env(wasm_store_t* store,
                                     wasm_func_callback_with_env_t callback,
                                     void* env, void (*finalizer)(void*)) {
   auto env2 = new wasm_callback_env_t{callback, env, finalizer};
-  return release(wasm::Func::make(store, type, wasm_callback_with_env, env2,
-                                  wasm_callback_env_finalizer));
+  return release_func(wasm::Func::make(store, type, wasm_callback_with_env,
+                                       env2, wasm_callback_env_finalizer));
 }
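A host function with an environment, in the standard wasm.h shape (sketch; add_delta and delta are illustrative names):

  static wasm_trap_t* add_delta(void* env, const wasm_val_t args[],
                                wasm_val_t results[]) {
    results[0].kind = WASM_I32;
    results[0].of.i32 = args[0].of.i32 + *(int32_t*)env;
    return NULL;  // no trap
  }

  static int32_t delta = 1;
  wasm_func_t* f = wasm_func_new_with_env(store, ft, add_delta, &delta, NULL);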
 
 wasm_functype_t* wasm_func_type(const wasm_func_t* func) {
-  return release(func->type());
+  return release_functype(func->type());
 }
 
 size_t wasm_func_param_arity(const wasm_func_t* func) {
@@ -2652,7 +2888,8 @@ size_t wasm_func_result_arity(const wasm_func_t* func) {
 
 wasm_trap_t* wasm_func_call(const wasm_func_t* func, const wasm_val_t args[],
                             wasm_val_t results[]) {
-  return release(func->call(reveal(args), reveal(results)));
+  return release_trap(
+      func->call(reveal_val_vec(args), reveal_val_vec(results)));
 }
 
 // Global Instances
@@ -2662,20 +2899,20 @@ WASM_DEFINE_REF(global, wasm::Global)
 wasm_global_t* wasm_global_new(wasm_store_t* store,
                                const wasm_globaltype_t* type,
                                const wasm_val_t* val) {
-  auto val_ = borrow(val);
-  return release(wasm::Global::make(store, type, val_.it));
+  auto val_ = borrow_val(val);
+  return release_global(wasm::Global::make(store, type, val_.it));
 }
 
 wasm_globaltype_t* wasm_global_type(const wasm_global_t* global) {
-  return release(global->type());
+  return release_globaltype(global->type());
 }
 
 void wasm_global_get(const wasm_global_t* global, wasm_val_t* out) {
-  *out = release(global->get());
+  *out = release_val(global->get());
 }
 
 void wasm_global_set(wasm_global_t* global, const wasm_val_t* val) {
-  auto val_ = borrow(val);
+  auto val_ = borrow_val(val);
   global->set(val_.it);
 }
 
@@ -2685,15 +2922,15 @@ WASM_DEFINE_REF(table, wasm::Table)
 
 wasm_table_t* wasm_table_new(wasm_store_t* store, const wasm_tabletype_t* type,
                              wasm_ref_t* ref) {
-  return release(wasm::Table::make(store, type, ref));
+  return release_table(wasm::Table::make(store, type, ref));
 }
 
 wasm_tabletype_t* wasm_table_type(const wasm_table_t* table) {
-  return release(table->type());
+  return release_tabletype(table->type());
 }
 
 wasm_ref_t* wasm_table_get(const wasm_table_t* table, wasm_table_size_t index) {
-  return release(table->get(index));
+  return release_ref(table->get(index));
 }
 
 bool wasm_table_set(wasm_table_t* table, wasm_table_size_t index,
@@ -2716,11 +2953,11 @@ WASM_DEFINE_REF(memory, wasm::Memory)
 
 wasm_memory_t* wasm_memory_new(wasm_store_t* store,
                                const wasm_memorytype_t* type) {
-  return release(wasm::Memory::make(store, type));
+  return release_memory(wasm::Memory::make(store, type));
 }
 
 wasm_memorytype_t* wasm_memory_type(const wasm_memory_t* memory) {
-  return release(memory->type());
+  return release_memorytype(memory->type());
 }
 
 wasm_byte_t* wasm_memory_data(wasm_memory_t* memory) { return memory->data(); }
@@ -2740,67 +2977,67 @@ bool wasm_memory_grow(wasm_memory_t* memory, wasm_memory_pages_t delta) {
 // Externals
 
 WASM_DEFINE_REF(extern, wasm::Extern)
-WASM_DEFINE_VEC(extern, wasm::Extern, *)
+WASM_DEFINE_VEC_OWN(extern, wasm::Extern)
 
 wasm_externkind_t wasm_extern_kind(const wasm_extern_t* external) {
-  return hide(external->kind());
+  return hide_externkind(external->kind());
 }
 wasm_externtype_t* wasm_extern_type(const wasm_extern_t* external) {
-  return release(external->type());
+  return release_externtype(external->type());
 }
 
 wasm_extern_t* wasm_func_as_extern(wasm_func_t* func) {
-  return hide(static_cast<wasm::Extern*>(reveal(func)));
+  return hide_extern(static_cast<wasm::Extern*>(reveal_func(func)));
 }
 wasm_extern_t* wasm_global_as_extern(wasm_global_t* global) {
-  return hide(static_cast<wasm::Extern*>(reveal(global)));
+  return hide_extern(static_cast<wasm::Extern*>(reveal_global(global)));
 }
 wasm_extern_t* wasm_table_as_extern(wasm_table_t* table) {
-  return hide(static_cast<wasm::Extern*>(reveal(table)));
+  return hide_extern(static_cast<wasm::Extern*>(reveal_table(table)));
 }
 wasm_extern_t* wasm_memory_as_extern(wasm_memory_t* memory) {
-  return hide(static_cast<wasm::Extern*>(reveal(memory)));
+  return hide_extern(static_cast<wasm::Extern*>(reveal_memory(memory)));
 }
 
 const wasm_extern_t* wasm_func_as_extern_const(const wasm_func_t* func) {
-  return hide(static_cast<const wasm::Extern*>(reveal(func)));
+  return hide_extern(static_cast<const wasm::Extern*>(reveal_func(func)));
 }
 const wasm_extern_t* wasm_global_as_extern_const(const wasm_global_t* global) {
-  return hide(static_cast<const wasm::Extern*>(reveal(global)));
+  return hide_extern(static_cast<const wasm::Extern*>(reveal_global(global)));
 }
 const wasm_extern_t* wasm_table_as_extern_const(const wasm_table_t* table) {
-  return hide(static_cast<const wasm::Extern*>(reveal(table)));
+  return hide_extern(static_cast<const wasm::Extern*>(reveal_table(table)));
 }
 const wasm_extern_t* wasm_memory_as_extern_const(const wasm_memory_t* memory) {
-  return hide(static_cast<const wasm::Extern*>(reveal(memory)));
+  return hide_extern(static_cast<const wasm::Extern*>(reveal_memory(memory)));
 }
 
 wasm_func_t* wasm_extern_as_func(wasm_extern_t* external) {
-  return hide(external->func());
+  return hide_func(external->func());
 }
 wasm_global_t* wasm_extern_as_global(wasm_extern_t* external) {
-  return hide(external->global());
+  return hide_global(external->global());
 }
 wasm_table_t* wasm_extern_as_table(wasm_extern_t* external) {
-  return hide(external->table());
+  return hide_table(external->table());
 }
 wasm_memory_t* wasm_extern_as_memory(wasm_extern_t* external) {
-  return hide(external->memory());
+  return hide_memory(external->memory());
 }
 
 const wasm_func_t* wasm_extern_as_func_const(const wasm_extern_t* external) {
-  return hide(external->func());
+  return hide_func(external->func());
 }
 const wasm_global_t* wasm_extern_as_global_const(
     const wasm_extern_t* external) {
-  return hide(external->global());
+  return hide_global(external->global());
 }
 const wasm_table_t* wasm_extern_as_table_const(const wasm_extern_t* external) {
-  return hide(external->table());
+  return hide_table(external->table());
 }
 const wasm_memory_t* wasm_extern_as_memory_const(
     const wasm_extern_t* external) {
-  return hide(external->memory());
+  return hide_memory(external->memory());
 }
 
 // Module Instances
@@ -2809,20 +3046,29 @@ WASM_DEFINE_REF(instance, wasm::Instance)
 
 wasm_instance_t* wasm_instance_new(wasm_store_t* store,
                                    const wasm_module_t* module,
-                                   const wasm_extern_t* const imports[]) {
-  return release(wasm::Instance::make(
-      store, module, reinterpret_cast<const wasm::Extern* const*>(imports)));
+                                   const wasm_extern_t* const imports[],
+                                   wasm_trap_t** trap) {
+  wasm::own<wasm::Trap> error;
+  wasm_instance_t* instance = release_instance(wasm::Instance::make(
+      store, module, reinterpret_cast<const wasm::Extern* const*>(imports),
+      &error));
+  if (trap) *trap = hide_trap(error.release());
+  return instance;
 }
 
 void wasm_instance_exports(const wasm_instance_t* instance,
                            wasm_extern_vec_t* out) {
-  *out = release(instance->exports());
+  *out = release_extern_vec(instance->exports());
+}
+
+wasm_instance_t* wasm_frame_instance(const wasm_frame_t* frame) {
+  return hide_instance(reveal_frame(frame)->instance());
 }
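The instantiation entry point now reports traps (e.g. from a failing start function) through an out parameter instead of dropping them. C-side handling, sketched:

  wasm_trap_t* trap = NULL;
  wasm_instance_t* instance = wasm_instance_new(store, module, imports, &trap);
  if (!instance && trap) {
    wasm_message_t msg;
    wasm_trap_message(trap, &msg);
    /* report msg.data */
    wasm_byte_vec_delete(&msg);
    wasm_trap_delete(trap);
  }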
 
 #undef WASM_DEFINE_OWN
 #undef WASM_DEFINE_VEC_BASE
 #undef WASM_DEFINE_VEC_PLAIN
-#undef WASM_DEFINE_VEC
+#undef WASM_DEFINE_VEC_OWN
 #undef WASM_DEFINE_TYPE
 #undef WASM_DEFINE_REF_BASE
 #undef WASM_DEFINE_REF
diff --git a/deps/v8/src/wasm/c-api.h b/deps/v8/src/wasm/c-api.h
index c1a914a16ebc43..43a0fb73b2dcc6 100644
--- a/deps/v8/src/wasm/c-api.h
+++ b/deps/v8/src/wasm/c-api.h
@@ -7,8 +7,17 @@
 
 #include "include/v8.h"
 #include "src/common/globals.h"
+#include "src/handles/handles.h"
 #include "third_party/wasm-api/wasm.hh"
 
+namespace v8 {
+namespace internal {
+
+class JSWeakMap;
+
+}  // namespace internal
+}  // namespace v8
+
 namespace wasm {
 
 class StoreImpl {
@@ -27,14 +36,19 @@ class StoreImpl {
         reinterpret_cast<v8::Isolate*>(isolate)->GetData(0));
   }
 
+  void SetHostInfo(i::Handle<i::Object> object, void* info,
+                   void (*finalizer)(void*));
+  void* GetHostInfo(i::Handle<i::Object> key);
+
  private:
-  friend own<Store*> Store::make(Engine*);
+  friend own<Store> Store::make(Engine*);
 
   StoreImpl() {}
 
   v8::Isolate::CreateParams create_params_;
   v8::Isolate* isolate_ = nullptr;
   v8::Eternal<v8::Context> context_;
+  i::Handle<i::JSWeakMap> host_info_map_;
 };
 
 }  // namespace wasm
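Host info thus moves into a per-store weak map: SetHostInfo records object -> (info, finalizer) in host_info_map_, a JSWeakMap, so entries die together with their keys. This backs the wasm_*_get/set_host_info functions generated in c-api.cc; per wasm.h (sketch):

  wasm_func_set_host_info(f, my_data);      // kept alive only as long as f's object
  void* data = wasm_func_get_host_info(f);  // returns my_data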
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 9f1ca23c6235fd..582934e19f031b 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -338,35 +338,6 @@ struct BranchOnExceptionImmediate {
   }
 };
 
-template <Decoder::ValidateFlag validate>
-struct CallIndirectImmediate {
-  uint32_t table_index;
-  uint32_t sig_index;
-  FunctionSig* sig = nullptr;
-  uint32_t length = 0;
-  inline CallIndirectImmediate(const WasmFeatures enabled, Decoder* decoder,
-                               const byte* pc) {
-    uint32_t len = 0;
-    sig_index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
-    table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
-    if (!VALIDATE(table_index == 0 || enabled.anyref)) {
-      decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
-                      table_index);
-    }
-    length = 1 + len;
-  }
-};
-
-template <Decoder::ValidateFlag validate>
-struct CallFunctionImmediate {
-  uint32_t index;
-  FunctionSig* sig = nullptr;
-  uint32_t length;
-  inline CallFunctionImmediate(Decoder* decoder, const byte* pc) {
-    index = decoder->read_u32v<validate>(pc + 1, &length, "function index");
-  }
-};
-
 template <Decoder::ValidateFlag validate>
 struct FunctionIndexImmediate {
   uint32_t index = 0;
@@ -395,7 +366,37 @@ struct TableIndexImmediate {
   unsigned length = 1;
   inline TableIndexImmediate() = default;
   inline TableIndexImmediate(Decoder* decoder, const byte* pc) {
-    index = decoder->read_u8<validate>(pc + 1, "table index");
+    index = decoder->read_u32v<validate>(pc + 1, &length, "table index");
+  }
+};
+
+template <Decoder::ValidateFlag validate>
+struct CallIndirectImmediate {
+  uint32_t table_index;
+  uint32_t sig_index;
+  FunctionSig* sig = nullptr;
+  uint32_t length = 0;
+  inline CallIndirectImmediate(const WasmFeatures enabled, Decoder* decoder,
+                               const byte* pc) {
+    uint32_t len = 0;
+    sig_index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
+    TableIndexImmediate<validate> table(decoder, pc + len);
+    if (!VALIDATE((table.index == 0 && table.length == 1) || enabled.anyref)) {
+      decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
+                      table.index);
+    }
+    table_index = table.index;
+    length = len + table.length;
+  }
+};
+
+template <Decoder::ValidateFlag validate>
+struct CallFunctionImmediate {
+  uint32_t index;
+  FunctionSig* sig = nullptr;
+  uint32_t length;
+  inline CallFunctionImmediate(Decoder* decoder, const byte* pc) {
+    index = decoder->read_u32v<validate>(pc + 1, &length, "function index");
   }
 };
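On the wire, both call_indirect immediates are now LEB128 u32s: opcode 0x11, then the signature index, then the table index (previously a fixed single byte). For example, the bytes 0x11 0x02 0x00 decode as signature index 2 against table 0; a multi-byte table index such as 0x80 0x01 (= 128) is only accepted once anyref is enabled.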
 
@@ -748,8 +749,6 @@ struct ControlBase {
   F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result)             \
   F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm,    \
     const Vector<Value> inputs, Value* result)                                \
-  F(SimdShiftOp, WasmOpcode opcode, const SimdShiftImmediate<validate>& imm,  \
-    const Value& input, Value* result)                                        \
   F(Simd8x16ShuffleOp, const Simd8x16ShuffleImmediate<validate>& imm,         \
     const Value& input0, const Value& input1, Value* result)                  \
   F(Throw, const ExceptionIndexImmediate<validate>& imm,                      \
@@ -849,7 +848,9 @@ class WasmDecoder : public Decoder {
             type = kWasmAnyRef;
             break;
           }
-          decoder->error(decoder->pc() - 1, "invalid local type");
+          decoder->error(decoder->pc() - 1,
+                         "invalid local type 'anyref', enable with "
+                         "--experimental-wasm-anyref");
           return false;
         case kLocalFuncRef:
           if (enabled.anyref) {
@@ -857,7 +858,7 @@ class WasmDecoder : public Decoder {
             break;
           }
           decoder->error(decoder->pc() - 1,
-                         "local type 'funcref' is not enabled with "
+                         "invalid local type 'funcref', enable with "
                          "--experimental-wasm-anyref");
           return false;
         case kLocalExnRef:
@@ -865,14 +866,19 @@ class WasmDecoder : public Decoder {
             type = kWasmExnRef;
             break;
           }
-          decoder->error(decoder->pc() - 1, "invalid local type");
+          decoder->error(decoder->pc() - 1,
+                         "invalid local type 'exception ref', enable with "
+                         "--experimental-wasm-eh");
           return false;
         case kLocalS128:
           if (enabled.simd) {
             type = kWasmS128;
             break;
           }
-          V8_FALLTHROUGH;
+          decoder->error(decoder->pc() - 1,
+                         "invalid local type 'Simd128', enable with "
+                         "--experimental-wasm-simd");
+          return false;
         default:
           decoder->error(decoder->pc() - 1, "invalid local type");
           return false;
@@ -2666,16 +2672,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
     return imm.length;
   }
 
-  uint32_t SimdShiftOp(WasmOpcode opcode) {
-    SimdShiftImmediate<validate> imm(this, this->pc_);
-    if (this->Validate(this->pc_, opcode, imm)) {
-      auto input = Pop(0, kWasmS128);
-      auto* result = Push(kWasmS128);
-      CALL_INTERFACE_IF_REACHABLE(SimdShiftOp, opcode, imm, input, result);
-    }
-    return imm.length;
-  }
-
   uint32_t Simd8x16ShuffleOp() {
     Simd8x16ShuffleImmediate<validate> imm(this, this->pc_);
     if (this->Validate(this->pc_, imm)) {
@@ -2727,21 +2723,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
         len = SimdReplaceLane(opcode, kWasmI32);
         break;
       }
-      case kExprI64x2Shl:
-      case kExprI64x2ShrS:
-      case kExprI64x2ShrU:
-      case kExprI32x4Shl:
-      case kExprI32x4ShrS:
-      case kExprI32x4ShrU:
-      case kExprI16x8Shl:
-      case kExprI16x8ShrS:
-      case kExprI16x8ShrU:
-      case kExprI8x16Shl:
-      case kExprI8x16ShrS:
-      case kExprI8x16ShrU: {
-        len = SimdShiftOp(opcode);
-        break;
-      }
       case kExprS8x16Shuffle: {
         len = Simd8x16ShuffleOp();
         break;
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 7df5abf5c878b4..4940134d53c657 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -262,21 +262,18 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
   }
 }
 
-JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(Isolate* isolate,
-                                                               FunctionSig* sig,
-                                                               bool is_import)
-    : job_(compiler::NewJSToWasmCompilationJob(isolate, sig, is_import)) {}
+JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
+    Isolate* isolate, WasmEngine* wasm_engine, FunctionSig* sig, bool is_import,
+    const WasmFeatures& enabled_features)
+    : is_import_(is_import),
+      sig_(sig),
+      job_(compiler::NewJSToWasmCompilationJob(isolate, wasm_engine, sig,
+                                               is_import, enabled_features)) {}
 
 JSToWasmWrapperCompilationUnit::~JSToWasmWrapperCompilationUnit() = default;
 
-void JSToWasmWrapperCompilationUnit::Prepare(Isolate* isolate) {
-  CompilationJob::Status status = job_->PrepareJob(isolate);
-  CHECK_EQ(status, CompilationJob::SUCCEEDED);
-}
-
 void JSToWasmWrapperCompilationUnit::Execute() {
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileJSToWasmWrapper");
-  DCHECK_EQ(job_->state(), CompilationJob::State::kReadyToExecute);
   CompilationJob::Status status = job_->ExecuteJob();
   CHECK_EQ(status, CompilationJob::SUCCEEDED);
 }
@@ -296,8 +293,9 @@ Handle<Code> JSToWasmWrapperCompilationUnit::Finalize(Isolate* isolate) {
 Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
     Isolate* isolate, FunctionSig* sig, bool is_import) {
   // Run the compilation unit synchronously.
-  JSToWasmWrapperCompilationUnit unit(isolate, sig, is_import);
-  unit.Prepare(isolate);
+  WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate);
+  JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
+                                      is_import, enabled_features);
   unit.Execute();
   return unit.Finalize(isolate);
 }
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index d0b47b91aa88ac..2da028a047e9e0 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -108,19 +108,24 @@ STATIC_ASSERT(sizeof(WasmCompilationUnit) <= 2 * kSystemPointerSize);
 
 class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
  public:
-  JSToWasmWrapperCompilationUnit(Isolate* isolate, FunctionSig* sig,
-                                 bool is_import);
+  JSToWasmWrapperCompilationUnit(Isolate* isolate, WasmEngine* wasm_engine,
+                                 FunctionSig* sig, bool is_import,
+                                 const WasmFeatures& enabled_features);
   ~JSToWasmWrapperCompilationUnit();
 
-  void Prepare(Isolate* isolate);
   void Execute();
   Handle<Code> Finalize(Isolate* isolate);
 
+  bool is_import() const { return is_import_; }
+  FunctionSig* sig() const { return sig_; }
+
   // Run a compilation unit synchronously.
   static Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
                                              bool is_import);
 
  private:
+  bool is_import_;
+  FunctionSig* sig_;
   std::unique_ptr<OptimizedCompilationJob> job_;
 };
 
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 8efac18787ef28..923e1154ea09c3 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -258,8 +258,8 @@ class WasmGraphBuildingInterface {
   void Drop(FullDecoder* decoder, const Value& value) {}
 
   void DoReturn(FullDecoder* decoder, Vector<Value> values) {
-    TFNode** nodes = GetNodes(values);
-    BUILD(Return, static_cast<uint32_t>(values.size()), nodes);
+    Vector<TFNode*> nodes = GetNodes(values);
+    BUILD(Return, nodes);
   }
 
   void GetLocal(FullDecoder* decoder, Value* result,
@@ -319,10 +319,10 @@ class WasmGraphBuildingInterface {
   void BrOrRet(FullDecoder* decoder, uint32_t depth) {
     if (depth == decoder->control_depth() - 1) {
       uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
-      TFNode** values =
-          ret_count == 0 ? nullptr
+      Vector<TFNode*> values =
+          ret_count == 0 ? Vector<TFNode*>{}
                          : GetNodes(decoder->stack_value(ret_count), ret_count);
-      BUILD(Return, ret_count, values);
+      BUILD(Return, values);
     } else {
       Br(decoder, decoder->control_at(depth));
     }
@@ -431,23 +431,16 @@ class WasmGraphBuildingInterface {
 
   void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
               Value* result) {
-    TFNode** inputs = GetNodes(args);
-    TFNode* node = BUILD(SimdOp, opcode, inputs);
+    Vector<TFNode*> inputs = GetNodes(args);
+    TFNode* node = BUILD(SimdOp, opcode, inputs.begin());
     if (result) result->node = node;
   }
 
   void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
                   const SimdLaneImmediate<validate> imm, Vector<Value> inputs,
                   Value* result) {
-    TFNode** nodes = GetNodes(inputs);
-    result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes);
-  }
-
-  void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
-                   const SimdShiftImmediate<validate> imm, const Value& input,
-                   Value* result) {
-    TFNode* inputs[] = {input.node};
-    result->node = BUILD(SimdShiftOp, opcode, imm.shift, inputs);
+    Vector<TFNode*> nodes = GetNodes(inputs);
+    result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes.begin());
   }
 
   void Simd8x16ShuffleOp(FullDecoder* decoder,
@@ -495,7 +488,7 @@ class WasmGraphBuildingInterface {
     SetEnv(if_match_env);
-    // TODO(mstarzinger): Can't use BUILD() here, GetExceptionValues() returns
-    // TFNode** rather than TFNode*. Fix to add landing pads.
+    // TODO(mstarzinger): Can't use BUILD() here, GetExceptionValues() returns
+    // a Vector of nodes rather than a single node. Fix to add landing pads.
-    TFNode** caught_values =
+    Vector<TFNode*> caught_values =
         builder_->GetExceptionValues(exception.node, imm.exception);
     for (size_t i = 0, e = values.size(); i < e; ++i) {
       values[i].node = caught_values[i];
@@ -526,9 +519,9 @@ class WasmGraphBuildingInterface {
 
   void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
                 const MemoryAccessImmediate<validate>& imm, Value* result) {
-    TFNode** inputs = GetNodes(args);
-    TFNode* node = BUILD(AtomicOp, opcode, inputs, imm.alignment, imm.offset,
-                         decoder->position());
+    Vector<TFNode*> inputs = GetNodes(args);
+    TFNode* node = BUILD(AtomicOp, opcode, inputs.begin(), imm.alignment,
+                         imm.offset, decoder->position());
     if (result) result->node = node;
   }
 
@@ -598,15 +591,15 @@ class WasmGraphBuildingInterface {
         ->try_info;
   }
 
-  TFNode** GetNodes(Value* values, size_t count) {
-    TFNode** nodes = builder_->Buffer(count);
+  Vector<TFNode*> GetNodes(Value* values, size_t count) {
+    Vector<TFNode*> nodes = builder_->Buffer(count);
     for (size_t i = 0; i < count; ++i) {
       nodes[i] = values[i].node;
     }
     return nodes;
   }
 
-  TFNode** GetNodes(Vector<Value> values) {
+  Vector<TFNode*> GetNodes(Vector<Value> values) {
     return GetNodes(values.begin(), values.size());
   }
 
@@ -885,17 +878,17 @@ class WasmGraphBuildingInterface {
               FunctionSig* sig, uint32_t sig_index, const Value args[],
               Value returns[]) {
     int param_count = static_cast<int>(sig->parameter_count());
-    TFNode** arg_nodes = builder_->Buffer(param_count + 1);
+    Vector<TFNode*> arg_nodes = builder_->Buffer(param_count + 1);
     TFNode** return_nodes = nullptr;
     arg_nodes[0] = index_node;
     for (int i = 0; i < param_count; ++i) {
       arg_nodes[i + 1] = args[i].node;
     }
     if (index_node) {
-      BUILD(CallIndirect, table_index, sig_index, arg_nodes, &return_nodes,
-            decoder->position());
+      BUILD(CallIndirect, table_index, sig_index, arg_nodes.begin(),
+            &return_nodes, decoder->position());
     } else {
-      BUILD(CallDirect, sig_index, arg_nodes, &return_nodes,
+      BUILD(CallDirect, sig_index, arg_nodes.begin(), &return_nodes,
             decoder->position());
     }
     int return_count = static_cast<int>(sig->return_count());
@@ -911,16 +904,16 @@ class WasmGraphBuildingInterface {
                     TFNode* index_node, FunctionSig* sig, uint32_t sig_index,
                     const Value args[]) {
     int arg_count = static_cast<int>(sig->parameter_count());
-    TFNode** arg_nodes = builder_->Buffer(arg_count + 1);
+    Vector<TFNode*> arg_nodes = builder_->Buffer(arg_count + 1);
     arg_nodes[0] = index_node;
     for (int i = 0; i < arg_count; ++i) {
       arg_nodes[i + 1] = args[i].node;
     }
     if (index_node) {
-      BUILD(ReturnCallIndirect, table_index, sig_index, arg_nodes,
+      BUILD(ReturnCallIndirect, table_index, sig_index, arg_nodes.begin(),
             decoder->position());
     } else {
-      BUILD(ReturnCall, sig_index, arg_nodes, decoder->position());
+      BUILD(ReturnCall, sig_index, arg_nodes.begin(), decoder->position());
     }
   }
 };
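
The recurring change in this file replaces raw `TFNode**` buffers with sized `Vector<TFNode*>` views, so the element count travels with the pointer and `.begin()` can still be passed to interfaces that expect a raw array. Below is a minimal sketch of such a non-owning view, assuming nothing about V8's real `Vector<T>` beyond the members this diff uses:

```cpp
#include <cstddef>

// Hypothetical stand-in for V8's Vector<T>: a non-owning (pointer, length)
// pair. Only the members exercised by the changes above are sketched.
template <typename T>
class VectorView {
 public:
  VectorView() = default;
  VectorView(T* start, size_t length) : start_(start), length_(length) {}
  T* begin() const { return start_; }  // raw pointer for C-style callees
  T* end() const { return start_ + length_; }
  size_t size() const { return length_; }
  T& operator[](size_t i) const { return start_[i]; }

 private:
  T* start_ = nullptr;
  size_t length_ = 0;
};
```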
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 379a547b55941a..8889c18e9c5192 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -37,6 +37,21 @@ namespace wasm {
 // The above illustrates jump table lines {Li} containing slots {Si} with each
 // line containing {n} slots and some padding {x} for alignment purposes.
 // Other jump tables are just consecutive.
+//
+// The main jump table will be patched concurrently while other threads execute
+// it. The code at the new target might also have been emitted concurrently, so
+// we need to ensure that there is proper synchronization between code emission,
+// jump table patching and code execution.
+// On Intel platforms, this all works out of the box because there is cache
+// coherency between i-cache and d-cache.
+// On ARM, it is safe because the i-cache flush after code emission executes an
+// "ic ivau" (Instruction Cache line Invalidate by Virtual Address to Point of
+// Unification), which broadcasts to all cores. A core which sees the jump table
+// update thus also sees the new code. Since the other core does not explicitly
+// execute an "isb" (Instruction Synchronization Barrier), it might still
+// execute the old code afterwards, which is no problem, since that code remains
+// available until it is garbage collected. Garbage collection itself is a
+// synchronization barrier though.
 class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
  public:
   // Translate an offset into the continuous jump table to a jump table index.
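
The comment block added above documents a publish/consume pattern: new code is made visible before the jump-table slot is patched, and stale reads are tolerated. A minimal sketch of the same pattern, using a `std::atomic` function pointer as a stand-in for a patched machine-code slot (the real code patches instructions and relies on i-cache flushes, not C++ atomics):

```cpp
#include <atomic>
#include <cstdio>

void old_code() { std::puts("old (e.g. Liftoff) code"); }
void new_code() { std::puts("new (e.g. TurboFan) code"); }

// One jump-table "slot". The release store models "emit code, flush the
// i-cache, then patch the slot"; the acquire load models an executing thread.
std::atomic<void (*)()> slot{old_code};

void patcher() {
  slot.store(new_code, std::memory_order_release);  // publish after emission
}

void executor() {
  // Observing old_code after the patch is harmless: the old code object
  // stays alive until garbage collection, which itself acts as a barrier.
  slot.load(std::memory_order_acquire)();
}

int main() {
  patcher();
  executor();
}
```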
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 658f329775ed18..c264bac96e8f1d 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -152,6 +152,9 @@ class CompilationUnitQueues {
     for (int task_id = 0; task_id < max_tasks; ++task_id) {
       queues_[task_id].next_steal_task_id = next_task_id(task_id);
     }
+    for (auto& atomic_counter : num_units_) {
+      std::atomic_init(&atomic_counter, size_t{0});
+    }
   }
 
   base::Optional<WasmCompilationUnit> GetNextUnit(
@@ -254,15 +257,14 @@ class CompilationUnitQueues {
   };
 
   struct BigUnitsQueue {
-    BigUnitsQueue() = default;
+    BigUnitsQueue() {
+      for (auto& atomic : has_units) std::atomic_init(&atomic, false);
+    }
 
     base::Mutex mutex;
 
     // Can be read concurrently to check whether any elements are in the queue.
-    std::atomic_bool has_units[kNumTiers] = {
-      ATOMIC_VAR_INIT(false),
-      ATOMIC_VAR_INIT(false)
-    };
+    std::atomic<bool> has_units[kNumTiers];
 
     // Protected by {mutex}:
     std::priority_queue<BigUnit> units[kNumTiers];
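
Both queue types above drop `ATOMIC_VAR_INIT` brace initialization in favor of explicit `std::atomic_init` loops; before C++20, a defaulted `std::atomic` constructor leaves array elements with indeterminate values, so each element has to be initialized by hand. A compilable sketch of the pattern with simplified members:

```cpp
#include <atomic>
#include <cstddef>

constexpr int kNumTiers = 2;

struct Queues {
  std::atomic<size_t> num_units[kNumTiers];
  std::atomic<bool> has_units[kNumTiers];

  Queues() {
    // Pre-C++20, default-constructed std::atomic elements hold indeterminate
    // values, so initialize every array element explicitly.
    for (auto& counter : num_units) std::atomic_init(&counter, size_t{0});
    for (auto& flag : has_units) std::atomic_init(&flag, false);
  }
};
```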
@@ -271,11 +273,8 @@ class CompilationUnitQueues {
   std::vector<Queue> queues_;
   BigUnitsQueue big_units_queue_;
 
-  std::atomic_size_t num_units_[kNumTiers] = {
-    ATOMIC_VAR_INIT(0),
-    ATOMIC_VAR_INIT(0)
-  };
-  std::atomic_int next_queue_to_add{0};
+  std::atomic<size_t> num_units_[kNumTiers];
+  std::atomic<int> next_queue_to_add{0};
 
   int next_task_id(int task_id) const {
     int next = task_id + 1;
@@ -382,7 +381,7 @@ class CompilationStateImpl {
   // Initialize compilation progress. Set compilation tiers to expect for
   // baseline and top tier compilation. Must be set before {AddCompilationUnits}
   // is invoked which triggers background compilation.
-  void InitializeCompilationProgress(bool lazy_module, int num_import_wrappers);
+  void InitializeCompilationProgress(bool lazy_module, int num_wrappers);
 
   // Add the callback function to be called on compilation events. Needs to be
   // set before {AddCompilationUnits} is run to ensure that it receives all
@@ -390,13 +389,24 @@ class CompilationStateImpl {
   void AddCallback(CompilationState::callback_t);
 
   // Inserts new functions to compile and kicks off compilation.
-  void AddCompilationUnits(Vector<WasmCompilationUnit> baseline_units,
-                           Vector<WasmCompilationUnit> top_tier_units);
+  void AddCompilationUnits(
+      Vector<WasmCompilationUnit> baseline_units,
+      Vector<WasmCompilationUnit> top_tier_units,
+      Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
+          js_to_wasm_wrapper_units);
   void AddTopTierCompilationUnit(WasmCompilationUnit);
   base::Optional<WasmCompilationUnit> GetNextCompilationUnit(
       int task_id, CompileBaselineOnly baseline_only);
 
+  std::shared_ptr<JSToWasmWrapperCompilationUnit>
+  GetNextJSToWasmWrapperCompilationUnit();
+  void FinalizeJSToWasmWrappers(Isolate* isolate, const WasmModule* module,
+                                Handle<FixedArray>* export_wrappers_out);
+
   void OnFinishedUnits(Vector<WasmCode*>);
+  void OnFinishedJSToWasmWrapperUnits(int num);
+  void TriggerCallbacks(bool completes_baseline_compilation,
+                        bool completes_top_tier_compilation);
 
   void OnBackgroundTaskStopped(int task_id, const WasmFeatures& detected);
   void UpdateDetectedFeatures(const WasmFeatures& detected);
@@ -471,7 +481,7 @@ class CompilationStateImpl {
 
   // Compilation error, atomically updated. This flag can be updated and read
   // using relaxed semantics.
-  std::atomic_bool compile_failed_{false};
+  std::atomic<bool> compile_failed_{false};
 
   const int max_background_tasks_ = 0;
 
@@ -484,6 +494,13 @@ class CompilationStateImpl {
   // tasks a fair chance to utilize the worker threads on a regular basis.
   std::atomic<double> next_compilation_deadline_{0};
 
+  // Index of the next wrapper to compile in {js_to_wasm_wrapper_units_}.
+  std::atomic<int> js_to_wasm_wrapper_id_{0};
+  // Wrapper compilation units are stored in shared_ptrs so that they are kept
+  // alive by the tasks even if the NativeModule dies.
+  std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
+      js_to_wasm_wrapper_units_;
+
   // This mutex protects all information of this {CompilationStateImpl} which is
   // being accessed concurrently.
   mutable base::Mutex mutex_;
@@ -525,9 +542,9 @@ class CompilationStateImpl {
   //////////////////////////////////////////////////////////////////////////////
 
   // Encoding of fields in the {compilation_progress_} vector.
-  class RequiredBaselineTierField : public BitField8<ExecutionTier, 0, 2> {};
-  class RequiredTopTierField : public BitField8<ExecutionTier, 2, 2> {};
-  class ReachedTierField : public BitField8<ExecutionTier, 4, 2> {};
+  using RequiredBaselineTierField = BitField8<ExecutionTier, 0, 2>;
+  using RequiredTopTierField = BitField8<ExecutionTier, 2, 2>;
+  using ReachedTierField = BitField8<ExecutionTier, 4, 2>;
 };
 
 CompilationStateImpl* Impl(CompilationState* compilation_state) {
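
For reference, the three `using` aliases above pack three 2-bit tier fields (bit positions 0, 2 and 4) into one byte per function in `compilation_progress_`. A hedged sketch of the equivalent shift/mask encoding; the `ExecutionTier` values are illustrative, and V8's `BitField8` template generates comparable code:

```cpp
#include <cassert>
#include <cstdint>

enum class ExecutionTier : uint8_t { kNone, kInterpreter, kLiftoff, kTurbofan };

constexpr uint8_t Encode(ExecutionTier required_baseline,
                         ExecutionTier required_top, ExecutionTier reached) {
  return static_cast<uint8_t>(static_cast<uint8_t>(required_baseline) |
                              static_cast<uint8_t>(required_top) << 2 |
                              static_cast<uint8_t>(reached) << 4);
}

constexpr ExecutionTier DecodeReached(uint8_t progress) {
  return static_cast<ExecutionTier>((progress >> 4) & 0b11);
}

int main() {
  uint8_t p = Encode(ExecutionTier::kLiftoff, ExecutionTier::kTurbofan,
                     ExecutionTier::kNone);
  assert(DecodeReached(p) == ExecutionTier::kNone);
  assert((p & 0b11) == static_cast<uint8_t>(ExecutionTier::kLiftoff));
}
```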
@@ -712,6 +729,11 @@ class CompilationUnitBuilder {
     }
   }
 
+  void AddJSToWasmWrapperUnit(
+      std::shared_ptr<JSToWasmWrapperCompilationUnit> unit) {
+    js_to_wasm_wrapper_units_.emplace_back(std::move(unit));
+  }
+
   void AddTopTierUnit(int func_index) {
     ExecutionTierPair tiers = GetRequestedExecutionTiers(
         native_module_->module(), compilation_state()->compile_mode(),
@@ -730,9 +752,13 @@ class CompilationUnitBuilder {
   }
 
   bool Commit() {
-    if (baseline_units_.empty() && tiering_units_.empty()) return false;
-    compilation_state()->AddCompilationUnits(VectorOf(baseline_units_),
-                                             VectorOf(tiering_units_));
+    if (baseline_units_.empty() && tiering_units_.empty() &&
+        js_to_wasm_wrapper_units_.empty()) {
+      return false;
+    }
+    compilation_state()->AddCompilationUnits(
+        VectorOf(baseline_units_), VectorOf(tiering_units_),
+        VectorOf(js_to_wasm_wrapper_units_));
     Clear();
     return true;
   }
@@ -740,6 +766,7 @@ class CompilationUnitBuilder {
   void Clear() {
     baseline_units_.clear();
     tiering_units_.clear();
+    js_to_wasm_wrapper_units_.clear();
   }
 
  private:
@@ -751,6 +778,8 @@ class CompilationUnitBuilder {
   const ExecutionTier default_tier_;
   std::vector<WasmCompilationUnit> baseline_units_;
   std::vector<WasmCompilationUnit> tiering_units_;
+  std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
+      js_to_wasm_wrapper_units_;
 };
 
 void SetCompileError(ErrorThrower* thrower, ModuleWireBytes wire_bytes,
@@ -910,6 +939,33 @@ void RecordStats(const Code code, Counters* counters) {
 
 constexpr int kMainThreadTaskId = -1;
 
+bool ExecuteJSToWasmWrapperCompilationUnits(
+    const std::shared_ptr<BackgroundCompileToken>& token) {
+  std::shared_ptr<JSToWasmWrapperCompilationUnit> wrapper_unit = nullptr;
+  int num_processed_wrappers = 0;
+  do {
+    // TODO(thibaudm): Reschedule the compilation task if it takes too long, so
+    // that the background thread is not blocked.
+    {
+      BackgroundCompileScope compile_scope(token);
+      if (compile_scope.cancelled()) return false;
+      wrapper_unit = compile_scope.compilation_state()
+                         ->GetNextJSToWasmWrapperCompilationUnit();
+    }
+    if (wrapper_unit) {
+      wrapper_unit->Execute();
+      ++num_processed_wrappers;
+    }
+  } while (wrapper_unit);
+  {
+    BackgroundCompileScope compile_scope(token);
+    if (compile_scope.cancelled()) return false;
+    compile_scope.compilation_state()->OnFinishedJSToWasmWrapperUnits(
+        num_processed_wrappers);
+  }
+  return true;
+}
+
 // Run by the main thread and background tasks to take part in compilation.
 // Returns whether any units were executed.
 bool ExecuteCompilationUnits(
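
`ExecuteJSToWasmWrapperCompilationUnits` above re-enters a `BackgroundCompileScope` for every unit, so a cancelled compilation stops background work between units rather than only at the end. A self-contained sketch of that shape, with a mutex-guarded queue standing in for the compilation state (all names are illustrative):

```cpp
#include <atomic>
#include <functional>
#include <mutex>
#include <queue>

std::mutex mu;
std::queue<std::function<void()>> work;  // guarded by mu
std::atomic<bool> cancelled{false};

bool RunUnits() {
  for (;;) {
    std::function<void()> unit;
    {
      std::lock_guard<std::mutex> scope(mu);  // models BackgroundCompileScope
      if (cancelled.load()) return false;     // bail out promptly
      if (work.empty()) return true;          // all units processed
      unit = std::move(work.front());
      work.pop();
    }
    unit();  // execute outside the lock, like wrapper_unit->Execute()
  }
}
```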
@@ -918,6 +974,13 @@ bool ExecuteCompilationUnits(
   TRACE_COMPILE("Compiling (task %d)...\n", task_id);
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ExecuteCompilationUnits");
 
+  // Execute JS to WASM wrapper units first, so that they are ready to be
+  // finalized by the main thread when the kFinishedBaselineCompilation event is
+  // triggered.
+  if (!ExecuteJSToWasmWrapperCompilationUnits(token)) {
+    return false;
+  }
+
   const bool is_foreground = task_id == kMainThreadTaskId;
   // The main thread uses task id 0, which might collide with one of the
   // background tasks. This is fine, as it will only cause some contention on
@@ -1050,6 +1113,35 @@ bool ExecuteCompilationUnits(
   return true;
 }
 
+using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
+
+// Returns the number of units added.
+int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine,
+                          NativeModule* native_module,
+                          CompilationUnitBuilder* builder,
+                          const WasmFeatures& enabled_features) {
+// Disable asynchronous wrapper compilation when builtins are not embedded;
+// otherwise the isolate might be used after teardown to access builtins.
+#ifdef V8_EMBEDDED_BUILTINS
+  std::unordered_set<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>> keys;
+  for (auto exp : native_module->module()->export_table) {
+    if (exp.kind != kExternalFunction) continue;
+    auto& function = native_module->module()->functions[exp.index];
+    JSToWasmWrapperKey key(function.imported, *function.sig);
+    if (keys.insert(key).second) {
+      auto unit = std::make_shared<JSToWasmWrapperCompilationUnit>(
+          isolate, wasm_engine, function.sig, function.imported,
+          enabled_features);
+      builder->AddJSToWasmWrapperUnit(std::move(unit));
+    }
+  }
+
+  return static_cast<int>(keys.size());
+#else
+  return 0;
+#endif
+}

+
 // Returns the number of units added.
 int AddImportWrapperUnits(NativeModule* native_module,
                           CompilationUnitBuilder* builder) {
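
`AddExportWrapperUnits` above creates one wrapper compilation unit per distinct `(imported, signature)` key rather than one per export, using `insert().second` to detect first occurrences. The same idiom on simplified types (`std::string` stands in for `FunctionSig`):

```cpp
#include <set>
#include <string>
#include <utility>
#include <vector>

using Key = std::pair<bool, std::string>;  // (is_import, canonical signature)

// Count how many wrapper units a list of exports actually needs:
// exports sharing a signature cost a single compilation.
int AddWrapperUnits(const std::vector<Key>& exports) {
  std::set<Key> seen;
  int units = 0;
  for (const Key& key : exports) {
    if (seen.insert(key).second) ++units;  // .second is false for repeats
  }
  return units;
}
```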
@@ -1059,8 +1151,7 @@ int AddImportWrapperUnits(NativeModule* native_module,
   int num_imported_functions = native_module->num_imported_functions();
   for (int func_index = 0; func_index < num_imported_functions; func_index++) {
     FunctionSig* sig = native_module->module()->functions[func_index].sig;
-    bool has_bigint_feature = native_module->enabled_features().bigint;
-    if (!IsJSCompatibleSignature(sig, has_bigint_feature)) {
+    if (!IsJSCompatibleSignature(sig, native_module->enabled_features())) {
       continue;
     }
     WasmImportWrapperCache::CacheKey key(compiler::kDefaultImportCallKind, sig);
@@ -1075,7 +1166,7 @@ int AddImportWrapperUnits(NativeModule* native_module,
   return static_cast<int>(keys.size());
 }
 
-void InitializeCompilationUnits(NativeModule* native_module) {
+void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
   CompilationStateImpl* compilation_state =
       Impl(native_module->compilation_state());
   const bool lazy_module = IsLazyModule(native_module->module());
@@ -1099,8 +1190,11 @@ void InitializeCompilationUnits(NativeModule* native_module) {
     }
   }
   int num_import_wrappers = AddImportWrapperUnits(native_module, &builder);
-  compilation_state->InitializeCompilationProgress(lazy_module,
-                                                   num_import_wrappers);
+  int num_export_wrappers =
+      AddExportWrapperUnits(isolate, isolate->wasm_engine(), native_module,
+                            &builder, WasmFeaturesFromIsolate(isolate));
+  compilation_state->InitializeCompilationProgress(
+      lazy_module, num_import_wrappers + num_export_wrappers);
   builder.Commit();
 }
 
@@ -1202,7 +1296,7 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
   }
 
   // Initialize the compilation units and kick off background compile tasks.
-  InitializeCompilationUnits(native_module);
+  InitializeCompilationUnits(isolate, native_module);
 
   // If tiering is disabled, the main thread can execute any unit (all of them
   // are part of initial compilation). Otherwise, just execute baseline units.
@@ -1274,26 +1368,23 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
   OwnedVector<uint8_t> wire_bytes_copy =
       OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
 
-  // Create and compile the native module.
-  size_t code_size_estimate =
-      wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
-
   // Create a new {NativeModule} first.
   auto native_module = isolate->wasm_engine()->NewNativeModule(
-      isolate, enabled, code_size_estimate,
-      wasm::NativeModule::kCanAllocateMoreMemory, std::move(module));
+      isolate, enabled, std::move(module));
   native_module->SetWireBytes(std::move(wire_bytes_copy));
   native_module->SetRuntimeStubs(isolate);
 
   CompileNativeModule(isolate, thrower, wasm_module, native_module.get());
   if (thrower->error()) return {};
 
-  // Compile JS->wasm wrappers for exported functions.
-  int num_wrappers = MaxNumExportWrappers(native_module->module());
-  *export_wrappers_out =
-      isolate->factory()->NewFixedArray(num_wrappers, AllocationType::kOld);
+#ifdef V8_EMBEDDED_BUILTINS
+  Impl(native_module->compilation_state())
+      ->FinalizeJSToWasmWrappers(isolate, native_module->module(),
+                                 export_wrappers_out);
+#else
   CompileJsToWasmWrappers(isolate, native_module->module(),
-                          *export_wrappers_out);
+                          export_wrappers_out);
+#endif
 
   // Log the code within the generated module for profiling.
   native_module->LogWasmCodes(isolate);
@@ -1367,7 +1458,9 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
 
   ModuleDecoder decoder_;
   AsyncCompileJob* job_;
+  WasmEngine* wasm_engine_;
   std::unique_ptr<CompilationUnitBuilder> compilation_unit_builder_;
+  base::TimeTicks start_time_;
   int num_functions_ = 0;
 };
 
@@ -1415,11 +1508,8 @@ void AsyncCompileJob::CreateNativeModule(
   // breakpoints on a (potentially empty) subset of the instances.
   // Create the module object.
 
-  size_t code_size_estimate =
-      wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
   native_module_ = isolate_->wasm_engine()->NewNativeModule(
-      isolate_, enabled_features_, code_size_estimate,
-      wasm::NativeModule::kCanAllocateMoreMemory, std::move(module));
+      isolate_, enabled_features_, std::move(module));
   native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
   native_module_->SetRuntimeStubs(isolate_);
 
@@ -1433,10 +1523,8 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
   Handle<Script> script =
       CreateWasmScript(isolate_, wire_bytes_, module->source_map_url);
 
-  size_t code_size_estimate =
-      wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module);
-  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
-      isolate_, native_module_, script, code_size_estimate);
+  Handle<WasmModuleObject> module_object =
+      WasmModuleObject::New(isolate_, native_module_, script);
 
   module_object_ = isolate_->global_handles()->Create(*module_object);
 }
@@ -1462,17 +1550,25 @@ void AsyncCompileJob::FinishCompile() {
   }
   isolate_->debug()->OnAfterCompile(script);
 
-  // We can only update the feature counts once the entire compile is done.
   auto compilation_state =
       Impl(module_object_->native_module()->compilation_state());
-  compilation_state->PublishDetectedFeatures(isolate_);
-
   // TODO(bbudge) Allow deserialization without wrapper compilation, so we can
   // just compile wrappers here.
   if (!is_after_deserialization) {
-    // TODO(wasm): compiling wrappers should be made async.
-    CompileWrappers();
+#ifdef V8_EMBEDDED_BUILTINS
+    Handle<FixedArray> export_wrappers;
+    compilation_state->FinalizeJSToWasmWrappers(
+        isolate_, module_object_->module(), &export_wrappers);
+    module_object_->set_export_wrappers(*export_wrappers);
+#else
+    Handle<FixedArray> export_wrappers;
+    CompileJsToWasmWrappers(isolate_, module_object_->module(),
+                            &export_wrappers);
+    module_object_->set_export_wrappers(*export_wrappers);
+#endif
   }
+  // We can only update the feature counts once the entire compile is done.
+  compilation_state->PublishDetectedFeatures(isolate_);
 
   FinishModule();
 }
@@ -1792,7 +1888,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
       // then DoAsync would do the same as NextStep already.
 
       // Add compilation units and kick off compilation.
-      InitializeCompilationUnits(job->native_module_.get());
+      InitializeCompilationUnits(job->isolate(), job->native_module_.get());
     }
   }
 };
@@ -1853,17 +1949,8 @@ class AsyncCompileJob::CompileFinished : public CompileStep {
   }
 };
 
-void AsyncCompileJob::CompileWrappers() {
-  // TODO(wasm): Compile all wrappers here, including the start function wrapper
-  // and the wrappers for the function table elements.
-  TRACE_COMPILE("(5) Compile wrappers...\n");
-  // Compile JS->wasm wrappers for exported functions.
-  CompileJsToWasmWrappers(isolate_, module_object_->native_module()->module(),
-                          handle(module_object_->export_wrappers(), isolate_));
-}
-
 void AsyncCompileJob::FinishModule() {
-  TRACE_COMPILE("(6) Finish module...\n");
+  TRACE_COMPILE("(4) Finish module...\n");
   AsyncCompileSucceeded(module_object_);
   isolate_->wasm_engine()->RemoveCompileJob(this);
 }
@@ -1871,7 +1958,9 @@ void AsyncCompileJob::FinishModule() {
 AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
     : decoder_(job->enabled_features_),
       job_(job),
-      compilation_unit_builder_(nullptr) {}
+      wasm_engine_(job_->isolate_->wasm_engine()),
+      compilation_unit_builder_(nullptr),
+      start_time_(base::TimeTicks::Now()) {}
 
 void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
     const WasmError& error) {
@@ -1973,8 +2062,11 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
 
   int num_import_wrappers =
       AddImportWrapperUnits(native_module, compilation_unit_builder_.get());
-  compilation_state->InitializeCompilationProgress(lazy_module,
-                                                   num_import_wrappers);
+  int num_export_wrappers = AddExportWrapperUnits(
+      job_->isolate_, wasm_engine_, native_module,
+      compilation_unit_builder_.get(), job_->enabled_features_);
+  compilation_state->InitializeCompilationProgress(
+      lazy_module, num_import_wrappers + num_export_wrappers);
   return true;
 }
 
@@ -2097,6 +2189,13 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
 
   MaybeHandle<WasmModuleObject> result =
       DeserializeNativeModule(job_->isolate_, module_bytes, wire_bytes);
+  if (base::TimeTicks::IsHighResolution()) {
+    base::TimeDelta duration = base::TimeTicks::Now() - start_time_;
+    auto* histogram = job_->isolate_->counters()
+                          ->wasm_streaming_deserialize_wasm_module_time();
+    histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
+  }
+
   if (result.is_null()) return false;
 
   job_->module_object_ =
@@ -2145,8 +2244,8 @@ void CompilationStateImpl::AbortCompilation() {
   callbacks_.clear();
 }
 
-void CompilationStateImpl::InitializeCompilationProgress(
-    bool lazy_module, int num_import_wrappers) {
+void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
+                                                         int num_wrappers) {
   DCHECK(!failed());
   auto enabled_features = native_module_->enabled_features();
   auto* module = native_module_->module();
@@ -2190,7 +2289,7 @@ void CompilationStateImpl::InitializeCompilationProgress(
   DCHECK_IMPLIES(lazy_module, outstanding_top_tier_functions_ == 0);
   DCHECK_LE(0, outstanding_baseline_units_);
   DCHECK_LE(outstanding_baseline_units_, outstanding_top_tier_functions_);
-  outstanding_baseline_units_ += num_import_wrappers;
+  outstanding_baseline_units_ += num_wrappers;
 
   // Trigger callbacks if module needs no baseline or top tier compilation. This
   // can be the case for an empty or fully lazy module.
@@ -2215,15 +2314,52 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
 
 void CompilationStateImpl::AddCompilationUnits(
     Vector<WasmCompilationUnit> baseline_units,
-    Vector<WasmCompilationUnit> top_tier_units) {
-  compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
-                                    native_module_->module());
+    Vector<WasmCompilationUnit> top_tier_units,
+    Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
+        js_to_wasm_wrapper_units) {
+  if (!baseline_units.empty() || !top_tier_units.empty()) {
+    compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
+                                      native_module_->module());
+  }
+  js_to_wasm_wrapper_units_.insert(js_to_wasm_wrapper_units_.end(),
+                                   js_to_wasm_wrapper_units.begin(),
+                                   js_to_wasm_wrapper_units.end());
 
   RestartBackgroundTasks();
 }
 
 void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
-  AddCompilationUnits({}, {&unit, 1});
+  AddCompilationUnits({}, {&unit, 1}, {});
+}
+
+std::shared_ptr<JSToWasmWrapperCompilationUnit>
+CompilationStateImpl::GetNextJSToWasmWrapperCompilationUnit() {
+  int wrapper_id =
+      js_to_wasm_wrapper_id_.fetch_add(1, std::memory_order_relaxed);
+  if (wrapper_id < static_cast<int>(js_to_wasm_wrapper_units_.size())) {
+    return js_to_wasm_wrapper_units_[wrapper_id];
+  }
+  return nullptr;
+}
+
+void CompilationStateImpl::FinalizeJSToWasmWrappers(
+    Isolate* isolate, const WasmModule* module,
+    Handle<FixedArray>* export_wrappers_out) {
+  *export_wrappers_out = isolate->factory()->NewFixedArray(
+      MaxNumExportWrappers(module), AllocationType::kOld);
+  // TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
+  // optimization we keep the code space unlocked to avoid repeated unlocking
+  // because many such wrappers are allocated in sequence below.
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+               "FinalizeJSToWasmWrappers");
+  CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+  for (auto& unit : js_to_wasm_wrapper_units_) {
+    Handle<Code> code = unit->Finalize(isolate);
+    int wrapper_index =
+        GetExportWrapperIndex(module, unit->sig(), unit->is_import());
+    (*export_wrappers_out)->set(wrapper_index, *code);
+    RecordStats(*code, isolate->counters());
+  }
 }
 
 base::Optional<WasmCompilationUnit>
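
`GetNextJSToWasmWrapperCompilationUnit` above hands out units lock-free: each caller claims the next index with a relaxed `fetch_add`, which is sufficient because `js_to_wasm_wrapper_units_` is fully populated before background tasks start and is never resized afterwards. A self-contained sketch of the idiom (names are illustrative):

```cpp
#include <atomic>
#include <memory>
#include <vector>

struct WrapperUnit {
  void Execute() { /* compile one wrapper */ }
};

// Filled once on the main thread before workers are started, then read-only;
// that is what makes the relaxed fetch_add below sufficient.
std::vector<std::shared_ptr<WrapperUnit>> units;
std::atomic<int> next_unit{0};

std::shared_ptr<WrapperUnit> GetNextUnit() {
  int id = next_unit.fetch_add(1, std::memory_order_relaxed);
  if (id < static_cast<int>(units.size())) return units[id];
  return nullptr;  // the counter may run past the end; that is benign
}

void WorkerLoop() {
  while (auto unit = GetNextUnit()) unit->Execute();
}
```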
@@ -2313,25 +2449,38 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
       DCHECK_LE(0, outstanding_baseline_units_);
     }
 
-    // Trigger callbacks.
-    if (completes_baseline_compilation) {
-      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "BaselineFinished");
-      for (auto& callback : callbacks_) {
-        callback(CompilationEvent::kFinishedBaselineCompilation);
-      }
-      if (outstanding_top_tier_functions_ == 0) {
-        completes_top_tier_compilation = true;
-      }
+    TriggerCallbacks(completes_baseline_compilation,
+                     completes_top_tier_compilation);
+  }
+}
+
+void CompilationStateImpl::OnFinishedJSToWasmWrapperUnits(int num) {
+  if (num == 0) return;
+  base::MutexGuard guard(&callbacks_mutex_);
+  outstanding_baseline_units_ -= num;
+  bool completes_baseline_compilation = outstanding_baseline_units_ == 0;
+  TriggerCallbacks(completes_baseline_compilation, false);
+}
+
+void CompilationStateImpl::TriggerCallbacks(
+    bool completes_baseline_compilation, bool completes_top_tier_compilation) {
+  if (completes_baseline_compilation) {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "BaselineFinished");
+    for (auto& callback : callbacks_) {
+      callback(CompilationEvent::kFinishedBaselineCompilation);
     }
-    if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
-      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
-      for (auto& callback : callbacks_) {
-        callback(CompilationEvent::kFinishedTopTierCompilation);
-      }
-      // Clear the callbacks because no more events will be delivered.
-      callbacks_.clear();
+    if (outstanding_top_tier_functions_ == 0) {
+      completes_top_tier_compilation = true;
     }
   }
+  if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
+    for (auto& callback : callbacks_) {
+      callback(CompilationEvent::kFinishedTopTierCompilation);
+    }
+    // Clear the callbacks because no more events will be delivered.
+    callbacks_.clear();
+  }
 }
 
 void CompilationStateImpl::OnBackgroundTaskStopped(
@@ -2379,6 +2528,11 @@ void CompilationStateImpl::RestartBackgroundTasks() {
     if (failed()) return;
 
     size_t max_num_restart = compilation_unit_queues_.GetTotalSize();
+    if (js_to_wasm_wrapper_id_ <
+        static_cast<int>(js_to_wasm_wrapper_units_.size())) {
+      max_num_restart +=
+          js_to_wasm_wrapper_units_.size() - js_to_wasm_wrapper_id_;
+    }
 
     while (!available_task_ids_.empty() && max_num_restart-- > 0) {
       int task_id = available_task_ids_.back();
@@ -2418,7 +2572,6 @@ void CompilationStateImpl::SetError() {
 }
 
 namespace {
-using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
 using JSToWasmWrapperQueue =
     WrapperQueue<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>>;
 using JSToWasmWrapperUnitMap =
@@ -2449,9 +2602,13 @@ class CompileJSToWasmWrapperTask final : public CancelableTask {
 }  // namespace
 
 void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
-                             Handle<FixedArray> export_wrappers) {
+                             Handle<FixedArray>* export_wrappers_out) {
+  *export_wrappers_out = isolate->factory()->NewFixedArray(
+      MaxNumExportWrappers(module), AllocationType::kOld);
+
   JSToWasmWrapperQueue queue;
   JSToWasmWrapperUnitMap compilation_units;
+  WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate);
 
   // Prepare compilation units in the main thread.
   for (auto exp : module->export_table) {
@@ -2460,8 +2617,8 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
     JSToWasmWrapperKey key(function.imported, *function.sig);
     if (queue.insert(key)) {
       auto unit = base::make_unique<JSToWasmWrapperCompilationUnit>(
-          isolate, function.sig, function.imported);
-      unit->Prepare(isolate);
+          isolate, isolate->wasm_engine(), function.sig, function.imported,
+          enabled_features);
       compilation_units.emplace(key, std::move(unit));
     }
   }
@@ -2492,7 +2649,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
     JSToWasmWrapperCompilationUnit* unit = pair.second.get();
     Handle<Code> code = unit->Finalize(isolate);
     int wrapper_index = GetExportWrapperIndex(module, &key.second, key.first);
-    export_wrappers->set(wrapper_index, *code);
+    (*export_wrappers_out)->set(wrapper_index, *code);
     RecordStats(*code, isolate->counters());
   }
 }
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 27c7bff8683ac5..69eb6bb62c404b 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -46,7 +46,7 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
 
 V8_EXPORT_PRIVATE
 void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
-                             Handle<FixedArray> export_wrappers);
+                             Handle<FixedArray>* export_wrappers_out);
 
 // Compiles the wrapper for this (kind, sig) pair and sets the corresponding
 // cache entry. Assumes the key already exists in the cache but has not been
@@ -153,8 +153,6 @@ class AsyncCompileJob {
 
   void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
 
-  void CompileWrappers();
-
   void FinishModule();
 
   void StartForegroundTask();
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index a4b0139ea43758..976c3cde00154e 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -656,6 +656,7 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
     uint32_t size = segment.source.length();
 
     if (enabled_.bulk_memory) {
+      if (size == 0) continue;
       // Passive segments are not copied during instantiation.
       if (!segment.active) continue;
 
@@ -834,10 +835,18 @@ bool InstanceBuilder::ProcessImportedFunction(
                     module_name, import_name);
     return false;
   }
+  // Store any {WasmExternalFunction} callable in the instance before the call
+  // is resolved to preserve its identity. This handles exported functions as
+  // well as functions constructed via other means (e.g. WebAssembly.Function).
+  if (WasmExternalFunction::IsWasmExternalFunction(*value)) {
+    WasmInstanceObject::SetWasmExternalFunction(
+        isolate_, instance, func_index,
+        Handle<WasmExternalFunction>::cast(value));
+  }
   auto js_receiver = Handle<JSReceiver>::cast(value);
   FunctionSig* expected_sig = module_->functions[func_index].sig;
-  auto resolved = compiler::ResolveWasmImportCall(js_receiver, expected_sig,
-                                                  enabled_.bigint);
+  auto resolved =
+      compiler::ResolveWasmImportCall(js_receiver, expected_sig, enabled_);
   compiler::WasmImportCallKind kind = resolved.first;
   js_receiver = resolved.second;
   switch (kind) {
@@ -854,10 +863,6 @@ bool InstanceBuilder::ProcessImportedFunction(
       Address imported_target = imported_function->GetWasmCallTarget();
       ImportedFunctionEntry entry(instance, func_index);
       entry.SetWasmToWasm(*imported_instance, imported_target);
-      // Also store the {WasmExportedFunction} in the instance to preserve its
-      // identity.
-      WasmInstanceObject::SetWasmExportedFunction(
-          isolate_, instance, func_index, imported_function);
       break;
     }
     case compiler::WasmImportCallKind::kWasmToCapi: {
@@ -1218,8 +1223,7 @@ void InstanceBuilder::CompileImportWrappers(
     auto js_receiver = Handle<JSReceiver>::cast(value);
     uint32_t func_index = module_->import_table[index].index;
     FunctionSig* sig = module_->functions[func_index].sig;
-    auto resolved =
-        compiler::ResolveWasmImportCall(js_receiver, sig, enabled_.bigint);
+    auto resolved = compiler::ResolveWasmImportCall(js_receiver, sig, enabled_);
     compiler::WasmImportCallKind kind = resolved.first;
     if (kind == compiler::WasmImportCallKind::kWasmToWasm ||
         kind == compiler::WasmImportCallKind::kLinkError ||
@@ -1373,7 +1377,7 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
         break;
       case WasmInitExpr::kRefFuncConst: {
         DCHECK(enabled_.anyref);
-        auto function = WasmInstanceObject::GetOrCreateWasmExportedFunction(
+        auto function = WasmInstanceObject::GetOrCreateWasmExternalFunction(
             isolate_, instance, global.init.val.function_index);
         tagged_globals_->set(global.offset, *function);
         break;
@@ -1450,10 +1454,10 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
       const WasmImport& import = module_->import_table[index];
       if (import.kind == kExternalFunction) {
         Handle<Object> value = sanitized_imports_[index].value;
-        if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
-          WasmInstanceObject::SetWasmExportedFunction(
+        if (WasmExternalFunction::IsWasmExternalFunction(*value)) {
+          WasmInstanceObject::SetWasmExternalFunction(
               isolate_, instance, import.index,
-              Handle<WasmExportedFunction>::cast(value));
+              Handle<WasmExternalFunction>::cast(value));
         }
       }
     }
@@ -1498,10 +1502,10 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
       case kExternalFunction: {
         // Wrap and export the code as a JSFunction.
         // TODO(wasm): reduce duplication with LoadElemSegment() further below
-        MaybeHandle<WasmExportedFunction> wasm_exported_function =
-            WasmInstanceObject::GetOrCreateWasmExportedFunction(
+        Handle<WasmExternalFunction> wasm_external_function =
+            WasmInstanceObject::GetOrCreateWasmExternalFunction(
                 isolate_, instance, exp.index);
-        desc.set_value(wasm_exported_function.ToHandleChecked());
+        desc.set_value(wasm_external_function);
 
         if (is_asm_js &&
             String::Equals(isolate_, name,
@@ -1629,6 +1633,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
                          uint32_t table_index,
                          const WasmElemSegment& elem_segment, uint32_t dst,
                          uint32_t src, size_t count) {
+  if (count == 0) return true;
   // TODO(wasm): Move this functionality into wasm-objects, since it is used
   // for both instantiation and in the implementation of the table.init
   // instruction.
@@ -1660,27 +1665,27 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
           .Set(sig_id, instance, func_index);
     }
 
-    // For AnyRef tables, we have to generate the WasmExportedFunction eagerly.
+    // For AnyRef tables, we have to generate the WasmExternalFunction eagerly.
     // Later we cannot know if an entry is a placeholder or not.
     if (table_object->type() == kWasmAnyRef) {
-      Handle<WasmExportedFunction> wasm_exported_function =
-          WasmInstanceObject::GetOrCreateWasmExportedFunction(isolate, instance,
+      Handle<WasmExternalFunction> wasm_external_function =
+          WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
                                                               func_index);
       WasmTableObject::Set(isolate, table_object, entry_index,
-                           wasm_exported_function);
+                           wasm_external_function);
     } else {
       // Update the table object's other dispatch tables.
-      MaybeHandle<WasmExportedFunction> wasm_exported_function =
-          WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
+      MaybeHandle<WasmExternalFunction> wasm_external_function =
+          WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
                                                       func_index);
-      if (wasm_exported_function.is_null()) {
+      if (wasm_external_function.is_null()) {
         // No JSFunction entry yet exists for this function. Create a {Tuple2}
         // holding the information to lazily allocate one.
         WasmTableObject::SetFunctionTablePlaceholder(
             isolate, table_object, entry_index, instance, func_index);
       } else {
         table_object->entries().set(entry_index,
-                                    *wasm_exported_function.ToHandleChecked());
+                                    *wasm_external_function.ToHandleChecked());
       }
       // UpdateDispatchTables() updates all other dispatch tables, since
       // we have not yet added the dispatch table we are currently building.
@@ -1701,6 +1706,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
     uint32_t dst = EvalUint32InitExpr(instance, elem_segment.offset);
     uint32_t src = 0;
     size_t count = elem_segment.entries.size();
+    if (enabled_.bulk_memory && count == 0) continue;
 
     bool success = LoadElemSegmentImpl(
         isolate_, instance,
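
The `count == 0` and `size == 0` early-exits added in this file implement a bulk-memory proposal detail: empty segments are skipped before any bounds check, so they can no longer trap during instantiation even at an out-of-range offset. A hedged stand-in for the segment-copy step:

```cpp
#include <cstdint>
#include <cstring>

// Simplified stand-in for applying one active data segment at instantiation.
bool ApplyDataSegment(uint8_t* mem, size_t mem_size, size_t dst,
                      const uint8_t* src, size_t count, bool bulk_memory) {
  if (bulk_memory && count == 0) return true;  // skip empty segments entirely
  if (dst > mem_size || count > mem_size - dst) return false;  // OOB: trap
  std::memcpy(mem + dst, src, count);
  return true;
}
```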
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 3d0cde0cceded2..91cfc01ceae649 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -25,13 +25,14 @@
 #include "src/wasm/function-compiler.h"
 #include "src/wasm/jump-table-assembler.h"
 #include "src/wasm/wasm-import-wrapper-cache.h"
+#include "src/wasm/wasm-module-sourcemap.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-objects-inl.h"
 #include "src/wasm/wasm-objects.h"
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
 #include "src/diagnostics/unwinding-info-win64.h"
-#endif
+#endif  // V8_OS_WIN64
 
 #define TRACE_HEAP(...)                                   \
   do {                                                    \
@@ -88,13 +89,30 @@ base::AddressRegion DisjointAllocationPool::Merge(base::AddressRegion region) {
 }
 
 base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
+  return AllocateInRegion(size,
+                          {kNullAddress, std::numeric_limits<size_t>::max()});
+}
+
+base::AddressRegion DisjointAllocationPool::AllocateInRegion(
+    size_t size, base::AddressRegion region) {
   for (auto it = regions_.begin(), end = regions_.end(); it != end; ++it) {
-    if (size > it->size()) continue;
-    base::AddressRegion ret{it->begin(), size};
+    base::AddressRegion overlap = it->GetOverlap(region);
+    if (size > overlap.size()) continue;
+    base::AddressRegion ret{overlap.begin(), size};
     if (size == it->size()) {
+      // We use the full region --> erase the region from {regions_}.
       regions_.erase(it);
-    } else {
+    } else if (ret.begin() == it->begin()) {
+      // We return a region at the start --> shrink remaining region from front.
       *it = base::AddressRegion{it->begin() + size, it->size() - size};
+    } else if (ret.end() == it->end()) {
+      // We return a region at the end --> shrink remaining region.
+      *it = base::AddressRegion{it->begin(), it->size() - size};
+    } else {
+      // We return something in the middle --> split the remaining region.
+      regions_.insert(
+          it, base::AddressRegion{it->begin(), ret.begin() - it->begin()});
+      *it = base::AddressRegion{ret.end(), it->end() - ret.end()};
     }
     return ret;
   }
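
The rewritten `AllocateInRegion` above distinguishes four cases when carving an allocation out of a free region: exact fit, cut from the front, cut from the back, or split in the middle. A compilable sketch of the same case analysis on plain integer regions (types simplified; V8 uses `base::AddressRegion` and a sorted region list):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <list>

struct Region {
  uintptr_t begin;
  size_t size;
  uintptr_t end() const { return begin + size; }
};

// Carve {size} bytes out of the first free region overlapping {within};
// returns {0, 0} if nothing fits.
Region AllocateInRegion(std::list<Region>& free_list, size_t size,
                        Region within) {
  for (auto it = free_list.begin(); it != free_list.end(); ++it) {
    uintptr_t lo = std::max(it->begin, within.begin);
    uintptr_t hi = std::min(it->end(), within.end());
    if (hi < lo || hi - lo < size) continue;  // overlap too small
    Region ret{lo, size};
    if (size == it->size) {
      free_list.erase(it);  // exact fit: drop the free region entirely
    } else if (ret.begin == it->begin) {
      *it = {it->begin + size, it->size - size};  // shrink from the front
    } else if (ret.end() == it->end()) {
      *it = {it->begin, it->size - size};  // shrink from the back
    } else {
      // Allocation in the middle: split into two remaining free regions.
      free_list.insert(it, {it->begin, ret.begin - it->begin});
      *it = {ret.end(), it->end() - ret.end()};
    }
    return ret;
  }
  return {0, 0};
}
```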
@@ -164,6 +182,19 @@ void WasmCode::LogCode(Isolate* isolate) const {
   WireBytesRef name_ref =
       native_module()->module()->LookupFunctionName(wire_bytes, index());
   WasmName name_vec = wire_bytes.GetNameOrNull(name_ref);
+
+  const std::string& source_map_url = native_module()->module()->source_map_url;
+  auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
+  auto source_map = native_module()->GetWasmSourceMap();
+  if (!source_map && !source_map_url.empty() && load_wasm_source_map) {
+    HandleScope scope(isolate);
+    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+    Local<v8::String> source_map_str =
+        load_wasm_source_map(v8_isolate, source_map_url.c_str());
+    native_module()->SetWasmSourceMap(
+        base::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
+  }
+
   if (!name_vec.empty()) {
     HandleScope scope(isolate);
     MaybeHandle<String> maybe_name = isolate->factory()->NewStringFromUtf8(
@@ -204,11 +235,7 @@ void WasmCode::Validate() const {
     switch (mode) {
       case RelocInfo::WASM_CALL: {
         Address target = it.rinfo()->wasm_call_address();
-        WasmCode* code = native_module_->Lookup(target);
-        CHECK_NOT_NULL(code);
-        CHECK_EQ(WasmCode::kJumpTable, code->kind());
-        CHECK_EQ(native_module()->jump_table_, code);
-        CHECK(code->contains(target));
+        DCHECK(native_module_->is_jump_table_slot(target));
         break;
       }
       case RelocInfo::WASM_STUB_CALL: {
@@ -464,32 +491,51 @@ base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
 
 Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
                                                 size_t size) {
+  return AllocateForCodeInRegion(
+      native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()});
+}
+
+Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
+    NativeModule* native_module, size_t size, base::AddressRegion region) {
   base::MutexGuard lock(&mutex_);
   DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
   DCHECK_LT(0, size);
   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
-  // This happens under a lock assumed by the caller.
   size = RoundUp<kCodeAlignment>(size);
-  base::AddressRegion code_space = free_code_space_.Allocate(size);
+  base::AddressRegion code_space =
+      free_code_space_.AllocateInRegion(size, region);
   if (code_space.is_empty()) {
-    if (!can_request_more_memory_) {
-      V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
+    const bool in_specific_region =
+        region.size() < std::numeric_limits<size_t>::max();
+    if (!can_request_more_memory_ || in_specific_region) {
+      auto error = in_specific_region ? "wasm code reservation in region"
+                                      : "wasm code reservation";
+      V8::FatalProcessOutOfMemory(nullptr, error);
       UNREACHABLE();
     }
 
     Address hint = owned_code_space_.empty() ? kNullAddress
                                              : owned_code_space_.back().end();
 
+    // Reserve at least 20% of the total generated code size so far, and of
+    // course at least {size}. Round up to the next power of two.
+    size_t total_reserved = 0;
+    for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
+    size_t reserve_size =
+        base::bits::RoundUpToPowerOfTwo(std::max(size, total_reserved / 5));
     VirtualMemory new_mem =
-        code_manager_->TryAllocate(size, reinterpret_cast<void*>(hint));
+        code_manager_->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
     if (!new_mem.IsReserved()) {
       V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
       UNREACHABLE();
     }
-    code_manager_->AssignRange(new_mem.region(), native_module);
 
-    free_code_space_.Merge(new_mem.region());
+    base::AddressRegion new_region = new_mem.region();
+    code_manager_->AssignRange(new_region, native_module);
+    free_code_space_.Merge(new_region);
     owned_code_space_.emplace_back(std::move(new_mem));
+    native_module->AddCodeSpace(new_region);
+
     code_space = free_code_space_.Allocate(size);
     DCHECK(!code_space.is_empty());
     async_counters_->wasm_module_num_code_spaces()->AddSample(
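
The reservation logic above grows geometrically: each new reservation is the larger of the requested size and 20% of everything reserved so far, rounded up to a power of two, so the number of separate code spaces stays logarithmic in the total generated code size. A small worked sketch (the helper stands in for `base::bits::RoundUpToPowerOfTwo`):

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>

// Stand-in for base::bits::RoundUpToPowerOfTwo.
size_t RoundUpToPowerOfTwo(size_t v) {
  size_t p = 1;
  while (p < v) p <<= 1;
  return p;
}

size_t ReserveSize(size_t requested, size_t total_reserved) {
  return RoundUpToPowerOfTwo(std::max(requested, total_reserved / 5));
}

int main() {
  // A 64 KiB request with 10 MiB already reserved yields a 2 MiB reservation:
  // growth is driven by the 20% rule, not by the (small) request itself.
  assert(ReserveSize(64 * 1024, 10 * 1024 * 1024) == 2 * 1024 * 1024);
}
```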
@@ -614,6 +660,12 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
   }
 }
 
+base::AddressRegion WasmCodeAllocator::GetSingleCodeRegion() const {
+  base::MutexGuard lock(&mutex_);
+  DCHECK_EQ(1, owned_code_space_.size());
+  return owned_code_space_[0].region();
+}
+
 NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
                            bool can_request_more, VirtualMemory code_space,
                            std::shared_ptr<const WasmModule> module,
@@ -636,27 +688,10 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
   compilation_state_ =
       CompilationState::New(*shared_this, std::move(async_counters));
   DCHECK_NOT_NULL(module_);
-
-#if defined(V8_OS_WIN_X64)
-  // On some platforms, specifically Win64, we need to reserve some pages at
-  // the beginning of an executable space.
-  // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
-  // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
-  // for details.
-  if (engine_->code_manager()
-          ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
-    code_allocator_.AllocateForCode(this, Heap::GetCodeRangeReservedAreaSize());
-  }
-#endif
-
-  uint32_t num_wasm_functions = module_->num_declared_functions;
-  if (num_wasm_functions > 0) {
-    code_table_.reset(new WasmCode* [num_wasm_functions] {});
-
-    WasmCodeRefScope code_ref_scope;
-    jump_table_ = CreateEmptyJumpTable(
-        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
+  if (module_->num_declared_functions > 0) {
+    code_table_.reset(new WasmCode* [module_->num_declared_functions] {});
   }
+  AddCodeSpace(code_allocator_.GetSingleCodeRegion());
 }
 
 void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
@@ -669,9 +704,12 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
   }
   code_table_.reset(new_table);
 
+  CHECK_EQ(1, code_space_data_.size());
   // Re-allocate jump table.
-  jump_table_ = CreateEmptyJumpTable(
-      JumpTableAssembler::SizeForNumberOfSlots(max_functions));
+  code_space_data_[0].jump_table = CreateEmptyJumpTableInRegion(
+      JumpTableAssembler::SizeForNumberOfSlots(max_functions),
+      code_space_data_[0].region);
+  main_jump_table_ = code_space_data_[0].jump_table;
 }
 
 void NativeModule::LogWasmCodes(Isolate* isolate) {
@@ -704,8 +742,10 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
   if (!lazy_compile_table_) {
     uint32_t num_slots = module_->num_declared_functions;
     WasmCodeRefScope code_ref_scope;
-    lazy_compile_table_ = CreateEmptyJumpTable(
-        JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots));
+    DCHECK_EQ(1, code_space_data_.size());
+    lazy_compile_table_ = CreateEmptyJumpTableInRegion(
+        JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
+        code_space_data_[0].region);
     JumpTableAssembler::GenerateLazyCompileTable(
         lazy_compile_table_->instruction_start(), num_slots,
         module_->num_imported_functions,
@@ -718,7 +758,7 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
   Address lazy_compile_target =
       lazy_compile_table_->instruction_start() +
       JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
-  JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
+  JumpTableAssembler::PatchJumpTableSlot(main_jump_table_->instruction_start(),
                                          slot_index, lazy_compile_target,
                                          WasmCode::kFlushICache);
 }
@@ -729,9 +769,10 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
   DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]);  // Only called once.
 #ifdef V8_EMBEDDED_BUILTINS
   WasmCodeRefScope code_ref_scope;
-  WasmCode* jump_table =
-      CreateEmptyJumpTable(JumpTableAssembler::SizeForNumberOfStubSlots(
-          WasmCode::kRuntimeStubCount));
+  DCHECK_EQ(1, code_space_data_.size());
+  WasmCode* jump_table = CreateEmptyJumpTableInRegion(
+      JumpTableAssembler::SizeForNumberOfStubSlots(WasmCode::kRuntimeStubCount),
+      code_space_data_[0].region);
   Address base = jump_table->instruction_start();
   EmbeddedData embedded_data = EmbeddedData::FromBlob();
 #define RUNTIME_STUB(Name) Builtins::k##Name,
@@ -995,8 +1036,12 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
 
     // Populate optimized code to the jump table unless there is an active
     // redirection to the interpreter that should be preserved.
-    bool update_jump_table =
-        update_code_table && !has_interpreter_redirection(code->index());
+    DCHECK_IMPLIES(
+        main_jump_table_ == nullptr,
+        engine_->code_manager()->IsImplicitAllocationsDisabledForTesting());
+    bool update_jump_table = update_code_table &&
+                             !has_interpreter_redirection(code->index()) &&
+                             main_jump_table_;
 
     // Ensure that interpreter entries always populate to the jump table.
     if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
@@ -1006,8 +1051,8 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
 
     if (update_jump_table) {
       JumpTableAssembler::PatchJumpTableSlot(
-          jump_table_->instruction_start(), slot_idx, code->instruction_start(),
-          WasmCode::kFlushICache);
+          main_jump_table_->instruction_start(), slot_idx,
+          code->instruction_start(), WasmCode::kFlushICache);
     }
   }
   WasmCodeRefScope::AddRef(code.get());
@@ -1065,11 +1110,22 @@ bool NativeModule::HasCode(uint32_t index) const {
   return code_table_[index - module_->num_imported_functions] != nullptr;
 }
 
-WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
+void NativeModule::SetWasmSourceMap(
+    std::unique_ptr<WasmModuleSourceMap> source_map) {
+  source_map_ = std::move(source_map);
+}
+
+WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
+  return source_map_.get();
+}
+
+WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
+    uint32_t jump_table_size, base::AddressRegion region) {
   // Only call this if we really need a jump table.
   DCHECK_LT(0, jump_table_size);
   Vector<uint8_t> code_space =
-      code_allocator_.AllocateForCode(this, jump_table_size);
+      code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
+  DCHECK(!code_space.empty());
   ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
   std::unique_ptr<WasmCode> code{new WasmCode{
       this,                                     // native_module
@@ -1090,6 +1146,48 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
   return PublishCode(std::move(code));
 }
 
+void NativeModule::AddCodeSpace(base::AddressRegion region) {
+  // Each code space must be at least twice as large as the overhead per code
+  // space. Otherwise, we are wasting too much memory.
+  const bool is_first_code_space = code_space_data_.empty();
+  const bool implicit_alloc_disabled =
+      engine_->code_manager()->IsImplicitAllocationsDisabledForTesting();
+
+#if defined(V8_OS_WIN64)
+  // On some platforms, specifically Win64, we need to reserve some pages at
+  // the beginning of an executable space.
+  // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
+  // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
+  // for details.
+  if (engine_->code_manager()
+          ->CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+      !implicit_alloc_disabled) {
+    size_t size = Heap::GetCodeRangeReservedAreaSize();
+    DCHECK_LT(0, size);
+    Vector<byte> padding = code_allocator_.AllocateForCode(this, size);
+    CHECK(region.contains(reinterpret_cast<Address>(padding.begin()),
+                          padding.size()));
+  }
+#endif  // V8_OS_WIN64
+
+  WasmCodeRefScope code_ref_scope;
+  WasmCode* jump_table = nullptr;
+  const uint32_t num_wasm_functions = module_->num_declared_functions;
+  const bool has_functions = num_wasm_functions > 0;
+  const bool needs_jump_table =
+      has_functions && is_first_code_space && !implicit_alloc_disabled;
+
+  if (needs_jump_table) {
+    jump_table = CreateEmptyJumpTableInRegion(
+        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
+    CHECK(region.contains(jump_table->instruction_start()));
+  }
+
+  if (is_first_code_space) main_jump_table_ = jump_table;
+
+  code_space_data_.push_back(CodeSpaceData{region, jump_table});
+}
+
 namespace {
 class NativeModuleWireBytesStorage final : public WireBytesStorage {
  public:
@@ -1137,17 +1235,17 @@ uint32_t NativeModule::GetJumpTableOffset(uint32_t func_index) const {
 
 Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
   // Return the jump table slot for that function index.
-  DCHECK_NOT_NULL(jump_table_);
+  DCHECK_NOT_NULL(main_jump_table_);
   uint32_t slot_offset = GetJumpTableOffset(func_index);
-  DCHECK_LT(slot_offset, jump_table_->instructions().size());
-  return jump_table_->instruction_start() + slot_offset;
+  DCHECK_LT(slot_offset, main_jump_table_->instructions().size());
+  return main_jump_table_->instruction_start() + slot_offset;
 }
 
 uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
     Address slot_address) const {
   DCHECK(is_jump_table_slot(slot_address));
-  uint32_t slot_offset =
-      static_cast<uint32_t>(slot_address - jump_table_->instruction_start());
+  uint32_t slot_offset = static_cast<uint32_t>(
+      slot_address - main_jump_table_->instruction_start());
   uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
   DCHECK_LT(slot_idx, module_->num_declared_functions);
   return module_->num_imported_functions + slot_idx;
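These two accessors are inverses of each other: a function index maps to a fixed-size slot in the main jump table, and a slot address maps back to the function index. A minimal sketch of the arithmetic, assuming a hypothetical fixed slot size (the real per-slot size is architecture-specific and defined by JumpTableAssembler):

    #include <cstdint>

    constexpr uintptr_t kSlotSize = 8;  // assumption for illustration only

    uintptr_t SlotAddress(uintptr_t table_start, uint32_t func_index,
                          uint32_t num_imported) {
      // Imported functions have no slots; only declared functions do.
      return table_start + (func_index - num_imported) * kSlotSize;
    }

    uint32_t FuncIndex(uintptr_t table_start, uintptr_t slot_address,
                       uint32_t num_imported) {
      uint32_t declared =
          static_cast<uint32_t>((slot_address - table_start) / kSlotSize);
      return num_imported + declared;
    }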
@@ -1181,21 +1279,16 @@ WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
                                  size_t max_committed)
     : memory_tracker_(memory_tracker),
       max_committed_code_space_(max_committed),
-#if defined(V8_OS_WIN_X64)
-      is_win64_unwind_info_disabled_for_testing_(false),
-#endif
-      total_committed_code_space_(0),
       critical_committed_code_space_(max_committed / 2) {
   DCHECK_LE(max_committed, kMaxWasmCodeMemory);
 }
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
 bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() const {
   return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
-         FLAG_win64_unwinding_info &&
-         !is_win64_unwind_info_disabled_for_testing_;
+         FLAG_win64_unwinding_info;
 }
-#endif
+#endif  // V8_OS_WIN64
 
 bool WasmCodeManager::Commit(base::AddressRegion region) {
   // TODO(v8:8462): Remove eager commit once perf supports remapping.
@@ -1241,8 +1334,8 @@ void WasmCodeManager::Decommit(base::AddressRegion region) {
   USE(old_committed);
   TRACE_HEAP("Discarding system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
              region.begin(), region.end());
-  CHECK(allocator->DiscardSystemPages(reinterpret_cast<void*>(region.begin()),
-                                      region.size()));
+  CHECK(allocator->SetPermissions(reinterpret_cast<void*>(region.begin()),
+                                  region.size(), PageAllocator::kNoAccess));
 }
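The replacement call changes semantics, not just the API: discarding pages lets the OS drop their contents while the range stays mapped and accessible, whereas revoking access makes any later touch of the decommitted region fault immediately. A sketch of the contrast (our summary of the PageAllocator contract, not text from the patch):

    // allocator->DiscardSystemPages(p, n);
    //   -> contents may be dropped lazily; later reads/writes still succeed.
    // allocator->SetPermissions(p, n, PageAllocator::kNoAccess);
    //   -> the range becomes inaccessible; any later touch traps, which also
    //      catches use-after-decommit bugs in wasm code pages.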
 
 void WasmCodeManager::AssignRange(base::AddressRegion region,
@@ -1363,12 +1456,13 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
   TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
              size);
 
-#if defined(V8_OS_WIN_X64)
-  if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
+#if defined(V8_OS_WIN64)
+  if (CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+      !implicit_allocations_disabled_for_testing_) {
     win64_unwindinfo::RegisterNonABICompliantCodeRange(
         reinterpret_cast<void*>(start), size);
   }
-#endif
+#endif  // V8_OS_WIN64
 
   base::MutexGuard lock(&native_modules_mutex_);
   lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
@@ -1481,12 +1575,13 @@ void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
     TRACE_HEAP("VMem Release: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n",
                code_space.address(), code_space.end(), code_space.size());
 
-#if defined(V8_OS_WIN_X64)
-    if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
+#if defined(V8_OS_WIN64)
+    if (CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+        !implicit_allocations_disabled_for_testing_) {
       win64_unwindinfo::UnregisterNonABICompliantCodeRange(
           reinterpret_cast<void*>(code_space.address()));
     }
-#endif
+#endif  // V8_OS_WIN64
 
     lookup_map_.erase(code_space.address());
     memory_tracker_->ReleaseReservation(code_space.size());
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index db7b4f061d63dc..c2e5249e5ee75d 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -23,6 +23,7 @@
 #include "src/wasm/compilation-environment.h"
 #include "src/wasm/wasm-features.h"
 #include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-module-sourcemap.h"
 #include "src/wasm/wasm-tier.h"
 
 namespace v8 {
@@ -61,6 +62,10 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
   // failure.
   base::AddressRegion Allocate(size_t size);
 
+  // Allocate a contiguous region of size {size} within {region}. Return an
+  // empty region on failure.
+  base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);
+
   bool IsEmpty() const { return regions_.empty(); }
   const std::list<base::AddressRegion>& regions() const { return regions_; }
 
@@ -295,6 +300,11 @@ class WasmCodeAllocator {
   // Allocate code space. Returns a valid buffer or fails with OOM (crash).
   Vector<byte> AllocateForCode(NativeModule*, size_t size);
 
+  // Allocate code space within a specific region. Returns a valid buffer or
+  // fails with OOM (crash).
+  Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
+                                       base::AddressRegion);
+
   // Sets permissions of all owned code space to executable, or read-write (if
   // {executable} is false). Returns true on success.
   V8_EXPORT_PRIVATE bool SetExecutable(bool executable);
@@ -302,6 +312,10 @@ class WasmCodeAllocator {
   // Free memory pages of all given code objects. Used for wasm code GC.
   void FreeCode(Vector<WasmCode* const>);
 
+  // Returns the region of the single code space managed by this code
+  // allocator. Will fail if more than one code space has been created.
+  base::AddressRegion GetSingleCodeRegion() const;
+
  private:
   // The engine-wide wasm code manager.
   WasmCodeManager* const code_manager_;
@@ -392,6 +406,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
   WasmCode* GetCode(uint32_t index) const;
   bool HasCode(uint32_t index) const;
 
+  void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
+  WasmModuleSourceMap* GetWasmSourceMap() const;
+
   Address runtime_stub_entry(WasmCode::RuntimeStubId index) const {
     DCHECK_LT(index, WasmCode::kRuntimeStubCount);
     Address entry_address = runtime_stub_entries_[index];
@@ -400,17 +417,18 @@ class V8_EXPORT_PRIVATE NativeModule final {
   }
 
   Address jump_table_start() const {
-    return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
+    return main_jump_table_ ? main_jump_table_->instruction_start()
+                            : kNullAddress;
   }
 
   uint32_t GetJumpTableOffset(uint32_t func_index) const;
 
   bool is_jump_table_slot(Address address) const {
-    return jump_table_->contains(address);
+    return main_jump_table_->contains(address);
   }
 
-  // Returns the target to call for the given function (returns a jump table
-  // slot within {jump_table_}).
+  // Returns the canonical target to call for the given function (the slot in
+  // the first jump table).
   Address GetCallTargetForFunction(uint32_t func_index) const;
 
   // Reverse lookup from a given call target (i.e. a jump table slot as the
@@ -485,9 +503,15 @@ class V8_EXPORT_PRIVATE NativeModule final {
 
  private:
   friend class WasmCode;
+  friend class WasmCodeAllocator;
   friend class WasmCodeManager;
   friend class NativeModuleModificationScope;
 
+  struct CodeSpaceData {
+    base::AddressRegion region;
+    WasmCode* jump_table;
+  };
+
   // Private constructor, called via {WasmCodeManager::NewNativeModule()}.
   NativeModule(WasmEngine* engine, const WasmFeatures& enabled_features,
                bool can_request_more, VirtualMemory code_space,
@@ -507,7 +531,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
   WasmCode* AddAndPublishAnonymousCode(Handle<Code>, WasmCode::Kind kind,
                                        const char* name = nullptr);
 
-  WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);
+  WasmCode* CreateEmptyJumpTableInRegion(uint32_t jump_table_size,
+                                         base::AddressRegion);
+
+  // Called by the {WasmCodeAllocator} to register a new code space.
+  void AddCodeSpace(base::AddressRegion);
 
   // Hold the {allocation_mutex_} when calling this method.
   bool has_interpreter_redirection(uint32_t func_index) {
@@ -546,6 +574,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
   // tasks can keep this alive.
   std::shared_ptr<const WasmModule> module_;
 
+  std::unique_ptr<WasmModuleSourceMap> source_map_;
+
   // Wire bytes, held in a shared_ptr so they can be kept alive by the
   // {WireBytesStorage}, held by background compile tasks.
   std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
@@ -556,8 +586,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
   // Jump table used for runtime stubs (i.e. trampolines to embedded builtins).
   WasmCode* runtime_stub_table_ = nullptr;
 
-  // Jump table used to easily redirect wasm function calls.
-  WasmCode* jump_table_ = nullptr;
+  // Jump table used by external calls (from JS). Wasm calls use one of the jump
+  // tables stored in {code_space_data_}.
+  WasmCode* main_jump_table_ = nullptr;
 
   // Lazy compile stub table, containing entries to jump to the
   // {WasmCompileLazy} builtin, passing the function index.
@@ -587,6 +618,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
   // this module marking those functions that have been redirected.
   std::unique_ptr<uint8_t[]> interpreter_redirections_;
 
+  // Data (especially jump table) per code space.
+  std::vector<CodeSpaceData> code_space_data_;
+
   // End of fields protected by {allocation_mutex_}.
   //////////////////////////////////////////////////////////////////////////////
 
@@ -610,9 +644,9 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
   }
 #endif
 
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
   bool CanRegisterUnwindInfoForNonABICompliantCodeRange() const;
-#endif
+#endif  // V8_OS_WIN64
 
   NativeModule* LookupNativeModule(Address pc) const;
   WasmCode* LookupCode(Address pc) const;
@@ -622,11 +656,13 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
 
   void SetMaxCommittedMemoryForTesting(size_t limit);
 
-#if defined(V8_OS_WIN_X64)
-  void DisableWin64UnwindInfoForTesting() {
-    is_win64_unwind_info_disabled_for_testing_ = true;
+  void DisableImplicitAllocationsForTesting() {
+    implicit_allocations_disabled_for_testing_ = true;
+  }
+
+  bool IsImplicitAllocationsDisabledForTesting() const {
+    return implicit_allocations_disabled_for_testing_;
   }
-#endif
 
   static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
   static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
@@ -654,11 +690,9 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
 
   size_t max_committed_code_space_;
 
-#if defined(V8_OS_WIN_X64)
-  bool is_win64_unwind_info_disabled_for_testing_;
-#endif
+  bool implicit_allocations_disabled_for_testing_ = false;
 
-  std::atomic<size_t> total_committed_code_space_;
+  std::atomic<size_t> total_committed_code_space_{0};
   // If the committed code space exceeds {critical_committed_code_space_}, then
   // we trigger a GC before creating the next module. This value is set to the
   // currently committed space plus 50% of the available code space on creation
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 7b91b16b807d8a..97111f8349735a 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -278,13 +278,8 @@ Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
       asm_wasm_data->managed_native_module().get();
   Handle<FixedArray> export_wrappers =
       handle(asm_wasm_data->export_wrappers(), isolate);
-  size_t code_size_estimate =
-      wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
-          native_module->module());
-
-  Handle<WasmModuleObject> module_object =
-      WasmModuleObject::New(isolate, std::move(native_module), script,
-                            export_wrappers, code_size_estimate);
+  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
+      isolate, std::move(native_module), script, export_wrappers);
   module_object->set_asm_js_offset_table(asm_wasm_data->asm_js_offset_table());
   return module_object;
 }
@@ -310,9 +305,6 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
 
   Handle<Script> script =
       CreateWasmScript(isolate, bytes, native_module->module()->source_map_url);
-  size_t code_size_estimate =
-      wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
-          native_module->module());
 
   // Create the module object.
   // TODO(clemensh): For the same module (same bytes / same hash), we should
@@ -323,9 +315,8 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
   // and information needed at instantiation time. This object needs to be
   // serializable. Instantiation may occur off a deserialized version of this
   // object.
-  Handle<WasmModuleObject> module_object =
-      WasmModuleObject::New(isolate, std::move(native_module), script,
-                            export_wrappers, code_size_estimate);
+  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
+      isolate, std::move(native_module), script, export_wrappers);
 
   // Finish the Wasm script now and make it public to the debugger.
   isolate->debug()->OnAfterCompile(script);
@@ -451,14 +442,13 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
     Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module) {
   NativeModule* native_module = shared_native_module.get();
   ModuleWireBytes wire_bytes(native_module->wire_bytes());
-  const WasmModule* module = native_module->module();
-  Handle<Script> script =
-      CreateWasmScript(isolate, wire_bytes, module->source_map_url);
-  size_t code_size = native_module->committed_code_space();
+  Handle<Script> script = CreateWasmScript(
+      isolate, wire_bytes, native_module->module()->source_map_url);
+  Handle<FixedArray> export_wrappers;
+  CompileJsToWasmWrappers(isolate, native_module->module(), &export_wrappers);
   Handle<WasmModuleObject> module_object = WasmModuleObject::New(
-      isolate, std::move(shared_native_module), script, code_size);
-  CompileJsToWasmWrappers(isolate, native_module->module(),
-                          handle(module_object->export_wrappers(), isolate));
+      isolate, std::move(shared_native_module), script, export_wrappers,
+      native_module->committed_code_space());
   {
     base::MutexGuard lock(&mutex_);
     DCHECK_EQ(1, isolates_.count(isolate));
@@ -680,6 +670,16 @@ void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
   WasmCode::DecrementRefCount(VectorOf(code_to_log));
 }
 
+std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
+    Isolate* isolate, const WasmFeatures& enabled,
+    std::shared_ptr<const WasmModule> module) {
+  size_t code_size_estimate =
+      wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
+  return NewNativeModule(isolate, enabled, code_size_estimate,
+                         wasm::NativeModule::kCanAllocateMoreMemory,
+                         std::move(module));
+}
+
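The new overload above is a thin convenience wrapper; expressed with the signatures from this patch, the call

    auto native_module = engine->NewNativeModule(isolate, enabled, module);

is equivalent to

    size_t estimate =
        wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
    auto native_module = engine->NewNativeModule(
        isolate, enabled, estimate, wasm::NativeModule::kCanAllocateMoreMemory,
        std::move(module));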
 std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
     Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
     bool can_request_more, std::shared_ptr<const WasmModule> module) {
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 69e6cdae6e674c..401cf2b8805984 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -180,6 +180,9 @@ class V8_EXPORT_PRIVATE WasmEngine {
   // is determined with a heuristic based on the total size of wasm
   // code. The native module may later request more memory.
   // TODO(titzer): isolate is only required here for CompilationState.
+  std::shared_ptr<NativeModule> NewNativeModule(
+      Isolate* isolate, const WasmFeatures& enabled_features,
+      std::shared_ptr<const WasmModule> module);
   std::shared_ptr<NativeModule> NewNativeModule(
       Isolate* isolate, const WasmFeatures& enabled_features,
       size_t code_size_estimate, bool can_request_more,
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 08e6139abe9cfc..9ca45183ef628a 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -80,37 +80,73 @@ void int64_to_float32_wrapper(Address data) {
 
 void uint64_to_float32_wrapper(Address data) {
   uint64_t input = ReadUnalignedValue<uint64_t>(data);
-  float result = static_cast<float>(input);
-
-#if V8_CC_MSVC
-  // With MSVC we use static_cast<float>(uint32_t) instead of
-  // static_cast<float>(uint64_t) to achieve round-to-nearest-ties-even
-  // semantics. The idea is to calculate
-  // static_cast<float>(high_word) * 2^32 + static_cast<float>(low_word). To
-  // achieve proper rounding in all cases we have to adjust the high_word
-  // with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
-  // the high_word if the low_word may affect the rounding of the high_word.
-  uint32_t low_word = static_cast<uint32_t>(input & 0xFFFFFFFF);
-  uint32_t high_word = static_cast<uint32_t>(input >> 32);
-
-  float shift = static_cast<float>(1ull << 32);
-  // If the MSB of the high_word is set, then we make space for a rounding bit.
-  if (high_word < 0x80000000) {
-    high_word <<= 1;
-    shift = static_cast<float>(1ull << 31);
+#if defined(V8_OS_WIN)
+  // On Windows, the FP stack registers calculate with less precision, so the
+  // uint64_t to float32 conversion does not satisfy the WebAssembly
+  // specification. Therefore we take a different approach here:
+  //
+  // / leading 0 \/  24 float data bits  \/  for rounding \/ trailing 0 \
+  // 00000000000001XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX100000000000000
+  //
+  // Float32 can only represent 24 data bits (1 implicit 1 bit + 23 mantissa
+  // bits). Starting from the most significant 1 bit, we can therefore extract
+  // 24 bits and do the conversion only on them. The other bits can affect the
+  // result only through rounding. Rounding works as follows:
+  // * If the most significant rounding bit is not set, then round down.
+  // * If the most significant rounding bit is set, and at least one of the
+  //   other rounding bits is set, then round up.
+  // * If the most significant rounding bit is set, but all other rounding bits
+  //   are not set, then round to even.
+  // We can aggregate 'all other rounding bits' in the second-most significant
+  // rounding bit.
+  // The resulting algorithm is therefore as follows:
+  // * Check if the distance between the most significant bit (MSB) and the
+  //   least significant bit (LSB) is greater than 25 bits. If the distance is
+  //   less than or equal to 25 bits, the uint64 to float32 conversion is
+  //   exact anyway, and we just use the C++ conversion.
+  // * Find the most significant bit (MSB).
+  // * Starting from the MSB, extract 25 bits (24 data bits + the first rounding
+  //   bit).
+  // * The remaining rounding bits are guaranteed to contain at least one 1 bit,
+  //   due to the check we did above.
+  // * Store the 25 bits + 1 aggregated bit in an int32_t.
+  // * Convert this int32_t to float. The conversion does the correct rounding
+  //   now.
+  // * Shift the result back to the original magnitude.
+  uint32_t leading_zeros = base::bits::CountLeadingZeros(input);
+  uint32_t trailing_zeros = base::bits::CountTrailingZeros(input);
+  constexpr uint32_t num_extracted_bits = 25;
+  // Check if there are any rounding bits we have to aggregate.
+  if (leading_zeros + trailing_zeros + num_extracted_bits < 64) {
+    // Shift to extract the data bits.
+    uint32_t num_aggregation_bits = 64 - num_extracted_bits - leading_zeros;
+    // We extract the bits we want to convert. Note that we convert one bit more
+    // than necessary. This bit is a placeholder where we will store the
+    // aggregation bit.
+    int32_t extracted_bits =
+        static_cast<int32_t>(input >> (num_aggregation_bits - 1));
+    // Set the aggregation bit. We don't have to clear the slot first, because
+    // the bit there is also part of the aggregation.
+    extracted_bits |= 1;
+    float result = static_cast<float>(extracted_bits);
+    // We have to shift the result back by (num_aggregation_bits - 1), the
+    // amount we shifted right originally. The additional (-2) compensates for
+    // the exponent encoding below, which subtracts 127 instead of adding it:
+    // modulo 256, -127 equals +129, which is 2 more than the bias of 127.
+    int32_t shift_back = static_cast<int32_t>(num_aggregation_bits) - 1 - 2;
+    // Calculate the multiplier to shift the extracted bits back to the original
+    // magnitude. This multiplier is a power of two, so in the float32 bit
+    // representation we just have to construct the correct exponent and put it
+    // at the correct bit offset. The exponent consists of 8 bits, starting at
+    // the second MSB (a.k.a '<< 23'). The encoded (biased) exponent for 2^e is
+    // (e + 127); combined with the (-2) above, (shift_back - 127) & 0xff comes
+    // out to exactly ((num_aggregation_bits - 1) + 127).
+    int32_t multiplier_bits = ((shift_back - 127) & 0xff) << 23;
+    result *= bit_cast<float>(multiplier_bits);
+    WriteUnalignedValue<float>(data, result);
+    return;
   }
-
-  if ((high_word & 0xFE000000) && low_word) {
-    // Set the rounding bit.
-    high_word |= 1;
-  }
-
-  result = static_cast<float>(high_word);
-  result *= shift;
-  result += static_cast<float>(low_word);
-#endif
-
-  WriteUnalignedValue<float>(data, result);
+#endif  // defined(V8_OS_WIN)
+  WriteUnalignedValue<float>(data, static_cast<float>(input));
 }
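A self-contained sketch that replays the algorithm above on one input and compares it against the compiler's cast (which is already correctly rounded on non-Windows targets). The helper name and the GCC/Clang builtins are ours, not V8's, and the scale exponent is encoded transparently as (e + 127) rather than via the (-2)/(-127) folding used above:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    float Uint64ToFloat32(uint64_t input) {
      if (input == 0) return 0.0f;
      uint32_t leading_zeros = __builtin_clzll(input);   // GCC/Clang builtins
      uint32_t trailing_zeros = __builtin_ctzll(input);
      constexpr uint32_t kExtractedBits = 25;  // 24 data bits + 1 rounding bit
      if (leading_zeros + trailing_zeros + kExtractedBits >= 64) {
        return static_cast<float>(input);  // conversion is exact anyway
      }
      uint32_t num_aggregation_bits = 64 - kExtractedBits - leading_zeros;
      // Keep 26 bits and fold all lower bits into the sticky LSB.
      int32_t extracted =
          static_cast<int32_t>(input >> (num_aggregation_bits - 1)) | 1;
      float result = static_cast<float>(extracted);  // correctly rounded
      // Scale back by 2^(num_aggregation_bits - 1), constructed directly as a
      // float bit pattern: the exponent field of 2^e is (e + 127) << 23.
      int32_t exponent_field =
          (static_cast<int32_t>(num_aggregation_bits) - 1 + 127) << 23;
      float scale;
      std::memcpy(&scale, &exponent_field, sizeof scale);
      return result * scale;
    }

    int main() {
      uint64_t input = (uint64_t{1} << 50) | 1;  // forces the rounding path
      printf("%a vs %a\n", static_cast<double>(Uint64ToFloat32(input)),
             static_cast<double>(static_cast<float>(input)));  // identical
    }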
 
 void int64_to_float64_wrapper(Address data) {
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 77d46fdc0d52ce..36f9ebd8a46a54 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -5,29 +5,27 @@
 #ifndef V8_WASM_WASM_FEATURE_FLAGS_H_
 #define V8_WASM_WASM_FEATURE_FLAGS_H_
 
-// The SEPARATOR argument allows generating proper comma-separated lists.
-#define FOREACH_WASM_FEATURE_FLAG(V, SEPARATOR)                       \
-  V(mv, "multi-value support", false)                                 \
-  SEPARATOR                                                           \
-  V(eh, "exception handling opcodes", false)                          \
-  SEPARATOR                                                           \
-  V(se, "sign extension opcodes", true)                               \
-  SEPARATOR                                                           \
-  V(sat_f2i_conversions, "saturating float conversion opcodes", true) \
-  SEPARATOR                                                           \
-  V(threads, "thread opcodes", false)                                 \
-  SEPARATOR                                                           \
-  V(simd, "SIMD opcodes", false)                                      \
-  SEPARATOR                                                           \
-  V(anyref, "anyref opcodes", false)                                  \
-  SEPARATOR                                                           \
-  V(bigint, "JS BigInt support", false)                               \
-  SEPARATOR                                                           \
-  V(bulk_memory, "bulk memory opcodes", true)                         \
-  SEPARATOR                                                           \
-  V(return_call, "return call opcodes", false)                        \
-  SEPARATOR                                                           \
-  V(type_reflection, "wasm type reflection in JS", false)             \
-  SEPARATOR                                                           \
+#define FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(V) \
+  V(mv, "multi-value support", false)             \
+  V(eh, "exception handling opcodes", false)      \
+  V(threads, "thread opcodes", false)             \
+  V(simd, "SIMD opcodes", false)                  \
+  V(bigint, "JS BigInt support", false)           \
+  V(return_call, "return call opcodes", false)    \
   V(compilation_hints, "compilation hints section", false)
+
+#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) \
+  V(anyref, "anyref opcodes", false)         \
+  V(type_reflection, "wasm type reflection in JS", false)
+
+#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V)                          \
+  V(bulk_memory, "bulk memory opcodes", true)                         \
+  V(sat_f2i_conversions, "saturating float conversion opcodes", true) \
+  V(se, "sign extension opcodes", true)
+
+#define FOREACH_WASM_FEATURE_FLAG(V)        \
+  FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(V) \
+  FOREACH_WASM_STAGING_FEATURE_FLAG(V)      \
+  FOREACH_WASM_SHIPPED_FEATURE_FLAG(V)
+
 #endif  // V8_WASM_WASM_FEATURE_FLAGS_H_
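The split works without the old SEPARATOR trick because each entry now ends its own line, so the three sub-lists concatenate cleanly and every consumer supplies its own punctuation. A reduced sketch of the pattern, with made-up flags:

    #define FOREACH_DEMO_FLAG(V)      \
      V(alpha, "first flag", false)   \
      V(beta, "second flag", true)

    // A consumer that needs no separators at all:
    #define COUNT_FLAG(name, desc, default_value) +1
    constexpr int kNumDemoFlags = 0 FOREACH_DEMO_FLAG(COUNT_FLAG);
    #undef COUNT_FLAG
    static_assert(kNumDemoFlags == 2, "two demo flags");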
diff --git a/deps/v8/src/wasm/wasm-features.cc b/deps/v8/src/wasm/wasm-features.cc
index fc0286655e7cba..d62db91750bb22 100644
--- a/deps/v8/src/wasm/wasm-features.cc
+++ b/deps/v8/src/wasm/wasm-features.cc
@@ -11,17 +11,17 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
-#define COMMA ,
-#define SPACE
-#define DO_UNION(feat, desc, val) dst->feat |= src.feat;
-#define FLAG_REF(feat, desc, val) FLAG_experimental_wasm_##feat
 
 void UnionFeaturesInto(WasmFeatures* dst, const WasmFeatures& src) {
-  FOREACH_WASM_FEATURE(DO_UNION, SPACE);
+#define DO_UNION(feat, desc, val) dst->feat |= src.feat;
+  FOREACH_WASM_FEATURE(DO_UNION);
+#undef DO_UNION
 }
 
 WasmFeatures WasmFeaturesFromFlags() {
-  return WasmFeatures{FOREACH_WASM_FEATURE(FLAG_REF, COMMA)};
+#define FLAG_REF(feat, desc, val) FLAG_experimental_wasm_##feat,
+  return WasmFeatures(FOREACH_WASM_FEATURE(FLAG_REF){});
+#undef FLAG_REF
 }
 
 WasmFeatures WasmFeaturesFromIsolate(Isolate* isolate) {
@@ -31,10 +31,6 @@ WasmFeatures WasmFeaturesFromIsolate(Isolate* isolate) {
   return features;
 }
 
-#undef DO_UNION
-#undef FLAG_REF
-#undef SPACE
-#undef COMMA
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/wasm/wasm-features.h b/deps/v8/src/wasm/wasm-features.h
index 2c6ab0f85a5f3d..956982536da7d1 100644
--- a/deps/v8/src/wasm/wasm-features.h
+++ b/deps/v8/src/wasm/wasm-features.h
@@ -17,37 +17,50 @@ namespace internal {
 class Isolate;
 namespace wasm {
 
-#define COMMA ,
-#define SPACE
-#define DECL_FIELD(feat, desc, val) bool feat = false;
-#define JUST_TRUE(feat, desc, val) true
-#define JUST_FALSE(feat, desc, val) false
-#define DECL_PARAM(feat, desc, val) bool p##feat
-#define DO_INIT(feat, desc, val) feat(p##feat)
+// This is an empty type to indicate the end of the {WasmFeatures} struct. We
+// use the {end_t} type there to avoid trailing commas that get generated by
+// the macro generators. We considered the following alternatives:
+// * Add "separators" to the {FOREACH_WASM_FEATURE_FLAGS} between entries. This
+//   does not work when we want to have different kinds of flags, e.g. for
+//   experimental, staging, and shipped features.
+// * Use initialization lists, e.g. construct {WasmFeatures} with
+//   "WasmFeatures{true, true, ..., true,}". This solves the comma problem,
+//   because trailing commas are allowed here. However, we cannot
+//   default-initialize the fields of {WasmFeatures} anymore. This seems
+//   error-prone, because default-constructed {WasmFeatures} structs are already
+//   used in the code base.
+// * Avoid the use of {constexpr}. With that we would be more flexible with how
+//   we generate {kAllWasmFeatures} and {kNoWasmFeatures}. These values may be
+//   used in performance-critical code, however, e.g. in the decoder or in the
+//   interpreter.
+struct end_t {};
 
 // Enabled or detected features.
 struct WasmFeatures {
-  FOREACH_WASM_FEATURE(DECL_FIELD, SPACE)
+#define DECL_FIELD(feat, desc, val) bool feat = false;
+  FOREACH_WASM_FEATURE(DECL_FIELD)
+#undef DECL_FIELD
+  // Marker for the end of the list, see the comment at {end_t}.
+  end_t end_;
 
+#define DECL_PARAM(feat, desc, val) bool p##feat,
+#define DO_INIT(feat, desc, val) feat(p##feat),
+  explicit constexpr WasmFeatures(FOREACH_WASM_FEATURE(DECL_PARAM) end_t)
+      : FOREACH_WASM_FEATURE(DO_INIT) end_() {}
+#undef DECL_PARAM
+#undef DO_INIT
   constexpr WasmFeatures() = default;
-
-  explicit constexpr WasmFeatures(FOREACH_WASM_FEATURE(DECL_PARAM, COMMA))
-      : FOREACH_WASM_FEATURE(DO_INIT, COMMA) {}
 };
 
-static constexpr WasmFeatures kAllWasmFeatures{
-    FOREACH_WASM_FEATURE(JUST_TRUE, COMMA)};
-
-static constexpr WasmFeatures kNoWasmFeatures{
-    FOREACH_WASM_FEATURE(JUST_FALSE, COMMA)};
-
+#define JUST_TRUE(feat, desc, val) true,
+static constexpr WasmFeatures kAllWasmFeatures(
+    FOREACH_WASM_FEATURE(JUST_TRUE){});
 #undef JUST_TRUE
+
+#define JUST_FALSE(feat, desc, val) false,
+static constexpr WasmFeatures kNoWasmFeatures(
+    FOREACH_WASM_FEATURE(JUST_FALSE){});
 #undef JUST_FALSE
-#undef DECL_FIELD
-#undef DECL_PARAM
-#undef DO_INIT
-#undef COMMA
-#undef SPACE
 
 static constexpr WasmFeatures kAsmjsWasmFeatures = kNoWasmFeatures;
 
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 4449439896488a..299128860dafb4 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -1676,7 +1676,7 @@ class ThreadImpl {
         converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
 
     Push(result);
-    *len = 1 + imm.length;
+    *len += imm.length;
 
     if (FLAG_trace_wasm_memory) {
       MemoryTracingInfo info(imm.offset + index, false, rep);
@@ -1702,7 +1702,7 @@ class ThreadImpl {
       return false;
     }
     WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
-    *len = 1 + imm.length;
+    *len += imm.length;
 
     if (FLAG_trace_wasm_memory) {
       MemoryTracingInfo info(imm.offset + index, true, rep);
@@ -2241,14 +2241,27 @@ class ThreadImpl {
     Push(WasmValue(Simd128(res)));               \
     return true;                                 \
   }
+      BINOP_CASE(F64x2Add, f64x2, float2, 2, a + b)
+      BINOP_CASE(F64x2Sub, f64x2, float2, 2, a - b)
+      BINOP_CASE(F64x2Mul, f64x2, float2, 2, a * b)
+      BINOP_CASE(F64x2Div, f64x2, float2, 2, base::Divide(a, b))
+      BINOP_CASE(F64x2Min, f64x2, float2, 2, JSMin(a, b))
+      BINOP_CASE(F64x2Max, f64x2, float2, 2, JSMax(a, b))
       BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
       BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
       BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
-      BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
-      BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
+      BINOP_CASE(F32x4Div, f32x4, float4, 4, a / b)
+      BINOP_CASE(F32x4Min, f32x4, float4, 4, JSMin(a, b))
+      BINOP_CASE(F32x4Max, f32x4, float4, 4, JSMax(a, b))
       BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
       BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
       BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
+      BINOP_CASE(I64x2MinS, i64x2, int2, 2, a < b ? a : b)
+      BINOP_CASE(I64x2MinU, i64x2, int2, 2,
+                 static_cast<uint64_t>(a) < static_cast<uint64_t>(b) ? a : b)
+      BINOP_CASE(I64x2MaxS, i64x2, int2, 2, a > b ? a : b)
+      BINOP_CASE(I64x2MaxU, i64x2, int2, 2,
+                 static_cast<uint64_t>(a) > static_cast<uint64_t>(b) ? a : b)
       BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
       BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
       BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
@@ -2422,40 +2435,32 @@ class ThreadImpl {
       case kExprS128StoreMem:
         return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
                                               MachineRepresentation::kSimd128);
-#define SHIFT_CASE(op, name, stype, count, expr)                         \
-  case kExpr##op: {                                                      \
-    SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
-    *len += 1;                                                           \
-    WasmValue v = Pop();                                                 \
-    stype s = v.to_s128().to_##name();                                   \
-    stype res;                                                           \
-    for (size_t i = 0; i < count; ++i) {                                 \
-      auto a = s.val[i];                                                 \
-      res.val[i] = expr;                                                 \
-    }                                                                    \
-    Push(WasmValue(Simd128(res)));                                       \
-    return true;                                                         \
-  }
-        SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
-                   static_cast<uint64_t>(a) << imm.shift)
-        SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> imm.shift)
-        SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
-                   static_cast<uint64_t>(a) >> imm.shift)
-        SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
-                   static_cast<uint32_t>(a) << imm.shift)
-        SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
-        SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
-                   static_cast<uint32_t>(a) >> imm.shift)
-        SHIFT_CASE(I16x8Shl, i16x8, int8, 8,
-                   static_cast<uint16_t>(a) << imm.shift)
-        SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> imm.shift)
-        SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
-                   static_cast<uint16_t>(a) >> imm.shift)
-        SHIFT_CASE(I8x16Shl, i8x16, int16, 16,
-                   static_cast<uint8_t>(a) << imm.shift)
-        SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> imm.shift)
+#define SHIFT_CASE(op, name, stype, count, expr) \
+  case kExpr##op: {                              \
+    uint32_t shift = Pop().to<uint32_t>();       \
+    WasmValue v = Pop();                         \
+    stype s = v.to_s128().to_##name();           \
+    stype res;                                   \
+    for (size_t i = 0; i < count; ++i) {         \
+      auto a = s.val[i];                         \
+      res.val[i] = expr;                         \
+    }                                            \
+    Push(WasmValue(Simd128(res)));               \
+    return true;                                 \
+  }
+        SHIFT_CASE(I64x2Shl, i64x2, int2, 2, static_cast<uint64_t>(a) << shift)
+        SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> shift)
+        SHIFT_CASE(I64x2ShrU, i64x2, int2, 2, static_cast<uint64_t>(a) >> shift)
+        SHIFT_CASE(I32x4Shl, i32x4, int4, 4, static_cast<uint32_t>(a) << shift)
+        SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> shift)
+        SHIFT_CASE(I32x4ShrU, i32x4, int4, 4, static_cast<uint32_t>(a) >> shift)
+        SHIFT_CASE(I16x8Shl, i16x8, int8, 8, static_cast<uint16_t>(a) << shift)
+        SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> shift)
+        SHIFT_CASE(I16x8ShrU, i16x8, int8, 8, static_cast<uint16_t>(a) >> shift)
+        SHIFT_CASE(I8x16Shl, i8x16, int16, 16, static_cast<uint8_t>(a) << shift)
+        SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> shift)
         SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
-                   static_cast<uint8_t>(a) >> imm.shift)
+                   static_cast<uint8_t>(a) >> shift)
 #undef SHIFT_CASE
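After this rewrite the shift count is an ordinary value operand rather than an immediate, so the interpreter pops the i32 count first (it is on top of the stack) and the vector second. A worked lane-wise example in plain C++ (illustrative only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // i32x4.shl with lanes {1, 2, 3, 4} and a popped shift count of 1.
      uint32_t lanes[4] = {1, 2, 3, 4};
      uint32_t shift = 1;
      for (uint32_t& lane : lanes) lane <<= shift;  // each lane independently
      printf("%u %u %u %u\n", lanes[0], lanes[1], lanes[2],
             lanes[3]);  // prints: 2 4 6 8
    }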
 #define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
                      expr)                                                    \
@@ -3042,8 +3047,8 @@ class ThreadImpl {
                                                            code->at(pc));
           HandleScope handle_scope(isolate_);  // Avoid leaking handles.
 
-          Handle<WasmExportedFunction> function =
-              WasmInstanceObject::GetOrCreateWasmExportedFunction(
+          Handle<WasmExternalFunction> function =
+              WasmInstanceObject::GetOrCreateWasmExternalFunction(
                   isolate_, instance_object_, imm.index);
           Push(WasmValue(function));
           len = 1 + imm.length;
@@ -3679,7 +3684,7 @@ class ThreadImpl {
     WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate);
 
     if (code->kind() == WasmCode::kWasmToJsWrapper &&
-        !IsJSCompatibleSignature(sig, enabled_features.bigint)) {
+        !IsJSCompatibleSignature(sig, enabled_features)) {
       Drop(num_args);  // Pop arguments before throwing.
       isolate->Throw(*isolate->factory()->NewTypeError(
           MessageTemplate::kWasmTrapTypeError));
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 1ee76fc11dff83..f10f5ff2bfecb8 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -1223,6 +1223,9 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
   } else if (enabled_features.anyref &&
              string->StringEquals(v8_str(isolate, "anyfunc"))) {
     *type = i::wasm::kWasmFuncRef;
+  } else if (enabled_features.eh &&
+             string->StringEquals(v8_str(isolate, "exnref"))) {
+    *type = i::wasm::kWasmExnRef;
   } else {
     // Unrecognized type.
     *type = i::wasm::kWasmStmt;
@@ -1337,7 +1340,8 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
       global_obj->SetF64(f64_value);
       break;
     }
-    case i::wasm::kWasmAnyRef: {
+    case i::wasm::kWasmAnyRef:
+    case i::wasm::kWasmExnRef: {
       if (args.Length() < 2) {
         // When no initial value is provided, we have to use the WebAssembly
         // default value 'null', and not the JS default value 'undefined'.
@@ -1379,6 +1383,21 @@ void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
   thrower.TypeError("WebAssembly.Exception cannot be called");
 }
 
+namespace {
+
+uint32_t GetIterableLength(i::Isolate* isolate, Local<Context> context,
+                           Local<Object> iterable) {
+  Local<String> length = Utils::ToLocal(isolate->factory()->length_string());
+  MaybeLocal<Value> property = iterable->Get(context, length);
+  if (property.IsEmpty()) return i::kMaxUInt32;
+  MaybeLocal<Uint32> number = property.ToLocalChecked()->ToArrayIndex(context);
+  if (number.IsEmpty()) return i::kMaxUInt32;
+  DCHECK_NE(i::kMaxUInt32, number.ToLocalChecked()->Value());
+  return number.ToLocalChecked()->Value();
+}
+
+}  // namespace
+
 // WebAssembly.Function
 void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
@@ -1403,13 +1422,16 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
       function_type->Get(context, parameters_key);
   v8::Local<v8::Value> parameters_value;
   if (!parameters_maybe.ToLocal(&parameters_value)) return;
-  // TODO(7742): Allow any iterable, not just {Array} here.
-  if (!parameters_value->IsArray()) {
+  if (!parameters_value->IsObject()) {
     thrower.TypeError("Argument 0 must be a function type with 'parameters'");
     return;
   }
-  Local<Array> parameters = parameters_value.As<Array>();
-  uint32_t parameters_len = parameters->Length();
+  Local<Object> parameters = parameters_value.As<Object>();
+  uint32_t parameters_len = GetIterableLength(i_isolate, context, parameters);
+  if (parameters_len == i::kMaxUInt32) {
+    thrower.TypeError("Argument 0 contains parameters without 'length'");
+    return;
+  }
   if (parameters_len > i::wasm::kV8MaxWasmFunctionParams) {
     thrower.TypeError("Argument 0 contains too many parameters");
     return;
@@ -1421,13 +1443,16 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
       function_type->Get(context, results_key);
   v8::Local<v8::Value> results_value;
   if (!results_maybe.ToLocal(&results_value)) return;
-  // TODO(7742): Allow any iterable, not just {Array} here.
-  if (!results_value->IsArray()) {
+  if (!results_value->IsObject()) {
     thrower.TypeError("Argument 0 must be a function type with 'results'");
     return;
   }
-  Local<Array> results = results_value.As<Array>();
-  uint32_t results_len = results->Length();
+  Local<Object> results = results_value.As<Object>();
+  uint32_t results_len = GetIterableLength(i_isolate, context, results);
+  if (results_len == i::kMaxUInt32) {
+    thrower.TypeError("Argument 0 contains results without 'length'");
+    return;
+  }
   if (results_len > (enabled_features.mv
                          ? i::wasm::kV8MaxWasmFunctionMultiReturns
                          : i::wasm::kV8MaxWasmFunctionReturns)) {
@@ -1474,37 +1499,6 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
   args.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
-// Converts the given {type} into a string representation that can be used in
-// reflective functions. Should be kept in sync with the {GetValueType} helper.
-Local<String> ToValueTypeString(Isolate* isolate, i::wasm::ValueType type) {
-  Local<String> string;
-  switch (type) {
-    case i::wasm::kWasmI32: {
-      string = v8_str(isolate, "i32");
-      break;
-    }
-    case i::wasm::kWasmI64: {
-      string = v8_str(isolate, "i64");
-      break;
-    }
-    case i::wasm::kWasmF32: {
-      string = v8_str(isolate, "f32");
-      break;
-    }
-    case i::wasm::kWasmF64: {
-      string = v8_str(isolate, "f64");
-      break;
-    }
-    case i::wasm::kWasmAnyRef: {
-      string = v8_str(isolate, "anyref");
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-  return string;
-}
-
 // WebAssembly.Function.type(WebAssembly.Function) -> FunctionType
 void WebAssemblyFunctionType(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
@@ -1524,36 +1518,8 @@ void WebAssemblyFunctionType(const v8::FunctionCallbackInfo<v8::Value>& args) {
     return;
   }
 
-  // Extract values for the {ValueType[]} arrays.
-  size_t param_index = 0;
-  i::ScopedVector<Local<Value>> param_values(sig->parameter_count());
-  for (i::wasm::ValueType type : sig->parameters()) {
-    param_values[param_index++] = ToValueTypeString(isolate, type);
-  }
-  size_t result_index = 0;
-  i::ScopedVector<Local<Value>> result_values(sig->return_count());
-  for (i::wasm::ValueType type : sig->returns()) {
-    result_values[result_index++] = ToValueTypeString(isolate, type);
-  }
-
-  // Create the resulting {FunctionType} object.
-  Local<Object> ret = v8::Object::New(isolate);
-  Local<Context> context = isolate->GetCurrentContext();
-  Local<Array> params =
-      v8::Array::New(isolate, param_values.begin(), param_values.size());
-  if (!ret->CreateDataProperty(context, v8_str(isolate, "parameters"), params)
-           .IsJust()) {
-    return;
-  }
-  Local<Array> results =
-      v8::Array::New(isolate, result_values.begin(), result_values.size());
-  if (!ret->CreateDataProperty(context, v8_str(isolate, "results"), results)
-           .IsJust()) {
-    return;
-  }
-
-  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
-  return_value.Set(ret);
+  auto type = i::wasm::GetTypeForFunction(i_isolate, sig);
+  args.GetReturnValue().Set(Utils::ToLocal(type));
 }
 
 constexpr const char* kName_WasmGlobalObject = "WebAssembly.Global";
@@ -1681,48 +1647,15 @@ void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
   auto maybe_table = GetFirstArgumentAsTable(args, &thrower);
   if (thrower.error()) return;
   i::Handle<i::WasmTableObject> table = maybe_table.ToHandleChecked();
-  v8::Local<v8::Object> ret = v8::Object::New(isolate);
-
-  Local<String> element;
-  auto enabled_features = i::wasm::WasmFeaturesFromFlags();
-  if (table->type() == i::wasm::ValueType::kWasmFuncRef) {
-    element = v8_str(isolate, "anyfunc");
-  } else if (enabled_features.anyref &&
-             table->type() == i::wasm::ValueType::kWasmAnyRef) {
-    element = v8_str(isolate, "anyref");
-  } else {
-    UNREACHABLE();
-  }
-  if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
-                               v8_str(isolate, "element"), element)
-           .IsJust()) {
-    return;
-  }
-
-  uint32_t curr_size = table->current_length();
-  DCHECK_LE(curr_size, std::numeric_limits<uint32_t>::max());
-  if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
-                               v8_str(isolate, "minimum"),
-                               v8::Integer::NewFromUnsigned(
-                                   isolate, static_cast<uint32_t>(curr_size)))
-           .IsJust()) {
-    return;
-  }
-
+  base::Optional<uint32_t> max_size;
   if (!table->maximum_length().IsUndefined()) {
-    uint64_t max_size = table->maximum_length().Number();
-    DCHECK_LE(max_size, std::numeric_limits<uint32_t>::max());
-    if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
-                                 v8_str(isolate, "maximum"),
-                                 v8::Integer::NewFromUnsigned(
-                                     isolate, static_cast<uint32_t>(max_size)))
-             .IsJust()) {
-      return;
-    }
+    uint64_t max_size64 = table->maximum_length().Number();
+    DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
+    max_size.emplace(static_cast<uint32_t>(max_size64));
   }
-
-  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
-  return_value.Set(ret);
+  auto type = i::wasm::GetTypeForTable(i_isolate, table->type(),
+                                       table->current_length(), max_size);
+  args.GetReturnValue().Set(Utils::ToLocal(type));
 }
 
 // WebAssembly.Memory.grow(num) -> num
@@ -1802,33 +1735,18 @@ void WebAssemblyMemoryType(const v8::FunctionCallbackInfo<v8::Value>& args) {
   auto maybe_memory = GetFirstArgumentAsMemory(args, &thrower);
   if (thrower.error()) return;
   i::Handle<i::WasmMemoryObject> memory = maybe_memory.ToHandleChecked();
-  v8::Local<v8::Object> ret = v8::Object::New(isolate);
   i::Handle<i::JSArrayBuffer> buffer(memory->array_buffer(), i_isolate);
-
   size_t curr_size = buffer->byte_length() / i::wasm::kWasmPageSize;
   DCHECK_LE(curr_size, std::numeric_limits<uint32_t>::max());
-  if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
-                               v8_str(isolate, "minimum"),
-                               v8::Integer::NewFromUnsigned(
-                                   isolate, static_cast<uint32_t>(curr_size)))
-           .IsJust()) {
-    return;
-  }
-
+  uint32_t min_size = static_cast<uint32_t>(curr_size);
+  base::Optional<uint32_t> max_size;
   if (memory->has_maximum_pages()) {
-    uint64_t max_size = memory->maximum_pages();
-    DCHECK_LE(max_size, std::numeric_limits<uint32_t>::max());
-    if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
-                                 v8_str(isolate, "maximum"),
-                                 v8::Integer::NewFromUnsigned(
-                                     isolate, static_cast<uint32_t>(max_size)))
-             .IsJust()) {
-      return;
-    }
+    uint64_t max_size64 = memory->maximum_pages();
+    DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
+    max_size.emplace(static_cast<uint32_t>(max_size64));
   }
-
-  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
-  return_value.Set(ret);
+  auto type = i::wasm::GetTypeForMemory(i_isolate, min_size, max_size);
+  args.GetReturnValue().Set(Utils::ToLocal(type));
 }
 
 void WebAssemblyGlobalGetValueCommon(
@@ -1960,24 +1878,9 @@ void WebAssemblyGlobalType(const v8::FunctionCallbackInfo<v8::Value>& args) {
   auto maybe_global = GetFirstArgumentAsGlobal(args, &thrower);
   if (thrower.error()) return;
   i::Handle<i::WasmGlobalObject> global = maybe_global.ToHandleChecked();
-  v8::Local<v8::Object> ret = v8::Object::New(isolate);
-
-  if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
-                               v8_str(isolate, "mutable"),
-                               v8::Boolean::New(isolate, global->is_mutable()))
-           .IsJust()) {
-    return;
-  }
-
-  Local<String> type = ToValueTypeString(isolate, global->type());
-  if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
-                               v8_str(isolate, "value"), type)
-           .IsJust()) {
-    return;
-  }
-
-  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
-  return_value.Set(ret);
+  auto type = i::wasm::GetTypeForGlobal(i_isolate, global->is_mutable(),
+                                        global->type());
+  args.GetReturnValue().Set(Utils::ToLocal(type));
 }
 
 }  // namespace
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 7dd6b1c7b24e9e..d3874e1a3447d3 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -233,6 +233,7 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
       global_imports_(zone),
       exports_(zone),
       functions_(zone),
+      tables_(zone),
       data_segments_(zone),
       indirect_functions_(zone),
       globals_(zone),
@@ -269,15 +270,29 @@ uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
 }
 
 uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
+  DCHECK(allocating_indirect_functions_allowed_);
   uint32_t index = static_cast<uint32_t>(indirect_functions_.size());
   DCHECK_GE(FLAG_wasm_max_table_size, index);
   if (count > FLAG_wasm_max_table_size - index) {
     return std::numeric_limits<uint32_t>::max();
   }
-  DCHECK(max_table_size_ == 0 ||
-         indirect_functions_.size() + count <= max_table_size_);
-  indirect_functions_.resize(indirect_functions_.size() + count,
-                             WasmElemSegment::kNullIndex);
+  uint32_t new_size = static_cast<uint32_t>(indirect_functions_.size()) + count;
+  DCHECK(max_table_size_ == 0 || new_size <= max_table_size_);
+  indirect_functions_.resize(new_size, WasmElemSegment::kNullIndex);
+  uint32_t max = max_table_size_ > 0 ? max_table_size_ : new_size;
+  if (tables_.empty()) {
+    // This cannot use {AddTable} because that would flip the
+    // {allocating_indirect_functions_allowed_} flag.
+    tables_.push_back({kWasmFuncRef, new_size, max, true});
+  } else {
+    // There can only be the indirect function table so far, otherwise the
+    // {allocating_indirect_functions_allowed_} flag would have been false.
+    DCHECK_EQ(1u, tables_.size());
+    DCHECK_EQ(kWasmFuncRef, tables_[0].type);
+    DCHECK(tables_[0].has_maximum);
+    tables_[0].min_size = new_size;
+    tables_[0].max_size = max;
+  }
   return index;
 }
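A hypothetical usage sketch (V8-internal API, builder setup elided): the indirect function table must reach its final size before any explicit table is added, since AddTable() flips {allocating_indirect_functions_allowed_} in debug builds:

    void BuildTables(WasmModuleBuilder* builder) {
      // Grows (or implicitly creates) table 0, the funcref function table.
      uint32_t base_index = builder->AllocateIndirectFunctions(3);
      CHECK_NE(std::numeric_limits<uint32_t>::max(), base_index);
      // From here on, only explicit tables may be added.
      uint32_t anyref_table =
          builder->AddTable(kWasmAnyRef, /*min_size=*/1, /*max_size=*/16);
      USE(anyref_table);
    }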
 
@@ -290,6 +305,27 @@ void WasmModuleBuilder::SetMaxTableSize(uint32_t max) {
   DCHECK_GE(FLAG_wasm_max_table_size, max);
   DCHECK_GE(max, indirect_functions_.size());
   max_table_size_ = max;
+  DCHECK(allocating_indirect_functions_allowed_);
+  if (!tables_.empty()) {
+    tables_[0].max_size = max;
+  }
+}
+
+uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size) {
+#if DEBUG
+  allocating_indirect_functions_allowed_ = false;
+#endif
+  tables_.push_back({type, min_size, 0, false});
+  return static_cast<uint32_t>(tables_.size() - 1);
+}
+
+uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
+                                     uint32_t max_size) {
+#if DEBUG
+  allocating_indirect_functions_allowed_ = false;
+#endif
+  tables_.push_back({type, min_size, max_size, true});
+  return static_cast<uint32_t>(tables_.size() - 1);
 }
 
 uint32_t WasmModuleBuilder::AddImport(Vector<const char> name,
@@ -408,21 +444,20 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
     FixupSection(buffer, start);
   }
 
-  // == emit function table ====================================================
-  if (indirect_functions_.size() > 0) {
+  // == Emit tables ============================================================
+  if (tables_.size() > 0) {
     size_t start = EmitSection(kTableSectionCode, buffer);
-    buffer->write_u8(1);  // table count
-    buffer->write_u8(kLocalFuncRef);
-    buffer->write_u8(kHasMaximumFlag);
-    buffer->write_size(indirect_functions_.size());
-    size_t max =
-        max_table_size_ > 0 ? max_table_size_ : indirect_functions_.size();
-    DCHECK_GE(max, indirect_functions_.size());
-    buffer->write_size(max);
+    buffer->write_size(tables_.size());
+    for (const WasmTable& table : tables_) {
+      buffer->write_u8(ValueTypes::ValueTypeCodeFor(table.type));
+      buffer->write_u8(table.has_maximum ? kHasMaximumFlag : kNoMaximumFlag);
+      buffer->write_size(table.min_size);
+      if (table.has_maximum) buffer->write_size(table.max_size);
+    }
     FixupSection(buffer, start);
   }
 
-  // == emit memory declaration ================================================
+  // == Emit memory declaration ================================================
   {
     size_t start = EmitSection(kMemorySectionCode, buffer);
     buffer->write_u8(1);  // memory count
@@ -473,7 +508,13 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
           buffer->write_u8(kExprGetGlobal);
           buffer->write_u32v(global.init.val.global_index);
           break;
-        default: {
+        case WasmInitExpr::kRefNullConst:
+          buffer->write_u8(kExprRefNull);
+          break;
+        case WasmInitExpr::kRefFuncConst:
+          UNIMPLEMENTED();
+          break;
+        case WasmInitExpr::kNone: {
           // No initializer, emit a default value.
           switch (global.type) {
             case kWasmI32:
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 9e6a8933e2fc65..4c122b806235bd 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -243,6 +243,8 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
   uint32_t AllocateIndirectFunctions(uint32_t count);
   void SetIndirectFunction(uint32_t indirect, uint32_t direct);
   void SetMaxTableSize(uint32_t max);
+  uint32_t AddTable(ValueType type, uint32_t min_size);
+  uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size);
   void MarkStartFunction(WasmFunctionBuilder* builder);
   void AddExport(Vector<const char> name, ImportExportKindCode kind,
                  uint32_t index);
@@ -288,6 +290,13 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
     WasmInitExpr init;
   };
 
+  struct WasmTable {
+    ValueType type;
+    uint32_t min_size;
+    uint32_t max_size;
+    bool has_maximum;
+  };
+
   struct WasmDataSegment {
     ZoneVector<byte> data;
     uint32_t dest;
@@ -300,6 +309,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
   ZoneVector<WasmGlobalImport> global_imports_;
   ZoneVector<WasmExport> exports_;
   ZoneVector<WasmFunctionBuilder*> functions_;
+  ZoneVector<WasmTable> tables_;
   ZoneVector<WasmDataSegment> data_segments_;
   ZoneVector<uint32_t> indirect_functions_;
   ZoneVector<WasmGlobal> globals_;
@@ -313,6 +323,8 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
 #if DEBUG
   // Once AddExportedImport is called, no more imports can be added.
   bool adding_imports_allowed_ = true;
+  // Indirect functions must be allocated before adding extra tables.
+  bool allocating_indirect_functions_allowed_ = true;
 #endif
 };
 
diff --git a/deps/v8/src/wasm/wasm-module-sourcemap.cc b/deps/v8/src/wasm/wasm-module-sourcemap.cc
new file mode 100644
index 00000000000000..cfe54e7c375885
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-module-sourcemap.cc
@@ -0,0 +1,161 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-module-sourcemap.h"
+
+#include <algorithm>
+
+#include "include/v8.h"
+#include "src/api/api.h"
+#include "src/base/vlq-base64.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+WasmModuleSourceMap::WasmModuleSourceMap(v8::Isolate* v8_isolate,
+                                         v8::Local<v8::String> src_map_str) {
+  v8::HandleScope scope(v8_isolate);
+  v8::Local<v8::Context> context = v8::Context::New(v8_isolate);
+
+  v8::Local<v8::Value> src_map_value;
+  if (!v8::JSON::Parse(context, src_map_str).ToLocal(&src_map_value)) return;
+  v8::Local<v8::Object> src_map_obj =
+      v8::Local<v8::Object>::Cast(src_map_value);
+
+  v8::Local<v8::Value> version_value, sources_value, mappings_value;
+  bool has_valid_version =
+      src_map_obj
+          ->Get(context,
+                v8::String::NewFromUtf8(v8_isolate, "version").ToLocalChecked())
+          .ToLocal(&version_value) &&
+      version_value->IsUint32();
+  uint32_t version = 0;
+  if (!has_valid_version || !version_value->Uint32Value(context).To(&version) ||
+      version != 3u)
+    return;
+
+  bool has_valid_sources =
+      src_map_obj
+          ->Get(context,
+                v8::String::NewFromUtf8(v8_isolate, "sources").ToLocalChecked())
+          .ToLocal(&sources_value) &&
+      sources_value->IsArray();
+  if (!has_valid_sources) return;
+
+  v8::Local<v8::Object> sources_arr =
+      v8::Local<v8::Object>::Cast(sources_value);
+  v8::Local<v8::Value> sources_len_value;
+  if (!sources_arr
+           ->Get(context,
+                 v8::String::NewFromUtf8(v8_isolate, "length").ToLocalChecked())
+           .ToLocal(&sources_len_value))
+    return;
+  uint32_t sources_len = 0;
+  if (!sources_len_value->Uint32Value(context).To(&sources_len)) return;
+
+  for (uint32_t i = 0; i < sources_len; ++i) {
+    v8::Local<v8::Value> file_name_value;
+    if (!sources_arr->Get(context, i).ToLocal(&file_name_value) ||
+        !file_name_value->IsString())
+      return;
+    v8::Local<v8::String> file_name =
+        v8::Local<v8::String>::Cast(file_name_value);
+    auto file_name_sz = file_name->Utf8Length(v8_isolate);
+    std::unique_ptr<char[]> file_name_buf(new char[file_name_sz + 1]);
+    file_name->WriteUtf8(v8_isolate, file_name_buf.get());
+    file_name_buf.get()[file_name_sz] = '\0';
+    filenames.emplace_back(file_name_buf.get());
+  }
+
+  bool has_valid_mappings =
+      src_map_obj
+          ->Get(
+              context,
+              v8::String::NewFromUtf8(v8_isolate, "mappings").ToLocalChecked())
+          .ToLocal(&mappings_value) &&
+      mappings_value->IsString();
+  if (!has_valid_mappings) return;
+
+  v8::Local<v8::String> mappings = v8::Local<v8::String>::Cast(mappings_value);
+  int mappings_sz = mappings->Utf8Length(v8_isolate);
+  std::unique_ptr<char[]> mappings_buf(new char[mappings_sz + 1]);
+  mappings->WriteUtf8(v8_isolate, mappings_buf.get());
+  mappings_buf.get()[mappings_sz] = '\0';
+
+  valid_ = DecodeMapping(mappings_buf.get());
+}
+
+size_t WasmModuleSourceMap::GetSourceLine(size_t wasm_offset) const {
+  std::vector<std::size_t>::const_iterator up =
+      std::upper_bound(offsets.begin(), offsets.end(), wasm_offset);
+  CHECK_NE(offsets.begin(), up);
+  size_t source_idx = up - offsets.begin() - 1;
+  return source_row[source_idx];
+}
+
+std::string WasmModuleSourceMap::GetFilename(size_t wasm_offset) const {
+  std::vector<size_t>::const_iterator up =
+      std::upper_bound(offsets.begin(), offsets.end(), wasm_offset);
+  CHECK_NE(offsets.begin(), up);
+  size_t offset_idx = up - offsets.begin() - 1;
+  size_t filename_idx = file_idxs[offset_idx];
+  return filenames[filename_idx];
+}
+
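+// True iff the range [start, end) overlaps the span covered by the decoded
+// offsets.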
+bool WasmModuleSourceMap::HasSource(size_t start, size_t end) const {
+  return start <= *(offsets.end() - 1) && end > *offsets.begin();
+}
+
+bool WasmModuleSourceMap::HasValidEntry(size_t start, size_t addr) const {
+  std::vector<size_t>::const_iterator up =
+      std::upper_bound(offsets.begin(), offsets.end(), addr);
+  if (up == offsets.begin()) return false;
+  size_t offset_idx = up - offsets.begin() - 1;
+  size_t entry_offset = offsets[offset_idx];
+  if (entry_offset < start) return false;
+  return true;
+}
+
+bool WasmModuleSourceMap::DecodeMapping(const std::string& s) {
+  size_t pos = 0, gen_col = 0, file_idx = 0, ori_line = 0;
+  int32_t qnt = 0;
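+  // "mappings" is a comma-separated list of VLQ-Base64 segments, each holding
+  // four delta-encoded fields: generated column (the wasm offset), source
+  // file index, source line, and source column. For illustration, "CAEA,CACA"
+  // decodes to offsets {1, 2}, file indices {0, 0} and source lines {2, 3}.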
+
+  while (pos < s.size()) {
+    // Skip redundant commas.
+    if (s[pos] == ',') {
+      ++pos;
+      continue;
+    }
+    if ((qnt = base::VLQBase64Decode(s.c_str(), s.size(), &pos)) ==
+        std::numeric_limits<int32_t>::min())
+      return false;
+    gen_col += qnt;
+    if ((qnt = base::VLQBase64Decode(s.c_str(), s.size(), &pos)) ==
+        std::numeric_limits<int32_t>::min())
+      return false;
+    file_idx += qnt;
+    if ((qnt = base::VLQBase64Decode(s.c_str(), s.size(), &pos)) ==
+        std::numeric_limits<int32_t>::min())
+      return false;
+    ori_line += qnt;
+    // The column number in the source file is always 0 in source maps
+    // generated by Emscripten, so we decode this field but do not use it.
+    if ((qnt = base::VLQBase64Decode(s.c_str(), s.size(), &pos)) ==
+        std::numeric_limits<int32_t>::min())
+      return false;
+
+    if (pos < s.size() && s[pos] != ',') return false;
+    pos++;
+
+    file_idxs.push_back(file_idx);
+    source_row.push_back(ori_line);
+    offsets.push_back(gen_col);
+  }
+  return true;
+}
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module-sourcemap.h b/deps/v8/src/wasm/wasm-module-sourcemap.h
new file mode 100644
index 00000000000000..83293ae2050da4
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-module-sourcemap.h
@@ -0,0 +1,83 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_MODULE_SOURCEMAP_H_
+#define V8_WASM_WASM_MODULE_SOURCEMAP_H_
+
+#include <string>
+#include <vector>
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+// This class decodes and manages the source map generated by a WebAssembly
+// toolchain (e.g. Emscripten). The implementation mostly complies with the
+// specification (https://sourcemaps.info/spec.html), with the following
+// accommodations:
+// 1. The "names" field is an empty array in current WASM source maps, hence
+// it is not handled;
+// 2. Semicolons divide the "mappings" field into groups, each of which
+// represents a line in the generated code. As *.wasm is a binary format,
+// there is only one "line" of generated code, and ";" is treated as an
+// illegal symbol in "mappings".
+// 3. Though each comma-separated segment may contain 1, 4 or 5 fields, we
+// only consider "mappings" with 4 fields, i.e. start column in the generated
+// code, index into the "sources" field, start line in the source code, and
+// start column in the source code.
+class V8_EXPORT_PRIVATE WasmModuleSourceMap {
+ public:
+  WasmModuleSourceMap(v8::Isolate* v8_isolate,
+                      v8::Local<v8::String> src_map_str);
+
+  // Member valid_ is true only if the source map complies with the
+  // specification and was decoded successfully.
+  bool IsValid() const { return valid_; }
+
+  // Given a function located at [start, end) in the WASM module, checks
+  // whether this function has corresponding source code.
+  bool HasSource(size_t start, size_t end) const;
+
+  // Given a function's base address {start} and an address {addr} inside it,
+  // checks whether the address can be mapped to an offset in this function.
+  // For example, we have the following memory layout for WASM functions, foo
+  // and bar, and O1, O2, O3 and O4 are the decoded offsets of source map:
+  //
+  // O1 --- O2 ----- O3 ----- O4
+  // --->|<-foo->|<--bar->|<-----
+  // --------------A-------------
+  //
+  // Address A of function bar would be mapped to its nearest lower offset,
+  // O2. However, O2 is an address inside function foo, so this mapping is
+  // treated as invalid.
+  bool HasValidEntry(size_t start, size_t addr) const;
+
+  // Looks up the line number in the source file that corresponds to the
+  // given offset. It should only be called after the current function has
+  // been checked with IsValid, HasSource and HasValidEntry.
+  size_t GetSourceLine(size_t wasm_offset) const;
+
+  // Looks up the source file name that corresponds to the given offset. It
+  // should only be called after the current function has been checked with
+  // IsValid, HasSource and HasValidEntry.
+  std::string GetFilename(size_t wasm_offset) const;
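+
+  // A typical (illustrative) lookup sequence over a map {map}:
+  //   if (map.IsValid() && map.HasSource(start, end) &&
+  //       map.HasValidEntry(start, addr)) {
+  //     size_t line = map.GetSourceLine(addr);
+  //     std::string file = map.GetFilename(addr);
+  //   }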
+
+ private:
+  std::vector<size_t> offsets;
+  std::vector<std::string> filenames;
+  std::vector<size_t> file_idxs;
+  std::vector<size_t> source_row;
+  // As the column number in the source file is always 0 in source maps
+  // generated by the WebAssembly toolchain, we do not store this value.
+
+  bool valid_ = false;
+
+  bool DecodeMapping(const std::string& s);
+};
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+#endif  // V8_WASM_WASM_MODULE_SOURCEMAP_H_
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 05057301ed6472..5a10368a8b6f16 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -113,13 +113,156 @@ bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
              v8::Utils::ToLocal(isolate->factory()->empty_string()));
 }
 
+namespace {
+
+// Converts the given {type} into a string representation that can be used in
+// reflective functions. Should be kept in sync with the {GetValueType} helper.
+Handle<String> ToValueTypeString(Isolate* isolate, ValueType type) {
+  Factory* factory = isolate->factory();
+  Handle<String> string;
+  switch (type) {
+    case i::wasm::kWasmI32: {
+      string = factory->InternalizeUtf8String("i32");
+      break;
+    }
+    case i::wasm::kWasmI64: {
+      string = factory->InternalizeUtf8String("i64");
+      break;
+    }
+    case i::wasm::kWasmF32: {
+      string = factory->InternalizeUtf8String("f32");
+      break;
+    }
+    case i::wasm::kWasmF64: {
+      string = factory->InternalizeUtf8String("f64");
+      break;
+    }
+    case i::wasm::kWasmAnyRef: {
+      string = factory->InternalizeUtf8String("anyref");
+      break;
+    }
+    case i::wasm::kWasmFuncRef: {
+      string = factory->InternalizeUtf8String("anyfunc");
+      break;
+    }
+    case i::wasm::kWasmExnRef: {
+      string = factory->InternalizeUtf8String("exnref");
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  return string;
+}
+
+}  // namespace
+
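+// Builds a JS object of the shape used by the type-reflection proposal, e.g.
+// {parameters: ["i32", "i64"], results: ["f64"]} for an (i32, i64) -> f64
+// signature.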
+Handle<JSObject> GetTypeForFunction(Isolate* isolate, FunctionSig* sig) {
+  Factory* factory = isolate->factory();
+
+  // Extract values for the {ValueType[]} arrays.
+  int param_index = 0;
+  int param_count = static_cast<int>(sig->parameter_count());
+  Handle<FixedArray> param_values = factory->NewFixedArray(param_count);
+  for (ValueType type : sig->parameters()) {
+    Handle<String> type_value = ToValueTypeString(isolate, type);
+    param_values->set(param_index++, *type_value);
+  }
+  int result_index = 0;
+  int result_count = static_cast<int>(sig->return_count());
+  Handle<FixedArray> result_values = factory->NewFixedArray(result_count);
+  for (ValueType type : sig->returns()) {
+    Handle<String> type_value = ToValueTypeString(isolate, type);
+    result_values->set(result_index++, *type_value);
+  }
+
+  // Create the resulting {FunctionType} object.
+  Handle<JSFunction> object_function = isolate->object_function();
+  Handle<JSObject> object = factory->NewJSObject(object_function);
+  Handle<JSArray> params = factory->NewJSArrayWithElements(param_values);
+  Handle<JSArray> results = factory->NewJSArrayWithElements(result_values);
+  Handle<String> params_string = factory->InternalizeUtf8String("parameters");
+  Handle<String> results_string = factory->InternalizeUtf8String("results");
+  JSObject::AddProperty(isolate, object, params_string, params, NONE);
+  JSObject::AddProperty(isolate, object, results_string, results, NONE);
+
+  return object;
+}
+
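+// Builds a JS object {mutable: <bool>, value: <type string>} for a global.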
+Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
+                                  ValueType type) {
+  Factory* factory = isolate->factory();
+
+  Handle<JSFunction> object_function = isolate->object_function();
+  Handle<JSObject> object = factory->NewJSObject(object_function);
+  Handle<String> mutable_string = factory->InternalizeUtf8String("mutable");
+  Handle<String> value_string = factory->InternalizeUtf8String("value");
+  JSObject::AddProperty(isolate, object, mutable_string,
+                        factory->ToBoolean(is_mutable), NONE);
+  JSObject::AddProperty(isolate, object, value_string,
+                        ToValueTypeString(isolate, type), NONE);
+
+  return object;
+}
+
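+// Builds a JS object {minimum: <number>, maximum?: <number>}; "maximum" is
+// only added when a maximum size is known.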
+Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
+                                  base::Optional<uint32_t> max_size) {
+  Factory* factory = isolate->factory();
+
+  Handle<JSFunction> object_function = isolate->object_function();
+  Handle<JSObject> object = factory->NewJSObject(object_function);
+  Handle<String> minimum_string = factory->InternalizeUtf8String("minimum");
+  Handle<String> maximum_string = factory->InternalizeUtf8String("maximum");
+  JSObject::AddProperty(isolate, object, minimum_string,
+                        factory->NewNumberFromUint(min_size), NONE);
+  if (max_size.has_value()) {
+    JSObject::AddProperty(isolate, object, maximum_string,
+                          factory->NewNumberFromUint(max_size.value()), NONE);
+  }
+
+  return object;
+}
+
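+// Builds a JS object {element, minimum, maximum?} describing a table type.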
+Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
+                                 uint32_t min_size,
+                                 base::Optional<uint32_t> max_size) {
+  Factory* factory = isolate->factory();
+
+  Handle<String> element;
+  if (type == ValueType::kWasmFuncRef) {
+    // TODO(wasm): We should define the "anyfunc" string in one central place
+    // and then use that constant everywhere.
+    element = factory->InternalizeUtf8String("anyfunc");
+  } else {
+    DCHECK(WasmFeaturesFromFlags().anyref && type == ValueType::kWasmAnyRef);
+    element = factory->InternalizeUtf8String("anyref");
+  }
+
+  Handle<JSFunction> object_function = isolate->object_function();
+  Handle<JSObject> object = factory->NewJSObject(object_function);
+  Handle<String> element_string = factory->InternalizeUtf8String("element");
+  Handle<String> minimum_string = factory->InternalizeUtf8String("minimum");
+  Handle<String> maximum_string = factory->InternalizeUtf8String("maximum");
+  JSObject::AddProperty(isolate, object, element_string, element, NONE);
+  JSObject::AddProperty(isolate, object, minimum_string,
+                        factory->NewNumberFromUint(min_size), NONE);
+  if (max_size.has_value()) {
+    JSObject::AddProperty(isolate, object, maximum_string,
+                          factory->NewNumberFromUint(max_size.value()), NONE);
+  }
+
+  return object;
+}
+
 Handle<JSArray> GetImports(Isolate* isolate,
                            Handle<WasmModuleObject> module_object) {
+  auto enabled_features = i::wasm::WasmFeaturesFromIsolate(isolate);
   Factory* factory = isolate->factory();
 
   Handle<String> module_string = factory->InternalizeUtf8String("module");
   Handle<String> name_string = factory->InternalizeUtf8String("name");
   Handle<String> kind_string = factory->InternalizeUtf8String("kind");
+  Handle<String> type_string = factory->InternalizeUtf8String("type");
 
   Handle<String> function_string = factory->InternalizeUtf8String("function");
   Handle<String> table_string = factory->InternalizeUtf8String("table");
@@ -145,17 +288,43 @@ Handle<JSArray> GetImports(Isolate* isolate,
     Handle<JSObject> entry = factory->NewJSObject(object_function);
 
     Handle<String> import_kind;
+    Handle<JSObject> type_value;
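+    // With type reflection enabled, each entry additionally carries a "type"
+    // property describing the imported entity.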
     switch (import.kind) {
       case kExternalFunction:
+        if (enabled_features.type_reflection) {
+          auto& func = module->functions[import.index];
+          type_value = GetTypeForFunction(isolate, func.sig);
+        }
         import_kind = function_string;
         break;
       case kExternalTable:
+        if (enabled_features.type_reflection) {
+          auto& table = module->tables[import.index];
+          base::Optional<uint32_t> maximum_size;
+          if (table.has_maximum_size) maximum_size.emplace(table.maximum_size);
+          type_value = GetTypeForTable(isolate, table.type, table.initial_size,
+                                       maximum_size);
+        }
         import_kind = table_string;
         break;
       case kExternalMemory:
+        if (enabled_features.type_reflection) {
+          DCHECK_EQ(0, import.index);  // Only one memory supported.
+          base::Optional<uint32_t> maximum_size;
+          if (module->has_maximum_pages) {
+            maximum_size.emplace(module->maximum_pages);
+          }
+          type_value =
+              GetTypeForMemory(isolate, module->initial_pages, maximum_size);
+        }
         import_kind = memory_string;
         break;
       case kExternalGlobal:
+        if (enabled_features.type_reflection) {
+          auto& global = module->globals[import.index];
+          type_value =
+              GetTypeForGlobal(isolate, global.mutability, global.type);
+        }
         import_kind = global_string;
         break;
       case kExternalException:
@@ -178,6 +347,9 @@ Handle<JSArray> GetImports(Isolate* isolate,
     JSObject::AddProperty(isolate, entry, name_string,
                           import_name.ToHandleChecked(), NONE);
     JSObject::AddProperty(isolate, entry, kind_string, import_kind, NONE);
+    if (!type_value.is_null()) {
+      JSObject::AddProperty(isolate, entry, type_string, type_value, NONE);
+    }
 
     storage->set(index, *entry);
   }
@@ -187,10 +359,12 @@ Handle<JSArray> GetImports(Isolate* isolate,
 
 Handle<JSArray> GetExports(Isolate* isolate,
                            Handle<WasmModuleObject> module_object) {
+  auto enabled_features = i::wasm::WasmFeaturesFromIsolate(isolate);
   Factory* factory = isolate->factory();
 
   Handle<String> name_string = factory->InternalizeUtf8String("name");
   Handle<String> kind_string = factory->InternalizeUtf8String("kind");
+  Handle<String> type_string = factory->InternalizeUtf8String("type");
 
   Handle<String> function_string = factory->InternalizeUtf8String("function");
   Handle<String> table_string = factory->InternalizeUtf8String("table");
@@ -214,17 +388,43 @@ Handle<JSArray> GetExports(Isolate* isolate,
     const WasmExport& exp = module->export_table[index];
 
     Handle<String> export_kind;
+    Handle<JSObject> type_value;
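+    // As for imports, a "type" property is attached when type reflection is
+    // enabled.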
     switch (exp.kind) {
       case kExternalFunction:
+        if (enabled_features.type_reflection) {
+          auto& func = module->functions[exp.index];
+          type_value = GetTypeForFunction(isolate, func.sig);
+        }
         export_kind = function_string;
         break;
       case kExternalTable:
+        if (enabled_features.type_reflection) {
+          auto& table = module->tables[exp.index];
+          base::Optional<uint32_t> maximum_size;
+          if (table.has_maximum_size) maximum_size.emplace(table.maximum_size);
+          type_value = GetTypeForTable(isolate, table.type, table.initial_size,
+                                       maximum_size);
+        }
         export_kind = table_string;
         break;
       case kExternalMemory:
+        if (enabled_features.type_reflection) {
+          DCHECK_EQ(0, exp.index);  // Only one memory supported.
+          base::Optional<uint32_t> maximum_size;
+          if (module->has_maximum_pages) {
+            maximum_size.emplace(module->maximum_pages);
+          }
+          type_value =
+              GetTypeForMemory(isolate, module->initial_pages, maximum_size);
+        }
         export_kind = memory_string;
         break;
       case kExternalGlobal:
+        if (enabled_features.type_reflection) {
+          auto& global = module->globals[exp.index];
+          type_value =
+              GetTypeForGlobal(isolate, global.mutability, global.type);
+        }
         export_kind = global_string;
         break;
       case kExternalException:
@@ -243,6 +443,9 @@ Handle<JSArray> GetExports(Isolate* isolate,
     JSObject::AddProperty(isolate, entry, name_string,
                           export_name.ToHandleChecked(), NONE);
     JSObject::AddProperty(isolate, entry, kind_string, export_kind, NONE);
+    if (!type_value.is_null()) {
+      JSObject::AddProperty(isolate, entry, type_string, type_value, NONE);
+    }
 
     storage->set(index, *entry);
   }
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 7dea208d8e614e..69c57725de3c87 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -7,6 +7,7 @@
 
 #include <memory>
 
+#include "src/base/optional.h"
 #include "src/common/globals.h"
 #include "src/handles/handles.h"
 #include "src/utils/vector.h"
@@ -301,13 +302,19 @@ V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes(
 V8_EXPORT_PRIVATE bool IsWasmCodegenAllowed(Isolate* isolate,
                                             Handle<Context> context);
 
-V8_EXPORT_PRIVATE Handle<JSArray> GetImports(Isolate* isolate,
-                                             Handle<WasmModuleObject> module);
-V8_EXPORT_PRIVATE Handle<JSArray> GetExports(Isolate* isolate,
-                                             Handle<WasmModuleObject> module);
-V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
-    Isolate* isolate, Handle<WasmModuleObject> module, Handle<String> name,
-    ErrorThrower* thrower);
+Handle<JSObject> GetTypeForFunction(Isolate* isolate, FunctionSig* sig);
+Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
+                                  ValueType type);
+Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
+                                  base::Optional<uint32_t> max_size);
+Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
+                                 uint32_t min_size,
+                                 base::Optional<uint32_t> max_size);
+Handle<JSArray> GetImports(Isolate* isolate, Handle<WasmModuleObject> module);
+Handle<JSArray> GetExports(Isolate* isolate, Handle<WasmModuleObject> module);
+Handle<JSArray> GetCustomSections(Isolate* isolate,
+                                  Handle<WasmModuleObject> module,
+                                  Handle<String> name, ErrorThrower* thrower);
 
 // Decode local variable names from the names section. Return FixedArray of
 // FixedArray of <undefined|String>. The outer fixed array is indexed by the
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 7a80b7ea2ba291..66d3a2716e99b0 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -28,7 +28,7 @@ namespace v8 {
 namespace internal {
 
 OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
 OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
 OBJECT_CONSTRUCTORS_IMPL(WasmDebugInfo, Struct)
 OBJECT_CONSTRUCTORS_IMPL(WasmGlobalObject, JSObject)
@@ -42,7 +42,6 @@ NEVER_READ_ONLY_SPACE_IMPL(WasmDebugInfo)
 
 CAST_ACCESSOR(WasmDebugInfo)
 CAST_ACCESSOR(WasmExceptionObject)
-CAST_ACCESSOR(WasmExceptionTag)
 CAST_ACCESSOR(WasmExportedFunctionData)
 CAST_ACCESSOR(WasmGlobalObject)
 CAST_ACCESSOR(WasmInstanceObject)
@@ -261,9 +260,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
                    kManagedNativeAllocationsOffset)
 OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
                    kExceptionsTableOffset)
-ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_exported_functions, FixedArray,
-                   kWasmExportedFunctionsOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_external_functions, FixedArray,
+                   kWasmExternalFunctionsOffset)
 
 void WasmInstanceObject::clear_padding() {
   if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
@@ -325,7 +323,7 @@ SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
               kJumpTableOffsetOffset)
 SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
 ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
-ACCESSORS(WasmExportedFunctionData, wasm_call_target, Smi,
+ACCESSORS(WasmExportedFunctionData, wasm_call_target, Object,
           kWasmCallTargetOffset)
 SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
 
@@ -358,12 +356,17 @@ OBJECT_CONSTRUCTORS_IMPL(WasmCapiFunctionData, Struct)
 CAST_ACCESSOR(WasmCapiFunctionData)
 PRIMITIVE_ACCESSORS(WasmCapiFunctionData, call_target, Address,
                     kCallTargetOffset)
-PRIMITIVE_ACCESSORS(WasmCapiFunctionData, embedder_data, void*,
-                    kEmbedderDataOffset)
+ACCESSORS(WasmCapiFunctionData, embedder_data, Foreign, kEmbedderDataOffset)
 ACCESSORS(WasmCapiFunctionData, wrapper_code, Code, kWrapperCodeOffset)
 ACCESSORS(WasmCapiFunctionData, serialized_signature, PodArray<wasm::ValueType>,
           kSerializedSignatureOffset)
 
+// WasmExternalFunction
+WasmExternalFunction::WasmExternalFunction(Address ptr) : JSFunction(ptr) {
+  SLOW_DCHECK(IsWasmExternalFunction(*this));
+}
+CAST_ACCESSOR(WasmExternalFunction)
+
 // WasmIndirectFunctionTable
 OBJECT_CONSTRUCTORS_IMPL(WasmIndirectFunctionTable, Struct)
 CAST_ACCESSOR(WasmIndirectFunctionTable)
@@ -399,7 +402,7 @@ wasm::ValueType WasmTableObject::type() {
 bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
 
 // WasmExceptionTag
-SMI_ACCESSORS(WasmExceptionTag, index, kIndexOffset)
+TQ_SMI_ACCESSORS(WasmExceptionTag, index)
 
 // AsmWasmData
 ACCESSORS(AsmWasmData, managed_native_module, Managed<wasm::NativeModule>,
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index f44f8326ad67da..d9417943a843ed 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -207,36 +207,19 @@ enum DispatchTableElements : int {
 
 // static
 Handle<WasmModuleObject> WasmModuleObject::New(
-    Isolate* isolate, const wasm::WasmFeatures& enabled,
-    std::shared_ptr<const wasm::WasmModule> shared_module,
-    OwnedVector<const uint8_t> wire_bytes, Handle<Script> script,
-    Handle<ByteArray> asm_js_offset_table) {
-  // Create a new {NativeModule} first.
-  size_t code_size_estimate =
-      wasm::WasmCodeManager::EstimateNativeModuleCodeSize(shared_module.get());
-  auto native_module = isolate->wasm_engine()->NewNativeModule(
-      isolate, enabled, code_size_estimate,
-      wasm::NativeModule::kCanAllocateMoreMemory, std::move(shared_module));
-  native_module->SetWireBytes(std::move(wire_bytes));
-  native_module->SetRuntimeStubs(isolate);
-
-  // Delegate to the shared {WasmModuleObject::New} allocator.
-  Handle<WasmModuleObject> module_object =
-      New(isolate, std::move(native_module), script, code_size_estimate);
-  if (!asm_js_offset_table.is_null()) {
-    module_object->set_asm_js_offset_table(*asm_js_offset_table);
-  }
-  return module_object;
+    Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
+    Handle<Script> script) {
+  Handle<FixedArray> export_wrappers = isolate->factory()->NewFixedArray(0);
+  return New(isolate, std::move(native_module), script, export_wrappers);
 }
 
 // static
 Handle<WasmModuleObject> WasmModuleObject::New(
     Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
-    Handle<Script> script, size_t code_size_estimate) {
+    Handle<Script> script, Handle<FixedArray> export_wrappers) {
   const WasmModule* module = native_module->module();
-  int num_wrappers = MaxNumExportWrappers(module);
-  Handle<FixedArray> export_wrappers =
-      isolate->factory()->NewFixedArray(num_wrappers, AllocationType::kOld);
+  size_t code_size_estimate =
+      wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module);
   return New(isolate, std::move(native_module), script, export_wrappers,
              code_size_estimate);
 }
@@ -964,6 +947,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
 
   // Now we handle the funcref case.
   if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
+      WasmJSFunction::IsWasmJSFunction(*entry) ||
       WasmCapiFunction::IsWasmCapiFunction(*entry)) {
     return entry;
   }
@@ -980,7 +964,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
 
   // Check if we already compiled a wrapper for the function but did not store
   // it in the table slot yet.
-  entry = WasmInstanceObject::GetOrCreateWasmExportedFunction(isolate, instance,
+  entry = WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
                                                               function_index);
   entries->set(entry_index, *entry);
   return entry;
@@ -1726,9 +1710,6 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
       isolate->factory()->NewFixedArray(num_imported_functions);
   instance->set_imported_function_refs(*imported_function_refs);
 
-  Handle<Code> centry_stub = CodeFactory::CEntry(isolate);
-  instance->set_centry_stub(*centry_stub);
-
   instance->SetRawMemory(nullptr, 0);
   instance->set_isolate_root(isolate->isolate_root());
   instance->set_stack_limit_address(
@@ -1878,27 +1859,27 @@ bool WasmInstanceObject::InitTableEntries(Isolate* isolate,
                                dst, src, count);
 }
 
-MaybeHandle<WasmExportedFunction> WasmInstanceObject::GetWasmExportedFunction(
+MaybeHandle<WasmExternalFunction> WasmInstanceObject::GetWasmExternalFunction(
     Isolate* isolate, Handle<WasmInstanceObject> instance, int index) {
-  MaybeHandle<WasmExportedFunction> result;
-  if (instance->has_wasm_exported_functions()) {
-    Object val = instance->wasm_exported_functions().get(index);
+  MaybeHandle<WasmExternalFunction> result;
+  if (instance->has_wasm_external_functions()) {
+    Object val = instance->wasm_external_functions().get(index);
     if (!val.IsUndefined(isolate)) {
-      result = Handle<WasmExportedFunction>(WasmExportedFunction::cast(val),
+      result = Handle<WasmExternalFunction>(WasmExternalFunction::cast(val),
                                             isolate);
     }
   }
   return result;
 }
 
-Handle<WasmExportedFunction>
-WasmInstanceObject::GetOrCreateWasmExportedFunction(
+Handle<WasmExternalFunction>
+WasmInstanceObject::GetOrCreateWasmExternalFunction(
     Isolate* isolate, Handle<WasmInstanceObject> instance, int function_index) {
-  MaybeHandle<WasmExportedFunction> maybe_result =
-      WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
+  MaybeHandle<WasmExternalFunction> maybe_result =
+      WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
                                                   function_index);
 
-  Handle<WasmExportedFunction> result;
+  Handle<WasmExternalFunction> result;
   if (maybe_result.ToHandle(&result)) {
     return result;
   }
@@ -1923,27 +1904,27 @@ WasmInstanceObject::GetOrCreateWasmExportedFunction(
         isolate, function.sig, function.imported);
     module_object->export_wrappers().set(wrapper_index, *wrapper);
   }
-  result = WasmExportedFunction::New(
+  result = Handle<WasmExternalFunction>::cast(WasmExportedFunction::New(
       isolate, instance, function_index,
-      static_cast<int>(function.sig->parameter_count()), wrapper);
+      static_cast<int>(function.sig->parameter_count()), wrapper));
 
-  WasmInstanceObject::SetWasmExportedFunction(isolate, instance, function_index,
+  WasmInstanceObject::SetWasmExternalFunction(isolate, instance, function_index,
                                               result);
   return result;
 }
 
-void WasmInstanceObject::SetWasmExportedFunction(
+void WasmInstanceObject::SetWasmExternalFunction(
     Isolate* isolate, Handle<WasmInstanceObject> instance, int index,
-    Handle<WasmExportedFunction> val) {
+    Handle<WasmExternalFunction> val) {
   Handle<FixedArray> functions;
-  if (!instance->has_wasm_exported_functions()) {
-    // lazily-allocate the wasm exported functions.
+  if (!instance->has_wasm_external_functions()) {
+    // Lazily allocate the wasm external functions array.
     functions = isolate->factory()->NewFixedArray(
         static_cast<int>(instance->module()->functions.size()));
-    instance->set_wasm_exported_functions(*functions);
+    instance->set_wasm_external_functions(*functions);
   } else {
     functions =
-        Handle<FixedArray>(instance->wasm_exported_functions(), isolate);
+        Handle<FixedArray>(instance->wasm_external_functions(), isolate);
   }
   functions->set(index, *val);
 }
@@ -1968,8 +1949,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
         instance->module_object().native_module();
     // TODO(mstarzinger): Cache and reuse wrapper code.
     const wasm::WasmFeatures enabled = native_module->enabled_features();
-    auto resolved =
-        compiler::ResolveWasmImportCall(callable, sig, enabled.bigint);
+    auto resolved = compiler::ResolveWasmImportCall(callable, sig, enabled);
     compiler::WasmImportCallKind kind = resolved.first;
     callable = resolved.second;  // Update to ultimate target.
     DCHECK_NE(compiler::WasmImportCallKind::kLinkError, kind);
@@ -2183,13 +2163,13 @@ bool WasmCapiFunction::IsWasmCapiFunction(Object object) {
 }
 
 Handle<WasmCapiFunction> WasmCapiFunction::New(
-    Isolate* isolate, Address call_target, void* embedder_data,
+    Isolate* isolate, Address call_target, Handle<Foreign> embedder_data,
     Handle<PodArray<wasm::ValueType>> serialized_signature) {
   Handle<WasmCapiFunctionData> fun_data =
       Handle<WasmCapiFunctionData>::cast(isolate->factory()->NewStruct(
           WASM_CAPI_FUNCTION_DATA_TYPE, AllocationType::kOld));
   fun_data->set_call_target(call_target);
-  fun_data->set_embedder_data(embedder_data);
+  fun_data->set_embedder_data(*embedder_data);
   fun_data->set_serialized_signature(*serialized_signature);
   // TODO(jkummerow): Install a JavaScript wrapper. For now, calling
   // these functions directly is unsupported; they can only be called
@@ -2301,6 +2281,10 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
   if (sig_size > 0) {
     serialized_sig->copy_in(0, sig->all().begin(), sig_size);
   }
+  // TODO(mstarzinger): Think about caching and sharing the JS-to-JS wrappers
+  // per signature instead of compiling a new one for every instantiation.
+  Handle<Code> wrapper_code =
+      compiler::CompileJSToJSWrapper(isolate, sig).ToHandleChecked();
   Handle<WasmJSFunctionData> function_data =
       Handle<WasmJSFunctionData>::cast(isolate->factory()->NewStruct(
           WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
@@ -2308,9 +2292,7 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
   function_data->set_serialized_parameter_count(parameter_count);
   function_data->set_serialized_signature(*serialized_sig);
   function_data->set_callable(*callable);
-  // TODO(7742): Make this callable by using a proper wrapper code.
-  function_data->set_wrapper_code(
-      isolate->builtins()->builtin(Builtins::kIllegal));
+  function_data->set_wrapper_code(*wrapper_code);
   Handle<String> name = isolate->factory()->Function_string();
   if (callable->IsJSFunction()) {
     name = JSFunction::GetName(Handle<JSFunction>::cast(callable));
@@ -2319,6 +2301,7 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
   NewFunctionArgs args =
       NewFunctionArgs::ForWasm(name, function_data, function_map);
   Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
+  js_function->shared().set_internal_formal_parameter_count(parameter_count);
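+  // Keep the function's declared JS arity in sync with the wasm signature.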
   return Handle<WasmJSFunction>::cast(js_function);
 }
 
@@ -2361,6 +2344,11 @@ PodArray<wasm::ValueType> WasmCapiFunction::GetSerializedSignature() const {
   return shared().wasm_capi_function_data().serialized_signature();
 }
 
+bool WasmExternalFunction::IsWasmExternalFunction(Object object) {
+  return WasmExportedFunction::IsWasmExportedFunction(object) ||
+         WasmJSFunction::IsWasmJSFunction(object);
+}
+
 Handle<WasmExceptionTag> WasmExceptionTag::New(Isolate* isolate, int index) {
   Handle<WasmExceptionTag> result =
       Handle<WasmExceptionTag>::cast(isolate->factory()->NewStruct(
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 1200f7040aa9eb..c198a9bc637de5 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -41,6 +41,7 @@ class WasmCapiFunction;
 class WasmDebugInfo;
 class WasmExceptionTag;
 class WasmExportedFunction;
+class WasmExternalFunction;
 class WasmInstanceObject;
 class WasmJSFunction;
 class WasmModuleObject;
@@ -139,18 +140,14 @@ class WasmModuleObject : public JSObject {
   DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
                                 TORQUE_GENERATED_WASM_MODULE_OBJECT_FIELDS)
 
-  // Creates a new {WasmModuleObject} with a new {NativeModule} underneath.
-  V8_EXPORT_PRIVATE static Handle<WasmModuleObject> New(
-      Isolate* isolate, const wasm::WasmFeatures& enabled,
-      std::shared_ptr<const wasm::WasmModule> module,
-      OwnedVector<const uint8_t> wire_bytes, Handle<Script> script,
-      Handle<ByteArray> asm_js_offset_table);
-
   // Creates a new {WasmModuleObject} for an existing {NativeModule} that is
   // reference counted and might be shared between multiple Isolates.
   V8_EXPORT_PRIVATE static Handle<WasmModuleObject> New(
       Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
-      Handle<Script> script, size_t code_size_estimate);
+      Handle<Script> script);
+  V8_EXPORT_PRIVATE static Handle<WasmModuleObject> New(
+      Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
+      Handle<Script> script, Handle<FixedArray> export_wrappers);
   V8_EXPORT_PRIVATE static Handle<WasmModuleObject> New(
       Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
       Handle<Script> script, Handle<FixedArray> export_wrappers,
@@ -444,8 +441,7 @@ class WasmInstanceObject : public JSObject {
   DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
   DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
   DECL_OPTIONAL_ACCESSORS(exceptions_table, FixedArray)
-  DECL_ACCESSORS(centry_stub, Code)
-  DECL_OPTIONAL_ACCESSORS(wasm_exported_functions, FixedArray)
+  DECL_OPTIONAL_ACCESSORS(wasm_external_functions, FixedArray)
   DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
   DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
   DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
@@ -504,8 +500,7 @@ class WasmInstanceObject : public JSObject {
   V(kIndirectFunctionTablesOffset, kTaggedSize)                           \
   V(kManagedNativeAllocationsOffset, kTaggedSize)                         \
   V(kExceptionsTableOffset, kTaggedSize)                                  \
-  V(kCEntryStubOffset, kTaggedSize)                                       \
-  V(kWasmExportedFunctionsOffset, kTaggedSize)                            \
+  V(kWasmExternalFunctionsOffset, kTaggedSize)                            \
   V(kRealStackLimitAddressOffset, kSystemPointerSize)                     \
   V(kDataSegmentStartsOffset, kSystemPointerSize)                         \
   V(kDataSegmentSizesOffset, kSystemPointerSize)                          \
@@ -544,8 +539,7 @@ class WasmInstanceObject : public JSObject {
       kIndirectFunctionTablesOffset,
       kManagedNativeAllocationsOffset,
       kExceptionsTableOffset,
-      kCEntryStubOffset,
-      kWasmExportedFunctionsOffset};
+      kWasmExternalFunctionsOffset};
 
   V8_EXPORT_PRIVATE const wasm::WasmModule* module();
 
@@ -588,22 +582,22 @@ class WasmInstanceObject : public JSObject {
   // Iterates all fields in the object except the untagged fields.
   class BodyDescriptor;
 
-  static MaybeHandle<WasmExportedFunction> GetWasmExportedFunction(
+  static MaybeHandle<WasmExternalFunction> GetWasmExternalFunction(
       Isolate* isolate, Handle<WasmInstanceObject> instance, int index);
 
-  // Acquires the {WasmExportedFunction} for a given {function_index} from the
+  // Acquires the {WasmExternalFunction} for a given {function_index} from the
   // cache of the given {instance}, or creates a new {WasmExportedFunction} if
   // it does not exist yet. The new {WasmExportedFunction} is added to the
   // cache of the {instance} immediately.
-  V8_EXPORT_PRIVATE static Handle<WasmExportedFunction>
-  GetOrCreateWasmExportedFunction(Isolate* isolate,
+  V8_EXPORT_PRIVATE static Handle<WasmExternalFunction>
+  GetOrCreateWasmExternalFunction(Isolate* isolate,
                                   Handle<WasmInstanceObject> instance,
                                   int function_index);
 
-  static void SetWasmExportedFunction(Isolate* isolate,
+  static void SetWasmExternalFunction(Isolate* isolate,
                                       Handle<WasmInstanceObject> instance,
                                       int index,
-                                      Handle<WasmExportedFunction> val);
+                                      Handle<WasmExternalFunction> val);
 
   // Imports a constructed {WasmJSFunction} into the indirect function table of
   // this instance. Note that this might trigger wrapper compilation, since a
@@ -713,7 +707,7 @@ class WasmCapiFunction : public JSFunction {
   static bool IsWasmCapiFunction(Object object);
 
   static Handle<WasmCapiFunction> New(
-      Isolate* isolate, Address call_target, void* embedder_data,
+      Isolate* isolate, Address call_target, Handle<Foreign> embedder_data,
       Handle<PodArray<wasm::ValueType>> serialized_signature);
 
   Address GetHostCallTarget() const;
@@ -726,6 +720,19 @@ class WasmCapiFunction : public JSFunction {
   OBJECT_CONSTRUCTORS(WasmCapiFunction, JSFunction);
 };
 
+// Any external function that can be imported/exported in modules. This abstract
+// class just dispatches to the following concrete classes:
+//  - {WasmExportedFunction}: A proper Wasm function exported from a module.
+//  - {WasmJSFunction}: A function constructed via WebAssembly.Function in JS.
+// TODO(wasm): Potentially {WasmCapiFunction} will be added here as well.
+class WasmExternalFunction : public JSFunction {
+ public:
+  static bool IsWasmExternalFunction(Object object);
+
+  DECL_CAST(WasmExternalFunction)
+  OBJECT_CONSTRUCTORS(WasmExternalFunction, JSFunction);
+};
+
 class WasmIndirectFunctionTable : public Struct {
  public:
   DECL_PRIMITIVE_ACCESSORS(size, uint32_t)
@@ -757,7 +764,7 @@ class WasmIndirectFunctionTable : public Struct {
 class WasmCapiFunctionData : public Struct {
  public:
   DECL_PRIMITIVE_ACCESSORS(call_target, Address)
-  DECL_PRIMITIVE_ACCESSORS(embedder_data, void*)
+  DECL_ACCESSORS(embedder_data, Foreign)
   DECL_ACCESSORS(wrapper_code, Code)
   DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
 
@@ -769,7 +776,7 @@ class WasmCapiFunctionData : public Struct {
   DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
                                 TORQUE_GENERATED_WASM_CAPI_FUNCTION_DATA_FIELDS)
 
-  STATIC_ASSERT(kStartOfStrongFieldsOffset == kWrapperCodeOffset);
+  STATIC_ASSERT(kStartOfStrongFieldsOffset == kEmbedderDataOffset);
   using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
 
   OBJECT_CONSTRUCTORS(WasmCapiFunctionData, Struct);
@@ -785,7 +792,7 @@ class WasmExportedFunctionData : public Struct {
   DECL_INT_ACCESSORS(jump_table_offset)
   DECL_INT_ACCESSORS(function_index)
   DECL_ACCESSORS(c_wrapper_code, Object)
-  DECL_ACCESSORS(wasm_call_target, Smi)
+  DECL_ACCESSORS(wasm_call_target, Object)
   DECL_INT_ACCESSORS(packed_args_size)
 
   DECL_CAST(WasmExportedFunctionData)
@@ -914,7 +921,8 @@ class WasmDebugInfo : public Struct {
 // header. They are referenced by the following fields:
 //  - {WasmExceptionObject::exception_tag}  : The tag of the exception object.
 //  - {WasmInstanceObject::exceptions_table}: List of tags used by an instance.
-class WasmExceptionTag : public Struct {
+class WasmExceptionTag
+    : public TorqueGeneratedWasmExceptionTag<WasmExceptionTag, Struct> {
  public:
   V8_EXPORT_PRIVATE static Handle<WasmExceptionTag> New(Isolate* isolate,
                                                         int index);
@@ -924,14 +932,9 @@ class WasmExceptionTag : public Struct {
   // least one field, hence this also serves as a padding field for now.
   DECL_INT_ACCESSORS(index)
 
-  DECL_CAST(WasmExceptionTag)
   DECL_PRINTER(WasmExceptionTag)
-  DECL_VERIFIER(WasmExceptionTag)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
-                                TORQUE_GENERATED_WASM_EXCEPTION_TAG_FIELDS)
 
-  OBJECT_CONSTRUCTORS(WasmExceptionTag, Struct);
+  TQ_OBJECT_CONSTRUCTORS(WasmExceptionTag)
 };
 
 class AsmWasmData : public Struct {
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index d3fb4c42cf2c62..879da1445ba17c 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -10,6 +10,7 @@
 #include "src/codegen/signature.h"
 #include "src/execution/messages.h"
 #include "src/runtime/runtime.h"
+#include "src/wasm/wasm-features.h"
 
 namespace v8 {
 namespace internal {
@@ -229,11 +230,16 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
     CASE_F64x2_OP(Ne, "ne")
     CASE_I64x2_OP(Ne, "ne")
     CASE_SIMD_OP(Add, "add")
+    CASE_F64x2_OP(Add, "add")
     CASE_I64x2_OP(Add, "add")
     CASE_SIMD_OP(Sub, "sub")
+    CASE_F64x2_OP(Sub, "sub")
     CASE_I64x2_OP(Sub, "sub")
     CASE_SIMD_OP(Mul, "mul")
+    CASE_F64x2_OP(Mul, "mul")
     CASE_I64x2_OP(Mul, "mul")
+    CASE_F64x2_OP(Div, "div")
+    CASE_F32x4_OP(Div, "div")
     CASE_F64x2_OP(Splat, "splat")
     CASE_F64x2_OP(Lt, "lt")
     CASE_F64x2_OP(Le, "le")
@@ -244,7 +250,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
     CASE_F32x4_OP(AddHoriz, "add_horizontal")
     CASE_F32x4_OP(RecipApprox, "recip_approx")
     CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
+    CASE_F64x2_OP(Min, "min")
     CASE_F32x4_OP(Min, "min")
+    CASE_F64x2_OP(Max, "max")
     CASE_F32x4_OP(Max, "max")
     CASE_F32x4_OP(Lt, "lt")
     CASE_F32x4_OP(Le, "le")
@@ -267,7 +275,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
     CASE_SIMDI_OP(ExtractLane, "extract_lane")
     CASE_SIMDI_OP(ReplaceLane, "replace_lane")
     CASE_SIGN_OP(SIMDI, Min, "min")
+    CASE_SIGN_OP(I64x2, Min, "min")
     CASE_SIGN_OP(SIMDI, Max, "max")
+    CASE_SIGN_OP(I64x2, Max, "max")
     CASE_SIGN_OP(SIMDI, Lt, "lt")
     CASE_SIGN_OP(I64x2, Lt, "lt")
     CASE_SIGN_OP(SIMDI, Le, "le")
@@ -439,12 +449,13 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
   return os;
 }
 
-bool IsJSCompatibleSignature(const FunctionSig* sig, bool has_bigint_feature) {
-  if (sig->return_count() > 1) {
+bool IsJSCompatibleSignature(const FunctionSig* sig,
+                             const WasmFeatures& enabled_features) {
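+  // Multiple return values require the 'mv' feature; i64 values require the
+  // 'bigint' feature, which marshals them to/from JS BigInt.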
+  if (!enabled_features.mv && sig->return_count() > 1) {
     return false;
   }
   for (auto type : sig->all()) {
-    if (!has_bigint_feature && type == kWasmI64) {
+    if (!enabled_features.bigint && type == kWasmI64) {
       return false;
     }
 
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 22bd47d54b42f3..0b19d7452c352f 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -15,8 +15,10 @@ namespace internal {
 
 namespace wasm {
 
+struct WasmFeatures;
+
 std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
-bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
+bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
 
 // Control expressions and blocks.
 #define FOREACH_CONTROL_OPCODE(V)        \
@@ -335,6 +337,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
   V(I8x16Neg, 0xfd51, s_s)               \
   V(S1x16AnyTrue, 0xfd52, i_s)           \
   V(S1x16AllTrue, 0xfd53, i_s)           \
+  V(I8x16Shl, 0xfd54, s_si)              \
+  V(I8x16ShrS, 0xfd55, s_si)             \
+  V(I8x16ShrU, 0xfd56, s_si)             \
   V(I8x16Add, 0xfd57, s_ss)              \
   V(I8x16AddSaturateS, 0xfd58, s_ss)     \
   V(I8x16AddSaturateU, 0xfd59, s_ss)     \
@@ -349,6 +354,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
   V(I16x8Neg, 0xfd62, s_s)               \
   V(S1x8AnyTrue, 0xfd63, i_s)            \
   V(S1x8AllTrue, 0xfd64, i_s)            \
+  V(I16x8Shl, 0xfd65, s_si)              \
+  V(I16x8ShrS, 0xfd66, s_si)             \
+  V(I16x8ShrU, 0xfd67, s_si)             \
   V(I16x8Add, 0xfd68, s_ss)              \
   V(I16x8AddSaturateS, 0xfd69, s_ss)     \
   V(I16x8AddSaturateU, 0xfd6a, s_ss)     \
@@ -363,6 +371,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
   V(I32x4Neg, 0xfd73, s_s)               \
   V(S1x4AnyTrue, 0xfd74, i_s)            \
   V(S1x4AllTrue, 0xfd75, i_s)            \
+  V(I32x4Shl, 0xfd76, s_si)              \
+  V(I32x4ShrS, 0xfd77, s_si)             \
+  V(I32x4ShrU, 0xfd78, s_si)             \
   V(I32x4Add, 0xfd79, s_ss)              \
   V(I32x4Sub, 0xfd7c, s_ss)              \
   V(I32x4Mul, 0xfd7f, s_ss)              \
@@ -373,9 +384,16 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
   V(I64x2Neg, 0xfd84, s_s)               \
   V(S1x2AnyTrue, 0xfd85, i_s)            \
   V(S1x2AllTrue, 0xfd86, i_s)            \
+  V(I64x2Shl, 0xfd87, s_si)              \
+  V(I64x2ShrS, 0xfd88, s_si)             \
+  V(I64x2ShrU, 0xfd89, s_si)             \
   V(I64x2Add, 0xfd8a, s_ss)              \
   V(I64x2Sub, 0xfd8d, s_ss)              \
   V(I64x2Mul, 0xfd8c, s_ss)              \
+  V(I64x2MinS, 0xfd8e, s_ss)             \
+  V(I64x2MinU, 0xfd8f, s_ss)             \
+  V(I64x2MaxS, 0xfd90, s_ss)             \
+  V(I64x2MaxU, 0xfd91, s_ss)             \
   V(F32x4Abs, 0xfd95, s_s)               \
   V(F32x4Neg, 0xfd96, s_s)               \
   V(F32x4RecipApprox, 0xfd98, s_s)       \
@@ -383,26 +401,33 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
   V(F32x4Add, 0xfd9a, s_ss)              \
   V(F32x4Sub, 0xfd9b, s_ss)              \
   V(F32x4Mul, 0xfd9c, s_ss)              \
+  V(F32x4Div, 0xfd9d, s_ss)              \
   V(F32x4Min, 0xfd9e, s_ss)              \
   V(F32x4Max, 0xfd9f, s_ss)              \
   V(F64x2Abs, 0xfda0, s_s)               \
   V(F64x2Neg, 0xfda1, s_s)               \
+  V(F64x2Add, 0xfda5, s_ss)              \
+  V(F64x2Sub, 0xfda6, s_ss)              \
+  V(F64x2Mul, 0xfda7, s_ss)              \
+  V(F64x2Div, 0xfda8, s_ss)              \
+  V(F64x2Min, 0xfda9, s_ss)              \
+  V(F64x2Max, 0xfdaa, s_ss)              \
   V(I32x4SConvertF32x4, 0xfdab, s_s)     \
   V(I32x4UConvertF32x4, 0xfdac, s_s)     \
   V(F32x4SConvertI32x4, 0xfdaf, s_s)     \
   V(F32x4UConvertI32x4, 0xfdb0, s_s)     \
-  V(I8x16SConvertI16x8, 0xfdb1, s_ss)    \
-  V(I8x16UConvertI16x8, 0xfdb2, s_ss)    \
-  V(I16x8SConvertI32x4, 0xfdb3, s_ss)    \
-  V(I16x8UConvertI32x4, 0xfdb4, s_ss)    \
-  V(I16x8SConvertI8x16Low, 0xfdb5, s_s)  \
-  V(I16x8SConvertI8x16High, 0xfdb6, s_s) \
-  V(I16x8UConvertI8x16Low, 0xfdb7, s_s)  \
-  V(I16x8UConvertI8x16High, 0xfdb8, s_s) \
-  V(I32x4SConvertI16x8Low, 0xfdb9, s_s)  \
-  V(I32x4SConvertI16x8High, 0xfdba, s_s) \
-  V(I32x4UConvertI16x8Low, 0xfdbb, s_s)  \
-  V(I32x4UConvertI16x8High, 0xfdbc, s_s) \
+  V(I8x16SConvertI16x8, 0xfdc6, s_ss)    \
+  V(I8x16UConvertI16x8, 0xfdc7, s_ss)    \
+  V(I16x8SConvertI32x4, 0xfdc8, s_ss)    \
+  V(I16x8UConvertI32x4, 0xfdc9, s_ss)    \
+  V(I16x8SConvertI8x16Low, 0xfdca, s_s)  \
+  V(I16x8SConvertI8x16High, 0xfdcb, s_s) \
+  V(I16x8UConvertI8x16Low, 0xfdcc, s_s)  \
+  V(I16x8UConvertI8x16High, 0xfdcd, s_s) \
+  V(I32x4SConvertI16x8Low, 0xfdce, s_s)  \
+  V(I32x4SConvertI16x8High, 0xfdcf, s_s) \
+  V(I32x4UConvertI16x8Low, 0xfdd0, s_s)  \
+  V(I32x4UConvertI16x8High, 0xfdd1, s_s) \
   V(I16x8AddHoriz, 0xfdbd, s_ss)         \
   V(I32x4AddHoriz, 0xfdbe, s_ss)         \
   V(F32x4AddHoriz, 0xfdbf, s_ss)
@@ -413,19 +438,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
   V(I32x4ExtractLane, 0xfd0d, _)                 \
   V(I64x2ExtractLane, 0xfd10, _)                 \
   V(F32x4ExtractLane, 0xfd13, _)                 \
-  V(F64x2ExtractLane, 0xfd16, _)                 \
-  V(I8x16Shl, 0xfd54, _)                         \
-  V(I8x16ShrS, 0xfd55, _)                        \
-  V(I8x16ShrU, 0xfd56, _)                        \
-  V(I16x8Shl, 0xfd65, _)                         \
-  V(I16x8ShrS, 0xfd66, _)                        \
-  V(I16x8ShrU, 0xfd67, _)                        \
-  V(I32x4Shl, 0xfd76, _)                         \
-  V(I32x4ShrS, 0xfd77, _)                        \
-  V(I32x4ShrU, 0xfd78, _)                        \
-  V(I64x2Shl, 0xfd87, _)                         \
-  V(I64x2ShrS, 0xfd88, _)                        \
-  V(I64x2ShrU, 0xfd89, _)
+  V(F64x2ExtractLane, 0xfd16, _)
 
 #define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
   V(I8x16ReplaceLane, 0xfd07, _)                 \
@@ -678,6 +691,8 @@ struct WasmInitExpr {
       val.global_index = index;
     } else if (kind == kRefFuncConst) {
       val.function_index = index;
+    } else if (kind == kRefNullConst) {
+      // Nothing to do: a ref.null initializer carries no payload.
     } else {
       // For the other types, the other initializers should be used.
       UNREACHABLE();
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index a20b2f115a1458..81460b9fe29912 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -625,12 +625,17 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
   Handle<Script> script =
       CreateWasmScript(isolate, wire_bytes, module->source_map_url);
 
-  OwnedVector<uint8_t> wire_bytes_copy =
-      OwnedVector<uint8_t>::Of(wire_bytes_vec);
+  auto shared_native_module = isolate->wasm_engine()->NewNativeModule(
+      isolate, enabled_features, std::move(decode_result.value()));
+  shared_native_module->SetWireBytes(OwnedVector<uint8_t>::Of(wire_bytes_vec));
+  shared_native_module->SetRuntimeStubs(isolate);
+
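+  // Compile the JS-to-wasm wrappers up front so that they can be passed to
+  // WasmModuleObject::New as the export wrappers.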
+  Handle<FixedArray> export_wrappers;
+  CompileJsToWasmWrappers(isolate, shared_native_module->module(),
+                          &export_wrappers);
 
   Handle<WasmModuleObject> module_object = WasmModuleObject::New(
-      isolate, enabled_features, std::move(decode_result).value(),
-      std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
+      isolate, std::move(shared_native_module), script, export_wrappers);
   NativeModule* native_module = module_object->native_module();
 
   NativeModuleDeserializer deserializer(native_module);
@@ -639,9 +644,6 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
   Reader reader(data + kVersionSize);
   if (!deserializer.Read(&reader)) return {};
 
-  CompileJsToWasmWrappers(isolate, native_module->module(),
-                          handle(module_object->export_wrappers(), isolate));
-
   // Log the code within the generated module for profiling.
   native_module->LogWasmCodes(isolate);
 
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index e17d34e36fcf3e..44abd7144596a5 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -321,23 +321,6 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
             break;
           }
 
-          case kExprI8x16Shl:
-          case kExprI8x16ShrS:
-          case kExprI8x16ShrU:
-          case kExprI16x8Shl:
-          case kExprI16x8ShrS:
-          case kExprI16x8ShrU:
-          case kExprI32x4Shl:
-          case kExprI32x4ShrS:
-          case kExprI32x4ShrU:
-          case kExprI64x2Shl:
-          case kExprI64x2ShrS:
-          case kExprI64x2ShrU: {
-            SimdShiftImmediate<Decoder::kNoValidate> imm(&i, i.pc());
-            os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.shift;
-            break;
-          }
-
             FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE) {
               os << WasmOpcodes::OpcodeName(opcode);
               break;
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index e2b66253f54ab4..df72864c5a9b12 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -322,13 +322,23 @@ class ZoneList final {
 template <typename T>
 using ZonePtrList = ZoneList<T*>;
 
-template <typename T>
-class ScopedPtrList final {
+// ScopedList is a scope-lifetime list with a std::vector backing that can be
+// re-used between ScopedLists. Note that a ScopedList in an outer scope cannot
+// add any entries if there is a ScopedList with the same backing in an inner
+// scope.
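+//
+// A minimal usage sketch (hypothetical names; assumes Statement* elements):
+//
+//   std::vector<void*> buffer;
+//   ScopedPtrList<Statement> outer(&buffer);
+//   outer.Add(a);  // OK: outer owns the tail of the buffer.
+//   {
+//     ScopedPtrList<Statement> inner(&buffer);
+//     inner.Add(b);  // OK: inner now owns the tail.
+//     // outer.Add(c) here would interleave entries and hit a DCHECK.
+//   }               // inner rewinds its entries on destruction.
+//   outer.Add(c);   // OK again once inner has rewound.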
+template <typename T, typename TBacking = T>
+class ScopedList final {
+  // The backing type is either the same as the list's element type or, for
+  // pointer element types, a void* backing store.
+  STATIC_ASSERT((std::is_same<TBacking, T>::value) ||
+                (std::is_same<TBacking, void*>::value &&
+                 std::is_pointer<T>::value));
+
  public:
-  explicit ScopedPtrList(std::vector<void*>* buffer)
+  explicit ScopedList(std::vector<TBacking>* buffer)
       : buffer_(*buffer), start_(buffer->size()), end_(buffer->size()) {}
 
-  ~ScopedPtrList() { Rewind(); }
+  ~ScopedList() { Rewind(); }
 
   void Rewind() {
     DCHECK_EQ(buffer_.size(), end_);
@@ -336,7 +346,7 @@ class ScopedPtrList final {
     end_ = start_;
   }
 
-  void MergeInto(ScopedPtrList* parent) {
+  void MergeInto(ScopedList* parent) {
     DCHECK_EQ(parent->end_, start_);
     parent->end_ = end_;
     start_ = end_;
@@ -344,38 +354,46 @@ class ScopedPtrList final {
   }
 
   int length() const { return static_cast<int>(end_ - start_); }
-  T* at(int i) const {
+
+  const T& at(int i) const {
     size_t index = start_ + i;
     DCHECK_LE(start_, index);
     DCHECK_LT(index, buffer_.size());
-    return reinterpret_cast<T*>(buffer_[index]);
+    return *reinterpret_cast<T*>(&buffer_[index]);
   }
 
-  void CopyTo(ZonePtrList<T>* target, Zone* zone) const {
+  T& at(int i) {
+    size_t index = start_ + i;
+    DCHECK_LE(start_, index);
+    DCHECK_LT(index, buffer_.size());
+    return *reinterpret_cast<T*>(&buffer_[index]);
+  }
+
+  void CopyTo(ZoneList<T>* target, Zone* zone) const {
     DCHECK_LE(end_, buffer_.size());
     // Make sure we don't reference absent elements below.
     if (length() == 0) return;
     target->Initialize(length(), zone);
-    T** data = reinterpret_cast<T**>(&buffer_[start_]);
-    target->AddAll(Vector<T*>(data, length()), zone);
+    T* data = reinterpret_cast<T*>(&buffer_[start_]);
+    target->AddAll(Vector<T>(data, length()), zone);
   }
 
-  Vector<T*> CopyTo(Zone* zone) {
+  Vector<T> CopyTo(Zone* zone) {
     DCHECK_LE(end_, buffer_.size());
-    T** data = zone->NewArray<T*>(length());
+    T* data = zone->NewArray<T>(length());
     if (length() != 0) {
-      MemCopy(data, &buffer_[start_], length() * sizeof(T*));
+      MemCopy(data, &buffer_[start_], length() * sizeof(T));
     }
-    return Vector<T*>(data, length());
+    return Vector<T>(data, length());
   }
 
-  void Add(T* value) {
+  void Add(const T& value) {
     DCHECK_EQ(buffer_.size(), end_);
     buffer_.push_back(value);
     ++end_;
   }
 
-  void AddAll(const ZonePtrList<T>& list) {
+  void AddAll(const ZoneList<T>& list) {
     DCHECK_EQ(buffer_.size(), end_);
     buffer_.reserve(buffer_.size() + list.length());
     for (int i = 0; i < list.length(); i++) {
@@ -384,20 +402,23 @@ class ScopedPtrList final {
     end_ += list.length();
   }
 
-  using iterator = T**;
+  using iterator = T*;
   inline iterator begin() const {
-    return reinterpret_cast<T**>(buffer_.data() + start_);
+    return reinterpret_cast<T*>(buffer_.data() + start_);
   }
   inline iterator end() const {
-    return reinterpret_cast<T**>(buffer_.data() + end_);
+    return reinterpret_cast<T*>(buffer_.data() + end_);
   }
 
  private:
-  std::vector<void*>& buffer_;
+  std::vector<TBacking>& buffer_;
   size_t start_;
   size_t end_;
 };
 
+template <typename T>
+using ScopedPtrList = ScopedList<T*, void*>;
+
 using ZoneHashMap = base::PointerTemplateHashMapImpl<ZoneAllocationPolicy>;
 
 using CustomMatcherZoneHashMap =
diff --git a/deps/v8/test/OWNERS b/deps/v8/test/OWNERS
index 852d438bb0a884..3c70cea2fd5e6a 100644
--- a/deps/v8/test/OWNERS
+++ b/deps/v8/test/OWNERS
@@ -1 +1 @@
-file://COMMON_OWNERS
+file:../COMMON_OWNERS
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 32a766736f4690..d0934c99773456 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -4,6 +4,13 @@
 
 import("../../gni/v8.gni")
 
+config("cctest_config") {
+  # Work around a bug in the gold linker.
+  if (use_gold && target_cpu == "x86") {
+    ldflags = [ "-Wl,--icf=none" ]
+  }
+}
+
 v8_executable("cctest") {
   testonly = true
 
@@ -28,6 +35,7 @@ v8_executable("cctest") {
   configs = [
     "../..:external_config",
     "../..:internal_config_base",
+    ":cctest_config",
   ]
 
   ldflags = []
@@ -188,6 +196,7 @@ v8_source_set("cctest_sources") {
     "test-conversions.cc",
     "test-cpu-profiler.cc",
     "test-date.cc",
+    "test-debug-helper.cc",
     "test-debug.cc",
     "test-decls.cc",
     "test-deoptimization.cc",
@@ -313,11 +322,15 @@ v8_source_set("cctest_sources") {
       "test-javascript-arm64.cc",
       "test-js-arm64-variables.cc",
       "test-macro-assembler-arm64.cc",
+      "test-pointer-auth-arm64.cc",
       "test-poison-disasm-arm64.cc",
       "test-sync-primitives-arm64.cc",
       "test-utils-arm64.cc",
       "test-utils-arm64.h",
     ]
+    if (is_win) {
+      sources += [ "test-stack-unwinding-win64.cc" ]
+    }
   } else if (v8_current_cpu == "x86") {
     sources += [  ### gcmole(arch:ia32) ###
       "test-assembler-ia32.cc",
@@ -356,7 +369,7 @@ v8_source_set("cctest_sources") {
       "test-macro-assembler-x64.cc",
     ]
     if (is_win) {
-      sources += [ "test-stack-unwinding-x64.cc" ]
+      sources += [ "test-stack-unwinding-win64.cc" ]
     }
   } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
     sources += [  ### gcmole(arch:ppc) ###
@@ -382,6 +395,7 @@ v8_source_set("cctest_sources") {
     "../..:v8_libbase",
     "../..:v8_libplatform",
     "../..:wasm_module_runner",
+    "../../tools/debug_helper:v8_debug_helper",
     "//build/win:default_exe_manifest",
   ]
 
diff --git a/deps/v8/test/cctest/DEPS b/deps/v8/test/cctest/DEPS
index 909e60372e7ed5..7373012870d578 100644
--- a/deps/v8/test/cctest/DEPS
+++ b/deps/v8/test/cctest/DEPS
@@ -1,5 +1,6 @@
 include_rules = [
   "+src",
+  "+tools",
   "+torque-generated",
-  "+perfetto/tracing.h"
-]
+  "+perfetto",
+]
\ No newline at end of file
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 17d0096140984e..b1a7b5c1012225 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -518,6 +518,7 @@
   'test-regexp/MacroAssemblerNativeSimpleUC16': [SKIP],
   'test-regexp/MacroAssemblerNativeSuccess': [SKIP],
   'test-regexp/MacroAssemblerStackOverflow': [SKIP],
+  'test-regexp/Graph': [SKIP],
   'test-run-bytecode-graph-builder/*': [SKIP],
   'test-run-calls-to-external-references/*': [SKIP],
   'test-run-deopt/*': [SKIP],
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 62db9445ea54af..210fe2cf1c18d5 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -34,7 +34,7 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
                 main_zone(),
                 CSignature::New(main_zone(), MachineTypeForC<ReturnType>(),
                                 p...),
-                true),
+                CallDescriptor::kInitializeRootRegister),
             MachineType::PointerRepresentation(),
             InstructionSelector::SupportedMachineOperatorFlags(),
             InstructionSelector::AlignmentRequirements()) {}
@@ -51,7 +51,7 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
                 main_zone(),
                 CSignature::New(main_zone(), MachineTypeForC<ReturnType>(),
                                 p...),
-                true),
+                CallDescriptor::kInitializeRootRegister),
             MachineType::PointerRepresentation(),
             InstructionSelector::SupportedMachineOperatorFlags(),
             InstructionSelector::AlignmentRequirements()),
@@ -79,7 +79,7 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
  protected:
   Address Generate() override {
     if (code_.is_null()) {
-      Schedule* schedule = this->Export();
+      Schedule* schedule = this->ExportForTest();
       auto call_descriptor = this->call_descriptor();
       Graph* graph = this->graph();
       OptimizedCompilationInfo info(ArrayVector("testing"), main_zone(), kind_);
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.cc b/deps/v8/test/cctest/compiler/serializer-tester.cc
index 2b56e07d24b18b..338d1bcbfb1d2c 100644
--- a/deps/v8/test/cctest/compiler/serializer-tester.cc
+++ b/deps/v8/test/cctest/compiler/serializer-tester.cc
@@ -70,7 +70,11 @@ void CheckForSerializedInlinee(const char* source, int argc = 0,
   Handle<Object> g;
   CHECK(g_obj.ToHandle(&g));
 
+  CHECK_WITH_MSG(
+      g->IsJSFunction(),
+      "The return value of the outer function must be a function too");
   Handle<JSFunction> g_func = Handle<JSFunction>::cast(g);
+
   SharedFunctionInfoRef g_sfi(tester.broker(),
                               handle(g_func->shared(), tester.isolate()));
   FeedbackVectorRef g_fv(tester.broker(),
@@ -288,6 +292,34 @@ TEST(MergeJumpTargetEnvironment) {
       "f(); return f;");  // Two calls to f to make g() megamorhpic.
 }
 
+TEST(BoundFunctionTarget) {
+  CheckForSerializedInlinee(
+      "function apply(foo, arg) { return foo(arg); };"
+      "%EnsureFeedbackVectorForFunction(apply);"
+      "function test() {"
+      "  const lambda = (a) => a;"
+      "  %EnsureFeedbackVectorForFunction(lambda);"
+      "  let bound = apply.bind(null, lambda).bind(null, 42);"
+      "  %TurbofanStaticAssert(bound() == 42); return apply;"
+      "};"
+      "%EnsureFeedbackVectorForFunction(test);"
+      "test(); return test;");
+}
+
+TEST(BoundFunctionArguments) {
+  CheckForSerializedInlinee(
+      "function apply(foo, arg) { return foo(arg); };"
+      "%EnsureFeedbackVectorForFunction(apply);"
+      "function test() {"
+      "  const lambda = (a) => a;"
+      "  %EnsureFeedbackVectorForFunction(lambda);"
+      "  let bound = apply.bind(null, lambda).bind(null, 42);"
+      "  %TurbofanStaticAssert(bound() == 42); return lambda;"
+      "};"
+      "%EnsureFeedbackVectorForFunction(test);"
+      "test(); return test;");
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index 375c6586f3de43..9e6318ee88808a 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -24,7 +24,7 @@ using Variable = CodeAssemblerVariable;
 Node* SmiTag(CodeAssembler& m,  // NOLINT(runtime/references)
              Node* value) {
   int32_t constant_value;
-  if (m.ToInt32Constant(value, constant_value) &&
+  if (m.ToInt32Constant(value, &constant_value) &&
       Smi::IsValid(constant_value)) {
     return m.SmiConstant(Smi::FromInt(constant_value));
   }
@@ -89,7 +89,8 @@ TEST(SimpleCallRuntime1Arg) {
   Isolate* isolate(CcTest::InitIsolateOnce());
   CodeAssemblerTester asm_tester(isolate);
   CodeAssembler m(asm_tester.state());
-  Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+  TNode<Context> context =
+      m.HeapConstant(Handle<Context>(isolate->native_context()));
   Node* b = SmiTag(m, m.Int32Constant(0));
   m.Return(m.CallRuntime(Runtime::kIsSmi, context, b));
   FunctionTester ft(asm_tester.GenerateCode());
@@ -101,7 +102,8 @@ TEST(SimpleTailCallRuntime1Arg) {
   Isolate* isolate(CcTest::InitIsolateOnce());
   CodeAssemblerTester asm_tester(isolate);
   CodeAssembler m(asm_tester.state());
-  Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+  TNode<Context> context =
+      m.HeapConstant(Handle<Context>(isolate->native_context()));
   Node* b = SmiTag(m, m.Int32Constant(0));
   m.TailCallRuntime(Runtime::kIsSmi, context, b);
   FunctionTester ft(asm_tester.GenerateCode());
@@ -113,7 +115,8 @@ TEST(SimpleCallRuntime2Arg) {
   Isolate* isolate(CcTest::InitIsolateOnce());
   CodeAssemblerTester asm_tester(isolate);
   CodeAssembler m(asm_tester.state());
-  Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+  TNode<Context> context =
+      m.HeapConstant(Handle<Context>(isolate->native_context()));
   Node* a = SmiTag(m, m.Int32Constant(2));
   Node* b = SmiTag(m, m.Int32Constant(4));
   m.Return(m.CallRuntime(Runtime::kAdd, context, a, b));
@@ -125,7 +128,8 @@ TEST(SimpleTailCallRuntime2Arg) {
   Isolate* isolate(CcTest::InitIsolateOnce());
   CodeAssemblerTester asm_tester(isolate);
   CodeAssembler m(asm_tester.state());
-  Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+  TNode<Context> context =
+      m.HeapConstant(Handle<Context>(isolate->native_context()));
   Node* a = SmiTag(m, m.Int32Constant(2));
   Node* b = SmiTag(m, m.Int32Constant(4));
   m.TailCallRuntime(Runtime::kAdd, context, a, b);
@@ -225,7 +229,7 @@ TEST(VariableMerge1) {
   CodeAssembler m(asm_tester.state());
   Variable var1(&m, MachineRepresentation::kTagged);
   Label l1(&m), l2(&m), merge(&m);
-  Node* temp = m.Int32Constant(0);
+  TNode<Int32T> temp = m.Int32Constant(0);
   var1.Bind(temp);
   m.Branch(m.Int32Constant(1), &l1, &l2);
   m.Bind(&l1);
@@ -244,14 +248,14 @@ TEST(VariableMerge2) {
   CodeAssembler m(asm_tester.state());
   Variable var1(&m, MachineRepresentation::kTagged);
   Label l1(&m), l2(&m), merge(&m);
-  Node* temp = m.Int32Constant(0);
+  TNode<Int32T> temp = m.Int32Constant(0);
   var1.Bind(temp);
   m.Branch(m.Int32Constant(1), &l1, &l2);
   m.Bind(&l1);
   CHECK_EQ(var1.value(), temp);
   m.Goto(&merge);
   m.Bind(&l2);
-  Node* temp2 = m.Int32Constant(2);
+  TNode<Int32T> temp2 = m.Int32Constant(2);
   var1.Bind(temp2);
   CHECK_EQ(var1.value(), temp2);
   m.Goto(&merge);
@@ -266,7 +270,7 @@ TEST(VariableMerge3) {
   Variable var1(&m, MachineRepresentation::kTagged);
   Variable var2(&m, MachineRepresentation::kTagged);
   Label l1(&m), l2(&m), merge(&m);
-  Node* temp = m.Int32Constant(0);
+  TNode<Int32T> temp = m.Int32Constant(0);
   var1.Bind(temp);
   var2.Bind(temp);
   m.Branch(m.Int32Constant(1), &l1, &l2);
@@ -274,7 +278,7 @@ TEST(VariableMerge3) {
   CHECK_EQ(var1.value(), temp);
   m.Goto(&merge);
   m.Bind(&l2);
-  Node* temp2 = m.Int32Constant(2);
+  TNode<Int32T> temp2 = m.Int32Constant(2);
   var1.Bind(temp2);
   CHECK_EQ(var1.value(), temp2);
   m.Goto(&merge);
@@ -290,7 +294,7 @@ TEST(VariableMergeBindFirst) {
   CodeAssembler m(asm_tester.state());
   Variable var1(&m, MachineRepresentation::kTagged);
   Label l1(&m), l2(&m), merge(&m, &var1), end(&m);
-  Node* temp = m.Int32Constant(0);
+  TNode<Int32T> temp = m.Int32Constant(0);
   var1.Bind(temp);
   m.Branch(m.Int32Constant(1), &l1, &l2);
   m.Bind(&l1);
@@ -301,7 +305,7 @@ TEST(VariableMergeBindFirst) {
   CHECK_NOT_NULL(var1.value());
   m.Goto(&end);
   m.Bind(&l2);
-  Node* temp2 = m.Int32Constant(2);
+  TNode<Int32T> temp2 = m.Int32Constant(2);
   var1.Bind(temp2);
   CHECK_EQ(var1.value(), temp2);
   m.Goto(&merge);
@@ -318,7 +322,7 @@ TEST(VariableMergeSwitch) {
   Label l1(&m), l2(&m), default_label(&m);
   Label* labels[] = {&l1, &l2};
   int32_t values[] = {1, 2};
-  Node* temp1 = m.Int32Constant(0);
+  TNode<Smi> temp1 = m.SmiConstant(0);
   var1.Bind(temp1);
   m.Switch(m.Int32Constant(2), &default_label, values, labels, 2);
   m.Bind(&l1);
@@ -326,7 +330,7 @@ TEST(VariableMergeSwitch) {
   m.Return(temp1);
   m.Bind(&l2);
   CHECK_EQ(temp1, var1.value());
-  Node* temp2 = m.Int32Constant(7);
+  TNode<Smi> temp2 = m.SmiConstant(7);
   var1.Bind(temp2);
   m.Goto(&default_label);
   m.Bind(&default_label);
@@ -374,24 +378,24 @@ TEST(TestToConstant) {
   int32_t value32;
   int64_t value64;
   Node* a = m.Int32Constant(5);
-  CHECK(m.ToInt32Constant(a, value32));
-  CHECK(m.ToInt64Constant(a, value64));
+  CHECK(m.ToInt32Constant(a, &value32));
+  CHECK(m.ToInt64Constant(a, &value64));
 
   a = m.Int64Constant(static_cast<int64_t>(1) << 32);
-  CHECK(!m.ToInt32Constant(a, value32));
-  CHECK(m.ToInt64Constant(a, value64));
+  CHECK(!m.ToInt32Constant(a, &value32));
+  CHECK(m.ToInt64Constant(a, &value64));
 
   a = m.Int64Constant(13);
-  CHECK(m.ToInt32Constant(a, value32));
-  CHECK(m.ToInt64Constant(a, value64));
+  CHECK(m.ToInt32Constant(a, &value32));
+  CHECK(m.ToInt64Constant(a, &value64));
 
   a = UndefinedConstant(m);
-  CHECK(!m.ToInt32Constant(a, value32));
-  CHECK(!m.ToInt64Constant(a, value64));
+  CHECK(!m.ToInt32Constant(a, &value32));
+  CHECK(!m.ToInt64Constant(a, &value64));
 
   a = UndefinedConstant(m);
-  CHECK(!m.ToInt32Constant(a, value32));
-  CHECK(!m.ToInt64Constant(a, value64));
+  CHECK(!m.ToInt32Constant(a, &value32));
+  CHECK(!m.ToInt64Constant(a, &value64));
 }
 
 TEST(DeferredCodePhiHints) {
@@ -453,14 +457,15 @@ TEST(GotoIfException) {
   CodeAssemblerTester asm_tester(isolate, kNumParams);
   CodeAssembler m(asm_tester.state());
 
-  Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
-  Node* to_string_tag =
+  TNode<Context> context =
+      m.HeapConstant(Handle<Context>(isolate->native_context()));
+  TNode<Symbol> to_string_tag =
       m.HeapConstant(isolate->factory()->to_string_tag_symbol());
   Variable exception(&m, MachineRepresentation::kTagged);
 
   Label exception_handler(&m);
   Callable to_string = Builtins::CallableFor(isolate, Builtins::kToString);
-  Node* string = m.CallStub(to_string, context, to_string_tag);
+  TNode<Object> string = m.CallStub(to_string, context, to_string_tag);
   m.GotoIfException(string, &exception_handler, &exception);
   m.Return(string);
 
@@ -487,7 +492,8 @@ TEST(GotoIfExceptionMultiple) {
   CodeAssemblerTester asm_tester(isolate, kNumParams);
   CodeAssembler m(asm_tester.state());
 
-  Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+  TNode<Context> context =
+      m.HeapConstant(Handle<Context>(isolate->native_context()));
   Node* first_value = m.Parameter(0);
   Node* second_value = m.Parameter(1);
   Node* third_value = m.Parameter(2);
@@ -502,7 +508,7 @@ TEST(GotoIfExceptionMultiple) {
 
   // try { return ToString(param1) } catch (e) { ... }
   Callable to_string = Builtins::CallableFor(isolate, Builtins::kToString);
-  Node* string = m.CallStub(to_string, context, first_value);
+  TNode<Object> string = m.CallStub(to_string, context, first_value);
   m.GotoIfException(string, &exception_handler1, &error);
   m.Return(string);
 
@@ -575,7 +581,8 @@ TEST(ExceptionHandler) {
   Label exception(&m, {&var}, Label::kDeferred);
   {
     CodeAssemblerScopedExceptionHandler handler(&m, &exception, &var);
-    Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+    TNode<Context> context =
+        m.HeapConstant(Handle<Context>(isolate->native_context()));
     m.CallRuntime(Runtime::kThrow, context, m.SmiConstant(2));
   }
   m.Return(m.SmiConstant(1));
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 30cd7da7b5a8c9..74c50a4bfacbc9 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -223,7 +223,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
         TNode<FixedArray> vector =
             __ Cast(__ LoadFixedArrayElement(result_array, i));
         for (int lane = 0; lane < 4; lane++) {
-          Node* lane_value =
+          TNode<Smi> lane_value =
               __ SmiFromInt32(tester.raw_assembler_for_testing()->AddNode(
                   tester.raw_assembler_for_testing()
                       ->machine()
@@ -990,13 +990,14 @@ class CodeGeneratorTester {
       i++;
     }
 
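+    // Assumed sufficient for these tests, which are not expected to create
+    // unoptimized frames.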
+    static constexpr size_t kMaxUnoptimizedFrameHeight = 0;
     generator_ = new CodeGenerator(
         environment->main_zone(), &frame_, &linkage_,
         environment->instructions(), &info_, environment->main_isolate(),
         base::Optional<OsrHelper>(), kNoSourcePosition, nullptr,
         PoisoningMitigationLevel::kDontPoison,
         AssemblerOptions::Default(environment->main_isolate()),
-        Builtins::kNoBuiltinId);
+        Builtins::kNoBuiltinId, kMaxUnoptimizedFrameHeight);
 
     // Force a frame to be created.
     generator_->frame_access_state()->MarkHasFrame(true);
diff --git a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
index f80718e05e591c..7aa408f6530515 100644
--- a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
@@ -76,7 +76,7 @@ TEST(DeoptInMiddleOfBasicBlock) {
   // Dummy node for FlagsContinuation::ForDeoptimize (which won't accept
   // nullptr).
   Node* node = Node::New(zone, 0, nullptr, 0, nullptr, false);
-  VectorSlotPair feedback;
+  FeedbackSource feedback;
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
       kEqual, DeoptimizeKind::kEager, DeoptimizeReason::kUnknown, feedback,
       node);
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index aef10b472d83f5..cc2eddd1a06524 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -4,6 +4,7 @@
 
 #include "src/codegen/assembler.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
 #include "src/compiler/node-properties.h"
 #include "src/heap/factory-inl.h"
 #include "test/cctest/cctest.h"
@@ -35,7 +36,9 @@ class JSConstantCacheTester : public HandleAndZoneScope,
   JSConstantCacheTester()
       : JSCacheTesterHelper(main_zone()),
         JSGraph(main_isolate(), &main_graph_, &main_common_, &main_javascript_,
-                nullptr, &main_machine_) {
+                nullptr, &main_machine_),
+        canonical_(main_isolate()),
+        broker_(main_isolate(), main_zone(), false) {
     main_graph_.SetStart(main_graph_.NewNode(common()->Start(0)));
     main_graph_.SetEnd(
         main_graph_.NewNode(common()->End(1), main_graph_.start()));
@@ -47,6 +50,11 @@ class JSConstantCacheTester : public HandleAndZoneScope,
   }
 
   Factory* factory() { return main_isolate()->factory(); }
+  JSHeapBroker* broker() { return &broker_; }
+
+ private:
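+  // The broker expects canonicalized handles.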
+  CanonicalHandleScope canonical_;
+  JSHeapBroker broker_;
 };
 
 
@@ -182,8 +190,8 @@ TEST(HeapNumbers) {
     Handle<Object> num = T.factory()->NewNumber(value);
     Handle<HeapNumber> heap = T.factory()->NewHeapNumber(value);
     Node* node1 = T.Constant(value);
-    Node* node2 = T.Constant(num);
-    Node* node3 = T.Constant(heap);
+    Node* node2 = T.Constant(ObjectRef(T.broker(), num));
+    Node* node3 = T.Constant(ObjectRef(T.broker(), heap));
     CHECK_EQ(node1, node2);
     CHECK_EQ(node1, node3);
   }
@@ -193,12 +201,18 @@ TEST(HeapNumbers) {
 TEST(OddballHandle) {
   JSConstantCacheTester T;
 
-  CHECK_EQ(T.UndefinedConstant(), T.Constant(T.factory()->undefined_value()));
-  CHECK_EQ(T.TheHoleConstant(), T.Constant(T.factory()->the_hole_value()));
-  CHECK_EQ(T.TrueConstant(), T.Constant(T.factory()->true_value()));
-  CHECK_EQ(T.FalseConstant(), T.Constant(T.factory()->false_value()));
-  CHECK_EQ(T.NullConstant(), T.Constant(T.factory()->null_value()));
-  CHECK_EQ(T.NaNConstant(), T.Constant(T.factory()->nan_value()));
+  CHECK_EQ(T.UndefinedConstant(),
+           T.Constant(ObjectRef(T.broker(), T.factory()->undefined_value())));
+  CHECK_EQ(T.TheHoleConstant(),
+           T.Constant(ObjectRef(T.broker(), T.factory()->the_hole_value())));
+  CHECK_EQ(T.TrueConstant(),
+           T.Constant(ObjectRef(T.broker(), T.factory()->true_value())));
+  CHECK_EQ(T.FalseConstant(),
+           T.Constant(ObjectRef(T.broker(), T.factory()->false_value())));
+  CHECK_EQ(T.NullConstant(),
+           T.Constant(ObjectRef(T.broker(), T.factory()->null_value())));
+  CHECK_EQ(T.NaNConstant(),
+           T.Constant(ObjectRef(T.broker(), T.factory()->nan_value())));
 }
 
 
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index e6703dbbbe8ba4..1b136873b53a8b 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -52,6 +52,8 @@ class ContextSpecializationTester : public HandleAndZoneScope {
   void CheckContextInputAndDepthChanges(Node* node, Node* expected_new_context,
                                         size_t expected_new_depth);
 
+  JSHeapBroker* broker() { return &js_heap_broker_; }
+
  private:
   TickCounter tick_counter_;
   CanonicalHandleScope canonical_;
@@ -126,8 +128,9 @@ TEST(ReduceJSLoadContext0) {
   const int slot = Context::NATIVE_CONTEXT_INDEX;
   native->set(slot, *expected);
 
-  Node* const_context = t.jsgraph()->Constant(native);
-  Node* deep_const_context = t.jsgraph()->Constant(subcontext2);
+  Node* const_context = t.jsgraph()->Constant(ObjectRef(t.broker(), native));
+  Node* deep_const_context =
+      t.jsgraph()->Constant(ObjectRef(t.broker(), subcontext2));
   Node* param_context = t.graph()->NewNode(t.common()->Parameter(0), start);
 
   {
@@ -269,7 +272,8 @@ TEST(ReduceJSLoadContext2) {
   context_object0->set(slot_index, *slot_value0);
   context_object1->set(slot_index, *slot_value1);
 
-  Node* context0 = t.jsgraph()->Constant(context_object1);
+  Node* context0 =
+      t.jsgraph()->Constant(ObjectRef(t.broker(), context_object1));
   Node* context1 =
       t.graph()->NewNode(create_function_context, context0, start, start);
   Node* context2 =
@@ -423,8 +427,9 @@ TEST(ReduceJSStoreContext0) {
   const int slot = Context::NATIVE_CONTEXT_INDEX;
   native->set(slot, *expected);
 
-  Node* const_context = t.jsgraph()->Constant(native);
-  Node* deep_const_context = t.jsgraph()->Constant(subcontext2);
+  Node* const_context = t.jsgraph()->Constant(ObjectRef(t.broker(), native));
+  Node* deep_const_context =
+      t.jsgraph()->Constant(ObjectRef(t.broker(), subcontext2));
   Node* param_context = t.graph()->NewNode(t.common()->Parameter(0), start);
 
   {
@@ -531,7 +536,8 @@ TEST(ReduceJSStoreContext2) {
   context_object0->set(slot_index, *slot_value0);
   context_object1->set(slot_index, *slot_value1);
 
-  Node* context0 = t.jsgraph()->Constant(context_object1);
+  Node* context0 =
+      t.jsgraph()->Constant(ObjectRef(t.broker(), context_object1));
   Node* context1 =
       t.graph()->NewNode(create_function_context, context0, start, start);
   Node* context2 =
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index a34b1e14e529fc..c054e7654a0fc1 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -167,11 +167,11 @@ void TestReturnMultipleValues(MachineType type) {
 
       OptimizedCompilationInfo info(ArrayVector("testing"), handles.main_zone(),
                                     Code::WASM_FUNCTION);
-      Handle<Code> code =
-          Pipeline::GenerateCodeForTesting(
-              &info, handles.main_isolate(), desc, m.graph(),
-              AssemblerOptions::Default(handles.main_isolate()), m.Export())
-              .ToHandleChecked();
+      Handle<Code> code = Pipeline::GenerateCodeForTesting(
+                              &info, handles.main_isolate(), desc, m.graph(),
+                              AssemblerOptions::Default(handles.main_isolate()),
+                              m.ExportForTest())
+                              .ToHandleChecked();
 #ifdef ENABLE_DISASSEMBLER
       if (FLAG_print_code) {
         StdoutStream os;
@@ -272,11 +272,11 @@ void ReturnLastValue(MachineType type) {
 
     OptimizedCompilationInfo info(ArrayVector("testing"), handles.main_zone(),
                                   Code::WASM_FUNCTION);
-    Handle<Code> code =
-        Pipeline::GenerateCodeForTesting(
-            &info, handles.main_isolate(), desc, m.graph(),
-            AssemblerOptions::Default(handles.main_isolate()), m.Export())
-            .ToHandleChecked();
+    Handle<Code> code = Pipeline::GenerateCodeForTesting(
+                            &info, handles.main_isolate(), desc, m.graph(),
+                            AssemblerOptions::Default(handles.main_isolate()),
+                            m.ExportForTest())
+                            .ToHandleChecked();
 
     std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
         handles.main_isolate(), code->raw_instruction_size());
@@ -334,11 +334,11 @@ void ReturnSumOfReturns(MachineType type) {
 
     OptimizedCompilationInfo info(ArrayVector("testing"), handles.main_zone(),
                                   Code::WASM_FUNCTION);
-    Handle<Code> code =
-        Pipeline::GenerateCodeForTesting(
-            &info, handles.main_isolate(), desc, m.graph(),
-            AssemblerOptions::Default(handles.main_isolate()), m.Export())
-            .ToHandleChecked();
+    Handle<Code> code = Pipeline::GenerateCodeForTesting(
+                            &info, handles.main_isolate(), desc, m.graph(),
+                            AssemblerOptions::Default(handles.main_isolate()),
+                            m.ExportForTest())
+                            .ToHandleChecked();
 
     std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
         handles.main_isolate(), code->raw_instruction_size());
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index dac6f6193242ca..d858448ef8cc6e 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -401,11 +401,11 @@ TEST(Word64) {
   CheckChange(
       IrOpcode::kCheckedInt64ToInt32, MachineRepresentation::kWord64,
       TypeCache::Get()->kSafeInteger, MachineRepresentation::kWord32,
-      UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair()));
+      UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, FeedbackSource()));
   CheckChange(
       IrOpcode::kCheckedUint64ToInt32, MachineRepresentation::kWord64,
       TypeCache::Get()->kPositiveSafeInteger, MachineRepresentation::kWord32,
-      UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair()));
+      UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, FeedbackSource()));
 
   CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
               Type::Signed32(), MachineRepresentation::kWord64);
@@ -420,7 +420,7 @@ TEST(Word64) {
   CheckChange(
       IrOpcode::kCheckedFloat64ToInt64, MachineRepresentation::kFloat64,
       Type::Number(), MachineRepresentation::kWord64,
-      UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair()));
+      UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, FeedbackSource()));
 
   CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64,
               Type::Signed32(), MachineRepresentation::kFloat64);
@@ -449,7 +449,7 @@ TEST(Word64) {
       IrOpcode::kChangeFloat32ToFloat64, IrOpcode::kCheckedFloat64ToInt64,
       MachineRepresentation::kFloat32, Type::Number(),
       MachineRepresentation::kWord64,
-      UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair()));
+      UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, FeedbackSource()));
 
   CheckTwoChanges(IrOpcode::kChangeInt64ToFloat64,
                   IrOpcode::kTruncateFloat64ToFloat32,
@@ -470,11 +470,11 @@ TEST(Word64) {
   CheckChange(
       IrOpcode::kCheckedTaggedToInt64, MachineRepresentation::kTagged,
       Type::Number(), MachineRepresentation::kWord64,
-      UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair()));
+      UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, FeedbackSource()));
   CheckChange(
       IrOpcode::kCheckedTaggedToInt64, MachineRepresentation::kTaggedPointer,
       Type::Number(), MachineRepresentation::kWord64,
-      UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair()));
+      UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, FeedbackSource()));
 
   CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32,
                   IrOpcode::kChangeInt31ToTaggedSigned,
@@ -507,12 +507,12 @@ TEST(Word64) {
   CheckChange(IrOpcode::kCheckedInt64ToTaggedSigned,
               MachineRepresentation::kWord64, TypeCache::Get()->kSafeInteger,
               MachineRepresentation::kTaggedSigned,
-              UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()));
+              UseInfo::CheckedSignedSmallAsTaggedSigned(FeedbackSource()));
   CheckChange(IrOpcode::kCheckedUint64ToTaggedSigned,
               MachineRepresentation::kWord64,
               TypeCache::Get()->kPositiveSafeInteger,
               MachineRepresentation::kTaggedSigned,
-              UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()));
+              UseInfo::CheckedSignedSmallAsTaggedSigned(FeedbackSource()));
 
   CheckTwoChanges(
       IrOpcode::kChangeInt64ToFloat64, IrOpcode::kChangeFloat64ToTaggedPointer,
@@ -630,7 +630,7 @@ TEST(SignednessInWord32) {
   CheckChange(IrOpcode::kCheckedTruncateTaggedToWord32,
               MachineRepresentation::kTagged, Type::NonInternal(),
               MachineRepresentation::kWord32,
-              UseInfo::CheckedNumberOrOddballAsWord32(VectorSlotPair()));
+              UseInfo::CheckedNumberOrOddballAsWord32(FeedbackSource()));
 
   CheckTwoChanges(IrOpcode::kChangeInt32ToFloat64,
                   IrOpcode::kTruncateFloat64ToFloat32,
@@ -644,7 +644,7 @@ TEST(SignednessInWord32) {
   CheckChange(
       IrOpcode::kCheckedUint32ToInt32, MachineRepresentation::kWord32,
       Type::Unsigned32(),
-      UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair()));
+      UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, FeedbackSource()));
 }
 
 TEST(CompressedAndTagged) {
@@ -698,19 +698,19 @@ static void TestMinusZeroCheck(IrOpcode::Value expected, Type from_type) {
 
   CheckChange(
       expected, MachineRepresentation::kFloat64, from_type,
-      UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros, VectorSlotPair()));
+      UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros, FeedbackSource()));
 
   CheckChange(
       expected, MachineRepresentation::kFloat64, from_type,
-      UseInfo::CheckedSignedSmallAsWord32(kIdentifyZeros, VectorSlotPair()));
+      UseInfo::CheckedSignedSmallAsWord32(kIdentifyZeros, FeedbackSource()));
 
   CheckChange(
       expected, MachineRepresentation::kFloat64, from_type,
-      UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, VectorSlotPair()));
+      UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, FeedbackSource()));
 
   CheckChange(
       expected, MachineRepresentation::kFloat64, from_type,
-      UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, VectorSlotPair()));
+      UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, FeedbackSource()));
 }
 
 TEST(MinusZeroCheck) {
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index eed6cf1e591020..026e8307aec213 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -439,7 +439,7 @@ class Computer {
       Graph graph(&zone);
       RawMachineAssembler raw(isolate, &graph, desc);
       build(desc, raw);
-      inner = CompileGraph("Compute", desc, &graph, raw.Export());
+      inner = CompileGraph("Compute", desc, &graph, raw.ExportForTest());
     }
 
     CSignatureOf<int32_t> csig;
@@ -466,8 +466,8 @@ class Computer {
         Node* store = io.StoreOutput(raw, call);
         USE(store);
         raw.Return(raw.Int32Constant(seed));
-        wrapper =
-            CompileGraph("Compute-wrapper-const", cdesc, &graph, raw.Export());
+        wrapper = CompileGraph("Compute-wrapper-const", cdesc, &graph,
+                               raw.ExportForTest());
       }
 
       CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
@@ -501,7 +501,8 @@ class Computer {
         Node* store = io.StoreOutput(raw, call);
         USE(store);
         raw.Return(raw.Int32Constant(seed));
-        wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export());
+        wrapper =
+            CompileGraph("Compute-wrapper", cdesc, &graph, raw.ExportForTest());
       }
 
       CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
@@ -576,7 +577,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
                 kNoWriteBarrier);
     }
     raw.Return(raw.Int32Constant(42));
-    inner = CompileGraph("CopyTwentyInt32", desc, &graph, raw.Export());
+    inner = CompileGraph("CopyTwentyInt32", desc, &graph, raw.ExportForTest());
   }
 
   CSignatureOf<int32_t> csig;
@@ -599,8 +600,8 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
 
     Node* call = raw.CallN(desc, input_count, inputs);
     raw.Return(call);
-    wrapper =
-        CompileGraph("CopyTwentyInt32-wrapper", cdesc, &graph, raw.Export());
+    wrapper = CompileGraph("CopyTwentyInt32-wrapper", cdesc, &graph,
+                           raw.ExportForTest());
   }
 
   CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
@@ -962,7 +963,8 @@ static void Build_Select_With_Call(
     Graph graph(&zone);
     RawMachineAssembler raw(isolate, &graph, desc);
     raw.Return(raw.Parameter(which));
-    inner = CompileGraph("Select-indirection", desc, &graph, raw.Export());
+    inner =
+        CompileGraph("Select-indirection", desc, &graph, raw.ExportForTest());
     CHECK(!inner.is_null());
     CHECK(inner->IsCode());
   }
@@ -1058,7 +1060,7 @@ void MixedParamTest(int start) {
       Graph graph(&zone);
       RawMachineAssembler raw(isolate, &graph, desc);
       raw.Return(raw.Parameter(which));
-      select = CompileGraph("Compute", desc, &graph, raw.Export());
+      select = CompileGraph("Compute", desc, &graph, raw.ExportForTest());
     }
 
     {
@@ -1117,7 +1119,7 @@ void MixedParamTest(int start) {
         expected_ret = static_cast<int32_t>(constant);
         raw.Return(raw.Int32Constant(expected_ret));
         wrapper = CompileGraph("Select-mixed-wrapper-const", cdesc, &graph,
-                               raw.Export());
+                               raw.ExportForTest());
       }
 
       CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
@@ -1176,7 +1178,7 @@ void TestStackSlot(MachineType slot_type, T expected) {
   g.Store(slot_type.representation(), g.Parameter(11), g.Parameter(10),
           WriteBarrierKind::kNoWriteBarrier);
   g.Return(g.Parameter(9));
-  inner = CompileGraph("Compute", desc, &graph, g.Export());
+  inner = CompileGraph("Compute", desc, &graph, g.ExportForTest());
 
   // Create function f with a stack slot which calls the inner function g.
   BufferedRawMachineAssemblerTester<T> f(slot_type);
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
index be329e1b0043cf..32569eaaeec0bc 100644
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -27,7 +27,8 @@ Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
   int param_count = static_cast<int>(call_descriptor->StackParameterCount());
   Node* sum = __ IntPtrConstant(0);
   for (int i = 0; i < param_count; ++i) {
-    Node* product = __ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1));
+    TNode<IntPtrT> product =
+        __ Signed(__ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1)));
     sum = __ IntPtrAdd(sum, product);
   }
   __ Return(sum);
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index 1562befb9dc06f..ed8a099090dac0 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -28,8 +28,9 @@ Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
   int param_count = static_cast<int>(call_descriptor->StackParameterCount());
   Node* sum = __ IntPtrConstant(0);
   for (int i = 0; i < param_count; ++i) {
-    Node* product = __ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1));
-    sum = __ IntPtrAdd(sum, product);
+    TNode<WordT> product =
+        __ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1));
+    sum = __ Signed(__ IntPtrAdd(sum, product));
   }
   __ Return(sum);
   return tester.GenerateCodeCloseAndEscape();
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 14a4eb3cd9fe76..6f6cfb46b5c256 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -19,6 +19,10 @@
   V(CompactionSpaceDivideSinglePage)                      \
   V(InvalidatedSlotsAfterTrimming)                        \
   V(InvalidatedSlotsAllInvalidatedRanges)                 \
+  V(InvalidatedSlotsCleanupEachObject)                    \
+  V(InvalidatedSlotsCleanupFull)                          \
+  V(InvalidatedSlotsCleanupRightTrim)                     \
+  V(InvalidatedSlotsCleanupOverlapRight)                  \
   V(InvalidatedSlotsEvacuationCandidate)                  \
   V(InvalidatedSlotsNoInvalidatedRanges)                  \
   V(InvalidatedSlotsResetObjectRegression)                \
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 2d0833e1a36608..28553266ff2ed0 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -16,6 +16,13 @@
 #include "test/cctest/heap/heap-utils.h"
 
 namespace v8 {
+
+// See test below: TracedGlobalNoDestructor.
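+// Specializing the trait with kRequiresExplicitDestruction = false means the
+// GC may reclaim such handles without their destructor running; the embedder
+// clears them instead (see e.g. ResetHandleInNonTracingGC below).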
+template <>
+struct TracedGlobalTrait<v8::TracedGlobal<v8::Value>> {
+  static constexpr bool kRequiresExplicitDestruction = false;
+};
+
 namespace internal {
 namespace heap {
 
@@ -346,6 +353,85 @@ TEST(TracedGlobalInStdVector) {
   CHECK(vec[0].IsEmpty());
 }
 
+TEST(TracedGlobalCopyWithDestructor) {
+  ManualGCScope manual_gc;
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+
+  static_assert(TracedGlobalTrait<
+                    v8::TracedGlobal<v8::Object>>::kRequiresExplicitDestruction,
+                "destructor expected");
+
+  const size_t initial_count = global_handles->handles_count();
+  v8::TracedGlobal<v8::Object> global1;
+  {
+    v8::HandleScope scope(isolate);
+    global1.Reset(isolate, v8::Object::New(isolate));
+  }
+  v8::TracedGlobal<v8::Object> global2(global1);
+  v8::TracedGlobal<v8::Object> global3;
+  global3 = global2;
+  CHECK_EQ(initial_count + 3, global_handles->handles_count());
+  CHECK(!global1.IsEmpty());
+  CHECK_EQ(global1, global2);
+  CHECK_EQ(global2, global3);
+  {
+    v8::HandleScope scope(isolate);
+    auto tmp = v8::Local<v8::Object>::New(isolate, global3);
+    CHECK(!tmp.IsEmpty());
+    InvokeMarkSweep();
+  }
+  CHECK_EQ(initial_count + 3, global_handles->handles_count());
+  CHECK(!global1.IsEmpty());
+  CHECK_EQ(global1, global2);
+  CHECK_EQ(global2, global3);
+  InvokeMarkSweep();
+  CHECK_EQ(initial_count, global_handles->handles_count());
+  CHECK(global1.IsEmpty());
+  CHECK_EQ(global1, global2);
+  CHECK_EQ(global2, global3);
+}
+
+TEST(TracedGlobalCopyNoDestructor) {
+  ManualGCScope manual_gc;
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+
+  static_assert(!TracedGlobalTrait<
+                    v8::TracedGlobal<v8::Value>>::kRequiresExplicitDestruction,
+                "no destructor expected");
+
+  const size_t initial_count = global_handles->handles_count();
+  v8::TracedGlobal<v8::Value> global1;
+  {
+    v8::HandleScope scope(isolate);
+    global1.Reset(isolate, v8::Object::New(isolate));
+  }
+  v8::TracedGlobal<v8::Value> global2(global1);
+  v8::TracedGlobal<v8::Value> global3;
+  global3 = global2;
+  CHECK_EQ(initial_count + 3, global_handles->handles_count());
+  CHECK(!global1.IsEmpty());
+  CHECK_EQ(global1, global2);
+  CHECK_EQ(global2, global3);
+  {
+    v8::HandleScope scope(isolate);
+    auto tmp = v8::Local<v8::Value>::New(isolate, global3);
+    CHECK(!tmp.IsEmpty());
+    InvokeMarkSweep();
+  }
+  CHECK_EQ(initial_count + 3, global_handles->handles_count());
+  CHECK(!global1.IsEmpty());
+  CHECK_EQ(global1, global2);
+  CHECK_EQ(global2, global3);
+  InvokeMarkSweep();
+  CHECK_EQ(initial_count, global_handles->handles_count());
+}
+
 TEST(TracedGlobalInStdUnorderedMap) {
   ManualGCScope manual_gc;
   CcTest::InitializeVM();
@@ -560,6 +646,218 @@ TEST(TracePrologueCallingIntoV8WriteBarrier) {
                                 std::move(global));
   heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
   SimulateIncrementalMarking(CcTest::i_isolate()->heap());
+  // Finish the GC so that the tracer is not removed while a GC is still
+  // running, which could result in an infinite loop due to unprocessed
+  // objects.
+  heap::InvokeMarkSweep();
+}
+
+TEST(TracedGlobalWithDestructor) {
+  ManualGCScope manual_gc;
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  TestEmbedderHeapTracer tracer;
+  heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+  i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+
+  const size_t initial_count = global_handles->handles_count();
+  auto* traced = new v8::TracedGlobal<v8::Object>();
+  {
+    v8::HandleScope scope(isolate);
+    v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
+        isolate->GetCurrentContext(), nullptr, nullptr));
+    CHECK(traced->IsEmpty());
+    *traced = v8::TracedGlobal<v8::Object>(isolate, object);
+    CHECK(!traced->IsEmpty());
+    CHECK_EQ(initial_count + 1, global_handles->handles_count());
+  }
+  static_assert(TracedGlobalTrait<
+                    v8::TracedGlobal<v8::Object>>::kRequiresExplicitDestruction,
+                "destructor expected");
+  delete traced;
+  CHECK_EQ(initial_count, global_handles->handles_count());
+  // GC should not need to clear the handle.
+  heap::InvokeMarkSweep();
+  CHECK_EQ(initial_count, global_handles->handles_count());
+}
+
+TEST(TracedGlobalNoDestructor) {
+  ManualGCScope manual_gc;
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  TestEmbedderHeapTracer tracer;
+  heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+  i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+
+  const size_t initial_count = global_handles->handles_count();
+  char* memory = new char[sizeof(v8::TracedGlobal<v8::Value>)];
+  auto* traced = new (memory) v8::TracedGlobal<v8::Value>();
+  {
+    v8::HandleScope scope(isolate);
+    v8::Local<v8::Value> object(ConstructTraceableJSApiObject(
+        isolate->GetCurrentContext(), nullptr, nullptr));
+    CHECK(traced->IsEmpty());
+    *traced = v8::TracedGlobal<v8::Value>(isolate, object);
+    CHECK(!traced->IsEmpty());
+    CHECK_EQ(initial_count + 1, global_handles->handles_count());
+  }
+  static_assert(!TracedGlobalTrait<
+                    v8::TracedGlobal<v8::Value>>::kRequiresExplicitDestruction,
+                "no destructor expected");
+  traced->~TracedGlobal<v8::Value>();
+  CHECK_EQ(initial_count + 1, global_handles->handles_count());
+  // GC should clear the handle.
+  heap::InvokeMarkSweep();
+  CHECK_EQ(initial_count, global_handles->handles_count());
+  delete[] memory;
+}
+
+namespace {
+
+class EmptyEmbedderHeapTracer : public v8::EmbedderHeapTracer {
+ public:
+  void RegisterV8References(
+      const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
+
+  bool AdvanceTracing(double deadline_in_ms) final { return true; }
+  bool IsTracingDone() final { return true; }
+  void TracePrologue(EmbedderHeapTracer::TraceFlags) final {}
+  void TraceEpilogue() final {}
+  void EnterFinalPause(EmbedderStackState) final {}
+};
+
+// EmbedderHeapTracer that can optimize Scavenger handling when used with
+// TracedGlobal handles that have destructors.
+class EmbedderHeapTracerDestructorNonTracingClearing final
+    : public EmptyEmbedderHeapTracer {
+ public:
+  explicit EmbedderHeapTracerDestructorNonTracingClearing(
+      uint16_t class_id_to_optimize)
+      : class_id_to_optimize_(class_id_to_optimize) {}
+
+  bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) final {
+    return handle.WrapperClassId() != class_id_to_optimize_;
+  }
+
+  void ResetHandleInNonTracingGC(
+      const v8::TracedGlobal<v8::Value>& handle) final {
+    // Not called when used with handles that have destructors.
+    CHECK(false);
+  }
+
+ private:
+  uint16_t class_id_to_optimize_;
+};
+
+// EmbedderHeapTracer that can optimize Scavenger handling when used with
+// TracedGlobal handles without destructors.
+class EmbedderHeapTracerNoDestructorNonTracingClearing final
+    : public EmptyEmbedderHeapTracer {
+ public:
+  explicit EmbedderHeapTracerNoDestructorNonTracingClearing(
+      uint16_t class_id_to_optimize)
+      : class_id_to_optimize_(class_id_to_optimize) {}
+
+  bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) final {
+    return handle.WrapperClassId() != class_id_to_optimize_;
+  }
+
+  void ResetHandleInNonTracingGC(
+      const v8::TracedGlobal<v8::Value>& handle) final {
+    if (handle.WrapperClassId() != class_id_to_optimize_) return;
+
+    // Convention (for test): optimized objects store a back pointer to their
+    // TracedGlobal handle in their first internal field.
+    TracedGlobal<v8::Value>* original_handle =
+        reinterpret_cast<TracedGlobal<v8::Value>*>(
+            v8::Object::GetAlignedPointerFromInternalField(
+                handle.As<v8::Object>(), 0));
+    original_handle->Reset();
+  }
+
+ private:
+  uint16_t class_id_to_optimize_;
+};
+
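+// Sets up one handle whose wrapper class id marks it for optimized
+// (non-tracing) clearing, with its object storing a back pointer to the
+// handle, and one regular handle that remains a root for non-tracing GCs.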
+template <typename T>
+void SetupOptimizedAndNonOptimizedHandle(
+    v8::Isolate* isolate, uint16_t optimized_class_id,
+    v8::TracedGlobal<T>* optimized_handle,
+    v8::TracedGlobal<T>* non_optimized_handle) {
+  v8::HandleScope scope(isolate);
+
+  v8::Local<v8::Object> optimized_object(ConstructTraceableJSApiObject(
+      isolate->GetCurrentContext(), optimized_handle, nullptr));
+  CHECK(optimized_handle->IsEmpty());
+  *optimized_handle = v8::TracedGlobal<T>(isolate, optimized_object);
+  CHECK(!optimized_handle->IsEmpty());
+  optimized_handle->SetWrapperClassId(optimized_class_id);
+
+  v8::Local<v8::Object> non_optimized_object(ConstructTraceableJSApiObject(
+      isolate->GetCurrentContext(), nullptr, nullptr));
+  CHECK(non_optimized_handle->IsEmpty());
+  *non_optimized_handle = v8::TracedGlobal<T>(isolate, non_optimized_object);
+  CHECK(!non_optimized_handle->IsEmpty());
+}
+
+}  // namespace
+
+TEST(TracedGlobalDestructorReclaimedOnScavenge) {
+  ManualGCScope manual_gc;
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  constexpr uint16_t kClassIdToOptimize = 17;
+  EmbedderHeapTracerDestructorNonTracingClearing tracer(kClassIdToOptimize);
+  heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+  i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+
+  static_assert(TracedGlobalTrait<
+                    v8::TracedGlobal<v8::Object>>::kRequiresExplicitDestruction,
+                "destructor expected");
+  const size_t initial_count = global_handles->handles_count();
+  auto* optimized_handle = new v8::TracedGlobal<v8::Object>();
+  auto* non_optimized_handle = new v8::TracedGlobal<v8::Object>();
+  SetupOptimizedAndNonOptimizedHandle(isolate, kClassIdToOptimize,
+                                      optimized_handle, non_optimized_handle);
+  CHECK_EQ(initial_count + 2, global_handles->handles_count());
+  heap::InvokeScavenge();
+  CHECK_EQ(initial_count + 1, global_handles->handles_count());
+  CHECK(optimized_handle->IsEmpty());
+  delete optimized_handle;
+  CHECK(!non_optimized_handle->IsEmpty());
+  delete non_optimized_handle;
+  CHECK_EQ(initial_count, global_handles->handles_count());
+}
+
+TEST(TracedGlobalNoDestructorReclaimedOnScavenge) {
+  ManualGCScope manual_gc;
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  constexpr uint16_t kClassIdToOptimize = 23;
+  EmbedderHeapTracerNoDestructorNonTracingClearing tracer(kClassIdToOptimize);
+  heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+  i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+
+  static_assert(!TracedGlobalTrait<
+                    v8::TracedGlobal<v8::Value>>::kRequiresExplicitDestruction,
+                "no destructor expected");
+  const size_t initial_count = global_handles->handles_count();
+  auto* optimized_handle = new v8::TracedGlobal<v8::Value>();
+  auto* non_optimized_handle = new v8::TracedGlobal<v8::Value>();
+  SetupOptimizedAndNonOptimizedHandle(isolate, kClassIdToOptimize,
+                                      optimized_handle, non_optimized_handle);
+  CHECK_EQ(initial_count + 2, global_handles->handles_count());
+  heap::InvokeScavenge();
+  CHECK_EQ(initial_count + 1, global_handles->handles_count());
+  CHECK(optimized_handle->IsEmpty());
+  delete optimized_handle;
+  CHECK(!non_optimized_handle->IsEmpty());
+  non_optimized_handle->Reset();
+  delete non_optimized_handle;
+  CHECK_EQ(initial_count, global_handles->handles_count());
 }
 
 }  // namespace heap
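
For orientation, the two tests above exercise the scavenger fast path through a
pair of EmbedderHeapTracer hooks: IsRootForNonTracingGC decides whether a
TracedGlobal keeps its target alive across a non-tracing (scavenge) GC, and
ResetHandleInNonTracingGC clears the embedder's handle once the target is
reclaimed. A minimal sketch of the embedder-side setup that the tests'
back-pointer convention implies; the helper name InstallOptimizedWrapper and
the use of SetAlignedPointerInInternalField are illustrative assumptions, not
part of this patch:

// Sketch only, not part of the patch: the embedder owns a TracedGlobal slot
// and stores its address in the wrapper object's internal field 0 so that
// ResetHandleInNonTracingGC can find it and call Reset() (see tracer above).
void InstallOptimizedWrapper(v8::Isolate* isolate,
                             v8::Local<v8::Object> wrapper,
                             v8::TracedGlobal<v8::Object>* slot,
                             uint16_t class_id) {
  *slot = v8::TracedGlobal<v8::Object>(isolate, wrapper);
  slot->SetWrapperClassId(class_id);  // marks the handle as optimizable
  wrapper->SetAlignedPointerInInternalField(0, slot);  // back pointer
}
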
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index e4dbee2210f0c3..fd17c0f063b5b4 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -1799,36 +1799,6 @@ TEST(HeapNumberAlignment) {
   }
 }
 
-TEST(MutableHeapNumberAlignment) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  Factory* factory = isolate->factory();
-  Heap* heap = isolate->heap();
-  HandleScope sc(isolate);
-
-  const auto required_alignment =
-      HeapObject::RequiredAlignment(*factory->mutable_heap_number_map());
-  const int maximum_misalignment =
-      Heap::GetMaximumFillToAlign(required_alignment);
-
-  for (int offset = 0; offset <= maximum_misalignment; offset += kTaggedSize) {
-    AlignNewSpace(required_alignment, offset);
-    Handle<Object> number_new = factory->NewMutableHeapNumber(1.000123);
-    CHECK(number_new->IsMutableHeapNumber());
-    CHECK(Heap::InYoungGeneration(*number_new));
-    CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_new).address(),
-                                     required_alignment));
-
-    AlignOldSpace(required_alignment, offset);
-    Handle<Object> number_old =
-        factory->NewMutableHeapNumber(1.000321, AllocationType::kOld);
-    CHECK(number_old->IsMutableHeapNumber());
-    CHECK(heap->InOldSpace(*number_old));
-    CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_old).address(),
-                                     required_alignment));
-  }
-}
-
 TEST(TestSizeOfObjectsVsHeapObjectIteratorPrecision) {
   CcTest::InitializeVM();
   HeapObjectIterator iterator(CcTest::heap());
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index c88cf1f3ba13a7..af42503f864a87 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -44,7 +44,7 @@ Page* HeapTester::AllocateByteArraysOnPage(
       CHECK_EQ(page, Page::FromHeapObject(byte_array));
     }
   }
-  CHECK_NULL(page->invalidated_slots());
+  CHECK_NULL(page->invalidated_slots<OLD_TO_OLD>());
   return page;
 }
 
@@ -53,7 +53,7 @@ HEAP_TEST(InvalidatedSlotsNoInvalidatedRanges) {
   Heap* heap = CcTest::heap();
   std::vector<ByteArray> byte_arrays;
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
-  InvalidatedSlotsFilter filter(page);
+  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (ByteArray byte_array : byte_arrays) {
     Address start = byte_array.address() + ByteArray::kHeaderSize;
     Address end = byte_array.address() + byte_array.Size();
@@ -70,10 +70,10 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register every second byte array as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i += 2) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
-                                             byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
-  InvalidatedSlotsFilter filter(page);
+  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
     ByteArray byte_array = byte_arrays[i];
     Address start = byte_array.address() + ByteArray::kHeaderSize;
@@ -95,10 +95,10 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
-                                             byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
-  InvalidatedSlotsFilter filter(page);
+  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
     ByteArray byte_array = byte_arrays[i];
     Address start = byte_array.address() + ByteArray::kHeaderSize;
@@ -117,12 +117,12 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
-                                             byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   // Trim byte arrays and check that the slots outside the byte arrays are
   // considered invalid if the old space page was swept.
-  InvalidatedSlotsFilter filter(page);
+  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
     ByteArray byte_array = byte_arrays[i];
     Address start = byte_array.address() + ByteArray::kHeaderSize;
@@ -145,11 +145,11 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
   // This should be a no-op because the page is marked as an evacuation
   // candidate.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
-                                             byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   // All slots must still be valid.
-  InvalidatedSlotsFilter filter(page);
+  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
     ByteArray byte_array = byte_arrays[i];
     Address start = byte_array.address() + ByteArray::kHeaderSize;
@@ -169,11 +169,11 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
   heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
-                                             byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   // All slots must still be invalid.
-  InvalidatedSlotsFilter filter(page);
+  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
     ByteArray byte_array = byte_arrays[i];
     Address start = byte_array.address() + ByteArray::kHeaderSize;
@@ -351,6 +351,78 @@ HEAP_TEST(InvalidatedSlotsFastToSlow) {
   CcTest::CollectGarbage(i::OLD_SPACE);
 }
 
+HEAP_TEST(InvalidatedSlotsCleanupFull) {
+  ManualGCScope manual_gc_scope;
+  CcTest::InitializeVM();
+  Heap* heap = CcTest::heap();
+  std::vector<ByteArray> byte_arrays;
+  Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
+  // Register all byte arrays as invalidated.
+  for (size_t i = 0; i < byte_arrays.size(); i++) {
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
+  }
+
+  // Mark the full page as free.
+  InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
+  cleanup.Free(page->area_start(), page->area_end());
+
+  // After cleanup there should be no invalidated objects left on the page.
+  CHECK(page->invalidated_slots<OLD_TO_NEW>()->empty());
+}
+
+HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
+  ManualGCScope manual_gc_scope;
+  CcTest::InitializeVM();
+  Heap* heap = CcTest::heap();
+  std::vector<ByteArray> byte_arrays;
+  Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
+  // Register all byte arrays as invalidated.
+  for (size_t i = 0; i < byte_arrays.size(); i++) {
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
+  }
+
+  // Mark each object on the page as free.
+  InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
+
+  for (size_t i = 0; i < byte_arrays.size(); i++) {
+    Address free_start = byte_arrays[i].address();
+    Address free_end = free_start + byte_arrays[i].Size();
+    cleanup.Free(free_start, free_end);
+  }
+
+  // After cleanup there should be no invalidated objects left on the page.
+  CHECK(page->invalidated_slots<OLD_TO_NEW>()->empty());
+}
+
+HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
+  ManualGCScope manual_gc_scope;
+  CcTest::InitializeVM();
+  Heap* heap = CcTest::heap();
+  std::vector<ByteArray> byte_arrays;
+  Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
+
+  CHECK_GT(byte_arrays.size(), 1);
+  ByteArray& invalidated = byte_arrays[1];
+  int invalidated_size = invalidated.Size();
+
+  heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
+  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
+                                                       invalidated_size);
+
+  // Free the memory at the end of the invalidated object.
+  InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
+  Address free_start = invalidated.address() + invalidated.Size();
+  cleanup.Free(free_start, page->area_end());
+
+  // After cleanup, the invalidated object should be smaller.
+  InvalidatedSlots* invalidated_slots = page->invalidated_slots<OLD_TO_NEW>();
+  CHECK_GE((*invalidated_slots)[HeapObject::FromAddress(invalidated.address())],
+           invalidated.Size());
+  CHECK_EQ(invalidated_slots->size(), 1);
+}
+
 }  // namespace heap
 }  // namespace internal
 }  // namespace v8
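
Taken together, the three new HEAP_TESTs pin down the InvalidatedSlotsCleanup
contract: every range passed to Free() removes (or, for a partially freed
object, shrinks) the matching OLD_TO_NEW registrations on the page. A condensed
sketch of the call sequence they all share, using the V8-internal types of this
revision; `page` and `object` are placeholders, not real initializers:

// Internal-API sketch mirroring the tests above; not standalone code.
Page* page = /* an old-space page */ nullptr;
HeapObject object = /* a live object residing on that page */ HeapObject();
int size = object.Size();

// 1. Record the object as having invalidated OLD_TO_NEW slots.
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);

// 2. While sweeping, report the freed range to the cleanup helper...
InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
cleanup.Free(object.address(), object.address() + size);

// 3. ...after which the page no longer tracks the object.
CHECK(page->invalidated_slots<OLD_TO_NEW>()->empty());
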
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index 9392d60181b5f6..d6097e938df258 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -141,9 +141,9 @@ handlers: [
 snippet: "
   var a = [ 1, 2 ]; return [ 0, ...a ];
 "
-frame size: 8
+frame size: 7
 parameter count: 1
-bytecode array length: 84
+bytecode array length: 80
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
@@ -152,22 +152,21 @@ bytecodes: [
                 B(Star), R(2),
                 B(LdaConstant), U8(2),
   /*   67 S> */ B(Star), R(1),
-                B(LdaNamedProperty), R(0), U8(3), U8(2),
+                B(GetIterator), R(0), U8(2),
                 B(Star), R(6),
                 B(CallProperty0), R(6), R(0), U8(4),
-                B(Mov), R(0), R(5),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(4),
-                B(LdaNamedProperty), R(4), U8(4), U8(6),
+                B(LdaNamedProperty), R(4), U8(3), U8(6),
                 B(Star), R(3),
                 B(CallProperty0), R(3), R(4), U8(15),
-                B(Star), R(7),
+                B(Star), R(5),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
-                B(LdaNamedProperty), R(7), U8(5), U8(17),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
+                B(LdaNamedProperty), R(5), U8(4), U8(17),
                 B(JumpIfToBooleanTrue), U8(19),
-                B(LdaNamedProperty), R(7), U8(6), U8(8),
+                B(LdaNamedProperty), R(5), U8(5), U8(8),
                 B(StaInArrayLiteral), R(2), R(1), U8(13),
                 B(Ldar), R(1),
                 B(Inc), U8(12),
@@ -180,7 +179,6 @@ constant pool: [
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
   Smi [1],
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index ce579699d866cb..1dbb999371f190 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -212,9 +212,9 @@ snippet: "
   async function* f() { for (let x of [42]) yield x }
   f();
 "
-frame size: 21
+frame size: 19
 parameter count: 1
-bytecode array length: 372
+bytecode array length: 369
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
                 B(Mov), R(closure), R(4),
@@ -235,103 +235,102 @@ bytecodes: [
                 B(LdaSmi), I8(1),
                 B(Star), R(4),
                 B(Mov), R(8), R(5),
-                B(JumpConstant), U8(16),
+                B(JumpConstant), U8(15),
   /*   36 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
                 B(Star), R(10),
-                B(LdaNamedProperty), R(10), U8(5), U8(1),
+                B(GetIterator), R(10), U8(1),
                 B(Star), R(11),
                 B(CallProperty0), R(11), R(10), U8(3),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(9),
-                B(LdaNamedProperty), R(9), U8(6), U8(5),
+                B(LdaNamedProperty), R(9), U8(5), U8(5),
                 B(Star), R(8),
                 B(LdaFalse),
-                B(Star), R(12),
-                B(Mov), R(context), R(15),
+                B(Star), R(10),
+                B(Mov), R(context), R(13),
                 B(LdaTrue),
-                B(Star), R(12),
+                B(Star), R(10),
   /*   31 S> */ B(CallProperty0), R(8), R(9), U8(7),
-                B(Star), R(16),
+                B(Star), R(14),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
-                B(LdaNamedProperty), R(16), U8(7), U8(9),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+                B(LdaNamedProperty), R(14), U8(6), U8(9),
                 B(JumpIfToBooleanTrue), U8(67),
-                B(LdaNamedProperty), R(16), U8(8), U8(11),
-                B(Star), R(16),
+                B(LdaNamedProperty), R(14), U8(7), U8(11),
+                B(Star), R(14),
                 B(LdaFalse),
-                B(Star), R(12),
-                B(Mov), R(16), R(1),
+                B(Star), R(10),
+                B(Mov), R(14), R(1),
   /*   22 E> */ B(StackCheck),
   /*   31 S> */ B(Mov), R(1), R(3),
   /*   42 S> */ B(LdaFalse),
-                B(Star), R(19),
-                B(Mov), R(0), R(17),
-                B(Mov), R(3), R(18),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(17), U8(3),
-  /*   42 E> */ B(SuspendGenerator), R(0), R(0), U8(17), U8(1),
-                B(ResumeGenerator), R(0), R(0), U8(17),
                 B(Star), R(17),
+                B(Mov), R(0), R(15),
+                B(Mov), R(3), R(16),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(15), U8(3),
+  /*   42 E> */ B(SuspendGenerator), R(0), R(0), U8(15), U8(1),
+                B(ResumeGenerator), R(0), R(0), U8(15),
+                B(Star), R(15),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
-                B(Ldar), R(17),
+                B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
+                B(Ldar), R(15),
   /*   42 E> */ B(Throw),
                 B(LdaSmi), I8(1),
-                B(Star), R(13),
-                B(Mov), R(17), R(14),
+                B(Star), R(11),
+                B(Mov), R(15), R(12),
                 B(Jump), U8(20),
-                B(Ldar), R(17),
+                B(Ldar), R(15),
                 B(JumpLoop), U8(84), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(14),
-                B(Star), R(13),
+                B(Star), R(12),
+                B(Star), R(11),
                 B(Jump), U8(7),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(LdaZero),
-                B(Star), R(13),
+                B(Star), R(11),
                 B(LdaTheHole),
                 B(SetPendingMessage),
+                B(Star), R(13),
+                B(Ldar), R(10),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(9), U8(10), U8(13),
                 B(Star), R(15),
-                B(Ldar), R(12),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(9), U8(11), U8(13),
-                B(Star), R(17),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(18),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(16),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(19),
-                B(LdaConstant), U8(12),
-                B(Star), R(20),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
+                B(Star), R(17),
+                B(LdaConstant), U8(11),
+                B(Star), R(18),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
                 B(Throw),
-                B(CallProperty0), R(17), R(9), U8(15),
+                B(CallProperty0), R(15), R(9), U8(15),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(19),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(19), U8(1),
+                B(Star), R(17),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(17), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(18),
+                B(Star), R(16),
                 B(LdaZero),
-                B(TestReferenceEqual), R(13),
+                B(TestReferenceEqual), R(11),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(18),
+                B(Ldar), R(16),
                 B(ReThrow),
-                B(Ldar), R(15),
-                B(SetPendingMessage),
                 B(Ldar), R(13),
-                B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
+                B(SetPendingMessage),
+                B(Ldar), R(11),
+                B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
                 B(Jump), U8(14),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
                 B(LdaSmi), I8(1),
                 B(Star), R(4),
-                B(Mov), R(14), R(5),
+                B(Mov), R(12), R(5),
                 B(Jump), U8(51),
                 B(Jump), U8(36),
                 B(Star), R(8),
-                B(CreateCatchContext), R(8), U8(15),
+                B(CreateCatchContext), R(8), U8(14),
                 B(Star), R(7),
                 B(LdaTheHole),
                 B(SetPendingMessage),
@@ -360,7 +359,7 @@ bytecodes: [
                 B(Ldar), R(6),
                 B(SetPendingMessage),
                 B(Ldar), R(4),
-                B(SwitchOnSmiNoFeedback), U8(17), U8(3), I8(0),
+                B(SwitchOnSmiNoFeedback), U8(16), U8(3), I8(0),
                 B(Jump), U8(22),
                 B(Ldar), R(5),
                 B(ReThrow),
@@ -377,11 +376,10 @@ bytecodes: [
 ]
 constant pool: [
   Smi [30],
-  Smi [149],
+  Smi [148],
   Smi [16],
   Smi [7],
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -392,16 +390,16 @@ constant pool: [
   Smi [6],
   Smi [9],
   SCOPE_INFO_TYPE,
-  Smi [277],
+  Smi [274],
   Smi [6],
   Smi [9],
   Smi [23],
 ]
 handlers: [
-  [20, 318, 326],
-  [23, 282, 284],
-  [93, 180, 188],
-  [214, 247, 249],
+  [20, 315, 323],
+  [23, 279, 281],
+  [92, 179, 187],
+  [211, 244, 246],
 ]
 
 ---
@@ -410,9 +408,9 @@ snippet: "
   async function* f() { yield* g() }
   f();
 "
-frame size: 19
+frame size: 17
 parameter count: 1
-bytecode array length: 475
+bytecode array length: 466
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(5),
                 B(Mov), R(closure), R(1),
@@ -433,44 +431,69 @@ bytecodes: [
                 B(LdaSmi), I8(1),
                 B(Star), R(1),
                 B(Mov), R(5), R(2),
-                B(JumpConstant), U8(18),
+                B(JumpConstant), U8(17),
   /*   49 S> */ B(LdaGlobal), U8(7), U8(0),
                 B(Star), R(9),
   /*   56 E> */ B(CallUndefinedReceiver0), R(9), U8(2),
                 B(Star), R(10),
                 B(LdaNamedProperty), R(10), U8(8), U8(4),
-                B(JumpIfUndefined), U8(17),
-                B(JumpIfNull), U8(15),
+                B(JumpIfUndefinedOrNull), U8(15),
                 B(Star), R(11),
                 B(CallProperty0), R(11), R(10), U8(6),
-                B(JumpIfJSReceiver), U8(23),
+                B(JumpIfJSReceiver), U8(22),
                 B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
-                B(LdaNamedProperty), R(10), U8(9), U8(8),
+                B(GetIterator), R(10), U8(8),
                 B(Star), R(11),
                 B(CallProperty0), R(11), R(10), U8(10),
                 B(Star), R(11),
                 B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(11), U8(1),
                 B(Star), R(7),
-                B(LdaNamedProperty), R(7), U8(10), U8(12),
+                B(LdaNamedProperty), R(7), U8(9), U8(12),
                 B(Star), R(9),
                 B(LdaUndefined),
                 B(Star), R(8),
                 B(LdaZero),
                 B(Star), R(6),
                 B(Ldar), R(6),
-                B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(1),
+                B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(1),
                 B(CallProperty1), R(9), R(7), R(8), U8(14),
-                B(Jump), U8(146),
-                B(LdaNamedProperty), R(7), U8(13), U8(16),
-                B(JumpIfUndefined), U8(13),
-                B(JumpIfNull), U8(11),
+                B(Jump), U8(140),
+                B(LdaNamedProperty), R(7), U8(12), U8(16),
+                B(JumpIfUndefinedOrNull), U8(11),
+                B(Star), R(10),
+                B(CallProperty1), R(10), R(7), R(8), U8(18),
+                B(Jump), U8(125),
+                B(Mov), R(0), R(10),
+                B(Mov), R(8), R(11),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(10), U8(2),
+  /*   49 E> */ B(SuspendGenerator), R(0), R(0), U8(10), U8(1),
+                B(ResumeGenerator), R(0), R(0), U8(10),
+                B(Star), R(10),
+                B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+                B(Star), R(11),
+                B(LdaZero),
+                B(TestReferenceEqual), R(11),
+                B(JumpIfTrue), U8(5),
+                B(Ldar), R(10),
+                B(ReThrow),
+                B(LdaSmi), I8(1),
+                B(Star), R(1),
+                B(Mov), R(10), R(2),
+                B(Jump), U8(241),
+                B(LdaNamedProperty), R(7), U8(13), U8(20),
+                B(JumpIfUndefinedOrNull), U8(11),
                 B(Star), R(12),
-                B(CallProperty1), R(12), R(7), R(8), U8(18),
-                B(Jump), U8(129),
+                B(CallProperty1), R(12), R(7), R(8), U8(22),
+                B(Jump), U8(66),
+                B(LdaNamedProperty), R(7), U8(12), U8(24),
+                B(JumpIfUndefinedOrNull), U8(55),
+                B(Star), R(12),
+                B(CallProperty0), R(12), R(7), U8(26),
+                B(Jump), U8(2),
+                B(Star), R(13),
                 B(Mov), R(0), R(12),
-                B(Mov), R(8), R(13),
                 B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
-  /*   49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(1),
+  /*   49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(2),
                 B(ResumeGenerator), R(0), R(0), U8(12),
                 B(Star), R(12),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
@@ -480,72 +503,43 @@ bytecodes: [
                 B(JumpIfTrue), U8(5),
                 B(Ldar), R(12),
                 B(ReThrow),
-                B(LdaSmi), I8(1),
-                B(Star), R(1),
-                B(Mov), R(12), R(2),
-                B(Jump), U8(245),
-                B(LdaNamedProperty), R(7), U8(14), U8(20),
-                B(JumpIfUndefined), U8(13),
-                B(JumpIfNull), U8(11),
-                B(Star), R(14),
-                B(CallProperty1), R(14), R(7), R(8), U8(22),
-                B(Jump), U8(68),
-                B(LdaNamedProperty), R(7), U8(13), U8(24),
-                B(JumpIfUndefined), U8(57),
-                B(JumpIfNull), U8(55),
-                B(Star), R(14),
-                B(CallProperty0), R(14), R(7), U8(26),
-                B(Jump), U8(2),
-                B(Star), R(15),
-                B(Mov), R(0), R(14),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(14), U8(2),
-  /*   49 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(2),
-                B(ResumeGenerator), R(0), R(0), U8(14),
-                B(Star), R(14),
-                B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(Star), R(15),
-                B(LdaZero),
-                B(TestReferenceEqual), R(15),
-                B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
-                B(ReThrow),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(JumpIfJSReceiver), U8(9),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
-                B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
-                B(Star), R(15),
-                B(Mov), R(0), R(14),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(14), U8(2),
-  /*   49 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(3),
-                B(ResumeGenerator), R(0), R(0), U8(14),
                 B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+                B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
+                B(Star), R(13),
+                B(Mov), R(0), R(12),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
+  /*   49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(3),
+                B(ResumeGenerator), R(0), R(0), U8(12),
+                B(Star), R(12),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(Star), R(15),
+                B(Star), R(13),
                 B(LdaZero),
-                B(TestReferenceEqual), R(15),
+                B(TestReferenceEqual), R(13),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(14),
-                B(Mov), R(14), R(5),
+                B(Ldar), R(12),
+                B(Mov), R(12), R(5),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
-                B(LdaNamedProperty), R(5), U8(15), U8(28),
+                B(LdaNamedProperty), R(5), U8(14), U8(28),
                 B(JumpIfToBooleanTrue), U8(38),
-                B(LdaNamedProperty), R(5), U8(16), U8(30),
-                B(Star), R(17),
+                B(LdaNamedProperty), R(5), U8(15), U8(30),
+                B(Star), R(15),
                 B(LdaFalse),
-                B(Star), R(18),
-                B(Mov), R(0), R(16),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(16), U8(3),
-  /*   49 E> */ B(SuspendGenerator), R(0), R(0), U8(16), U8(4),
-                B(ResumeGenerator), R(0), R(0), U8(16),
+                B(Star), R(16),
+                B(Mov), R(0), R(14),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(14), U8(3),
+  /*   49 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(4),
+                B(ResumeGenerator), R(0), R(0), U8(14),
                 B(Star), R(8),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
                 B(Star), R(6),
-                B(JumpLoop), U8(242), I8(0),
-                B(LdaNamedProperty), R(5), U8(16), U8(32),
+                B(JumpLoop), U8(236), I8(0),
+                B(LdaNamedProperty), R(5), U8(15), U8(32),
                 B(Star), R(7),
                 B(LdaSmi), I8(1),
                 B(TestReferenceEqual), R(6),
@@ -557,7 +551,7 @@ bytecodes: [
                 B(Ldar), R(7),
                 B(Jump), U8(36),
                 B(Star), R(5),
-                B(CreateCatchContext), R(5), U8(17),
+                B(CreateCatchContext), R(5), U8(16),
                 B(Star), R(4),
                 B(LdaTheHole),
                 B(SetPendingMessage),
@@ -586,7 +580,7 @@ bytecodes: [
                 B(Ldar), R(3),
                 B(SetPendingMessage),
                 B(Ldar), R(1),
-                B(SwitchOnSmiNoFeedback), U8(19), U8(3), I8(0),
+                B(SwitchOnSmiNoFeedback), U8(18), U8(3), I8(0),
                 B(Jump), U8(22),
                 B(Ldar), R(2),
                 B(ReThrow),
@@ -603,30 +597,29 @@ bytecodes: [
 ]
 constant pool: [
   Smi [30],
-  Smi [162],
-  Smi [238],
-  Smi [288],
-  Smi [347],
+  Smi [157],
+  Smi [229],
+  Smi [279],
+  Smi [338],
   Smi [16],
   Smi [7],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
   SYMBOL_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   Smi [11],
-  Smi [72],
+  Smi [70],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["throw"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
   SCOPE_INFO_TYPE,
-  Smi [380],
+  Smi [371],
   Smi [6],
   Smi [9],
   Smi [23],
 ]
 handlers: [
-  [20, 421, 429],
-  [23, 383, 387],
+  [20, 412, 420],
+  [23, 374, 378],
 ]
 
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index 33bd5434b4ee82..963cbee018735a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -65,9 +65,9 @@ handlers: [
 snippet: "
   Math.max(0, ...[1, 2, 3], 4);
 "
-frame size: 10
+frame size: 9
 parameter count: 1
-bytecode array length: 107
+bytecode array length: 106
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   34 S> */ B(LdaGlobal), U8(0), U8(0),
@@ -80,22 +80,22 @@ bytecodes: [
                 B(Star), R(3),
   /*   49 S> */ B(CreateArrayLiteral), U8(4), U8(5), U8(37),
                 B(Star), R(7),
-                B(LdaNamedProperty), R(7), U8(5), U8(6),
+                B(GetIterator), R(7), U8(6),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(7), U8(8),
                 B(Mov), R(0), R(2),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(6), U8(10),
+                B(LdaNamedProperty), R(6), U8(5), U8(10),
                 B(Star), R(5),
                 B(CallProperty0), R(5), R(6), U8(19),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
-                B(LdaNamedProperty), R(9), U8(7), U8(21),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+                B(LdaNamedProperty), R(7), U8(6), U8(21),
                 B(JumpIfToBooleanTrue), U8(19),
-                B(LdaNamedProperty), R(9), U8(8), U8(12),
+                B(LdaNamedProperty), R(7), U8(7), U8(12),
                 B(StaInArrayLiteral), R(4), R(3), U8(17),
                 B(Ldar), R(3),
                 B(Inc), U8(16),
@@ -114,7 +114,6 @@ constant pool: [
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
   Smi [1],
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
index 2d44b972a03fb5..e26b79a9fb80ce 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -10,93 +10,90 @@ snippet: "
   var x, a = [0,1,2,3];
   [x] = a;
 "
-frame size: 16
+frame size: 14
 parameter count: 1
-bytecode array length: 178
+bytecode array length: 172
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
                 B(Star), R(1),
-  /*   60 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
+  /*   60 S> */ B(GetIterator), R(1), U8(1),
                 B(Star), R(6),
                 B(CallProperty0), R(6), R(1), U8(3),
-                B(Mov), R(1), R(5),
                 B(Mov), R(1), R(2),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(4),
-                B(LdaNamedProperty), R(4), U8(2), U8(5),
+                B(LdaNamedProperty), R(4), U8(1), U8(5),
                 B(Star), R(3),
                 B(LdaFalse),
-                B(Star), R(7),
-                B(Mov), R(context), R(10),
-  /*   57 S> */ B(Ldar), R(7),
+                B(Star), R(5),
+                B(Mov), R(context), R(8),
+  /*   57 S> */ B(Ldar), R(5),
                 B(JumpIfToBooleanTrue), U8(37),
                 B(LdaTrue),
-                B(Star), R(7),
+                B(Star), R(5),
                 B(CallProperty0), R(3), R(4), U8(11),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
-                B(LdaNamedProperty), R(11), U8(3), U8(9),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+                B(LdaNamedProperty), R(9), U8(2), U8(9),
                 B(JumpIfToBooleanTrue), U8(15),
-                B(LdaNamedProperty), R(11), U8(4), U8(7),
-                B(Star), R(11),
+                B(LdaNamedProperty), R(9), U8(3), U8(7),
+                B(Star), R(9),
                 B(LdaFalse),
-                B(Star), R(7),
-                B(Ldar), R(11),
+                B(Star), R(5),
+                B(Ldar), R(9),
                 B(Jump), U8(3),
                 B(LdaUndefined),
                 B(Star), R(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(9),
-                B(Star), R(8),
+                B(Star), R(7),
+                B(Star), R(6),
                 B(Jump), U8(7),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(LdaZero),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(LdaTheHole),
                 B(SetPendingMessage),
+                B(Star), R(8),
+                B(Ldar), R(5),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(4), U8(4), U8(13),
                 B(Star), R(10),
-                B(Ldar), R(7),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(4), U8(5), U8(13),
-                B(Star), R(12),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(13),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(11),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(14),
-                B(LdaConstant), U8(6),
-                B(Star), R(15),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+                B(Star), R(12),
+                B(LdaConstant), U8(5),
+                B(Star), R(13),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
                 B(Throw),
-                B(CallProperty0), R(12), R(4), U8(15),
+                B(CallProperty0), R(10), R(4), U8(15),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(14),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+                B(Star), R(12),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(13),
+                B(Star), R(11),
                 B(LdaZero),
-                B(TestReferenceEqual), R(8),
+                B(TestReferenceEqual), R(6),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(13),
+                B(Ldar), R(11),
                 B(ReThrow),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(8),
+                B(TestReferenceEqual), R(6),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(9),
+                B(Ldar), R(7),
                 B(ReThrow),
                 B(LdaUndefined),
   /*   65 S> */ B(Return),
 ]
 constant pool: [
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -104,8 +101,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [44, 86, 94],
-  [120, 153, 155],
+  [40, 82, 90],
+  [114, 147, 149],
 ]
 
 ---
@@ -113,127 +110,124 @@ snippet: "
   var x, y, a = [0,1,2,3];
   [,x,...y] = a;
 "
-frame size: 17
+frame size: 15
 parameter count: 1
-bytecode array length: 264
+bytecode array length: 258
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
                 B(Star), R(2),
-  /*   69 S> */ B(LdaNamedProperty), R(2), U8(1), U8(1),
+  /*   69 S> */ B(GetIterator), R(2), U8(1),
                 B(Star), R(7),
                 B(CallProperty0), R(7), R(2), U8(3),
-                B(Mov), R(2), R(6),
                 B(Mov), R(2), R(3),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(5),
-                B(LdaNamedProperty), R(5), U8(2), U8(5),
+                B(LdaNamedProperty), R(5), U8(1), U8(5),
                 B(Star), R(4),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(context), R(11),
-                B(Ldar), R(8),
+                B(Star), R(6),
+                B(Mov), R(context), R(9),
+                B(Ldar), R(6),
                 B(JumpIfToBooleanTrue), U8(35),
                 B(LdaTrue),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(CallProperty0), R(4), R(5), U8(11),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(3), U8(9),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(2), U8(9),
                 B(JumpIfToBooleanTrue), U8(13),
-                B(LdaNamedProperty), R(12), U8(4), U8(7),
-                B(Star), R(12),
+                B(LdaNamedProperty), R(10), U8(3), U8(7),
+                B(Star), R(10),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Ldar), R(12),
-  /*   61 S> */ B(Ldar), R(8),
+                B(Star), R(6),
+                B(Ldar), R(10),
+  /*   61 S> */ B(Ldar), R(6),
                 B(JumpIfToBooleanTrue), U8(37),
                 B(LdaTrue),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(CallProperty0), R(4), R(5), U8(13),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(3), U8(9),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(2), U8(9),
                 B(JumpIfToBooleanTrue), U8(15),
-                B(LdaNamedProperty), R(12), U8(4), U8(7),
-                B(Star), R(12),
+                B(LdaNamedProperty), R(10), U8(3), U8(7),
+                B(Star), R(10),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Ldar), R(12),
+                B(Star), R(6),
+                B(Ldar), R(10),
                 B(Jump), U8(3),
                 B(LdaUndefined),
                 B(Star), R(0),
   /*   63 S> */ B(CreateEmptyArrayLiteral), U8(15),
-                B(Star), R(13),
+                B(Star), R(11),
                 B(LdaZero),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(LdaTrue),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(CallProperty0), R(4), R(5), U8(19),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(3), U8(21),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(2), U8(21),
                 B(JumpIfToBooleanTrue), U8(19),
-                B(LdaNamedProperty), R(12), U8(4), U8(7),
-                B(StaInArrayLiteral), R(13), R(14), U8(16),
-                B(Ldar), R(14),
+                B(LdaNamedProperty), R(10), U8(3), U8(7),
+                B(StaInArrayLiteral), R(11), R(12), U8(16),
+                B(Ldar), R(12),
                 B(Inc), U8(18),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(JumpLoop), U8(33), I8(0),
-                B(Mov), R(13), R(1),
+                B(Mov), R(11), R(1),
                 B(LdaSmi), I8(-1),
-                B(Star), R(10),
-                B(Star), R(9),
+                B(Star), R(8),
+                B(Star), R(7),
                 B(Jump), U8(7),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaZero),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(LdaTheHole),
                 B(SetPendingMessage),
+                B(Star), R(9),
+                B(Ldar), R(6),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(5), U8(4), U8(23),
                 B(Star), R(11),
-                B(Ldar), R(8),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(5), U8(5), U8(23),
-                B(Star), R(13),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(14),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(12),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(15),
-                B(LdaConstant), U8(6),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+                B(Star), R(13),
+                B(LdaConstant), U8(5),
+                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
                 B(Throw),
-                B(CallProperty0), R(13), R(5), U8(25),
+                B(CallProperty0), R(11), R(5), U8(25),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(15),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+                B(Star), R(13),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(11),
+                B(Ldar), R(9),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
                 B(ReThrow),
                 B(LdaUndefined),
   /*   74 S> */ B(Return),
 ]
 constant pool: [
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -241,8 +235,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [44, 172, 180],
-  [206, 239, 241],
+  [40, 168, 176],
+  [200, 233, 235],
 ]
 
 ---
@@ -250,114 +244,111 @@ snippet: "
   var x={}, y, a = [0];
   [x.foo,y=4] = a;
 "
-frame size: 18
+frame size: 16
 parameter count: 1
-bytecode array length: 229
+bytecode array length: 223
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   40 S> */ B(CreateEmptyObjectLiteral),
                 B(Star), R(0),
   /*   51 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
                 B(Star), R(2),
-  /*   68 S> */ B(LdaNamedProperty), R(2), U8(1), U8(1),
+  /*   68 S> */ B(GetIterator), R(2), U8(1),
                 B(Star), R(7),
                 B(CallProperty0), R(7), R(2), U8(3),
-                B(Mov), R(2), R(6),
                 B(Mov), R(2), R(3),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(5),
-                B(LdaNamedProperty), R(5), U8(2), U8(5),
+                B(LdaNamedProperty), R(5), U8(1), U8(5),
                 B(Star), R(4),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(context), R(11),
-  /*   59 S> */ B(Ldar), R(8),
-                B(Mov), R(0), R(13),
+                B(Star), R(6),
+                B(Mov), R(context), R(9),
+  /*   59 S> */ B(Ldar), R(6),
+                B(Mov), R(0), R(11),
                 B(JumpIfToBooleanTrue), U8(37),
                 B(LdaTrue),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(CallProperty0), R(4), R(5), U8(11),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(3), U8(9),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(2), U8(9),
                 B(JumpIfToBooleanTrue), U8(15),
-                B(LdaNamedProperty), R(12), U8(4), U8(7),
-                B(Star), R(12),
+                B(LdaNamedProperty), R(10), U8(3), U8(7),
+                B(Star), R(10),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Ldar), R(12),
+                B(Star), R(6),
+                B(Ldar), R(10),
                 B(Jump), U8(3),
                 B(LdaUndefined),
-                B(StaNamedProperty), R(13), U8(5), U8(13),
-  /*   63 S> */ B(Ldar), R(8),
+                B(StaNamedProperty), R(11), U8(4), U8(13),
+  /*   63 S> */ B(Ldar), R(6),
                 B(JumpIfToBooleanTrue), U8(37),
                 B(LdaTrue),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(CallProperty0), R(4), R(5), U8(15),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(3), U8(9),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(2), U8(9),
                 B(JumpIfToBooleanTrue), U8(15),
-                B(LdaNamedProperty), R(12), U8(4), U8(7),
-                B(Star), R(12),
+                B(LdaNamedProperty), R(10), U8(3), U8(7),
+                B(Star), R(10),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Ldar), R(12),
+                B(Star), R(6),
+                B(Ldar), R(10),
                 B(JumpIfNotUndefined), U8(4),
                 B(LdaSmi), I8(4),
                 B(Star), R(1),
                 B(LdaSmi), I8(-1),
-                B(Star), R(10),
-                B(Star), R(9),
+                B(Star), R(8),
+                B(Star), R(7),
                 B(Jump), U8(7),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaZero),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(LdaTheHole),
                 B(SetPendingMessage),
-                B(Star), R(11),
-                B(Ldar), R(8),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(5), U8(6), U8(17),
-                B(Star), R(14),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(15),
+                B(Star), R(9),
+                B(Ldar), R(6),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(5), U8(5), U8(17),
+                B(Star), R(12),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(13),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(16),
-                B(LdaConstant), U8(7),
-                B(Star), R(17),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+                B(Star), R(14),
+                B(LdaConstant), U8(6),
+                B(Star), R(15),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
                 B(Throw),
-                B(CallProperty0), R(14), R(5), U8(19),
+                B(CallProperty0), R(12), R(5), U8(19),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
+                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(15),
+                B(Star), R(13),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(15),
+                B(Ldar), R(13),
                 B(ReThrow),
-                B(Ldar), R(11),
+                B(Ldar), R(9),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
                 B(ReThrow),
                 B(LdaUndefined),
   /*   73 S> */ B(Return),
 ]
 constant pool: [
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -366,8 +357,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [47, 137, 145],
-  [171, 204, 206],
+  [43, 133, 141],
+  [165, 198, 200],
 ]
 
 ---
@@ -375,18 +366,14 @@ snippet: "
   var x, a = {x:1};
   ({x} = a);
 "
-frame size: 3
+frame size: 2
 parameter count: 1
-bytecode array length: 26
+bytecode array length: 15
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
                 B(Star), R(1),
-  /*   52 S> */ B(JumpIfNull), U8(4),
-                B(JumpIfNotUndefined), U8(7),
-  /*   53 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
-                B(Star), R(2),
-  /*   54 S> */ B(LdaNamedProperty), R(2), U8(1), U8(1),
+  /*   54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
                 B(Star), R(0),
                 B(LdaUndefined),
   /*   63 S> */ B(Return),
@@ -403,20 +390,16 @@ snippet: "
   var x={}, a = {y:1};
   ({y:x.foo} = a);
 "
-frame size: 3
+frame size: 2
 parameter count: 1
-bytecode array length: 31
+bytecode array length: 20
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   40 S> */ B(CreateEmptyObjectLiteral),
                 B(Star), R(0),
   /*   48 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
                 B(Star), R(1),
-  /*   55 S> */ B(JumpIfNull), U8(4),
-                B(JumpIfNotUndefined), U8(7),
-  /*   56 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
-  /*   61 S> */ B(Star), R(2),
-                B(LdaNamedProperty), R(2), U8(1), U8(1),
+  /*   61 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
                 B(StaNamedProperty), R(0), U8(2), U8(3),
                 B(LdaUndefined),
   /*   72 S> */ B(Return),
@@ -436,18 +419,15 @@ snippet: "
 "
 frame size: 4
 parameter count: 1
-bytecode array length: 41
+bytecode array length: 33
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
                 B(Star), R(1),
-  /*   62 S> */ B(JumpIfNull), U8(4),
-                B(JumpIfNotUndefined), U8(7),
-  /*   63 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
-                B(Star), R(2),
   /*   64 S> */ B(LdaConstant), U8(1),
                 B(Star), R(3),
-                B(LdaNamedProperty), R(2), U8(1), U8(1),
+                B(LdaNamedProperty), R(1), U8(1), U8(1),
+                B(Mov), R(1), R(2),
                 B(JumpIfNotUndefined), U8(3),
                 B(LdaZero),
                 B(Star), R(0),
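
The three destructuring snippets above all lose the same preamble: the generator no longer emits a JumpIfNull/JumpIfNotUndefined pair plus a Runtime::kThrowPatternAssignmentNonCoercible call before reading properties off the right-hand side, which is where the shorter bytecode arrays and smaller frame sizes come from. Observable behavior should be unchanged, since a nullish RHS presumably now raises its TypeError from the first property load itself. A minimal JavaScript check of that invariant (illustrative only, not part of the patch):

    // Illustrative only: destructuring a nullish value throws a
    // TypeError both before and after the dedicated check was removed.
    let x;
    try {
      ({ x } = null);
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }
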
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 1cafe42d282fa2..f60e591040b948 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -14,9 +14,9 @@ snippet: "
   }
   f();
 "
-frame size: 21
+frame size: 19
 parameter count: 1
-bytecode array length: 325
+bytecode array length: 320
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
                 B(Mov), R(closure), R(4),
@@ -28,110 +28,108 @@ bytecodes: [
   /*   43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
                 B(Star), R(7),
                 B(LdaNamedProperty), R(7), U8(3), U8(1),
-                B(JumpIfUndefined), U8(17),
-                B(JumpIfNull), U8(15),
+                B(JumpIfUndefinedOrNull), U8(15),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(7), U8(3),
-                B(JumpIfJSReceiver), U8(23),
+                B(JumpIfJSReceiver), U8(22),
                 B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
-                B(LdaNamedProperty), R(7), U8(4), U8(5),
+                B(GetIterator), R(7), U8(5),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(7), U8(7),
                 B(Star), R(8),
                 B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(5), U8(9),
+                B(LdaNamedProperty), R(6), U8(4), U8(9),
                 B(Star), R(5),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(context), R(12),
+                B(Star), R(7),
+                B(Mov), R(context), R(10),
                 B(LdaTrue),
-                B(Star), R(9),
+                B(Star), R(7),
   /*   38 S> */ B(CallProperty0), R(5), R(6), U8(11),
-                B(Star), R(15),
-                B(Mov), R(0), R(14),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
-                B(SuspendGenerator), R(0), R(0), U8(14), U8(0),
-                B(ResumeGenerator), R(0), R(0), U8(14),
-                B(Star), R(14),
+                B(Star), R(13),
+                B(Mov), R(0), R(12),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(12), U8(2),
+                B(SuspendGenerator), R(0), R(0), U8(12), U8(0),
+                B(ResumeGenerator), R(0), R(0), U8(12),
+                B(Star), R(12),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(Star), R(15),
+                B(Star), R(13),
                 B(LdaZero),
-                B(TestReferenceEqual), R(15),
+                B(TestReferenceEqual), R(13),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(14),
-                B(Mov), R(14), R(13),
+                B(Ldar), R(12),
+                B(Mov), R(12), R(11),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
-                B(LdaNamedProperty), R(13), U8(6), U8(13),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+                B(LdaNamedProperty), R(11), U8(5), U8(13),
                 B(JumpIfToBooleanTrue), U8(23),
-                B(LdaNamedProperty), R(13), U8(7), U8(15),
-                B(Star), R(13),
+                B(LdaNamedProperty), R(11), U8(6), U8(15),
+                B(Star), R(11),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(13), R(1),
+                B(Star), R(7),
+                B(Mov), R(11), R(1),
   /*   23 E> */ B(StackCheck),
   /*   38 S> */ B(Mov), R(1), R(3),
-                B(Ldar), R(13),
+                B(Ldar), R(11),
                 B(JumpLoop), U8(77), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(11),
-                B(Star), R(10),
+                B(Star), R(9),
+                B(Star), R(8),
                 B(Jump), U8(7),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(LdaZero),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaTheHole),
   /*   38 E> */ B(SetPendingMessage),
-                B(Star), R(12),
-                B(Ldar), R(9),
-                B(JumpIfToBooleanTrue), U8(96),
-                B(LdaNamedProperty), R(6), U8(8), U8(17),
-                B(Star), R(16),
-                B(JumpIfUndefined), U8(88),
-                B(JumpIfNull), U8(86),
-                B(Mov), R(context), R(17),
+                B(Star), R(10),
+                B(Ldar), R(7),
+                B(JumpIfToBooleanTrue), U8(94),
+                B(LdaNamedProperty), R(6), U8(7), U8(17),
+                B(Star), R(14),
+                B(JumpIfUndefinedOrNull), U8(86),
+                B(Mov), R(context), R(15),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(18),
-                B(LdaConstant), U8(9),
-                B(Star), R(19),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
+                B(Star), R(16),
+                B(LdaConstant), U8(8),
+                B(Star), R(17),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
                 B(Throw),
-                B(CallProperty0), R(16), R(6), U8(19),
-                B(Star), R(19),
-                B(Mov), R(0), R(18),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(18), U8(2),
-                B(SuspendGenerator), R(0), R(0), U8(18), U8(1),
-                B(ResumeGenerator), R(0), R(0), U8(18),
-                B(Star), R(18),
+                B(CallProperty0), R(14), R(6), U8(19),
+                B(Star), R(17),
+                B(Mov), R(0), R(16),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(16), U8(2),
+                B(SuspendGenerator), R(0), R(0), U8(16), U8(1),
+                B(ResumeGenerator), R(0), R(0), U8(16),
+                B(Star), R(16),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(Star), R(19),
+                B(Star), R(17),
                 B(LdaZero),
-                B(TestReferenceEqual), R(19),
+                B(TestReferenceEqual), R(17),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(18),
+                B(Ldar), R(16),
                 B(ReThrow),
-                B(Ldar), R(18),
+                B(Ldar), R(16),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(20),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(20), U8(1),
+                B(Star), R(18),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(18), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(17),
+                B(Star), R(15),
                 B(LdaZero),
-                B(TestReferenceEqual), R(10),
+                B(TestReferenceEqual), R(8),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(17),
+                B(Ldar), R(15),
                 B(ReThrow),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(10),
+                B(TestReferenceEqual), R(8),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(11),
+                B(Ldar), R(9),
                 B(ReThrow),
                 B(LdaUndefined),
                 B(Star), R(6),
@@ -141,7 +139,7 @@ bytecodes: [
                 B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
   /*   57 S> */ B(Return),
                 B(Star), R(5),
-                B(CreateCatchContext), R(5), U8(10),
+                B(CreateCatchContext), R(5), U8(9),
                 B(Star), R(4),
                 B(LdaTheHole),
                 B(SetPendingMessage),
@@ -156,11 +154,10 @@ bytecodes: [
   /*   57 S> */ B(Return),
 ]
 constant pool: [
-  Smi [98],
-  Smi [229],
+  Smi [95],
+  Smi [224],
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
   SYMBOL_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -169,9 +166,9 @@ constant pool: [
   SCOPE_INFO_TYPE,
 ]
 handlers: [
-  [20, 297, 297],
-  [77, 157, 165],
-  [191, 260, 262],
+  [20, 292, 292],
+  [74, 154, 162],
+  [186, 255, 257],
 ]
 
 ---
@@ -181,9 +178,9 @@ snippet: "
   }
   f();
 "
-frame size: 21
+frame size: 19
 parameter count: 1
-bytecode array length: 346
+bytecode array length: 341
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
                 B(Mov), R(closure), R(4),
@@ -195,118 +192,116 @@ bytecodes: [
   /*   43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
                 B(Star), R(7),
                 B(LdaNamedProperty), R(7), U8(3), U8(1),
-                B(JumpIfUndefined), U8(17),
-                B(JumpIfNull), U8(15),
+                B(JumpIfUndefinedOrNull), U8(15),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(7), U8(3),
-                B(JumpIfJSReceiver), U8(23),
+                B(JumpIfJSReceiver), U8(22),
                 B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
-                B(LdaNamedProperty), R(7), U8(4), U8(5),
+                B(GetIterator), R(7), U8(5),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(7), U8(7),
                 B(Star), R(8),
                 B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(5), U8(9),
+                B(LdaNamedProperty), R(6), U8(4), U8(9),
                 B(Star), R(5),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(context), R(12),
+                B(Star), R(7),
+                B(Mov), R(context), R(10),
                 B(LdaTrue),
-                B(Star), R(9),
+                B(Star), R(7),
   /*   38 S> */ B(CallProperty0), R(5), R(6), U8(11),
-                B(Star), R(15),
-                B(Mov), R(0), R(14),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
-                B(SuspendGenerator), R(0), R(0), U8(14), U8(0),
-                B(ResumeGenerator), R(0), R(0), U8(14),
-                B(Star), R(14),
+                B(Star), R(13),
+                B(Mov), R(0), R(12),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(12), U8(2),
+                B(SuspendGenerator), R(0), R(0), U8(12), U8(0),
+                B(ResumeGenerator), R(0), R(0), U8(12),
+                B(Star), R(12),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(Star), R(15),
+                B(Star), R(13),
                 B(LdaZero),
-                B(TestReferenceEqual), R(15),
+                B(TestReferenceEqual), R(13),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(14),
-                B(Mov), R(14), R(13),
+                B(Ldar), R(12),
+                B(Mov), R(12), R(11),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
-                B(LdaNamedProperty), R(13), U8(6), U8(13),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+                B(LdaNamedProperty), R(11), U8(5), U8(13),
                 B(JumpIfToBooleanTrue), U8(27),
-                B(LdaNamedProperty), R(13), U8(7), U8(15),
-                B(Star), R(13),
+                B(LdaNamedProperty), R(11), U8(6), U8(15),
+                B(Star), R(11),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(13), R(1),
+                B(Star), R(7),
+                B(Mov), R(11), R(1),
   /*   23 E> */ B(StackCheck),
   /*   38 S> */ B(Mov), R(1), R(3),
   /*   56 S> */ B(LdaSmi), I8(1),
-                B(Mov), R(13), R(11),
-                B(Star), R(10),
+                B(Mov), R(11), R(9),
+                B(Star), R(8),
                 B(Jump), U8(15),
                 B(LdaSmi), I8(-1),
-                B(Star), R(11),
-                B(Star), R(10),
+                B(Star), R(9),
+                B(Star), R(8),
                 B(Jump), U8(7),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(LdaZero),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaTheHole),
                 B(SetPendingMessage),
-                B(Star), R(12),
-                B(Ldar), R(9),
-                B(JumpIfToBooleanTrue), U8(96),
-                B(LdaNamedProperty), R(6), U8(8), U8(17),
-                B(Star), R(16),
-                B(JumpIfUndefined), U8(88),
-                B(JumpIfNull), U8(86),
-                B(Mov), R(context), R(17),
+                B(Star), R(10),
+                B(Ldar), R(7),
+                B(JumpIfToBooleanTrue), U8(94),
+                B(LdaNamedProperty), R(6), U8(7), U8(17),
+                B(Star), R(14),
+                B(JumpIfUndefinedOrNull), U8(86),
+                B(Mov), R(context), R(15),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(18),
-                B(LdaConstant), U8(9),
-                B(Star), R(19),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
+                B(Star), R(16),
+                B(LdaConstant), U8(8),
+                B(Star), R(17),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
                 B(Throw),
-                B(CallProperty0), R(16), R(6), U8(19),
-                B(Star), R(19),
-                B(Mov), R(0), R(18),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(18), U8(2),
-                B(SuspendGenerator), R(0), R(0), U8(18), U8(1),
-                B(ResumeGenerator), R(0), R(0), U8(18),
-                B(Star), R(18),
+                B(CallProperty0), R(14), R(6), U8(19),
+                B(Star), R(17),
+                B(Mov), R(0), R(16),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(16), U8(2),
+                B(SuspendGenerator), R(0), R(0), U8(16), U8(1),
+                B(ResumeGenerator), R(0), R(0), U8(16),
+                B(Star), R(16),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(Star), R(19),
+                B(Star), R(17),
                 B(LdaZero),
-                B(TestReferenceEqual), R(19),
+                B(TestReferenceEqual), R(17),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(18),
+                B(Ldar), R(16),
                 B(ReThrow),
-                B(Ldar), R(18),
+                B(Ldar), R(16),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(20),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(20), U8(1),
+                B(Star), R(18),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(18), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(17),
+                B(Star), R(15),
                 B(LdaZero),
-                B(TestReferenceEqual), R(10),
+                B(TestReferenceEqual), R(8),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(17),
+                B(Ldar), R(15),
                 B(ReThrow),
-                B(Ldar), R(12),
-                B(SetPendingMessage),
                 B(Ldar), R(10),
-                B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
+                B(SetPendingMessage),
+                B(Ldar), R(8),
+                B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
                 B(Jump), U8(19),
-                B(Ldar), R(11),
+                B(Ldar), R(9),
                 B(ReThrow),
                 B(LdaTrue),
-                B(Star), R(18),
-                B(Mov), R(0), R(16),
-                B(Mov), R(11), R(17),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(16), U8(3),
+                B(Star), R(16),
+                B(Mov), R(0), R(14),
+                B(Mov), R(9), R(15),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(14), U8(3),
   /*   68 S> */ B(Return),
                 B(LdaUndefined),
                 B(Star), R(6),
@@ -316,7 +311,7 @@ bytecodes: [
                 B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
   /*   68 S> */ B(Return),
                 B(Star), R(5),
-                B(CreateCatchContext), R(5), U8(12),
+                B(CreateCatchContext), R(5), U8(11),
                 B(Star), R(4),
                 B(LdaTheHole),
                 B(SetPendingMessage),
@@ -331,11 +326,10 @@ bytecodes: [
   /*   68 S> */ B(Return),
 ]
 constant pool: [
-  Smi [98],
-  Smi [233],
+  Smi [95],
+  Smi [228],
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
   SYMBOL_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -346,9 +340,9 @@ constant pool: [
   SCOPE_INFO_TYPE,
 ]
 handlers: [
-  [20, 318, 318],
-  [77, 161, 169],
-  [195, 264, 266],
+  [20, 313, 313],
+  [74, 158, 166],
+  [190, 259, 261],
 ]
 
 ---
@@ -361,9 +355,9 @@ snippet: "
   }
   f();
 "
-frame size: 21
+frame size: 19
 parameter count: 1
-bytecode array length: 341
+bytecode array length: 336
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
                 B(Mov), R(closure), R(4),
@@ -375,50 +369,49 @@ bytecodes: [
   /*   43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
                 B(Star), R(7),
                 B(LdaNamedProperty), R(7), U8(3), U8(1),
-                B(JumpIfUndefined), U8(17),
-                B(JumpIfNull), U8(15),
+                B(JumpIfUndefinedOrNull), U8(15),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(7), U8(3),
-                B(JumpIfJSReceiver), U8(23),
+                B(JumpIfJSReceiver), U8(22),
                 B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
-                B(LdaNamedProperty), R(7), U8(4), U8(5),
+                B(GetIterator), R(7), U8(5),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(7), U8(7),
                 B(Star), R(8),
                 B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(5), U8(9),
+                B(LdaNamedProperty), R(6), U8(4), U8(9),
                 B(Star), R(5),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(context), R(12),
+                B(Star), R(7),
+                B(Mov), R(context), R(10),
                 B(LdaTrue),
-                B(Star), R(9),
+                B(Star), R(7),
   /*   38 S> */ B(CallProperty0), R(5), R(6), U8(11),
-                B(Star), R(15),
-                B(Mov), R(0), R(14),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
-                B(SuspendGenerator), R(0), R(0), U8(14), U8(0),
-                B(ResumeGenerator), R(0), R(0), U8(14),
-                B(Star), R(14),
+                B(Star), R(13),
+                B(Mov), R(0), R(12),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(12), U8(2),
+                B(SuspendGenerator), R(0), R(0), U8(12), U8(0),
+                B(ResumeGenerator), R(0), R(0), U8(12),
+                B(Star), R(12),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(Star), R(15),
+                B(Star), R(13),
                 B(LdaZero),
-                B(TestReferenceEqual), R(15),
+                B(TestReferenceEqual), R(13),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(14),
-                B(Mov), R(14), R(13),
+                B(Ldar), R(12),
+                B(Mov), R(12), R(11),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
-                B(LdaNamedProperty), R(13), U8(6), U8(13),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+                B(LdaNamedProperty), R(11), U8(5), U8(13),
                 B(JumpIfToBooleanTrue), U8(39),
-                B(LdaNamedProperty), R(13), U8(7), U8(15),
-                B(Star), R(13),
+                B(LdaNamedProperty), R(11), U8(6), U8(15),
+                B(Star), R(11),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(13), R(1),
+                B(Star), R(7),
+                B(Mov), R(11), R(1),
   /*   23 E> */ B(StackCheck),
   /*   38 S> */ B(Mov), R(1), R(3),
   /*   63 S> */ B(LdaSmi), I8(10),
@@ -431,61 +424,60 @@ bytecodes: [
   /*  103 S> */ B(Jump), U8(5),
                 B(JumpLoop), U8(93), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(11),
-                B(Star), R(10),
+                B(Star), R(9),
+                B(Star), R(8),
                 B(Jump), U8(7),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(LdaZero),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaTheHole),
                 B(SetPendingMessage),
-                B(Star), R(12),
-                B(Ldar), R(9),
-                B(JumpIfToBooleanTrue), U8(96),
-                B(LdaNamedProperty), R(6), U8(8), U8(19),
-                B(Star), R(16),
-                B(JumpIfUndefined), U8(88),
-                B(JumpIfNull), U8(86),
-                B(Mov), R(context), R(17),
+                B(Star), R(10),
+                B(Ldar), R(7),
+                B(JumpIfToBooleanTrue), U8(94),
+                B(LdaNamedProperty), R(6), U8(7), U8(19),
+                B(Star), R(14),
+                B(JumpIfUndefinedOrNull), U8(86),
+                B(Mov), R(context), R(15),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(18),
-                B(LdaConstant), U8(9),
-                B(Star), R(19),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
+                B(Star), R(16),
+                B(LdaConstant), U8(8),
+                B(Star), R(17),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
                 B(Throw),
-                B(CallProperty0), R(16), R(6), U8(21),
-                B(Star), R(19),
-                B(Mov), R(0), R(18),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(18), U8(2),
-                B(SuspendGenerator), R(0), R(0), U8(18), U8(1),
-                B(ResumeGenerator), R(0), R(0), U8(18),
-                B(Star), R(18),
+                B(CallProperty0), R(14), R(6), U8(21),
+                B(Star), R(17),
+                B(Mov), R(0), R(16),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(16), U8(2),
+                B(SuspendGenerator), R(0), R(0), U8(16), U8(1),
+                B(ResumeGenerator), R(0), R(0), U8(16),
+                B(Star), R(16),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(Star), R(19),
+                B(Star), R(17),
                 B(LdaZero),
-                B(TestReferenceEqual), R(19),
+                B(TestReferenceEqual), R(17),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(18),
+                B(Ldar), R(16),
                 B(ReThrow),
-                B(Ldar), R(18),
+                B(Ldar), R(16),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(20),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(20), U8(1),
+                B(Star), R(18),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(18), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(17),
+                B(Star), R(15),
                 B(LdaZero),
-                B(TestReferenceEqual), R(10),
+                B(TestReferenceEqual), R(8),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(17),
+                B(Ldar), R(15),
                 B(ReThrow),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(10),
+                B(TestReferenceEqual), R(8),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(11),
+                B(Ldar), R(9),
                 B(ReThrow),
                 B(LdaUndefined),
                 B(Star), R(6),
@@ -495,7 +487,7 @@ bytecodes: [
                 B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
   /*  114 S> */ B(Return),
                 B(Star), R(5),
-                B(CreateCatchContext), R(5), U8(10),
+                B(CreateCatchContext), R(5), U8(9),
                 B(Star), R(4),
                 B(LdaTheHole),
                 B(SetPendingMessage),
@@ -510,11 +502,10 @@ bytecodes: [
   /*  114 S> */ B(Return),
 ]
 constant pool: [
-  Smi [98],
-  Smi [245],
+  Smi [95],
+  Smi [240],
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
   SYMBOL_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -523,9 +514,9 @@ constant pool: [
   SCOPE_INFO_TYPE,
 ]
 handlers: [
-  [20, 313, 313],
-  [77, 173, 181],
-  [207, 276, 278],
+  [20, 308, 308],
+  [74, 170, 178],
+  [202, 271, 273],
 ]
 
 ---
@@ -536,9 +527,9 @@ snippet: "
   }
   f();
 "
-frame size: 17
+frame size: 15
 parameter count: 1
-bytecode array length: 261
+bytecode array length: 258
 bytecodes: [
                 B(Mov), R(closure), R(2),
                 B(Mov), R(this), R(3),
@@ -550,86 +541,85 @@ bytecodes: [
                 B(Star), R(1),
   /*   68 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
                 B(Star), R(5),
-                B(LdaNamedProperty), R(5), U8(2), U8(2),
+                B(GetIterator), R(5), U8(2),
                 B(Star), R(6),
                 B(CallProperty0), R(6), R(5), U8(4),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(4),
-                B(LdaNamedProperty), R(4), U8(3), U8(6),
+                B(LdaNamedProperty), R(4), U8(2), U8(6),
                 B(Star), R(3),
                 B(LdaFalse),
-                B(Star), R(7),
-                B(Mov), R(context), R(10),
+                B(Star), R(5),
+                B(Mov), R(context), R(8),
                 B(LdaTrue),
-                B(Star), R(7),
+                B(Star), R(5),
   /*   59 S> */ B(CallProperty0), R(3), R(4), U8(8),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
-                B(LdaNamedProperty), R(11), U8(4), U8(10),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+                B(LdaNamedProperty), R(9), U8(3), U8(10),
                 B(JumpIfToBooleanTrue), U8(33),
-                B(LdaNamedProperty), R(11), U8(5), U8(12),
-                B(Star), R(11),
+                B(LdaNamedProperty), R(9), U8(4), U8(12),
+                B(Star), R(9),
                 B(LdaFalse),
-                B(Star), R(7),
-                B(Ldar), R(11),
-  /*   58 E> */ B(StaNamedProperty), R(1), U8(6), U8(14),
+                B(Star), R(5),
+                B(Ldar), R(9),
+  /*   58 E> */ B(StaNamedProperty), R(1), U8(5), U8(14),
   /*   53 E> */ B(StackCheck),
-  /*   87 S> */ B(LdaNamedProperty), R(1), U8(6), U8(16),
-                B(Star), R(9),
+  /*   87 S> */ B(LdaNamedProperty), R(1), U8(5), U8(16),
+                B(Star), R(7),
                 B(LdaSmi), I8(1),
-                B(Star), R(8),
-                B(Mov), R(1), R(12),
+                B(Star), R(6),
+                B(Mov), R(1), R(10),
                 B(Jump), U8(15),
                 B(LdaSmi), I8(-1),
-                B(Star), R(9),
-                B(Star), R(8),
+                B(Star), R(7),
+                B(Star), R(6),
                 B(Jump), U8(7),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(LdaZero),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(LdaTheHole),
                 B(SetPendingMessage),
-                B(Star), R(10),
-                B(Ldar), R(7),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(4), U8(7), U8(18),
-                B(Star), R(13),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(14),
+                B(Star), R(8),
+                B(Ldar), R(5),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(4), U8(6), U8(18),
+                B(Star), R(11),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(12),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(15),
-                B(LdaConstant), U8(8),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+                B(Star), R(13),
+                B(LdaConstant), U8(7),
+                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
                 B(Throw),
-                B(CallProperty0), R(13), R(4), U8(20),
+                B(CallProperty0), R(11), R(4), U8(20),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(15),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+                B(Star), R(13),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(LdaZero),
-                B(TestReferenceEqual), R(8),
+                B(TestReferenceEqual), R(6),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(10),
-                B(SetPendingMessage),
                 B(Ldar), R(8),
-                B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
+                B(SetPendingMessage),
+                B(Ldar), R(6),
+                B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
                 B(Jump), U8(19),
-                B(Ldar), R(9),
+                B(Ldar), R(7),
                 B(ReThrow),
                 B(LdaFalse),
-                B(Star), R(15),
-                B(Mov), R(0), R(13),
-                B(Mov), R(9), R(14),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(13), U8(3),
+                B(Star), R(13),
+                B(Mov), R(0), R(11),
+                B(Mov), R(7), R(12),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(11), U8(3),
   /*   96 S> */ B(Return),
                 B(LdaUndefined),
                 B(Star), R(4),
@@ -639,7 +629,7 @@ bytecodes: [
                 B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
   /*   96 S> */ B(Return),
                 B(Star), R(3),
-                B(CreateCatchContext), R(3), U8(11),
+                B(CreateCatchContext), R(3), U8(10),
                 B(Star), R(2),
                 B(LdaTheHole),
                 B(SetPendingMessage),
@@ -656,7 +646,6 @@ bytecodes: [
 constant pool: [
   OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -668,8 +657,8 @@ constant pool: [
   SCOPE_INFO_TYPE,
 ]
 handlers: [
-  [16, 233, 233],
-  [59, 112, 120],
-  [146, 179, 181],
+  [16, 230, 230],
+  [58, 111, 119],
+  [143, 176, 178],
 ]
 
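Two rewrites repeat through every ForAwaitOf hunk above: the JumpIfUndefined/JumpIfNull pair guarding the @@asyncIterator method becomes one fused JumpIfUndefinedOrNull, and the @@iterator load for the sync fallback becomes the dedicated GetIterator bytecode, which is why one SYMBOL_TYPE entry drops out of each constant pool and all later pool indices shift down by one. A rough JavaScript-level sketch of the acquisition logic these hunks compile (hypothetical helper name, error handling simplified):

    // Sketch only: how `for await` obtains its iterator.
    async function* values(iterable) {  // hypothetical helper
      // JumpIfUndefinedOrNull: one fused nullish test on the method.
      let method = iterable[Symbol.asyncIterator];
      if (method === undefined || method === null) {
        // GetIterator loads @@iterator without a SYMBOL_TYPE constant;
        // the sync iterator is then wrapped via CreateAsyncFromSyncIterator.
        method = iterable[Symbol.iterator];
      }
      const iterator = method.call(iterable);
      let result;
      while (!(result = await iterator.next()).done) {
        yield result.value;
      }
    }
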
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index 8c24e461cddf7c..fe1defeefbb2aa 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -63,13 +63,12 @@ snippet: "
 "
 frame size: 8
 parameter count: 1
-bytecode array length: 46
+bytecode array length: 44
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   42 S> */ B(LdaConstant), U8(0),
                 B(Star), R(0),
-  /*   68 S> */ B(JumpIfUndefined), U8(39),
-                B(JumpIfNull), U8(37),
+  /*   68 S> */ B(JumpIfUndefinedOrNull), U8(37),
                 B(ToObject), R(3),
                 B(ForInEnumerate), R(3),
                 B(ForInPrepare), R(4), U8(0),
@@ -102,14 +101,13 @@ snippet: "
 "
 frame size: 9
 parameter count: 1
-bytecode array length: 58
+bytecode array length: 56
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   42 S> */ B(LdaZero),
                 B(Star), R(0),
   /*   59 S> */ B(CreateArrayLiteral), U8(0), U8(1), U8(37),
-                B(JumpIfUndefined), U8(48),
-                B(JumpIfNull), U8(46),
+                B(JumpIfUndefinedOrNull), U8(46),
                 B(ToObject), R(3),
                 B(ForInEnumerate), R(3),
                 B(ForInPrepare), R(4), U8(0),
@@ -148,14 +146,13 @@ snippet: "
 "
 frame size: 7
 parameter count: 1
-bytecode array length: 85
+bytecode array length: 83
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
                 B(Star), R(0),
   /*   77 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(37),
-                B(JumpIfUndefined), U8(72),
-                B(JumpIfNull), U8(70),
+                B(JumpIfUndefinedOrNull), U8(70),
                 B(ToObject), R(1),
                 B(ForInEnumerate), R(1),
                 B(ForInPrepare), R(2), U8(1),
@@ -202,14 +199,13 @@ snippet: "
 "
 frame size: 9
 parameter count: 1
-bytecode array length: 64
+bytecode array length: 62
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
                 B(Star), R(0),
   /*   72 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(37),
-                B(JumpIfUndefined), U8(51),
-                B(JumpIfNull), U8(49),
+                B(JumpIfUndefinedOrNull), U8(49),
                 B(ToObject), R(1),
                 B(ForInEnumerate), R(1),
                 B(ForInPrepare), R(2), U8(1),
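
Each ForIn hunk above is the same two-byte saving: the JumpIfUndefined/JumpIfNull pair in the loop header fuses into a single JumpIfUndefinedOrNull. The guarded semantics are unchanged and easy to confirm from JavaScript:

    // for-in over a nullish value iterates zero times instead of
    // throwing; this is exactly the branch the fused bytecode takes.
    for (const k in null)      throw new Error("unreachable");
    for (const k in undefined) throw new Error("unreachable");
    console.log("skipped both loops");
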
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 17e1d5934382fb..1557e8d2a8ba57 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -9,90 +9,88 @@ wrap: yes
 snippet: "
   for (var p of [0, 1, 2]) {}
 "
-frame size: 15
+frame size: 13
 parameter count: 1
-bytecode array length: 173
+bytecode array length: 170
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
                 B(Star), R(4),
-                B(LdaNamedProperty), R(4), U8(1), U8(1),
+                B(GetIterator), R(4), U8(1),
                 B(Star), R(5),
                 B(CallProperty0), R(5), R(4), U8(3),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(3),
-                B(LdaNamedProperty), R(3), U8(2), U8(5),
+                B(LdaNamedProperty), R(3), U8(1), U8(5),
                 B(Star), R(2),
                 B(LdaFalse),
-                B(Star), R(6),
-                B(Mov), R(context), R(9),
+                B(Star), R(4),
+                B(Mov), R(context), R(7),
                 B(LdaTrue),
-                B(Star), R(6),
+                B(Star), R(4),
   /*   43 S> */ B(CallProperty0), R(2), R(3), U8(7),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
-                B(LdaNamedProperty), R(10), U8(3), U8(9),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+                B(LdaNamedProperty), R(8), U8(2), U8(9),
                 B(JumpIfToBooleanTrue), U8(23),
-                B(LdaNamedProperty), R(10), U8(4), U8(11),
-                B(Star), R(10),
+                B(LdaNamedProperty), R(8), U8(3), U8(11),
+                B(Star), R(8),
                 B(LdaFalse),
-                B(Star), R(6),
-                B(Mov), R(10), R(1),
+                B(Star), R(4),
+                B(Mov), R(8), R(1),
   /*   34 E> */ B(StackCheck),
   /*   43 S> */ B(Mov), R(1), R(0),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
                 B(JumpLoop), U8(40), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(8),
-                B(Star), R(7),
+                B(Star), R(6),
+                B(Star), R(5),
                 B(Jump), U8(7),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(LdaZero),
-                B(Star), R(7),
+                B(Star), R(5),
                 B(LdaTheHole),
   /*   43 E> */ B(SetPendingMessage),
+                B(Star), R(7),
+                B(Ldar), R(4),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(3), U8(4), U8(13),
                 B(Star), R(9),
-                B(Ldar), R(6),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(3), U8(5), U8(13),
-                B(Star), R(11),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(12),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(10),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(13),
-                B(LdaConstant), U8(6),
-                B(Star), R(14),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+                B(Star), R(11),
+                B(LdaConstant), U8(5),
+                B(Star), R(12),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
                 B(Throw),
-                B(CallProperty0), R(11), R(3), U8(15),
+                B(CallProperty0), R(9), R(3), U8(15),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(13),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+                B(Star), R(11),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(LdaZero),
-                B(TestReferenceEqual), R(7),
+                B(TestReferenceEqual), R(5),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(ReThrow),
-                B(Ldar), R(9),
+                B(Ldar), R(7),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(7),
+                B(TestReferenceEqual), R(5),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(8),
+                B(Ldar), R(6),
                 B(ReThrow),
                 B(LdaUndefined),
   /*   62 S> */ B(Return),
 ]
 constant pool: [
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -100,8 +98,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [38, 81, 89],
-  [115, 148, 150],
+  [37, 80, 88],
+  [112, 145, 147],
 ]
 
 ---
@@ -109,95 +107,92 @@ snippet: "
   var x = 'potatoes';
   for (var p of x) { return p; }
 "
-frame size: 16
+frame size: 14
 parameter count: 1
-bytecode array length: 184
+bytecode array length: 178
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   42 S> */ B(LdaConstant), U8(0),
                 B(Star), R(0),
-  /*   68 S> */ B(LdaNamedProperty), R(0), U8(1), U8(0),
+  /*   68 S> */ B(GetIterator), R(0), U8(0),
                 B(Star), R(6),
                 B(CallProperty0), R(6), R(0), U8(2),
-                B(Mov), R(0), R(5),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(4),
-                B(LdaNamedProperty), R(4), U8(2), U8(4),
+                B(LdaNamedProperty), R(4), U8(1), U8(4),
                 B(Star), R(3),
                 B(LdaFalse),
-                B(Star), R(7),
-                B(Mov), R(context), R(10),
+                B(Star), R(5),
+                B(Mov), R(context), R(8),
                 B(LdaTrue),
-                B(Star), R(7),
+                B(Star), R(5),
   /*   63 S> */ B(CallProperty0), R(3), R(4), U8(6),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
-                B(LdaNamedProperty), R(11), U8(3), U8(8),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+                B(LdaNamedProperty), R(9), U8(2), U8(8),
                 B(JumpIfToBooleanTrue), U8(27),
-                B(LdaNamedProperty), R(11), U8(4), U8(10),
-                B(Star), R(11),
+                B(LdaNamedProperty), R(9), U8(3), U8(10),
+                B(Star), R(9),
                 B(LdaFalse),
-                B(Star), R(7),
-                B(Mov), R(11), R(2),
+                B(Star), R(5),
+                B(Mov), R(9), R(2),
   /*   54 E> */ B(StackCheck),
   /*   63 S> */ B(Mov), R(2), R(1),
   /*   73 S> */ B(LdaSmi), I8(1),
-                B(Mov), R(11), R(9),
-                B(Star), R(8),
+                B(Mov), R(9), R(7),
+                B(Star), R(6),
                 B(Jump), U8(15),
                 B(LdaSmi), I8(-1),
-                B(Star), R(9),
-                B(Star), R(8),
+                B(Star), R(7),
+                B(Star), R(6),
                 B(Jump), U8(7),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(LdaZero),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(LdaTheHole),
                 B(SetPendingMessage),
+                B(Star), R(8),
+                B(Ldar), R(5),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(4), U8(4), U8(12),
                 B(Star), R(10),
-                B(Ldar), R(7),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(4), U8(5), U8(12),
-                B(Star), R(12),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(13),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(11),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(14),
-                B(LdaConstant), U8(6),
-                B(Star), R(15),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+                B(Star), R(12),
+                B(LdaConstant), U8(5),
+                B(Star), R(13),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
                 B(Throw),
-                B(CallProperty0), R(12), R(4), U8(14),
+                B(CallProperty0), R(10), R(4), U8(14),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(14),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+                B(Star), R(12),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(13),
+                B(Star), R(11),
                 B(LdaZero),
-                B(TestReferenceEqual), R(8),
+                B(TestReferenceEqual), R(6),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(13),
+                B(Ldar), R(11),
                 B(ReThrow),
-                B(Ldar), R(10),
-                B(SetPendingMessage),
                 B(Ldar), R(8),
-                B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
+                B(SetPendingMessage),
+                B(Ldar), R(6),
+                B(SwitchOnSmiNoFeedback), U8(6), U8(2), I8(0),
                 B(Jump), U8(8),
-                B(Ldar), R(9),
+                B(Ldar), R(7),
                 B(ReThrow),
-                B(Ldar), R(9),
+                B(Ldar), R(7),
   /*   85 S> */ B(Return),
                 B(LdaUndefined),
   /*   85 S> */ B(Return),
 ]
 constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["potatoes"],
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -207,8 +202,8 @@ constant pool: [
   Smi [9],
 ]
 handlers: [
-  [39, 86, 94],
-  [120, 153, 155],
+  [35, 82, 90],
+  [114, 147, 149],
 ]
 
 ---
@@ -218,37 +213,37 @@ snippet: "
     if (x == 20) break;
   }
 "
-frame size: 15
+frame size: 13
 parameter count: 1
-bytecode array length: 189
+bytecode array length: 186
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
                 B(Star), R(4),
-                B(LdaNamedProperty), R(4), U8(1), U8(1),
+                B(GetIterator), R(4), U8(1),
                 B(Star), R(5),
                 B(CallProperty0), R(5), R(4), U8(3),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(3),
-                B(LdaNamedProperty), R(3), U8(2), U8(5),
+                B(LdaNamedProperty), R(3), U8(1), U8(5),
                 B(Star), R(2),
                 B(LdaFalse),
-                B(Star), R(6),
-                B(Mov), R(context), R(9),
+                B(Star), R(4),
+                B(Mov), R(context), R(7),
                 B(LdaTrue),
-                B(Star), R(6),
+                B(Star), R(4),
   /*   43 S> */ B(CallProperty0), R(2), R(3), U8(7),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
-                B(LdaNamedProperty), R(10), U8(3), U8(9),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+                B(LdaNamedProperty), R(8), U8(2), U8(9),
                 B(JumpIfToBooleanTrue), U8(39),
-                B(LdaNamedProperty), R(10), U8(4), U8(11),
-                B(Star), R(10),
+                B(LdaNamedProperty), R(8), U8(3), U8(11),
+                B(Star), R(8),
                 B(LdaFalse),
-                B(Star), R(6),
-                B(Mov), R(10), R(1),
+                B(Star), R(4),
+                B(Mov), R(8), R(1),
   /*   34 E> */ B(StackCheck),
   /*   43 S> */ B(Mov), R(1), R(0),
   /*   66 S> */ B(LdaSmi), I8(10),
@@ -261,54 +256,52 @@ bytecodes: [
   /*  104 S> */ B(Jump), U8(5),
                 B(JumpLoop), U8(56), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(8),
-                B(Star), R(7),
+                B(Star), R(6),
+                B(Star), R(5),
                 B(Jump), U8(7),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(LdaZero),
-                B(Star), R(7),
+                B(Star), R(5),
                 B(LdaTheHole),
                 B(SetPendingMessage),
+                B(Star), R(7),
+                B(Ldar), R(4),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(3), U8(4), U8(15),
                 B(Star), R(9),
-                B(Ldar), R(6),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(3), U8(5), U8(15),
-                B(Star), R(11),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(12),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(10),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(13),
-                B(LdaConstant), U8(6),
-                B(Star), R(14),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+                B(Star), R(11),
+                B(LdaConstant), U8(5),
+                B(Star), R(12),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
                 B(Throw),
-                B(CallProperty0), R(11), R(3), U8(17),
+                B(CallProperty0), R(9), R(3), U8(17),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(13),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+                B(Star), R(11),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(LdaZero),
-                B(TestReferenceEqual), R(7),
+                B(TestReferenceEqual), R(5),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(ReThrow),
-                B(Ldar), R(9),
+                B(Ldar), R(7),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(7),
+                B(TestReferenceEqual), R(5),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(8),
+                B(Ldar), R(6),
                 B(ReThrow),
                 B(LdaUndefined),
   /*  113 S> */ B(Return),
 ]
 constant pool: [
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -316,8 +309,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [38, 97, 105],
-  [131, 164, 166],
+  [37, 96, 104],
+  [128, 161, 163],
 ]
 
 ---
@@ -325,91 +318,90 @@ snippet: "
   var x = { 'a': 1, 'b': 2 };
   for (x['a'] of [1,2,3]) { return x['a']; }
 "
-frame size: 15
+frame size: 13
 parameter count: 1
-bytecode array length: 195
+bytecode array length: 192
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
                 B(Star), R(0),
   /*   77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
                 B(Star), R(3),
-                B(LdaNamedProperty), R(3), U8(2), U8(2),
+                B(GetIterator), R(3), U8(2),
                 B(Star), R(4),
                 B(CallProperty0), R(4), R(3), U8(4),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(2),
-                B(LdaNamedProperty), R(2), U8(3), U8(6),
+                B(LdaNamedProperty), R(2), U8(2), U8(6),
                 B(Star), R(1),
                 B(LdaFalse),
-                B(Star), R(5),
-                B(Mov), R(context), R(8),
+                B(Star), R(3),
+                B(Mov), R(context), R(6),
                 B(LdaTrue),
-                B(Star), R(5),
+                B(Star), R(3),
   /*   68 S> */ B(CallProperty0), R(1), R(2), U8(8),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
-                B(LdaNamedProperty), R(9), U8(4), U8(10),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+                B(LdaNamedProperty), R(7), U8(3), U8(10),
                 B(JumpIfToBooleanTrue), U8(33),
-                B(LdaNamedProperty), R(9), U8(5), U8(12),
-                B(Star), R(9),
+                B(LdaNamedProperty), R(7), U8(4), U8(12),
+                B(Star), R(7),
                 B(LdaFalse),
-                B(Star), R(5),
-                B(Ldar), R(9),
-  /*   67 E> */ B(StaNamedProperty), R(0), U8(6), U8(14),
+                B(Star), R(3),
+                B(Ldar), R(7),
+  /*   67 E> */ B(StaNamedProperty), R(0), U8(5), U8(14),
   /*   62 E> */ B(StackCheck),
-  /*   96 S> */ B(LdaNamedProperty), R(0), U8(6), U8(16),
-                B(Star), R(7),
+  /*   96 S> */ B(LdaNamedProperty), R(0), U8(5), U8(16),
+                B(Star), R(5),
                 B(LdaSmi), I8(1),
-                B(Star), R(6),
-                B(Mov), R(0), R(10),
+                B(Star), R(4),
+                B(Mov), R(0), R(8),
                 B(Jump), U8(15),
                 B(LdaSmi), I8(-1),
-                B(Star), R(7),
-                B(Star), R(6),
+                B(Star), R(5),
+                B(Star), R(4),
                 B(Jump), U8(7),
-                B(Star), R(7),
+                B(Star), R(5),
                 B(LdaZero),
-                B(Star), R(6),
+                B(Star), R(4),
                 B(LdaTheHole),
                 B(SetPendingMessage),
-                B(Star), R(8),
-                B(Ldar), R(5),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(2), U8(7), U8(18),
-                B(Star), R(11),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(12),
+                B(Star), R(6),
+                B(Ldar), R(3),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(2), U8(6), U8(18),
+                B(Star), R(9),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(10),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(13),
-                B(LdaConstant), U8(8),
-                B(Star), R(14),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+                B(Star), R(11),
+                B(LdaConstant), U8(7),
+                B(Star), R(12),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
                 B(Throw),
-                B(CallProperty0), R(11), R(2), U8(20),
+                B(CallProperty0), R(9), R(2), U8(20),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(13),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+                B(Star), R(11),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(LdaZero),
-                B(TestReferenceEqual), R(6),
+                B(TestReferenceEqual), R(4),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(ReThrow),
-                B(Ldar), R(8),
-                B(SetPendingMessage),
                 B(Ldar), R(6),
-                B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
+                B(SetPendingMessage),
+                B(Ldar), R(4),
+                B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
                 B(Jump), U8(8),
-                B(Ldar), R(7),
+                B(Ldar), R(5),
                 B(ReThrow),
-                B(Ldar), R(7),
+                B(Ldar), R(5),
   /*  105 S> */ B(Return),
                 B(LdaUndefined),
   /*  105 S> */ B(Return),
@@ -417,7 +409,6 @@ bytecodes: [
 constant pool: [
   OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -428,7 +419,7 @@ constant pool: [
   Smi [9],
 ]
 handlers: [
-  [44, 97, 105],
-  [131, 164, 166],
+  [43, 96, 104],
+  [128, 161, 163],
 ]
 
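Note on the pattern in these golden-file updates: nearly all of the churn here comes from two bytecode-generator changes. First, the indirect load of Symbol.iterator (an LdaNamedProperty against a SYMBOL_TYPE constant-pool entry) is replaced by the dedicated GetIterator bytecode, which drops the SYMBOL_TYPE constant and renumbers the remaining constant-pool indices. Second, the JumpIfUndefined/JumpIfNull pair guarding the iterator-close path is fused into a single JumpIfUndefinedOrNull. Together these typically free two registers per loop (hence the smaller frame sizes) and shave a few bytes off each bytecode array, which is why every handler-table offset shifts. As a minimal JavaScript sketch of the semantics the new GetIterator prologue encodes (identifier names are illustrative, not taken from V8 sources):

    // GetIterator: one bytecode now fetches iterable[Symbol.iterator].
    const method = iterable[Symbol.iterator];
    // CallProperty0 + JumpIfJSReceiver: invoke the method and require an
    // object result; otherwise Runtime::kThrowSymbolIteratorInvalid throws.
    const iterator = method.call(iterable);
    if (Object(iterator) !== iterator) {
      throw new TypeError("Result of the Symbol.iterator method is not an object");
    }
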
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 1b10e1bf6e1931..f50891172ecc8b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -13,89 +13,86 @@ snippet: "
   }
   f([1, 2, 3]);
 "
-frame size: 17
+frame size: 15
 parameter count: 2
-bytecode array length: 173
+bytecode array length: 167
 bytecodes: [
   /*   10 E> */ B(StackCheck),
-  /*   34 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+  /*   34 S> */ B(GetIterator), R(arg0), U8(0),
                 B(Star), R(7),
                 B(CallProperty0), R(7), R(arg0), U8(2),
-                B(Mov), R(arg0), R(6),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(5),
-                B(LdaNamedProperty), R(5), U8(1), U8(4),
+                B(LdaNamedProperty), R(5), U8(0), U8(4),
                 B(Star), R(4),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(context), R(11),
+                B(Star), R(6),
+                B(Mov), R(context), R(9),
                 B(LdaTrue),
-                B(Star), R(8),
+                B(Star), R(6),
   /*   29 S> */ B(CallProperty0), R(4), R(5), U8(6),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(2), U8(8),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(1), U8(8),
                 B(JumpIfToBooleanTrue), U8(26),
-                B(LdaNamedProperty), R(12), U8(3), U8(10),
-                B(Star), R(12),
+                B(LdaNamedProperty), R(10), U8(2), U8(10),
+                B(Star), R(10),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(12), R(0),
+                B(Star), R(6),
+                B(Mov), R(10), R(0),
   /*   20 E> */ B(StackCheck),
   /*   29 S> */ B(Mov), R(0), R(2),
   /*   49 S> */ B(Mov), R(2), R(3),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(JumpLoop), U8(43), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(10),
-                B(Star), R(9),
+                B(Star), R(8),
+                B(Star), R(7),
                 B(Jump), U8(7),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaZero),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(LdaTheHole),
   /*   49 E> */ B(SetPendingMessage),
+                B(Star), R(9),
+                B(Ldar), R(6),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(5), U8(3), U8(12),
                 B(Star), R(11),
-                B(Ldar), R(8),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(5), U8(4), U8(12),
-                B(Star), R(13),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(14),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(12),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(15),
-                B(LdaConstant), U8(5),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+                B(Star), R(13),
+                B(LdaConstant), U8(4),
+                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
                 B(Throw),
-                B(CallProperty0), R(13), R(5), U8(14),
+                B(CallProperty0), R(11), R(5), U8(14),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(15),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+                B(Star), R(13),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(11),
+                B(Ldar), R(9),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
                 B(ReThrow),
                 B(LdaUndefined),
   /*   54 S> */ B(Return),
 ]
 constant pool: [
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -103,8 +100,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [35, 81, 89],
-  [115, 148, 150],
+  [31, 77, 85],
+  [109, 142, 144],
 ]
 
 ---
@@ -114,9 +111,9 @@ snippet: "
   }
   f([1, 2, 3]);
 "
-frame size: 22
+frame size: 20
 parameter count: 2
-bytecode array length: 254
+bytecode array length: 251
 bytecodes: [
                 B(CreateFunctionContext), U8(0), U8(4),
                 B(PushContext), R(2),
@@ -135,98 +132,97 @@ bytecodes: [
                 B(StaCurrentContextSlot), U8(4),
   /*   34 S> */ B(LdaContextSlot), R(3), U8(4), U8(0),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(2), U8(0),
+                B(GetIterator), R(6), U8(0),
                 B(Star), R(7),
                 B(CallProperty0), R(7), R(6), U8(2),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(5),
-                B(LdaNamedProperty), R(5), U8(3), U8(4),
+                B(LdaNamedProperty), R(5), U8(2), U8(4),
                 B(Star), R(4),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(context), R(11),
+                B(Star), R(6),
+                B(Mov), R(context), R(9),
                 B(LdaTrue),
-                B(Star), R(8),
+                B(Star), R(6),
   /*   29 S> */ B(CallProperty0), R(4), R(5), U8(6),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(4), U8(8),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(3), U8(8),
                 B(JumpIfToBooleanTrue), U8(75),
-                B(LdaNamedProperty), R(12), U8(5), U8(10),
-                B(Star), R(12),
+                B(LdaNamedProperty), R(10), U8(4), U8(10),
+                B(Star), R(10),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(12), R(0),
+                B(Star), R(6),
+                B(Mov), R(10), R(0),
   /*   20 E> */ B(StackCheck),
-                B(CreateBlockContext), U8(6),
-                B(PushContext), R(13),
+                B(CreateBlockContext), U8(5),
+                B(PushContext), R(11),
                 B(LdaTheHole),
                 B(StaCurrentContextSlot), U8(4),
   /*   29 S> */ B(Ldar), R(0),
   /*   29 E> */ B(StaCurrentContextSlot), U8(4),
-  /*   41 S> */ B(LdaLookupGlobalSlot), U8(7), U8(12), U8(3),
-                B(Star), R(14),
-                B(LdaConstant), U8(8),
-                B(Star), R(15),
+  /*   41 S> */ B(LdaLookupGlobalSlot), U8(6), U8(12), U8(3),
+                B(Star), R(12),
+                B(LdaConstant), U8(7),
+                B(Star), R(13),
                 B(LdaZero),
-                B(Star), R(19),
+                B(Star), R(17),
                 B(LdaSmi), I8(37),
-                B(Star), R(20),
+                B(Star), R(18),
                 B(LdaSmi), I8(41),
-                B(Star), R(21),
-                B(Mov), R(14), R(16),
-                B(Mov), R(15), R(17),
-                B(Mov), R(closure), R(18),
-                B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(16), U8(6),
-                B(Star), R(14),
-  /*   41 E> */ B(CallUndefinedReceiver1), R(14), R(15), U8(14),
-                B(PopContext), R(13),
-                B(Mov), R(0), R(12),
+                B(Star), R(19),
+                B(Mov), R(12), R(14),
+                B(Mov), R(13), R(15),
+                B(Mov), R(closure), R(16),
+                B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(14), U8(6),
+                B(Star), R(12),
+  /*   41 E> */ B(CallUndefinedReceiver1), R(12), R(13), U8(14),
+                B(PopContext), R(11),
+                B(Mov), R(0), R(10),
                 B(JumpLoop), U8(92), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(10),
-                B(Star), R(9),
+                B(Star), R(8),
+                B(Star), R(7),
                 B(Jump), U8(7),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaZero),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(LdaTheHole),
                 B(SetPendingMessage),
-                B(Star), R(11),
-                B(Ldar), R(8),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(5), U8(9), U8(16),
-                B(Star), R(14),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(15),
+                B(Star), R(9),
+                B(Ldar), R(6),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(5), U8(8), U8(16),
+                B(Star), R(12),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(13),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(16),
-                B(LdaConstant), U8(10),
-                B(Star), R(17),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+                B(Star), R(14),
+                B(LdaConstant), U8(9),
+                B(Star), R(15),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
                 B(Throw),
-                B(CallProperty0), R(14), R(5), U8(18),
+                B(CallProperty0), R(12), R(5), U8(18),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
+                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(15),
+                B(Star), R(13),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(15),
+                B(Ldar), R(13),
                 B(ReThrow),
-                B(Ldar), R(11),
+                B(Ldar), R(9),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
                 B(ReThrow),
                 B(PopContext), R(3),
                 B(LdaUndefined),
@@ -235,7 +231,6 @@ bytecodes: [
 constant pool: [
   SCOPE_INFO_TYPE,
   SCOPE_INFO_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -246,8 +241,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [65, 160, 168],
-  [194, 227, 229],
+  [64, 159, 167],
+  [191, 224, 226],
 ]
 
 ---
@@ -257,97 +252,94 @@ snippet: "
   }
   f([1, 2, 3]);
 "
-frame size: 16
+frame size: 14
 parameter count: 2
-bytecode array length: 190
+bytecode array length: 184
 bytecodes: [
   /*   10 E> */ B(StackCheck),
-  /*   34 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+  /*   34 S> */ B(GetIterator), R(arg0), U8(0),
                 B(Star), R(5),
                 B(CallProperty0), R(5), R(arg0), U8(2),
-                B(Mov), R(arg0), R(4),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(3),
-                B(LdaNamedProperty), R(3), U8(1), U8(4),
+                B(LdaNamedProperty), R(3), U8(0), U8(4),
                 B(Star), R(2),
                 B(LdaFalse),
-                B(Star), R(6),
-                B(Mov), R(context), R(9),
+                B(Star), R(4),
+                B(Mov), R(context), R(7),
                 B(LdaTrue),
-                B(Star), R(6),
+                B(Star), R(4),
   /*   29 S> */ B(CallProperty0), R(2), R(3), U8(6),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
-                B(LdaNamedProperty), R(10), U8(2), U8(8),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+                B(LdaNamedProperty), R(8), U8(1), U8(8),
                 B(JumpIfToBooleanTrue), U8(43),
-                B(LdaNamedProperty), R(10), U8(3), U8(10),
-                B(Star), R(10),
+                B(LdaNamedProperty), R(8), U8(2), U8(10),
+                B(Star), R(8),
                 B(LdaFalse),
-                B(Star), R(6),
-                B(Mov), R(10), R(0),
+                B(Star), R(4),
+                B(Mov), R(8), R(0),
   /*   20 E> */ B(StackCheck),
-                B(CreateBlockContext), U8(4),
-                B(PushContext), R(11),
+                B(CreateBlockContext), U8(3),
+                B(PushContext), R(9),
                 B(LdaTheHole),
                 B(StaCurrentContextSlot), U8(4),
   /*   29 S> */ B(Ldar), R(0),
   /*   29 E> */ B(StaCurrentContextSlot), U8(4),
-  /*   41 S> */ B(CreateClosure), U8(5), U8(0), U8(2),
-                B(Star), R(12),
-  /*   67 E> */ B(CallUndefinedReceiver0), R(12), U8(12),
-                B(PopContext), R(11),
-                B(Mov), R(0), R(10),
+  /*   41 S> */ B(CreateClosure), U8(4), U8(0), U8(2),
+                B(Star), R(10),
+  /*   67 E> */ B(CallUndefinedReceiver0), R(10), U8(12),
+                B(PopContext), R(9),
+                B(Mov), R(0), R(8),
                 B(JumpLoop), U8(60), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(8),
-                B(Star), R(7),
+                B(Star), R(6),
+                B(Star), R(5),
                 B(Jump), U8(7),
-                B(Star), R(8),
+                B(Star), R(6),
                 B(LdaZero),
-                B(Star), R(7),
+                B(Star), R(5),
                 B(LdaTheHole),
                 B(SetPendingMessage),
-                B(Star), R(9),
-                B(Ldar), R(6),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(3), U8(6), U8(14),
-                B(Star), R(12),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(13),
+                B(Star), R(7),
+                B(Ldar), R(4),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(3), U8(5), U8(14),
+                B(Star), R(10),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(11),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(14),
-                B(LdaConstant), U8(7),
-                B(Star), R(15),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+                B(Star), R(12),
+                B(LdaConstant), U8(6),
+                B(Star), R(13),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
                 B(Throw),
-                B(CallProperty0), R(12), R(3), U8(16),
+                B(CallProperty0), R(10), R(3), U8(16),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(14),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+                B(Star), R(12),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(13),
+                B(Star), R(11),
                 B(LdaZero),
-                B(TestReferenceEqual), R(7),
+                B(TestReferenceEqual), R(5),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(13),
+                B(Ldar), R(11),
                 B(ReThrow),
-                B(Ldar), R(9),
+                B(Ldar), R(7),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(7),
+                B(TestReferenceEqual), R(5),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(8),
+                B(Ldar), R(6),
                 B(ReThrow),
                 B(LdaUndefined),
   /*   73 S> */ B(Return),
 ]
 constant pool: [
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -357,8 +349,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [35, 98, 106],
-  [132, 165, 167],
+  [31, 94, 102],
+  [126, 159, 161],
 ]
 
 ---
@@ -368,98 +360,90 @@ snippet: "
   }
   f([{ x: 0, y: 3 }, { x: 1, y: 9 }, { x: -12, y: 17 }]);
 "
-frame size: 19
+frame size: 17
 parameter count: 2
-bytecode array length: 197
+bytecode array length: 178
 bytecodes: [
   /*   10 E> */ B(StackCheck),
-  /*   41 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+  /*   41 S> */ B(GetIterator), R(arg0), U8(0),
                 B(Star), R(9),
                 B(CallProperty0), R(9), R(arg0), U8(2),
-                B(Mov), R(arg0), R(8),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(7),
-                B(LdaNamedProperty), R(7), U8(1), U8(4),
+                B(LdaNamedProperty), R(7), U8(0), U8(4),
                 B(Star), R(6),
                 B(LdaFalse),
-                B(Star), R(10),
-                B(Mov), R(context), R(13),
+                B(Star), R(8),
+                B(Mov), R(context), R(11),
                 B(LdaTrue),
-                B(Star), R(10),
+                B(Star), R(8),
   /*   36 S> */ B(CallProperty0), R(6), R(7), U8(6),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
-                B(LdaNamedProperty), R(14), U8(2), U8(8),
-                B(JumpIfToBooleanTrue), U8(50),
-                B(LdaNamedProperty), R(14), U8(3), U8(10),
-                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+                B(LdaNamedProperty), R(12), U8(1), U8(8),
+                B(JumpIfToBooleanTrue), U8(37),
+                B(LdaNamedProperty), R(12), U8(2), U8(10),
+                B(Star), R(12),
                 B(LdaFalse),
-                B(Star), R(10),
-                B(Mov), R(14), R(0),
+                B(Star), R(8),
+                B(Mov), R(12), R(0),
   /*   20 E> */ B(StackCheck),
-  /*   36 S> */ B(Ldar), R(14),
-                B(JumpIfNull), U8(4),
-                B(JumpIfNotUndefined), U8(7),
-  /*   29 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
-                B(Star), R(15),
-  /*   31 S> */ B(LdaNamedProperty), R(15), U8(4), U8(12),
+  /*   31 S> */ B(LdaNamedProperty), R(0), U8(3), U8(12),
                 B(Star), R(3),
-  /*   34 S> */ B(LdaNamedProperty), R(15), U8(5), U8(14),
+  /*   34 S> */ B(LdaNamedProperty), R(0), U8(4), U8(14),
                 B(Star), R(4),
   /*   56 S> */ B(Ldar), R(4),
   /*   58 E> */ B(Add), R(3), U8(16),
                 B(Star), R(5),
-                B(JumpLoop), U8(67), I8(0),
+                B(JumpLoop), U8(54), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(12),
-                B(Star), R(11),
+                B(Star), R(10),
+                B(Star), R(9),
                 B(Jump), U8(7),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(LdaZero),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(LdaTheHole),
   /*   56 E> */ B(SetPendingMessage),
+                B(Star), R(11),
+                B(Ldar), R(8),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(7), U8(5), U8(17),
                 B(Star), R(13),
-                B(Ldar), R(10),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(7), U8(6), U8(17),
-                B(Star), R(15),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(16),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(14),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(17),
-                B(LdaConstant), U8(7),
-                B(Star), R(18),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+                B(Star), R(15),
+                B(LdaConstant), U8(6),
+                B(Star), R(16),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
                 B(Throw),
-                B(CallProperty0), R(15), R(7), U8(19),
+                B(CallProperty0), R(13), R(7), U8(19),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(17),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(17), U8(1),
+                B(Star), R(15),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(16),
+                B(Star), R(14),
                 B(LdaZero),
-                B(TestReferenceEqual), R(11),
+                B(TestReferenceEqual), R(9),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(16),
+                B(Ldar), R(14),
                 B(ReThrow),
-                B(Ldar), R(13),
+                B(Ldar), R(11),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(11),
+                B(TestReferenceEqual), R(9),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(ReThrow),
                 B(LdaUndefined),
   /*   65 S> */ B(Return),
 ]
 constant pool: [
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -469,8 +453,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [35, 105, 113],
-  [139, 172, 174],
+  [31, 88, 96],
+  [120, 153, 155],
 ]
 
 ---
@@ -480,9 +464,9 @@ snippet: "
   }
   f([1, 2, 3]);
 "
-frame size: 18
+frame size: 16
 parameter count: 2
-bytecode array length: 214
+bytecode array length: 208
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
                 B(Mov), R(closure), R(5),
@@ -499,78 +483,76 @@ bytecodes: [
   /*   11 E> */ B(Throw),
                 B(Ldar), R(5),
   /*   55 S> */ B(Return),
-  /*   35 S> */ B(LdaNamedProperty), R(arg0), U8(3), U8(0),
+  /*   35 S> */ B(GetIterator), R(arg0), U8(0),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(arg0), U8(2),
-                B(Mov), R(arg0), R(7),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(4), U8(4),
+                B(LdaNamedProperty), R(6), U8(3), U8(4),
                 B(Star), R(5),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(context), R(12),
+                B(Star), R(7),
+                B(Mov), R(context), R(10),
                 B(LdaTrue),
-                B(Star), R(9),
+                B(Star), R(7),
   /*   30 S> */ B(CallProperty0), R(5), R(6), U8(6),
-                B(Star), R(13),
+                B(Star), R(11),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
-                B(LdaNamedProperty), R(13), U8(5), U8(8),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+                B(LdaNamedProperty), R(11), U8(4), U8(8),
                 B(JumpIfToBooleanTrue), U8(26),
-                B(LdaNamedProperty), R(13), U8(6), U8(10),
-                B(Star), R(13),
+                B(LdaNamedProperty), R(11), U8(5), U8(10),
+                B(Star), R(11),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(13), R(1),
+                B(Star), R(7),
+                B(Mov), R(11), R(1),
   /*   21 E> */ B(StackCheck),
   /*   30 S> */ B(Mov), R(1), R(3),
   /*   50 S> */ B(Mov), R(3), R(4),
-                B(Ldar), R(13),
+                B(Ldar), R(11),
                 B(JumpLoop), U8(43), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(11),
-                B(Star), R(10),
+                B(Star), R(9),
+                B(Star), R(8),
                 B(Jump), U8(7),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(LdaZero),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaTheHole),
   /*   50 E> */ B(SetPendingMessage),
+                B(Star), R(10),
+                B(Ldar), R(7),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(6), U8(6), U8(12),
                 B(Star), R(12),
-                B(Ldar), R(9),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(6), U8(7), U8(12),
-                B(Star), R(14),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(15),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(13),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(16),
-                B(LdaConstant), U8(8),
-                B(Star), R(17),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+                B(Star), R(14),
+                B(LdaConstant), U8(7),
+                B(Star), R(15),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
                 B(Throw),
-                B(CallProperty0), R(14), R(6), U8(14),
+                B(CallProperty0), R(12), R(6), U8(14),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
+                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(15),
+                B(Star), R(13),
                 B(LdaZero),
-                B(TestReferenceEqual), R(10),
+                B(TestReferenceEqual), R(8),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(15),
+                B(Ldar), R(13),
                 B(ReThrow),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(10),
+                B(TestReferenceEqual), R(8),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(11),
+                B(Ldar), R(9),
                 B(ReThrow),
                 B(LdaUndefined),
   /*   55 S> */ B(Return),
@@ -579,7 +561,6 @@ constant pool: [
   Smi [22],
   Smi [10],
   Smi [7],
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -587,8 +568,8 @@ constant pool: [
   ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
 ]
 handlers: [
-  [76, 122, 130],
-  [156, 189, 191],
+  [72, 118, 126],
+  [150, 183, 185],
 ]
 
 ---
@@ -598,9 +579,9 @@ snippet: "
   }
   f([1, 2, 3]);
 "
-frame size: 17
+frame size: 15
 parameter count: 2
-bytecode array length: 258
+bytecode array length: 252
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
                 B(Mov), R(closure), R(4),
@@ -617,104 +598,101 @@ bytecodes: [
   /*   11 E> */ B(Throw),
                 B(Ldar), R(4),
   /*   49 S> */ B(Return),
-  /*   35 S> */ B(LdaNamedProperty), R(arg0), U8(4), U8(0),
+  /*   35 S> */ B(GetIterator), R(arg0), U8(0),
                 B(Star), R(7),
                 B(CallProperty0), R(7), R(arg0), U8(2),
-                B(Mov), R(arg0), R(6),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(5),
-                B(LdaNamedProperty), R(5), U8(5), U8(4),
+                B(LdaNamedProperty), R(5), U8(4), U8(4),
                 B(Star), R(4),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(context), R(11),
+                B(Star), R(6),
+                B(Mov), R(context), R(9),
                 B(LdaTrue),
-                B(Star), R(8),
+                B(Star), R(6),
   /*   30 S> */ B(CallProperty0), R(4), R(5), U8(6),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(6), U8(8),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(5), U8(8),
                 B(JumpIfToBooleanTrue), U8(64),
-                B(LdaNamedProperty), R(12), U8(7), U8(10),
-                B(Star), R(12),
+                B(LdaNamedProperty), R(10), U8(6), U8(10),
+                B(Star), R(10),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(12), R(1),
+                B(Star), R(6),
+                B(Mov), R(10), R(1),
   /*   21 E> */ B(StackCheck),
   /*   30 S> */ B(Mov), R(1), R(3),
   /*   40 S> */ B(LdaFalse),
-                B(Star), R(14),
-                B(Mov), R(3), R(13),
-                B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(13), U8(2),
-  /*   40 E> */ B(SuspendGenerator), R(0), R(0), U8(13), U8(1),
-                B(ResumeGenerator), R(0), R(0), U8(13),
-                B(Star), R(13),
+                B(Star), R(12),
+                B(Mov), R(3), R(11),
+                B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(11), U8(2),
+  /*   40 E> */ B(SuspendGenerator), R(0), R(0), U8(11), U8(1),
+                B(ResumeGenerator), R(0), R(0), U8(11),
+                B(Star), R(11),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
-                B(Ldar), R(13),
+                B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
+                B(Ldar), R(11),
   /*   40 E> */ B(Throw),
                 B(LdaSmi), I8(1),
-                B(Star), R(9),
-                B(Mov), R(13), R(10),
+                B(Star), R(7),
+                B(Mov), R(11), R(8),
                 B(Jump), U8(20),
-                B(Ldar), R(13),
+                B(Ldar), R(11),
                 B(JumpLoop), U8(81), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(10),
-                B(Star), R(9),
+                B(Star), R(8),
+                B(Star), R(7),
                 B(Jump), U8(7),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaZero),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(LdaTheHole),
                 B(SetPendingMessage),
+                B(Star), R(9),
+                B(Ldar), R(6),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(5), U8(9), U8(12),
                 B(Star), R(11),
-                B(Ldar), R(8),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(5), U8(10), U8(12),
-                B(Star), R(13),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(14),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(12),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(15),
-                B(LdaConstant), U8(11),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+                B(Star), R(13),
+                B(LdaConstant), U8(10),
+                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
                 B(Throw),
-                B(CallProperty0), R(13), R(5), U8(14),
+                B(CallProperty0), R(11), R(5), U8(14),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(15),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+                B(Star), R(13),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(11),
-                B(SetPendingMessage),
                 B(Ldar), R(9),
-                B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
+                B(SetPendingMessage),
+                B(Ldar), R(7),
+                B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
                 B(Jump), U8(8),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
                 B(ReThrow),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
   /*   49 S> */ B(Return),
                 B(LdaUndefined),
   /*   49 S> */ B(Return),
 ]
 constant pool: [
   Smi [22],
-  Smi [129],
+  Smi [125],
   Smi [10],
   Smi [7],
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -726,8 +704,8 @@ constant pool: [
   Smi [9],
 ]
 handlers: [
-  [76, 160, 168],
-  [194, 227, 229],
+  [72, 156, 164],
+  [188, 221, 223],
 ]
 
 ---
@@ -737,9 +715,9 @@ snippet: "
   }
   f([1, 2, 3]);
 "
-frame size: 19
+frame size: 17
 parameter count: 2
-bytecode array length: 228
+bytecode array length: 222
 bytecodes: [
                 B(Mov), R(closure), R(5),
                 B(Mov), R(this), R(6),
@@ -747,78 +725,76 @@ bytecodes: [
                 B(Star), R(0),
   /*   16 E> */ B(StackCheck),
                 B(Mov), R(context), R(5),
-  /*   40 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
+  /*   40 S> */ B(GetIterator), R(arg0), U8(0),
                 B(Star), R(9),
                 B(CallProperty0), R(9), R(arg0), U8(2),
-                B(Mov), R(arg0), R(8),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(7),
-                B(LdaNamedProperty), R(7), U8(1), U8(4),
+                B(LdaNamedProperty), R(7), U8(0), U8(4),
                 B(Star), R(6),
                 B(LdaFalse),
-                B(Star), R(10),
-                B(Mov), R(context), R(13),
+                B(Star), R(8),
+                B(Mov), R(context), R(11),
                 B(LdaTrue),
-                B(Star), R(10),
+                B(Star), R(8),
   /*   35 S> */ B(CallProperty0), R(6), R(7), U8(6),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
-                B(LdaNamedProperty), R(14), U8(2), U8(8),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+                B(LdaNamedProperty), R(12), U8(1), U8(8),
                 B(JumpIfToBooleanTrue), U8(26),
-                B(LdaNamedProperty), R(14), U8(3), U8(10),
-                B(Star), R(14),
+                B(LdaNamedProperty), R(12), U8(2), U8(10),
+                B(Star), R(12),
                 B(LdaFalse),
-                B(Star), R(10),
-                B(Mov), R(14), R(1),
+                B(Star), R(8),
+                B(Mov), R(12), R(1),
   /*   26 E> */ B(StackCheck),
   /*   35 S> */ B(Mov), R(1), R(3),
   /*   55 S> */ B(Mov), R(3), R(4),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(JumpLoop), U8(43), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(12),
-                B(Star), R(11),
+                B(Star), R(10),
+                B(Star), R(9),
                 B(Jump), U8(7),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(LdaZero),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(LdaTheHole),
   /*   55 E> */ B(SetPendingMessage),
+                B(Star), R(11),
+                B(Ldar), R(8),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(7), U8(3), U8(12),
                 B(Star), R(13),
-                B(Ldar), R(10),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(7), U8(4), U8(12),
-                B(Star), R(15),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(16),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(14),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(17),
-                B(LdaConstant), U8(5),
-                B(Star), R(18),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+                B(Star), R(15),
+                B(LdaConstant), U8(4),
+                B(Star), R(16),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
                 B(Throw),
-                B(CallProperty0), R(15), R(7), U8(14),
+                B(CallProperty0), R(13), R(7), U8(14),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(17),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(17), U8(1),
+                B(Star), R(15),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(16),
+                B(Star), R(14),
                 B(LdaZero),
-                B(TestReferenceEqual), R(11),
+                B(TestReferenceEqual), R(9),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(16),
+                B(Ldar), R(14),
                 B(ReThrow),
-                B(Ldar), R(13),
+                B(Ldar), R(11),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(11),
+                B(TestReferenceEqual), R(9),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(ReThrow),
                 B(LdaUndefined),
                 B(Star), R(7),
@@ -828,7 +804,7 @@ bytecodes: [
                 B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(6), U8(3),
   /*   60 S> */ B(Return),
                 B(Star), R(6),
-                B(CreateCatchContext), R(6), U8(6),
+                B(CreateCatchContext), R(6), U8(5),
                 B(Star), R(5),
                 B(LdaTheHole),
                 B(SetPendingMessage),
@@ -843,7 +819,6 @@ bytecodes: [
   /*   60 S> */ B(Return),
 ]
 constant pool: [
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -852,9 +827,9 @@ constant pool: [
   SCOPE_INFO_TYPE,
 ]
 handlers: [
-  [16, 200, 200],
-  [50, 96, 104],
-  [130, 163, 165],
+  [16, 194, 194],
+  [46, 92, 100],
+  [124, 157, 159],
 ]
 
 ---
@@ -864,9 +839,9 @@ snippet: "
   }
   f([1, 2, 3]);
 "
-frame size: 18
+frame size: 16
 parameter count: 2
-bytecode array length: 264
+bytecode array length: 258
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
                 B(Mov), R(closure), R(4),
@@ -875,90 +850,88 @@ bytecodes: [
                 B(Star), R(0),
   /*   16 E> */ B(StackCheck),
                 B(Mov), R(context), R(4),
-  /*   40 S> */ B(LdaNamedProperty), R(arg0), U8(1), U8(0),
+  /*   40 S> */ B(GetIterator), R(arg0), U8(0),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(arg0), U8(2),
-                B(Mov), R(arg0), R(7),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(2), U8(4),
+                B(LdaNamedProperty), R(6), U8(1), U8(4),
                 B(Star), R(5),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(context), R(12),
+                B(Star), R(7),
+                B(Mov), R(context), R(10),
                 B(LdaTrue),
-                B(Star), R(9),
+                B(Star), R(7),
   /*   35 S> */ B(CallProperty0), R(5), R(6), U8(6),
-                B(Star), R(13),
+                B(Star), R(11),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
-                B(LdaNamedProperty), R(13), U8(3), U8(8),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+                B(LdaNamedProperty), R(11), U8(2), U8(8),
                 B(JumpIfToBooleanTrue), U8(58),
-                B(LdaNamedProperty), R(13), U8(4), U8(10),
-                B(Star), R(13),
+                B(LdaNamedProperty), R(11), U8(3), U8(10),
+                B(Star), R(11),
                 B(LdaFalse),
-                B(Star), R(9),
-                B(Mov), R(13), R(1),
+                B(Star), R(7),
+                B(Mov), R(11), R(1),
   /*   26 E> */ B(StackCheck),
   /*   35 S> */ B(Mov), R(1), R(3),
-  /*   45 S> */ B(Mov), R(0), R(14),
-                B(Mov), R(3), R(15),
-                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
-  /*   45 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(0),
-                B(ResumeGenerator), R(0), R(0), U8(14),
-                B(Star), R(14),
+  /*   45 S> */ B(Mov), R(0), R(12),
+                B(Mov), R(3), R(13),
+                B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(12), U8(2),
+  /*   45 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(0),
+                B(ResumeGenerator), R(0), R(0), U8(12),
+                B(Star), R(12),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(Star), R(15),
+                B(Star), R(13),
                 B(LdaZero),
-                B(TestReferenceEqual), R(15),
+                B(TestReferenceEqual), R(13),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(JumpLoop), U8(75), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(11),
-                B(Star), R(10),
+                B(Star), R(9),
+                B(Star), R(8),
                 B(Jump), U8(7),
-                B(Star), R(11),
+                B(Star), R(9),
                 B(LdaZero),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaTheHole),
                 B(SetPendingMessage),
+                B(Star), R(10),
+                B(Ldar), R(7),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(6), U8(4), U8(12),
                 B(Star), R(12),
-                B(Ldar), R(9),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(6), U8(5), U8(12),
-                B(Star), R(14),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(15),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(13),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(16),
-                B(LdaConstant), U8(6),
-                B(Star), R(17),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+                B(Star), R(14),
+                B(LdaConstant), U8(5),
+                B(Star), R(15),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
                 B(Throw),
-                B(CallProperty0), R(14), R(6), U8(14),
+                B(CallProperty0), R(12), R(6), U8(14),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
+                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(15),
+                B(Star), R(13),
                 B(LdaZero),
-                B(TestReferenceEqual), R(10),
+                B(TestReferenceEqual), R(8),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(15),
+                B(Ldar), R(13),
                 B(ReThrow),
-                B(Ldar), R(12),
+                B(Ldar), R(10),
                 B(SetPendingMessage),
                 B(LdaZero),
-                B(TestReferenceEqual), R(10),
+                B(TestReferenceEqual), R(8),
                 B(JumpIfFalse), U8(5),
-                B(Ldar), R(11),
+                B(Ldar), R(9),
                 B(ReThrow),
                 B(LdaUndefined),
                 B(Star), R(6),
@@ -968,7 +941,7 @@ bytecodes: [
                 B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
   /*   54 S> */ B(Return),
                 B(Star), R(5),
-                B(CreateCatchContext), R(5), U8(7),
+                B(CreateCatchContext), R(5), U8(6),
                 B(Star), R(4),
                 B(LdaTheHole),
                 B(SetPendingMessage),
@@ -983,8 +956,7 @@ bytecodes: [
   /*   54 S> */ B(Return),
 ]
 constant pool: [
-  Smi [107],
-  SYMBOL_TYPE,
+  Smi [103],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -993,8 +965,8 @@ constant pool: [
   SCOPE_INFO_TYPE,
 ]
 handlers: [
-  [20, 236, 236],
-  [54, 132, 140],
-  [166, 199, 201],
+  [20, 230, 230],
+  [50, 128, 136],
+  [160, 193, 195],
 ]
 
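The Generators.golden hunks below apply the same rewrite to for-of inside generator bodies; the extra movement there is that Smi jump-target constants in the pool (e.g. Smi [132] -> Smi [131]) track the shorter bytecode arrays. For reference, the abrupt-completion close path guarded by the fused JumpIfUndefinedOrNull corresponds roughly to the following sketch (hedged: variable names are illustrative and the TypeError messages paraphrase the runtime's):

    // Iterator close on abrupt completion (break/throw/early return):
    const ret = iterator.return;                 // LdaNamedProperty "return"
    if (ret !== undefined && ret !== null) {     // single JumpIfUndefinedOrNull
      if (typeof ret !== "function")             // TestTypeOf + JumpIfTrue
        throw new TypeError("returned value is not a function"); // kNewTypeError
      const result = ret.call(iterator);         // CallProperty0
      if (Object(result) !== result)             // JumpIfJSReceiver
        throw new TypeError("Iterator result is not an object"); // kThrowIteratorResultNotAnObject
    }
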
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index bfbd05cd314e64..157b58d81d2753 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -98,9 +98,9 @@ snippet: "
   function* f() { for (let x of [42]) yield x }
   f();
 "
-frame size: 17
+frame size: 15
 parameter count: 1
-bytecode array length: 261
+bytecode array length: 258
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
                 B(Mov), R(closure), R(4),
@@ -119,104 +119,102 @@ bytecodes: [
   /*   44 S> */ B(Return),
   /*   30 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(5), U8(1),
+                B(GetIterator), R(6), U8(1),
                 B(Star), R(7),
                 B(CallProperty0), R(7), R(6), U8(3),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(5),
-                B(LdaNamedProperty), R(5), U8(6), U8(5),
+                B(LdaNamedProperty), R(5), U8(5), U8(5),
                 B(Star), R(4),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(context), R(11),
+                B(Star), R(6),
+                B(Mov), R(context), R(9),
                 B(LdaTrue),
-                B(Star), R(8),
+                B(Star), R(6),
   /*   25 S> */ B(CallProperty0), R(4), R(5), U8(7),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(7), U8(9),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(6), U8(9),
                 B(JumpIfToBooleanTrue), U8(64),
-                B(LdaNamedProperty), R(12), U8(8), U8(11),
-                B(Star), R(12),
+                B(LdaNamedProperty), R(10), U8(7), U8(11),
+                B(Star), R(10),
                 B(LdaFalse),
-                B(Star), R(8),
-                B(Mov), R(12), R(1),
+                B(Star), R(6),
+                B(Mov), R(10), R(1),
   /*   16 E> */ B(StackCheck),
   /*   25 S> */ B(Mov), R(1), R(3),
   /*   36 S> */ B(LdaFalse),
-                B(Star), R(14),
-                B(Mov), R(3), R(13),
-                B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(13), U8(2),
-  /*   36 E> */ B(SuspendGenerator), R(0), R(0), U8(13), U8(1),
-                B(ResumeGenerator), R(0), R(0), U8(13),
-                B(Star), R(13),
+                B(Star), R(12),
+                B(Mov), R(3), R(11),
+                B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(11), U8(2),
+  /*   36 E> */ B(SuspendGenerator), R(0), R(0), U8(11), U8(1),
+                B(ResumeGenerator), R(0), R(0), U8(11),
+                B(Star), R(11),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
-                B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
-                B(Ldar), R(13),
+                B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
+                B(Ldar), R(11),
   /*   36 E> */ B(Throw),
                 B(LdaSmi), I8(1),
-                B(Star), R(9),
-                B(Mov), R(13), R(10),
+                B(Star), R(7),
+                B(Mov), R(11), R(8),
                 B(Jump), U8(20),
-                B(Ldar), R(13),
+                B(Ldar), R(11),
                 B(JumpLoop), U8(81), I8(0),
                 B(LdaSmi), I8(-1),
-                B(Star), R(10),
-                B(Star), R(9),
+                B(Star), R(8),
+                B(Star), R(7),
                 B(Jump), U8(7),
-                B(Star), R(10),
+                B(Star), R(8),
                 B(LdaZero),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(LdaTheHole),
                 B(SetPendingMessage),
+                B(Star), R(9),
+                B(Ldar), R(6),
+                B(JumpIfToBooleanTrue), U8(58),
+                B(LdaNamedProperty), R(5), U8(10), U8(13),
                 B(Star), R(11),
-                B(Ldar), R(8),
-                B(JumpIfToBooleanTrue), U8(60),
-                B(LdaNamedProperty), R(5), U8(11), U8(13),
-                B(Star), R(13),
-                B(JumpIfUndefined), U8(52),
-                B(JumpIfNull), U8(50),
-                B(Mov), R(context), R(14),
+                B(JumpIfUndefinedOrNull), U8(50),
+                B(Mov), R(context), R(12),
                 B(TestTypeOf), U8(6),
                 B(JumpIfTrue), U8(18),
                 B(Wide), B(LdaSmi), I16(159),
-                B(Star), R(15),
-                B(LdaConstant), U8(12),
-                B(Star), R(16),
-                B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+                B(Star), R(13),
+                B(LdaConstant), U8(11),
+                B(Star), R(14),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
                 B(Throw),
-                B(CallProperty0), R(13), R(5), U8(15),
+                B(CallProperty0), R(11), R(5), U8(15),
                 B(JumpIfJSReceiver), U8(21),
-                B(Star), R(15),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+                B(Star), R(13),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
                 B(Jump), U8(12),
-                B(Star), R(14),
+                B(Star), R(12),
                 B(LdaZero),
-                B(TestReferenceEqual), R(9),
+                B(TestReferenceEqual), R(7),
                 B(JumpIfTrue), U8(5),
-                B(Ldar), R(14),
+                B(Ldar), R(12),
                 B(ReThrow),
-                B(Ldar), R(11),
-                B(SetPendingMessage),
                 B(Ldar), R(9),
-                B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
+                B(SetPendingMessage),
+                B(Ldar), R(7),
+                B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
                 B(Jump), U8(8),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
                 B(ReThrow),
-                B(Ldar), R(10),
+                B(Ldar), R(8),
   /*   44 S> */ B(Return),
                 B(LdaUndefined),
   /*   44 S> */ B(Return),
 ]
 constant pool: [
   Smi [22],
-  Smi [132],
+  Smi [131],
   Smi [10],
   Smi [7],
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -228,8 +226,8 @@ constant pool: [
   Smi [9],
 ]
 handlers: [
-  [79, 163, 171],
-  [197, 230, 232],
+  [78, 162, 170],
+  [194, 227, 229],
 ]
 
 ---
@@ -238,9 +236,9 @@ snippet: "
   function* f() { yield* g() }
   f();
 "
-frame size: 9
+frame size: 8
 parameter count: 1
-bytecode array length: 217
+bytecode array length: 210
 bytecodes: [
                 B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
                 B(Mov), R(closure), R(1),
@@ -261,59 +259,56 @@ bytecodes: [
                 B(Star), R(5),
   /*   50 E> */ B(CallUndefinedReceiver0), R(5), U8(2),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(5), U8(4),
+                B(GetIterator), R(6), U8(4),
                 B(Star), R(7),
                 B(CallProperty0), R(7), R(6), U8(6),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(3),
-                B(LdaNamedProperty), R(3), U8(6), U8(8),
+                B(LdaNamedProperty), R(3), U8(5), U8(8),
                 B(Star), R(5),
                 B(LdaUndefined),
                 B(Star), R(4),
                 B(LdaZero),
                 B(Star), R(2),
                 B(Ldar), R(2),
-                B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(1),
+                B(SwitchOnSmiNoFeedback), U8(6), U8(2), I8(1),
                 B(CallProperty1), R(5), R(3), R(4), U8(10),
-                B(Jump), U8(69),
-                B(LdaNamedProperty), R(3), U8(9), U8(12),
-                B(JumpIfUndefined), U8(13),
-                B(JumpIfNull), U8(11),
-                B(Star), R(8),
-                B(CallProperty1), R(8), R(3), R(4), U8(14),
-                B(Jump), U8(52),
+                B(Jump), U8(63),
+                B(LdaNamedProperty), R(3), U8(8), U8(12),
+                B(JumpIfUndefinedOrNull), U8(11),
+                B(Star), R(6),
+                B(CallProperty1), R(6), R(3), R(4), U8(14),
+                B(Jump), U8(48),
                 B(Ldar), R(4),
   /*   54 S> */ B(Return),
-                B(LdaNamedProperty), R(3), U8(10), U8(16),
-                B(JumpIfUndefined), U8(13),
-                B(JumpIfNull), U8(11),
-                B(Star), R(8),
-                B(CallProperty1), R(8), R(3), R(4), U8(18),
-                B(Jump), U8(32),
-                B(LdaNamedProperty), R(3), U8(9), U8(20),
-                B(JumpIfUndefined), U8(21),
-                B(JumpIfNull), U8(19),
-                B(Star), R(8),
-                B(CallProperty0), R(8), R(3), U8(22),
+                B(LdaNamedProperty), R(3), U8(9), U8(16),
+                B(JumpIfUndefinedOrNull), U8(11),
+                B(Star), R(6),
+                B(CallProperty1), R(6), R(3), R(4), U8(18),
+                B(Jump), U8(30),
+                B(LdaNamedProperty), R(3), U8(8), U8(20),
+                B(JumpIfUndefinedOrNull), U8(19),
+                B(Star), R(6),
+                B(CallProperty0), R(6), R(3), U8(22),
                 B(Jump), U8(2),
                 B(JumpIfJSReceiver), U8(9),
-                B(Star), R(8),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+                B(Star), R(6),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
                 B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
                 B(Star), R(1),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(1), U8(1),
-                B(LdaNamedProperty), R(1), U8(11), U8(24),
+                B(LdaNamedProperty), R(1), U8(10), U8(24),
                 B(JumpIfToBooleanTrue), U8(24),
                 B(Ldar), R(1),
-  /*   43 E> */ B(SuspendGenerator), R(0), R(0), U8(8), U8(1),
-                B(ResumeGenerator), R(0), R(0), U8(8),
+  /*   43 E> */ B(SuspendGenerator), R(0), R(0), U8(6), U8(1),
+                B(ResumeGenerator), R(0), R(0), U8(6),
                 B(Star), R(4),
                 B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
                 B(Star), R(2),
-                B(JumpLoop), U8(114), I8(0),
-                B(LdaNamedProperty), R(1), U8(12), U8(26),
+                B(JumpLoop), U8(108), I8(0),
+                B(LdaNamedProperty), R(1), U8(11), U8(26),
                 B(Star), R(3),
                 B(LdaSmi), I8(1),
                 B(TestReferenceEqual), R(2),
@@ -325,14 +320,13 @@ bytecodes: [
 ]
 constant pool: [
   Smi [22],
-  Smi [185],
+  Smi [178],
   Smi [10],
   Smi [7],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   Smi [11],
-  Smi [31],
+  Smi [29],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["throw"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index ed15f99fc95a19..dce8d7ac8c294f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -92,9 +92,9 @@ snippet: "
   class A { constructor(...args) { this.args = args; } }
   new A(0, ...[1, 2, 3], 4);
 "
-frame size: 10
+frame size: 9
 parameter count: 1
-bytecode array length: 131
+bytecode array length: 130
 bytecodes: [
   /*   30 E> */ B(StackCheck),
                 B(CreateBlockContext), U8(0),
@@ -117,22 +117,22 @@ bytecodes: [
                 B(Star), R(3),
   /*  101 S> */ B(CreateArrayLiteral), U8(5), U8(1), U8(37),
                 B(Star), R(7),
-                B(LdaNamedProperty), R(7), U8(6), U8(2),
+                B(GetIterator), R(7), U8(2),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(7), U8(4),
                 B(Mov), R(5), R(2),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(6),
-                B(LdaNamedProperty), R(6), U8(7), U8(6),
+                B(LdaNamedProperty), R(6), U8(6), U8(6),
                 B(Star), R(5),
                 B(CallProperty0), R(5), R(6), U8(15),
-                B(Star), R(9),
+                B(Star), R(7),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
-                B(LdaNamedProperty), R(9), U8(8), U8(17),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+                B(LdaNamedProperty), R(7), U8(7), U8(17),
                 B(JumpIfToBooleanTrue), U8(19),
-                B(LdaNamedProperty), R(9), U8(9), U8(8),
+                B(LdaNamedProperty), R(7), U8(8), U8(8),
                 B(StaInArrayLiteral), R(4), R(3), U8(13),
                 B(Ldar), R(3),
                 B(Inc), U8(12),
@@ -152,7 +152,6 @@ constant pool: [
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
   Smi [1],
   ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
new file mode 100644
index 00000000000000..6fc00999a59662
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -0,0 +1,192 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+test function name: test
+private methods: yes
+
+---
+snippet: "
+  class A {
+    get #a() { return 1; }
+    set #a(val) { }
+  
+    constructor() {
+      this.#a++;
+      this.#a = 1;
+      return this.#a;
+    }
+  }
+  var test = A;
+  new test;
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 95
+bytecodes: [
+  /*   67 E> */ B(StackCheck),
+                B(LdaCurrentContextSlot), U8(5),
+                B(Star), R(1),
+                B(Mov), R(this), R(0),
+                B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(2),
+  /*   76 S> */ B(LdaCurrentContextSlot), U8(4),
+                B(Star), R(3),
+                B(LdaCurrentContextSlot), U8(5),
+  /*   81 E> */ B(LdaKeyedProperty), R(this), U8(0),
+                B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(3), U8(1),
+                B(Star), R(4),
+                B(CallProperty0), R(4), R(this), U8(2),
+                B(Inc), U8(4),
+                B(Star), R(4),
+  /*   83 E> */ B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(3), U8(1),
+                B(Star), R(5),
+                B(CallProperty1), R(5), R(this), R(4), U8(5),
+  /*   91 S> */ B(LdaSmi), I8(1),
+                B(Star), R(2),
+                B(LdaCurrentContextSlot), U8(4),
+                B(Star), R(4),
+                B(LdaCurrentContextSlot), U8(5),
+  /*   96 E> */ B(LdaKeyedProperty), R(this), U8(7),
+                B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(4), U8(1),
+                B(Star), R(5),
+                B(CallProperty1), R(5), R(this), R(2), U8(9),
+  /*  108 S> */ B(LdaCurrentContextSlot), U8(4),
+                B(Star), R(3),
+                B(LdaCurrentContextSlot), U8(5),
+  /*  120 E> */ B(LdaKeyedProperty), R(this), U8(11),
+                B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(3), U8(1),
+                B(Star), R(4),
+                B(CallProperty0), R(4), R(this), U8(13),
+  /*  123 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+  class B {
+    get #b() { return 1; }
+    constructor() { this.#b++; }
+  }
+  var test = B;
+  new test;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 29
+bytecodes: [
+  /*   48 E> */ B(StackCheck),
+                B(LdaCurrentContextSlot), U8(5),
+                B(Star), R(1),
+                B(Mov), R(this), R(0),
+                B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(2),
+  /*   53 S> */ B(Wide), B(LdaSmi), I16(263),
+                B(Star), R(2),
+                B(LdaConstant), U8(0),
+                B(Star), R(3),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+                B(Throw),
+]
+constant pool: [
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["#b"],
+]
+handlers: [
+]
+
+---
+snippet: "
+  class C {
+    set #c(val) { }
+    constructor() { this.#c++; }
+  }
+  var test = C;
+  new test;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 29
+bytecodes: [
+  /*   41 E> */ B(StackCheck),
+                B(LdaCurrentContextSlot), U8(5),
+                B(Star), R(1),
+                B(Mov), R(this), R(0),
+                B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(2),
+  /*   46 S> */ B(Wide), B(LdaSmi), I16(262),
+                B(Star), R(2),
+                B(LdaConstant), U8(0),
+                B(Star), R(3),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+                B(Throw),
+]
+constant pool: [
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["#c"],
+]
+handlers: [
+]
+
+---
+snippet: "
+  class D {
+    get #d() { return 1; }
+    constructor() { this.#d = 1; }
+  }
+  var test = D;
+  new test;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 29
+bytecodes: [
+  /*   48 E> */ B(StackCheck),
+                B(LdaCurrentContextSlot), U8(5),
+                B(Star), R(1),
+                B(Mov), R(this), R(0),
+                B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(2),
+  /*   53 S> */ B(Wide), B(LdaSmi), I16(263),
+                B(Star), R(2),
+                B(LdaConstant), U8(0),
+                B(Star), R(3),
+  /*   61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+                B(Throw),
+]
+constant pool: [
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["#d"],
+]
+handlers: [
+]
+
+---
+snippet: "
+  class E {
+    set #e(val) { }
+    constructor() { this.#e; }
+  }
+  var test = E;
+  new test;
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 29
+bytecodes: [
+  /*   41 E> */ B(StackCheck),
+                B(LdaCurrentContextSlot), U8(5),
+                B(Star), R(1),
+                B(Mov), R(this), R(0),
+                B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(2),
+  /*   46 S> */ B(Wide), B(LdaSmi), I16(262),
+                B(Star), R(3),
+                B(LdaConstant), U8(0),
+                B(Star), R(4),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
+                B(Throw),
+]
+constant pool: [
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["#e"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
new file mode 100644
index 00000000000000..aceee552b5d531
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
@@ -0,0 +1,398 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: yes
+private methods: yes
+
+---
+snippet: "
+  {
+    class A {
+      get #a() { return 1; }
+      set #a(val) { }
+    }
+  }
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 68
+bytecodes: [
+  /*   30 E> */ B(StackCheck),
+                B(CreateBlockContext), U8(0),
+                B(PushContext), R(2),
+                B(LdaTheHole),
+                B(Star), R(6),
+                B(CreateClosure), U8(2), U8(0), U8(2),
+                B(Star), R(3),
+                B(LdaConstant), U8(1),
+                B(Star), R(4),
+                B(Mov), R(3), R(5),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+                B(Star), R(4),
+                B(Mov), R(5), R(1),
+                B(LdaConstant), U8(3),
+                B(Star), R(5),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(CreateClosure), U8(4), U8(1), U8(2),
+                B(Star), R(6),
+                B(CreateClosure), U8(5), U8(2), U8(2),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(PopContext), R(2),
+                B(Mov), R(1), R(0),
+                B(LdaUndefined),
+  /*  101 S> */ B(Return),
+]
+constant pool: [
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["A"],
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+  {
+    class B {
+      get #b() { return 1; }
+    }
+  }
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 65
+bytecodes: [
+  /*   30 E> */ B(StackCheck),
+                B(CreateBlockContext), U8(0),
+                B(PushContext), R(2),
+                B(LdaTheHole),
+                B(Star), R(6),
+                B(CreateClosure), U8(2), U8(0), U8(2),
+                B(Star), R(3),
+                B(LdaConstant), U8(1),
+                B(Star), R(4),
+                B(Mov), R(3), R(5),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+                B(Star), R(4),
+                B(Mov), R(5), R(1),
+                B(LdaConstant), U8(3),
+                B(Star), R(5),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(CreateClosure), U8(4), U8(1), U8(2),
+                B(Star), R(6),
+                B(LdaNull),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(PopContext), R(2),
+                B(Mov), R(1), R(0),
+                B(LdaUndefined),
+  /*   81 S> */ B(Return),
+]
+constant pool: [
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["B"],
+  SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+  {
+    class C {
+      set #c(val) { }
+    }
+  }
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 65
+bytecodes: [
+  /*   30 E> */ B(StackCheck),
+                B(CreateBlockContext), U8(0),
+                B(PushContext), R(2),
+                B(LdaTheHole),
+                B(Star), R(6),
+                B(CreateClosure), U8(2), U8(0), U8(2),
+                B(Star), R(3),
+                B(LdaConstant), U8(1),
+                B(Star), R(4),
+                B(Mov), R(3), R(5),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+                B(Star), R(4),
+                B(Mov), R(5), R(1),
+                B(LdaConstant), U8(3),
+                B(Star), R(5),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(LdaNull),
+                B(Star), R(6),
+                B(CreateClosure), U8(4), U8(1), U8(2),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(PopContext), R(2),
+                B(Mov), R(1), R(0),
+                B(LdaUndefined),
+  /*   74 S> */ B(Return),
+]
+constant pool: [
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
+  SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+  {
+    class D {
+      get #d() { return 1; }
+      set #d(val) { }
+    }
+  
+    class E extends D {
+      get #e() { return 2; }
+      set #e(val) { }
+    }
+  }
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 133
+bytecodes: [
+  /*   30 E> */ B(StackCheck),
+                B(CreateBlockContext), U8(0),
+                B(PushContext), R(4),
+                B(LdaTheHole),
+                B(Star), R(8),
+                B(CreateClosure), U8(2), U8(0), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(1),
+                B(Star), R(6),
+                B(Mov), R(5), R(7),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+                B(Star), R(6),
+                B(Mov), R(7), R(3),
+                B(LdaConstant), U8(3),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(CreateClosure), U8(4), U8(1), U8(2),
+                B(Star), R(8),
+                B(CreateClosure), U8(5), U8(2), U8(2),
+                B(Star), R(9),
+                B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(8), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(PopContext), R(4),
+                B(Mov), R(3), R(0),
+  /*   38 E> */ B(CreateBlockContext), U8(6),
+                B(PushContext), R(4),
+  /*  118 E> */ B(CreateClosure), U8(8), U8(3), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(7),
+                B(Star), R(6),
+                B(Mov), R(5), R(7),
+                B(Mov), R(3), R(8),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+                B(Star), R(6),
+                B(Mov), R(7), R(2),
+                B(LdaConstant), U8(9),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(CreateClosure), U8(10), U8(4), U8(2),
+                B(Star), R(8),
+                B(CreateClosure), U8(11), U8(5), U8(2),
+                B(Star), R(9),
+                B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(8), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(PopContext), R(4),
+                B(Mov), R(2), R(1),
+                B(LdaUndefined),
+  /*  175 S> */ B(Return),
+]
+constant pool: [
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["D"],
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["E"],
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+  {
+    class A { foo() {} }
+    class C extends A {
+      get #a() { return super.foo; }
+    }
+    new C();
+  }
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 119
+bytecodes: [
+  /*   30 E> */ B(StackCheck),
+                B(CreateBlockContext), U8(0),
+                B(PushContext), R(4),
+                B(LdaTheHole),
+                B(Star), R(8),
+                B(CreateClosure), U8(2), U8(0), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(1),
+                B(Star), R(6),
+                B(CreateClosure), U8(3), U8(1), U8(2),
+                B(Star), R(9),
+                B(Mov), R(5), R(7),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
+                B(Star), R(6),
+                B(Mov), R(7), R(3),
+                B(PopContext), R(4),
+                B(Mov), R(3), R(0),
+  /*   38 E> */ B(CreateBlockContext), U8(4),
+                B(PushContext), R(4),
+  /*   77 E> */ B(CreateClosure), U8(6), U8(2), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(5),
+                B(Star), R(6),
+                B(Mov), R(5), R(7),
+                B(Mov), R(3), R(8),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+                B(Star), R(6),
+                B(Mov), R(7), R(2),
+                B(LdaConstant), U8(7),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(CreateClosure), U8(8), U8(3), U8(2),
+                B(Star), R(8),
+                B(Ldar), R(6),
+                B(StaNamedProperty), R(8), U8(9), U8(0),
+                B(LdaNull),
+                B(Star), R(9),
+                B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(8), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(PopContext), R(4),
+                B(Mov), R(2), R(1),
+  /*  122 S> */ B(Ldar), R(1),
+  /*  122 E> */ B(Construct), R(1), R(0), U8(0), U8(2),
+                B(LdaUndefined),
+  /*  133 S> */ B(Return),
+]
+constant pool: [
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
+  SHARED_FUNCTION_INFO_TYPE,
+  SYMBOL_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+  {
+    class A { foo(val) {} }
+    class C extends A {
+      set #a(val) { super.foo(val); }
+    }
+    new C();
+  }
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 119
+bytecodes: [
+  /*   30 E> */ B(StackCheck),
+                B(CreateBlockContext), U8(0),
+                B(PushContext), R(4),
+                B(LdaTheHole),
+                B(Star), R(8),
+                B(CreateClosure), U8(2), U8(0), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(1),
+                B(Star), R(6),
+                B(CreateClosure), U8(3), U8(1), U8(2),
+                B(Star), R(9),
+                B(Mov), R(5), R(7),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
+                B(Star), R(6),
+                B(Mov), R(7), R(3),
+                B(PopContext), R(4),
+                B(Mov), R(3), R(0),
+  /*   38 E> */ B(CreateBlockContext), U8(4),
+                B(PushContext), R(4),
+  /*   80 E> */ B(CreateClosure), U8(6), U8(2), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(5),
+                B(Star), R(6),
+                B(Mov), R(5), R(7),
+                B(Mov), R(3), R(8),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+                B(Star), R(6),
+                B(Mov), R(7), R(2),
+                B(LdaConstant), U8(7),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(LdaNull),
+                B(Star), R(8),
+                B(CreateClosure), U8(8), U8(3), U8(2),
+                B(Star), R(9),
+                B(Ldar), R(6),
+                B(StaNamedProperty), R(9), U8(9), U8(0),
+                B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(8), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(PopContext), R(4),
+                B(Mov), R(2), R(1),
+  /*  126 S> */ B(Ldar), R(1),
+  /*  126 E> */ B(Construct), R(1), R(0), U8(0), U8(2),
+                B(LdaUndefined),
+  /*  137 S> */ B(Return),
+]
+constant pool: [
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
+  SHARED_FUNCTION_INFO_TYPE,
+  SYMBOL_TYPE,
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
new file mode 100644
index 00000000000000..d41b3421879890
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -0,0 +1,104 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+test function name: test
+private methods: yes
+
+---
+snippet: "
+  class A {
+    #a() { return 1; }
+    constructor() { return this.#a(); }
+  }
+  
+  var test = A;
+  new A;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 28
+bytecodes: [
+  /*   44 E> */ B(StackCheck),
+                B(LdaCurrentContextSlot), U8(5),
+                B(Star), R(1),
+                B(Mov), R(this), R(0),
+                B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(2),
+  /*   49 S> */ B(LdaCurrentContextSlot), U8(5),
+  /*   61 E> */ B(LdaKeyedProperty), R(this), U8(0),
+                B(LdaCurrentContextSlot), U8(4),
+                B(Star), R(2),
+  /*   63 E> */ B(CallAnyReceiver), R(2), R(this), U8(1), U8(2),
+  /*   66 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+  class B {
+    #b() { return 1; }
+    constructor() { this.#b = 1; }
+  }
+  
+  var test = B;
+  new test;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 29
+bytecodes: [
+  /*   44 E> */ B(StackCheck),
+                B(LdaCurrentContextSlot), U8(5),
+                B(Star), R(1),
+                B(Mov), R(this), R(0),
+                B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(2),
+  /*   49 S> */ B(Wide), B(LdaSmi), I16(261),
+                B(Star), R(2),
+                B(LdaConstant), U8(0),
+                B(Star), R(3),
+  /*   57 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+                B(Throw),
+]
+constant pool: [
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["#b"],
+]
+handlers: [
+]
+
+---
+snippet: "
+  class C {
+    #c() { return 1; }
+    constructor() { this.#c++; }
+  }
+  
+  var test = C;
+  new test;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 29
+bytecodes: [
+  /*   44 E> */ B(StackCheck),
+                B(LdaCurrentContextSlot), U8(5),
+                B(Star), R(1),
+                B(Mov), R(this), R(0),
+                B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(2),
+  /*   49 S> */ B(Wide), B(LdaSmi), I16(261),
+                B(Star), R(2),
+                B(LdaConstant), U8(0),
+                B(Star), R(3),
+                B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+                B(Throw),
+]
+constant pool: [
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["#c"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
new file mode 100644
index 00000000000000..d1aab34fda7992
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
@@ -0,0 +1,198 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: yes
+private methods: yes
+
+---
+snippet: "
+  {
+    class A {
+      #a() { return 1; }
+    }
+  }
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 55
+bytecodes: [
+  /*   30 E> */ B(StackCheck),
+                B(CreateBlockContext), U8(0),
+                B(PushContext), R(2),
+                B(LdaTheHole),
+                B(Star), R(6),
+                B(CreateClosure), U8(2), U8(0), U8(2),
+                B(Star), R(3),
+                B(LdaConstant), U8(1),
+                B(Star), R(4),
+                B(CreateClosure), U8(3), U8(1), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(Mov), R(3), R(5),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+                B(Star), R(4),
+                B(Mov), R(5), R(1),
+                B(LdaConstant), U8(4),
+                B(Star), R(5),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(PopContext), R(2),
+                B(Mov), R(1), R(0),
+                B(LdaUndefined),
+  /*   77 S> */ B(Return),
+]
+constant pool: [
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["A"],
+]
+handlers: [
+]
+
+---
+snippet: "
+  {
+    class D {
+      #d() { return 1; }
+    }
+    class E extends D {
+      #e() { return 2; }
+    }
+  }
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 107
+bytecodes: [
+  /*   30 E> */ B(StackCheck),
+                B(CreateBlockContext), U8(0),
+                B(PushContext), R(4),
+                B(LdaTheHole),
+                B(Star), R(8),
+                B(CreateClosure), U8(2), U8(0), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(1),
+                B(Star), R(6),
+                B(CreateClosure), U8(3), U8(1), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(Mov), R(5), R(7),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+                B(Star), R(6),
+                B(Mov), R(7), R(3),
+                B(LdaConstant), U8(4),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(PopContext), R(4),
+                B(Mov), R(3), R(0),
+  /*   38 E> */ B(CreateBlockContext), U8(5),
+                B(PushContext), R(4),
+  /*   93 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(6),
+                B(Star), R(6),
+                B(CreateClosure), U8(8), U8(3), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(Mov), R(5), R(7),
+                B(Mov), R(3), R(8),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+                B(Star), R(6),
+                B(Mov), R(7), R(2),
+                B(LdaConstant), U8(9),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(PopContext), R(4),
+                B(Mov), R(2), R(1),
+                B(LdaUndefined),
+  /*  126 S> */ B(Return),
+]
+constant pool: [
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["D"],
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["E"],
+]
+handlers: [
+]
+
+---
+snippet: "
+  {
+    class A { foo() {} }
+    class C extends A {
+      #m() { return super.foo; }
+    }
+  }
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 106
+bytecodes: [
+  /*   30 E> */ B(StackCheck),
+                B(CreateBlockContext), U8(0),
+                B(PushContext), R(4),
+                B(LdaTheHole),
+                B(Star), R(8),
+                B(CreateClosure), U8(2), U8(0), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(1),
+                B(Star), R(6),
+                B(CreateClosure), U8(3), U8(1), U8(2),
+                B(Star), R(9),
+                B(Mov), R(5), R(7),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
+                B(Star), R(6),
+                B(Mov), R(7), R(3),
+                B(PopContext), R(4),
+                B(Mov), R(3), R(0),
+  /*   38 E> */ B(CreateBlockContext), U8(4),
+                B(PushContext), R(4),
+  /*   77 E> */ B(CreateClosure), U8(6), U8(2), U8(2),
+                B(Star), R(5),
+                B(LdaConstant), U8(5),
+                B(Star), R(6),
+                B(CreateClosure), U8(7), U8(3), U8(2),
+                B(StaCurrentContextSlot), U8(4),
+                B(Mov), R(5), R(7),
+                B(Mov), R(3), R(8),
+                B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+                B(Star), R(6),
+                B(Mov), R(7), R(2),
+                B(LdaConstant), U8(8),
+                B(Star), R(7),
+                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+                B(StaCurrentContextSlot), U8(5),
+                B(LdaCurrentContextSlot), U8(4),
+                B(Star), R(8),
+                B(Ldar), R(6),
+                B(StaNamedProperty), R(8), U8(9), U8(0),
+                B(PopContext), R(4),
+                B(Mov), R(2), R(1),
+                B(LdaUndefined),
+  /*  118 S> */ B(Return),
+]
+constant pool: [
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SCOPE_INFO_TYPE,
+  FIXED_ARRAY_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
+  SYMBOL_TYPE,
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden
deleted file mode 100644
index 5821a20069251c..00000000000000
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden
+++ /dev/null
@@ -1,160 +0,0 @@
-#
-# Autogenerated by generate-bytecode-expectations.
-#
-
----
-wrap: yes
-private methods: yes
-
----
-snippet: "
-  {
-    class A {
-      #a() { return 1; }
-      callA() { return this.#a(); }
-    }
-  
-    const a = new A;
-    a.callA();
-  }
-"
-frame size: 9
-parameter count: 1
-bytecode array length: 80
-bytecodes: [
-  /*   30 E> */ B(StackCheck),
-                B(CreateBlockContext), U8(0),
-                B(PushContext), R(3),
-                B(LdaTheHole),
-                B(Star), R(7),
-                B(CreateClosure), U8(2), U8(0), U8(2),
-                B(Star), R(4),
-                B(LdaConstant), U8(1),
-                B(Star), R(5),
-                B(CreateClosure), U8(3), U8(1), U8(2),
-                B(StaCurrentContextSlot), U8(4),
-                B(CreateClosure), U8(4), U8(2), U8(2),
-                B(Star), R(8),
-                B(Mov), R(4), R(6),
-                B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
-                B(Star), R(5),
-                B(Mov), R(6), R(2),
-                B(LdaConstant), U8(5),
-                B(Star), R(6),
-                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(6), U8(1),
-                B(StaCurrentContextSlot), U8(5),
-                B(PopContext), R(3),
-                B(Mov), R(2), R(0),
-  /*  122 S> */ B(Ldar), R(0),
-  /*  122 E> */ B(Construct), R(0), R(0), U8(0), U8(0),
-                B(Star), R(1),
-  /*  133 S> */ B(LdaNamedProperty), R(1), U8(6), U8(2),
-                B(Star), R(3),
-  /*  133 E> */ B(CallProperty0), R(3), R(1), U8(4),
-                B(LdaUndefined),
-  /*  144 S> */ B(Return),
-]
-constant pool: [
-  SCOPE_INFO_TYPE,
-  FIXED_ARRAY_TYPE,
-  SHARED_FUNCTION_INFO_TYPE,
-  SHARED_FUNCTION_INFO_TYPE,
-  SHARED_FUNCTION_INFO_TYPE,
-  ONE_BYTE_INTERNALIZED_STRING_TYPE ["A"],
-  ONE_BYTE_INTERNALIZED_STRING_TYPE ["callA"],
-]
-handlers: [
-]
-
----
-snippet: "
-  {
-    class D {
-      #d() { return 1; }
-      callD() { return this.#d(); }
-    }
-  
-    class E extends D {
-      #e() { return 2; }
-      callE() { return this.callD() + this.#e(); }
-    }
-  
-    const e = new E;
-    e.callE();
-  }
-"
-frame size: 11
-parameter count: 1
-bytecode array length: 138
-bytecodes: [
-  /*   30 E> */ B(StackCheck),
-                B(CreateBlockContext), U8(0),
-                B(PushContext), R(5),
-                B(LdaTheHole),
-                B(Star), R(9),
-                B(CreateClosure), U8(2), U8(0), U8(2),
-                B(Star), R(6),
-                B(LdaConstant), U8(1),
-                B(Star), R(7),
-                B(CreateClosure), U8(3), U8(1), U8(2),
-                B(StaCurrentContextSlot), U8(4),
-                B(CreateClosure), U8(4), U8(2), U8(2),
-                B(Star), R(10),
-                B(Mov), R(6), R(8),
-                B(CallRuntime), U16(Runtime::kDefineClass), R(7), U8(4),
-                B(Star), R(7),
-                B(Mov), R(8), R(4),
-                B(LdaConstant), U8(5),
-                B(Star), R(8),
-                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(8), U8(1),
-                B(StaCurrentContextSlot), U8(5),
-                B(PopContext), R(5),
-                B(Mov), R(4), R(0),
-  /*   38 E> */ B(CreateBlockContext), U8(6),
-                B(PushContext), R(5),
-  /*  128 E> */ B(CreateClosure), U8(8), U8(3), U8(2),
-                B(Star), R(6),
-                B(LdaConstant), U8(7),
-                B(Star), R(7),
-                B(CreateClosure), U8(9), U8(4), U8(2),
-                B(StaCurrentContextSlot), U8(4),
-                B(CreateClosure), U8(10), U8(5), U8(2),
-                B(Star), R(10),
-                B(Mov), R(6), R(8),
-                B(Mov), R(4), R(9),
-                B(CallRuntime), U16(Runtime::kDefineClass), R(7), U8(4),
-                B(Star), R(7),
-                B(Mov), R(8), R(3),
-                B(LdaConstant), U8(11),
-                B(Star), R(8),
-                B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(8), U8(1),
-                B(StaCurrentContextSlot), U8(5),
-                B(PopContext), R(5),
-                B(Mov), R(3), R(1),
-  /*  221 S> */ B(Ldar), R(1),
-  /*  221 E> */ B(Construct), R(1), R(0), U8(0), U8(0),
-                B(Star), R(2),
-  /*  232 S> */ B(LdaNamedProperty), R(2), U8(12), U8(2),
-                B(Star), R(5),
-  /*  232 E> */ B(CallProperty0), R(5), R(2), U8(4),
-                B(LdaUndefined),
-  /*  243 S> */ B(Return),
-]
-constant pool: [
-  SCOPE_INFO_TYPE,
-  FIXED_ARRAY_TYPE,
-  SHARED_FUNCTION_INFO_TYPE,
-  SHARED_FUNCTION_INFO_TYPE,
-  SHARED_FUNCTION_INFO_TYPE,
-  ONE_BYTE_INTERNALIZED_STRING_TYPE ["D"],
-  SCOPE_INFO_TYPE,
-  FIXED_ARRAY_TYPE,
-  SHARED_FUNCTION_INFO_TYPE,
-  SHARED_FUNCTION_INFO_TYPE,
-  SHARED_FUNCTION_INFO_TYPE,
-  ONE_BYTE_INTERNALIZED_STRING_TYPE ["E"],
-  ONE_BYTE_INTERNALIZED_STRING_TYPE ["callE"],
-]
-handlers: [
-]
-
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index 9cb5a6b01cd56a..1a4ad60629aa6e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -220,13 +220,10 @@ snippet: "
 "
 frame size: 4
 parameter count: 1
-bytecode array length: 53
+bytecode array length: 44
 bytecodes: [
   /*   10 E> */ B(StackCheck),
   /*   37 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
-                B(JumpIfNull), U8(4),
-                B(JumpIfNotUndefined), U8(7),
-  /*   26 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
                 B(Star), R(3),
   /*   28 S> */ B(LdaNamedProperty), R(3), U8(1), U8(1),
                 B(Star), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index 74849d1c8556b4..e3eed68138158e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -91,9 +91,9 @@ snippet: "
     test = new B(1, 2, 3).constructor;
   })();
 "
-frame size: 13
+frame size: 12
 parameter count: 1
-bytecode array length: 128
+bytecode array length: 124
 bytecodes: [
                 B(CreateRestParameter),
                 B(Star), R(3),
@@ -111,23 +111,22 @@ bytecodes: [
                 B(Ldar), R(6),
                 B(Inc), U8(3),
   /*  152 S> */ B(Star), R(6),
-                B(LdaNamedProperty), R(3), U8(0), U8(4),
+                B(GetIterator), R(3), U8(4),
                 B(Star), R(11),
                 B(CallProperty0), R(11), R(3), U8(6),
-                B(Mov), R(3), R(10),
                 B(Mov), R(1), R(4),
                 B(JumpIfJSReceiver), U8(7),
                 B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
                 B(Star), R(9),
-                B(LdaNamedProperty), R(9), U8(1), U8(8),
+                B(LdaNamedProperty), R(9), U8(0), U8(8),
                 B(Star), R(8),
                 B(CallProperty0), R(8), R(9), U8(14),
-                B(Star), R(12),
+                B(Star), R(10),
                 B(JumpIfJSReceiver), U8(7),
-                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
-                B(LdaNamedProperty), R(12), U8(2), U8(16),
+                B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+                B(LdaNamedProperty), R(10), U8(1), U8(16),
                 B(JumpIfToBooleanTrue), U8(19),
-                B(LdaNamedProperty), R(12), U8(3), U8(10),
+                B(LdaNamedProperty), R(10), U8(2), U8(10),
                 B(StaInArrayLiteral), R(7), R(6), U8(1),
                 B(Ldar), R(6),
                 B(Inc), U8(3),
@@ -147,7 +146,6 @@ bytecodes: [
   /*  162 S> */ B(Return),
 ]
 constant pool: [
-  SYMBOL_TYPE,
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
   ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
index 19a09ba49c3c12..066d6e9f033e28 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -2655,7 +2655,7 @@ snippet: "
 "
 frame size: 163
 parameter count: 1
-bytecode array length: 626
+bytecode array length: 624
 bytecodes: [
   /*   30 E> */ B(StackCheck),
   /*   43 S> */ B(LdaZero),
@@ -2977,8 +2977,7 @@ bytecodes: [
   /* 2146 S> */ B(LdaZero),
                 B(Star), R(1),
   /* 2162 S> */ B(Ldar), R(0),
-                B(JumpIfUndefined), U8(74),
-                B(JumpIfNull), U8(72),
+                B(JumpIfUndefinedOrNull), U8(72),
                 B(Wide), B(ToObject), R16(157),
                 B(Wide), B(ForInEnumerate), R16(157),
                 B(Wide), B(ForInPrepare), R16(158), U16(0),
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index e1601d4642aaab..fda02933aa04ee 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2758,7 +2758,7 @@ TEST(PrivateClassFields) {
                      LoadGolden("PrivateClassFields.golden")));
 }
 
-TEST(PrivateMethods) {
+TEST(PrivateMethodDeclaration) {
   bool old_methods_flag = i::FLAG_harmony_private_methods;
   i::FLAG_harmony_private_methods = true;
   InitializedIgnitionHandleScope scope;
@@ -2768,30 +2768,179 @@ TEST(PrivateMethods) {
       "{\n"
       "  class A {\n"
       "    #a() { return 1; }\n"
-      "    callA() { return this.#a(); }\n"
       "  }\n"
-      "\n"
-      "  const a = new A;\n"
-      "  a.callA();\n"
       "}\n",
 
       "{\n"
       "  class D {\n"
       "    #d() { return 1; }\n"
-      "    callD() { return this.#d(); }\n"
       "  }\n"
-      "\n"
       "  class E extends D {\n"
       "    #e() { return 2; }\n"
-      "    callE() { return this.callD() + this.#e(); }\n"
+      "  }\n"
+      "}\n",
+
+      "{\n"
+      "  class A { foo() {} }\n"
+      "  class C extends A {\n"
+      "    #m() { return super.foo; }\n"
+      "  }\n"
+      "}\n"};
+
+  CHECK(CompareTexts(BuildActual(printer, snippets),
+                     LoadGolden("PrivateMethodDeclaration.golden")));
+  i::FLAG_harmony_private_methods = old_methods_flag;
+}
+
+TEST(PrivateMethodAccess) {
+  bool old_methods_flag = i::FLAG_harmony_private_methods;
+  i::FLAG_harmony_private_methods = true;
+  InitializedIgnitionHandleScope scope;
+  BytecodeExpectationsPrinter printer(CcTest::isolate());
+  printer.set_wrap(false);
+  printer.set_test_function_name("test");
+
+  const char* snippets[] = {
+      "class A {\n"
+      "  #a() { return 1; }\n"
+      "  constructor() { return this.#a(); }\n"
+      "}\n"
+      "\n"
+      "var test = A;\n"
+      "new A;\n",
+
+      "class B {\n"
+      "  #b() { return 1; }\n"
+      "  constructor() { this.#b = 1; }\n"
+      "}\n"
+      "\n"
+      "var test = B;\n"
+      "new test;\n",
+
+      "class C {\n"
+      "  #c() { return 1; }\n"
+      "  constructor() { this.#c++; }\n"
+      "}\n"
+      "\n"
+      "var test = C;\n"
+      "new test;\n"};
+
+  CHECK(CompareTexts(BuildActual(printer, snippets),
+                     LoadGolden("PrivateMethodAccess.golden")));
+  i::FLAG_harmony_private_methods = old_methods_flag;
+}
+
+TEST(PrivateAccessorAccess) {
+  bool old_methods_flag = i::FLAG_harmony_private_methods;
+  i::FLAG_harmony_private_methods = true;
+  InitializedIgnitionHandleScope scope;
+  BytecodeExpectationsPrinter printer(CcTest::isolate());
+  printer.set_wrap(false);
+  printer.set_test_function_name("test");
+
+  const char* snippets[] = {
+      "class A {\n"
+      "  get #a() { return 1; }\n"
+      "  set #a(val) { }\n"
+      "\n"
+      "  constructor() {\n"
+      "    this.#a++;\n"
+      "    this.#a = 1;\n"
+      "    return this.#a;\n"
+      "  }\n"
+      "}\n"
+      "var test = A;\n"
+      "new test;\n",
+
+      "class B {\n"
+      "  get #b() { return 1; }\n"
+      "  constructor() { this.#b++; }\n"
+      "}\n"
+      "var test = B;\n"
+      "new test;\n",
+
+      "class C {\n"
+      "  set #c(val) { }\n"
+      "  constructor() { this.#c++; }\n"
+      "}\n"
+      "var test = C;\n"
+      "new test;\n",
+
+      "class D {\n"
+      "  get #d() { return 1; }\n"
+      "  constructor() { this.#d = 1; }\n"
+      "}\n"
+      "var test = D;\n"
+      "new test;\n",
+
+      "class E {\n"
+      "  set #e(val) { }\n"
+      "  constructor() { this.#e; }\n"
+      "}\n"
+      "var test = E;\n"
+      "new test;\n"};
+
+  CHECK(CompareTexts(BuildActual(printer, snippets),
+                     LoadGolden("PrivateAccessorAccess.golden")));
+  i::FLAG_harmony_private_methods = old_methods_flag;
+}
+
+TEST(PrivateAccessorDeclaration) {
+  bool old_methods_flag = i::FLAG_harmony_private_methods;
+  i::FLAG_harmony_private_methods = true;
+  InitializedIgnitionHandleScope scope;
+  BytecodeExpectationsPrinter printer(CcTest::isolate());
+
+  const char* snippets[] = {
+      "{\n"
+      "  class A {\n"
+      "    get #a() { return 1; }\n"
+      "    set #a(val) { }\n"
+      "  }\n"
+      "}\n",
+
+      "{\n"
+      "  class B {\n"
+      "    get #b() { return 1; }\n"
+      "  }\n"
+      "}\n",
+
+      "{\n"
+      "  class C {\n"
+      "    set #c(val) { }\n"
+      "  }\n"
+      "}\n",
+
+      "{\n"
+      "  class D {\n"
+      "    get #d() { return 1; }\n"
+      "    set #d(val) { }\n"
       "  }\n"
       "\n"
-      "  const e = new E;\n"
-      "  e.callE();\n"
+      "  class E extends D {\n"
+      "    get #e() { return 2; }\n"
+      "    set #e(val) { }\n"
+      "  }\n"
+      "}\n",
+
+      "{\n"
+      "  class A { foo() {} }\n"
+      "  class C extends A {\n"
+      "    get #a() { return super.foo; }\n"
+      "  }\n"
+      "  new C();\n"
+      "}\n",
+
+      "{\n"
+      "  class A { foo(val) {} }\n"
+      "  class C extends A {\n"
+      "    set #a(val) { super.foo(val); }\n"
+      "  }\n"
+      "  new C();\n"
       "}\n"};
 
   CHECK(CompareTexts(BuildActual(printer, snippets),
-                     LoadGolden("PrivateMethods.golden")));
+                     LoadGolden("PrivateAccessorDeclaration.golden")));
   i::FLAG_harmony_private_methods = old_methods_flag;
 }
 
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index d20dfff6527506..a98445be978f27 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -23,14 +23,10 @@
 class TestDataSource : public perfetto::DataSource<TestDataSource> {
  public:
   void OnSetup(const SetupArgs&) override {}
-  void OnStart(const StartArgs&) override { started_.Signal(); }
+  void OnStart(const StartArgs&) override {}
   void OnStop(const StopArgs&) override {}
-
-  static v8::base::Semaphore started_;
 };
 
-v8::base::Semaphore TestDataSource::started_{0};
-
 PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(TestDataSource);
 #endif  // V8_USE_PERFETTO
 
@@ -625,8 +621,6 @@ TEST(Perfetto) {
     TRACE_EVENT1("v8", "test2", "arg1", uint64_arg);
     TRACE_EVENT2("v8", "test3", "arg1", uint64_arg, "arg2", str_arg);
   }
-  TRACE_EVENT_INSTANT0("v8", "final event not captured",
-                       TRACE_EVENT_SCOPE_THREAD);
 
   harness.StopTracing();
 
@@ -688,8 +682,6 @@ TEST(Categories) {
     TRACE_EVENT0("cat", "v8.Test2");
     TRACE_EVENT0("v8", "v8.Test3");
   }
-  TRACE_EVENT_INSTANT0("v8", "final event not captured",
-                       TRACE_EVENT_SCOPE_THREAD);
 
   harness.StopTracing();
 
@@ -765,8 +757,6 @@ TEST(MultipleArgsAndCopy) {
                          std::move(trace_event_arg), "a2",
                          new ConvertableToTraceFormatMock(123));
   }
-  TRACE_EVENT_INSTANT0("v8", "final event not captured",
-                       TRACE_EVENT_SCOPE_THREAD);
 
   harness.StopTracing();
 
@@ -894,8 +884,6 @@ TEST(JsonIntegrationTest) {
     TRACE_EVENT1("v8", "v8.Test.3", "3", inf_num);
     TRACE_EVENT1("v8", "v8.Test.4", "4", neg_inf_num);
   }
-  TRACE_EVENT_INSTANT0("v8", "final event not captured",
-                       TRACE_EVENT_SCOPE_THREAD);
 
   harness.StopTracing();
   std::string json = harness.perfetto_json_stream();
@@ -922,8 +910,7 @@ TEST(TracingPerfetto) {
   auto tracing_session_ =
       perfetto::Tracing::NewTrace(perfetto::BackendType::kInProcessBackend);
   tracing_session_->Setup(perfetto_trace_config);
-  tracing_session_->Start();
-  TestDataSource::started_.Wait();
+  tracing_session_->StartBlocking();
 
   for (int i = 0; i < 15; i++) {
     TestDataSource::Trace([&](TestDataSource::TraceContext ctx) {
@@ -938,10 +925,7 @@ TEST(TracingPerfetto) {
       trace_event->set_thread_timestamp(123);
     });
   }
-  v8::base::Semaphore stopped_{0};
-  tracing_session_->SetOnStopCallback([&stopped_]() { stopped_.Signal(); });
-  tracing_session_->Stop();
-  stopped_.Wait();
+  tracing_session_->StopBlocking();
 
   std::ostringstream perfetto_json_stream_;
 
@@ -957,6 +941,40 @@ TEST(TracingPerfetto) {
   CHECK_GT(perfetto_json_stream_.str().length(), 0);
 }
 
+TEST(StartAndStopRepeated) {
+  for (int i = 0; i < 3; i++) {
+    ::perfetto::TraceConfig perfetto_trace_config;
+    perfetto_trace_config.add_buffers()->set_size_kb(4096);
+    auto* ds_config =
+        perfetto_trace_config.add_data_sources()->mutable_config();
+    ds_config->set_name("v8.trace_events");
+
+    perfetto::DataSourceDescriptor dsd;
+    dsd.set_name("v8.trace_events");
+    TestDataSource::Register(dsd);
+
+    auto tracing_session_ =
+        perfetto::Tracing::NewTrace(perfetto::BackendType::kInProcessBackend);
+    tracing_session_->Setup(perfetto_trace_config);
+    tracing_session_->StartBlocking();
+
+    for (int i = 0; i < 15; i++) {
+      TestDataSource::Trace([&](TestDataSource::TraceContext ctx) {
+        auto packet = ctx.NewTracePacket();
+        auto* trace_event_bundle = packet->set_chrome_events();
+        auto* trace_event = trace_event_bundle->add_trace_events();
+
+        trace_event->set_phase('c');
+        trace_event->set_thread_id(v8::base::OS::GetCurrentThreadId());
+        trace_event->set_timestamp(123);
+        trace_event->set_process_id(v8::base::OS::GetCurrentProcessId());
+        trace_event->set_thread_timestamp(123);
+      });
+    }
+    tracing_session_->StopBlocking();
+  }
+}
+
 #endif  // V8_USE_PERFETTO
 
 }  // namespace tracing
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index 39d95897d6db76..35b7048bb01e3b 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -776,6 +776,43 @@ TEST(RelocatingCharacterStream) {
   CHECK_EQ('d', two_byte_string_stream->Advance());
 }
 
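+// Check that an unbuffered character stream remains valid when the GC moves
+// the two-byte string it reads from.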
+TEST(RelocatingUnbufferedCharacterStream) {
+  ManualGCScope manual_gc_scope;
+  CcTest::InitializeVM();
+  i::Isolate* i_isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  const char16_t* string = u"abc\u2603";
+  int length = static_cast<int>(std::char_traits<char16_t>::length(string));
+  std::unique_ptr<i::uc16[]> uc16_buffer(new i::uc16[length]);
+  for (int i = 0; i < length; i++) {
+    uc16_buffer[i] = string[i];
+  }
+  i::Vector<const i::uc16> two_byte_vector(uc16_buffer.get(), length);
+  i::Handle<i::String> two_byte_string =
+      i_isolate->factory()
+          ->NewStringFromTwoByte(two_byte_vector, i::AllocationType::kYoung)
+          .ToHandleChecked();
+  std::unique_ptr<i::Utf16CharacterStream> two_byte_string_stream(
+      i::ScannerStream::For(i_isolate, two_byte_string, 0, length));
+
+  // Seek to offset 2 so that buffer_pos_ is not zero initially.
+  two_byte_string_stream->Seek(2);
+  CHECK_EQ('c', two_byte_string_stream->Advance());
+  CHECK_EQ(size_t{3}, two_byte_string_stream->pos());
+
+  i::String raw = *two_byte_string;
+  i_isolate->heap()->CollectGarbage(i::NEW_SPACE,
+                                    i::GarbageCollectionReason::kUnknown);
+  // The GC moved the string, and the buffer was updated to the new location.
+  CHECK_NE(raw, *two_byte_string);
+
+  // Check that we correctly moved based on buffer_pos_, not based on a position
+  // of zero.
+  CHECK_EQ(u'\u2603', two_byte_string_stream->Advance());
+  CHECK_EQ(size_t{4}, two_byte_string_stream->pos());
+}
+
 TEST(CloneCharacterStreams) {
   v8::HandleScope handles(CcTest::isolate());
   v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-accessor-assembler.cc b/deps/v8/test/cctest/test-accessor-assembler.cc
index 20a2bc2d8099d6..c88c85b5860108 100644
--- a/deps/v8/test/cctest/test-accessor-assembler.cc
+++ b/deps/v8/test/cctest/test-accessor-assembler.cc
@@ -18,6 +18,7 @@ namespace internal {
 using compiler::CodeAssemblerTester;
 using compiler::FunctionTester;
 using compiler::Node;
+using compiler::TNode;
 
 namespace {
 
@@ -129,8 +130,9 @@ TEST(TryProbeStubCache) {
 
   {
     Node* receiver = m.Parameter(0);
-    Node* name = m.Parameter(1);
-    Node* expected_handler = m.Parameter(2);
+    TNode<Object> name = m.CAST(m.Parameter(1));
+    TNode<MaybeObject> expected_handler =
+        m.UncheckedCast<MaybeObject>(m.Parameter(2));
 
     Label passed(&m), failed(&m);
 
@@ -140,12 +142,11 @@ TEST(TryProbeStubCache) {
     m.TryProbeStubCache(&stub_cache, receiver, name, &if_handler, &var_handler,
                         &if_miss);
     m.BIND(&if_handler);
-    m.Branch(m.WordEqual(expected_handler,
-                         m.BitcastMaybeObjectToWord(var_handler.value())),
-             &passed, &failed);
+    m.Branch(m.TaggedEqual(expected_handler, var_handler.value()), &passed,
+             &failed);
 
     m.BIND(&if_miss);
-    m.Branch(m.WordEqual(expected_handler, m.IntPtrConstant(0)), &passed,
+    m.Branch(m.TaggedEqual(expected_handler, m.SmiConstant(0)), &passed,
              &failed);
 
     m.BIND(&passed);
diff --git a/deps/v8/test/cctest/test-api-stack-traces.cc b/deps/v8/test/cctest/test-api-stack-traces.cc
index bceba18c4a5910..26941793c66daa 100644
--- a/deps/v8/test/cctest/test-api-stack-traces.cc
+++ b/deps/v8/test/cctest/test-api-stack-traces.cc
@@ -250,31 +250,31 @@ static void AnalyzeStackInNativeCode(
     v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
         args.GetIsolate(), 5, v8::StackTrace::kOverview);
     CHECK_EQ(3, stackTrace->GetFrameCount());
-    checkStackFrame(nullptr, "function.name", 3, 1, true, false,
+    checkStackFrame(nullptr, "function.name", 1, 1, true, false,
                     stackTrace->GetFrame(isolate, 0));
   } else if (testGroup == kDisplayName) {
     v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
         args.GetIsolate(), 5, v8::StackTrace::kOverview);
     CHECK_EQ(3, stackTrace->GetFrameCount());
-    checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
+    checkStackFrame(nullptr, "function.displayName", 1, 1, true, false,
                     stackTrace->GetFrame(isolate, 0));
   } else if (testGroup == kFunctionNameAndDisplayName) {
     v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
         args.GetIsolate(), 5, v8::StackTrace::kOverview);
     CHECK_EQ(3, stackTrace->GetFrameCount());
-    checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
+    checkStackFrame(nullptr, "function.displayName", 1, 1, true, false,
                     stackTrace->GetFrame(isolate, 0));
   } else if (testGroup == kDisplayNameIsNotString) {
     v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
         args.GetIsolate(), 5, v8::StackTrace::kOverview);
     CHECK_EQ(3, stackTrace->GetFrameCount());
-    checkStackFrame(nullptr, "function.name", 3, 1, true, false,
+    checkStackFrame(nullptr, "function.name", 1, 1, true, false,
                     stackTrace->GetFrame(isolate, 0));
   } else if (testGroup == kFunctionNameIsNotString) {
     v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
         args.GetIsolate(), 5, v8::StackTrace::kOverview);
     CHECK_EQ(3, stackTrace->GetFrameCount());
-    checkStackFrame(nullptr, "", 3, 1, true, false,
+    checkStackFrame(nullptr, "", 1, 1, true, false,
                     stackTrace->GetFrame(isolate, 0));
   }
 }
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 73bea08d086b50..1a670a9b10f1f9 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -12975,7 +12975,7 @@ void ApiTestFuzzer::SetUp(PartOfTest part) {
     RegisterThreadedTest::nth(i)->fuzzer_ = new ApiTestFuzzer(i + start);
   }
   for (int i = 0; i < active_tests_; i++) {
-    RegisterThreadedTest::nth(i)->fuzzer_->Start();
+    CHECK(RegisterThreadedTest::nth(i)->fuzzer_->Start());
   }
 }
 
@@ -18386,8 +18386,8 @@ TEST(MultipleIsolatesOnIndividualThreads) {
   IsolateThread thread2(12);
 
   // Compute some fibonacci numbers on 3 threads in 3 isolates.
-  thread1.Start();
-  thread2.Start();
+  CHECK(thread1.Start());
+  CHECK(thread2.Start());
 
   int result1 = CalcFibonacci(CcTest::isolate(), 21);
   int result2 = CalcFibonacci(CcTest::isolate(), 12);
@@ -18481,7 +18481,7 @@ class InitDefaultIsolateThread : public v8::base::Thread {
 
 static void InitializeTestHelper(InitDefaultIsolateThread::TestCase testCase) {
   InitDefaultIsolateThread thread(testCase);
-  thread.Start();
+  CHECK(thread.Start());
   thread.Join();
   CHECK(thread.result());
 }
@@ -19951,6 +19951,71 @@ TEST(ScopedMicrotasks) {
   LocalContext env;
   v8::HandleScope handles(env->GetIsolate());
   env->GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
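+  // Under the kScoped policy, queued microtasks run when the outermost
+  // kRunMicrotasks scope exits; the scopes below verify this also holds when
+  // the script throws or execution is terminated.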
+  {
+    v8::MicrotasksScope scope1(env->GetIsolate(),
+                               v8::MicrotasksScope::kRunMicrotasks);
+    env->GetIsolate()->EnqueueMicrotask(
+        Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+    CompileRun("var ext1Calls = 0;");
+  }
+  {
+    v8::MicrotasksScope scope1(env->GetIsolate(),
+                               v8::MicrotasksScope::kRunMicrotasks);
+    ExpectInt32("ext1Calls", 1);
+  }
+  {
+    v8::MicrotasksScope scope1(env->GetIsolate(),
+                               v8::MicrotasksScope::kRunMicrotasks);
+    env->GetIsolate()->EnqueueMicrotask(
+        Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+    CompileRun("throw new Error()");
+  }
+  {
+    v8::MicrotasksScope scope1(env->GetIsolate(),
+                               v8::MicrotasksScope::kRunMicrotasks);
+    ExpectInt32("ext1Calls", 2);
+  }
+  {
+    v8::MicrotasksScope scope1(env->GetIsolate(),
+                               v8::MicrotasksScope::kRunMicrotasks);
+    env->GetIsolate()->EnqueueMicrotask(
+        Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+    v8::TryCatch try_catch(env->GetIsolate());
+    CompileRun("throw new Error()");
+  }
+  {
+    v8::MicrotasksScope scope1(env->GetIsolate(),
+                               v8::MicrotasksScope::kRunMicrotasks);
+    ExpectInt32("ext1Calls", 3);
+  }
+  {
+    v8::MicrotasksScope scope1(env->GetIsolate(),
+                               v8::MicrotasksScope::kRunMicrotasks);
+    env->GetIsolate()->EnqueueMicrotask(
+        Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+    env->GetIsolate()->TerminateExecution();
+    {
+      v8::MicrotasksScope scope2(env->GetIsolate(),
+                                 v8::MicrotasksScope::kRunMicrotasks);
+      env->GetIsolate()->EnqueueMicrotask(
+          Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+    }
+  }
+  env->GetIsolate()->CancelTerminateExecution();
+  {
+    v8::MicrotasksScope scope1(env->GetIsolate(),
+                               v8::MicrotasksScope::kRunMicrotasks);
+    ExpectInt32("ext1Calls", 3);
+    env->GetIsolate()->EnqueueMicrotask(
+        Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+  }
+  {
+    v8::MicrotasksScope scope1(env->GetIsolate(),
+                               v8::MicrotasksScope::kRunMicrotasks);
+
+    ExpectInt32("ext1Calls", 4);
+  }
+
   {
     v8::MicrotasksScope scope1(env->GetIsolate(),
                                v8::MicrotasksScope::kDoNotRunMicrotasks);
@@ -20746,7 +20811,7 @@ class ThreadInterruptTest {
 
   void RunTest() {
     InterruptThread i_thread(this);
-    i_thread.Start();
+    CHECK(i_thread.Start());
 
     sem_.Wait();
     CHECK_EQ(kExpectedValue, sem_value_);
@@ -21009,7 +21074,7 @@ class RegExpInterruptTest {
     v8::HandleScope handle_scope(isolate_);
 
     i_thread.SetTestBody(test_body_fn);
-    i_thread.Start();
+    CHECK(i_thread.Start());
 
     TestBody();
 
@@ -21213,7 +21278,7 @@ class RequestInterruptTestBaseWithSimpleInterrupt
  public:
   RequestInterruptTestBaseWithSimpleInterrupt() : i_thread(this) { }
 
-  void StartInterruptThread() override { i_thread.Start(); }
+  void StartInterruptThread() override { CHECK(i_thread.Start()); }
 
  private:
   class InterruptThread : public v8::base::Thread {
@@ -21444,7 +21509,7 @@ class RequestMultipleInterrupts : public RequestInterruptTestBase {
  public:
   RequestMultipleInterrupts() : i_thread(this), counter_(0) {}
 
-  void StartInterruptThread() override { i_thread.Start(); }
+  void StartInterruptThread() override { CHECK(i_thread.Start()); }
 
   void TestBody() override {
     Local<Function> func = Function::New(env_.local(), ShouldContinueCallback,
@@ -24851,7 +24916,7 @@ TEST(FutexInterruption) {
   FutexInterruptionThread timeout_thread(isolate);
 
   v8::TryCatch try_catch(CcTest::isolate());
-  timeout_thread.Start();
+  CHECK(timeout_thread.Start());
 
   CompileRun(
       "var ab = new SharedArrayBuffer(4);"
@@ -25268,7 +25333,7 @@ TEST(MemoryPressure) {
     LocalContext env;
     MemoryPressureThread memory_pressure_thread(
         isolate, v8::MemoryPressureLevel::kCritical);
-    memory_pressure_thread.Start();
+    CHECK(memory_pressure_thread.Start());
     memory_pressure_thread.Join();
     // This should trigger GC.
     CHECK_EQ(0, counter.NumberOfWeakCalls());
@@ -26080,7 +26145,7 @@ void AtomicsWaitCallbackForTesting(
         break;
       case AtomicsWaitCallbackAction::StopFromThreadAndThrow:
         info->stop_thread = v8::base::make_unique<StopAtomicsWaitThread>(info);
-        info->stop_thread->Start();
+        CHECK(info->stop_thread->Start());
         break;
       case AtomicsWaitCallbackAction::KeepWaiting:
         break;
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 1f6b732808dc4f..4fdf30ef6461ee 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -234,6 +234,15 @@ static void InitializeVM() {
 #define CHECK_FULL_HEAP_OBJECT_IN_REGISTER(expected, result) \
   CHECK(Equal64(expected->ptr(), &core, result))
 
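+// Check that two X registers hold distinct non-zero 64-bit values.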
+#define CHECK_NOT_ZERO_AND_NOT_EQUAL_64(reg0, reg1) \
+  {                                                 \
+    int64_t value0 = core.xreg(reg0.code());        \
+    int64_t value1 = core.xreg(reg1.code());        \
+    CHECK_NE(0, value0);                            \
+    CHECK_NE(0, value1);                            \
+    CHECK_NE(value0, value1);                       \
+  }
+
 #define CHECK_EQUAL_FP64(expected, result)                                    \
   CHECK(EqualFP64(expected, &core, result))
 
@@ -1982,75 +1991,142 @@ TEST(test_branch) {
   CHECK_EQUAL_64(0, x3);
 }
 
+namespace {
+// Generate a block of code that, when hit, always jumps to `landing_pad`.
+void GenerateLandingNops(MacroAssembler* masm, int n, Label* landing_pad) {
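+  // Branch every 100 instructions rather than emitting only nops: if we ever
+  // land in this block we do not execute a long run of nops, and the branches
+  // give the MacroAssembler opportunities to emit veneers.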
+  for (int i = 0; i < (n - 1); i++) {
+    if (i % 100 == 0) {
+      masm->B(landing_pad);
+    } else {
+      masm->Nop();
+    }
+  }
+  masm->B(landing_pad);
+}
+}  // namespace
+
 TEST(far_branch_backward) {
   INIT_V8();
 
-  // Test that the MacroAssembler correctly resolves backward branches to labels
-  // that are outside the immediate range of branch instructions.
-  int max_range =
-    std::max(Instruction::ImmBranchRange(TestBranchType),
-             std::max(Instruction::ImmBranchRange(CompareBranchType),
-                      Instruction::ImmBranchRange(CondBranchType)));
+  ImmBranchType branch_types[] = {TestBranchType, CompareBranchType,
+                                  CondBranchType};
 
-  SETUP_SIZE(max_range + 1000 * kInstrSize);
+  for (ImmBranchType type : branch_types) {
+    int range = Instruction::ImmBranchRange(type);
 
-  START();
+    SETUP_SIZE(range + 1000 * kInstrSize);
 
-  Label done, fail;
-  Label test_tbz, test_cbz, test_bcond;
-  Label success_tbz, success_cbz, success_bcond;
+    START();
 
-  __ Mov(x0, 0);
-  __ Mov(x1, 1);
-  __ Mov(x10, 0);
+    Label done, fail;
+    Label near, far, in_range, out_of_range;
 
-  __ B(&test_tbz);
-  __ Bind(&success_tbz);
-  __ Orr(x0, x0, 1 << 0);
-  __ B(&test_cbz);
-  __ Bind(&success_cbz);
-  __ Orr(x0, x0, 1 << 1);
-  __ B(&test_bcond);
-  __ Bind(&success_bcond);
-  __ Orr(x0, x0, 1 << 2);
+    __ Mov(x0, 0);
+    __ Mov(x1, 1);
+    __ Mov(x10, 0);
 
-  __ B(&done);
+    __ B(&near);
+    __ Bind(&in_range);
+    __ Orr(x0, x0, 1 << 0);
 
-  // Generate enough code to overflow the immediate range of the three types of
-  // branches below.
-  for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
-    if (i % 100 == 0) {
-      // If we do land in this code, we do not want to execute so many nops
-      // before reaching the end of test (especially if tracing is activated).
-      __ B(&fail);
-    } else {
-      __ Nop();
-    }
-  }
-  __ B(&fail);
+    __ B(&far);
+    __ Bind(&out_of_range);
+    __ Orr(x0, x0, 1 << 1);
 
-  __ Bind(&test_tbz);
-  __ Tbz(x10, 7, &success_tbz);
-  __ Bind(&test_cbz);
-  __ Cbz(x10, &success_cbz);
-  __ Bind(&test_bcond);
-  __ Cmp(x10, 0);
-  __ B(eq, &success_bcond);
+    __ B(&done);
 
-  // For each out-of-range branch instructions, at least two instructions should
-  // have been generated.
-  CHECK_GE(7 * kInstrSize, __ SizeOfCodeGeneratedSince(&test_tbz));
+    // We use a slack and an approximate budget instead of checking precisely
+    // when the branch limit is hit, since veneers and the literal pool can
+    // mess with our calculation of where the limit is.
+    // In this test, we want to make sure we support backward branches and
+    // that the range is more or less correct. It's not a big deal if the
+    // macro-assembler gets the range a little wrong, as long as it's not far
+    // off, which could affect performance.
+
+    int budget =
+        (range - static_cast<int>(__ SizeOfCodeGeneratedSince(&in_range))) /
+        kInstrSize;
+
+    const int kSlack = 100;
+
+    // Generate enough code so that the next branch will be in range but we are
+    // close to the limit.
+    GenerateLandingNops(&masm, budget - kSlack, &fail);
+
+    __ Bind(&near);
+    switch (type) {
+      case TestBranchType:
+        __ Tbz(x10, 3, &in_range);
+        // This should be:
+        //     TBZ <in_range>
+        CHECK_EQ(1 * kInstrSize, __ SizeOfCodeGeneratedSince(&near));
+        break;
+      case CompareBranchType:
+        __ Cbz(x10, &in_range);
+        // This should be:
+        //     CBZ <in_range>
+        CHECK_EQ(1 * kInstrSize, __ SizeOfCodeGeneratedSince(&near));
+        break;
+      case CondBranchType:
+        __ Cmp(x10, 0);
+        __ B(eq, &in_range);
+        // This should be:
+        //     CMP
+        //     B.EQ <in_range>
+        CHECK_EQ(2 * kInstrSize, __ SizeOfCodeGeneratedSince(&near));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
 
-  __ Bind(&fail);
-  __ Mov(x1, 0);
-  __ Bind(&done);
+    // Now go past the limit so that branches are now out of range.
+    GenerateLandingNops(&masm, kSlack * 2, &fail);
+
+    __ Bind(&far);
+    switch (type) {
+      case TestBranchType:
+        __ Tbz(x10, 5, &out_of_range);
+        // This should be:
+        //     TBNZ <skip>
+        //     B <out_of_range>
+        //   skip:
+        CHECK_EQ(2 * kInstrSize, __ SizeOfCodeGeneratedSince(&far));
+        break;
+      case CompareBranchType:
+        __ Cbz(x10, &out_of_range);
+        // This should be:
+        //     CBNZ <skip>
+        //     B <out_of_range>
+        //   skip:
+        CHECK_EQ(2 * kInstrSize, __ SizeOfCodeGeneratedSince(&far));
+        break;
+      case CondBranchType:
+        __ Cmp(x10, 0);
+        __ B(eq, &out_of_range);
+        // This should be:
+        //     CMP
+        //     B.NE <skip>
+        //     B <out_of_range>
+        //   skip:
+        CHECK_EQ(3 * kInstrSize, __ SizeOfCodeGeneratedSince(&far));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
 
-  END();
+    __ Bind(&fail);
+    __ Mov(x1, 0);
+    __ Bind(&done);
 
-  RUN();
+    END();
 
-  CHECK_EQUAL_64(0x7, x0);
-  CHECK_EQUAL_64(0x1, x1);
+    RUN();
+
+    CHECK_EQUAL_64(0x3, x0);
+    CHECK_EQUAL_64(1, x1);
+  }
 }
 
 TEST(far_branch_simple_veneer) {
@@ -2177,18 +2253,7 @@ TEST(far_branch_veneer_link_chain) {
 
   // Generate enough code to overflow the immediate range of the three types of
   // branches below.
-  for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
-    if (i % 100 == 0) {
-      // If we do land in this code, we do not want to execute so many nops
-      // before reaching the end of test (especially if tracing is activated).
-      // Also, the branches give the MacroAssembler the opportunity to emit the
-      // veneers.
-      __ B(&fail);
-    } else {
-      __ Nop();
-    }
-  }
-  __ B(&fail);
+  GenerateLandingNops(&masm, (max_range / kInstrSize) + 1, &fail);
 
   __ Bind(&success_tbz);
   __ Orr(x0, x0, 1 << 0);
@@ -2219,7 +2284,58 @@ TEST(far_branch_veneer_broken_link_chain) {
   // a branch from the link chain of a label and the two links on each side of
   // the removed branch cannot be linked together (out of range).
   //
-  // We test with tbz because it has a small range.
+  // We want to generate the following code; we test with TBZ because it has a
+  // small range:
+  //
+  // ~~~
+  // 1: B <far>
+  //          :
+  //          :
+  //          :
+  // 2: TBZ <far> -------.
+  //          :          |
+  //          :          | out of range
+  //          :          |
+  // 3: TBZ <far>        |
+  //          |          |
+  //          | in range |
+  //          V          |
+  // far:              <-'
+  // ~~~
+  //
+  // If we say that the range of TBZ is 3 lines on this graph, then we can get
+  // into a situation where the link chain gets broken. When emitting the two
+  // TBZ instructions, we are in range of the previous branch in the chain, so
+  // we'll generate a TBZ and not a TBNZ+B sequence that could encode a bigger
+  // range.
+  //
+  // However, the first TBZ (2) is out of range of the far label, so a veneer
+  // will be generated after the second TBZ (3). This results in a broken
+  // chain because we can no longer link from (3) back to (1).
+  //
+  // ~~~
+  // 1: B <far>     <-.
+  //                  :
+  //                  : out of range
+  //                  :
+  // 2: TBZ <veneer>  :
+  //                  :
+  //                  :
+  //                  :
+  // 3: TBZ <far> ----'
+  //
+  //    B <skip>
+  // veneer:
+  //    B <far>
+  // skip:
+  //
+  // far:
+  // ~~~
+  //
+  // This test makes sure the MacroAssembler is able to resolve this case by,
+  // for instance, resolving (1) early and making it jump to <veneer> instead of
+  // <far>.
+
   int max_range = Instruction::ImmBranchRange(TestBranchType);
   int inter_range = max_range / 2 + max_range / 10;
 
@@ -2240,44 +2356,42 @@ TEST(far_branch_veneer_broken_link_chain) {
   __ Mov(x0, 1);
   __ B(&far_target);
 
-  for (int i = 0; i < inter_range / kInstrSize; ++i) {
-    if (i % 100 == 0) {
-      // Do not allow generating veneers. They should not be needed.
-      __ b(&fail);
-    } else {
-      __ Nop();
-    }
-  }
+  GenerateLandingNops(&masm, inter_range / kInstrSize, &fail);
 
   // Will need a veneer to reach the target.
   __ Bind(&test_2);
   __ Mov(x0, 2);
-  __ Tbz(x10, 7, &far_target);
-
-  for (int i = 0; i < inter_range / kInstrSize; ++i) {
-    if (i % 100 == 0) {
-      // Do not allow generating veneers. They should not be needed.
-      __ b(&fail);
-    } else {
-      __ Nop();
-    }
+  {
+    Label tbz;
+    __ Bind(&tbz);
+    __ Tbz(x10, 7, &far_target);
+    // This should be a single TBZ since the previous link is in range at this
+    // point.
+    CHECK_EQ(1 * kInstrSize, __ SizeOfCodeGeneratedSince(&tbz));
   }
 
+  GenerateLandingNops(&masm, inter_range / kInstrSize, &fail);
+
   // Does not need a veneer to reach the target, but the initial branch
   // instruction is out of range.
   __ Bind(&test_3);
   __ Mov(x0, 3);
-  __ Tbz(x10, 7, &far_target);
-
-  for (int i = 0; i < inter_range / kInstrSize; ++i) {
-    if (i % 100 == 0) {
-      // Allow generating veneers.
-      __ B(&fail);
-    } else {
-      __ Nop();
-    }
+  {
+    Label tbz;
+    __ Bind(&tbz);
+    __ Tbz(x10, 7, &far_target);
+    // This should be a single TBZ since the previous link is in range at this
+    // point.
+    CHECK_EQ(1 * kInstrSize, __ SizeOfCodeGeneratedSince(&tbz));
   }
 
+  // A veneer will be generated for the first TBZ, removing that branch from
+  // the label's link chain and breaking the chain, because the second TBZ is
+  // out of range of the first branch.
+  // The MacroAssembler should be able to cope with this.
+
+  GenerateLandingNops(&masm, inter_range / kInstrSize, &fail);
+
   __ B(&fail);
 
   __ Bind(&far_target);
@@ -11478,6 +11592,79 @@ TEST(system_msr) {
   CHECK_EQUAL_64(0, x10);
 }
 
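+// Sign pointers with Pacia1716/Paciasp, authenticate them with the matching
+// instruction, and check that authenticating with a mismatched modifier
+// corrupts the pointer.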
+TEST(system_pauth_a) {
+  SETUP();
+  START();
+
+  // Exclude x16 and x17 from the scratch register list so we can use
+  // Pac/Autia1716 safely.
+  UseScratchRegisterScope temps(&masm);
+  temps.Exclude(x16, x17);
+  temps.Include(x10, x11);
+
+  // Back up the stack pointer.
+  __ Mov(x20, sp);
+
+  // Set the modifiers: x16 for the *1716 forms, sp for the *sp forms.
+  __ Mov(x16, 0x477d469dec0b8768);
+  __ Mov(sp, 0x477d469dec0b8760);
+
+  // Generate PACs using the 3 system instructions.
+  __ Mov(x17, 0x0000000012345678);
+  __ Pacia1716();
+  __ Mov(x0, x17);
+
+  __ Mov(lr, 0x0000000012345678);
+  __ Paciasp();
+  __ Mov(x2, lr);
+
+  // Authenticate the pointers above.
+  __ Mov(x17, x0);
+  __ Autia1716();
+  __ Mov(x3, x17);
+
+  __ Mov(lr, x2);
+  __ Autiasp();
+  __ Mov(x5, lr);
+
+  // Attempt to authenticate incorrect pointers.
+  __ Mov(x17, x2);
+  __ Autia1716();
+  __ Mov(x6, x17);
+
+  __ Mov(lr, x0);
+  __ Autiasp();
+  __ Mov(x8, lr);
+
+  // Restore stack pointer.
+  __ Mov(sp, x20);
+
+  // Mask out just the PAC code bits.
+  __ And(x0, x0, 0x007f000000000000);
+  __ And(x2, x2, 0x007f000000000000);
+
+  END();
+
+// TODO(all): test on real hardware when available
+#ifdef USE_SIMULATOR
+  RUN();
+
+  // Check PAC codes have been generated and aren't equal.
+  // NOTE: with a different ComputePAC implementation, there may be a collision.
+  CHECK_NE(0, core.xreg(2));
+  CHECK_NOT_ZERO_AND_NOT_EQUAL_64(x0, x2);
+
+  // Pointers correctly authenticated.
+  CHECK_EQUAL_64(0x0000000012345678, x3);
+  CHECK_EQUAL_64(0x0000000012345678, x5);
+
+  // Pointers corrupted after failing to authenticate.
+  CHECK_EQUAL_64(0x0020000012345678, x6);
+  CHECK_EQUAL_64(0x0020000012345678, x8);
+
+#endif  // USE_SIMULATOR
+}
+
 TEST(system) {
   INIT_V8();
   SETUP();
@@ -14703,6 +14890,7 @@ TEST(internal_reference_linked) {
 #undef CHECK_EQUAL_FP32
 #undef CHECK_EQUAL_64
 #undef CHECK_FULL_HEAP_OBJECT_IN_REGISTER
+#undef CHECK_NOT_ZERO_AND_NOT_EQUAL_64
 #undef CHECK_EQUAL_FP64
 #undef CHECK_EQUAL_128
 #undef CHECK_CONSTANT_POOL_SIZE
diff --git a/deps/v8/test/cctest/test-circular-queue.cc b/deps/v8/test/cctest/test-circular-queue.cc
index 7b0475ff80036c..5525540d2b0e0d 100644
--- a/deps/v8/test/cctest/test-circular-queue.cc
+++ b/deps/v8/test/cctest/test-circular-queue.cc
@@ -148,7 +148,7 @@ TEST(SamplingCircularQueueMultithreading) {
   ProducerThread producer3(&scq, kRecordsPerChunk, 20, &semaphore);
 
   CHECK(!scq.Peek());
-  producer1.Start();
+  CHECK(producer1.Start());
   semaphore.Wait();
   for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
     Record* rec = reinterpret_cast<Record*>(scq.Peek());
@@ -160,7 +160,7 @@ TEST(SamplingCircularQueueMultithreading) {
   }
 
   CHECK(!scq.Peek());
-  producer2.Start();
+  CHECK(producer2.Start());
   semaphore.Wait();
   for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
     Record* rec = reinterpret_cast<Record*>(scq.Peek());
@@ -172,7 +172,7 @@ TEST(SamplingCircularQueueMultithreading) {
   }
 
   CHECK(!scq.Peek());
-  producer3.Start();
+  CHECK(producer3.Start());
   semaphore.Wait();
   for (Record i = 20; i < 20 + kRecordsPerChunk; ++i) {
     Record* rec = reinterpret_cast<Record*>(scq.Peek());
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index e7fc94667522e0..3a4f11e1265e1c 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -53,9 +53,9 @@ Handle<String> MakeName(const char* str, int suffix) {
   return MakeString(buffer.begin());
 }
 
-int sum9(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7,
-         int a8) {
-  return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
+int sum10(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7,
+          int a8, int a9) {
+  return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9;
 }
 
 static int sum3(int a0, int a1, int a2) { return a0 + a1 + a2; }
@@ -70,8 +70,8 @@ TEST(CallCFunction) {
   CodeStubAssembler m(asm_tester.state());
 
   {
-    Node* const fun_constant = m.ExternalConstant(
-        ExternalReference::Create(reinterpret_cast<Address>(sum9)));
+    TNode<ExternalReference> const fun_constant = m.ExternalConstant(
+        ExternalReference::Create(reinterpret_cast<Address>(sum10)));
 
     MachineType type_intptr = MachineType::IntPtr();
 
@@ -85,14 +85,15 @@ TEST(CallCFunction) {
                         std::make_pair(type_intptr, m.IntPtrConstant(5)),
                         std::make_pair(type_intptr, m.IntPtrConstant(6)),
                         std::make_pair(type_intptr, m.IntPtrConstant(7)),
-                        std::make_pair(type_intptr, m.IntPtrConstant(8)));
+                        std::make_pair(type_intptr, m.IntPtrConstant(8)),
+                        std::make_pair(type_intptr, m.IntPtrConstant(9)));
     m.Return(m.SmiTag(result));
   }
 
   FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
 
   Handle<Object> result = ft.Call().ToHandleChecked();
-  CHECK_EQ(36, Handle<Smi>::cast(result)->value());
+  CHECK_EQ(45, Handle<Smi>::cast(result)->value());
 }
 
 TEST(CallCFunctionWithCallerSavedRegisters) {
@@ -103,7 +104,7 @@ TEST(CallCFunctionWithCallerSavedRegisters) {
   CodeStubAssembler m(asm_tester.state());
 
   {
-    Node* const fun_constant = m.ExternalConstant(
+    TNode<ExternalReference> const fun_constant = m.ExternalConstant(
         ExternalReference::Create(reinterpret_cast<Address>(sum3)));
 
     MachineType type_intptr = MachineType::IntPtr();
@@ -311,7 +312,7 @@ TEST(DecodeWordFromWord32) {
   CodeAssemblerTester asm_tester(isolate);
   CodeStubAssembler m(asm_tester.state());
 
-  class TestBitField : public BitField<unsigned, 3, 3> {};
+  using TestBitField = BitField<unsigned, 3, 3>;
   m.Return(m.SmiTag(
       m.Signed(m.DecodeWordFromWord32<TestBitField>(m.Int32Constant(0x2F)))));
   FunctionTester ft(asm_tester.GenerateCode());
@@ -430,35 +431,38 @@ TEST(TryToName) {
   enum Result { kKeyIsIndex, kKeyIsUnique, kBailout };
   {
     Node* key = m.Parameter(0);
-    Node* expected_result = m.Parameter(1);
-    Node* expected_arg = m.Parameter(2);
+    TNode<MaybeObject> expected_result =
+        m.UncheckedCast<MaybeObject>(m.Parameter(1));
+    TNode<Object> expected_arg = m.CAST(m.Parameter(2));
 
     Label passed(&m), failed(&m);
     Label if_keyisindex(&m), if_keyisunique(&m), if_bailout(&m);
     {
-      Variable var_index(&m, MachineType::PointerRepresentation());
-      Variable var_unique(&m, MachineRepresentation::kTagged);
+      TYPED_VARIABLE_DEF(IntPtrT, var_index, &m);
+      TYPED_VARIABLE_DEF(Object, var_unique, &m);
 
       m.TryToName(key, &if_keyisindex, &var_index, &if_keyisunique, &var_unique,
                   &if_bailout);
 
       m.BIND(&if_keyisindex);
-      m.GotoIfNot(m.WordEqual(expected_result,
-                              m.SmiConstant(Smi::FromInt(kKeyIsIndex))),
+      m.GotoIfNot(m.TaggedEqual(expected_result,
+                                m.SmiConstant(Smi::FromInt(kKeyIsIndex))),
                   &failed);
-      m.Branch(m.WordEqual(m.SmiUntag(expected_arg), var_index.value()),
-               &passed, &failed);
+      m.Branch(
+          m.IntPtrEqual(m.SmiUntag(m.CAST(expected_arg)), var_index.value()),
+          &passed, &failed);
 
       m.BIND(&if_keyisunique);
-      m.GotoIfNot(m.WordEqual(expected_result,
-                              m.SmiConstant(Smi::FromInt(kKeyIsUnique))),
+      m.GotoIfNot(m.TaggedEqual(expected_result,
+                                m.SmiConstant(Smi::FromInt(kKeyIsUnique))),
                   &failed);
-      m.Branch(m.WordEqual(expected_arg, var_unique.value()), &passed, &failed);
+      m.Branch(m.TaggedEqual(expected_arg, var_unique.value()), &passed,
+               &failed);
     }
 
     m.BIND(&if_bailout);
     m.Branch(
-        m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
         &passed, &failed);
 
     m.BIND(&passed);
@@ -653,7 +657,7 @@ void TestNameDictionaryLookup() {
                                        &var_name_index, &if_not_found);
     m.BIND(&if_found);
     m.GotoIfNot(
-        m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
         &failed);
     m.Branch(
         m.WordEqual(m.SmiUntag(m.CAST(expected_arg)), var_name_index.value()),
@@ -661,7 +665,7 @@ void TestNameDictionaryLookup() {
 
     m.BIND(&if_not_found);
     m.Branch(
-        m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
         &passed, &failed);
 
     m.BIND(&passed);
@@ -756,14 +760,14 @@ TEST(NumberDictionaryLookup) {
                              &if_not_found);
     m.BIND(&if_found);
     m.GotoIfNot(
-        m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
         &failed);
     m.Branch(m.WordEqual(m.SmiUntag(m.CAST(expected_arg)), var_entry.value()),
              &passed, &failed);
 
     m.BIND(&if_not_found);
     m.Branch(
-        m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
         &passed, &failed);
 
     m.BIND(&passed);
@@ -847,12 +851,12 @@ TEST(TransitionLookup) {
                        &if_not_found);
 
       BIND(&if_found);
-      GotoIfNot(WordEqual(expected_result, SmiConstant(kFound)), &failed);
-      Branch(WordEqual(expected_arg, SmiTag(var_transition_index.value())),
+      GotoIfNot(TaggedEqual(expected_result, SmiConstant(kFound)), &failed);
+      Branch(TaggedEqual(expected_arg, SmiTag(var_transition_index.value())),
              &passed, &failed);
 
       BIND(&if_not_found);
-      Branch(WordEqual(expected_result, SmiConstant(kNotFound)), &passed,
+      Branch(TaggedEqual(expected_result, SmiConstant(kNotFound)), &passed,
              &failed);
 
       BIND(&passed);
@@ -1010,29 +1014,31 @@ TEST(TryHasOwnProperty) {
   {
     Node* object = m.Parameter(0);
     Node* unique_name = m.Parameter(1);
-    Node* expected_result = m.Parameter(2);
+    TNode<MaybeObject> expected_result =
+        m.UncheckedCast<MaybeObject>(m.Parameter(2));
 
     Label passed(&m), failed(&m);
     Label if_found(&m), if_not_found(&m), if_bailout(&m);
 
-    Node* map = m.LoadMap(object);
-    Node* instance_type = m.LoadMapInstanceType(map);
+    TNode<Map> map = m.LoadMap(object);
+    TNode<Uint16T> instance_type = m.LoadMapInstanceType(map);
 
     m.TryHasOwnProperty(object, map, instance_type, unique_name, &if_found,
                         &if_not_found, &if_bailout);
 
     m.BIND(&if_found);
-    m.Branch(m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
-             &passed, &failed);
+    m.Branch(
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
+        &passed, &failed);
 
     m.BIND(&if_not_found);
     m.Branch(
-        m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
         &passed, &failed);
 
     m.BIND(&if_bailout);
     m.Branch(
-        m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
         &passed, &failed);
 
     m.BIND(&passed);
@@ -1207,8 +1213,8 @@ TEST(TryGetOwnProperty) {
     Variable var_value(&m, MachineRepresentation::kTagged);
     Label if_found(&m), if_not_found(&m), if_bailout(&m);
 
-    Node* map = m.LoadMap(object);
-    Node* instance_type = m.LoadMapInstanceType(map);
+    TNode<Map> map = m.LoadMap(object);
+    TNode<Uint16T> instance_type = m.LoadMapInstanceType(map);
 
     m.TryGetOwnProperty(context, object, object, map, instance_type,
                         unique_name, &if_found, &var_value, &if_not_found,
@@ -1421,34 +1427,37 @@ TEST(TryLookupElement) {
   enum Result { kFound, kAbsent, kNotFound, kBailout };
   {
     Node* object = m.Parameter(0);
-    Node* index = m.SmiUntag(m.Parameter(1));
-    Node* expected_result = m.Parameter(2);
+    TNode<IntPtrT> index = m.SmiUntag(m.Parameter(1));
+    TNode<MaybeObject> expected_result =
+        m.UncheckedCast<MaybeObject>(m.Parameter(2));
 
     Label passed(&m), failed(&m);
     Label if_found(&m), if_not_found(&m), if_bailout(&m), if_absent(&m);
 
-    Node* map = m.LoadMap(object);
-    Node* instance_type = m.LoadMapInstanceType(map);
+    TNode<Map> map = m.LoadMap(object);
+    TNode<Uint16T> instance_type = m.LoadMapInstanceType(map);
 
     m.TryLookupElement(object, map, instance_type, index, &if_found, &if_absent,
                        &if_not_found, &if_bailout);
 
     m.BIND(&if_found);
-    m.Branch(m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
-             &passed, &failed);
+    m.Branch(
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
+        &passed, &failed);
 
     m.BIND(&if_absent);
-    m.Branch(m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kAbsent))),
-             &passed, &failed);
+    m.Branch(
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kAbsent))),
+        &passed, &failed);
 
     m.BIND(&if_not_found);
     m.Branch(
-        m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
         &passed, &failed);
 
     m.BIND(&if_bailout);
     m.Branch(
-        m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
+        m.TaggedEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
         &passed, &failed);
 
     m.BIND(&passed);
@@ -1600,19 +1609,19 @@ TEST(TryLookupElement) {
     CHECK_NOT_FOUND(object, 42);
   }
 
-// TODO(ishell): uncomment once NO_ELEMENTS kind is supported.
-//  {
-//    Handle<Map> map = Map::Create(isolate, 0);
-//    map->set_elements_kind(NO_ELEMENTS);
-//    Handle<JSObject> object = factory->NewJSObjectFromMap(map);
-//    CHECK_EQ(NO_ELEMENTS, object->map()->elements_kind());
-//
-//    CHECK_NOT_FOUND(object, 0);
-//    CHECK_NOT_FOUND(object, 1);
-//    CHECK_NOT_FOUND(object, 7);
-//    CHECK_NOT_FOUND(object, 13);
-//    CHECK_NOT_FOUND(object, 42);
-//  }
+  // TODO(ishell): uncomment once NO_ELEMENTS kind is supported.
+  //  {
+  //    Handle<Map> map = Map::Create(isolate, 0);
+  //    map->set_elements_kind(NO_ELEMENTS);
+  //    Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+  //    CHECK_EQ(NO_ELEMENTS, object->map()->elements_kind());
+  //
+  //    CHECK_NOT_FOUND(object, 0);
+  //    CHECK_NOT_FOUND(object, 1);
+  //    CHECK_NOT_FOUND(object, 7);
+  //    CHECK_NOT_FOUND(object, 13);
+  //    CHECK_NOT_FOUND(object, 42);
+  //  }
 
 #undef CHECK_FOUND
 #undef CHECK_NOT_FOUND
@@ -1652,7 +1661,8 @@ TEST(AllocateJSObjectFromMap) {
     Node* properties = m.Parameter(1);
     Node* elements = m.Parameter(2);
 
-    Node* result = m.AllocateJSObjectFromMap(map, properties, elements);
+    TNode<JSObject> result =
+        m.AllocateJSObjectFromMap(map, properties, elements);
 
     CodeStubAssembler::Label done(&m);
     m.GotoIfNot(m.IsJSArrayMap(map), &done);
@@ -1722,7 +1732,8 @@ TEST(AllocateNameDictionary) {
 
   {
     Node* capacity = m.Parameter(0);
-    Node* result = m.AllocateNameDictionary(m.SmiUntag(capacity));
+    TNode<NameDictionary> result =
+        m.AllocateNameDictionary(m.SmiUntag(capacity));
     m.Return(result);
   }
 
@@ -1931,11 +1942,11 @@ TEST(Arguments) {
   CodeStubArguments arguments(&m, m.IntPtrConstant(3));
 
   CSA_ASSERT(
-      &m, m.WordEqual(arguments.AtIndex(0), m.SmiConstant(Smi::FromInt(12))));
+      &m, m.TaggedEqual(arguments.AtIndex(0), m.SmiConstant(Smi::FromInt(12))));
   CSA_ASSERT(
-      &m, m.WordEqual(arguments.AtIndex(1), m.SmiConstant(Smi::FromInt(13))));
+      &m, m.TaggedEqual(arguments.AtIndex(1), m.SmiConstant(Smi::FromInt(13))));
   CSA_ASSERT(
-      &m, m.WordEqual(arguments.AtIndex(2), m.SmiConstant(Smi::FromInt(14))));
+      &m, m.TaggedEqual(arguments.AtIndex(2), m.SmiConstant(Smi::FromInt(14))));
 
   arguments.PopAndReturn(arguments.GetReceiver());
 
@@ -1959,17 +1970,17 @@ TEST(ArgumentsWithSmiConstantIndices) {
                               CodeStubAssembler::SMI_PARAMETERS);
 
   CSA_ASSERT(&m,
-             m.WordEqual(arguments.AtIndex(m.SmiConstant(0),
-                                           CodeStubAssembler::SMI_PARAMETERS),
-                         m.SmiConstant(Smi::FromInt(12))));
+             m.TaggedEqual(arguments.AtIndex(m.SmiConstant(0),
+                                             CodeStubAssembler::SMI_PARAMETERS),
+                           m.SmiConstant(Smi::FromInt(12))));
   CSA_ASSERT(&m,
-             m.WordEqual(arguments.AtIndex(m.SmiConstant(1),
-                                           CodeStubAssembler::SMI_PARAMETERS),
-                         m.SmiConstant(Smi::FromInt(13))));
+             m.TaggedEqual(arguments.AtIndex(m.SmiConstant(1),
+                                             CodeStubAssembler::SMI_PARAMETERS),
+                           m.SmiConstant(Smi::FromInt(13))));
   CSA_ASSERT(&m,
-             m.WordEqual(arguments.AtIndex(m.SmiConstant(2),
-                                           CodeStubAssembler::SMI_PARAMETERS),
-                         m.SmiConstant(Smi::FromInt(14))));
+             m.TaggedEqual(arguments.AtIndex(m.SmiConstant(2),
+                                             CodeStubAssembler::SMI_PARAMETERS),
+                           m.SmiConstant(Smi::FromInt(14))));
 
   arguments.PopAndReturn(arguments.GetReceiver());
 
@@ -2012,17 +2023,17 @@ TEST(ArgumentsWithSmiIndices) {
                               CodeStubAssembler::SMI_PARAMETERS);
 
   CSA_ASSERT(&m,
-             m.WordEqual(arguments.AtIndex(NonConstantSmi(&m, 0),
-                                           CodeStubAssembler::SMI_PARAMETERS),
-                         m.SmiConstant(Smi::FromInt(12))));
+             m.TaggedEqual(arguments.AtIndex(NonConstantSmi(&m, 0),
+                                             CodeStubAssembler::SMI_PARAMETERS),
+                           m.SmiConstant(Smi::FromInt(12))));
   CSA_ASSERT(&m,
-             m.WordEqual(arguments.AtIndex(NonConstantSmi(&m, 1),
-                                           CodeStubAssembler::SMI_PARAMETERS),
-                         m.SmiConstant(Smi::FromInt(13))));
+             m.TaggedEqual(arguments.AtIndex(NonConstantSmi(&m, 1),
+                                             CodeStubAssembler::SMI_PARAMETERS),
+                           m.SmiConstant(Smi::FromInt(13))));
   CSA_ASSERT(&m,
-             m.WordEqual(arguments.AtIndex(NonConstantSmi(&m, 2),
-                                           CodeStubAssembler::SMI_PARAMETERS),
-                         m.SmiConstant(Smi::FromInt(14))));
+             m.TaggedEqual(arguments.AtIndex(NonConstantSmi(&m, 2),
+                                             CodeStubAssembler::SMI_PARAMETERS),
+                           m.SmiConstant(Smi::FromInt(14))));
 
   arguments.PopAndReturn(arguments.GetReceiver());
 
@@ -2368,7 +2379,7 @@ TEST(CreatePromiseResolvingFunctionsContext) {
   PromiseBuiltinsAssembler m(asm_tester.state());
 
   Node* const context = m.Parameter(kNumParams + 2);
-  Node* const native_context = m.LoadNativeContext(context);
+  TNode<NativeContext> const native_context = m.LoadNativeContext(context);
   Node* const promise =
       m.AllocateAndInitJSPromise(context, m.UndefinedConstant());
   Node* const promise_context = m.CreatePromiseResolvingFunctionsContext(
@@ -2396,13 +2407,13 @@ TEST(CreatePromiseResolvingFunctions) {
   PromiseBuiltinsAssembler m(asm_tester.state());
 
   Node* const context = m.Parameter(kNumParams + 2);
-  Node* const native_context = m.LoadNativeContext(context);
+  TNode<NativeContext> const native_context = m.LoadNativeContext(context);
   Node* const promise =
       m.AllocateAndInitJSPromise(context, m.UndefinedConstant());
   Node *resolve, *reject;
   std::tie(resolve, reject) = m.CreatePromiseResolvingFunctions(
       promise, m.BooleanConstant(false), native_context);
-  Node* const kSize = m.IntPtrConstant(2);
+  TNode<IntPtrT> const kSize = m.IntPtrConstant(2);
   TNode<FixedArray> const arr =
       m.Cast(m.AllocateFixedArray(PACKED_ELEMENTS, kSize));
   m.StoreFixedArrayElement(arr, 0, resolve);
@@ -2486,15 +2497,15 @@ TEST(AllocateFunctionWithMapAndContext) {
   PromiseBuiltinsAssembler m(asm_tester.state());
 
   Node* const context = m.Parameter(kNumParams + 2);
-  Node* const native_context = m.LoadNativeContext(context);
+  TNode<NativeContext> const native_context = m.LoadNativeContext(context);
   Node* const promise =
       m.AllocateAndInitJSPromise(context, m.UndefinedConstant());
   Node* promise_context = m.CreatePromiseResolvingFunctionsContext(
       promise, m.BooleanConstant(false), native_context);
-  Node* resolve_info = m.LoadContextElement(
+  TNode<Object> resolve_info = m.LoadContextElement(
       native_context,
       Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);
-  Node* const map = m.LoadContextElement(
+  TNode<Object> const map = m.LoadContextElement(
       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
   Node* const resolve =
       m.AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
@@ -2524,9 +2535,9 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
   PromiseBuiltinsAssembler m(asm_tester.state());
 
   Node* const context = m.Parameter(kNumParams + 2);
-  Node* const native_context = m.LoadNativeContext(context);
+  TNode<NativeContext> const native_context = m.LoadNativeContext(context);
 
-  Node* const map = m.LoadRoot(RootIndex::kPromiseCapabilityMap);
+  TNode<Map> const map = m.PromiseCapabilityMapConstant();
   Node* const capability = m.AllocateStruct(map);
   m.StoreObjectFieldNoWriteBarrier(
       capability, PromiseCapability::kPromiseOffset, m.UndefinedConstant());
@@ -2560,12 +2571,12 @@ TEST(NewPromiseCapability) {
     PromiseBuiltinsAssembler m(asm_tester.state());
 
     Node* const context = m.Parameter(kNumParams + 2);
-    Node* const native_context = m.LoadNativeContext(context);
-    Node* const promise_constructor =
+    TNode<NativeContext> const native_context = m.LoadNativeContext(context);
+    TNode<Object> const promise_constructor =
         m.LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
 
-    Node* const debug_event = m.TrueConstant();
-    Node* const capability =
+    TNode<Oddball> const debug_event = m.TrueConstant();
+    TNode<Object> const capability =
         m.CallBuiltin(Builtins::kNewPromiseCapability, context,
                       promise_constructor, debug_event);
     m.Return(capability);
@@ -2608,9 +2619,9 @@ TEST(NewPromiseCapability) {
     Node* const context = m.Parameter(kNumParams + 2);
 
     Node* const constructor = m.Parameter(1);
-    Node* const debug_event = m.TrueConstant();
-    Node* const capability = m.CallBuiltin(Builtins::kNewPromiseCapability,
-                                           context, constructor, debug_event);
+    TNode<Oddball> const debug_event = m.TrueConstant();
+    TNode<Object> const capability = m.CallBuiltin(
+        Builtins::kNewPromiseCapability, context, constructor, debug_event);
     m.Return(capability);
 
     FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2674,12 +2685,13 @@ TEST(DirectMemoryTest8BitWord32Immediate) {
   const int element_count = 8;
   Label bad(&m);
 
-  Node* buffer_node = m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
+  TNode<IntPtrT> buffer_node =
+      m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
   for (size_t i = 0; i < element_count; ++i) {
     for (size_t j = 0; j < element_count; ++j) {
       Node* loaded = m.LoadBufferObject(buffer_node, static_cast<int>(i),
                                         MachineType::Uint8());
-      Node* masked = m.Word32And(loaded, m.Int32Constant(buffer[j]));
+      TNode<Word32T> masked = m.Word32And(loaded, m.Int32Constant(buffer[j]));
       if ((buffer[j] & buffer[i]) != 0) {
         m.GotoIf(m.Word32Equal(masked, m.Int32Constant(0)), &bad);
       } else {
@@ -2706,13 +2718,14 @@ TEST(DirectMemoryTest16BitWord32Immediate) {
   const int element_count = 8;
   Label bad(&m);
 
-  Node* buffer_node = m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
+  TNode<IntPtrT> buffer_node =
+      m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
   for (size_t i = 0; i < element_count; ++i) {
     for (size_t j = 0; j < element_count; ++j) {
       Node* loaded =
           m.LoadBufferObject(buffer_node, static_cast<int>(i * sizeof(int16_t)),
                              MachineType::Uint16());
-      Node* masked = m.Word32And(loaded, m.Int32Constant(buffer[j]));
+      TNode<Word32T> masked = m.Word32And(loaded, m.Int32Constant(buffer[j]));
       if ((buffer[j] & buffer[i]) != 0) {
         m.GotoIf(m.Word32Equal(masked, m.Int32Constant(0)), &bad);
       } else {
@@ -2740,7 +2753,8 @@ TEST(DirectMemoryTest8BitWord32) {
   Label bad(&m);
   Node* constants[element_count];
 
-  Node* buffer_node = m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
+  TNode<IntPtrT> buffer_node =
+      m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
   for (size_t i = 0; i < element_count; ++i) {
     constants[i] = m.LoadBufferObject(buffer_node, static_cast<int>(i),
                                       MachineType::Uint8());
@@ -2750,7 +2764,7 @@ TEST(DirectMemoryTest8BitWord32) {
     for (size_t j = 0; j < element_count; ++j) {
       Node* loaded = m.LoadBufferObject(buffer_node, static_cast<int>(i),
                                         MachineType::Uint8());
-      Node* masked = m.Word32And(loaded, constants[j]);
+      TNode<Word32T> masked = m.Word32And(loaded, constants[j]);
       if ((buffer[j] & buffer[i]) != 0) {
         m.GotoIf(m.Word32Equal(masked, m.Int32Constant(0)), &bad);
       } else {
@@ -2785,20 +2799,22 @@ TEST(DirectMemoryTest16BitWord32) {
   Label bad(&m);
   Node* constants[element_count];
 
-  Node* buffer_node1 = m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
+  TNode<IntPtrT> buffer_node1 =
+      m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
   for (size_t i = 0; i < element_count; ++i) {
     constants[i] =
         m.LoadBufferObject(buffer_node1, static_cast<int>(i * sizeof(int16_t)),
                            MachineType::Uint16());
   }
-  Node* buffer_node2 = m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
+  TNode<IntPtrT> buffer_node2 =
+      m.IntPtrConstant(reinterpret_cast<intptr_t>(buffer));
 
   for (size_t i = 0; i < element_count; ++i) {
     for (size_t j = 0; j < element_count; ++j) {
       Node* loaded = m.LoadBufferObject(buffer_node1,
                                         static_cast<int>(i * sizeof(int16_t)),
                                         MachineType::Uint16());
-      Node* masked = m.Word32And(loaded, constants[j]);
+      TNode<Word32T> masked = m.Word32And(loaded, constants[j]);
       if ((buffer[j] & buffer[i]) != 0) {
         m.GotoIf(m.Word32Equal(masked, m.Int32Constant(0)), &bad);
       } else {
@@ -2841,8 +2857,8 @@ TEST(LoadJSArrayElementsMap) {
   {
     CodeStubAssembler m(asm_tester.state());
     Node* context = m.Parameter(kNumParams + 2);
-    Node* native_context = m.LoadNativeContext(context);
-    Node* kind = m.SmiToInt32(m.Parameter(0));
+    TNode<NativeContext> native_context = m.LoadNativeContext(context);
+    TNode<Int32T> kind = m.SmiToInt32(m.Parameter(0));
     m.Return(m.LoadJSArrayElementsMap(kind, native_context));
   }
 
@@ -3288,8 +3304,8 @@ TEST(ExtractFixedArraySimpleIntPtrParameters) {
   CodeAssemblerTester asm_tester(isolate, kNumParams);
   {
     CodeStubAssembler m(asm_tester.state());
-    Node* p1_untagged = m.SmiUntag(m.Parameter(1));
-    Node* p2_untagged = m.SmiUntag(m.Parameter(2));
+    TNode<IntPtrT> p1_untagged = m.SmiUntag(m.Parameter(1));
+    TNode<IntPtrT> p2_untagged = m.SmiUntag(m.Parameter(2));
     m.Return(m.ExtractFixedArray(m.Parameter(0), p1_untagged, p2_untagged));
   }
   FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3334,8 +3350,9 @@ TEST(SingleInputPhiElimination) {
     Label end_label(&m, {&temp1, &temp2});
     temp1.Bind(m.Parameter(1));
     temp2.Bind(m.Parameter(1));
-    m.Branch(m.WordEqual(m.Parameter(0), m.Parameter(1)), &end_label,
-             &temp_label);
+    m.Branch(m.TaggedEqual(m.UncheckedCast<Object>(m.Parameter(0)),
+                           m.UncheckedCast<Object>(m.Parameter(1))),
+             &end_label, &temp_label);
     temp1.Bind(m.Parameter(2));
     temp2.Bind(m.Parameter(2));
     m.BIND(&temp_label);
@@ -3486,7 +3503,7 @@ TEST(TestCallBuiltinInlineTrampoline) {
   Node* str = m.Parameter(0);
   Node* context = m.Parameter(kNumParams + kContextOffset);
 
-  Node* index = m.SmiConstant(2);
+  TNode<Smi> index = m.SmiConstant(2);
 
   m.Return(m.CallStub(Builtins::CallableFor(isolate, Builtins::kStringRepeat),
                       context, str, index));
@@ -3511,7 +3528,7 @@ TEST(TestCallBuiltinIndirectLoad) {
   Node* str = m.Parameter(0);
   Node* context = m.Parameter(kNumParams + kContextOffset);
 
-  Node* index = m.SmiConstant(2);
+  TNode<Smi> index = m.SmiConstant(2);
 
   m.Return(m.CallStub(Builtins::CallableFor(isolate, Builtins::kStringRepeat),
                       context, str, index));
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index 1ddd463795374f..2bf07888af004d 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -312,11 +312,10 @@ TEST(ExponentNumberStr) {
   CHECK_EQ(1e-106, StringToDouble(".000001e-100", NO_FLAGS));
 }
 
-
-class OneBit1: public BitField<uint32_t, 0, 1> {};
-class OneBit2: public BitField<uint32_t, 7, 1> {};
-class EightBit1: public BitField<uint32_t, 0, 8> {};
-class EightBit2: public BitField<uint32_t, 13, 8> {};
+using OneBit1 = BitField<uint32_t, 0, 1>;
+using OneBit2 = BitField<uint32_t, 7, 1>;
+using EightBit1 = BitField<uint32_t, 0, 8>;
+using EightBit2 = BitField<uint32_t, 13, 8>;
 
 TEST(BitField) {
   uint32_t x;
@@ -351,9 +350,8 @@ TEST(BitField) {
   CHECK(!EightBit2::is_valid(256));
 }
 
-
-class UpperBits: public BitField64<int, 61, 3> {};
-class MiddleBits: public BitField64<int, 31, 2> {};
+using UpperBits = BitField64<int, 61, 3>;
+using MiddleBits = BitField64<int, 31, 2>;
 
 TEST(BitField64) {
   uint64_t x;
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 4b9ee4629fcbab..6d0ee0e512856a 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -37,6 +37,7 @@
 #include "src/base/platform/platform.h"
 #include "src/codegen/source-position-table.h"
 #include "src/deoptimizer/deoptimizer.h"
+#include "src/heap/spaces.h"
 #include "src/libplatform/default-platform.h"
 #include "src/logging/log.h"
 #include "src/objects/objects-inl.h"
@@ -45,11 +46,18 @@
 #include "src/profiler/tracing-cpu-profiler.h"
 #include "src/utils/utils.h"
 #include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
 #include "test/cctest/profiler-extension.h"
 
 #include "include/libplatform/v8-tracing.h"
+#include "src/libplatform/tracing/trace-event-listener.h"
 #include "src/tracing/trace-event.h"
 
+#ifdef V8_USE_PERFETTO
+#include "perfetto/trace/chrome/chrome_trace_event.pb.h"
+#include "perfetto/trace/trace.pb.h"
+#endif
+
 namespace v8 {
 namespace internal {
 namespace test_cpu_profiler {
@@ -79,12 +87,13 @@ static const char* reason(const i::DeoptimizeReason reason) {
 TEST(StartStop) {
   i::Isolate* isolate = CcTest::i_isolate();
   CpuProfilesCollection profiles(isolate);
-  ProfileGenerator generator(&profiles);
+  ProfilerCodeObserver code_observer(isolate);
+  ProfileGenerator generator(&profiles, code_observer.code_map());
   std::unique_ptr<ProfilerEventsProcessor> processor(
-      new SamplingEventsProcessor(isolate, &generator,
+      new SamplingEventsProcessor(isolate, &generator, &code_observer,
                                   v8::base::TimeDelta::FromMicroseconds(100),
                                   true));
-  processor->Start();
+  CHECK(processor->Start());
   processor->StopSynchronously();
 }
 
@@ -163,10 +172,13 @@ TEST(CodeEvents) {
   i::AbstractCode moved_code = CreateCode(&env);
 
   CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
-  ProfileGenerator* generator = new ProfileGenerator(profiles);
+  ProfilerCodeObserver code_observer(isolate);
+  ProfileGenerator* generator =
+      new ProfileGenerator(profiles, code_observer.code_map());
   ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
-      isolate, generator, v8::base::TimeDelta::FromMicroseconds(100), true);
-  processor->Start();
+      isolate, generator, &code_observer,
+      v8::base::TimeDelta::FromMicroseconds(100), true);
+  CHECK(processor->Start());
   ProfilerListener profiler_listener(isolate, processor);
   isolate->logger()->AddCodeEventListener(&profiler_listener);
 
@@ -222,13 +234,16 @@ TEST(TickEvents) {
   i::AbstractCode frame3_code = CreateCode(&env);
 
   CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
-  ProfileGenerator* generator = new ProfileGenerator(profiles);
+  ProfilerCodeObserver code_observer(isolate);
+  ProfileGenerator* generator =
+      new ProfileGenerator(profiles, code_observer.code_map());
   ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
-      CcTest::i_isolate(), generator,
+      CcTest::i_isolate(), generator, &code_observer,
       v8::base::TimeDelta::FromMicroseconds(100), true);
-  CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
+                       processor);
   profiles->StartProfiling("");
-  processor->Start();
+  CHECK(processor->Start());
   ProfilerListener profiler_listener(isolate, processor);
   isolate->logger()->AddCodeEventListener(&profiler_listener);
 
@@ -291,13 +306,16 @@ TEST(Issue1398) {
   i::AbstractCode code = CreateCode(&env);
 
   CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
-  ProfileGenerator* generator = new ProfileGenerator(profiles);
+  ProfilerCodeObserver code_observer(isolate);
+  ProfileGenerator* generator =
+      new ProfileGenerator(profiles, code_observer.code_map());
   ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
-      CcTest::i_isolate(), generator,
+      CcTest::i_isolate(), generator, &code_observer,
       v8::base::TimeDelta::FromMicroseconds(100), true);
-  CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
+                       processor);
   profiles->StartProfiling("");
-  processor->Start();
+  CHECK(processor->Start());
   ProfilerListener profiler_listener(isolate, processor);
 
   profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, code, "bbb");
@@ -305,7 +323,7 @@ TEST(Issue1398) {
   v8::internal::TickSample sample;
   sample.pc = reinterpret_cast<void*>(code.InstructionStart());
   sample.tos = nullptr;
-  sample.frames_count = v8::TickSample::kMaxFramesCount;
+  sample.frames_count = TickSample::kMaxFramesCount;
   for (unsigned i = 0; i < sample.frames_count; ++i) {
     sample.stack[i] = reinterpret_cast<void*>(code.InstructionStart());
   }
@@ -323,7 +341,7 @@ TEST(Issue1398) {
     ++actual_depth;
   }
 
-  CHECK_EQ(1 + v8::TickSample::kMaxFramesCount, actual_depth);  // +1 for PC.
+  CHECK_EQ(1 + TickSample::kMaxFramesCount, actual_depth);  // +1 for PC.
 }
 
 TEST(DeleteAllCpuProfiles) {
@@ -440,7 +458,8 @@ class ProfilerHelper {
       v8::Local<v8::Function> function, v8::Local<v8::Value> argv[], int argc,
       unsigned min_js_samples = 0, unsigned min_external_samples = 0,
       ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers,
-      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
+      unsigned max_samples = v8::CpuProfilingOptions::kNoSampleLimit,
+      v8::Local<v8::Context> context = v8::Local<v8::Context>());
 
   v8::CpuProfiler* profiler() { return profiler_; }
 
@@ -453,11 +472,12 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
                                     v8::Local<v8::Value> argv[], int argc,
                                     unsigned min_js_samples,
                                     unsigned min_external_samples,
-                                    ProfilingMode mode, unsigned max_samples) {
+                                    ProfilingMode mode, unsigned max_samples,
+                                    v8::Local<v8::Context> context) {
   v8::Local<v8::String> profile_name = v8_str("my_profile");
 
   profiler_->SetSamplingInterval(100);
-  profiler_->StartProfiling(profile_name, {mode, max_samples});
+  profiler_->StartProfiling(profile_name, {mode, max_samples, 0, context});
 
   v8::internal::CpuProfiler* iprofiler =
       reinterpret_cast<v8::internal::CpuProfiler*>(profiler_);
@@ -465,6 +485,7 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
       reinterpret_cast<i::SamplingEventsProcessor*>(iprofiler->processor())
           ->sampler();
   sampler->StartCountingSamples();
+
   do {
     function->Call(context_, context_->Global(), argc, argv).ToLocalChecked();
   } while (sampler->js_sample_count() < min_js_samples ||
@@ -1154,18 +1175,21 @@ static void TickLines(bool optimize) {
   CHECK_NE(code_address, kNullAddress);
 
   CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
-  ProfileGenerator* generator = new ProfileGenerator(profiles);
+  ProfilerCodeObserver code_observer(isolate);
+  ProfileGenerator* generator =
+      new ProfileGenerator(profiles, code_observer.code_map());
   ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
-      CcTest::i_isolate(), generator,
+      CcTest::i_isolate(), generator, &code_observer,
       v8::base::TimeDelta::FromMicroseconds(100), true);
-  CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
+                       processor);
   profiles->StartProfiling("");
   // TODO(delphick): Stop using the CpuProfiler internals here: This forces
   // LogCompiledFunctions so that source positions are collected everywhere.
   // This would normally happen automatically with CpuProfiler::StartProfiling
   // but doesn't because it's constructed with a generator and a processor.
   isolate->logger()->LogCompiledFunctions();
-  processor->Start();
+  CHECK(processor->Start());
   ProfilerListener profiler_listener(isolate, processor);
 
   // Enqueue code creation events.
@@ -1806,7 +1830,7 @@ TEST(Inlining2) {
   v8::Local<v8::String> profile_name = v8_str("inlining");
   profiler->StartProfiling(
       profile_name,
-      CpuProfilingOptions{v8::CpuProfilingMode::kCallerLineNumbers});
+      v8::CpuProfilingOptions{v8::CpuProfilingMode::kCallerLineNumbers});
 
   v8::Local<v8::Value> args[] = {
       v8::Integer::New(env->GetIsolate(), 50000 * load_factor)};
@@ -2488,7 +2512,6 @@ TEST(DeoptAtFirstLevelInlinedSource) {
   iprofiler->DeleteProfile(iprofile);
 }
 
-
 // deopt at the second level inlined function
 TEST(DeoptAtSecondLevelInlinedSource) {
   if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
@@ -2623,6 +2646,41 @@ using v8::platform::tracing::TraceObject;
 
 namespace {
 
+#ifdef V8_USE_PERFETTO
+
+class CpuProfilerListener : public platform::tracing::TraceEventListener {
+ public:
+  void ProcessPacket(const ::perfetto::protos::TracePacket& packet) {
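+    // Stitch the JSON payloads of "Profile" and "ProfileChunk" trace events
+    // into a single JSON array; a packet containing any other event is
+    // skipped.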
+    for (const ::perfetto::protos::ChromeTraceEvent& trace_event :
+         packet.chrome_events().trace_events()) {
+      if (trace_event.name() != std::string("Profile") &&
+          trace_event.name() != std::string("ProfileChunk"))
+        return;
+      CHECK(!profile_id_ || trace_event.id() == profile_id_);
+      CHECK_EQ(1, trace_event.args_size());
+      CHECK(trace_event.args()[0].has_json_value());
+      profile_id_ = trace_event.id();
+      result_json_ += result_json_.empty() ? "[" : ",\n";
+      result_json_ += trace_event.args()[0].json_value();
+    }
+  }
+
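+  // Appends the closing bracket, so this should be called at most once per
+  // tracing session.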
+  const std::string& result_json() {
+    result_json_ += "]";
+    return result_json_;
+  }
+  void Reset() {
+    result_json_.clear();
+    profile_id_ = 0;
+  }
+
+ private:
+  std::string result_json_;
+  uint64_t profile_id_ = 0;
+};
+
+#else
+
 class CpuProfileEventChecker : public v8::platform::tracing::TraceWriter {
  public:
   void AppendTraceEvent(TraceObject* trace_event) override {
@@ -2651,6 +2709,8 @@ class CpuProfileEventChecker : public v8::platform::tracing::TraceWriter {
   uint64_t profile_id_ = 0;
 };
 
+#endif  // !V8_USE_PERFETTO
+
 }  // namespace
 
 TEST(TracingCpuProfiler) {
@@ -2658,17 +2718,20 @@ TEST(TracingCpuProfiler) {
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
 
-  CpuProfileEventChecker* event_checker = new CpuProfileEventChecker();
-  TraceBuffer* ring_buffer =
-      TraceBuffer::CreateTraceBufferRingBuffer(1, event_checker);
   auto* tracing_controller =
       static_cast<v8::platform::tracing::TracingController*>(
           i::V8::GetCurrentPlatform()->GetTracingController());
-  tracing_controller->Initialize(ring_buffer);
 
 #ifdef V8_USE_PERFETTO
   std::ostringstream perfetto_output;
   tracing_controller->InitializeForPerfetto(&perfetto_output);
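+  // With Perfetto enabled, profile trace events are observed through a
+  // TraceEventListener instead of a ring-buffer TraceWriter.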
+  CpuProfilerListener listener;
+  tracing_controller->SetTraceEventListenerForTesting(&listener);
+#else
+  CpuProfileEventChecker* event_checker = new CpuProfileEventChecker();
+  TraceBuffer* ring_buffer =
+      TraceBuffer::CreateTraceBufferRingBuffer(1, event_checker);
+  tracing_controller->Initialize(ring_buffer);
 #endif
 
   bool result = false;
@@ -2693,8 +2756,13 @@ TEST(TracingCpuProfiler) {
     CompileRun(test_code.c_str());
     tracing_controller->StopTracing();
 
+#ifdef V8_USE_PERFETTO
+    std::string profile_json = listener.result_json();
+    listener.Reset();
+#else
     std::string profile_json = event_checker->result_json();
     event_checker->Reset();
+#endif
     CHECK_LT(0u, profile_json.length());
     printf("Profile JSON: %s\n", profile_json.c_str());
 
@@ -2930,6 +2998,15 @@ TEST(SourcePositionTable) {
 
   CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(21));
   CHECK_EQ(0, info.GetInliningId(100));
+
+  // Test that subsequent SetPosition calls with the same pc_offset are ignored.
+  info.SetPosition(25, 4, SourcePosition::kNotInlined);
+  CHECK_EQ(2, info.GetSourceLineNumber(21));
+  CHECK_EQ(3, info.GetSourceLineNumber(100));
+  CHECK_EQ(3, info.GetSourceLineNumber(std::numeric_limits<int>::max()));
+
+  CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(21));
+  CHECK_EQ(0, info.GetInliningId(100));
 }
 
 TEST(MultipleProfilers) {
@@ -3034,8 +3111,8 @@ TEST(MultipleIsolates) {
   IsolateThread thread1;
   IsolateThread thread2;
 
-  thread1.Start();
-  thread2.Start();
+  CHECK(thread1.Start());
+  CHECK(thread2.Start());
 
   thread1.Join();
   thread2.Join();
@@ -3062,12 +3139,13 @@ TEST(FastStopProfiling) {
 TEST(LowPrecisionSamplingStartStopInternal) {
   i::Isolate* isolate = CcTest::i_isolate();
   CpuProfilesCollection profiles(isolate);
-  ProfileGenerator generator(&profiles);
+  ProfilerCodeObserver code_observer(isolate);
+  ProfileGenerator generator(&profiles, code_observer.code_map());
   std::unique_ptr<ProfilerEventsProcessor> processor(
-      new SamplingEventsProcessor(isolate, &generator,
+      new SamplingEventsProcessor(isolate, &generator, &code_observer,
                                   v8::base::TimeDelta::FromMicroseconds(100),
                                   false));
-  processor->Start();
+  CHECK(processor->Start());
   processor->StopSynchronously();
 }
 
@@ -3187,11 +3265,15 @@ TEST(ProflilerSubsampling) {
   i::HandleScope scope(isolate);
 
   CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
-  ProfileGenerator* generator = new ProfileGenerator(profiles);
-  ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
-      isolate, generator, v8::base::TimeDelta::FromMicroseconds(1),
-      /* use_precise_sampling */ true);
-  CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+  ProfilerCodeObserver code_observer(isolate);
+  ProfileGenerator* generator =
+      new ProfileGenerator(profiles, code_observer.code_map());
+  ProfilerEventsProcessor* processor =
+      new SamplingEventsProcessor(isolate, generator, &code_observer,
+                                  v8::base::TimeDelta::FromMicroseconds(1),
+                                  /* use_precise_sampling */ true);
+  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
+                       processor);
 
   // Create a new CpuProfile that wants samples at 8us.
   CpuProfile profile(&profiler, "",
@@ -3228,11 +3310,15 @@ TEST(DynamicResampling) {
   i::HandleScope scope(isolate);
 
   CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
-  ProfileGenerator* generator = new ProfileGenerator(profiles);
-  ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
-      isolate, generator, v8::base::TimeDelta::FromMicroseconds(1),
-      /* use_precise_sampling */ true);
-  CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+  ProfilerCodeObserver code_observer(isolate);
+  ProfileGenerator* generator =
+      new ProfileGenerator(profiles, code_observer.code_map());
+  ProfilerEventsProcessor* processor =
+      new SamplingEventsProcessor(isolate, generator, &code_observer,
+                                  v8::base::TimeDelta::FromMicroseconds(1),
+                                  /* use_precise_sampling */ true);
+  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
+                       processor);
 
   // Set a 1us base sampling rate, dividing all possible intervals.
   profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(1));
@@ -3286,11 +3372,15 @@ TEST(DynamicResamplingWithBaseInterval) {
   i::HandleScope scope(isolate);
 
   CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
-  ProfileGenerator* generator = new ProfileGenerator(profiles);
-  ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
-      isolate, generator, v8::base::TimeDelta::FromMicroseconds(1),
-      /* use_precise_sampling */ true);
-  CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+  ProfilerCodeObserver code_observer(isolate);
+  ProfileGenerator* generator =
+      new ProfileGenerator(profiles, code_observer.code_map());
+  ProfilerEventsProcessor* processor =
+      new SamplingEventsProcessor(isolate, generator, &code_observer,
+                                  v8::base::TimeDelta::FromMicroseconds(1),
+                                  /* use_precise_sampling */ true);
+  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
+                       processor);
 
   profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(7));
 
@@ -3338,6 +3428,184 @@ TEST(DynamicResamplingWithBaseInterval) {
   profiles->StopProfiling("5us");
 }
 
+// Tests that functions compiled after a profiler has been stopped are still
+// visible when the profiler is started again. (https://crbug.com/v8/9151)
+TEST(Bug9151StaleCodeEntries) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  v8::Local<v8::FunctionTemplate> func_template =
+      v8::FunctionTemplate::New(env->GetIsolate(), CallCollectSample);
+  v8::Local<v8::Function> func =
+      func_template->GetFunction(env.local()).ToLocalChecked();
+  func->SetName(v8_str("CallCollectSample"));
+  env->Global()->Set(env.local(), v8_str("CallCollectSample"), func).FromJust();
+
+  v8::CpuProfiler* profiler =
+      v8::CpuProfiler::New(env->GetIsolate(), kDebugNaming, kEagerLogging);
+  v8::Local<v8::String> profile_name = v8_str("");
+
+  // Warm up the profiler to create the initial code map.
+  profiler->StartProfiling(profile_name);
+  profiler->StopProfiling(profile_name);
+
+  // Log a function compilation (executed once to force a compilation).
+  CompileRun(R"(
+      function start() {
+        CallCollectSample();
+      }
+      start();
+  )");
+
+  // Restart the profiler, and execute both the JS function and callback.
+  profiler->StartProfiling(profile_name, true);
+  CompileRun("start();");
+  v8::CpuProfile* profile = profiler->StopProfiling(profile_name);
+
+  auto* root = profile->GetTopDownRoot();
+  auto* toplevel = GetChild(env.local(), root, "");
+
+  auto* start = FindChild(env.local(), toplevel, "start");
+  CHECK(start);
+
+  auto* callback = FindChild(env.local(), start, "CallCollectSample");
+  CHECK(callback);
+}
+
+// Tests that functions from other contexts aren't recorded when the profiler
+// is filtered to a specific context.
+TEST(ContextIsolation) {
+  i::FLAG_allow_natives_syntax = true;
+  LocalContext execution_env;
+  i::HandleScope scope(CcTest::i_isolate());
+
+  // Install CollectSample callback for more deterministic sampling.
+  v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
+      execution_env.local()->GetIsolate(), CallCollectSample);
+  v8::Local<v8::Function> func =
+      func_template->GetFunction(execution_env.local()).ToLocalChecked();
+  func->SetName(v8_str("CallCollectSample"));
+  execution_env->Global()
+      ->Set(execution_env.local(), v8_str("CallCollectSample"), func)
+      .FromJust();
+
+  ProfilerHelper helper(execution_env.local());
+  CompileRun(R"(
+    function optimized() {
+      CallCollectSample();
+    }
+
+    function unoptimized() {
+      CallCollectSample();
+    }
+
+    function start() {
+      // Test optimized functions
+      %PrepareFunctionForOptimization(optimized);
+      optimized();
+      optimized();
+      %OptimizeFunctionOnNextCall(optimized);
+      optimized();
+
+      // Test unoptimized functions
+      %NeverOptimizeFunction(unoptimized);
+      unoptimized();
+
+      // Test callback
+      CallCollectSample();
+    }
+  )");
+  v8::Local<v8::Function> function =
+      GetFunction(execution_env.local(), "start");
+
+  v8::CpuProfile* same_context_profile = helper.Run(
+      function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
+      v8::CpuProfilingOptions::kNoSampleLimit, execution_env.local());
+  const v8::CpuProfileNode* root = same_context_profile->GetTopDownRoot();
+  const v8::CpuProfileNode* start_node = FindChild(root, "start");
+  CHECK(start_node);
+  const v8::CpuProfileNode* optimized_node = FindChild(start_node, "optimized");
+  CHECK(optimized_node);
+  const v8::CpuProfileNode* unoptimized_node =
+      FindChild(start_node, "unoptimized");
+  CHECK(unoptimized_node);
+  const v8::CpuProfileNode* callback_node =
+      FindChild(start_node, "CallCollectSample");
+  CHECK(callback_node);
+
+  {
+    LocalContext filter_env;
+    v8::CpuProfile* diff_context_profile = helper.Run(
+        function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
+        v8::CpuProfilingOptions::kNoSampleLimit, filter_env.local());
+    const v8::CpuProfileNode* diff_root =
+        diff_context_profile->GetTopDownRoot();
+    // Ensure that no children were recorded (including callbacks, builtins).
+    CHECK(!FindChild(diff_root, "start"));
+  }
+}
+
+// Tests that when a native context being filtered on is moved, we continue to
+// track its execution.
+TEST(ContextFilterMovedNativeContext) {
+  i::FLAG_allow_natives_syntax = true;
+  i::FLAG_manual_evacuation_candidates_selection = true;
+  LocalContext env;
+  i::HandleScope scope(CcTest::i_isolate());
+
+  {
+    // Install CollectSample callback for more deterministic sampling.
+    v8::Local<v8::FunctionTemplate> sample_func_template =
+        v8::FunctionTemplate::New(env.local()->GetIsolate(), CallCollectSample);
+    v8::Local<v8::Function> sample_func =
+        sample_func_template->GetFunction(env.local()).ToLocalChecked();
+    sample_func->SetName(v8_str("CallCollectSample"));
+    env->Global()
+        ->Set(env.local(), v8_str("CallCollectSample"), sample_func)
+        .FromJust();
+
+    // Install a function that triggers the native context to be moved.
+    v8::Local<v8::FunctionTemplate> move_func_template =
+        v8::FunctionTemplate::New(
+            env.local()->GetIsolate(),
+            [](const v8::FunctionCallbackInfo<v8::Value>& info) {
+              i::Isolate* isolate =
+                  reinterpret_cast<i::Isolate*>(info.GetIsolate());
+              i::heap::ForceEvacuationCandidate(
+                  i::Page::FromHeapObject(isolate->raw_native_context()));
+              CcTest::CollectAllGarbage();
+            });
+    v8::Local<v8::Function> move_func =
+        move_func_template->GetFunction(env.local()).ToLocalChecked();
+    move_func->SetName(v8_str("ForceNativeContextMove"));
+    env->Global()
+        ->Set(env.local(), v8_str("ForceNativeContextMove"), move_func)
+        .FromJust();
+
+    ProfilerHelper helper(env.local());
+    CompileRun(R"(
+      function start() {
+        ForceNativeContextMove();
+        CallCollectSample();
+      }
+    )");
+    v8::Local<v8::Function> function = GetFunction(env.local(), "start");
+
+    v8::CpuProfile* profile = helper.Run(
+        function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
+        v8::CpuProfilingOptions::kNoSampleLimit, env.local());
+    const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+    const v8::CpuProfileNode* start_node = FindChild(root, "start");
+    CHECK(start_node);
+
+    // Verify that after moving the native context, CallCollectSample is still
+    // recorded.
+    const v8::CpuProfileNode* callback_node =
+        FindChild(start_node, "CallCollectSample");
+    CHECK(callback_node);
+  }
+}
+
 enum class EntryCountMode { kAll, kOnlyInlined };
 
 // Count the number of unique source positions.
diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc
new file mode 100644
index 00000000000000..67236e5a311ca4
--- /dev/null
+++ b/deps/v8/test/cctest/test-debug-helper.cc
@@ -0,0 +1,227 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api/api-inl.h"
+#include "src/heap/spaces.h"
+#include "test/cctest/cctest.h"
+#include "tools/debug_helper/debug-helper.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+namespace d = v8::debug_helper;
+
+uintptr_t memory_fail_start = 0;
+uintptr_t memory_fail_end = 0;
+
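+// RAII helper: while an instance is live, ReadMemory() reports addresses in
+// [start, end] as valid but inaccessible, simulating unreadable debuggee
+// memory.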
+class MemoryFailureRegion {
+ public:
+  MemoryFailureRegion(uintptr_t start, uintptr_t end) {
+    memory_fail_start = start;
+    memory_fail_end = end;
+  }
+  ~MemoryFailureRegion() {
+    memory_fail_start = 0;
+    memory_fail_end = 0;
+  }
+};
+
+// Implement the memory-reading callback. This one just fetches memory from the
+// current process, but a real implementation for a debugging extension would
+// fetch memory from the debuggee process or crash dump.
+d::MemoryAccessResult ReadMemory(uintptr_t address, uint8_t* destination,
+                                 size_t byte_count) {
+  if (address >= memory_fail_start && address <= memory_fail_end) {
+    // Simulate failure to read debuggee memory.
+    return d::MemoryAccessResult::kAddressValidButInaccessible;
+  }
+  memcpy(destination, reinterpret_cast<void*>(address), byte_count);
+  return d::MemoryAccessResult::kOk;
+}
+
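+// Checks that a property descriptor matches the expected type, name, kind, and
+// value count. On pointer-compressed builds the stored type may be reported as
+// TaggedValue, so only the decompressed type must match exactly.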
+void CheckProp(const d::ObjectProperty& property, const char* expected_type,
+               const char* expected_name,
+               d::PropertyKind expected_kind = d::PropertyKind::kSingle,
+               size_t expected_num_values = 1) {
+  CHECK_EQ(property.num_values, expected_num_values);
+  CHECK(property.type == std::string("v8::internal::TaggedValue") ||
+        property.type == std::string(expected_type));
+  CHECK(property.decompressed_type == std::string(expected_type));
+  CHECK(property.kind == expected_kind);
+  CHECK(property.name == std::string(expected_name));
+}
+
+template <typename TValue>
+void CheckProp(const d::ObjectProperty& property, const char* expected_type,
+               const char* expected_name, TValue expected_value) {
+  CheckProp(property, expected_type, expected_name);
+  CHECK(*reinterpret_cast<TValue*>(property.address) == expected_value);
+}
+
+}  // namespace
+
+TEST(GetObjectProperties) {
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  LocalContext context;
+  d::Roots roots{0, 0, 0, 0};  // We don't know the heap roots.
+
+  v8::Local<v8::Value> v = CompileRun("42");
+  Handle<Object> o = v8::Utils::OpenHandle(*v);
+  d::ObjectPropertiesResultPtr props =
+      d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
+  CHECK(props->type_check_result == d::TypeCheckResult::kSmi);
+  CHECK(props->brief == std::string("42 (0x2a)"));
+  CHECK(props->type == std::string("v8::internal::Smi"));
+  CHECK_EQ(props->num_properties, 0);
+
+  v = CompileRun("[\"a\", \"bc\"]");
+  o = v8::Utils::OpenHandle(*v);
+  props = d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
+  CHECK(props->type_check_result == d::TypeCheckResult::kUsedMap);
+  CHECK(props->type == std::string("v8::internal::JSArray"));
+  CHECK_EQ(props->num_properties, 4);
+  CheckProp(*props->properties[0], "v8::internal::Map", "map");
+  CheckProp(*props->properties[1], "v8::internal::Object",
+            "properties_or_hash");
+  CheckProp(*props->properties[2], "v8::internal::FixedArrayBase", "elements");
+  CheckProp(*props->properties[3], "v8::internal::Object", "length",
+            static_cast<i::Tagged_t>(IntToSmi(2)));
+
+  // We need to supply a root address for decompression before reading the
+  // elements from the JSArray.
+  roots.any_heap_pointer = o->ptr();
+
+  i::Tagged_t properties_or_hash =
+      *reinterpret_cast<i::Tagged_t*>(props->properties[1]->address);
+  i::Tagged_t elements =
+      *reinterpret_cast<i::Tagged_t*>(props->properties[2]->address);
+
+  // The properties_or_hash field should be an empty fixed array. Since
+  // that is at a known offset, we should be able to detect it even without
+  // any ability to read memory.
+  {
+    MemoryFailureRegion failure(0, UINTPTR_MAX);
+    props = d::GetObjectProperties(properties_or_hash, &ReadMemory, roots);
+    CHECK(props->type_check_result ==
+          d::TypeCheckResult::kObjectPointerValidButInaccessible);
+    CHECK(props->type == std::string("v8::internal::HeapObject"));
+    CHECK_EQ(props->num_properties, 1);
+    CheckProp(*props->properties[0], "v8::internal::Map", "map");
+    CHECK(std::string(props->brief).substr(0, 21) ==
+          std::string("maybe EmptyFixedArray"));
+
+    // Provide a heap root so the API can identify the object with certainty.
+    roots.read_only_space =
+        reinterpret_cast<uintptr_t>(reinterpret_cast<i::Isolate*>(isolate)
+                                        ->heap()
+                                        ->read_only_space()
+                                        ->first_page());
+    props = d::GetObjectProperties(properties_or_hash, &ReadMemory, roots);
+    CHECK(props->type_check_result ==
+          d::TypeCheckResult::kObjectPointerValidButInaccessible);
+    CHECK(props->type == std::string("v8::internal::HeapObject"));
+    CHECK_EQ(props->num_properties, 1);
+    CheckProp(*props->properties[0], "v8::internal::Map", "map");
+    CHECK(std::string(props->brief).substr(0, 15) ==
+          std::string("EmptyFixedArray"));
+  }
+
+  props = d::GetObjectProperties(elements, &ReadMemory, roots);
+  CHECK(props->type_check_result == d::TypeCheckResult::kUsedMap);
+  CHECK(props->type == std::string("v8::internal::FixedArray"));
+  CHECK_EQ(props->num_properties, 3);
+  CheckProp(*props->properties[0], "v8::internal::Map", "map");
+  CheckProp(*props->properties[1], "v8::internal::Object", "length",
+            static_cast<i::Tagged_t>(IntToSmi(2)));
+  CheckProp(*props->properties[2], "v8::internal::Object", "objects",
+            d::PropertyKind::kArrayOfKnownSize, 2);
+
+  // Get the second string value from the FixedArray.
+  i::Tagged_t second_string_address = *reinterpret_cast<i::Tagged_t*>(
+      props->properties[2]->address + sizeof(i::Tagged_t));
+  props = d::GetObjectProperties(second_string_address, &ReadMemory, roots);
+  CHECK(props->type_check_result == d::TypeCheckResult::kUsedMap);
+  CHECK(props->type == std::string("v8::internal::SeqOneByteString"));
+  CHECK_EQ(props->num_properties, 4);
+  CheckProp(*props->properties[0], "v8::internal::Map", "map");
+  CheckProp(*props->properties[1], "uint32_t", "hash_field");
+  CheckProp(*props->properties[2], "int32_t", "length", 2);
+  CheckProp(*props->properties[3], "char", "chars",
+            d::PropertyKind::kArrayOfKnownSize, 2);
+  CHECK_EQ(
+      strncmp("bc",
+              reinterpret_cast<const char*>(props->properties[3]->address), 2),
+      0);
+
+  // Read the second string again, using a type hint instead of the map. All of
+  // its properties should match what we read last time.
+  d::ObjectPropertiesResultPtr props2;
+  {
+    uintptr_t map_address =
+        d::GetObjectProperties(
+            *reinterpret_cast<i::Tagged_t*>(props->properties[0]->address),
+            &ReadMemory, roots)
+            ->properties[0]
+            ->address;
+    MemoryFailureRegion failure(map_address, map_address + i::Map::kSize);
+    props2 = d::GetObjectProperties(second_string_address, &ReadMemory, roots,
+                                    "v8::internal::String");
+    CHECK(props2->type_check_result == d::TypeCheckResult::kUsedTypeHint);
+    CHECK(props2->type == std::string("v8::internal::String"));
+    CHECK_EQ(props2->num_properties, 3);
+    CheckProp(*props2->properties[0], "v8::internal::Map", "map",
+              *reinterpret_cast<i::Tagged_t*>(props->properties[0]->address));
+    CheckProp(*props2->properties[1], "uint32_t", "hash_field",
+              *reinterpret_cast<int32_t*>(props->properties[1]->address));
+    CheckProp(*props2->properties[2], "int32_t", "length", 2);
+  }
+
+  // Try a weak reference.
+  props2 = d::GetObjectProperties(second_string_address | kWeakHeapObjectMask,
+                                  &ReadMemory, roots);
+  std::string weak_ref_prefix = "weak ref to ";
+  CHECK(weak_ref_prefix + props->brief == props2->brief);
+  CHECK(props2->type_check_result == d::TypeCheckResult::kUsedMap);
+  CHECK(props2->type == std::string("v8::internal::SeqOneByteString"));
+  CHECK_EQ(props2->num_properties, 4);
+  CheckProp(*props2->properties[0], "v8::internal::Map", "map",
+            *reinterpret_cast<i::Tagged_t*>(props->properties[0]->address));
+  CheckProp(*props2->properties[1], "uint32_t", "hash_field",
+            *reinterpret_cast<i::Tagged_t*>(props->properties[1]->address));
+  CheckProp(*props2->properties[2], "int32_t", "length", 2);
+
+  // Build a complicated string (multi-level cons with slices inside) to test
+  // string printing.
+  v = CompileRun(R"(
+    const alphabet = "abcdefghijklmnopqrstuvwxyz";
+    alphabet.substr(3,20) + alphabet.toUpperCase().substr(5,15) + "7")");
+  o = v8::Utils::OpenHandle(*v);
+  props = d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
+  CHECK(std::string(props->brief).substr(0, 38) ==
+        std::string("\"defghijklmnopqrstuvwFGHIJKLMNOPQRST7\""));
+
+  // Cause a failure when reading the "second" pointer within the top-level
+  // ConsString.
+  {
+    CheckProp(*props->properties[4], "v8::internal::String", "second");
+    uintptr_t second_address = props->properties[4]->address;
+    MemoryFailureRegion failure(second_address, second_address + 4);
+    props = d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
+    CHECK(std::string(props->brief).substr(0, 40) ==
+          std::string("\"defghijklmnopqrstuvwFGHIJKLMNOPQRST...\""));
+  }
+
+  // Build a very long string.
+  v = CompileRun("'a'.repeat(1000)");
+  o = v8::Utils::OpenHandle(*v);
+  props = d::GetObjectProperties(o->ptr(), &ReadMemory, roots);
+  CHECK(std::string(props->brief).substr(79, 7) == std::string("aa...\" "));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index b3da0329f6fe66..4ad55ef6b5fc4d 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -553,7 +553,7 @@ TEST(BreakPointBuiltin) {
   builtin = CompileRun("String.prototype.repeat").As<v8::Function>();
 
   // Run with breakpoint.
-  bp = SetBreakPoint(builtin, 0);
+  bp = SetBreakPoint(builtin, 0, "this != 1");
   ExpectString("'b'.repeat(10)", "bbbbbbbbbb");
   CHECK_EQ(1, break_point_hit_count);
 
@@ -754,7 +754,7 @@ TEST(BreakPointConstructorBuiltin) {
   CHECK_EQ(0, break_point_hit_count);
 
   // Run with breakpoint.
-  bp = SetBreakPoint(builtin, 0);
+  bp = SetBreakPoint(builtin, 0, "this != 1");
   ExpectString("(new Promise(()=>{})).toString()", "[object Promise]");
   CHECK_EQ(1, break_point_hit_count);
 
@@ -821,7 +821,7 @@ TEST(BreakPointInlinedBuiltin) {
   CHECK_EQ(0, break_point_hit_count);
 
   // Run with breakpoint.
-  bp = SetBreakPoint(builtin, 0);
+  bp = SetBreakPoint(builtin, 0, "this != 1");
   CompileRun("Math.sin(0.1);");
   CHECK_EQ(1, break_point_hit_count);
   CompileRun("test(0.2);");
@@ -869,7 +869,7 @@ TEST(BreakPointInlineBoundBuiltin) {
   CHECK_EQ(0, break_point_hit_count);
 
   // Run with breakpoint.
-  bp = SetBreakPoint(builtin, 0);
+  bp = SetBreakPoint(builtin, 0, "this != 1");
   CompileRun("'a'.repeat(2);");
   CHECK_EQ(1, break_point_hit_count);
   CompileRun("test(7);");
@@ -914,7 +914,7 @@ TEST(BreakPointInlinedConstructorBuiltin) {
   CHECK_EQ(0, break_point_hit_count);
 
   // Run with breakpoint.
-  bp = SetBreakPoint(builtin, 0);
+  bp = SetBreakPoint(builtin, 0, "this != 1");
   CompileRun("new Promise(()=>{});");
   CHECK_EQ(1, break_point_hit_count);
   CompileRun("test(7);");
@@ -1090,13 +1090,18 @@ TEST(BreakPointApiFunction) {
   break_point_hit_count = 0;
 
   // Run with breakpoint.
-  bp = SetBreakPoint(function, 0);
+  bp = SetBreakPoint(function, 0, "this != 1");
   ExpectInt32("f()", 2);
   CHECK_EQ(1, break_point_hit_count);
 
   ExpectInt32("f()", 2);
   CHECK_EQ(2, break_point_hit_count);
 
+  // Direct call through API does not trigger breakpoint.
+  function->Call(env.local(), v8::Undefined(env->GetIsolate()), 0, nullptr)
+      .ToLocalChecked();
+  CHECK_EQ(2, break_point_hit_count);
+
   // Run without breakpoints.
   ClearBreakPoint(bp);
   ExpectInt32("f()", 2);
@@ -1106,6 +1111,46 @@ TEST(BreakPointApiFunction) {
   CheckDebuggerUnloaded();
 }
 
+TEST(BreakPointApiConstructor) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  DebugEventCounter delegate;
+  v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+
+  i::Handle<i::BreakPoint> bp;
+
+  v8::Local<v8::FunctionTemplate> function_template =
+      v8::FunctionTemplate::New(env->GetIsolate(), NoOpFunctionCallback);
+
+  v8::Local<v8::Function> function =
+      function_template->GetFunction(env.local()).ToLocalChecked();
+
+  env->Global()->Set(env.local(), v8_str("f"), function).ToChecked();
+
+  // === Test simple API constructor ===
+  break_point_hit_count = 0;
+
+  // Run with breakpoint.
+  bp = SetBreakPoint(function, 0, "this != 1");
+  CompileRun("new f()");
+  CHECK_EQ(1, break_point_hit_count);
+  CompileRun("new f()");
+  CHECK_EQ(2, break_point_hit_count);
+
+  // Direct call through API does not trigger breakpoint.
+  function->NewInstance(env.local()).ToLocalChecked();
+  CHECK_EQ(2, break_point_hit_count);
+
+  // Run without breakpoints.
+  ClearBreakPoint(bp);
+  CompileRun("new f()");
+  CHECK_EQ(2, break_point_hit_count);
+
+  v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+  CheckDebuggerUnloaded();
+}
+
 void GetWrapperCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
   args.GetReturnValue().Set(
       args[0]
@@ -1145,15 +1190,16 @@ TEST(BreakPointApiGetter) {
   // Run with breakpoint.
   bp = SetBreakPoint(function, 0);
   CompileRun("get_wrapper(o, 'f')");
-  CHECK_EQ(1, break_point_hit_count);
+  CHECK_EQ(0, break_point_hit_count);
 
   CompileRun("o.f");
-  CHECK_EQ(2, break_point_hit_count);
+  CHECK_EQ(1, break_point_hit_count);
 
   // Run without breakpoints.
   ClearBreakPoint(bp);
   CompileRun("get_wrapper(o, 'f', 2)");
-  CHECK_EQ(2, break_point_hit_count);
+  CompileRun("o.f");
+  CHECK_EQ(1, break_point_hit_count);
 
   v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
   CheckDebuggerUnloaded();
@@ -1202,12 +1248,12 @@ TEST(BreakPointApiSetter) {
   CHECK_EQ(1, break_point_hit_count);
 
   CompileRun("set_wrapper(o, 'f', 2)");
-  CHECK_EQ(2, break_point_hit_count);
+  CHECK_EQ(1, break_point_hit_count);
 
   // Run without breakpoints.
   ClearBreakPoint(bp);
   CompileRun("o.f = 3");
-  CHECK_EQ(2, break_point_hit_count);
+  CHECK_EQ(1, break_point_hit_count);
 
   // === Test API builtin as setter, with condition ===
   break_point_hit_count = 0;
@@ -1218,15 +1264,16 @@ TEST(BreakPointApiSetter) {
   CHECK_EQ(0, break_point_hit_count);
 
   CompileRun("set_wrapper(o, 'f', 3)");
-  CHECK_EQ(1, break_point_hit_count);
+  CHECK_EQ(0, break_point_hit_count);
 
   CompileRun("o.f = 3");
-  CHECK_EQ(2, break_point_hit_count);
+  CHECK_EQ(1, break_point_hit_count);
 
   // Run without breakpoints.
   ClearBreakPoint(bp);
   CompileRun("set_wrapper(o, 'f', 2)");
-  CHECK_EQ(2, break_point_hit_count);
+  CompileRun("o.f = 3");
+  CHECK_EQ(1, break_point_hit_count);
 
   v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
   CheckDebuggerUnloaded();
@@ -3435,6 +3482,35 @@ TEST(SyntaxErrorEventOnSyntaxException) {
   CHECK_EQ(3, delegate.compile_error_event_count);
 }
 
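+// Counts ExceptionThrown notifications delivered to the debug delegate.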
+class ExceptionEventCounter : public v8::debug::DebugDelegate {
+ public:
+  void ExceptionThrown(v8::Local<v8::Context> paused_context,
+                       v8::Local<v8::Value> exception,
+                       v8::Local<v8::Value> promise, bool is_uncaught,
+                       v8::debug::ExceptionType) override {
+    exception_event_count++;
+  }
+  int exception_event_count = 0;
+};
+
+TEST(NoBreakOnStackOverflow) {
+  i::FLAG_stack_size = 100;
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  ChangeBreakOnException(true, true);
+
+  ExceptionEventCounter delegate;
+  v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+  CHECK_EQ(0, delegate.exception_event_count);
+
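+  // The stack overflow inside f() must not be reported as an exception event.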
+  CompileRun(
+      "function f() { return f(); }"
+      "try { f() } catch {}");
+
+  CHECK_EQ(0, delegate.exception_event_count);
+}
+
 // Tests that break event is sent when event listener is reset.
 TEST(BreakEventWhenEventListenerIsReset) {
   LocalContext env;
@@ -3854,7 +3930,7 @@ TEST(DebugBreakOffThreadTerminate) {
   DebugBreakTriggerTerminate delegate;
   v8::debug::SetDebugDelegate(isolate, &delegate);
   TerminationThread terminator(isolate);
-  terminator.Start();
+  CHECK(terminator.Start());
   v8::TryCatch try_catch(env->GetIsolate());
   env->GetIsolate()->RequestInterrupt(BreakRightNow, nullptr);
   CompileRun("while (true);");
@@ -3950,7 +4026,7 @@ class ArchiveRestoreThread : public v8::base::Thread,
       // on) so that the ThreadManager is forced to archive and restore
       // the current thread.
       ArchiveRestoreThread child(isolate_, spawn_count_ - 1);
-      child.Start();
+      CHECK(child.Start());
       child.Join();
 
       // The child thread sets itself as the debug delegate, so we need to
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index ed4fe6c6e09102..ba4d92d3a2c447 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -854,7 +854,7 @@ TEST_(branch) {
   COMPARE(br(x0), "br x0");
   COMPARE(blr(x1), "blr x1");
   COMPARE(ret(x2), "ret x2");
-  COMPARE(ret(lr), "ret")
+  COMPARE(ret(lr), "ret");
 
   CLEANUP();
 }
@@ -1881,6 +1881,14 @@ TEST_(system_nop) {
   CLEANUP();
 }
 
+TEST(system_pauth) {
+  SET_UP_ASM();
+
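+  // ARM64 pointer-authentication (PAuth) instructions.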
+  COMPARE(pacia1716(), "pacia1716");
+  COMPARE(paciasp(), "paciasp");
+  COMPARE(autia1716(), "autia1716");
+  COMPARE(autiasp(), "autiasp");
+
+  CLEANUP();
+}
 
 TEST_(debug) {
   InitializeVM();
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index ff21e9b265dbd3..08793fba4a0852 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -517,6 +517,8 @@ TEST(DisasmX64) {
       __ haddps(xmm1, xmm0);
       __ haddps(xmm1, Operand(rbx, rcx, times_4, 10000));
       __ lddqu(xmm1, Operand(rdx, 4));
+      __ movddup(xmm1, Operand(rax, 5));
+      __ movddup(xmm1, xmm2);
     }
   }
 
diff --git a/deps/v8/test/cctest/test-elements-kind.cc b/deps/v8/test/cctest/test-elements-kind.cc
index d08f6200ab734b..2f6ec6c164bd88 100644
--- a/deps/v8/test/cctest/test-elements-kind.cc
+++ b/deps/v8/test/cctest/test-elements-kind.cc
@@ -63,6 +63,7 @@ bool ElementsKindIsHoleyElementsKindForRead(ElementsKind kind) {
     case ElementsKind::HOLEY_SMI_ELEMENTS:
     case ElementsKind::HOLEY_ELEMENTS:
     case ElementsKind::HOLEY_DOUBLE_ELEMENTS:
+    case ElementsKind::HOLEY_NONEXTENSIBLE_ELEMENTS:
     case ElementsKind::HOLEY_SEALED_ELEMENTS:
     case ElementsKind::HOLEY_FROZEN_ELEMENTS:
       return true;
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 436925146b69ec..91db7e51a5176c 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -438,9 +438,7 @@ TEST(VectorLoadICStates) {
       Handle<FeedbackVector>(f->feedback_vector(), isolate);
   FeedbackSlot slot(0);
   FeedbackNexus nexus(feedback_vector, slot);
-  CHECK_EQ(PREMONOMORPHIC, nexus.ic_state());
 
-  CompileRun("f(o)");
   CHECK_EQ(MONOMORPHIC, nexus.ic_state());
   // Verify that the monomorphic map is the one we expect.
   v8::MaybeLocal<v8::Value> v8_o =
@@ -526,16 +524,13 @@ TEST(VectorLoadICOnSmi) {
   CompileRun(
       "var o = { foo: 3 };"
       "%EnsureFeedbackVectorForFunction(f);"
-      "function f(a) { return a.foo; } f(o);");
+      "function f(a) { return a.foo; } f(34);");
   Handle<JSFunction> f = GetFunction("f");
   // There should be one IC.
   Handle<FeedbackVector> feedback_vector =
       Handle<FeedbackVector>(f->feedback_vector(), isolate);
   FeedbackSlot slot(0);
   FeedbackNexus nexus(feedback_vector, slot);
-  CHECK_EQ(PREMONOMORPHIC, nexus.ic_state());
-
-  CompileRun("f(34)");
   CHECK_EQ(MONOMORPHIC, nexus.ic_state());
   // Verify that the monomorphic map is the one we expect.
   Map number_map = ReadOnlyRoots(heap).heap_number_map();
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index d23078b68a49e3..512bf2a9c6f3d1 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -2282,7 +2282,9 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
       {SEALED, factory->sealed_symbol(),
        FLAG_enable_sealed_frozen_elements_kind ? HOLEY_SEALED_ELEMENTS
                                                : DICTIONARY_ELEMENTS},
-      {NONE, factory->nonextensible_symbol(), DICTIONARY_ELEMENTS}};
+      {NONE, factory->nonextensible_symbol(),
+       FLAG_enable_sealed_frozen_elements_kind ? HOLEY_NONEXTENSIBLE_ELEMENTS
+                                               : DICTIONARY_ELEMENTS}};
   for (size_t i = 0; i < arraysize(configs); i++) {
     TestGeneralizeFieldWithSpecialTransition(
         configs[i],
@@ -2348,7 +2350,9 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
       {SEALED, factory->sealed_symbol(),
        FLAG_enable_sealed_frozen_elements_kind ? HOLEY_SEALED_ELEMENTS
                                                : DICTIONARY_ELEMENTS},
-      {NONE, factory->nonextensible_symbol(), DICTIONARY_ELEMENTS}};
+      {NONE, factory->nonextensible_symbol(),
+       FLAG_enable_sealed_frozen_elements_kind ? HOLEY_NONEXTENSIBLE_ELEMENTS
+                                               : DICTIONARY_ELEMENTS}};
   for (size_t i = 0; i < arraysize(configs); i++) {
     TestGeneralizeFieldWithSpecialTransition(
         configs[i],
@@ -2407,7 +2411,6 @@ TEST(PrototypeTransitionFromMapOwningDescriptor) {
       {PropertyConstness::kMutable, Representation::Tagged(), any_type}, true);
 }
 
-
 TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
@@ -2461,7 +2464,6 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
       {PropertyConstness::kMutable, Representation::Tagged(), any_type}, true);
 }
 
-
 ////////////////////////////////////////////////////////////////////////////////
 // A set of tests for higher level transitioning mechanics.
 //
@@ -2776,15 +2778,15 @@ TEST(TransitionAccessorConstantToSameAccessorConstant) {
 // TODO(ishell): add this test once IS_ACCESSOR_FIELD_SUPPORTED is supported.
 // TEST(TransitionAccessorConstantToAnotherAccessorConstant)
 
-TEST(HoleyMutableHeapNumber) {
+TEST(HoleyHeapNumber) {
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
   Isolate* isolate = CcTest::i_isolate();
 
-  auto mhn = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+  auto mhn = isolate->factory()->NewHeapNumberWithHoleNaN();
   CHECK_EQ(kHoleNanInt64, mhn->value_as_bits());
 
-  mhn = isolate->factory()->NewMutableHeapNumber(0.0);
+  mhn = isolate->factory()->NewHeapNumber(0.0);
   CHECK_EQ(uint64_t{0}, mhn->value_as_bits());
 
   mhn->set_value_as_bits(kHoleNanInt64);
@@ -2796,12 +2798,12 @@ TEST(HoleyMutableHeapNumber) {
   Handle<Object> obj =
       Object::NewStorageFor(isolate, isolate->factory()->uninitialized_value(),
                             Representation::Double());
-  CHECK(obj->IsMutableHeapNumber());
-  CHECK_EQ(kHoleNanInt64, MutableHeapNumber::cast(*obj).value_as_bits());
+  CHECK(obj->IsHeapNumber());
+  CHECK_EQ(kHoleNanInt64, HeapNumber::cast(*obj).value_as_bits());
 
   obj = Object::NewStorageFor(isolate, mhn, Representation::Double());
-  CHECK(obj->IsMutableHeapNumber());
-  CHECK_EQ(kHoleNanInt64, MutableHeapNumber::cast(*obj).value_as_bits());
+  CHECK(obj->IsHeapNumber());
+  CHECK_EQ(kHoleNanInt64, HeapNumber::cast(*obj).value_as_bits());
 }
 
 namespace {
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 93c7048f8142c0..4e5fcffa62de78 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -209,5 +209,11 @@ TEST(FlagsJitlessImplications) {
   }
 }
 
+TEST(FlagsRegexpInterpretAllImplications) {
+  if (FLAG_regexp_interpret_all) {
+    CHECK(!FLAG_regexp_tier_up);
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index e534670bb6389d..3aec4ae0039607 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -46,6 +46,7 @@
 #include "src/profiler/heap-snapshot-generator-inl.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/collector.h"
+#include "test/cctest/heap/heap-utils.h"
 
 using i::AllocationTraceNode;
 using i::AllocationTraceTree;
@@ -1693,6 +1694,154 @@ TEST(HeapSnapshotRetainedObjectInfo) {
   CHECK_EQ(native_group_ccc, GetChildByName(n_CCC, "ccc-group"));
 }
 
+namespace {
+
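+// Builds an embedder graph with one native node merged into its V8 wrapper
+// and one standalone native node, so both object-id lookup paths are
+// exercised.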
+class EmbedderGraphBuilderForNativeSnapshotObjectId final {
+ public:
+  class RegularNode : public v8::EmbedderGraph::Node {
+   public:
+    RegularNode(v8::NativeObject native_object, const char* name, size_t size,
+                Node* wrapper_node)
+        : name_(name),
+          size_(size),
+          native_object_(native_object),
+          wrapper_node_(wrapper_node) {}
+    // v8::EmbedderGraph::Node
+    const char* Name() override { return name_; }
+    size_t SizeInBytes() override { return size_; }
+    Node* WrapperNode() override { return wrapper_node_; }
+    v8::NativeObject GetNativeObject() override {
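+      // A RootNode supplies no native object, so the node itself serves as a
+      // stable identifier.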
+      return native_object_ ? native_object_ : this;
+    }
+
+   private:
+    const char* name_;
+    size_t size_;
+    v8::NativeObject native_object_;
+    Node* wrapper_node_;
+  };
+
+  class RootNode : public RegularNode {
+   public:
+    explicit RootNode(const char* name)
+        : RegularNode(nullptr, name, 0, nullptr) {}
+    // v8::EmbedderGraph::EmbedderNode
+    bool IsRootNode() override { return true; }
+  };
+
+  struct BuildParameter {
+    v8::Persistent<v8::String>* wrapper;
+    void* native1;
+    void* native2;
+  };
+
+  static void BuildEmbedderGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph,
+                                 void* data) {
+    BuildParameter* parameter = reinterpret_cast<BuildParameter*>(data);
+    v8::Local<v8::String> local_str =
+        v8::Local<v8::String>::New(isolate, *(parameter->wrapper));
+    auto* v8_node = graph->V8Node(local_str);
+    CHECK(!v8_node->IsEmbedderNode());
+    auto* root_node =
+        graph->AddNode(std::unique_ptr<RootNode>(new RootNode("root")));
+    auto* non_merged_node = graph->AddNode(std::unique_ptr<RegularNode>(
+        new RegularNode(parameter->native1, "non-merged", 0, nullptr)));
+    auto* merged_node = graph->AddNode(std::unique_ptr<RegularNode>(
+        new RegularNode(parameter->native2, "merged", 0, v8_node)));
+    graph->AddEdge(root_node, non_merged_node);
+    graph->AddEdge(root_node, merged_node);
+  }
+};
+
+}  // namespace
+
+TEST(NativeSnapshotObjectId) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+  v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
+
+  v8::Persistent<v8::String> wrapper(isolate, v8_str("wrapper"));
+  int native1;
+  int native2;
+
+  EmbedderGraphBuilderForNativeSnapshotObjectId::BuildParameter parameter{
+      &wrapper, &native1, &native2};
+  heap_profiler->AddBuildEmbedderGraphCallback(
+      EmbedderGraphBuilderForNativeSnapshotObjectId::BuildEmbedderGraph,
+      &parameter);
+  const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+  CHECK(ValidateSnapshot(snapshot));
+
+  v8::SnapshotObjectId non_merged_id = heap_profiler->GetObjectId(&native1);
+  CHECK_NE(v8::HeapProfiler::kUnknownObjectId, non_merged_id);
+  v8::SnapshotObjectId merged_id = heap_profiler->GetObjectId(&native2);
+  CHECK_NE(v8::HeapProfiler::kUnknownObjectId, merged_id);
+  CHECK_NE(non_merged_id, merged_id);
+  const v8::HeapGraphNode* non_merged_node =
+      snapshot->GetNodeById(non_merged_id);
+  CHECK_NOT_NULL(non_merged_node);
+  const v8::HeapGraphNode* merged_node = snapshot->GetNodeById(merged_id);
+  CHECK_NOT_NULL(merged_node);
+
+  heap_profiler->ClearObjectIds();
+  CHECK_EQ(v8::HeapProfiler::kUnknownObjectId,
+           heap_profiler->GetObjectId(&native1));
+  CHECK_EQ(v8::HeapProfiler::kUnknownObjectId,
+           heap_profiler->GetObjectId(&native2));
+}
+
+TEST(NativeSnapshotObjectIdMoving) {
+  // Required to allow moving specific objects.
+  i::FLAG_manual_evacuation_candidates_selection = true;
+
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+  v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
+  heap_profiler->StartTrackingHeapObjects(true);
+
+  v8::Persistent<v8::String> wrapper(isolate, v8_str("wrapper"));
+  int native1;
+  int native2;
+
+  EmbedderGraphBuilderForNativeSnapshotObjectId::BuildParameter parameter{
+      &wrapper, &native1, &native2};
+  heap_profiler->AddBuildEmbedderGraphCallback(
+      EmbedderGraphBuilderForNativeSnapshotObjectId::BuildEmbedderGraph,
+      &parameter);
+  const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+  CHECK(ValidateSnapshot(snapshot));
+
+  v8::SnapshotObjectId non_merged_id = heap_profiler->GetObjectId(&native1);
+  CHECK_NE(v8::HeapProfiler::kUnknownObjectId, non_merged_id);
+  v8::SnapshotObjectId merged_id = heap_profiler->GetObjectId(&native2);
+  CHECK_NE(v8::HeapProfiler::kUnknownObjectId, merged_id);
+  CHECK_NE(non_merged_id, merged_id);
+  const v8::HeapGraphNode* non_merged_node =
+      snapshot->GetNodeById(non_merged_id);
+  CHECK_NOT_NULL(non_merged_node);
+  const v8::HeapGraphNode* merged_node = snapshot->GetNodeById(merged_id);
+  CHECK_NOT_NULL(merged_node);
+
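+  // Force the wrapper string onto an evacuation candidate page, then compact
+  // the heap so the string actually moves.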
+  {
+    v8::HandleScope scope(isolate);
+    auto local = v8::Local<v8::String>::New(isolate, wrapper);
+    i::Handle<i::String> internal = i::Handle<i::String>::cast(
+        v8::Utils::OpenHandle(*v8::Local<v8::String>::Cast(local)));
+    i::heap::ForceEvacuationCandidate(i::Page::FromHeapObject(*internal));
+  }
+  CcTest::CollectAllGarbage();
+
+  non_merged_id = heap_profiler->GetObjectId(&native1);
+  CHECK_NE(v8::HeapProfiler::kUnknownObjectId, non_merged_id);
+  merged_id = heap_profiler->GetObjectId(&native2);
+  CHECK_NE(v8::HeapProfiler::kUnknownObjectId, merged_id);
+  CHECK_NE(non_merged_id, merged_id);
+
+  heap_profiler->StopTrackingHeapObjects();
+}
+
 TEST(DeleteAllHeapSnapshots) {
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index f0df0c614908a1..6a25536dd544ba 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -84,8 +84,8 @@ static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
     return obj.RawFastDoublePropertyAt(field_index);
   } else {
     Object value = obj.RawFastPropertyAt(field_index);
-    if (value.IsMutableHeapNumber()) {
-      return MutableHeapNumber::cast(value).value();
+    if (value.IsHeapNumber()) {
+      return HeapNumber::cast(value).value();
     } else {
       return value.Number();
     }
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index 51add24a60e673..9e44c3ad204db7 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -731,7 +731,7 @@ TEST(TestJSWeakRefKeepDuringJob) {
   CHECK(!weak_ref->target().IsUndefined(isolate));
 
   // Clears the KeepDuringJob set.
-  isolate->default_microtask_queue()->RunMicrotasks(isolate);
+  context->GetIsolate()->ClearKeptObjects();
   CcTest::CollectAllGarbage();
 
   CHECK(weak_ref->target().IsUndefined(isolate));
@@ -769,7 +769,7 @@ TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
   CHECK(!weak_ref->target().IsUndefined(isolate));
 
   // Clears the KeepDuringJob set.
-  isolate->default_microtask_queue()->RunMicrotasks(isolate);
+  context->GetIsolate()->ClearKeptObjects();
   heap::SimulateIncrementalMarking(heap, true);
   CcTest::CollectAllGarbage();
 
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 571b0000eb312e..092c1078413a29 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -82,7 +82,7 @@ void UnlockForDeoptimization(const v8::FunctionCallbackInfo<v8::Value>& args) {
     isolate->Exit();
     v8::Unlocker unlocker(isolate);
     // Starts the deoptimizing thread.
-    deoptimizer->Start();
+    CHECK(deoptimizer->Start());
     // Waits for deoptimization to finish.
     deoptimizer->Join();
   }
@@ -107,7 +107,7 @@ void UnlockForDeoptimizationIfReady(
       isolate->Exit();
       v8::Unlocker unlocker(isolate);
       // Starts the thread that deoptimizes the function.
-      deoptimizer->Start();
+      CHECK(deoptimizer->Start());
       // Waits for the deoptimizing thread to finish.
       deoptimizer->Join();
     }
@@ -339,7 +339,7 @@ TEST(KangarooIsolates) {
     CompileRun("function getValue() { return 30; }");
     thread1.reset(new KangarooThread(isolate, context));
   }
-  thread1->Start();
+  CHECK(thread1->Start());
   thread1->Join();
 }
 
@@ -364,9 +364,7 @@ class JoinableThread {
 
   virtual ~JoinableThread() = default;
 
-  void Start() {
-    thread_.Start();
-  }
+  void Start() { CHECK(thread_.Start()); }
 
   void Join() {
     semaphore_.Wait();
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 83060f0cac27a9..854a31f66b9910 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -37,6 +37,7 @@
 #include "src/execution/vm-state-inl.h"
 #include "src/init/v8.h"
 #include "src/objects/objects-inl.h"
+#include "src/profiler/tick-sample.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/trace-extension.h"
 
diff --git a/deps/v8/test/cctest/test-orderedhashtable.cc b/deps/v8/test/cctest/test-orderedhashtable.cc
index 9b1bc651fa790c..44a845eb7446e9 100644
--- a/deps/v8/test/cctest/test-orderedhashtable.cc
+++ b/deps/v8/test/cctest/test-orderedhashtable.cc
@@ -509,14 +509,14 @@ TEST(OrderedHashTableInsertion) {
   Handle<Smi> key1(Smi::FromInt(1), isolate);
   Handle<Smi> value1(Smi::FromInt(1), isolate);
   CHECK(!OrderedHashMap::HasKey(isolate, *map, *key1));
-  map = OrderedHashMap::Add(isolate, map, key1, value1);
+  map = OrderedHashMap::Add(isolate, map, key1, value1).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(1, map->NumberOfElements());
   CHECK(OrderedHashMap::HasKey(isolate, *map, *key1));
 
   // Add existing key.
-  map = OrderedHashMap::Add(isolate, map, key1, value1);
+  map = OrderedHashMap::Add(isolate, map, key1, value1).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(1, map->NumberOfElements());
@@ -525,14 +525,14 @@ TEST(OrderedHashTableInsertion) {
   Handle<String> key2 = factory->NewStringFromAsciiChecked("foo");
   Handle<String> value = factory->NewStringFromAsciiChecked("bar");
   CHECK(!OrderedHashMap::HasKey(isolate, *map, *key2));
-  map = OrderedHashMap::Add(isolate, map, key2, value);
+  map = OrderedHashMap::Add(isolate, map, key2, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(2, map->NumberOfElements());
   CHECK(OrderedHashMap::HasKey(isolate, *map, *key1));
   CHECK(OrderedHashMap::HasKey(isolate, *map, *key2));
 
-  map = OrderedHashMap::Add(isolate, map, key2, value);
+  map = OrderedHashMap::Add(isolate, map, key2, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(2, map->NumberOfElements());
@@ -541,7 +541,7 @@ TEST(OrderedHashTableInsertion) {
 
   Handle<Symbol> key3 = factory->NewSymbol();
   CHECK(!OrderedHashMap::HasKey(isolate, *map, *key3));
-  map = OrderedHashMap::Add(isolate, map, key3, value);
+  map = OrderedHashMap::Add(isolate, map, key3, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(3, map->NumberOfElements());
@@ -549,7 +549,7 @@ TEST(OrderedHashTableInsertion) {
   CHECK(OrderedHashMap::HasKey(isolate, *map, *key2));
   CHECK(OrderedHashMap::HasKey(isolate, *map, *key3));
 
-  map = OrderedHashMap::Add(isolate, map, key3, value);
+  map = OrderedHashMap::Add(isolate, map, key3, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(3, map->NumberOfElements());
@@ -559,7 +559,7 @@ TEST(OrderedHashTableInsertion) {
 
   Handle<Object> key4 = factory->NewHeapNumber(42.0);
   CHECK(!OrderedHashMap::HasKey(isolate, *map, *key4));
-  map = OrderedHashMap::Add(isolate, map, key4, value);
+  map = OrderedHashMap::Add(isolate, map, key4, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(4, map->NumberOfElements());
@@ -568,7 +568,7 @@ TEST(OrderedHashTableInsertion) {
   CHECK(OrderedHashMap::HasKey(isolate, *map, *key3));
   CHECK(OrderedHashMap::HasKey(isolate, *map, *key4));
 
-  map = OrderedHashMap::Add(isolate, map, key4, value);
+  map = OrderedHashMap::Add(isolate, map, key4, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(4, map->NumberOfElements());
@@ -587,7 +587,7 @@ TEST(OrderedHashMapDuplicateHashCode) {
   Handle<OrderedHashMap> map = factory->NewOrderedHashMap();
   Handle<JSObject> key1 = factory->NewJSObjectWithNullProto();
   Handle<JSObject> value = factory->NewJSObjectWithNullProto();
-  map = OrderedHashMap::Add(isolate, map, key1, value);
+  map = OrderedHashMap::Add(isolate, map, key1, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(1, map->NumberOfElements());
@@ -596,7 +596,7 @@ TEST(OrderedHashMapDuplicateHashCode) {
   Handle<JSObject> key2 = factory->NewJSObjectWithNullProto();
   CopyHashCode(key1, key2);
 
-  map = OrderedHashMap::Add(isolate, map, key2, value);
+  map = OrderedHashMap::Add(isolate, map, key2, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(2, map->NumberOfElements());
@@ -627,7 +627,7 @@ TEST(OrderedHashMapDeletion) {
   CHECK_EQ(0, map->NumberOfDeletedElements());
   CHECK(!OrderedHashMap::HasKey(isolate, *map, *key1));
 
-  map = OrderedHashMap::Add(isolate, map, key1, value1);
+  map = OrderedHashMap::Add(isolate, map, key1, value1).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(1, map->NumberOfElements());
@@ -642,7 +642,7 @@ TEST(OrderedHashMapDeletion) {
   CHECK_EQ(1, map->NumberOfDeletedElements());
   CHECK(!OrderedHashMap::HasKey(isolate, *map, *key1));
 
-  map = OrderedHashMap::Add(isolate, map, key1, value1);
+  map = OrderedHashMap::Add(isolate, map, key1, value1).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(1, map->NumberOfElements());
@@ -651,7 +651,7 @@ TEST(OrderedHashMapDeletion) {
 
   Handle<String> key2 = factory->NewStringFromAsciiChecked("foo");
   CHECK(!OrderedHashMap::HasKey(isolate, *map, *key2));
-  map = OrderedHashMap::Add(isolate, map, key2, value);
+  map = OrderedHashMap::Add(isolate, map, key2, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(2, map->NumberOfElements());
@@ -660,7 +660,7 @@ TEST(OrderedHashMapDeletion) {
 
   Handle<Symbol> key3 = factory->NewSymbol();
   CHECK(!OrderedHashMap::HasKey(isolate, *map, *key3));
-  map = OrderedHashMap::Add(isolate, map, key3, value);
+  map = OrderedHashMap::Add(isolate, map, key3, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(3, map->NumberOfElements());
@@ -709,7 +709,7 @@ TEST(OrderedHashMapDeletion) {
 
   // Delete non existent key from non empty hash table
   map = OrderedHashMap::Shrink(isolate, map);
-  map = OrderedHashMap::Add(isolate, map, key1, value);
+  map = OrderedHashMap::Add(isolate, map, key1, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(1, map->NumberOfElements());
@@ -858,7 +858,7 @@ TEST(OrderedHashMapDuplicateHashCodeDeletion) {
   Handle<OrderedHashMap> map = factory->NewOrderedHashMap();
   Handle<JSObject> key1 = factory->NewJSObjectWithNullProto();
   Handle<JSObject> value = factory->NewJSObjectWithNullProto();
-  map = OrderedHashMap::Add(isolate, map, key1, value);
+  map = OrderedHashMap::Add(isolate, map, key1, value).ToHandleChecked();
   Verify(isolate, map);
   CHECK_EQ(2, map->NumberOfBuckets());
   CHECK_EQ(1, map->NumberOfElements());
@@ -928,7 +928,7 @@ TEST(OrderedHashSetDeletion) {
   CHECK_EQ(0, set->NumberOfDeletedElements());
   CHECK(!OrderedHashSet::HasKey(isolate, *set, *key1));
 
-  set = OrderedHashSet::Add(isolate, set, key1);
+  set = OrderedHashSet::Add(isolate, set, key1).ToHandleChecked();
   Verify(isolate, set);
   CHECK_EQ(2, set->NumberOfBuckets());
   CHECK_EQ(1, set->NumberOfElements());
@@ -943,7 +943,7 @@ TEST(OrderedHashSetDeletion) {
   CHECK_EQ(1, set->NumberOfDeletedElements());
   CHECK(!OrderedHashSet::HasKey(isolate, *set, *key1));
 
-  set = OrderedHashSet::Add(isolate, set, key1);
+  set = OrderedHashSet::Add(isolate, set, key1).ToHandleChecked();
   Verify(isolate, set);
   CHECK_EQ(2, set->NumberOfBuckets());
   CHECK_EQ(1, set->NumberOfElements());
@@ -952,7 +952,7 @@ TEST(OrderedHashSetDeletion) {
 
   Handle<String> key2 = factory->NewStringFromAsciiChecked("foo");
   CHECK(!OrderedHashSet::HasKey(isolate, *set, *key2));
-  set = OrderedHashSet::Add(isolate, set, key2);
+  set = OrderedHashSet::Add(isolate, set, key2).ToHandleChecked();
   Verify(isolate, set);
   CHECK_EQ(2, set->NumberOfBuckets());
   CHECK_EQ(2, set->NumberOfElements());
@@ -961,7 +961,7 @@ TEST(OrderedHashSetDeletion) {
 
   Handle<Symbol> key3 = factory->NewSymbol();
   CHECK(!OrderedHashSet::HasKey(isolate, *set, *key3));
-  set = OrderedHashSet::Add(isolate, set, key3);
+  set = OrderedHashSet::Add(isolate, set, key3).ToHandleChecked();
   Verify(isolate, set);
   CHECK_EQ(2, set->NumberOfBuckets());
   CHECK_EQ(3, set->NumberOfElements());
@@ -1010,7 +1010,7 @@ TEST(OrderedHashSetDeletion) {
 
   // Delete non existent key from non empty hash table
   set = OrderedHashSet::Shrink(isolate, set);
-  set = OrderedHashSet::Add(isolate, set, key1);
+  set = OrderedHashSet::Add(isolate, set, key1).ToHandleChecked();
   Verify(isolate, set);
   CHECK_EQ(2, set->NumberOfBuckets());
   CHECK_EQ(1, set->NumberOfElements());
@@ -1156,7 +1156,7 @@ TEST(OrderedHashSetDuplicateHashCodeDeletion) {
 
   Handle<OrderedHashSet> set = factory->NewOrderedHashSet();
   Handle<JSObject> key1 = factory->NewJSObjectWithNullProto();
-  set = OrderedHashSet::Add(isolate, set, key1);
+  set = OrderedHashSet::Add(isolate, set, key1).ToHandleChecked();
   Verify(isolate, set);
   CHECK_EQ(2, set->NumberOfBuckets());
   CHECK_EQ(1, set->NumberOfElements());
@@ -1209,25 +1209,26 @@ TEST(OrderedHashSetHandlerInsertion) {
   Isolate* isolate = GetIsolateFrom(&context);
   HandleScope scope(isolate);
 
-  Handle<HeapObject> set = OrderedHashSetHandler::Allocate(isolate, 4);
+  Handle<HeapObject> set =
+      OrderedHashSetHandler::Allocate(isolate, 4).ToHandleChecked();
   Verify(isolate, set);
 
   // Add a new key.
   Handle<Smi> key1(Smi::FromInt(1), isolate);
   CHECK(!OrderedHashSetHandler::HasKey(isolate, set, key1));
-  set = OrderedHashSetHandler::Add(isolate, set, key1);
+  set = OrderedHashSetHandler::Add(isolate, set, key1).ToHandleChecked();
   Verify(isolate, set);
   CHECK(OrderedHashSetHandler::HasKey(isolate, set, key1));
 
   // Add existing key.
-  set = OrderedHashSetHandler::Add(isolate, set, key1);
+  set = OrderedHashSetHandler::Add(isolate, set, key1).ToHandleChecked();
   Verify(isolate, set);
   CHECK(OrderedHashSetHandler::HasKey(isolate, set, key1));
   CHECK(SmallOrderedHashSet::Is(set));
 
   for (int i = 0; i < 1024; i++) {
     Handle<Smi> key_i(Smi::FromInt(i), isolate);
-    set = OrderedHashSetHandler::Add(isolate, set, key_i);
+    set = OrderedHashSetHandler::Add(isolate, set, key_i).ToHandleChecked();
     Verify(isolate, set);
     for (int j = 0; j <= i; j++) {
       Handle<Smi> key_j(Smi::FromInt(j), isolate);
@@ -1242,26 +1243,30 @@ TEST(OrderedHashMapHandlerInsertion) {
   Isolate* isolate = GetIsolateFrom(&context);
   HandleScope scope(isolate);
 
-  Handle<HeapObject> map = OrderedHashMapHandler::Allocate(isolate, 4);
+  Handle<HeapObject> map =
+      OrderedHashMapHandler::Allocate(isolate, 4).ToHandleChecked();
   Verify(isolate, map);
 
   // Add a new key.
   Handle<Smi> key1(Smi::FromInt(1), isolate);
   Handle<Smi> value1(Smi::FromInt(1), isolate);
   CHECK(!OrderedHashMapHandler::HasKey(isolate, map, key1));
-  map = OrderedHashMapHandler::Add(isolate, map, key1, value1);
+  map =
+      OrderedHashMapHandler::Add(isolate, map, key1, value1).ToHandleChecked();
   Verify(isolate, map);
   CHECK(OrderedHashMapHandler::HasKey(isolate, map, key1));
 
   // Add existing key.
-  map = OrderedHashMapHandler::Add(isolate, map, key1, value1);
+  map =
+      OrderedHashMapHandler::Add(isolate, map, key1, value1).ToHandleChecked();
   Verify(isolate, map);
   CHECK(OrderedHashMapHandler::HasKey(isolate, map, key1));
   CHECK(SmallOrderedHashMap::Is(map));
   for (int i = 0; i < 1024; i++) {
     Handle<Smi> key_i(Smi::FromInt(i), isolate);
     Handle<Smi> value_i(Smi::FromInt(i), isolate);
-    map = OrderedHashMapHandler::Add(isolate, map, key_i, value_i);
+    map = OrderedHashMapHandler::Add(isolate, map, key_i, value_i)
+              .ToHandleChecked();
     Verify(isolate, map);
     for (int j = 0; j <= i; j++) {
       Handle<Smi> key_j(Smi::FromInt(j), isolate);
@@ -1286,7 +1291,8 @@ TEST(OrderedNameDictionaryInsertion) {
   Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
   CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key1));
   PropertyDetails details = PropertyDetails::Empty();
-  dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(1, dict->NumberOfElements());
@@ -1295,7 +1301,8 @@ TEST(OrderedNameDictionaryInsertion) {
 
   Handle<Symbol> key2 = factory->NewSymbol();
   CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key2));
-  dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(2, dict->NumberOfElements());
@@ -1317,7 +1324,8 @@ TEST(OrderedNameDictionaryFindEntry) {
   Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
   Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
   PropertyDetails details = PropertyDetails::Empty();
-  dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(1, dict->NumberOfElements());
@@ -1327,7 +1335,8 @@ TEST(OrderedNameDictionaryFindEntry) {
   CHECK_NE(entry, OrderedNameDictionary::kNotFound);
 
   Handle<Symbol> key2 = factory->NewSymbol();
-  dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(2, dict->NumberOfElements());
@@ -1356,7 +1365,8 @@ TEST(OrderedNameDictionaryValueAtAndValueAtPut) {
   Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
   CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key1));
   PropertyDetails details = PropertyDetails::Empty();
-  dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(1, dict->NumberOfElements());
@@ -1376,7 +1386,8 @@ TEST(OrderedNameDictionaryValueAtAndValueAtPut) {
 
   Handle<Symbol> key2 = factory->NewSymbol();
   CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key2));
-  dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(2, dict->NumberOfElements());
@@ -1414,7 +1425,8 @@ TEST(OrderedNameDictionaryDetailsAtAndDetailsAtPut) {
   Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
   CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key1));
   PropertyDetails details = PropertyDetails::Empty();
-  dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(1, dict->NumberOfElements());
@@ -1434,7 +1446,8 @@ TEST(OrderedNameDictionaryDetailsAtAndDetailsAtPut) {
 
   Handle<Symbol> key2 = factory->NewSymbol();
   CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key2));
-  dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(2, dict->NumberOfElements());
@@ -1725,7 +1738,8 @@ TEST(OrderedNameDictionarySetAndMigrateHash) {
   for (int i = 0; i <= 1024; i++) {
     CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
     Handle<String> key = isolate->factory()->InternalizeUtf8String(buf);
-    dict = OrderedNameDictionary::Add(isolate, dict, key, value, details);
+    dict = OrderedNameDictionary::Add(isolate, dict, key, value, details)
+               .ToHandleChecked();
     Verify(isolate, dict);
     CHECK_EQ(100, dict->Hash());
   }
@@ -1736,7 +1750,8 @@ TEST(OrderedNameDictionaryHandlerInsertion) {
   Isolate* isolate = GetIsolateFrom(&context);
   HandleScope scope(isolate);
 
-  Handle<HeapObject> table = OrderedNameDictionaryHandler::Allocate(isolate, 4);
+  Handle<HeapObject> table =
+      OrderedNameDictionaryHandler::Allocate(isolate, 4).ToHandleChecked();
   CHECK(table->IsSmallOrderedNameDictionary());
   Verify(isolate, table);
 
@@ -1745,8 +1760,8 @@ TEST(OrderedNameDictionaryHandlerInsertion) {
   Handle<String> key = isolate->factory()->InternalizeUtf8String("foo");
   PropertyDetails details = PropertyDetails::Empty();
 
-  table =
-      OrderedNameDictionaryHandler::Add(isolate, table, key, value, details);
+  table = OrderedNameDictionaryHandler::Add(isolate, table, key, value, details)
+              .ToHandleChecked();
   DCHECK(key->IsUniqueName());
   Verify(isolate, table);
   CHECK(table->IsSmallOrderedNameDictionary());
@@ -1758,7 +1773,8 @@ TEST(OrderedNameDictionaryHandlerInsertion) {
     CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
     key = isolate->factory()->InternalizeUtf8String(buf);
     table =
-        OrderedNameDictionaryHandler::Add(isolate, table, key, value, details);
+        OrderedNameDictionaryHandler::Add(isolate, table, key, value, details)
+            .ToHandleChecked();
     DCHECK(key->IsUniqueName());
     Verify(isolate, table);
 
@@ -1798,7 +1814,8 @@ TEST(OrderedNameDictionarySetEntry) {
   Handle<String> value = factory->InternalizeUtf8String("bar");
   CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key));
   PropertyDetails details = PropertyDetails::Empty();
-  dict = OrderedNameDictionary::Add(isolate, dict, key, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(1, dict->NumberOfElements());
@@ -1884,7 +1901,8 @@ TEST(OrderedNameDictionaryDeleteEntry) {
   Handle<String> value = factory->InternalizeUtf8String("bar");
   CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key));
   PropertyDetails details = PropertyDetails::Empty();
-  dict = OrderedNameDictionary::Add(isolate, dict, key, value, details);
+  dict = OrderedNameDictionary::Add(isolate, dict, key, value, details)
+             .ToHandleChecked();
   Verify(isolate, dict);
   CHECK_EQ(2, dict->NumberOfBuckets());
   CHECK_EQ(1, dict->NumberOfElements());
@@ -1903,7 +1921,8 @@ TEST(OrderedNameDictionaryDeleteEntry) {
   for (int i = 0; i < 100; i++) {
     CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
     key = factory->InternalizeUtf8String(buf);
-    dict = OrderedNameDictionary::Add(isolate, dict, key, value, details);
+    dict = OrderedNameDictionary::Add(isolate, dict, key, value, details)
+               .ToHandleChecked();
     DCHECK(key->IsUniqueName());
     Verify(isolate, dict);
   }
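Every Add()/Allocate() call site above migrates to a MaybeHandle-returning API: growing the table can fail to allocate, and ToHandleChecked(), which is acceptable in tests, crashes on failure. A sketch of how non-test code would consume the same API (the TryAdd wrapper is hypothetical):

    // Propagates allocation failure instead of crashing, unlike the
    // ToHandleChecked() calls used in the tests above.
    MaybeHandle<OrderedHashMap> TryAdd(Isolate* isolate,
                                       Handle<OrderedHashMap> map,
                                       Handle<Object> key,
                                       Handle<Object> value) {
      Handle<OrderedHashMap> result;
      if (!OrderedHashMap::Add(isolate, map, key, value).ToHandle(&result)) {
        return MaybeHandle<OrderedHashMap>();  // signal OOM to the caller
      }
      return result;
    }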
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index ba69b6d2f6723d..857bd7a45462b0 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -261,6 +261,7 @@ TEST(ArrowOrAssignmentOp) {
 bool TokenIsBinaryOp(Token::Value token) {
   switch (token) {
     case Token::COMMA:
+    case Token::NULLISH:
     case Token::OR:
     case Token::AND:
 #define T(name, string, precedence) case Token::name:
@@ -390,6 +391,7 @@ bool TokenIsPropertyOrCall(Token::Value token) {
     case Token::TEMPLATE_SPAN:
     case Token::TEMPLATE_TAIL:
     case Token::PERIOD:
+    case Token::QUESTION_PERIOD:
     case Token::LBRACK:
     case Token::LPAREN:
       return true;
@@ -1096,7 +1098,7 @@ TEST(ScopeUsesArgumentsSuperThis) {
       }
       if (is_sloppy(scope->language_mode())) {
         CHECK_EQ((source_data[i].expected & EVAL) != 0,
-                 scope->AsDeclarationScope()->calls_sloppy_eval());
+                 scope->AsDeclarationScope()->sloppy_eval_can_extend_vars());
       }
     }
   }
@@ -1529,7 +1531,8 @@ enum ParserFlag {
   kAllowHarmonyPrivateMethods,
   kAllowHarmonyDynamicImport,
   kAllowHarmonyImportMeta,
-  kAllowHarmonyNumericSeparator
+  kAllowHarmonyNullish,
+  kAllowHarmonyOptionalChaining,
 };
 
 enum ParserSyncTestResult {
@@ -1543,8 +1546,9 @@ void SetGlobalFlags(base::EnumSet<ParserFlag> flags) {
   i::FLAG_harmony_private_methods = flags.contains(kAllowHarmonyPrivateMethods);
   i::FLAG_harmony_dynamic_import = flags.contains(kAllowHarmonyDynamicImport);
   i::FLAG_harmony_import_meta = flags.contains(kAllowHarmonyImportMeta);
-  i::FLAG_harmony_numeric_separator =
-      flags.contains(kAllowHarmonyNumericSeparator);
+  i::FLAG_harmony_optional_chaining =
+      flags.contains(kAllowHarmonyOptionalChaining);
+  i::FLAG_harmony_nullish = flags.contains(kAllowHarmonyNullish);
 }
 
 void SetParserFlags(i::PreParser* parser, base::EnumSet<ParserFlag> flags) {
@@ -1555,8 +1559,9 @@ void SetParserFlags(i::PreParser* parser, base::EnumSet<ParserFlag> flags) {
       flags.contains(kAllowHarmonyDynamicImport));
   parser->set_allow_harmony_import_meta(
       flags.contains(kAllowHarmonyImportMeta));
-  parser->set_allow_harmony_numeric_separator(
-      flags.contains(kAllowHarmonyNumericSeparator));
+  parser->set_allow_harmony_optional_chaining(
+      flags.contains(kAllowHarmonyOptionalChaining));
+  parser->set_allow_harmony_nullish(flags.contains(kAllowHarmonyNullish));
 }
 
 void TestParserSyncWithFlags(i::Handle<i::String> source,
@@ -1883,6 +1888,18 @@ void RunModuleParserSyncTest(
                     always_false_len, true, test_preparser, ignore_error_msg);
 }
 
+TEST(NonOctalDecimalIntegerStrictError) {
+  v8::HandleScope handles(CcTest::isolate());
+  v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope context_scope(context);
+
+  const char* context_data[][2] = {{"\"use strict\";", ""}, {nullptr, nullptr}};
+  const char* statement_data[] = {"09", "09.1_2", nullptr};
+
+  RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, nullptr,
+                    0, nullptr, 0, false, true);
+}
+
 TEST(NumericSeparator) {
   v8::HandleScope handles(CcTest::isolate());
   v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
@@ -1894,11 +1911,7 @@ TEST(NumericSeparator) {
       "1_0_0_0", "1_0e+1",  "1_0e+1_0", "0xF_F_FF", "0o7_7_7", "0b0_1_0_1_0",
       ".3_2_1",  "0.0_2_1", "1_0.0_1",  ".0_1_2",   nullptr};
 
-  static const ParserFlag flags[] = {kAllowHarmonyNumericSeparator};
-  RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0, flags,
-                    1);
-
-  RunParserSyncTest(context_data, statement_data, kError);
+  RunParserSyncTest(context_data, statement_data, kSuccess);
 }
 
 TEST(NumericSeparatorErrors) {
@@ -1914,11 +1927,8 @@ TEST(NumericSeparatorErrors) {
       "0b1__1",   "0_b1",    "0_b_1", "0o777_", "0o_777",  "0o7__77",
       "0.0_2_1_", "0.0__21", "0_.01", "0._01",  nullptr};
 
-  static const ParserFlag flags[] = {kAllowHarmonyNumericSeparator};
-  RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, flags, 1,
-                    nullptr, 0, false, true, true);
-
-  RunParserSyncTest(context_data, statement_data, kError);
+  RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, nullptr,
+                    0, nullptr, 0, false, true);
 }
 
 TEST(NumericSeparatorImplicitOctalsErrors) {
@@ -1932,11 +1942,32 @@ TEST(NumericSeparatorImplicitOctalsErrors) {
                                   "0_7_7_7", "0_777",  "07_7_7_",
                                   "07__77",  "0__777", nullptr};
 
-  static const ParserFlag flags[] = {kAllowHarmonyNumericSeparator};
-  RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, flags, 1,
-                    nullptr, 0, false, true, true);
+  RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, nullptr,
+                    0, nullptr, 0, false, true);
+}
 
-  RunParserSyncTest(context_data, statement_data, kError);
+TEST(NumericSeparatorNonOctalDecimalInteger) {
+  v8::HandleScope handles(CcTest::isolate());
+  v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope context_scope(context);
+
+  const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
+  const char* statement_data[] = {"09.1_2", nullptr};
+
+  RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0, nullptr,
+                    0, nullptr, 0, false, true);
+}
+
+TEST(NumericSeparatorNonOctalDecimalIntegerErrors) {
+  v8::HandleScope handles(CcTest::isolate());
+  v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope context_scope(context);
+
+  const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
+  const char* statement_data[] = {"09_12", nullptr};
+
+  RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, nullptr,
+                    0, nullptr, 0, false, true);
 }
 
 TEST(NumericSeparatorUnicodeEscapeSequencesErrors) {
@@ -1949,12 +1980,74 @@ TEST(NumericSeparatorUnicodeEscapeSequencesErrors) {
   // https://github.com/tc39/proposal-numeric-separator/issues/25
   const char* statement_data[] = {"\\u{10_FFFF}", nullptr};
 
-  static const ParserFlag flags[] = {kAllowHarmonyNumericSeparator};
-  RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, flags, 1);
+  RunParserSyncTest(context_data, statement_data, kError);
+}
+
+TEST(OptionalChaining) {
+  v8::HandleScope handles(CcTest::isolate());
+  v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope context_scope(context);
+
+  const char* context_data[][2] = {
+      {"", ""}, {"'use strict';", ""}, {nullptr, nullptr}};
+  const char* statement_data[] = {"a?.b", "a?.['b']", "a?.()", nullptr};
+
+  static const ParserFlag flags[] = {kAllowHarmonyOptionalChaining};
+  RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0, flags,
+                    1, nullptr, 0, false, true, true);
+  RunParserSyncTest(context_data, statement_data, kError);
+}
+
+TEST(OptionalChainingTaggedError) {
+  v8::HandleScope handles(CcTest::isolate());
+  v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope context_scope(context);
+
+  const char* context_data[][2] = {
+      {"", ""}, {"'use strict';", ""}, {nullptr, nullptr}};
+  const char* statement_data[] = {"a?.b``", "a?.['b']``", "a?.()``", nullptr};
 
+  static const ParserFlag flags[] = {kAllowHarmonyOptionalChaining};
+  RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, flags, 1,
+                    nullptr, 0, false, true, true);
   RunParserSyncTest(context_data, statement_data, kError);
 }
 
+TEST(Nullish) {
+  v8::HandleScope handles(CcTest::isolate());
+  v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope context_scope(context);
+
+  const char* context_data[][2] = {
+      {"", ""}, {"'use strict';", ""}, {nullptr, nullptr}};
+  const char* statement_data[] = {"a ?? b", "a ?? b ?? c",
+                                  "a ?? b ? c : d"
+                                  "a ?? b ?? c ? d : e",
+                                  nullptr};
+
+  static const ParserFlag flags[] = {kAllowHarmonyNullish};
+  RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0, flags,
+                    1, nullptr, 0, false, true, true);
+  RunParserSyncTest(context_data, statement_data, kError);
+}
+
+TEST(NullishNotContained) {
+  v8::HandleScope handles(CcTest::isolate());
+  v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope context_scope(context);
+
+  const char* context_data[][2] = {
+      {"", ""}, {"'use strict';", ""}, {nullptr, nullptr}};
+  const char* statement_data[] = {"a || b ?? c", "a ?? b || c",
+                                  "a && b ?? c"
+                                  "a ?? b && c",
+                                  nullptr};
+
+  static const ParserFlag flags[] = {kAllowHarmonyNullish};
+  RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, flags, 1,
+                    nullptr, 0, false, true, true);
+}
+
 TEST(ErrorsEvalAndArguments) {
   // Tests that both preparsing and parsing produce the right kind of errors for
   // using "eval" and "arguments" as identifiers. Without the strict mode, it's
@@ -5542,7 +5635,10 @@ TEST(PrivateMethodsErrors) {
     "async #['a']() { }",
     "async *#['a]() { }",
 
-    // TODO(joyee): check duplicate accessors
+    "get #a() {} get #a() {}",
+    "get #a() {} get #['a']() {}",
+    "set #a(val) {} set #a(val) {}",
+    "set #a(val) {} set #['a'](val) {}",
 
     "#a\n#",
     "#a() c",
@@ -5561,6 +5657,14 @@ TEST(PrivateMethodsErrors) {
     "set #constructor(test) {}",
     "#constructor() {}",
     "get #constructor() {}",
+
+    "static async *#constructor() {}",
+    "static *#constructor() {}",
+    "static async #constructor() {}",
+    "static set #constructor(test) {}",
+    "static #constructor() {}",
+    "static get #constructor() {}",
+
     nullptr
   };
   // clang-format on
@@ -5572,8 +5676,60 @@ TEST(PrivateMethodsErrors) {
                     private_methods, arraysize(private_methods));
 }
 
+// Test that private members parse in class bodies nested in object literals
+TEST(PrivateMembersNestedInObjectLiteralsNoErrors) {
+  // clang-format off
+  const char* context_data[][2] = {{"({", "})"},
+                                   {"'use strict'; ({", "});"},
+                                   {nullptr, nullptr}};
+  const char* class_body_data[] = {
+    "a: class { #a = 1 }",
+    "a: class { #a = () => {} }",
+    "a: class { #a }",
+    "a: class { #a() { } }",
+    "a: class { get #a() { } }",
+    "a: class { set #a(foo) { } }",
+    "a: class { *#a() { } }",
+    "a: class { async #a() { } }",
+    "a: class { async *#a() { } }",
+    nullptr
+  };
+  // clang-format on
+
+  static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
+  RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+                    private_methods, arraysize(private_methods));
+}
+
+// Test that private members parse in class bodies nested in classes
+TEST(PrivateMembersInNestedClassNoErrors) {
+  // clang-format off
+  const char* context_data[][2] = {{"(class {", "});"},
+                                   {"(class extends Base {", "});"},
+                                   {"class C {", "}"},
+                                   {"class C extends Base {", "}"},
+                                   {nullptr, nullptr}};
+  const char* class_body_data[] = {
+    "a = class { #a = 1 }",
+    "a = class { #a = () => {} }",
+    "a = class { #a }",
+    "a = class { #a() { } }",
+    "a = class { get #a() { } }",
+    "a = class { set #a(foo) { } }",
+    "a = class { *#a() { } }",
+    "a = class { async #a() { } }",
+    "a = class { async *#a() { } }",
+    nullptr
+  };
+  // clang-format on
+
+  static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
+  RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+                    private_methods, arraysize(private_methods));
+}
+
 // Test that private members do not parse outside class bodies
-TEST(PrivateMembersInNonClassNoErrors) {
+TEST(PrivateMembersInNonClassErrors) {
   // clang-format off
   const char* context_data[][2] = {{"", ""},
                                    {"({", "})"},
@@ -5605,6 +5761,122 @@ TEST(PrivateMembersInNonClassNoErrors) {
                     private_methods, arraysize(private_methods));
 }
 
+// Test that nested private members parse
+TEST(PrivateMembersNestedNoErrors) {
+  // clang-format off
+  const char* context_data[][2] = {{"(class { get #a() { ", "} });"},
+                                   {
+                                     "(class { set #a(val) {} get #a() { ",
+                                     "} });"
+                                    },
+                                   {"(class { set #a(val) {", "} });"},
+                                   {"(class { #a() { ", "} });"},
+                                   {nullptr, nullptr}};
+  const char* class_body_data[] = {
+    "class C { #a() {} }",
+    "class C { get #a() {} }",
+    "class C { get #a() {} set #a(val) {} }",
+    "class C { set #a(val) {} }",
+    nullptr
+  };
+  // clang-format on
+
+  static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
+  RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+                    private_methods, arraysize(private_methods));
+}
+
+// Test that accessing undeclared private members results in early errors
+TEST(PrivateMembersEarlyErrors) {
+  // clang-format off
+  const char* context_data[][2] = {{"(class {", "});"},
+                                   {"(class extends Base {", "});"},
+                                   {"class C {", "}"},
+                                   {"class C extends Base {", "}"},
+                                   {nullptr, nullptr}};
+  const char* class_body_data[] = {
+    "set #b(val) { this.#a = val; }",
+    "get #b() { return this.#a; }",
+    "foo() { return this.#a; }",
+    "foo() { this.#a = 1; }",
+    nullptr
+  };
+  // clang-format on
+
+  RunParserSyncTest(context_data, class_body_data, kError);
+
+  static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
+  RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+                    private_methods, arraysize(private_methods));
+}
+
+// Test that accessing the wrong kind of private member does not error early.
+// Instead, these should be runtime errors.
+TEST(PrivateMembersWrongAccessNoEarlyErrors) {
+  // clang-format off
+  const char* context_data[][2] = {{"(class {", "});"},
+                                   {"(class extends Base {", "});"},
+                                   {"class C {", "}"},
+                                   {"class C extends Base {", "}"},
+                                   {nullptr, nullptr}};
+  const char* class_body_data[] = {
+    // Private setter only
+    "set #b(val) {} fn() { return this.#b; }",
+    "set #b(val) {} fn() { this.#b++; }",
+    // Nested private setter only
+    R"(get #b() {}
+    fn() {
+      return new class { set #b(val) {} fn() { this.#b++; } };
+    })",
+    R"(get #b() {}
+    fn() {
+      return new class { set #b(val) {} fn() { return this.#b; } };
+    })",
+
+    // Private getter only
+    "get #b() { } fn() { this.#b = 1; }",
+    "get #b() { } fn() { this.#b++; }",
+    "get #b() { } fn(obj) { ({ y: this.#b } = obj); }",
+    // Nested private getter only
+    R"(set #b(val) {}
+    fn() {
+      return new class { get #b() {} fn() { this.#b++; } };
+    })",
+    R"(set #b(val) {}
+    fn() {
+      return new class { get #b() {} fn() { this.#b = 1; } };
+    })",
+    R"(set #b(val) {}
+    fn() {
+      return new class { get #b() {} fn() { ({ y: this.#b } = obj); } };
+    })",
+
+    // Writing to private methods
+    "#b() { } fn() { this.#b = 1; }",
+    "#b() { } fn() { this.#b++; }",
+    "#b() {} fn(obj) { ({ y: this.#b } = obj); }",
+    // Writing to nested private methods
+    R"(#b() {}
+    fn() {
+      return new class { get #b() {} fn() { this.#b++; } };
+    })",
+    R"(#b() {}
+    fn() {
+      return new class { get #b() {} fn() { this.#b = 1; } };
+    })",
+    R"(#b() {}
+    fn() {
+      return new class { get #b() {} fn() { ({ y: this.#b } = obj); } };
+    })",
+    nullptr
+  };
+  // clang-format on
+
+  static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
+  RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+                    private_methods, arraysize(private_methods));
+}
+
 TEST(PrivateClassFieldsNoErrors) {
   // clang-format off
   // Tests proposed class fields syntax.
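Earlier in this file, NullishNotContained asserts that a bare "??" may not be mixed with "||" or "&&"; per the proposal, parenthesizing either operand makes the combination legal again. A hypothetical companion test in the same style (not part of this patch):

    TEST(NullishParenthesizedNoErrors) {
      v8::HandleScope handles(CcTest::isolate());
      v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
      v8::Context::Scope context_scope(context);

      const char* context_data[][2] = {
          {"", ""}, {"'use strict';", ""}, {nullptr, nullptr}};
      const char* statement_data[] = {"(a || b) ?? c", "a ?? (b || c)",
                                      "(a && b) ?? c", "a ?? (b && c)",
                                      nullptr};

      static const ParserFlag flags[] = {kAllowHarmonyNullish};
      RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0,
                        flags, 1, nullptr, 0, false, true, true);
    }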
diff --git a/deps/v8/test/cctest/test-pointer-auth-arm64.cc b/deps/v8/test/cctest/test-pointer-auth-arm64.cc
new file mode 100644
index 00000000000000..11632be80875fe
--- /dev/null
+++ b/deps/v8/test/cctest/test-pointer-auth-arm64.cc
@@ -0,0 +1,76 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/arm64/decoder-arm64-inl.h"
+#include "src/execution/arm64/simulator-arm64.h"
+
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef USE_SIMULATOR
+TEST(compute_pac) {
+  Decoder<DispatchingDecoderVisitor>* decoder =
+      new Decoder<DispatchingDecoderVisitor>();
+  Simulator simulator(decoder);
+
+  uint64_t data1 = 0xfb623599da6e8127;
+  uint64_t data2 = 0x27979fadf7d53cb7;
+  uint64_t context = 0x477d469dec0b8762;
+  Simulator::PACKey key = {0x84be85ce9804e94b, 0xec2802d4e0a488e9, -1};
+
+  uint64_t pac1 = simulator.ComputePAC(data1, context, key);
+  uint64_t pac2 = simulator.ComputePAC(data2, context, key);
+
+  // NOTE: If the PAC implementation is changed, this may fail due to a hash
+  // collision.
+  CHECK_NE(pac1, pac2);
+}
+
+TEST(add_and_auth_pac) {
+  Decoder<DispatchingDecoderVisitor>* decoder =
+      new Decoder<DispatchingDecoderVisitor>();
+  Simulator simulator(decoder);
+
+  uint64_t ptr = 0x0000000012345678;
+  uint64_t context = 0x477d469dec0b8762;
+  Simulator::PACKey key_a = {0x84be85ce9804e94b, 0xec2802d4e0a488e9, 0};
+  Simulator::PACKey key_b = {0xec1119e288704d13, 0xd7f6b76e1cea585e, 1};
+
+  uint64_t ptr_a =
+      simulator.AddPAC(ptr, context, key_a, Simulator::kInstructionPointer);
+
+  // Attempt to authenticate the pointer with PAC using different keys.
+  uint64_t success =
+      simulator.AuthPAC(ptr_a, context, key_a, Simulator::kInstructionPointer);
+  uint64_t fail =
+      simulator.AuthPAC(ptr_a, context, key_b, Simulator::kInstructionPointer);
+
+  uint64_t pac_mask =
+      simulator.CalculatePACMask(ptr, Simulator::kInstructionPointer, 0);
+
+  // NOTE: If the PAC implementation is changed, this may fail due to a hash
+  // collision.
+  CHECK_NE((ptr_a & pac_mask), 0);
+  CHECK_EQ(success, ptr);
+  CHECK_NE(fail, ptr);
+}
+
+TEST(add_and_strip_pac) {
+  Decoder<DispatchingDecoderVisitor>* decoder =
+      new Decoder<DispatchingDecoderVisitor>();
+  Simulator simulator(decoder);
+
+  uint64_t ptr = 0xff00000012345678;
+  uint64_t pac_mask =
+      simulator.CalculatePACMask(ptr, Simulator::kInstructionPointer, 0);
+  uint64_t ptr_a = ptr | pac_mask;
+
+  CHECK_EQ(simulator.StripPAC(ptr_a, Simulator::kInstructionPointer), ptr);
+}
+#endif  // USE_SIMULATOR
+
+}  // namespace internal
+}  // namespace v8
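The three new tests above cover distinct PAC properties: different data hash to different signatures, authentication fails under the wrong key, and stripping removes the signature bits. One property they leave implicit is the plain sign/authenticate round trip; a hypothetical addition in the same style (it would sit inside the same USE_SIMULATOR guard):

    TEST(add_and_auth_pac_round_trip) {
      Decoder<DispatchingDecoderVisitor>* decoder =
          new Decoder<DispatchingDecoderVisitor>();
      Simulator simulator(decoder);

      uint64_t ptr = 0x0000000012345678;
      uint64_t context = 0x477d469dec0b8762;
      Simulator::PACKey key = {0x84be85ce9804e94b, 0xec2802d4e0a488e9, 0};

      // Signing and then authenticating with the same key and context must
      // return the original pointer unchanged.
      uint64_t signed_ptr =
          simulator.AddPAC(ptr, context, key, Simulator::kInstructionPointer);
      CHECK_EQ(simulator.AuthPAC(signed_ptr, context, key,
                                 Simulator::kInstructionPointer),
               ptr);
    }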
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm.cc b/deps/v8/test/cctest/test-poison-disasm-arm.cc
index 37bb4e1b39e9be..3410e5487d6461 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm.cc
@@ -71,7 +71,6 @@ TEST(DisasmPoisonPolymorphicLoad) {
       "let o2 = { y : 1 };"
       "o2.x = 2;"
       "%PrepareFunctionForOptimization(poly);"
-      "poly(o1);"
       "poly(o2);"
       "poly(o1);"
       "poly(o2);"
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm64.cc b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
index d767fea9fb1d56..a428ce7b892bb7 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
@@ -84,7 +84,6 @@ TEST(DisasmPoisonPolymorphicLoad) {
       "let o2 = { y : 1 };"
       "o2.x = 2;"
       "%PrepareFunctionForOptimization(poly);"
-      "poly(o1);"
       "poly(o2);"
       "poly(o1);"
       "poly(o2);"
@@ -115,7 +114,7 @@ TEST(DisasmPoisonPolymorphicLoad) {
       "csel " + kPReg + ", xzr, " + kPReg + ", ne",      // update the poison
       "csdb",                                            // spec. barrier
       "ldursw x<<BSt:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]",  // load backing store
-      "tbz w<<BSt>>, #0, #\\+0x8",                       // branchful decompress
+                                                         // branchful decompress
       "add x<<BSt>>, x26, x<<BSt>>",                     // Add root to ref
       "and x<<BSt>>, x<<BSt>>, " + kPReg,                // apply the poison
       "ldur w<<Prop:[0-9]+>>, \\[x<<BSt>>, #[0-9]+\\]",  // load the property
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 051affc89845e3..ccebabec304e36 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -200,7 +200,9 @@ TEST(ProfileTreeAddPathFromEndWithLineNumbers) {
   ProfileTree tree(CcTest::i_isolate());
   ProfileTreeTestHelper helper(&tree);
 
-  ProfileStackTrace path = {{&c, 5}, {&b, 3}, {&a, 1}};
+  ProfileStackTrace path = {{{&c, 5}, kNullAddress, false},
+                            {{&b, 3}, kNullAddress, false},
+                            {{&a, 1}, kNullAddress, false}};
   tree.AddPathFromEnd(path, v8::CpuProfileNode::kNoLineNumberInfo, true,
                       v8::CpuProfilingMode::kCallerLineNumbers);
 
@@ -381,7 +383,8 @@ TEST(RecordTickSample) {
   CpuProfiler profiler(isolate);
   profiles.set_cpu_profiler(&profiler);
   profiles.StartProfiling("");
-  ProfileGenerator generator(&profiles);
+  CodeMap code_map;
+  ProfileGenerator generator(&profiles, &code_map);
   CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
   CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
   CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
@@ -449,7 +452,8 @@ TEST(SampleIds) {
   CpuProfiler profiler(isolate);
   profiles.set_cpu_profiler(&profiler);
   profiles.StartProfiling("", {CpuProfilingMode::kLeafNodeLineNumbers});
-  ProfileGenerator generator(&profiles);
+  CodeMap code_map;
+  ProfileGenerator generator(&profiles, &code_map);
   CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
   CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
   CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
@@ -503,7 +507,8 @@ TEST(NoSamples) {
   CpuProfiler profiler(isolate);
   profiles.set_cpu_profiler(&profiler);
   profiles.StartProfiling("");
-  ProfileGenerator generator(&profiles);
+  CodeMap code_map;
+  ProfileGenerator generator(&profiles, &code_map);
   CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
   generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
 
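ProfileGenerator now borrows an externally owned CodeMap instead of creating its own, as the three fixed-up constructor calls above show. A minimal sketch of the new ownership relationship (an illustrative fragment, reusing the surrounding tests' setup):

    CpuProfilesCollection profiles(isolate);
    CodeMap code_map;  // owned by the caller; must outlive the generator
    ProfileGenerator generator(&profiles, &code_map);

    // The generator only borrows the table, so entries registered through
    // generator.code_map() land in the caller-owned map.
    CodeEntry* entry = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
    generator.code_map()->AddCode(ToAddress(0x1500), entry, 0x200);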
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 1dad8febdafb1f..1374673c618792 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -35,6 +35,7 @@
 #include "src/codegen/assembler-arch.h"
 #include "src/codegen/macro-assembler.h"
 #include "src/init/v8.h"
+#include "src/objects/js-regexp-inl.h"
 #include "src/objects/objects-inl.h"
 #include "src/regexp/regexp-bytecode-generator.h"
 #include "src/regexp/regexp-compiler.h"
@@ -535,6 +536,7 @@ static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
   Isolate* isolate = CcTest::i_isolate();
   FlatStringReader reader(isolate, CStrVector(input));
   RegExpCompileData compile_data;
+  compile_data.compilation_target = RegExpCompilationTarget::kNative;
   JSRegExp::Flags flags = JSRegExp::kNone;
   if (multiline) flags = JSRegExp::kMultiline;
   if (unicode) flags = JSRegExp::kUnicode;
@@ -629,16 +631,35 @@ class ContextInitializer {
   v8::Local<v8::Context> env_;
 };
 
-static ArchRegExpMacroAssembler::Result Execute(Code code, String input,
+// Create a new JSRegExp object with only the fields needed by these tests
+// initialized.
+static Handle<JSRegExp> CreateJSRegExp(Handle<String> source, Handle<Code> code,
+                                       bool is_unicode = false) {
+  Isolate* isolate = CcTest::i_isolate();
+  Factory* factory = isolate->factory();
+  Handle<JSFunction> constructor = isolate->regexp_function();
+  Handle<JSRegExp> regexp =
+      Handle<JSRegExp>::cast(factory->NewJSObject(constructor));
+
+  factory->SetRegExpIrregexpData(regexp, JSRegExp::IRREGEXP, source,
+                                 JSRegExp::kNone, 0);
+  regexp->SetDataAt(is_unicode ? JSRegExp::kIrregexpUC16CodeIndex
+                               : JSRegExp::kIrregexpLatin1CodeIndex,
+                    *code);
+
+  return regexp;
+}
+
+static ArchRegExpMacroAssembler::Result Execute(JSRegExp regexp, String input,
                                                 int start_offset,
                                                 Address input_start,
                                                 Address input_end,
                                                 int* captures) {
   return static_cast<NativeRegExpMacroAssembler::Result>(
-      NativeRegExpMacroAssembler::Execute(code, input, start_offset,
-                                          reinterpret_cast<byte*>(input_start),
-                                          reinterpret_cast<byte*>(input_end),
-                                          captures, 0, CcTest::i_isolate()));
+      NativeRegExpMacroAssembler::Execute(
+          input, start_offset, reinterpret_cast<byte*>(input_start),
+          reinterpret_cast<byte*>(input_end), captures, 0, CcTest::i_isolate(),
+          regexp));
 }
 
 TEST(MacroAssemblerNativeSuccess) {
@@ -656,19 +677,15 @@ TEST(MacroAssemblerNativeSuccess) {
   Handle<String> source = factory->NewStringFromStaticChars("");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code);
 
   int captures[4] = {42, 37, 87, 117};
   Handle<String> input = factory->NewStringFromStaticChars("foofoo");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
-  NativeRegExpMacroAssembler::Result result =
-      Execute(*code,
-              *input,
-              0,
-              start_adr,
-              start_adr + seq_input->length(),
-              captures);
+  NativeRegExpMacroAssembler::Result result = Execute(
+      *regexp, *input, 0, start_adr, start_adr + seq_input->length(), captures);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
   CHECK_EQ(-1, captures[0]);
@@ -710,19 +727,15 @@ TEST(MacroAssemblerNativeSimple) {
   Handle<String> source = factory->NewStringFromStaticChars("^foo");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code);
 
   int captures[4] = {42, 37, 87, 117};
   Handle<String> input = factory->NewStringFromStaticChars("foofoo");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
-  NativeRegExpMacroAssembler::Result result =
-      Execute(*code,
-              *input,
-              0,
-              start_adr,
-              start_adr + input->length(),
-              captures);
+  NativeRegExpMacroAssembler::Result result = Execute(
+      *regexp, *input, 0, start_adr, start_adr + input->length(), captures);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
   CHECK_EQ(0, captures[0]);
@@ -734,11 +747,7 @@ TEST(MacroAssemblerNativeSimple) {
   seq_input = Handle<SeqOneByteString>::cast(input);
   start_adr = seq_input->GetCharsAddress();
 
-  result = Execute(*code,
-                   *input,
-                   0,
-                   start_adr,
-                   start_adr + input->length(),
+  result = Execute(*regexp, *input, 0, start_adr, start_adr + input->length(),
                    captures);
 
   CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
@@ -777,6 +786,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
   Handle<String> source = factory->NewStringFromStaticChars("^foo");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code, true);
 
   int captures[4] = {42, 37, 87, 117};
   const uc16 input_data[6] = {'f', 'o', 'o', 'f', 'o',
@@ -786,13 +796,8 @@ TEST(MacroAssemblerNativeSimpleUC16) {
   Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
-  NativeRegExpMacroAssembler::Result result =
-      Execute(*code,
-              *input,
-              0,
-              start_adr,
-              start_adr + input->length(),
-              captures);
+  NativeRegExpMacroAssembler::Result result = Execute(
+      *regexp, *input, 0, start_adr, start_adr + input->length(), captures);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
   CHECK_EQ(0, captures[0]);
@@ -807,12 +812,8 @@ TEST(MacroAssemblerNativeSimpleUC16) {
   seq_input = Handle<SeqTwoByteString>::cast(input);
   start_adr = seq_input->GetCharsAddress();
 
-  result = Execute(*code,
-                   *input,
-                   0,
-                   start_adr,
-                   start_adr + input->length() * 2,
-                   captures);
+  result = Execute(*regexp, *input, 0, start_adr,
+                   start_adr + input->length() * 2, captures);
 
   CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
 }
@@ -842,13 +843,14 @@ TEST(MacroAssemblerNativeBacktrack) {
   Handle<String> source = factory->NewStringFromStaticChars("..........");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code);
 
   Handle<String> input = factory->NewStringFromStaticChars("foofoo");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
   NativeRegExpMacroAssembler::Result result = Execute(
-      *code, *input, 0, start_adr, start_adr + input->length(), nullptr);
+      *regexp, *input, 0, start_adr, start_adr + input->length(), nullptr);
 
   CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
 }
@@ -882,19 +884,15 @@ TEST(MacroAssemblerNativeBackReferenceLATIN1) {
   Handle<String> source = factory->NewStringFromStaticChars("^(..)..\1");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code);
 
   Handle<String> input = factory->NewStringFromStaticChars("fooofo");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
   int output[4];
-  NativeRegExpMacroAssembler::Result result =
-      Execute(*code,
-              *input,
-              0,
-              start_adr,
-              start_adr + input->length(),
-              output);
+  NativeRegExpMacroAssembler::Result result = Execute(
+      *regexp, *input, 0, start_adr, start_adr + input->length(), output);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
   CHECK_EQ(0, output[0]);
@@ -932,6 +930,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
   Handle<String> source = factory->NewStringFromStaticChars("^(..)..\1");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code, true);
 
   const uc16 input_data[6] = {'f', 0x2028, 'o', 'o', 'f', 0x2028};
   Handle<String> input = factory->NewStringFromTwoByte(
@@ -940,13 +939,8 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
   Address start_adr = seq_input->GetCharsAddress();
 
   int output[4];
-  NativeRegExpMacroAssembler::Result result =
-      Execute(*code,
-              *input,
-              0,
-              start_adr,
-              start_adr + input->length() * 2,
-              output);
+  NativeRegExpMacroAssembler::Result result = Execute(
+      *regexp, *input, 0, start_adr, start_adr + input->length() * 2, output);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
   CHECK_EQ(0, output[0]);
@@ -991,18 +985,19 @@ TEST(MacroAssemblernativeAtStart) {
   Handle<String> source = factory->NewStringFromStaticChars("(^f|ob)");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code);
 
   Handle<String> input = factory->NewStringFromStaticChars("foobar");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
   NativeRegExpMacroAssembler::Result result = Execute(
-      *code, *input, 0, start_adr, start_adr + input->length(), nullptr);
+      *regexp, *input, 0, start_adr, start_adr + input->length(), nullptr);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
 
-  result = Execute(*code, *input, 3, start_adr + 3, start_adr + input->length(),
-                   nullptr);
+  result = Execute(*regexp, *input, 3, start_adr + 3,
+                   start_adr + input->length(), nullptr);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
 }
@@ -1044,19 +1039,15 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
       factory->NewStringFromStaticChars("^(abc)\1\1(?!\1)...(?!\1)");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code);
 
   Handle<String> input = factory->NewStringFromStaticChars("aBcAbCABCxYzab");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
   int output[4];
-  NativeRegExpMacroAssembler::Result result =
-      Execute(*code,
-              *input,
-              0,
-              start_adr,
-              start_adr + input->length(),
-              output);
+  NativeRegExpMacroAssembler::Result result = Execute(
+      *regexp, *input, 0, start_adr, start_adr + input->length(), output);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
   CHECK_EQ(0, output[0]);
@@ -1144,6 +1135,7 @@ TEST(MacroAssemblerNativeRegisters) {
   Handle<String> source = factory->NewStringFromStaticChars("<loop test>");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code);
 
   // String long enough for test (content doesn't matter).
   Handle<String> input = factory->NewStringFromStaticChars("foofoofoofoofoo");
@@ -1151,13 +1143,8 @@ TEST(MacroAssemblerNativeRegisters) {
   Address start_adr = seq_input->GetCharsAddress();
 
   int output[6];
-  NativeRegExpMacroAssembler::Result result =
-      Execute(*code,
-              *input,
-              0,
-              start_adr,
-              start_adr + input->length(),
-              output);
+  NativeRegExpMacroAssembler::Result result = Execute(
+      *regexp, *input, 0, start_adr, start_adr + input->length(), output);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
   CHECK_EQ(0, output[0]);
@@ -1188,6 +1175,7 @@ TEST(MacroAssemblerStackOverflow) {
       factory->NewStringFromStaticChars("<stack overflow test>");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code);
 
   // String long enough for test (content doesn't matter).
   Handle<String> input = factory->NewStringFromStaticChars("dummy");
@@ -1195,7 +1183,7 @@ TEST(MacroAssemblerStackOverflow) {
   Address start_adr = seq_input->GetCharsAddress();
 
   NativeRegExpMacroAssembler::Result result = Execute(
-      *code, *input, 0, start_adr, start_adr + input->length(), nullptr);
+      *regexp, *input, 0, start_adr, start_adr + input->length(), nullptr);
 
   CHECK_EQ(NativeRegExpMacroAssembler::EXCEPTION, result);
   CHECK(isolate->has_pending_exception());
@@ -1230,6 +1218,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
       factory->NewStringFromStaticChars("<huge register space test>");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
+  Handle<JSRegExp> regexp = CreateJSRegExp(source, code);
 
   // String long enough for test (content doesn't matter).
   Handle<String> input = factory->NewStringFromStaticChars("sample text");
@@ -1237,13 +1226,8 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
   Address start_adr = seq_input->GetCharsAddress();
 
   int captures[2];
-  NativeRegExpMacroAssembler::Result result =
-      Execute(*code,
-              *input,
-              0,
-              start_adr,
-              start_adr + input->length(),
-              captures);
+  NativeRegExpMacroAssembler::Result result = Execute(
+      *regexp, *input, 0, start_adr, start_adr + input->length(), captures);
 
   CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
   CHECK_EQ(0, captures[0]);
@@ -1298,7 +1282,9 @@ TEST(MacroAssembler) {
   Handle<String> f1_16 = factory->NewStringFromTwoByte(
       Vector<const uc16>(str1, 6)).ToHandleChecked();
 
-  CHECK(IrregexpInterpreter::Match(isolate, array, f1_16, captures, 0));
+  CHECK(IrregexpInterpreter::MatchInternal(isolate, *array, *f1_16, captures, 5,
+                                           0,
+                                           RegExp::CallOrigin::kFromRuntime));
   CHECK_EQ(0, captures[0]);
   CHECK_EQ(3, captures[1]);
   CHECK_EQ(1, captures[2]);
@@ -1309,7 +1295,9 @@ TEST(MacroAssembler) {
   Handle<String> f2_16 = factory->NewStringFromTwoByte(
       Vector<const uc16>(str2, 6)).ToHandleChecked();
 
-  CHECK(!IrregexpInterpreter::Match(isolate, array, f2_16, captures, 0));
+  CHECK(!IrregexpInterpreter::MatchInternal(isolate, *array, *f2_16, captures,
+                                            5, 0,
+                                            RegExp::CallOrigin::kFromRuntime));
   CHECK_EQ(42, captures[0]);
 }
 
@@ -1692,8 +1680,7 @@ void MockUseCounterCallback(v8::Isolate* isolate,
 }
 }
 
-
-// Test that ES2015 RegExp compatibility fixes are in place, that they
+// Test that ES2015+ RegExp compatibility fixes are in place, that they
 // are not overly broad, and the appropriate UseCounters are incremented
 TEST(UseCountRegExp) {
   v8::Isolate* isolate = CcTest::isolate();
@@ -1715,7 +1702,7 @@ TEST(UseCountRegExp) {
   CHECK_EQ(0, use_counts[v8::Isolate::kRegExpPrototypeToString]);
   CHECK(resultReSticky->IsFalse());
 
-  // When the getter is caleld on another object, throw an exception
+  // When the getter is called on another object, throw an exception
   // and don't increment the UseCounter
   v8::Local<v8::Value> resultStickyError = CompileRun(
       "var exception;"
@@ -1757,6 +1744,19 @@ TEST(UseCountRegExp) {
   CHECK_EQ(2, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
   CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeToString]);
   CHECK(resultToStringError->IsObject());
+
+  // Increment a UseCounter when .matchAll() is used with a non-global
+  // regular expression.
+  CHECK_EQ(0, use_counts[v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp]);
+  v8::Local<v8::Value> resultReMatchAllNonGlobal =
+      CompileRun("'a'.matchAll(/./)");
+  CHECK_EQ(1, use_counts[v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp]);
+  CHECK(resultReMatchAllNonGlobal->IsObject());
+  // Don't increment the counter for global regular expressions.
+  v8::Local<v8::Value> resultReMatchAllGlobal =
+      CompileRun("'a'.matchAll(/./g)");
+  CHECK_EQ(1, use_counts[v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp]);
+  CHECK(resultReMatchAllGlobal->IsObject());
 }
 
 class UncachedExternalString
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 1ed1547b34f067..407437c4b1b48e 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -1516,6 +1516,50 @@ static Handle<SharedFunctionInfo> CompileScriptAndProduceCache(
   return sfi;
 }
 
+TEST(CodeSerializerWithProfiler) {
+  FLAG_enable_lazy_source_positions = true;
+  FLAG_stress_lazy_source_positions = false;
+
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  const char* source = "1 + 1";
+
+  Handle<String> orig_source = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  Handle<String> copy_source = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  CHECK(!orig_source.is_identical_to(copy_source));
+  CHECK(orig_source->Equals(*copy_source));
+
+  ScriptData* cache = nullptr;
+
+  Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+      isolate, orig_source, Handle<String>(), &cache,
+      v8::ScriptCompiler::kNoCompileOptions);
+
+  CHECK(!orig->GetBytecodeArray().HasSourcePositionTable());
+
+  isolate->set_is_profiling(true);
+
+  // This does not assert that no compilation happens, since source position
+  // collection itself may trigger compilation.
+  Handle<SharedFunctionInfo> copy =
+      CompileScript(isolate, copy_source, Handle<String>(), cache,
+                    v8::ScriptCompiler::kConsumeCodeCache);
+
+  // Since the profiler is now enabled, source positions should be collected
+  // after deserialization.
+  CHECK(copy->GetBytecodeArray().HasSourcePositionTable());
+
+  delete cache;
+}
+
 void TestCodeSerializerOnePlusOneImpl(bool verify_builtins_count = true) {
   LocalContext context;
   Isolate* isolate = CcTest::i_isolate();
@@ -3565,6 +3609,7 @@ UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
           base::make_unique<v8::Extension>("new extension",
                                            "function i() { return 24; }"
                                            "function j() { return 25; }"
+                                           "let a = 26;"
                                            "try {"
                                            "  if (o.p == 7) o.p++;"
                                            "} catch {}");
@@ -3582,6 +3627,7 @@ UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
         ExpectInt32("i()", 24);
         ExpectInt32("j()", 25);
         ExpectInt32("o.p", 8);
+        ExpectInt32("a", 26);
         v8::TryCatch try_catch(isolate);
         CHECK(CompileRun("x").IsEmpty());
         CHECK(try_catch.HasCaught());
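
For context, the cache round-trip that CompileScriptAndProduceCache and CompileScript wrap in these tests corresponds to the public embedder flow sketched below. This is an illustrative sketch against the v8.h ScriptCompiler API (an entered v8::Isolate* named isolate and a context are assumed, error handling elided), not part of the patch.

  // Produce a code cache from an eagerly compiled script.
  v8::Local<v8::String> src = v8_str("1 + 1");
  v8::ScriptCompiler::Source produce_src(src);
  v8::Local<v8::UnboundScript> unbound =
      v8::ScriptCompiler::CompileUnboundScript(
          isolate, &produce_src, v8::ScriptCompiler::kEagerCompile)
          .ToLocalChecked();
  v8::ScriptCompiler::CachedData* cache =
      v8::ScriptCompiler::CreateCodeCache(unbound);

  // Consume it in a later compile; the Source takes ownership of the cache.
  v8::ScriptCompiler::Source consume_src(src, cache);
  v8::ScriptCompiler::CompileUnboundScript(
      isolate, &consume_src, v8::ScriptCompiler::kConsumeCodeCache)
      .ToLocalChecked();
  CHECK(!consume_src.GetCachedData()->rejected);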
diff --git a/deps/v8/test/cctest/test-stack-unwinding-x64.cc b/deps/v8/test/cctest/test-stack-unwinding-win64.cc
similarity index 76%
rename from deps/v8/test/cctest/test-stack-unwinding-x64.cc
rename to deps/v8/test/cctest/test-stack-unwinding-win64.cc
index 583e14111ad676..84f1318a29525a 100644
--- a/deps/v8/test/cctest/test-stack-unwinding-x64.cc
+++ b/deps/v8/test/cctest/test-stack-unwinding-win64.cc
@@ -6,9 +6,15 @@
 #include "src/init/v8.h"
 #include "test/cctest/cctest.h"
 
-class UnwindingWinX64Callbacks {
+#if defined(V8_OS_WIN_X64)
+#define CONTEXT_PC(context) (context.Rip)
+#elif defined(V8_OS_WIN_ARM64)
+#define CONTEXT_PC(context) (context.Pc)
+#endif
+
+class UnwindingWin64Callbacks {
  public:
-  UnwindingWinX64Callbacks() = default;
+  UnwindingWin64Callbacks() = default;
 
   static void Getter(v8::Local<v8::String> name,
                      const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -31,25 +37,26 @@ class UnwindingWinX64Callbacks {
     int iframe = 0;
     while (++iframe < max_frames) {
       uint64_t image_base;
-      PRUNTIME_FUNCTION function_entry =
-          ::RtlLookupFunctionEntry(context_record.Rip, &image_base, nullptr);
+      PRUNTIME_FUNCTION function_entry = ::RtlLookupFunctionEntry(
+          CONTEXT_PC(context_record), &image_base, nullptr);
       if (!function_entry) break;
 
       void* handler_data;
       uint64_t establisher_frame;
-      ::RtlVirtualUnwind(UNW_FLAG_NHANDLER, image_base, context_record.Rip,
-                         function_entry, &context_record, &handler_data,
-                         &establisher_frame, NULL);
+      ::RtlVirtualUnwind(UNW_FLAG_NHANDLER, image_base,
+                         CONTEXT_PC(context_record), function_entry,
+                         &context_record, &handler_data, &establisher_frame,
+                         NULL);
     }
     return iframe;
   }
 };
 
-// Verifies that stack unwinding data has been correctly registered on Win/x64.
-UNINITIALIZED_TEST(StackUnwindingWinX64) {
+// Verifies that stack unwinding data has been correctly registered on Win64.
+UNINITIALIZED_TEST(StackUnwindingWin64) {
 #ifdef V8_WIN64_UNWINDING_INFO
 
-  static const char* unwinding_win_x64_test_source =
+  static const char* unwinding_win64_test_source =
       "function start(count) {\n"
       "  for (var i = 0; i < count; i++) {\n"
       "    var o = instance.foo;\n"
@@ -79,18 +86,18 @@ UNINITIALIZED_TEST(StackUnwindingWinX64) {
     v8::Local<v8::ObjectTemplate> instance_template =
         func_template->InstanceTemplate();
 
-    UnwindingWinX64Callbacks accessors;
+    UnwindingWin64Callbacks accessors;
     v8::Local<v8::External> data = v8::External::New(isolate, &accessors);
     instance_template->SetAccessor(v8_str("foo"),
-                                   &UnwindingWinX64Callbacks::Getter,
-                                   &UnwindingWinX64Callbacks::Setter, data);
+                                   &UnwindingWin64Callbacks::Getter,
+                                   &UnwindingWin64Callbacks::Setter, data);
     v8::Local<v8::Function> func =
         func_template->GetFunction(env.local()).ToLocalChecked();
     v8::Local<v8::Object> instance =
         func->NewInstance(env.local()).ToLocalChecked();
     env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();
 
-    CompileRun(unwinding_win_x64_test_source);
+    CompileRun(unwinding_win64_test_source);
     v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
         env->Global()->Get(env.local(), v8_str("start")).ToLocalChecked());
 
@@ -106,3 +113,5 @@ UNINITIALIZED_TEST(StackUnwindingWinX64) {
 
 #endif  // V8_WIN64_UNWINDING_INFO
 }
+
+#undef CONTEXT_PC
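
The unwind loop in GetStackFrames above is the standard Win64 virtual-unwind idiom. For reference, a stand-alone sketch of the same walk over the current thread's stack (x64 variant; assumes <windows.h>; illustrative only, not part of the patch):

  // Walks the caller's stack using the OS-registered unwind info and
  // returns the number of frames reached before hitting code with no
  // RUNTIME_FUNCTION entry (e.g. JIT code that registered nothing).
  static int CountFrames(int max_frames) {
    CONTEXT ctx;
    RtlCaptureContext(&ctx);
    int frames = 0;
    while (++frames < max_frames) {
      DWORD64 image_base;
      PRUNTIME_FUNCTION entry =
          RtlLookupFunctionEntry(ctx.Rip, &image_base, nullptr);
      if (!entry) break;
      PVOID handler_data;
      DWORD64 establisher_frame;
      RtlVirtualUnwind(UNW_FLAG_NHANDLER, image_base, ctx.Rip, entry, &ctx,
                       &handler_data, &establisher_frame, nullptr);
    }
    return frames;
  }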
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm.cc b/deps/v8/test/cctest/test-sync-primitives-arm.cc
index 84dc0575cfb664..6afa9e5192e476 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm.cc
@@ -352,7 +352,7 @@ TEST(simulator_invalidate_exclusive_access_threaded) {
   TestData test_data(1);
 
   MemoryAccessThread thread;
-  thread.Start();
+  CHECK(thread.Start());
 
   MemoryAccess ldrex_w(Kind::LoadExcl, Size::Word, offsetof(TestData, w));
   MemoryAccess strex_w(Kind::StoreExcl, Size::Word, offsetof(TestData, w), 7);
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm64.cc b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
index 38adf8486ac173..f5f19f0687acae 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm64.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
@@ -359,7 +359,7 @@ TEST(simulator_invalidate_exclusive_access_threaded) {
   TestData test_data(1);
 
   MemoryAccessThread thread;
-  thread.Start();
+  CHECK(thread.Start());
 
   MemoryAccess ldaxr_w(Kind::LoadExcl, Size::Word, offsetof(TestData, w));
   MemoryAccess stlxr_w(Kind::StoreExcl, Size::Word, offsetof(TestData, w), 7);
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index ca9ac5efaadc06..db9b5c928a49e3 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -179,7 +179,7 @@ class TerminatorThread : public v8::base::Thread {
 void TestTerminatingSlowOperation(const char* source) {
   semaphore = new v8::base::Semaphore(0);
   TerminatorThread thread(CcTest::i_isolate());
-  thread.Start();
+  CHECK(thread.Start());
 
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::ObjectTemplate> global =
@@ -474,7 +474,7 @@ void MicrotaskLoopForever(const v8::FunctionCallbackInfo<v8::Value>& info) {
 TEST(TerminateFromOtherThreadWhileMicrotaskRunning) {
   semaphore = new v8::base::Semaphore(0);
   TerminatorThread thread(CcTest::i_isolate());
-  thread.Start();
+  CHECK(thread.Start());
 
   v8::Isolate* isolate = CcTest::isolate();
   isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
@@ -878,7 +878,7 @@ TEST(TerminateRegExp) {
   CHECK(!isolate->IsExecutionTerminating());
   CHECK(!CompileRun("var re = /(x+)+y$/; re.test('x');").IsEmpty());
   TerminatorSleeperThread terminator(isolate, 100);
-  terminator.Start();
+  CHECK(terminator.Start());
   CHECK(CompileRun("re.test('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); fail();")
             .IsEmpty());
   CHECK(try_catch.HasCaught());
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index be76f5e93f58eb..20627240431ea0 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -54,7 +54,7 @@ class ThreadIdValidationThread : public base::Thread {
     }
     refs_[thread_no_].store(thread_id, std::memory_order_relaxed);
     if (thread_to_start_ != nullptr) {
-      thread_to_start_->Start();
+      CHECK(thread_to_start_->Start());
     }
     semaphore_->Signal();
   }
@@ -77,7 +77,7 @@ TEST(ThreadIdValidation) {
     threads[i] =
         base::make_unique<ThreadIdValidationThread>(prev, refs, i, &semaphore);
   }
-  threads[0]->Start();
+  CHECK(threads[0]->Start());
   for (int i = 0; i < kNThreads; i++) {
     semaphore.Wait();
   }
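
The pattern exercised above — each thread starts its successor, records its id, and signals a semaphore — reads like this in portable C++20 (an illustrative analogue, not V8 code):

  #include <functional>
  #include <semaphore>
  #include <thread>
  #include <vector>

  int main() {
    constexpr int kNThreads = 8;
    std::counting_semaphore<kNThreads> done(0);
    std::vector<std::jthread> threads(kNThreads);
    // Each thread launches the next one in the chain, then signals.
    std::function<void(int)> run = [&](int i) {
      if (i + 1 < kNThreads) threads[i + 1] = std::jthread(run, i + 1);
      done.release();
    };
    threads[0] = std::jthread(run, 0);
    for (int i = 0; i < kNThreads; i++) done.acquire();
    return 0;  // ~jthread joins every thread.
  }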
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 78ec000d6510ea..9cfc40d37d992e 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -73,8 +73,8 @@ static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
     return obj.RawFastDoublePropertyAt(field_index);
   } else {
     Object value = obj.RawFastPropertyAt(field_index);
-    CHECK(value.IsMutableHeapNumber());
-    return MutableHeapNumber::cast(value).value();
+    CHECK(value.IsHeapNumber());
+    return HeapNumber::cast(value).value();
   }
 }
 
@@ -1120,7 +1120,7 @@ TEST(DoScavenge) {
   double boom_value = bit_cast<double>(fake_object);
 
   FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
-  auto boom_number = factory->NewMutableHeapNumber(boom_value);
+  auto boom_number = factory->NewHeapNumber(boom_value);
   obj->FastPropertyAtPut(field_index, *boom_number);
 
   // Now |obj| moves to old gen and it has a double field that looks like
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 8b672339632235..184a86794634fb 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -114,7 +114,7 @@ TEST(TestBuiltinSpecialization) {
   CodeAssemblerTester asm_tester(isolate, 0);
   TestTorqueAssembler m(asm_tester.state());
   {
-    Node* temp = m.SmiConstant(0);
+    TNode<Object> temp = m.SmiConstant(0);
     m.TestBuiltinSpecialization(m.UncheckedCast<Context>(temp));
     m.Return(m.UndefinedConstant());
   }
@@ -171,7 +171,7 @@ TEST(TestFunctionPointerToGeneric) {
   CodeAssemblerTester asm_tester(isolate, 0);
   TestTorqueAssembler m(asm_tester.state());
   {
-    Node* temp = m.SmiConstant(0);
+    TNode<Object> temp = m.SmiConstant(0);
     m.TestFunctionPointerToGeneric(m.UncheckedCast<Context>(temp));
     m.Return(m.UndefinedConstant());
   }
@@ -184,8 +184,8 @@ TEST(TestUnsafeCast) {
   CodeAssemblerTester asm_tester(isolate, 0);
   TestTorqueAssembler m(asm_tester.state());
   {
-    Node* temp = m.SmiConstant(0);
-    Node* n = m.SmiConstant(10);
+    TNode<Object> temp = m.SmiConstant(0);
+    TNode<Smi> n = m.SmiConstant(10);
     m.Return(m.TestUnsafeCast(m.UncheckedCast<Context>(temp),
                               m.UncheckedCast<Number>(n)));
   }
@@ -328,7 +328,7 @@ TEST(TestCatch1) {
     TNode<Smi> result =
         m.TestCatch1(m.UncheckedCast<Context>(m.HeapConstant(context)));
     USE(result);
-    CSA_ASSERT(&m, m.WordEqual(result, m.SmiConstant(1)));
+    CSA_ASSERT(&m, m.TaggedEqual(result, m.SmiConstant(1)));
     m.Return(m.UndefinedConstant());
   }
   FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -347,7 +347,7 @@ TEST(TestCatch2) {
     TNode<Smi> result =
         m.TestCatch2(m.UncheckedCast<Context>(m.HeapConstant(context)));
     USE(result);
-    CSA_ASSERT(&m, m.WordEqual(result, m.SmiConstant(2)));
+    CSA_ASSERT(&m, m.TaggedEqual(result, m.SmiConstant(2)));
     m.Return(m.UndefinedConstant());
   }
   FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -366,7 +366,7 @@ TEST(TestCatch3) {
     TNode<Smi> result =
         m.TestCatch3(m.UncheckedCast<Context>(m.HeapConstant(context)));
     USE(result);
-    CSA_ASSERT(&m, m.WordEqual(result, m.SmiConstant(2)));
+    CSA_ASSERT(&m, m.TaggedEqual(result, m.SmiConstant(2)));
     m.Return(m.UndefinedConstant());
   }
   FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -484,6 +484,36 @@ TEST(TestReferences) {
   ft.Call();
 }
 
+TEST(TestSlices) {
+  CcTest::InitializeVM();
+  Isolate* isolate(CcTest::i_isolate());
+  i::HandleScope scope(isolate);
+  CodeAssemblerTester asm_tester(isolate);
+  TestTorqueAssembler m(asm_tester.state());
+  {
+    m.TestSlices();
+    m.Return(m.UndefinedConstant());
+  }
+  FunctionTester ft(asm_tester.GenerateCode(), 0);
+  ft.Call();
+}
+
+TEST(TestSliceEnumeration) {
+  CcTest::InitializeVM();
+  Isolate* isolate(CcTest::i_isolate());
+  i::HandleScope scope(isolate);
+  Handle<Context> context =
+      Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+  CodeAssemblerTester asm_tester(isolate);
+  TestTorqueAssembler m(asm_tester.state());
+  {
+    m.TestSliceEnumeration(m.UncheckedCast<Context>(m.HeapConstant(context)));
+    m.Return(m.UndefinedConstant());
+  }
+  FunctionTester ft(asm_tester.GenerateCode(), 0);
+  ft.Call();
+}
+
 TEST(TestStaticAssert) {
   CcTest::InitializeVM();
   Isolate* isolate(CcTest::i_isolate());
@@ -580,11 +610,28 @@ TEST(TestGenericStruct2) {
   i::HandleScope scope(isolate);
   CodeAssemblerTester asm_tester(isolate);
   TestTorqueAssembler m(asm_tester.state());
-  { m.Return(m.TestGenericStruct2().fst); }
+  { m.Return(m.TestGenericStruct2().snd.fst); }
   FunctionTester ft(asm_tester.GenerateCode(), 0);
   ft.Call();
 }
 
+TEST(TestBranchOnBoolOptimization) {
+  CcTest::InitializeVM();
+  Isolate* isolate(CcTest::i_isolate());
+  i::HandleScope scope(isolate);
+  Handle<Context> context =
+      Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+  CodeAssemblerTester asm_tester(isolate, 1);
+  TestTorqueAssembler m(asm_tester.state());
+  {
+    m.TestBranchOnBoolOptimization(
+        m.UncheckedCast<Context>(m.HeapConstant(context)),
+        m.UncheckedCast<Smi>(m.Parameter(0)));
+    m.Return(m.UndefinedConstant());
+  }
+  asm_tester.GenerateCode();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
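
The Node* to TNode<> changes in this file follow the CodeStubAssembler typed-node convention, and TaggedEqual replaces WordEqual for comparisons of tagged values (a raw word comparison is no longer meaningful once pointer compression is in play). A minimal sketch of the idiom, using only assembler methods already visible above, illustrative only:

  // SmiConstant() yields TNode<Smi>, which upcasts implicitly to
  // TNode<Object>; tagged values are compared with TaggedEqual.
  TNode<Smi> a = m.SmiConstant(1);
  TNode<Object> b = a;
  CSA_ASSERT(&m, m.TaggedEqual(a, b));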
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index 08cc024c90885b..df24e83f39bed8 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -30,6 +30,7 @@
 #include "include/v8-profiler.h"
 #include "src/execution/vm-state-inl.h"
 #include "src/objects/smi.h"
+#include "src/profiler/tick-sample.h"
 #include "test/cctest/cctest.h"
 
 namespace v8 {
@@ -89,21 +90,20 @@ Address TraceExtension::GetFP(const v8::FunctionCallbackInfo<v8::Value>& args) {
   return fp;
 }
 
-static struct { v8::TickSample* sample; } trace_env = {nullptr};
+static struct { TickSample* sample; } trace_env = {nullptr};
 
-void TraceExtension::InitTraceEnv(v8::TickSample* sample) {
+void TraceExtension::InitTraceEnv(TickSample* sample) {
   trace_env.sample = sample;
 }
 
-
 void TraceExtension::DoTrace(Address fp) {
   RegisterState regs;
   regs.fp = reinterpret_cast<void*>(fp);
   // sp is only used to define stack high bound
   regs.sp = reinterpret_cast<void*>(
       reinterpret_cast<Address>(trace_env.sample) - 10240);
-  trace_env.sample->Init(CcTest::isolate(), regs,
-                         v8::TickSample::kSkipCEntryFrame, true);
+  trace_env.sample->Init(CcTest::i_isolate(), regs,
+                         TickSample::kSkipCEntryFrame, true);
 }
 
 
diff --git a/deps/v8/test/cctest/trace-extension.h b/deps/v8/test/cctest/trace-extension.h
index fe62c006b7c7e5..78927f0fb62dbd 100644
--- a/deps/v8/test/cctest/trace-extension.h
+++ b/deps/v8/test/cctest/trace-extension.h
@@ -32,9 +32,10 @@
 #include "src/common/globals.h"
 
 namespace v8 {
-struct TickSample;
 namespace internal {
 
+struct TickSample;
+
 class TraceExtension : public v8::Extension {
  public:
   TraceExtension() : v8::Extension("v8/trace", kSource) { }
@@ -45,7 +46,7 @@ class TraceExtension : public v8::Extension {
   static void JSEntrySP(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void JSEntrySPLevel2(const v8::FunctionCallbackInfo<v8::Value>& args);
   static Address GetJsEntrySp();
-  static void InitTraceEnv(v8::TickSample* sample);
+  static void InitTraceEnv(TickSample* sample);
   static void DoTrace(Address fp);
  private:
   static Address GetFP(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index dc02cfd14acd8c..556d74daefc411 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -32,16 +32,18 @@ constexpr int kJumpTableSlotCount = 128;
 constexpr uint32_t kJumpTableSize =
     JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);
 
+constexpr size_t kThunkBufferSize = AssemblerBase::kMinimalBufferSize;
+
 #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
 constexpr uint32_t kAvailableBufferSlots =
-    (kMaxWasmCodeMemory - kJumpTableSize) / AssemblerBase::kMinimalBufferSize;
+    (kMaxWasmCodeMemory - kJumpTableSize) / kThunkBufferSize;
 constexpr uint32_t kBufferSlotStartOffset =
-    RoundUp<AssemblerBase::kMinimalBufferSize>(kJumpTableSize);
+    RoundUp<kThunkBufferSize>(kJumpTableSize);
 #else
 constexpr uint32_t kAvailableBufferSlots = 0;
 #endif
 
-Address GenerateJumpTableThunk(
+Address AllocateJumpTableThunk(
     Address jump_target, byte* thunk_slot_buffer,
     std::bitset<kAvailableBufferSlots>* used_slots,
     std::vector<std::unique_ptr<TestingAssemblerBuffer>>* thunk_buffers) {
@@ -62,20 +64,22 @@ Address GenerateJumpTableThunk(
     buffer_index = rng->NextInt(kAvailableBufferSlots);
   } while (used_slots->test(buffer_index));
   used_slots->set(buffer_index);
-  byte* buffer =
-      thunk_slot_buffer + buffer_index * AssemblerBase::kMinimalBufferSize;
+  return reinterpret_cast<Address>(thunk_slot_buffer +
+                                   buffer_index * kThunkBufferSize);
 
 #else
   USE(thunk_slot_buffer);
   USE(used_slots);
-  thunk_buffers->emplace_back(AllocateAssemblerBuffer(
-      AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr()));
-  byte* buffer = thunk_buffers->back()->start();
+  thunk_buffers->emplace_back(
+      AllocateAssemblerBuffer(kThunkBufferSize, GetRandomMmapAddr()));
+  return reinterpret_cast<Address>(thunk_buffers->back()->start());
 #endif
+}
 
-  MacroAssembler masm(
-      nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
-      ExternalAssemblerBuffer(buffer, AssemblerBase::kMinimalBufferSize));
+void CompileJumpTableThunk(Address thunk, Address jump_target) {
+  MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+                      ExternalAssemblerBuffer(reinterpret_cast<void*>(thunk),
+                                              kThunkBufferSize));
 
   Label exit;
   Register scratch = kReturnRegister0;
@@ -132,9 +136,9 @@ Address GenerateJumpTableThunk(
   __ bind(&exit);
   __ Ret();
 
-  CodeDesc desc;
-  masm.GetCode(nullptr, &desc);
-  return reinterpret_cast<Address>(buffer);
+  FlushInstructionCache(thunk, kThunkBufferSize);
+  CHECK(SetPermissions(GetPlatformPageAllocator(), thunk, kThunkBufferSize,
+                       v8::PageAllocator::kReadExecute));
 }
 
 class JumpTableRunner : public v8::base::Thread {
@@ -159,29 +163,38 @@ class JumpTableRunner : public v8::base::Thread {
 class JumpTablePatcher : public v8::base::Thread {
  public:
   JumpTablePatcher(Address slot_start, uint32_t slot_index, Address thunk1,
-                   Address thunk2)
+                   Address thunk2, base::Mutex* jump_table_mutex)
       : Thread(Options("JumpTablePatcher")),
         slot_start_(slot_start),
         slot_index_(slot_index),
-        thunks_{thunk1, thunk2} {}
+        thunks_{thunk1, thunk2},
+        jump_table_mutex_(jump_table_mutex) {}
 
   void Run() override {
-    TRACE("Patcher is starting ...\n");
+    TRACE("Patcher %p is starting ...\n", this);
+    Address slot_address =
+        slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_);
+    // First, emit code to the two thunks.
+    for (Address thunk : thunks_) {
+      CompileJumpTableThunk(thunk, slot_address);
+    }
+    // Then, repeatedly patch the jump table to jump to one of the two thunks.
     constexpr int kNumberOfPatchIterations = 64;
     for (int i = 0; i < kNumberOfPatchIterations; ++i) {
-      TRACE("  patch slot " V8PRIxPTR_FMT " to thunk #%d\n",
-            slot_start_ + JumpTableAssembler::SlotIndexToOffset(slot_index_),
-            i % 2);
+      TRACE("  patcher %p patch slot " V8PRIxPTR_FMT " to thunk #%d\n", this,
+            slot_address, i % 2);
+      base::MutexGuard jump_table_guard(jump_table_mutex_);
       JumpTableAssembler::PatchJumpTableSlot(
           slot_start_, slot_index_, thunks_[i % 2], WasmCode::kFlushICache);
     }
-    TRACE("Patcher is stopping ...\n");
+    TRACE("Patcher %p is stopping ...\n", this);
   }
 
  private:
   Address slot_start_;
   uint32_t slot_index_;
   Address thunks_[2];
+  base::Mutex* jump_table_mutex_;
 };
 
 }  // namespace
@@ -198,9 +211,10 @@ class JumpTablePatcher : public v8::base::Thread {
 //      one of the runners is currently executing the jump-table slot.
 TEST(JumpTablePatchingStress) {
   constexpr int kNumberOfRunnerThreads = 5;
+  constexpr int kNumberOfPatcherThreads = 3;
 
 #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
-  // We need the branches (from GenerateJumpTableThunk) to be within near-call
+  // We need the branches (from CompileJumpTableThunk) to be within near-call
   // range of the jump table slots. The address hint to AllocateAssemblerBuffer
   // is not reliable enough to guarantee that we can always achieve this with
   // separate allocations, so for Arm64 we generate all code in a single
@@ -226,29 +240,42 @@ TEST(JumpTablePatchingStress) {
     TRACE("Hammering on jump table slot #%d ...\n", slot);
     uint32_t slot_offset = JumpTableAssembler::JumpSlotIndexToOffset(slot);
     std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
-    Address thunk1 =
-        GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
-                               &used_thunk_slots, &thunk_buffers);
-    Address thunk2 =
-        GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
-                               &used_thunk_slots, &thunk_buffers);
-    TRACE("  generated thunk1: " V8PRIxPTR_FMT "\n", thunk1);
-    TRACE("  generated thunk2: " V8PRIxPTR_FMT "\n", thunk2);
-    JumpTableAssembler::PatchJumpTableSlot(slot_start, slot, thunk1,
-                                           WasmCode::kFlushICache);
+    // Patch the jump table slot to jump to itself. This will later be patched
+    // by the patchers.
+    JumpTableAssembler::PatchJumpTableSlot(
+        slot_start, slot, slot_start + slot_offset, WasmCode::kFlushICache);
+    // For each patcher, allocate two thunks into which that patcher can emit
+    // code that eventually jumps back to {slot} in the jump table.
+    std::vector<Address> patcher_thunks;
+    for (int i = 0; i < 2 * kNumberOfPatcherThreads; ++i) {
+      Address thunk =
+          AllocateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
+                                 &used_thunk_slots, &thunk_buffers);
+      ZapCode(thunk, kThunkBufferSize);
+      patcher_thunks.push_back(thunk);
+      TRACE("  generated jump thunk: " V8PRIxPTR_FMT "\n",
+            patcher_thunks.back());
+    }
 
-    for (auto& buf : thunk_buffers) buf->MakeExecutable();
-    // Start multiple runner threads and a patcher thread that hammer on the
-    // same jump-table slot concurrently.
+    // Start multiple runner threads that execute the jump table slot
+    // concurrently.
     std::list<JumpTableRunner> runners;
     for (int runner = 0; runner < kNumberOfRunnerThreads; ++runner) {
       runners.emplace_back(slot_start + slot_offset, runner);
     }
-    JumpTablePatcher patcher(slot_start, slot, thunk1, thunk2);
+    // Start multiple patcher threads that concurrently generate code and
+    // insert jumps to that code into the jump table slot.
+    std::list<JumpTablePatcher> patchers;
+    // Only one patcher should modify the jump table at a time.
+    base::Mutex jump_table_mutex;
+    for (int i = 0; i < kNumberOfPatcherThreads; ++i) {
+      patchers.emplace_back(slot_start, slot, patcher_thunks[2 * i],
+                            patcher_thunks[2 * i + 1], &jump_table_mutex);
+    }
     global_stop_bit = 0;  // Signal runners to keep going.
-    for (auto& runner : runners) runner.Start();
-    patcher.Start();
-    patcher.Join();
+    for (auto& runner : runners) CHECK(runner.Start());
+    for (auto& patcher : patchers) CHECK(patcher.Start());
+    for (auto& patcher : patchers) patcher.Join();
     global_stop_bit = -1;  // Signal runners to stop.
     for (auto& runner : runners) runner.Join();
   }
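
The stress setup above — spinning runners plus mutex-serialized patchers flipping a single executable slot — can be approximated in portable C++ with an atomic standing in for the jump-table slot (illustrative analogue only, not part of the patch):

  #include <atomic>
  #include <mutex>
  #include <thread>
  #include <vector>

  int main() {
    std::atomic<int> slot{0};       // Stands in for the jump-table slot.
    std::atomic<bool> stop{false};  // Stands in for global_stop_bit.
    std::mutex patch_mutex;         // Only one patcher writes at a time.
    std::vector<std::thread> threads;
    for (int r = 0; r < 5; ++r)     // Runners "execute" the slot.
      threads.emplace_back([&] {
        while (!stop.load(std::memory_order_relaxed)) slot.load();
      });
    for (int p = 0; p < 3; ++p)     // Patchers rewrite the slot.
      threads.emplace_back([&, p] {
        for (int i = 0; i < 64; ++i) {
          std::lock_guard<std::mutex> guard(patch_mutex);
          slot.store(2 * p + (i % 2));  // Alternate between two "thunks".
        }
      });
    for (size_t i = 5; i < threads.size(); ++i) threads[i].join();
    stop = true;  // As in the test: join patchers first, then stop runners.
    for (size_t i = 0; i < 5; ++i) threads[i].join();
    return 0;
  }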
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index c8dd901161d024..3f96f8720fd434 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -618,7 +618,9 @@ WASM_EXEC_TEST(F32UConvertI64) {
                 {0x8000008000000000, 0x5F000000},
                 {0x8000008000000001, 0x5F000001},
                 {0x8000000000000400, 0x5F000000},
-                {0x8000000000000401, 0x5F000000}};
+                {0x8000000000000401, 0x5F000000},
+                {0x20000020000001, 0x5a000001},
+                {0xFFFFFe8000000001, 0x5f7FFFFF}};
   WasmRunner<float, uint64_t> r(execution_tier);
   BUILD(r, WASM_F32_UCONVERT_I64(WASM_GET_LOCAL(0)));
   for (size_t i = 0; i < arraysize(values); i++) {
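
The two added inputs probe round-to-nearest-even at the float32 precision boundary. In particular, 0x20000020000001 is 2^53 + 2^29 + 1: one past the midpoint between the adjacent float32 values 2^53 (bits 0x5A000000) and 2^53 + 2^30 (bits 0x5A000001), so it must round up rather than break a tie toward even. A quick host-side check of the same conversion (illustrative only; assumes the default round-to-nearest FP mode):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    uint64_t in = 0x20000020000001ull;  // 2^53 + 2^29 + 1.
    float f = static_cast<float>(in);   // Rounds up to 2^53 + 2^30.
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof bits);
    std::printf("0x%08X\n", bits);      // Expect 0x5A000001.
    return 0;
  }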
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index 60cda4addec629..354ff436c0c1e6 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -317,6 +317,54 @@ WASM_EXEC_TEST(AtomicFence) {
   CHECK_EQ(0, r.Call());
 }
 
+WASM_EXEC_TEST(AtomicStoreNoConsideredEffectful) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  FLAG_wasm_trap_handler = false;  // To use {Load} instead of {ProtectedLoad}.
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+  r.builder().SetHasSharedMemory();
+  BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+        WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_I32V_1(20),
+                              MachineRepresentation::kWord32),
+        kExprI64Eqz);
+  CHECK_EQ(1, r.Call());
+}
+
+void RunNoEffectTest(ExecutionTier execution_tier, WasmOpcode wasm_op) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  FLAG_wasm_trap_handler = false;  // To use {Load} instead of {ProtectedLoad}.
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+  r.builder().SetHasSharedMemory();
+  BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+        WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_I32V_1(20),
+                           MachineRepresentation::kWord32),
+        WASM_DROP, kExprI64Eqz);
+  CHECK_EQ(1, r.Call());
+}
+
+WASM_EXEC_TEST(AtomicAddNoConsideredEffectful) {
+  RunNoEffectTest(execution_tier, kExprI32AtomicAdd);
+}
+
+WASM_EXEC_TEST(AtomicExchangeNoConsideredEffectful) {
+  RunNoEffectTest(execution_tier, kExprI32AtomicExchange);
+}
+
+WASM_EXEC_TEST(AtomicCompareExchangeNoConsideredEffectful) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  FLAG_wasm_trap_handler = false;  // To use {Load} instead of {ProtectedLoad}.
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+  r.builder().SetHasSharedMemory();
+  BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO),
+        WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange, WASM_ZERO,
+                                WASM_ZERO, WASM_I32V_1(30),
+                                MachineRepresentation::kWord32),
+        WASM_DROP, kExprI32Eqz);
+  CHECK_EQ(1, r.Call());
+}
+
 }  // namespace test_run_wasm_atomics
 }  // namespace wasm
 }  // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 2d5d6a945cc071..748adc4a67a09c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -646,6 +646,54 @@ WASM_EXEC_TEST(I64AtomicCompareExchange32UFail) {
   CHECK_EQ(initial, r.builder().ReadMemory(&memory[0]));
 }
 
+WASM_EXEC_TEST(AtomicStoreNoConsideredEffectful) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  FLAG_wasm_trap_handler = false;  // To use {Load} instead of {ProtectedLoad}.
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
+  r.builder().SetHasSharedMemory();
+  BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+        WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_I64V(20),
+                              MachineRepresentation::kWord64),
+        kExprI64Eqz);
+  CHECK_EQ(1, r.Call());
+}
+
+void RunNoEffectTest(ExecutionTier execution_tier, WasmOpcode wasm_op) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  FLAG_wasm_trap_handler = false;  // To use {Load} instead of {ProtectedLoad}.
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
+  r.builder().SetHasSharedMemory();
+  BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+        WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_I64V(20),
+                           MachineRepresentation::kWord64),
+        WASM_DROP, kExprI64Eqz);
+  CHECK_EQ(1, r.Call());
+}
+
+WASM_EXEC_TEST(AtomicAddNoConsideredEffectful) {
+  RunNoEffectTest(execution_tier, kExprI64AtomicAdd);
+}
+
+WASM_EXEC_TEST(AtomicExchangeNoConsideredEffectful) {
+  RunNoEffectTest(execution_tier, kExprI64AtomicExchange);
+}
+
+WASM_EXEC_TEST(AtomicCompareExchangeNoConsideredEffectful) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  FLAG_wasm_trap_handler = false;  // To use {Load} instead of {ProtectedLoad}.
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+  r.builder().SetHasSharedMemory();
+  BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+        WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange, WASM_ZERO,
+                                WASM_I64V(0), WASM_I64V(30),
+                                MachineRepresentation::kWord64),
+        WASM_DROP, kExprI64Eqz);
+  CHECK_EQ(1, r.Call());
+}
+
 }  // namespace test_run_wasm_atomics_64
 }  // namespace wasm
 }  // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 526c5846a2a6cf..51d97650d4cf43 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -583,7 +583,7 @@ TEST(TestInterruptLoop) {
     int32_t* memory_array = reinterpret_cast<int32_t*>(memory->backing_store());
 
     InterruptThread thread(isolate, memory_array);
-    thread.Start();
+    CHECK(thread.Start());
     testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
     Address address = reinterpret_cast<Address>(
         &memory_array[InterruptThread::interrupt_location_]);
@@ -910,6 +910,8 @@ TEST(EmptyMemoryEmptyDataSegment) {
 
 TEST(MemoryWithOOBEmptyDataSegment) {
   {
+    FlagScope<bool> no_bulk_memory(
+        &v8::internal::FLAG_experimental_wasm_bulk_memory, false);
     Isolate* isolate = CcTest::InitIsolateOnce();
     HandleScope scope(isolate);
     testing::SetupIsolateForWasmModule(isolate);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index b1d95a12bb6b07..b48321df40b554 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -20,6 +20,7 @@ namespace test_run_wasm_simd {
 namespace {
 
 using DoubleUnOp = double (*)(double);
+using DoubleBinOp = double (*)(double, double);
 using DoubleCompareOp = int64_t (*)(double, double);
 using FloatUnOp = float (*)(float);
 using FloatBinOp = float (*)(float, float);
@@ -85,6 +86,13 @@ T Mul(T a, T b) {
   return a * b;
 }
 
+template <typename T, typename = typename std::enable_if<
+                          std::is_floating_point<T>::value>::type>
+T Div(T a, T b) {
+  // Workaround C++ undefined behavior when b is 0.
+  return base::Divide(a, b);
+}
+
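// Aside (illustrative, not part of the patch): plain a / b is undefined
// behavior at the C++ language level when b == 0, even though IEEE-754
// defines the result; hence the base::Divide helper. A hypothetical UB-free
// stand-in with the same intent (base::Divide's real implementation may
// differ; needs <cmath> and <limits>):
//
//   template <typename T>
//   T SafeDivide(T a, T b) {
//     if (b != 0) return a / b;
//     if (std::isnan(a) || a == 0) return std::numeric_limits<T>::quiet_NaN();
//     // The infinity's sign follows the signs of a and b (b may be -0.0).
//     return std::signbit(a) == std::signbit(b)
//                ? std::numeric_limits<T>::infinity()
//                : -std::numeric_limits<T>::infinity();
//   }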
 template <typename T>
 T Minimum(T a, T b) {
   return a <= b ? a : b;
@@ -271,7 +279,7 @@ T Sqrt(T a) {
   return std::sqrt(a);
 }
 
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 // only used for F64x2 tests below
 int64_t Equal(double a, double b) { return a == b ? -1 : 0; }
 
@@ -284,7 +292,7 @@ int64_t GreaterEqual(double a, double b) { return a >= b ? -1 : 0; }
 int64_t Less(double a, double b) { return a < b ? -1 : 0; }
 
 int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
-#endif  // V8_TARGET_ARCH_X64
+#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 
 }  // namespace
 
@@ -299,7 +307,7 @@ int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
 #define WASM_SIMD_SPLAT(Type, ...) __VA_ARGS__, WASM_SIMD_OP(kExpr##Type##Splat)
 #define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
 #define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
-#define WASM_SIMD_SHIFT_OP(op, shift, x) x, WASM_SIMD_OP(op), TO_BYTE(shift)
+#define WASM_SIMD_SHIFT_OP(op, x, y) x, y, WASM_SIMD_OP(op)
 #define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
   x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
 #define WASM_SIMD_SELECT(format, x, y, z) x, y, z, WASM_SIMD_OP(kExprS128Select)
@@ -652,12 +660,13 @@ WASM_SIMD_TEST(F32x4Sub) {
 WASM_SIMD_TEST(F32x4Mul) {
   RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Mul, Mul);
 }
-// v8:8425 tracks this test being enabled in the interpreter.
-WASM_SIMD_COMPILED_TEST(F32x4Min) {
+WASM_SIMD_TEST(F32x4Div) {
+  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Div, Div);
+}
+WASM_SIMD_TEST(F32x4Min) {
   RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Min, JSMin);
 }
-// v8:8425 tracks this test being enabled in the interpreter.
-WASM_SIMD_COMPILED_TEST(F32x4Max) {
+WASM_SIMD_TEST(F32x4Max) {
   RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Max, JSMax);
 }
 
@@ -715,7 +724,201 @@ WASM_SIMD_TEST(F32x4Le) {
   RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
 }
 
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+WASM_SIMD_TEST_NO_LOWERING(I64x2Splat) {
+  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+  // Set up a global to hold output vector.
+  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+  byte param1 = 0;
+  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
+        WASM_ONE);
+
+  FOR_INT64_INPUTS(x) {
+    r.Call(x);
+    int64_t expected = x;
+    for (int i = 0; i < 2; i++) {
+      int64_t actual = ReadLittleEndianValue<int64_t>(&g[i]);
+      CHECK_EQ(actual, expected);
+    }
+  }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractLane) {
+  WasmRunner<int64_t> r(execution_tier, lower_simd);
+  r.AllocateLocal(kWasmI64);
+  r.AllocateLocal(kWasmS128);
+  BUILD(
+      r,
+      WASM_SET_LOCAL(0, WASM_SIMD_I64x2_EXTRACT_LANE(
+                            0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(0xFFFFFFFFFF)))),
+      WASM_SET_LOCAL(1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(0))),
+      WASM_SIMD_I64x2_EXTRACT_LANE(1, WASM_GET_LOCAL(1)));
+  CHECK_EQ(0xFFFFFFFFFF, r.Call());
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ReplaceLane) {
+  WasmRunner<int32_t> r(execution_tier, lower_simd);
+  // Set up a global to hold input/output vector.
+  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+  // Build function to replace each lane with its index.
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_I64V(-1))),
+        WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_REPLACE_LANE(
+                                  0, WASM_GET_LOCAL(temp1), WASM_I64V(0))),
+        WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_REPLACE_LANE(
+                               1, WASM_GET_LOCAL(temp1), WASM_I64V(1))),
+        WASM_ONE);
+
+  r.Call();
+  for (int64_t i = 0; i < 2; i++) {
+    CHECK_EQ(i, ReadLittleEndianValue<int64_t>(&g[i]));
+  }
+}
+
+void RunI64x2UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+                      WasmOpcode opcode, Int64UnOp expected_op) {
+  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+  // Global to hold output.
+  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+  // Build fn to splat test value, perform unop, and write the result.
+  byte value = 0;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
+        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
+        WASM_ONE);
+
+  FOR_INT64_INPUTS(x) {
+    r.Call(x);
+    int64_t expected = expected_op(x);
+    for (int i = 0; i < 2; i++) {
+      CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+    }
+  }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Neg) {
+  RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Neg,
+                   base::NegateWithWraparound);
+}
+
+void RunI64x2ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+                         WasmOpcode opcode, Int64ShiftOp expected_op) {
+  for (int shift = 1; shift < 64; shift++) {
+    WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+    int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+    byte value = 0;
+    byte shift_index = r.AllocateLocal(kWasmI32);
+    byte simd1 = r.AllocateLocal(kWasmS128);
+    BUILD(r,
+          WASM_SET_LOCAL(simd1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
+          WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
+          WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
+                                                WASM_GET_LOCAL(shift_index))),
+          WASM_ONE);
+
+    FOR_INT64_INPUTS(x) {
+      r.Call(x);
+      int64_t expected = expected_op(x, shift);
+      for (int i = 0; i < 2; i++) {
+        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+      }
+    }
+  }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Shl) {
+  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2Shl,
+                      LogicalShiftLeft);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ShrS) {
+  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrS,
+                      ArithmeticShiftRight);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) {
+  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrU,
+                      LogicalShiftRight);
+}
+
+void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+                       WasmOpcode opcode, Int64BinOp expected_op) {
+  WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
+  // Global to hold output.
+  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+  // Build fn to splat test values, perform binop, and write the result.
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value1))),
+        WASM_SET_LOCAL(temp2, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value2))),
+        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
+                                           WASM_GET_LOCAL(temp2))),
+        WASM_ONE);
+
+  FOR_INT64_INPUTS(x) {
+    FOR_INT64_INPUTS(y) {
+      r.Call(x, y);
+      int64_t expected = expected_op(x, y);
+      for (int i = 0; i < 2; i++) {
+        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+      }
+    }
+  }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Add) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Add,
+                    base::AddWithWraparound);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Sub) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Sub,
+                    base::SubWithWraparound);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LtU) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtU, UnsignedLess);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LeU) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeU,
+                    UnsignedLessEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GtU) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtU, UnsignedGreater);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeU,
+                    UnsignedGreaterEqual);
+}
+
 WASM_SIMD_TEST_NO_LOWERING(F64x2Splat) {
   WasmRunner<int32_t, double> r(execution_tier, lower_simd);
   // Set up a global to hold output vector.
@@ -770,6 +973,16 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLane) {
   }
 }
 
+WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
+  WasmRunner<int64_t> r(execution_tier, lower_simd);
+  BUILD(r, WASM_IF_ELSE_L(
+               WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
+                               0, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e15))),
+                           WASM_I64_REINTERPRET_F64(WASM_F64(1e15))),
+               WASM_I64V(1), WASM_I64V(0)));
+  CHECK_EQ(1, r.Call());
+}
+
 WASM_SIMD_TEST_NO_LOWERING(F64x2ReplaceLane) {
   WasmRunner<int32_t> r(execution_tier, lower_simd);
   // Set up a global to hold input/output vector.
@@ -789,58 +1002,12 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2ReplaceLane) {
   }
 }
 
-void RunF64x2CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
-                           WasmOpcode opcode, DoubleCompareOp expected_op) {
-  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
-  // Set up global to hold mask output.
-  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
-  // Build fn to splat test values, perform compare op, and write the result.
-  byte value1 = 0, value2 = 1;
-  byte temp1 = r.AllocateLocal(kWasmS128);
-  byte temp2 = r.AllocateLocal(kWasmS128);
-  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
-        WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
-        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
-                                           WASM_GET_LOCAL(temp2))),
-        WASM_ONE);
-
-  FOR_FLOAT64_INPUTS(x) {
-    if (!PlatformCanRepresent(x)) continue;
-    FOR_FLOAT64_INPUTS(y) {
-      if (!PlatformCanRepresent(y)) continue;
-      double diff = x - y;  // Model comparison as subtraction.
-      if (!PlatformCanRepresent(diff)) continue;
-      r.Call(x, y);
-      int64_t expected = expected_op(x, y);
-      for (int i = 0; i < 2; i++) {
-        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
-      }
-    }
-  }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Eq) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Ne) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Gt) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Ge) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Lt) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Le) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
+bool IsExtreme(double x) {
+  double abs_x = std::fabs(x);
+  const double kSmallFloatThreshold = 1.0e-298;
+  const double kLargeFloatThreshold = 1.0e298;
+  return abs_x != 0.0 &&  // 0 or -0 are fine.
+         (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
 }
 
 bool IsSameNan(double expected, double actual) {
@@ -855,7 +1022,7 @@ bool IsSameNan(double expected, double actual) {
 bool IsCanonical(double actual) {
   uint64_t actual_bits = bit_cast<uint64_t>(actual);
   // Canonical NaN has quiet bit and no payload.
-  return (actual_bits & 0xFF80000000000000) == actual_bits;
+  return (actual_bits & 0xFFF8000000000000) == actual_bits;
 }
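// Aside (illustrative, not part of the patch): the corrected mask matches
// the float64 layout. The canonical quiet NaN is 0x7FF8000000000000 (sign
// bit optionally set), so the permitted bits are exactly
// 0xFFF8000000000000; the old 0xFF80000000000000 mask missed the low
// exponent bits and the quiet bit, so it rejected the canonical NaN itself.
// A quick check of the invariant (quiet_NaN() is the canonical NaN on
// common platforms):
//
//   double nan = std::numeric_limits<double>::quiet_NaN();
//   uint64_t nan_bits;
//   std::memcpy(&nan_bits, &nan, sizeof nan_bits);
//   CHECK_EQ(nan_bits & 0xFFF8000000000000, nan_bits);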
 
 void CheckDoubleResult(double x, double y, double expected, double actual,
@@ -948,7 +1115,6 @@ void RunF64x2UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
     }
   }
 }
-#undef FOR_FLOAT64_NAN_INPUTS
 
 WASM_SIMD_TEST_NO_LOWERING(F64x2Abs) {
   RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Abs, std::abs);
@@ -958,96 +1124,90 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Neg) {
   RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Neg, Negate);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2Splat) {
-  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
-  // Set up a global to hold output vector.
-  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
-  byte param1 = 0;
-  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
+void RunF64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+                       WasmOpcode opcode, DoubleBinOp expected_op) {
+  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+  // Global to hold output.
+  double* g = r.builder().AddGlobal<double>(kWasmS128);
+  // Build fn to splat test value, perform binop, and write the result.
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
+        WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
+        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
+                                           WASM_GET_LOCAL(temp2))),
         WASM_ONE);
 
-  FOR_INT64_INPUTS(x) {
-    r.Call(x);
-    int64_t expected = x;
-    for (int i = 0; i < 2; i++) {
-      int64_t actual = ReadLittleEndianValue<int64_t>(&g[i]);
-      CHECK_EQ(actual, expected);
+  FOR_FLOAT64_INPUTS(x) {
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT64_INPUTS(y) {
+      if (!PlatformCanRepresent(y)) continue;
+      double expected = expected_op(x, y);
+      if (!PlatformCanRepresent(expected)) continue;
+      r.Call(x, y);
+      for (int i = 0; i < 2; i++) {
+        double actual = ReadLittleEndianValue<double>(&g[i]);
+        CheckDoubleResult(x, y, expected, actual, true /* exact */);
+      }
     }
   }
-}
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
-  WasmRunner<int64_t> r(execution_tier, lower_simd);
-  BUILD(r, WASM_IF_ELSE_L(
-               WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
-                               0, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e15))),
-                           WASM_I64_REINTERPRET_F64(WASM_F64(1e15))),
-               WASM_I64V(1), WASM_I64V(0)));
-  CHECK_EQ(1, r.Call());
+  FOR_FLOAT64_NAN_INPUTS(i) {
+    double x = bit_cast<double>(double_nan_test_array[i]);
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT64_NAN_INPUTS(j) {
+      double y = bit_cast<double>(double_nan_test_array[j]);
+      double expected = expected_op(x, y);
+      if (!PlatformCanRepresent(expected)) continue;
+      r.Call(x, y);
+      for (int i = 0; i < 2; i++) {
+        double actual = ReadLittleEndianValue<double>(&g[i]);
+        CheckDoubleResult(x, y, expected, actual, true /* exact */);
+      }
+    }
+  }
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2ReplaceLane) {
-  WasmRunner<int32_t> r(execution_tier, lower_simd);
-  // Set up a global to hold input/output vector.
-  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
-  // Build function to replace each lane with its index.
-  byte temp1 = r.AllocateLocal(kWasmS128);
-  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_I64V(-1))),
-        WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_REPLACE_LANE(
-                                  0, WASM_GET_LOCAL(temp1), WASM_I64V(0))),
-        WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_REPLACE_LANE(
-                               1, WASM_GET_LOCAL(temp1), WASM_I64V(1))),
-        WASM_ONE);
+#undef FOR_FLOAT64_NAN_INPUTS
 
-  r.Call();
-  for (int64_t i = 0; i < 2; i++) {
-    CHECK_EQ(i, ReadLittleEndianValue<int64_t>(&g[i]));
-  }
+WASM_SIMD_TEST_NO_LOWERING(F64x2Add) {
+  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Add, Add);
 }
 
-void RunI64x2UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
-                      WasmOpcode opcode, Int64UnOp expected_op) {
-  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
-  // Global to hold output.
-  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
-  // Build fn to splat test value, perform unop, and write the result.
-  byte value = 0;
-  byte temp1 = r.AllocateLocal(kWasmS128);
-  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
-        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
-        WASM_ONE);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Sub) {
+  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Sub, Sub);
+}
 
-  FOR_INT64_INPUTS(x) {
-    r.Call(x);
-    int64_t expected = expected_op(x);
-    for (int i = 0; i < 2; i++) {
-      CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
-    }
-  }
+WASM_SIMD_TEST_NO_LOWERING(F64x2Mul) {
+  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Mul, Mul);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2Neg) {
-  RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Neg,
-                   base::NegateWithWraparound);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Div) {
+  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Div, Div);
 }
 
-void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
-                       WasmOpcode opcode, Int64BinOp expected_op) {
-  WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
-  // Global to hold output.
+void RunF64x2CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+                           WasmOpcode opcode, DoubleCompareOp expected_op) {
+  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+  // Set up global to hold mask output.
   int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
-  // Build fn to splat test values, perform binop, and write the result.
+  // Build fn to splat test values, perform compare op, and write the result.
   byte value1 = 0, value2 = 1;
   byte temp1 = r.AllocateLocal(kWasmS128);
   byte temp2 = r.AllocateLocal(kWasmS128);
-  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value1))),
-        WASM_SET_LOCAL(temp2, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value2))),
+  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
+        WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
         WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                            WASM_GET_LOCAL(temp2))),
         WASM_ONE);
 
-  FOR_INT64_INPUTS(x) {
-    FOR_INT64_INPUTS(y) {
+  FOR_FLOAT64_INPUTS(x) {
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT64_INPUTS(y) {
+      if (!PlatformCanRepresent(y)) continue;
+      double diff = x - y;  // Model comparison as subtraction.
+      if (!PlatformCanRepresent(diff)) continue;
       r.Call(x, y);
       int64_t expected = expected_op(x, y);
       for (int i = 0; i < 2; i++) {
@@ -1057,101 +1217,63 @@ void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
   }
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2Add) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Add,
-                    base::AddWithWraparound);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2Sub) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Sub,
-                    base::SubWithWraparound);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2Mul) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Mul,
-                    base::MulWithWraparound);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Eq) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Ne) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Gt) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Ge) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Lt) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2LtU) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtU, UnsignedLess);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Le) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2LeU) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeU,
-                    UnsignedLessEqual);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Min) {
+  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Min, JSMin);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2GtU) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtU, UnsignedGreater);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Max) {
+  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Max, JSMax);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) {
-  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeU,
-                    UnsignedGreaterEqual);
+#if V8_TARGET_ARCH_X64
+WASM_SIMD_TEST_NO_LOWERING(I64x2Mul) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Mul,
+                    base::MulWithWraparound);
 }
 
-void RunI64x2ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
-                         WasmOpcode opcode, Int64ShiftOp expected_op) {
-  for (int shift = 1; shift < 64; shift++) {
-    WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
-    int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
-    byte value = 0;
-    byte simd1 = r.AllocateLocal(kWasmS128);
-    BUILD(r,
-          WASM_SET_LOCAL(simd1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
-          WASM_SET_GLOBAL(
-              0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
-          WASM_ONE);
-
-    FOR_INT64_INPUTS(x) {
-      r.Call(x);
-      int64_t expected = expected_op(x, shift);
-      for (int i = 0; i < 2; i++) {
-        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
-      }
-    }
-  }
+WASM_SIMD_TEST_NO_LOWERING(I64x2MinS) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MinS, Minimum);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2Shl) {
-  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2Shl,
-                      LogicalShiftLeft);
+WASM_SIMD_TEST_NO_LOWERING(I64x2MaxS) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MaxS, Maximum);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2ShrS) {
-  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrS,
-                      ArithmeticShiftRight);
+WASM_SIMD_TEST_NO_LOWERING(I64x2MinU) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MinU,
+                    UnsignedMinimum);
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) {
-  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrU,
-                      LogicalShiftRight);
+WASM_SIMD_TEST_NO_LOWERING(I64x2MaxU) {
+  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MaxU,
+                    UnsignedMaximum);
 }
 #endif  // V8_TARGET_ARCH_X64
+#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 
 WASM_SIMD_TEST(I32x4Splat) {
   WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -1534,16 +1656,17 @@ void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
     WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
     int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
     byte value = 0;
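+    // The shift opcode takes the shift count as an i32 value operand, so
+    // materialize the count in a local before building the shift.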
+    byte shift_index = r.AllocateLocal(kWasmI32);
     byte simd1 = r.AllocateLocal(kWasmS128);
-    BUILD(r,
+    BUILD(r, WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
           WASM_SET_LOCAL(simd1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
-          WASM_SET_GLOBAL(
-              0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
+          WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
+                                                WASM_GET_LOCAL(shift_index))),
           WASM_ONE);
 
     FOR_INT32_INPUTS(x) {
       r.Call(x);
-      float expected = expected_op(x, shift);
+      int32_t expected = expected_op(x, shift);
       for (int i = 0; i < 4; i++) {
         CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
       }
@@ -1551,17 +1674,17 @@ void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
   }
 }
 
-WASM_SIMD_TEST(I32x4Shl) {
+WASM_SIMD_TEST_NO_LOWERING(I32x4Shl) {
   RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
                       LogicalShiftLeft);
 }
 
-WASM_SIMD_TEST(I32x4ShrS) {
+WASM_SIMD_TEST_NO_LOWERING(I32x4ShrS) {
   RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrS,
                       ArithmeticShiftRight);
 }
 
-WASM_SIMD_TEST(I32x4ShrU) {
+WASM_SIMD_TEST_NO_LOWERING(I32x4ShrU) {
   RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrU,
                       LogicalShiftRight);
 }
@@ -1784,10 +1907,12 @@ void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
     int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
     byte value = 0;
     byte simd1 = r.AllocateLocal(kWasmS128);
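+    // As in RunI32x4ShiftOpTest, the shift count is an i32 value operand.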
+    byte shift_index = r.AllocateLocal(kWasmI32);
     BUILD(r,
           WASM_SET_LOCAL(simd1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
-          WASM_SET_GLOBAL(
-              0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
+          WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
+          WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
+                                                WASM_GET_LOCAL(shift_index))),
           WASM_ONE);
 
     FOR_INT16_INPUTS(x) {
@@ -1800,17 +1925,17 @@ void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
   }
 }
 
-WASM_SIMD_TEST(I16x8Shl) {
+WASM_SIMD_TEST_NO_LOWERING(I16x8Shl) {
   RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
                       LogicalShiftLeft);
 }
 
-WASM_SIMD_TEST(I16x8ShrS) {
+WASM_SIMD_TEST_NO_LOWERING(I16x8ShrS) {
   RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrS,
                       ArithmeticShiftRight);
 }
 
-WASM_SIMD_TEST(I16x8ShrU) {
+WASM_SIMD_TEST_NO_LOWERING(I16x8ShrU) {
   RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrU,
                       LogicalShiftRight);
 }
@@ -1998,15 +2123,17 @@ void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
     int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
     byte value = 0;
     byte simd1 = r.AllocateLocal(kWasmS128);
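+    // The shift count is again passed through an i32 local.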
+    byte shift_index = r.AllocateLocal(kWasmI32);
     BUILD(r,
           WASM_SET_LOCAL(simd1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
-          WASM_SET_GLOBAL(
-              0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
+          WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
+          WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
+                                                WASM_GET_LOCAL(shift_index))),
           WASM_ONE);
 
     FOR_INT8_INPUTS(x) {
       r.Call(x);
-      float expected = expected_op(x, shift);
+      int8_t expected = expected_op(x, shift);
       for (int i = 0; i < 16; i++) {
         CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
       }
@@ -2014,17 +2141,17 @@ void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
   }
 }
 
-WASM_SIMD_TEST(I8x16Shl) {
+WASM_SIMD_TEST_NO_LOWERING(I8x16Shl) {
   RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16Shl,
                       LogicalShiftLeft);
 }
 
-WASM_SIMD_TEST(I8x16ShrS) {
+WASM_SIMD_TEST_NO_LOWERING(I8x16ShrS) {
   RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrS,
                       ArithmeticShiftRight);
 }
 
-WASM_SIMD_TEST(I8x16ShrU) {
+WASM_SIMD_TEST_NO_LOWERING(I8x16ShrU) {
   RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrU,
                       LogicalShiftRight);
 }
@@ -2432,13 +2559,14 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
 // Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
 // result. Use relational ops on numeric vectors to create the boolean vector
 // test inputs. Test inputs with all true, all false, one true, and one false.
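 // For example, replacing just lane (lanes - 1) of an all-zero vector with 1
 // makes Eq against the original vector produce a mask that is true in every
 // lane but one, so AnyTrue still reports 1 while AllTrue drops to 0.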
-#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes)                           \
+#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type)                 \
   WASM_SIMD_TEST(ReductionTest##lanes) {                                       \
     WasmRunner<int32_t> r(execution_tier, lower_simd);                         \
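+    /* 64x2 reductions are not yet supported by SIMD scalar lowering, */      \
+    /* so skip the 2-lane case when lowering is forced. */                    \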
+    if (lanes == 2 && lower_simd == kLowerSimd) return;                        \
     byte zero = r.AllocateLocal(kWasmS128);                                    \
     byte one_one = r.AllocateLocal(kWasmS128);                                 \
     byte reduced = r.AllocateLocal(kWasmI32);                                  \
-    BUILD(r, WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)),     \
+    BUILD(r, WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(int_type(0))),   \
           WASM_SET_LOCAL(                                                      \
               reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue,                \
                                       WASM_SIMD_BINOP(kExprI##format##Eq,      \
@@ -2469,7 +2597,7 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
                   WASM_RETURN1(WASM_ZERO)),                                    \
           WASM_SET_LOCAL(one_one,                                              \
                          WASM_SIMD_I##format##_REPLACE_LANE(                   \
-                             lanes - 1, WASM_GET_LOCAL(zero), WASM_ONE)),      \
+                             lanes - 1, WASM_GET_LOCAL(zero), int_type(1))),   \
           WASM_SET_LOCAL(                                                      \
               reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue,                \
                                       WASM_SIMD_BINOP(kExprI##format##Eq,      \
@@ -2502,9 +2630,12 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
     CHECK_EQ(1, r.Call());                                                     \
   }
 
-WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4)
-WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8)
-WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16)
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+WASM_SIMD_BOOL_REDUCTION_TEST(64x2, 2, WASM_I64V)
+#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4, WASM_I32V)
+WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8, WASM_I32V)
+WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16, WASM_I32V)
 
 WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
   WasmRunner<int32_t> r(execution_tier, lower_simd);
@@ -2758,7 +2889,7 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
   CHECK_EQ(GetScalar(global, 3), 65.0f);
 }
 
-WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
+WASM_SIMD_TEST(SimdLoadStoreLoad) {
   WasmRunner<int32_t> r(execution_tier, lower_simd);
   int32_t* memory =
       r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
@@ -2776,11 +2907,10 @@ WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
 
 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
     V8_TARGET_ARCH_ARM
-// V8:8665 - Tracking bug to enable reduction tests in the interpreter,
-// and for SIMD lowering.
 #define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type)                \
-  WASM_SIMD_TEST_NO_LOWERING(S##format##AnyTrue) {                            \
+  WASM_SIMD_TEST(S##format##AnyTrue) {                                        \
     WasmRunner<int32_t, param_type> r(execution_tier, lower_simd);            \
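+    /* Skip the 64x2 case under forced lowering (not supported there). */     \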
+    if (lanes == 2 && lower_simd == kLowerSimd) return;                       \
     byte simd = r.AllocateLocal(kWasmS128);                                   \
     BUILD(                                                                    \
         r,                                                                    \
@@ -2790,16 +2920,17 @@ WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
     DCHECK_EQ(1, r.Call(5));                                                  \
     DCHECK_EQ(0, r.Call(0));                                                  \
   }
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 WASM_SIMD_ANYTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
-#endif  // V8_TARGET_ARCH_X64
+#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
 WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff, int32_t)
 WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
 
 #define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type)                \
-  WASM_SIMD_TEST_NO_LOWERING(S##format##AllTrue) {                            \
+  WASM_SIMD_TEST(S##format##AllTrue) {                                        \
     WasmRunner<int32_t, param_type> r(execution_tier, lower_simd);            \
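+    /* Skip the 64x2 case under forced lowering, as for any_true. */          \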
+    if (lanes == 2 && lower_simd == kLowerSimd) return;                       \
     byte simd = r.AllocateLocal(kWasmS128);                                   \
     BUILD(                                                                    \
         r,                                                                    \
@@ -2809,9 +2940,9 @@ WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
     DCHECK_EQ(1, r.Call(0x1));                                                \
     DCHECK_EQ(0, r.Call(0));                                                  \
   }
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 WASM_SIMD_ALLTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
-#endif  // V8_TARGET_ARCH_X64
+#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
 WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff, int32_t)
 WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff, int32_t)
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 93ae92d6978000..795fa30e725d21 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -267,7 +267,7 @@ size_t GetFunctionOffset(i::Isolate* isolate, const uint8_t* buffer,
       kAllWasmFeatures, buffer, buffer + size, false, ModuleOrigin::kWasmOrigin,
       isolate->counters(), isolate->wasm_engine()->allocator());
   CHECK(result.ok());
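+  // Look up the function at the requested index.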
-  const WasmFunction* func = &result.value()->functions[1];
+  const WasmFunction* func = &result.value()->functions[index];
   return func->code.offset();
 }
 
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index 855e44aba22f03..b5bacf57d4b36b 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -273,8 +273,8 @@ TEST(SharedEngineRunThreadedBuildingSync) {
     Handle<WasmInstanceObject> instance = isolate.CompileAndInstantiate(buffer);
     CHECK_EQ(42, isolate.Run(instance));
   });
-  thread1.Start();
-  thread2.Start();
+  CHECK(thread1.Start());
+  CHECK(thread2.Start());
   thread1.Join();
   thread2.Join();
 }
@@ -295,8 +295,8 @@ TEST(SharedEngineRunThreadedBuildingAsync) {
         CompileAndInstantiateAsync(isolate, buffer);
     CHECK_EQ(42, isolate.Run(instance));
   });
-  thread1.Start();
-  thread2.Start();
+  CHECK(thread1.Start());
+  CHECK(thread2.Start());
   thread1.Join();
   thread2.Join();
 }
@@ -321,8 +321,8 @@ TEST(SharedEngineRunThreadedExecution) {
     Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
     CHECK_EQ(23, isolate.Run(instance));
   });
-  thread1.Start();
-  thread2.Start();
+  CHECK(thread1.Start());
+  CHECK(thread2.Start());
   thread1.Join();
   thread2.Join();
 }
@@ -358,7 +358,7 @@ TEST(SharedEngineRunThreadedTierUp) {
         &module->module()->functions[0], ExecutionTier::kTurbofan);
     CHECK_EQ(23, isolate.Run(instance));
   });
-  for (auto& thread : threads) thread.Start();
+  for (auto& thread : threads) CHECK(thread.Start());
   for (auto& thread : threads) thread.Join();
 }
 
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 6a17b81c56ad1c..528d71f53c6642 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -47,8 +47,8 @@ TestingModuleBuilder::TestingModuleBuilder(
   if (maybe_import) {
     // Manually compile an import wrapper and insert it into the instance.
     CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
-    auto resolved = compiler::ResolveWasmImportCall(maybe_import->js_function,
-                                                    maybe_import->sig, false);
+    auto resolved = compiler::ResolveWasmImportCall(
+        maybe_import->js_function, maybe_import->sig, enabled_features_);
     compiler::WasmImportCallKind kind = resolved.first;
     Handle<JSReceiver> callable = resolved.second;
     WasmImportWrapperCache::ModificationScope cache_scope(
@@ -159,7 +159,7 @@ void TestingModuleBuilder::FreezeSignatureMapAndInitializeWrapperCache() {
 Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
   FreezeSignatureMapAndInitializeWrapperCache();
   SetExecutable();
-  return WasmInstanceObject::GetOrCreateWasmExportedFunction(
+  return WasmInstanceObject::GetOrCreateWasmExternalFunction(
       isolate_, instance_object(), index);
 }
 
@@ -324,9 +324,14 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
   Handle<Script> script =
       isolate_->factory()->NewScript(isolate_->factory()->empty_string());
   script->set_type(Script::TYPE_WASM);
+
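+  // Create the native module directly: this synthetic test module has no
+  // wire bytes yet, so install an empty vector and the runtime stubs before
+  // wrapping it in a WasmModuleObject.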
+  auto native_module = isolate_->wasm_engine()->NewNativeModule(
+      isolate_, enabled_features_, test_module_);
+  native_module->SetWireBytes(OwnedVector<const uint8_t>());
+  native_module->SetRuntimeStubs(isolate_);
+
   Handle<WasmModuleObject> module_object =
-      WasmModuleObject::New(isolate_, enabled_features_, test_module_, {},
-                            script, Handle<ByteArray>::null());
+      WasmModuleObject::New(isolate_, std::move(native_module), script);
   // This method is called when we initialize TestEnvironment. We don't
   // have a memory yet, so we won't create it here. We'll update the
   // interpreter when we get a memory. We do have globals, though.
@@ -360,7 +365,7 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
     FATAL("Verification failed; pc = +%x, msg = %s", result.error().offset(),
           result.error().message().c_str());
   }
-  builder->LowerInt64();
+  builder->LowerInt64(compiler::WasmGraphBuilder::kCalledFromWasm);
   if (!CpuFeatures::SupportsWasmSimd128()) {
     builder->SimdScalarLoweringForTesting();
   }
@@ -453,8 +458,8 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
   if (!code_.ToHandle(&code)) {
     Isolate* isolate = CcTest::InitIsolateOnce();
 
-    auto call_descriptor =
-        compiler::Linkage::GetSimplifiedCDescriptor(zone(), signature_, true);
+    auto call_descriptor = compiler::Linkage::GetSimplifiedCDescriptor(
+        zone(), signature_, CallDescriptor::kInitializeRootRegister);
 
     if (kSystemPointerSize == 4) {
       size_t num_params = signature_->parameter_count();
diff --git a/deps/v8/test/common/wasm/OWNERS b/deps/v8/test/common/wasm/OWNERS
index 4b6b34d24a7129..a89e5f10561a45 100644
--- a/deps/v8/test/common/wasm/OWNERS
+++ b/deps/v8/test/common/wasm/OWNERS
@@ -1 +1 @@
-file://src/wasm/OWNERS
+file:../../../src/wasm/OWNERS
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 8de76559144f1e..d2869509cc6cd5 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -101,6 +101,12 @@ bool InterpretWasmModuleForTesting(Isolate* isolate,
       case kWasmF64:
         arguments[i] = WasmValue(0.0);
         break;
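+      // Reference-typed parameters default to the JS null value.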
+      case kWasmAnyRef:
+      case kWasmFuncRef:
+      case kWasmExnRef:
+        arguments[i] =
+            WasmValue(Handle<Object>::cast(isolate->factory()->null_value()));
+        break;
       default:
         UNREACHABLE();
     }
diff --git a/deps/v8/test/debugger/OWNERS b/deps/v8/test/debugger/OWNERS
index 39aa08cd8c7123..611a024b5755bb 100644
--- a/deps/v8/test/debugger/OWNERS
+++ b/deps/v8/test/debugger/OWNERS
@@ -1 +1 @@
-file://src/debug/OWNERS
+file:../../src/debug/OWNERS
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index 028ce7083a82ce..26bb70fce70d47 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -238,10 +238,11 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
   callee.Return(static_cast<int>(desc->ReturnCount()), returns.get());
 
   OptimizedCompilationInfo info(ArrayVector("testing"), &zone, Code::STUB);
-  Handle<Code> code = Pipeline::GenerateCodeForTesting(
-                          &info, i_isolate, desc, callee.graph(),
-                          AssemblerOptions::Default(i_isolate), callee.Export())
-                          .ToHandleChecked();
+  Handle<Code> code =
+      Pipeline::GenerateCodeForTesting(&info, i_isolate, desc, callee.graph(),
+                                       AssemblerOptions::Default(i_isolate),
+                                       callee.ExportForTest())
+          .ToHandleChecked();
 
   std::shared_ptr<wasm::NativeModule> module =
       AllocateNativeModule(i_isolate, code->raw_instruction_size());
@@ -286,7 +287,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
   Handle<Code> wrapper_code =
       Pipeline::GenerateCodeForTesting(
           &wrapper_info, i_isolate, wrapper_desc, caller.graph(),
-          AssemblerOptions::Default(i_isolate), caller.Export())
+          AssemblerOptions::Default(i_isolate), caller.ExportForTest())
           .ToHandleChecked();
 
   auto fn = GeneratedCode<int32_t>::FromCode(*wrapper_code);
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index 20cb024a1a508e..6dafe950fb4548 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -337,8 +337,14 @@ bool ResultsAreIdentical(FuzzerArgs* args) {
   std::string source =
       "assertEquals(fast.exception, slow.exception);\n"
       "assertEquals(fast.result, slow.result);\n"
-      "if (fast.result !== null)\n"
+      "if (fast.result !== null) {\n"
       "  assertEquals(fast.result.groups, slow.result.groups);\n"
+      "  assertEquals(fast.result.indices, slow.result.indices);\n"
+      "  if (fast.result.indices !== undefined) {\n"
+      "    assertEquals(fast.result.indices.groups,\n"
+      "                 slow.result.indices.groups);\n"
+      "  }\n"
+      "}\n"
       "assertEquals(fast.re.lastIndex, slow.re.lastIndex);\n";
 
   v8::Local<v8::Value> result;
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 5a60eb63aa1ba1..35e942b2622906 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -8,9 +8,10 @@
 
 #include "include/v8.h"
 #include "src/execution/isolate.h"
-#include "src/utils/ostreams.h"
 #include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
 #include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-feature-flags.h"
 #include "src/wasm/wasm-module-builder.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-objects-inl.h"
@@ -88,6 +89,12 @@ const char* ValueTypeToConstantName(ValueType type) {
       return "kWasmF32";
     case kWasmF64:
       return "kWasmF64";
+    case kWasmAnyRef:
+      return "kWasmAnyRef";
+    case kWasmFuncRef:
+      return "kWasmFuncRef";
+    case kWasmExnRef:
+      return "kWasmExnRef";
     default:
       UNREACHABLE();
   }
@@ -140,6 +147,8 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
         "can be\n"
         "// found in the LICENSE file.\n"
         "\n"
+        "// Flags: --wasm-staging\n"
+        "\n"
         "load('test/mjsunit/wasm/wasm-module-builder.js');\n"
         "\n"
         "(function() {\n"
@@ -249,6 +258,14 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
 
 void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
                                          bool require_valid) {
+  // We explicitly enable staged WebAssembly features here to increase fuzzer
+  // coverage; libfuzzer-based fuzzers cannot enable these flags by
+  // themselves.
+#define ENABLE_STAGED_FEATURES(feat, desc, val) \
+  FlagScope<bool> enable_##feat(&FLAG_experimental_wasm_##feat, true);
+  FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
+#undef ENABLE_STAGED_FEATURES
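+  // Each FlagScope above restores the previous flag value automatically when
+  // this function returns.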
+
   // Strictly enforce the input size limit. Note that setting "max_len" on the
   // fuzzer target is not enough, since different fuzzers are used and not all
   // respect that limit.
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index 53bbac6a015bb6..e8aebbfbecc23f 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -11,6 +11,7 @@
 #include "src/heap/factory.h"
 #include "src/objects/objects-inl.h"
 #include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-feature-flags.h"
 #include "src/wasm/wasm-module.h"
 #include "test/common/wasm/flag-utils.h"
 #include "test/common/wasm/wasm-module-runner.h"
@@ -20,6 +21,16 @@
 namespace i = v8::internal;
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  // We explicitly enable staged WebAssembly features here to increase fuzzer
+  // coverage; libfuzzer-based fuzzers cannot enable these flags by
+  // themselves.
+#define ENABLE_STAGED_FEATURES(feat, desc, val) \
+  i::FlagScope<bool> enable_##feat(&i::FLAG_experimental_wasm_##feat, true);
+  FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
+#undef ENABLE_STAGED_FEATURES
+
+  // We reduce the maximum memory size and table size of WebAssembly instances
+  // to avoid OOMs in the fuzzer.
   i::FlagScope<uint32_t> max_mem_flag_scope(&i::FLAG_wasm_max_mem_pages, 32);
   i::FlagScope<uint32_t> max_table_size_scope(&i::FLAG_wasm_max_table_size,
                                               100);
diff --git a/deps/v8/test/inspector/OWNERS b/deps/v8/test/inspector/OWNERS
index eef15ad6d868f3..50cd83c40ca5a8 100644
--- a/deps/v8/test/inspector/OWNERS
+++ b/deps/v8/test/inspector/OWNERS
@@ -1,3 +1,3 @@
-file://src/inspector/OWNERS
+file:../../src/inspector/OWNERS
 
 # COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/test/inspector/debugger/class-private-fields-scopes-expected.txt b/deps/v8/test/inspector/debugger/class-private-fields-scopes-expected.txt
new file mode 100644
index 00000000000000..a018bdd7a5efaa
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-fields-scopes-expected.txt
@@ -0,0 +1,141 @@
+Test private class fields in scopes
+
+Running test: testScopesPaused
+[
+    [0] : {
+        callFrameId : <callFrameId>
+        functionLocation : {
+            columnNumber : 16
+            lineNumber : 4
+            scriptId : <scriptId>
+        }
+        functionName : A
+        location : {
+            columnNumber : 6
+            lineNumber : 5
+            scriptId : <scriptId>
+        }
+        scopeChain : [
+            [0] : {
+                endLocation : {
+                    columnNumber : 5
+                    lineNumber : 6
+                    scriptId : <scriptId>
+                }
+                name : A
+                object : {
+                    className : Object
+                    description : Object
+                    objectId : <objectId>
+                    type : object
+                }
+                startLocation : {
+                    columnNumber : 16
+                    lineNumber : 4
+                    scriptId : <scriptId>
+                }
+                type : local
+            }
+            [1] : {
+                object : {
+                    className : global
+                    description : global
+                    objectId : <objectId>
+                    type : object
+                }
+                type : global
+            }
+        ]
+        this : {
+            className : A
+            description : A
+            objectId : <objectId>
+            type : object
+        }
+        url :
+    }
+    [1] : {
+        callFrameId : <callFrameId>
+        functionLocation : {
+            columnNumber : 12
+            lineNumber : 1
+            scriptId : <scriptId>
+        }
+        functionName : run
+        location : {
+            columnNumber : 2
+            lineNumber : 8
+            scriptId : <scriptId>
+        }
+        scopeChain : [
+            [0] : {
+                endLocation : {
+                    columnNumber : 1
+                    lineNumber : 9
+                    scriptId : <scriptId>
+                }
+                name : run
+                object : {
+                    className : Object
+                    description : Object
+                    objectId : <objectId>
+                    type : object
+                }
+                startLocation : {
+                    columnNumber : 12
+                    lineNumber : 1
+                    scriptId : <scriptId>
+                }
+                type : local
+            }
+            [1] : {
+                object : {
+                    className : global
+                    description : global
+                    objectId : <objectId>
+                    type : object
+                }
+                type : global
+            }
+        ]
+        this : {
+            className : global
+            description : global
+            objectId : <objectId>
+            type : object
+        }
+        url :
+    }
+    [2] : {
+        callFrameId : <callFrameId>
+        functionLocation : {
+            columnNumber : 0
+            lineNumber : 0
+            scriptId : <scriptId>
+        }
+        functionName :
+        location : {
+            columnNumber : 0
+            lineNumber : 0
+            scriptId : <scriptId>
+        }
+        scopeChain : [
+            [0] : {
+                object : {
+                    className : global
+                    description : global
+                    objectId : <objectId>
+                    type : object
+                }
+                type : global
+            }
+        ]
+        this : {
+            className : global
+            description : global
+            objectId : <objectId>
+            type : object
+        }
+        url :
+    }
+]
diff --git a/deps/v8/test/inspector/debugger/class-private-fields-scopes.js b/deps/v8/test/inspector/debugger/class-private-fields-scopes.js
new file mode 100644
index 00000000000000..11bea38f0e9dad
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-fields-scopes.js
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let { session, contextGroup, Protocol } = InspectorTest.start(
+  "Test private class fields in scopes"
+);
+
+contextGroup.addScript(`
+function run() {
+  class A {
+    #foo = "hello"
+    constructor () {
+      debugger;
+    }
+  };
+  new A();
+}`);
+
+InspectorTest.runAsyncTestSuite([
+  async function testScopesPaused() {
+    Protocol.Debugger.enable();
+    Protocol.Runtime.evaluate({ expression: "run()" });
+
+    let {
+      params: { callFrames }
+    } = await Protocol.Debugger.oncePaused(); // inside A()
+    InspectorTest.logMessage(callFrames);
+    Protocol.Debugger.resume();
+    Protocol.Debugger.disable();
+  }
+]);
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
index 23342131247aa2..d21ebc783e156e 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
@@ -259,6 +259,7 @@ expression: Promise.resolve(42)
     value : 42
 }
 
+
 Running test: privateNames
 expression: new class { #foo = 1; #bar = 2; baz = 3;}
 {
@@ -295,3 +296,18 @@ expression: new class extends class { #baz = 3; } { #foo = 1; #bar = 2; }
 }
 
 expression: new class extends class { constructor() { return new Proxy({}, {}); } } { #foo = 1; #bar = 2; }
+
+
+Running test: functionProxy
+expression: new Proxy(() => {}, { get: () => x++ })
+{
+    name : length
+    type : number
+    value : 0
+}
+{
+    name : name
+    type : string
+    value : 
+}
+
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
index cfbdba816b8b6f..f542683aa49159 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
@@ -82,6 +82,12 @@ InspectorTest.runTestSuite([
       .then(() => checkExpression("new class extends class { #baz = 3; } { #foo = 1; #bar = 2; }"))
       .then(() => checkExpression("new class extends class { constructor() { return new Proxy({}, {}); } } { #foo = 1; #bar = 2; }"))
       .then(next);
+  },
+
+  function functionProxy(next)
+  {
+    checkExpression("new Proxy(() => {}, { get: () => x++ })")
+      .then(next);
   }
 ]);
 
diff --git a/deps/v8/test/inspector/runtime/await-promise-expected.txt b/deps/v8/test/inspector/runtime/await-promise-expected.txt
index 2b906dd49b106c..975ef8177ebb38 100644
--- a/deps/v8/test/inspector/runtime/await-promise-expected.txt
+++ b/deps/v8/test/inspector/runtime/await-promise-expected.txt
@@ -19,10 +19,22 @@ Running test: testRejectedPromise
         exceptionDetails : {
             columnNumber : 0
             exception : {
-                type : object
-                value : {
-                    a : 1
+                className : Object
+                description : Object
+                objectId : <objectId>
+                preview : {
+                    description : Object
+                    overflow : false
+                    properties : [
+                        [0] : {
+                            name : a
+                            type : number
+                            value : 1
+                        }
+                    ]
+                    type : object
                 }
+                type : object
             }
             exceptionId : <exceptionId>
             lineNumber : 0
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
index 1a64b576c3e3d7..f98fc43bf959e8 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
@@ -166,10 +166,22 @@ Running test: testFunctionReturnRejectedPromise
         exceptionDetails : {
             columnNumber : 0
             exception : {
-                type : object
-                value : {
-                    a : 3
+                className : Object
+                description : Object
+                objectId : <objectId>
+                preview : {
+                    description : Object
+                    overflow : false
+                    properties : [
+                        [0] : {
+                            name : a
+                            type : number
+                            value : 3
+                        }
+                    ]
+                    type : object
                 }
+                type : object
             }
             exceptionId : <exceptionId>
             lineNumber : 0
@@ -204,3 +216,75 @@ Running test: testPassingBothObjectIdAndExecutionContextId
     }
     id : <messageId>
 }
+
+Running test: testThrowNumber
+{
+    id : <messageId>
+    result : {
+        exceptionDetails : {
+            columnNumber : 10
+            exception : {
+                description : 100500
+                type : number
+                value : 100500
+            }
+            exceptionId : <exceptionId>
+            lineNumber : 0
+            scriptId : <scriptId>
+            stackTrace : {
+                callFrames : [
+                    [0] : {
+                        columnNumber : 10
+                        functionName : 
+                        lineNumber : 0
+                        scriptId : <scriptId>
+                        url : 
+                    }
+                ]
+            }
+            text : Uncaught
+        }
+        result : {
+            description : 100500
+            type : number
+            value : 100500
+        }
+    }
+}
+
+Running test: testAsyncFunctionWithUnknownReferenceReturnByValue
+{
+    id : <messageId>
+    result : {
+        exceptionDetails : {
+            columnNumber : 30
+            exception : {
+                className : ReferenceError
+                description : ReferenceError: does_not_exist is not defined     at <anonymous>:1:30
+                objectId : <objectId>
+                subtype : error
+                type : object
+            }
+            exceptionId : <exceptionId>
+            lineNumber : 1
+            scriptId : <scriptId>
+            stackTrace : {
+                callFrames : [
+                    [0] : {
+                        columnNumber : 29
+                        functionName : 
+                        lineNumber : 0
+                        scriptId : <scriptId>
+                        url : 
+                    }
+                ]
+            }
+            text : Uncaught (in promise) ReferenceError: does_not_exist is not defined
+        }
+        result : {
+            type : object
+            value : {
+            }
+        }
+    }
+}
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async.js b/deps/v8/test/inspector/runtime/call-function-on-async.js
index ab146e1c4dec08..70f823c52cee81 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async.js
+++ b/deps/v8/test/inspector/runtime/call-function-on-async.js
@@ -146,6 +146,28 @@ let testSuite = [
       awaitPromise: false
     }));
   },
+
+  async function testThrowNumber() {
+    InspectorTest.logMessage(await callFunctionOn({
+      executionContextId,
+      functionDeclaration: '(() => { throw 100500; } )',
+      arguments: prepareArguments([]),
+      returnByValue: true,
+      generatePreview: false,
+      awaitPromise: true
+    }));
+  },
+
+  async function testAsyncFunctionWithUnknownReferenceReturnByValue() {
+    InspectorTest.logMessage(await callFunctionOn({
+      executionContextId,
+      functionDeclaration: '(async () => does_not_exist.click())',
+      arguments: prepareArguments([]),
+      returnByValue: true,
+      generatePreview: false,
+      awaitPromise: true
+    }));
+  },
 ];
 
 function prepareArguments(args) {
diff --git a/deps/v8/test/inspector/runtime/console-table-expected.txt b/deps/v8/test/inspector/runtime/console-table-expected.txt
index e00708b587dc36..aa6b456b9308f4 100644
--- a/deps/v8/test/inspector/runtime/console-table-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-table-expected.txt
@@ -383,3 +383,97 @@ preview:
     type : object
 }
 
+{
+    description : Array(2)
+    overflow : false
+    properties : [
+        [0] : {
+            name : 0
+            type : object
+            value : Object
+            valuePreview : {
+                description : Object
+                overflow : false
+                properties : [
+                    [0] : {
+                        name : c
+                        type : number
+                        value : 3
+                    }
+                    [1] : {
+                        name : b
+                        type : number
+                        value : 2
+                    }
+                ]
+                type : object
+            }
+        }
+        [1] : {
+            name : 1
+            type : object
+            value : Object
+            valuePreview : {
+                description : Object
+                overflow : false
+                properties : [
+                    [0] : {
+                        name : c
+                        type : number
+                        value : 3
+                    }
+                ]
+                type : object
+            }
+        }
+    ]
+    subtype : array
+    type : object
+}
+{
+    description : Array(2)
+    overflow : false
+    properties : [
+        [0] : {
+            name : 0
+            type : object
+            value : Object
+            valuePreview : {
+                description : Object
+                overflow : false
+                properties : [
+                    [0] : {
+                        name : c
+                        type : number
+                        value : 3
+                    }
+                    [1] : {
+                        name : b
+                        type : number
+                        value : 2
+                    }
+                ]
+                type : object
+            }
+        }
+        [1] : {
+            name : 1
+            type : object
+            value : Object
+            valuePreview : {
+                description : Object
+                overflow : false
+                properties : [
+                    [0] : {
+                        name : c
+                        type : number
+                        value : 3
+                    }
+                ]
+                type : object
+            }
+        }
+    ]
+    subtype : array
+    type : object
+}
diff --git a/deps/v8/test/inspector/runtime/console-table.js b/deps/v8/test/inspector/runtime/console-table.js
index 70e3548c143f05..961499dfac8a92 100644
--- a/deps/v8/test/inspector/runtime/console-table.js
+++ b/deps/v8/test/inspector/runtime/console-table.js
@@ -65,6 +65,18 @@ const { session, contextGroup, Protocol } =
       console.table(bigTable);`
   });
   await waitConsoleAPICalledAndDump(true /* concise */);
+  Protocol.Runtime.evaluate({
+    expression: `var table = [{a:1, b:2, c:3}, {c:3}];
+      var filter = ['c', 'b'];
+      console.table(table, filter);`
+  });
+  await waitConsoleAPICalledAndDump();
+  Protocol.Runtime.evaluate({
+    expression: `var table = [{a:1, b:2, c:3}, {c:3}];
+      var filter = ['c', 'b', 'c'];
+      console.table(table, filter);`
+  });
+  await waitConsoleAPICalledAndDump();
   InspectorTest.completeTest();
 })()
 
diff --git a/deps/v8/test/inspector/runtime/evaluate-new-function-error-expected.txt b/deps/v8/test/inspector/runtime/evaluate-new-function-error-expected.txt
new file mode 100644
index 00000000000000..70191eac96f867
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-new-function-error-expected.txt
@@ -0,0 +1,27 @@
+Tests that Runtime.evaluate has the correct error line number for 'new Function(...)'
+{
+    id : <messageId>
+    result : {
+        exceptionDetails : {
+            columnNumber : 3
+            exception : {
+                className : TypeError
+                description : TypeError: 0 is not a function     at eval (eval at <anonymous> (:1:1), <anonymous>:1:4)     at <anonymous>:1:22
+                objectId : <objectId>
+                subtype : error
+                type : object
+            }
+            exceptionId : <exceptionId>
+            lineNumber : 0
+            scriptId : <scriptId>
+            text : Uncaught
+        }
+        result : {
+            className : TypeError
+            description : TypeError: 0 is not a function     at eval (eval at <anonymous> (:1:1), <anonymous>:1:4)     at <anonymous>:1:22
+            objectId : <objectId>
+            subtype : error
+            type : object
+        }
+    }
+}
diff --git a/deps/v8/test/inspector/runtime/evaluate-new-function-error.js b/deps/v8/test/inspector/runtime/evaluate-new-function-error.js
new file mode 100644
index 00000000000000..bb878d957a1bbf
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-new-function-error.js
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that Runtime.evaluate has the correct error line number for 'new Function(...)'");
+
+var message = { expression: "new Function('(0)()')();" };
+
+Protocol.Runtime.evaluate(message)
+  .then(message => InspectorTest.logMessage(message))
+  .then(() => InspectorTest.completeTest());
diff --git a/deps/v8/test/inspector/runtime/remote-object-expected.txt b/deps/v8/test/inspector/runtime/remote-object-expected.txt
index d05401ddfb0b33..98fe47beed7590 100644
--- a/deps/v8/test/inspector/runtime/remote-object-expected.txt
+++ b/deps/v8/test/inspector/runtime/remote-object-expected.txt
@@ -1436,6 +1436,18 @@ Running test: testCustomError
     }
 }
 
+Running test: testCustomErrorWithMessage
+'class CustomMsgError extends Error {}; a = new CustomMsgError(); delete a.stack; a.message = 'foobar'; a', returnByValue: false, generatePreview: false
+{
+    result : {
+        className : CustomMsgError
+        description : CustomMsgError: foobar
+        objectId : <objectId>
+        subtype : error
+        type : object
+    }
+}
+
 Running test: testProxy
 'new Proxy({}, {})', returnByValue: false, generatePreview: false
 {
diff --git a/deps/v8/test/inspector/runtime/remote-object.js b/deps/v8/test/inspector/runtime/remote-object.js
index ed35f0eff6be3f..78926479aef0a3 100644
--- a/deps/v8/test/inspector/runtime/remote-object.js
+++ b/deps/v8/test/inspector/runtime/remote-object.js
@@ -419,6 +419,11 @@ InspectorTest.runAsyncTestSuite([
       expression: `class CustomError extends Error {}; a = new CustomError(); delete a.stack; a`
     })).result);
   },
+  async function testCustomErrorWithMessage() {
+    InspectorTest.logMessage((await evaluate( {
+      expression: `class CustomMsgError extends Error {}; a = new CustomMsgError(); delete a.stack; a.message = 'foobar'; a`
+    })).result);
+  },
   async function testProxy() {
     InspectorTest.logMessage((await evaluate({
       expression: 'new Proxy({}, {})'
diff --git a/deps/v8/test/inspector/runtime/run-script-async-expected.txt b/deps/v8/test/inspector/runtime/run-script-async-expected.txt
index 8befa1399c6032..5b94305081cd28 100644
--- a/deps/v8/test/inspector/runtime/run-script-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/run-script-async-expected.txt
@@ -172,10 +172,22 @@ Running test: testAwaitRejectedPromise
         exceptionDetails : {
             columnNumber : 0
             exception : {
-                type : object
-                value : {
-                    a : 1
+                className : Object
+                description : Object
+                objectId : <objectId>
+                preview : {
+                    description : Object
+                    overflow : false
+                    properties : [
+                        [0] : {
+                            name : a
+                            type : number
+                            value : 1
+                        }
+                    ]
+                    type : object
                 }
+                type : object
             }
             exceptionId : <exceptionId>
             lineNumber : 0
diff --git a/deps/v8/test/inspector/runtime/terminate-execution-expected.txt b/deps/v8/test/inspector/runtime/terminate-execution-expected.txt
index 24df70ebb66bda..614dc6da1e4b1a 100644
--- a/deps/v8/test/inspector/runtime/terminate-execution-expected.txt
+++ b/deps/v8/test/inspector/runtime/terminate-execution-expected.txt
@@ -64,3 +64,10 @@ Pause inside microtask and terminate execution
         }
     }
 }
+Terminate execution with pending microtasks
+{
+    id : <messageId>
+    result : {
+    }
+}
+
diff --git a/deps/v8/test/inspector/runtime/terminate-execution.js b/deps/v8/test/inspector/runtime/terminate-execution.js
index feaf52eb2c1915..8af28e4787fb9c 100644
--- a/deps/v8/test/inspector/runtime/terminate-execution.js
+++ b/deps/v8/test/inspector/runtime/terminate-execution.js
@@ -55,6 +55,18 @@ let {session, contextGroup, Protocol} =
       .then(InspectorTest.logMessage);
   await Protocol.Debugger.disable();
 
+  InspectorTest.log('Terminate execution with pending microtasks');
+  Protocol.Debugger.enable();
+  const paused2 = Protocol.Debugger.oncePaused();
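+  // The resolved promise queues a microtask that must never run: terminating
+  // execution while paused should cancel it (otherwise 'FAIL: microtask ran'
+  // would be logged).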
+  Protocol.Runtime.evaluate({expression: `
+      Promise.resolve().then(() => { console.log('FAIL: microtask ran'); });
+      debugger;
+      for (;;) {}
+  `});
+  await paused2;
+  Protocol.Runtime.terminateExecution().then(InspectorTest.logMessage);
+  await Protocol.Debugger.resume();
+
   await Protocol.Runtime.disable();
   InspectorTest.completeTest();
 })();
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
index d76de0d323e8df..1476b0f64c82b2 100644
--- a/deps/v8/test/inspector/task-runner.cc
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -43,7 +43,7 @@ TaskRunner::TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
       process_queue_semaphore_(0),
       nested_loop_count_(0),
       is_terminated_(0) {
-  Start();
+  CHECK(Start());
 }
 
 TaskRunner::~TaskRunner() { Join(); }
diff --git a/deps/v8/test/intl/number-format/unified/style-unit.js b/deps/v8/test/intl/number-format/unified/style-unit.js
index b88af0fb7672e7..72eb0a782d2ce4 100644
--- a/deps/v8/test/intl/number-format/unified/style-unit.js
+++ b/deps/v8/test/intl/number-format/unified/style-unit.js
@@ -24,6 +24,7 @@ assertEquals(undefined, nf.resolvedOptions().unit);
 assertThrows(() => new Intl.NumberFormat("en", {style: 'unit'}), TypeError);
 
 const validUnits = [
+  // IsSanctionedSimpleUnitIdentifier
   'acre',
   'bit',
   'byte',
@@ -32,7 +33,9 @@ const validUnits = [
   'day',
   'degree',
   'fahrenheit',
+  'fluid-ounce',
   'foot',
+  'gallon',
   'gigabit',
   'gigabyte',
   'gram',
@@ -43,12 +46,14 @@ const validUnits = [
   'kilobyte',
   'kilogram',
   'kilometer',
+  'liter',
   'megabit',
   'megabyte',
   'meter',
   'mile-scandinavian',
   'mile',
   'millimeter',
+  'milliliter',
   'millisecond',
   'minute',
   'month',
@@ -68,6 +73,9 @@ const validUnits = [
   'meter-per-second',
   'yard-per-second',
   'yard-per-hour',
+  // -per- in IsWellFormedUnitIdentifier
+  'liter-per-kilometer',
+  'mile-per-gallon',
 ];
 
 for (const unit of validUnits) {
@@ -103,12 +111,10 @@ assertThrows(() => c('day-person'), RangeError);
 assertThrows(() => c('deciliter'), RangeError);
 assertThrows(() => c('decimeter'), RangeError);
 assertThrows(() => c('fathom'), RangeError);
-assertThrows(() => c('fluid-ounce'), RangeError);
 assertThrows(() => c('foodcalorie'), RangeError);
 assertThrows(() => c('furlong'), RangeError);
 assertThrows(() => c('g-force'), RangeError);
 assertThrows(() => c('gallon-imperial'), RangeError);
-assertThrows(() => c('gallon'), RangeError);
 assertThrows(() => c('generic'), RangeError);
 assertThrows(() => c('gigahertz'), RangeError);
 assertThrows(() => c('gigawatt'), RangeError);
@@ -128,8 +134,6 @@ assertThrows(() => c('kilowatt'), RangeError);
 assertThrows(() => c('knot'), RangeError);
 assertThrows(() => c('light-year'), RangeError);
 assertThrows(() => c('liter-per-100kilometers'), RangeError);
-assertThrows(() => c('liter-per-kilometer'), RangeError);
-assertThrows(() => c('liter'), RangeError);
 assertThrows(() => c('lux'), RangeError);
 assertThrows(() => c('megahertz'), RangeError);
 assertThrows(() => c('megaliter'), RangeError);
@@ -140,12 +144,10 @@ assertThrows(() => c('microgram'), RangeError);
 assertThrows(() => c('micrometer'), RangeError);
 assertThrows(() => c('microsecond'), RangeError);
 assertThrows(() => c('mile-per-gallon-imperial'), RangeError);
-assertThrows(() => c('mile-per-gallon'), RangeError);
 assertThrows(() => c('milliampere'), RangeError);
 assertThrows(() => c('millibar'), RangeError);
 assertThrows(() => c('milligram-per-deciliter'), RangeError);
 assertThrows(() => c('milligram'), RangeError);
-assertThrows(() => c('milliliter'), RangeError);
 assertThrows(() => c('millimeter-of-mercury'), RangeError);
 assertThrows(() => c('millimole-per-liter'), RangeError);
 assertThrows(() => c('milliwatt'), RangeError);
diff --git a/deps/v8/test/intl/regress-9475.js b/deps/v8/test/intl/regress-9475.js
new file mode 100644
index 00000000000000..3549ef8f3899f1
--- /dev/null
+++ b/deps/v8/test/intl/regress-9475.js
@@ -0,0 +1,62 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+// Test that formatting with every valid unit does not throw.
+
+let validList = [
+  // IsSanctionedSimpleUnitIdentifier
+  'acre',
+  'bit',
+  'byte',
+  'celsius',
+  'centimeter',
+  'day',
+  'degree',
+  'fahrenheit',
+  'fluid-ounce',
+  'foot',
+  'gallon',
+  'gigabit',
+  'gigabyte',
+  'gram',
+  'hectare',
+  'hour',
+  'inch',
+  'kilobit',
+  'kilobyte',
+  'kilogram',
+  'kilometer',
+  'liter',
+  'megabit',
+  'megabyte',
+  'meter',
+  'mile',
+  'mile-scandinavian',
+  'millimeter',
+  'milliliter',
+  'millisecond',
+  'minute',
+  'month',
+  'ounce',
+  'percent',
+  'petabyte',
+  'pound',
+  'second',
+  'stone',
+  'terabit',
+  'terabyte',
+  'week',
+  'yard',
+  'year',
+  // -per- in IsWellFormedUnitIdentifier
+  'liter-per-kilometer',
+  'mile-per-gallon',
+];
+
+for (let unit of validList) {
+  let nf = new Intl.NumberFormat("en", {style: "unit", unit});
+  assertDoesNotThrow(() => nf.format(123.45),
+      "unit: '" + unit + "' should not throw");
+}
diff --git a/deps/v8/test/intl/regress-9642.js b/deps/v8/test/intl/regress-9642.js
new file mode 100644
index 00000000000000..9091ffb12541a5
--- /dev/null
+++ b/deps/v8/test/intl/regress-9642.js
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Environment Variables: LANG=lb_LU
+// Test that creating Intl.DateTimeFormat does not fail under a locale that
+// is not installed.
+
+let dtf = new Intl.DateTimeFormat();
diff --git a/deps/v8/test/js-perf-test/IC/loadconstantfromprototype.js b/deps/v8/test/js-perf-test/IC/loadconstantfromprototype.js
new file mode 100644
index 00000000000000..deb0a817627cb6
--- /dev/null
+++ b/deps/v8/test/js-perf-test/IC/loadconstantfromprototype.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('LoadConstantFromPrototype', [1000], [
+  new Benchmark('LoadConstantFromPrototype', false, false, 0, LoadConstantFromPrototype)
+]);
+
+function Foo() {};
+
+Foo.prototype.bar = {};
+Foo.prototype.covfefe = function() {};
+Foo.prototype.baz = 1;
+
+function LoadConstantFromPrototype() {
+  let foo = new Foo();
+
+  for (let i = 0; i < 1000; ++i) {
+    foo.bar;
+    foo.covfefe;
+    foo.baz;
+  }
+}
diff --git a/deps/v8/test/js-perf-test/IC/run.js b/deps/v8/test/js-perf-test/IC/run.js
new file mode 100644
index 00000000000000..254799c7d3b68b
--- /dev/null
+++ b/deps/v8/test/js-perf-test/IC/run.js
@@ -0,0 +1,24 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('../base.js');
+
+load('loadconstantfromprototype.js');
+
+function PrintResult(name, result) {
+  print(name + '-IC(Score): ' + result);
+}
+
+function PrintStep(name) {}
+
+function PrintError(name, error) {
+  PrintResult(name, error);
+}
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+                           NotifyError: PrintError,
+                           NotifyStep: PrintStep });
diff --git a/deps/v8/test/js-perf-test/JSTests5.json b/deps/v8/test/js-perf-test/JSTests5.json
index 66504bf5755086..6d42cbdda93785 100644
--- a/deps/v8/test/js-perf-test/JSTests5.json
+++ b/deps/v8/test/js-perf-test/JSTests5.json
@@ -618,6 +618,17 @@
         {"name": "Inline-Serialize-Error.stack"},
         {"name": "Recursive-Serialize-Error.stack"}
       ]
+    },
+    {
+      "name": "IC",
+      "path": ["IC"],
+      "main": "run.js",
+      "flags": ["--no-opt"],
+      "resources": ["loadconstantfromprototype.js"],
+      "results_regexp": "^%s\\-IC\\(Score\\): (.+)$",
+      "tests": [
+        {"name": "LoadConstantFromPrototype"
+        }
+      ]
     }
   ]
 }
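
The `results_regexp` has to line up with what run.js prints: PrintResult emits `<name>-IC(Score): <result>`, so for a hypothetical score of 187 the printed line would be

    LoadConstantFromPrototype-IC(Score): 187

and `^%s\-IC\(Score\): (.+)$`, with `%s` replaced by the test name, captures `187` as the score.
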
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-getter-count.js b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-count.js
new file mode 100644
index 00000000000000..35f30accdfa9c1
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-count.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+class C {
+  set #foo(val) {}
+  constructor() {
+    this.#foo++;
+  }
+}
+
+new C();
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-getter-count.out b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-count.out
new file mode 100644
index 00000000000000..f6328b9c43403b
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-count.out
@@ -0,0 +1,9 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:10: TypeError: '#foo' was defined without a getter
+    this.#foo++;
+    ^
+TypeError: '#foo' was defined without a getter
+    at new C (*%(basename)s:10:5)
+    at *%(basename)s:14:1
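
This test pins the error for a compound update on a private accessor that has only a setter. For contrast, a minimal sketch (not part of the tests) of an accessor pair for which `++` succeeds, since the read half uses the getter and the write half uses the setter:

    class Ok {
      #v = 0;
      get #foo() { return this.#v; }
      set #foo(val) { this.#v = val; }
      bump() { this.#foo++; return this.#v; }
    }
    new Ok().bump();  // 1
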
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.js b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.js
new file mode 100644
index 00000000000000..680c1bba744d98
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+class C {
+  set #a(val) {}
+  setA(obj, val) { obj.#a = val; }
+
+  constructor() {
+    class D {
+      get #a() {}
+    }
+    this.setA(new D(), 1);
+  }
+}
+
+new C;
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.out b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.out
new file mode 100644
index 00000000000000..ca3154a26b13b5
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.out
@@ -0,0 +1,10 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:9: TypeError: Cannot read private member C from an object whose class did not declare it
+  setA(obj, val) { obj.#a = val; }
+                       ^
+TypeError: Cannot read private member C from an object whose class did not declare it
+    at C.setA (*%(basename)s:9:24)
+    at new C (*%(basename)s:15:10)
+    at *%(basename)s:19:1
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-getter.js b/deps/v8/test/message/fail/class-accessors-private-undefined-getter.js
new file mode 100644
index 00000000000000..9422739f7def29
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-getter.js
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+class C {
+  set #a(val) {}
+  constructor() {
+    const a = this.#a;
+  }
+}
+new C;
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-getter.out b/deps/v8/test/message/fail/class-accessors-private-undefined-getter.out
new file mode 100644
index 00000000000000..c41451c3af3814
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-getter.out
@@ -0,0 +1,9 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:10: TypeError: '#a' was defined without a getter
+    const a = this.#a;
+              ^
+TypeError: '#a' was defined without a getter
+    at new C (*%(basename)s:10:15)
+    at *%(basename)s:13:1
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-setter-compound.js b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-compound.js
new file mode 100644
index 00000000000000..2a1f05767f57d0
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-compound.js
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+class C {
+  get #a() {}
+  constructor() {
+    this.#a = 1;
+  }
+}
+new C;
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-setter-compound.out b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-compound.out
new file mode 100644
index 00000000000000..a98f4707d412af
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-compound.out
@@ -0,0 +1,9 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:10: TypeError: '#a' was defined without a setter
+    this.#a = 1;
+            ^
+TypeError: '#a' was defined without a setter
+    at new C (*%(basename)s:10:13)
+    at *%(basename)s:13:1
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-setter-count.js b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-count.js
new file mode 100644
index 00000000000000..33367c7ef9d303
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-count.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+class C {
+  get #foo() {}
+  constructor() {
+    this.#foo++;
+  }
+}
+
+new C();
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-setter-count.out b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-count.out
new file mode 100644
index 00000000000000..787a00019adc78
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-count.out
@@ -0,0 +1,9 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:10: TypeError: '#foo' was defined without a setter
+    this.#foo++;
+    ^
+TypeError: '#foo' was defined without a setter
+    at new C (*%(basename)s:10:5)
+    at *%(basename)s:14:1
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.js b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.js
new file mode 100644
index 00000000000000..2f28fc26f35498
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+class C {
+  get #a() {}
+  getA(obj) { return obj.#a; }
+
+  constructor() {
+    class D {
+      set #a(val) {}
+    }
+    this.getA(new D());
+  }
+}
+
+new C;
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.out b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.out
new file mode 100644
index 00000000000000..5f22848692213d
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.out
@@ -0,0 +1,10 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:9: TypeError: Cannot read private member C from an object whose class did not declare it
+  getA(obj) { return obj.#a; }
+                         ^
+TypeError: Cannot read private member C from an object whose class did not declare it
+    at C.getA (*%(basename)s:9:26)
+    at new C (*%(basename)s:15:10)
+    at *%(basename)s:19:1
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-setter.js b/deps/v8/test/message/fail/class-accessors-private-undefined-setter.js
new file mode 100644
index 00000000000000..2a1f05767f57d0
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-setter.js
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+class C {
+  get #a() {}
+  constructor() {
+    this.#a = 1;
+  }
+}
+new C;
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-setter.out b/deps/v8/test/message/fail/class-accessors-private-undefined-setter.out
new file mode 100644
index 00000000000000..a5c56ca4d001b7
--- /dev/null
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-setter.out
@@ -0,0 +1,9 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:10: TypeError: '#a' was defined without a setter
+    this.#a = 1;
+            ^
+TypeError: '#a' was defined without a setter
+    at new C (*%(basename)s:10:13)
+    at *%(basename)s:13:1
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-source-positions.out b/deps/v8/test/message/fail/class-fields-private-source-positions.out
index 7c2f99964b989b..6be1fd89f87745 100644
--- a/deps/v8/test/message/fail/class-fields-private-source-positions.out
+++ b/deps/v8/test/message/fail/class-fields-private-source-positions.out
@@ -1,5 +1,5 @@
-*%(basename)s:9: TypeError: Read of private field #a from an object which did not contain the field
+*%(basename)s:9: TypeError: Cannot read private member #a from an object whose class did not declare it
     [o.#a](){}
        ^
-TypeError: Read of private field #a from an object which did not contain the field
+TypeError: Cannot read private member #a from an object whose class did not declare it
     at *%(basename)s:9:8
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-early-2.out b/deps/v8/test/message/fail/class-fields-private-throw-early-2.out
index 1cf7bb41aec429..9731e7d6111e07 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-early-2.out
+++ b/deps/v8/test/message/fail/class-fields-private-throw-early-2.out
@@ -1,6 +1,6 @@
-*%(basename)s:8: TypeError: Write of private field #x to an object which did not contain the field
+*%(basename)s:8: TypeError: Cannot write private member #x to an object whose class did not declare it
     ({}).#x = 1;
             ^
-TypeError: Write of private field #x to an object which did not contain the field
+TypeError: Cannot write private member #x to an object whose class did not declare it
     at new X (*%(basename)s:8:13)
     at *%(basename)s:12:1
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-read.out b/deps/v8/test/message/fail/class-fields-private-throw-read.out
index ec8dcf5108db07..21823edc443681 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-read.out
+++ b/deps/v8/test/message/fail/class-fields-private-throw-read.out
@@ -1,6 +1,6 @@
-*%(basename)s:7: TypeError: Read of private field #x from an object which did not contain the field
+*%(basename)s:7: TypeError: Cannot read private member #x from an object whose class did not declare it
   eq(o) { return this.#x === o.#x; }
                                ^
-TypeError: Read of private field #x from an object which did not contain the field
+TypeError: Cannot read private member #x from an object whose class did not declare it
     at X.eq (*%(basename)s:7:32)
     at *%(basename)s:10:9
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-write.out b/deps/v8/test/message/fail/class-fields-private-throw-write.out
index 8d9047cc628e92..81a73ca239129d 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-write.out
+++ b/deps/v8/test/message/fail/class-fields-private-throw-write.out
@@ -1,6 +1,6 @@
-*%(basename)s:7: TypeError: Write of private field #x to an object which did not contain the field
+*%(basename)s:7: TypeError: Cannot write private member #x to an object whose class did not declare it
   setX(o, val) { o.#x = val; }
                       ^
-TypeError: Write of private field #x to an object which did not contain the field
+TypeError: Cannot write private member #x to an object whose class did not declare it
     at X.setX (*%(basename)s:7:23)
     at *%(basename)s:10:9
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/destructuring-undefined-computed-property.out b/deps/v8/test/message/fail/destructuring-undefined-computed-property.out
index 1dfb19eb6979e3..326bb30e3b5233 100644
--- a/deps/v8/test/message/fail/destructuring-undefined-computed-property.out
+++ b/deps/v8/test/message/fail/destructuring-undefined-computed-property.out
@@ -1,5 +1,5 @@
-*%(basename)s:5: TypeError: Cannot destructure 'undefined' or 'null'.
+*%(basename)s:5: TypeError: Cannot destructure 'undefined' as it is undefined.
 var { [x] : y } = undefined;
-    ^
-TypeError: Cannot destructure 'undefined' or 'null'.
+                  ^
+TypeError: Cannot destructure 'undefined' as it is undefined.
     at *%(basename)s:5:5
diff --git a/deps/v8/test/message/fail/destructuring-undefined-number-property.out b/deps/v8/test/message/fail/destructuring-undefined-number-property.out
index b23889566ab814..1fb13a6f00af95 100644
--- a/deps/v8/test/message/fail/destructuring-undefined-number-property.out
+++ b/deps/v8/test/message/fail/destructuring-undefined-number-property.out
@@ -1,5 +1,5 @@
-*%(basename)s:5: TypeError: Cannot destructure 'undefined' or 'null'.
+*%(basename)s:5: TypeError: Cannot destructure 'undefined' as it is undefined.
 var { 1: x } = undefined;
-    ^
-TypeError: Cannot destructure 'undefined' or 'null'.
-    at *%(basename)s:5:5
+               ^
+TypeError: Cannot destructure 'undefined' as it is undefined.
+    at *%(basename)s:5:10
diff --git a/deps/v8/test/message/fail/destructuring-undefined-string-property.out b/deps/v8/test/message/fail/destructuring-undefined-string-property.out
index 238aae974a5343..bd0520ba6d90b7 100644
--- a/deps/v8/test/message/fail/destructuring-undefined-string-property.out
+++ b/deps/v8/test/message/fail/destructuring-undefined-string-property.out
@@ -1,5 +1,5 @@
-*%(basename)s:5: TypeError: Cannot destructure property `x` of 'undefined' or 'null'.
+*%(basename)s:5: TypeError: Cannot destructure property 'x' of 'undefined' as it is undefined.
 var { x } = undefined;
       ^
-TypeError: Cannot destructure property `x` of 'undefined' or 'null'.
-    at *%(basename)s:5:5
+TypeError: Cannot destructure property 'x' of 'undefined' as it is undefined.
+    at *%(basename)s:5:7
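
A sketch of the behavior the updated expectations pin down: the message now names the missing property and says why destructuring failed, and the caret moves from the start of the pattern to the property itself:

    try {
      var { x } = undefined;
    } catch (e) {
      e.message;  // "Cannot destructure property 'x' of 'undefined' as it is undefined."
    }
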
diff --git a/deps/v8/test/message/regress/fail/regress-9603.js b/deps/v8/test/message/regress/fail/regress-9603.js
new file mode 100644
index 00000000000000..4e2fce95fc3c1c
--- /dev/null
+++ b/deps/v8/test/message/regress/fail/regress-9603.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let x = 09_9.0;
diff --git a/deps/v8/test/message/regress/fail/regress-9603.out b/deps/v8/test/message/regress/fail/regress-9603.out
new file mode 100644
index 00000000000000..2929a1991d2a09
--- /dev/null
+++ b/deps/v8/test/message/regress/fail/regress-9603.out
@@ -0,0 +1,7 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:5: SyntaxError: Invalid or unexpected token
+let x = 09_9.0;
+          ^
+SyntaxError: Invalid or unexpected token
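
The regression is about numeric separators interacting with legacy octal-like literals: a leading `0` followed by digits parses as a legacy (non-)octal literal, and separators are not allowed there. A sketch of the boundary:

    let a = 1_000_000;  // ok: separators in an ordinary decimal literal
    let b = 0xFF_EC;    // ok: separators in a hex literal
    // let c = 09_9.0;  // SyntaxError: Invalid or unexpected token (the case above)
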
diff --git a/deps/v8/test/mjsunit/code-coverage-block.js b/deps/v8/test/mjsunit/code-coverage-block.js
index a7bad5bf11fbc4..c441342cdfbeed 100644
--- a/deps/v8/test/mjsunit/code-coverage-block.js
+++ b/deps/v8/test/mjsunit/code-coverage-block.js
@@ -434,8 +434,8 @@ TestCoverage(
 `,
 [{"start":0,"end":399,"count":1},
  {"start":1,"end":351,"count":1},
- {"start":154,"end":176,"count":0},
- {"start":254,"end":276,"count":0}]
+ {"start":154,"end":204,"count":0},
+ {"start":226,"end":350,"count":0}]
 );
 
 TestCoverage(
@@ -464,8 +464,8 @@ TestCoverage(
 `,
 [{"start":0,"end":999,"count":1},
  {"start":1,"end":951,"count":1},
- {"start":152,"end":168,"count":0},
- {"start":287,"end":310,"count":0}]
+ {"start":152,"end":202,"count":0},
+ {"start":285,"end":353,"count":0}]
 );
 
 TestCoverage(
@@ -1052,49 +1052,4 @@ try {                                     // 0500
  {"start":69,"end":153,"count":1}]
 );
 
-TestCoverage(
-"https://crbug.com/v8/9705",
-`
-function f(x) {                           // 0000
-  switch (x) {                            // 0050
-    case 40: nop();                       // 0100
-    case 41: nop(); return 1;             // 0150
-    case 42: nop(); break;                // 0200
-  }                                       // 0250
-  return 3;                               // 0300
-};                                        // 0350
-f(40);                                    // 0400
-f(41);                                    // 0450
-f(42);                                    // 0500
-f(43);                                    // 0550
-`,
-[{"start":0,"end":599,"count":1},
- {"start":0,"end":351,"count":4},
- {"start":104,"end":119,"count":1},
- {"start":154,"end":179,"count":2},
- {"start":204,"end":226,"count":1},
- {"start":253,"end":350,"count":2}]
-);
-
-TestCoverage(
-"https://crbug.com/v8/9705",
-`
-function f(x) {                           // 0000
-  switch (x) {                            // 0050
-    case 40: nop();                       // 0100
-    case 41: nop(); return 1;             // 0150
-    case 42: nop(); break;                // 0200
-  }                                       // 0250
-  return 3;                               // 0300
-};                                        // 0350
-f(42);                                    // 0400
-f(43);                                    // 0450
-`,
-[{"start":0,"end":499,"count":1},
- {"start":0,"end":351,"count":2},
- {"start":104,"end":119,"count":0},
- {"start":154,"end":179,"count":0},
- {"start":204,"end":226,"count":1}]
-);
-
 %DebugToggleBlockCoverage(false);
diff --git a/deps/v8/test/mjsunit/code-coverage-utils.js b/deps/v8/test/mjsunit/code-coverage-utils.js
index 4164f5a314f157..57833902220b7f 100644
--- a/deps/v8/test/mjsunit/code-coverage-utils.js
+++ b/deps/v8/test/mjsunit/code-coverage-utils.js
@@ -18,40 +18,25 @@ let gen;
     return undefined;
   };
 
-  function TestCoverageInternal(
-      name, source, expectation, collect_garbage, prettyPrintResults) {
+  function TestCoverageInternal(name, source, expectation, collect_garbage) {
     source = source.trim();
     eval(source);
     if (collect_garbage) %CollectGarbage("collect dead objects");
     var covfefe = GetCoverage(source);
     var stringified_result = JSON.stringify(covfefe);
     var stringified_expectation = JSON.stringify(expectation);
-    const mismatch = stringified_result != stringified_expectation;
-    if (mismatch) {
-      console.log(stringified_result.replace(/[}],[{]/g, "},\n {"));
-    }
-    if (prettyPrintResults) {
-      console.log("=== Coverage Expectation ===")
-      for (const {start,end,count} of expectation) {
-        console.log(`Range [${start}, ${end}) (count: ${count})`);
-        console.log(source.substring(start, end));
-      }
-      console.log("=== Coverage Results ===")
-      for (const {start,end,count} of covfefe) {
-        console.log(`Range [${start}, ${end}) (count: ${count})`);
-        console.log(source.substring(start, end));
-      }
-      console.log("========================")
+    if (stringified_result != stringified_expectation) {
+      print(stringified_result.replace(/[}],[{]/g, "},\n {"));
     }
     assertEquals(stringified_expectation, stringified_result, name + " failed");
   };
 
-  TestCoverage = function(name, source, expectation, prettyPrintResults) {
-    TestCoverageInternal(name, source, expectation, true, prettyPrintResults);
+  TestCoverage = function(name, source, expectation) {
+    TestCoverageInternal(name, source, expectation, true);
   };
 
-  TestCoverageNoGC = function(name, source, expectation, prettyPrintResults) {
-    TestCoverageInternal(name, source, expectation, false, prettyPrintResults);
+  TestCoverageNoGC = function(name, source, expectation) {
+    TestCoverageInternal(name, source, expectation, false);
   };
 
   nop = function() {};
diff --git a/deps/v8/test/mjsunit/compiler/bigint-int64-lowered.js b/deps/v8/test/mjsunit/compiler/bigint-int64-lowered.js
index f669c17c2926da..abbfa8cfd05cae 100644
--- a/deps/v8/test/mjsunit/compiler/bigint-int64-lowered.js
+++ b/deps/v8/test/mjsunit/compiler/bigint-int64-lowered.js
@@ -70,6 +70,7 @@ function TestInt64LoweredOperations() {
 
 function OptimizeAndTest(fn) {
   %PrepareFunctionForOptimization(fn);
+  %PrepareFunctionForOptimization(assertEquals);
   fn();
   fn();
   %OptimizeFunctionOnNextCall(fn);
diff --git a/deps/v8/test/mjsunit/compiler/dataview-neutered.js b/deps/v8/test/mjsunit/compiler/dataview-detached.js
similarity index 100%
rename from deps/v8/test/mjsunit/compiler/dataview-neutered.js
rename to deps/v8/test/mjsunit/compiler/dataview-detached.js
diff --git a/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js b/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js
new file mode 100644
index 00000000000000..e69a1cbeda9c5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Check that branch elimination replaces the redundant branch condition with a
+// phi node, and that the branch is then folded in EffectControlLinearizationPhase.
+function foo(cond, v1, v2) {
+  cond = cond | 0;
+  var a = cond == 1 ? v1 : v2;
+  if (cond == 1) {
+    %TurbofanStaticAssert(a == v1);
+  } else {
+    %TurbofanStaticAssert(a == v2);
+  }
+}
+
+%PrepareFunctionForOptimization(foo);
+foo(1, 10, 20); foo(2, 30, 40);
+%OptimizeFunctionOnNextCall(foo);
+foo(1, 10, 20); foo(2, 30, 40);
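
The `// Flags:` comment is how the mjsunit harness knows to pass `--allow-natives-syntax`, which enables the `%`-prefixed runtime intrinsics used above; run standalone, the equivalent would presumably be:

    d8 --allow-natives-syntax diamond-followedby-branch.js
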
diff --git a/deps/v8/test/mjsunit/compiler/instanceof4.js b/deps/v8/test/mjsunit/compiler/instanceof4.js
new file mode 100644
index 00000000000000..5074fb5f643cb2
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/instanceof4.js
@@ -0,0 +1,80 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+(function testFunctionPrototypeHasInstance() {
+  class A {};
+  var a = new A;
+
+  function foo() {
+    return A[Symbol.hasInstance](a);
+  };
+
+  %PrepareFunctionForOptimization(foo);
+  assertTrue(foo());
+  assertTrue(foo());
+  %OptimizeFunctionOnNextCall(foo);
+  assertTrue(foo());
+})();
+
+
+(function testFunctionPrototypeHasInstanceWithInference() {
+  class A {};
+  var a = new A;
+  a.bla = 42;
+
+  function foo() {
+    a.bla;
+    return A[Symbol.hasInstance](a);
+  };
+
+  %PrepareFunctionForOptimization(foo);
+  assertTrue(foo());
+  assertTrue(foo());
+  %OptimizeFunctionOnNextCall(foo);
+  assertTrue(foo());
+})();
+
+
+(function testFunctionPrototypeHasInstanceWithBoundFunction() {
+  class A {};
+  var a = new A;
+  var f = A.bind({});
+
+  function foo() {
+    return f[Symbol.hasInstance](a);
+  };
+
+  %PrepareFunctionForOptimization(foo);
+  assertTrue(foo());
+  assertTrue(foo());
+  %OptimizeFunctionOnNextCall(foo);
+  assertTrue(foo());
+
+  // JSCallReducer::ReduceFunctionPrototypeHasInstance ->
+  // JSNative...::ReduceJSOrdinaryHasInstance ->
+  // JSNative...::ReduceJSInstanceOf (on bound_target_function)
+  // ~~~~~>
+  // JSCallReducer::ReduceFunctionPrototypeHasInstance
+  // JSNative...::ReduceJSOrdinaryHasInstance ->
+  // JSNative...::ReduceJSHasInPrototypeChain
+})();
+
+
+(function testSimpleInstanceOf() {
+  class A {};
+  var a = new A;
+
+  function foo() {
+    return a instanceof A;
+  };
+
+  %PrepareFunctionForOptimization(foo);
+  assertTrue(foo());
+  assertTrue(foo());
+  %OptimizeFunctionOnNextCall(foo);
+  assertTrue(foo());
+})();
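
A minimal sketch (not in the test) of why `Symbol.hasInstance` matters here: `instanceof` defers to it, and that is the entry point JSCallReducer::ReduceFunctionPrototypeHasInstance optimizes:

    class Even {
      static [Symbol.hasInstance](x) {
        return typeof x === 'number' && x % 2 === 0;
      }
    }
    4 instanceof Even;  // true
    3 instanceof Even;  // false
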
diff --git a/deps/v8/test/mjsunit/compiler/object-isprototypeof.js b/deps/v8/test/mjsunit/compiler/object-isprototypeof.js
index d8e3c3e796692e..4e50d01c90e0c0 100644
--- a/deps/v8/test/mjsunit/compiler/object-isprototypeof.js
+++ b/deps/v8/test/mjsunit/compiler/object-isprototypeof.js
@@ -160,3 +160,16 @@
   %OptimizeFunctionOnNextCall(foo);
   assertTrue(foo());
 })();
+(function() {
+  function A() {}
+  A.prototype = {};
+  var a = {__proto__: new A, gaga: 42};
+
+  function foo() { a.gaga; return A.prototype.isPrototypeOf(a); }
+
+  %PrepareFunctionForOptimization(foo);
+  assertTrue(foo());
+  assertTrue(foo());
+  %OptimizeFunctionOnNextCall(foo);
+  assertTrue(foo());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-9041.js b/deps/v8/test/mjsunit/compiler/regress-9041.js
index d7a8e6d6259574..ddbf3e492890a1 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9041.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9041.js
@@ -5,19 +5,77 @@
 // Flags: --allow-natives-syntax
 
 (function() {
-class A {}
-
-function foo(a, fn) {
-  const C = a.constructor;
-  fn(a);
-  return a instanceof C;
-};
-%PrepareFunctionForOptimization(foo);
-assertTrue(foo(new A(), a => {}));
-assertTrue(foo(new A(), a => {}));
-%OptimizeFunctionOnNextCall(foo);
-assertTrue(foo(new A(), a => {}));
-assertFalse(foo(new A(), a => {
-  a.__proto__ = {};
-}));
+  class A {};
+
+  function foo(a, fn) {
+    const C = a.constructor;
+    fn(a);
+    return a instanceof C;
+  };
+
+  %PrepareFunctionForOptimization(foo);
+  assertTrue(foo(new A(), a => {}));
+  assertTrue(foo(new A(), a => {}));
+  %OptimizeFunctionOnNextCall(foo);
+  assertTrue(foo(new A(), a => {}));
+  assertFalse(foo(new A(), a => { a.__proto__ = {}; }));
+})();
+
+(function() {
+  class A {};
+  A.__proto__ = {};
+  A.prototype = {};
+
+  function foo() {
+    var x = Object.create(Object.create(Object.create(A.prototype)));
+    return x instanceof A;
+  };
+
+  %PrepareFunctionForOptimization(foo);
+  assertTrue(foo());
+  assertTrue(foo());
+  %OptimizeFunctionOnNextCall(foo);
+  assertTrue(foo());
+})();
+
+(function() {
+  class A {};
+  A.prototype = {};
+  A.__proto__ = {};
+  var a = {__proto__: new A, gaga: 42};
+
+  function foo() {
+    A.bla;  // Make A.__proto__ fast again.
+    a.gaga;
+    return a instanceof A;
+  };
+
+  %PrepareFunctionForOptimization(foo);
+  assertTrue(foo());
+  assertTrue(foo());
+  %OptimizeFunctionOnNextCall(foo);
+  assertTrue(foo());
+})();
+
+(function() {
+  class A {};
+  A.prototype = {};
+  A.__proto__ = {};
+  const boundA = Function.prototype.bind.call(A, {});
+  boundA.prototype = {};
+  boundA.__proto__ = {};
+  var a = {__proto__: new boundA, gaga: 42};
+
+  function foo() {
+    A.bla;  // Make A.__proto__ fast again.
+    boundA.bla;  // Make boundA.__proto__ fast again.
+    a.gaga;
+    return a instanceof boundA;
+  };
+
+  %PrepareFunctionForOptimization(foo);
+  assertTrue(foo());
+  assertTrue(foo());
+  %OptimizeFunctionOnNextCall(foo);
+  assertTrue(foo());
 })();
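
A sketch of the bound-function case the last block exercises: OrdinaryHasInstance unwraps [[BoundTargetFunction]], so `instanceof` against a bound function follows the target's prototype chain:

    class A {}
    const BoundA = A.bind(null);
    new A() instanceof BoundA;  // true
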
diff --git a/deps/v8/test/mjsunit/compiler/regress-992684.js b/deps/v8/test/mjsunit/compiler/regress-992684.js
new file mode 100644
index 00000000000000..55854781a39a32
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-992684.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-lazy-feedback-allocation
+
+function* g(h) {
+  return yield* h;
+}
+
+var f = Object.getPrototypeOf(function*(){}).prototype;
+var t = f.throw;
+const h = (function*(){})();
+h.next = function () { return { }; };
+const x = g(h);
+x.next();
+delete f.throw;
+
+try {
+  t.bind(x)();
+} catch (e) {}
+
+%PrepareFunctionForOptimization(g);
+g();
+%OptimizeFunctionOnNextCall(g);
+g();
diff --git a/deps/v8/test/mjsunit/compiler/regress-995430.js b/deps/v8/test/mjsunit/compiler/regress-995430.js
new file mode 100644
index 00000000000000..7df0efcf8709c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-995430.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+  x ** -9 === '';
+};
+%PrepareFunctionForOptimization(foo);
+%OptimizeFunctionOnNextCall(foo);
+try { foo() } catch(_) {};
diff --git a/deps/v8/test/mjsunit/compiler/regress-995562.js b/deps/v8/test/mjsunit/compiler/regress-995562.js
new file mode 100644
index 00000000000000..332960b360af3d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-995562.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+  function foo() {
+    return { [bla]() {} };
+  }
+  %PrepareFunctionForOptimization(foo);
+  %OptimizeFunctionOnNextCall(foo);
+  try { foo() } catch(_) {};
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-997100.js b/deps/v8/test/mjsunit/compiler/regress-997100.js
new file mode 100644
index 00000000000000..7611f6e7b34209
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-997100.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function C() { return this };
+function foo() {
+  return new C() instanceof function(){};
+}
+%PrepareFunctionForOptimization(C);
+%PrepareFunctionForOptimization(foo);
+assertFalse(foo());
+%OptimizeFunctionOnNextCall(foo);
+assertFalse(foo());
diff --git a/deps/v8/test/mjsunit/compiler/regress-nonextensiblearray-store-outofbounds.js b/deps/v8/test/mjsunit/compiler/regress-nonextensiblearray-store-outofbounds.js
new file mode 100644
index 00000000000000..4d8d64385e6eda
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-nonextensiblearray-store-outofbounds.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+const v3 = [0,"symbol"];
+const v5 = 0 - 1;
+const v6 = Object.preventExtensions(v3);
+let v9 = 0;
+function f1() {
+  v6[119090556] = v5;
+}
+%PrepareFunctionForOptimization(f1);
+f1();
+%OptimizeFunctionOnNextCall(f1);
+f1();
+assertOptimized(f1);
+assertEquals(v6.length, 2);
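
A sketch of the semantics being pinned: after Object.preventExtensions, an out-of-bounds store cannot add a new element, so in sloppy mode it is silently dropped and `length` stays 2 (the same store in strict mode would throw a TypeError):

    const arr = Object.preventExtensions([0, 'symbol']);
    arr[119090556] = -1;  // no-op in sloppy mode
    arr.length;           // 2
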
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js b/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
index f166ca2eb1a338..049348856d3890 100644
--- a/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
@@ -55,7 +55,7 @@ if (this.Worker) {
 
     // Clone SharedArrayBuffer
     w.postMessage(sab);
-    assertEquals(16, sab.byteLength);  // ArrayBuffer should not be neutered.
+    assertEquals(16, sab.byteLength);  // ArrayBuffer should not be detached.
 
     // Spinwait for the worker to update ta[0]
     var ta0;
@@ -65,7 +65,7 @@ if (this.Worker) {
 
     w.terminate();
 
-    assertEquals(16, sab.byteLength);  // Still not neutered.
+    assertEquals(16, sab.byteLength);  // Still not detached.
   })();
 
   (function TestCloneMulti() {
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-shutdown-empty.js b/deps/v8/test/mjsunit/d8/d8-worker-shutdown-empty.js
new file mode 100644
index 00000000000000..360e36ef08a487
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-worker-shutdown-empty.js
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stress-runs=1
+
+const kBatchSize = 10;
+const kNumBatches = 10;
+
+function RunWorkerBatch(count) {
+  let script = `postMessage(42)`;
+
+  // Launch workers.
+  let workers = new Array(count);
+  for (let i = 0; i < count; i++) {
+    workers[i] = new Worker(script, {type : 'string'});
+  }
+
+  // Terminate half of the workers early.
+  for (let i = 0; i < workers.length; i++) {
+    if ((i & 1) == 1) workers[i].terminate();
+  }
+
+  // Get messages from some workers.
+  for (let i = 0; i < workers.length; i++) {
+    let msg = workers[i].getMessage();
+    assertTrue(msg === undefined || msg === 42);
+    // Terminate all workers.
+    workers[i].terminate();
+  }
+}
+
+(function RunTest() {
+  print(`running ${kNumBatches} batches...`);
+  let time = performance.now();
+  for (let i = 0; i < kNumBatches; i++) {
+    let before = performance.now();
+    RunWorkerBatch(kBatchSize);
+    let time = performance.now() - before;
+    print(`batch ${i+1}, Δ = ${(time).toFixed(3)} ms`);
+  }
+})();
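
These shutdown tests lean on d8's Worker API: with `{type: 'string'}` the first argument is the worker source itself rather than a file name, and `getMessage()` returns the next message or `undefined` once the worker has been terminated, which is why the assertion above accepts both values. A minimal sketch:

    let w = new Worker(`postMessage(42)`, {type: 'string'});
    w.getMessage();  // 42, or undefined if the worker was already terminated
    w.terminate();
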
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-shutdown-gc.js b/deps/v8/test/mjsunit/d8/d8-worker-shutdown-gc.js
new file mode 100644
index 00000000000000..b276a4c20ee4e6
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-worker-shutdown-gc.js
@@ -0,0 +1,56 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --stress-runs=1
+
+const kBatchSize = 10;
+const kNumBatches = 10;
+
+function RunWorkerBatch(count) {
+  let script = `onmessage =
+   function(msg) {
+     if (msg.array) {
+        msg.array[0] = 99;
+        postMessage({array : msg.array});
+     }
+}`;
+
+  // Launch workers.
+  let workers = new Array(count);
+  for (let i = 0; i < count; i++) {
+    workers[i] = new Worker(script, {type : 'string'});
+  }
+
+  // Send messages.
+  for (let i = 0; i < workers.length; i++) {
+    let array = new Int32Array([55, -77]);
+    workers[i].postMessage({array : array});
+    // Terminate half of the workers early.
+    if ((i & 1) == 1) workers[i].terminate();
+  }
+
+  // Wait for replies.
+  for (let i = 0; i < workers.length; i++) {
+    let msg = workers[i].getMessage();
+    if (msg !== undefined && msg.array) {
+      assertInstanceof(msg.array, Int32Array);
+      assertEquals(99, msg.array[0]);
+      assertEquals(-77, msg.array[1]);
+    }
+    // Terminate all workers.
+    workers[i].terminate();
+  }
+}
+
+(function RunTest() {
+  print(`running ${kNumBatches} batches...`);
+  let time = performance.now();
+  for (let i = 0; i < kNumBatches; i++) {
+    let before = performance.now();
+    RunWorkerBatch(kBatchSize);
+    gc();
+    let time = performance.now() - before;
+    print(`batch ${i+1}, Δ = ${(time).toFixed(3)} ms`);
+  }
+})();
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-shutdown-spawn.js b/deps/v8/test/mjsunit/d8/d8-worker-shutdown-spawn.js
new file mode 100644
index 00000000000000..92a675e4fe9f27
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-worker-shutdown-spawn.js
@@ -0,0 +1,47 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --stress-runs=1
+
+let script = `onmessage =
+   function(msg) {
+     if (msg.depth > 0) {
+        print("spawn");
+        let w = new Worker(msg.script, {type : "string"});
+        w.postMessage({script: msg.script, depth: msg.depth - 1});
+        let m = w.getMessage();
+        w.terminate();
+        postMessage(m);
+     } else {
+        postMessage(-99);
+     }
+}`;
+
+function RunWorker(depth) {
+  let w = new Worker(script, {type : "string"});
+
+  let array = new Int32Array([55, -77]);
+  w.postMessage({script: script, depth: depth});
+  let msg = w.getMessage();
+  print(msg);
+  w.terminate();
+}
+
+function RunTest(depth, iterations) {
+  let time = performance.now();
+  for (let i = 0; i < iterations; i++) {
+    let now = performance.now();
+    print(`iteration ${i}, Δ = ${(now - time).toFixed(3)} ms`);
+    RunWorker(depth);
+    gc();
+    time = now;
+  }
+}
+
+// TODO(9524): increase the workload of this test. Runs out of threads
+// on too many platforms.
+RunTest(1, 1);
+RunTest(2, 2);
+RunTest(5, 3);
+RunTest(9, 2);
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-shutdown.js b/deps/v8/test/mjsunit/d8/d8-worker-shutdown.js
new file mode 100644
index 00000000000000..b11e8c0423e71f
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-worker-shutdown.js
@@ -0,0 +1,55 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stress-runs=1
+
+const kBatchSize = 10;
+const kNumBatches = 10;
+
+function RunWorkerBatch(count) {
+  let script = `onmessage =
+   function(msg) {
+     if (msg.array) {
+        msg.array[0] = 99;
+        postMessage({array : msg.array});
+     }
+}`;
+
+  // Launch workers.
+  let workers = new Array(count);
+  for (let i = 0; i < count; i++) {
+    workers[i] = new Worker(script, {type : 'string'});
+  }
+
+  // Send messages.
+  for (let i = 0; i < workers.length; i++) {
+    let array = new Int32Array([55, -77]);
+    workers[i].postMessage({array : array});
+    // Terminate half of the workers early.
+    if ((i & 1) == 1) workers[i].terminate();
+  }
+
+  // Wait for replies.
+  for (let i = 0; i < workers.length; i++) {
+    let msg = workers[i].getMessage();
+    if (msg !== undefined && msg.array) {
+      assertInstanceof(msg.array, Int32Array);
+      assertEquals(99, msg.array[0]);
+      assertEquals(-77, msg.array[1]);
+    }
+    // Terminate all workers.
+    workers[i].terminate();
+  }
+}
+
+(function RunTest() {
+  print(`running ${kNumBatches} batches...`);
+  let time = performance.now();
+  for (let i = 0; i < kNumBatches; i++) {
+    let before = performance.now();
+    RunWorkerBatch(kBatchSize);
+    let time = performance.now() - before;
+    print(`batch ${i+1}, Δ = ${(time).toFixed(3)} ms`);
+  }
+})();
diff --git a/deps/v8/test/mjsunit/d8/d8-worker.js b/deps/v8/test/mjsunit/d8/d8-worker.js
index afc03f5c8b02c0..752bb5ced2ae0d 100644
--- a/deps/v8/test/mjsunit/d8/d8-worker.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker.js
@@ -147,12 +147,12 @@ if (this.Worker) {
   // Clone ArrayBuffer
   var ab1 = createArrayBuffer(16);
   w.postMessage(ab1);
-  assertEquals(16, ab1.byteLength);  // ArrayBuffer should not be neutered.
+  assertEquals(16, ab1.byteLength);  // ArrayBuffer should not be detached.
 
   // Transfer ArrayBuffer
   var ab2 = createArrayBuffer(32);
   w.postMessage(ab2, [ab2]);
-  assertEquals(0, ab2.byteLength);  // ArrayBuffer should be neutered.
+  assertEquals(0, ab2.byteLength);  // ArrayBuffer should be detached.
 
   // Attempting to transfer the same ArrayBuffer twice should throw.
   assertThrows(function() {
diff --git a/deps/v8/test/mjsunit/neuter-twice.js b/deps/v8/test/mjsunit/detach-twice.js
similarity index 100%
rename from deps/v8/test/mjsunit/neuter-twice.js
rename to deps/v8/test/mjsunit/detach-twice.js
diff --git a/deps/v8/test/mjsunit/es6/classes.js b/deps/v8/test/mjsunit/es6/classes.js
index eb25f07685f30d..de7312ce275ed1 100644
--- a/deps/v8/test/mjsunit/es6/classes.js
+++ b/deps/v8/test/mjsunit/es6/classes.js
@@ -37,7 +37,7 @@
   literal = { __proto__: class {} };
   assertEquals('', literal.__proto__.name);
   assertEquals(
-      undefined, Object.getOwnPropertyDescriptor(literal.__proto__, 'name'));
+      '', Object.getOwnPropertyDescriptor(literal.__proto__, 'name').value);
 
   literal = { __proto__: class F {} };
   assertEquals('F', literal.__proto__.name);
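
A sketch of the new behavior the updated assertion checks: classes now get an own `name` property eagerly (with `''` for anonymous classes) instead of it being absent until lazily installed:

    Object.getOwnPropertyDescriptor(class {}, 'name');
    // => { value: '', writable: false, enumerable: false, configurable: true }
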
diff --git a/deps/v8/test/mjsunit/es6/function-name.js b/deps/v8/test/mjsunit/es6/function-name.js
index c292edf0cd3be9..71f4da52f10900 100644
--- a/deps/v8/test/mjsunit/es6/function-name.js
+++ b/deps/v8/test/mjsunit/es6/function-name.js
@@ -394,7 +394,7 @@
 })();
 
 (function testClassNameOrder() {
-  assertEquals(['length', 'prototype'], Object.getOwnPropertyNames(class {}));
+  assertEquals(['length', 'prototype', 'name'], Object.getOwnPropertyNames(class {}));
 
   var tmp = {'': class {}};
   var Tmp = tmp[''];
diff --git a/deps/v8/test/mjsunit/es6/large-classes-properties.js b/deps/v8/test/mjsunit/es6/large-classes-properties.js
index a670b0a90770f1..fe3fb13b8ff0cc 100644
--- a/deps/v8/test/mjsunit/es6/large-classes-properties.js
+++ b/deps/v8/test/mjsunit/es6/large-classes-properties.js
@@ -8,14 +8,14 @@
   // This is to test for dictionary mode when there more than
   // kMaxNumberOfDescriptors (1024) properties.
   const kLimit = 1030;
-  let evalString = "(function(i) { " +
+  let evalString = "function f(i) { " +
       "let clazz = class { " +
       "   constructor(i) { this.value = i;";
   for (let i = 0; i < kLimit ; i++) {
     evalString  += "this.property"+i +" = "+i+"; "
   }
   evalString += "}};" +
-      " return (new clazz(i)); })";
+      " return (new clazz(i)); }; f;";
 
   let fn = eval(evalString);
   %PrepareFunctionForOptimization(fn);
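
The eval string changes from a parenthesized function expression to a declaration followed by a bare `f;` reference; both forms leave the function as eval's completion value, which is presumably the point of the trailing `f;`:

    let fn2 = eval("function g(i) { return i; }; g;");
    fn2(1);  // 1
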
diff --git a/deps/v8/test/mjsunit/es6/throw-type-error-function-restrictions.js b/deps/v8/test/mjsunit/es6/throw-type-error-function-restrictions.js
index 7f67747f2919e5..0eeb9591b2dbb2 100644
--- a/deps/v8/test/mjsunit/es6/throw-type-error-function-restrictions.js
+++ b/deps/v8/test/mjsunit/es6/throw-type-error-function-restrictions.js
@@ -5,8 +5,12 @@
 var throwTypeErrorFunction =
     Object.getOwnPropertyDescriptor(Function.prototype, 'arguments').get;
 
-assertFalse(
-    Object.prototype.hasOwnProperty.call(throwTypeErrorFunction, 'name'));
+var nameDesc =
+    Object.getOwnPropertyDescriptor(throwTypeErrorFunction, 'name');
+assertEquals('', nameDesc.value);
+assertFalse(nameDesc.configurable);
+assertFalse(nameDesc.enumerable);
+assertFalse(nameDesc.writable);
 assertThrows(function() {
   'use strict';
   throwTypeErrorFunction.name = 'foo';
diff --git a/deps/v8/test/mjsunit/es9/regexp-lookbehind.js b/deps/v8/test/mjsunit/es9/regexp-lookbehind.js
index c3aae317a94e41..513fd630fe7c44 100644
--- a/deps/v8/test/mjsunit/es9/regexp-lookbehind.js
+++ b/deps/v8/test/mjsunit/es9/regexp-lookbehind.js
@@ -27,6 +27,7 @@ assertEquals(["def"], "abcdef".match(/(?<=\w*)[^a|b|c]{3}/));
 // Start of line matches.
 assertEquals(["def"], "abcdef".match(/(?<=^abc)def/));
 assertEquals(["def"], "abcdef".match(/(?<=^[a-c]{3})def/));
+assertEquals(["def"], "abcabcdef".match(/(?<=^[a-c]{6})def/));
 assertEquals(["def"], "xyz\nabcdef".match(/(?<=^[a-c]{3})def/m));
 assertEquals(["ab", "cd", "efg"], "ab\ncd\nefg".match(/(?<=^)\w+/gm));
 assertEquals(["ab", "cd", "efg"], "ab\ncd\nefg".match(/\w+(?<=$)/gm));
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-904167.js b/deps/v8/test/mjsunit/es9/regress/regress-904167.js
index 8986972a8fcd23..7dc06e9cb36490 100644
--- a/deps/v8/test/mjsunit/es9/regress/regress-904167.js
+++ b/deps/v8/test/mjsunit/es9/regress/regress-904167.js
@@ -6,7 +6,7 @@
 // as tagged, potentially dereferencing a Float64.
 
 // Ensure that we don't fail an assert from --verify-heap when cloning a
-// MutableHeapNumber in the CloneObjectIC handler case.
+// HeapNumber in the CloneObjectIC handler case.
 var src, clone;
 for (var i = 0; i < 40000; i++) {
     src = { ...i, x: -9007199254740991 };
diff --git a/deps/v8/test/mjsunit/harmony/atomics.js b/deps/v8/test/mjsunit/harmony/atomics.js
index ef900761032bb9..5caeb7e8e5ca87 100644
--- a/deps/v8/test/mjsunit/harmony/atomics.js
+++ b/deps/v8/test/mjsunit/harmony/atomics.js
@@ -5,27 +5,47 @@
 // Flags: --harmony-sharedarraybuffer
 //
 
-function toRangeWrapped(value) {
-  var range = this.max - this.min + 1;
-  while (value < this.min) {
-    value += range;
-  }
-  while (value > this.max) {
-    value -= range;
+function toRangeWrapper(is_big) {
+  return function _toRangeWrapped(raw_value) {
+    var raw_range = this.max - this.min + (is_big ? 1n : 1);
+    let range = is_big ? BigInt(raw_range) : raw_range;
+    let value = is_big ? BigInt(raw_value) : raw_value;
+    while (value < this.min) {
+      value += range;
+    }
+    while (value > this.max) {
+      value -= range;
+    }
+    return value;
   }
-  return value;
 }
 
 function makeConstructorObject(constr, min, max, toRange) {
   var o = {constr: constr, min: min, max: max};
-  o.toRange = toRangeWrapped.bind(o);
+  let is_big = constr.name.startsWith('Big');
+  o.toRange = toRangeWrapper(is_big).bind(o);
   return o;
 }
 
+function IsBig(t) {
+  return t.constructor.name.startsWith('Big');
+}
+
+function MaybeIntToBigInt(arr, i) {
+  if (IsBig(arr)) {
+    return BigInt(i);
+  } else {
+    return i;
+  }
+}
+
 var IntegerTypedArrayConstructors = [
   makeConstructorObject(Int8Array, -128, 127),
   makeConstructorObject(Int16Array, -32768, 32767),
   makeConstructorObject(Int32Array, -0x80000000, 0x7fffffff),
+  makeConstructorObject(BigInt64Array, -0x8000_0000_0000_0000n,
+                        0x7fff_ffff_ffff_ffffn),
+  makeConstructorObject(BigUint64Array, 0n, 0xffff_ffff_ffff_ffffn),
   makeConstructorObject(Uint8Array, 0, 255),
   makeConstructorObject(Uint16Array, 0, 65535),
   makeConstructorObject(Uint32Array, 0, 0xffffffff),
@@ -189,16 +209,19 @@ function clearArray(sab) {
     var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
 
     [sta, sta2].forEach(function(array) {
+      let _i = (i) => { return MaybeIntToBigInt(array, i); }
       clearArray(array.buffer);
       var name = Object.prototype.toString.call(array);
       for (var i = 0; i < array.length; ++i) {
         // array[i] == 0, CAS will store
-        assertEquals(0, Atomics.compareExchange(array, i, 0, 50), name);
-        assertEquals(50, array[i], name);
+        assertEquals(_i(0), Atomics.compareExchange(array, i, _i(0), _i(50)),
+                    name);
+        assertEquals(_i(50), array[i], name);
 
         // array[i] == 50, CAS will not store
-        assertEquals(50, Atomics.compareExchange(array, i, 0, 100), name);
-        assertEquals(50, array[i], name);
+        assertEquals(_i(50), Atomics.compareExchange(array, i, _i(0), _i(100)),
+                     name);
+        assertEquals(_i(50), array[i], name);
       }
     })
   });
@@ -211,13 +234,14 @@ function clearArray(sab) {
     var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
 
     [sta, sta2].forEach(function(array) {
+      let _i = (i) => { return MaybeIntToBigInt(array, i); }
       clearArray(array.buffer);
       var name = Object.prototype.toString.call(array);
       for (var i = 0; i < array.length; ++i) {
-        array[i] = 0;
-        assertEquals(0, Atomics.load(array, i), name);
-        array[i] = 50;
-        assertEquals(50, Atomics.load(array, i), name);
+        array[i] = _i(0);
+        assertEquals(_i(0), Atomics.load(array, i), name);
+        array[i] = _i(50);
+        assertEquals(_i(50), Atomics.load(array, i), name);
       }
     })
   });
@@ -248,14 +272,15 @@ function clearArray(sab) {
     var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
 
     [sta, sta2].forEach(function(array) {
+      let _i = (i) => { return MaybeIntToBigInt(array, i); }
       clearArray(array.buffer);
       var name = Object.prototype.toString.call(array);
       for (var i = 0; i < array.length; ++i) {
-        assertEquals(50, Atomics.store(array, i, 50), name);
-        assertEquals(50, array[i], name);
+        assertEquals(_i(50), Atomics.store(array, i, _i(50)), name);
+        assertEquals(_i(50), array[i], name);
 
-        assertEquals(100, Atomics.store(array, i, 100), name);
-        assertEquals(100, array[i], name);
+        assertEquals(_i(100), Atomics.store(array, i, _i(100)), name);
+        assertEquals(_i(100), array[i], name);
       }
     })
   });
@@ -268,14 +293,15 @@ function clearArray(sab) {
     var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
 
     [sta, sta2].forEach(function(array) {
+      let _i = (i) => { return MaybeIntToBigInt(array, i); }
       clearArray(array.buffer);
       var name = Object.prototype.toString.call(array);
       for (var i = 0; i < array.length; ++i) {
-        assertEquals(0, Atomics.add(array, i, 50), name);
-        assertEquals(50, array[i], name);
+        assertEquals(_i(0), Atomics.add(array, i, _i(50)), name);
+        assertEquals(_i(50), array[i], name);
 
-        assertEquals(50, Atomics.add(array, i, 70), name);
-        assertEquals(120, array[i], name);
+        assertEquals(_i(50), Atomics.add(array, i, _i(70)), name);
+        assertEquals(_i(120), array[i], name);
       }
     })
   });
@@ -288,15 +314,16 @@ function clearArray(sab) {
     var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
 
     [sta, sta2].forEach(function(array) {
+      let _i = (i) => { return MaybeIntToBigInt(array, i); }
       clearArray(array.buffer);
       var name = Object.prototype.toString.call(array);
       for (var i = 0; i < array.length; ++i) {
-        array[i] = 120;
-        assertEquals(120, Atomics.sub(array, i, 50), name);
-        assertEquals(70, array[i], name);
+        array[i] = _i(120);
+        assertEquals(_i(120), Atomics.sub(array, i, _i(50)), name);
+        assertEquals(_i(70), array[i], name);
 
-        assertEquals(70, Atomics.sub(array, i, 70), name);
-        assertEquals(0, array[i], name);
+        assertEquals(_i(70), Atomics.sub(array, i, _i(70)), name);
+        assertEquals(_i(0), array[i], name);
       }
     })
   });
@@ -309,15 +336,16 @@ function clearArray(sab) {
     var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
 
     [sta, sta2].forEach(function(array) {
+      let _i = (i) => { return MaybeIntToBigInt(array, i); }
       clearArray(array.buffer);
       var name = Object.prototype.toString.call(sta);
       for (var i = 0; i < array.length; ++i) {
-        array[i] = 0x3f;
-        assertEquals(0x3f, Atomics.and(array, i, 0x30), name);
-        assertEquals(0x30, array[i], name);
+        array[i] = _i(0x3f);
+        assertEquals(_i(0x3f), Atomics.and(array, i, _i(0x30)), name);
+        assertEquals(_i(0x30), array[i], name);
 
-        assertEquals(0x30, Atomics.and(array, i, 0x20), name);
-        assertEquals(0x20, array[i], name);
+        assertEquals(_i(0x30), Atomics.and(array, i, _i(0x20)), name);
+        assertEquals(_i(0x20), array[i], name);
       }
     })
   });
@@ -330,15 +358,16 @@ function clearArray(sab) {
     var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
 
     [sta, sta2].forEach(function(array) {
+      let _i = (i) => { return MaybeIntToBigInt(array, i); }
       clearArray(array.buffer);
       var name = Object.prototype.toString.call(array);
       for (var i = 0; i < array.length; ++i) {
-        array[i] = 0x30;
-        assertEquals(0x30, Atomics.or(array, i, 0x1c), name);
-        assertEquals(0x3c, array[i], name);
+        array[i] = _i(0x30);
+        assertEquals(_i(0x30), Atomics.or(array, i, _i(0x1c)), name);
+        assertEquals(_i(0x3c), array[i], name);
 
-        assertEquals(0x3c, Atomics.or(array, i, 0x09), name);
-        assertEquals(0x3d, array[i], name);
+        assertEquals(_i(0x3c), Atomics.or(array, i, _i(0x09)), name);
+        assertEquals(_i(0x3d), array[i], name);
       }
     })
   });
@@ -351,15 +380,16 @@ function clearArray(sab) {
     var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
 
     [sta, sta2].forEach(function(array) {
+      let _i = (i) => { return MaybeIntToBigInt(array, i); }
       clearArray(array.buffer);
       var name = Object.prototype.toString.call(array);
       for (var i = 0; i < array.length; ++i) {
-        array[i] = 0x30;
-        assertEquals(0x30, Atomics.xor(array, i, 0x1c), name);
-        assertEquals(0x2c, array[i], name);
+        array[i] = _i(0x30);
+        assertEquals(_i(0x30), Atomics.xor(array, i, _i(0x1c)), name);
+        assertEquals(_i(0x2c), array[i], name);
 
-        assertEquals(0x2c, Atomics.xor(array, i, 0x09), name);
-        assertEquals(0x25, array[i], name);
+        assertEquals(_i(0x2c), Atomics.xor(array, i, _i(0x09)), name);
+        assertEquals(_i(0x25), array[i], name);
       }
     })
   });
@@ -372,15 +402,16 @@ function clearArray(sab) {
     var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
 
     [sta, sta2].forEach(function(array) {
+      let _i = (i) => { return MaybeIntToBigInt(array, i); }
       clearArray(array.buffer);
       var name = Object.prototype.toString.call(array);
       for (var i = 0; i < array.length; ++i) {
-        array[i] = 0x30;
-        assertEquals(0x30, Atomics.exchange(array, i, 0x1c), name);
-        assertEquals(0x1c, array[i], name);
+        array[i] = _i(0x30);
+        assertEquals(_i(0x30), Atomics.exchange(array, i, _i(0x1c)), name);
+        assertEquals(_i(0x1c), array[i], name);
 
-        assertEquals(0x1c, Atomics.exchange(array, i, 0x09), name);
-        assertEquals(0x09, array[i], name);
+        assertEquals(_i(0x1c), Atomics.exchange(array, i, _i(0x09)), name);
+        assertEquals(_i(0x09), array[i], name);
       }
     })
   });
@@ -397,72 +428,73 @@ function clearArray(sab) {
   });
 
   // For all platforms we support, 1, 2 and 4 bytes should be lock-free.
-  assertEquals(true, Atomics.isLockFree(1));
-  assertEquals(true, Atomics.isLockFree(2));
-  assertEquals(true, Atomics.isLockFree(4));
-
-  // Sizes that aren't equal to a typedarray BYTES_PER_ELEMENT always return
-  // false.
-  var validSizes = {};
-  IntegerTypedArrayConstructors.forEach(function(t) {
-    validSizes[t.constr.BYTES_PER_ELEMENT] = true;
-  });
-
-  for (var i = 0; i < 1000; ++i) {
-    if (!validSizes[i]) {
-      assertEquals(false, Atomics.isLockFree(i));
-    }
-  }
+  assertTrue(Atomics.isLockFree(1));
+  assertTrue(Atomics.isLockFree(2));
+  assertTrue(Atomics.isLockFree(4));
+
+  assertFalse(Atomics.isLockFree(0));
+  assertFalse(Atomics.isLockFree(3));
+  assertFalse(Atomics.isLockFree(5));
+  assertFalse(Atomics.isLockFree(6));
+  assertFalse(Atomics.isLockFree(7));
+  // isLockFree(8) is platform dependent.
+  for (var i = 9; i < 100; ++i) assertFalse(Atomics.isLockFree(i));
 })();
 
 (function TestToNumber() {
   IntegerTypedArrayConstructors.forEach(function(t) {
     var sab = new SharedArrayBuffer(1 * t.constr.BYTES_PER_ELEMENT);
     var sta = new t.constr(sab);
+    let _i = (i) => { return MaybeIntToBigInt(sta, i); }
 
-    var valueOf = {valueOf: function(){ return 3;}};
+    var valueOf = {valueOf: function(){ return _i(3);}};
     var toString = {toString: function(){ return '3';}};
 
     [false, true, undefined, valueOf, toString].forEach(function(v) {
+      if (v === undefined && IsBig(sta)) {
+        // undefined does not convert to a BigInt.
+        return;
+      }
+      let _v = () => { return IsBig(sta) ? _i(v) : (v|0); }
       var name = Object.prototype.toString.call(sta) + ' - ' + v;
 
       // CompareExchange
-      sta[0] = 50;
-      assertEquals(50, Atomics.compareExchange(sta, 0, v, v), name);
+      sta[0] = _i(50);
+      assertEquals(_i(50), Atomics.compareExchange(sta, 0, v, v), name);
 
       // Store
-      assertEquals(v|0, Atomics.store(sta, 0, v), name);
-      assertEquals(v|0, sta[0], name);
+      assertEquals(_v(), Atomics.store(sta, 0, v), name);
+      assertEquals(_v(), sta[0], name);
 
       // Add
-      sta[0] = 120;
-      assertEquals(120, Atomics.add(sta, 0, v), name);
-      assertEquals(120 + (v|0), sta[0], name);
+      sta[0] = _i(120);
+      assertEquals(_i(120), Atomics.add(sta, 0, v), name);
+      assertEquals(_i(120) + _v(), sta[0], name);
 
       // Sub
-      sta[0] = 70;
-      assertEquals(70, Atomics.sub(sta, 0, v), name);
-      assertEquals(70 - (v|0), sta[0]);
+      sta[0] = _i(70);
+      assertEquals(_i(70), Atomics.sub(sta, 0, v), name);
+      assertEquals(_i(70) - _v(), sta[0]);
 
       // And
-      sta[0] = 0x20;
-      assertEquals(0x20, Atomics.and(sta, 0, v), name);
-      assertEquals(0x20 & (v|0), sta[0]);
+      sta[0] = _i(0x20);
+      assertEquals(_i(0x20), Atomics.and(sta, 0, v), name);
+      assertEquals(_i(0x20) & _v(), sta[0]);
 
       // Or
-      sta[0] = 0x3d;
-      assertEquals(0x3d, Atomics.or(sta, 0, v), name);
-      assertEquals(0x3d | (v|0), sta[0]);
+      sta[0] = _i(0x3d);
+      assertEquals(_i(0x3d), Atomics.or(sta, 0, v), name);
+      assertEquals(_i(0x3d) | _v(), sta[0]);
 
       // Xor
-      sta[0] = 0x25;
-      assertEquals(0x25, Atomics.xor(sta, 0, v), name);
-      assertEquals(0x25 ^ (v|0), sta[0]);
+      sta[0] = _i(0x25);
+      assertEquals(_i(0x25), Atomics.xor(sta, 0, v), name);
+      assertEquals(_i(0x25) ^ _v(), sta[0]);
 
       // Exchange
-      sta[0] = 0x09;
-      assertEquals(0x09, Atomics.exchange(sta, 0, v), name);
-      assertEquals(v|0, sta[0]);
+      sta[0] = _i(0x09);
+      assertEquals(_i(0x09), Atomics.exchange(sta, 0, v), name);
+      assertEquals(_v(), sta[0]);
     });
   });
 })();
@@ -472,7 +504,8 @@ function clearArray(sab) {
     var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
     var sta = new t.constr(sab);
     var name = Object.prototype.toString.call(sta);
-    var range = t.max - t.min + 1;
+    let _i = (i) => { return MaybeIntToBigInt(sta, i); }
+    var range = t.max - t.min + _i(1);
     var offset;
     var operand;
     var val, newVal;
@@ -480,52 +513,52 @@ function clearArray(sab) {
 
     for (offset = -range; offset <= range; offset += range) {
       // CompareExchange
-      sta[0] = val = 0;
-      newVal = val + offset + 1;
+      sta[0] = val = _i(0);
+      newVal = val + offset + _i(1);
       newValWrapped = t.toRange(newVal);
       assertEquals(val, Atomics.compareExchange(sta, 0, val, newVal), name);
       assertEquals(newValWrapped, sta[0], name);
 
       sta[0] = val = t.min;
-      newVal = val + offset - 1;
+      newVal = val + offset - _i(1);
       newValWrapped = t.toRange(newVal);
       assertEquals(val, Atomics.compareExchange(sta, 0, val, newVal), name);
       assertEquals(newValWrapped, sta[0], name);
 
       // Store
-      sta[0] = 0;
-      val = t.max + offset + 1;
+      sta[0] = _i(0);
+      val = t.max + offset + _i(1);
       valWrapped = t.toRange(val);
       assertEquals(val, Atomics.store(sta, 0, val), name);
       assertEquals(valWrapped, sta[0], name);
 
-      sta[0] = val = t.min + offset - 1;
+      sta[0] = val = t.min + offset - _i(1);
       valWrapped = t.toRange(val);
       assertEquals(val, Atomics.store(sta, 0, val), name);
       assertEquals(valWrapped, sta[0], name);
 
       // Add
       sta[0] = val = t.max;
-      operand = offset + 1;
+      operand = offset + _i(1);
       valWrapped = t.toRange(val + operand);
       assertEquals(val, Atomics.add(sta, 0, operand), name);
       assertEquals(valWrapped, sta[0], name);
 
       sta[0] = val = t.min;
-      operand = offset - 1;
+      operand = offset - _i(1);
       valWrapped = t.toRange(val + operand);
       assertEquals(val, Atomics.add(sta, 0, operand), name);
       assertEquals(valWrapped, sta[0], name);
 
       // Sub
       sta[0] = val = t.max;
-      operand = offset - 1;
+      operand = offset - _i(1);
       valWrapped = t.toRange(val - operand);
       assertEquals(val, Atomics.sub(sta, 0, operand), name);
       assertEquals(valWrapped, sta[0], name);
 
       sta[0] = val = t.min;
-      operand = offset + 1;
+      operand = offset + _i(1);
       valWrapped = t.toRange(val - operand);
       assertEquals(val, Atomics.sub(sta, 0, operand), name);
       assertEquals(valWrapped, sta[0], name);
@@ -535,29 +568,29 @@ function clearArray(sab) {
       // to memory.
 
       // And
-      sta[0] = val = 0xf;
-      operand = 0x3 + offset;
+      sta[0] = val = _i(0xf);
+      operand = _i(0x3) + offset;
       valWrapped = t.toRange(val & operand);
       assertEquals(val, Atomics.and(sta, 0, operand), name);
       assertEquals(valWrapped, sta[0], name);
 
       // Or
-      sta[0] = val = 0x12;
-      operand = 0x22 + offset;
+      sta[0] = val = _i(0x12);
+      operand = _i(0x22) + offset;
       valWrapped = t.toRange(val | operand);
       assertEquals(val, Atomics.or(sta, 0, operand), name);
       assertEquals(valWrapped, sta[0], name);
 
       // Xor
-      sta[0] = val = 0x12;
-      operand = 0x22 + offset;
+      sta[0] = val = _i(0x12);
+      operand = _i(0x22) + offset;
       valWrapped = t.toRange(val ^ operand);
       assertEquals(val, Atomics.xor(sta, 0, operand), name);
       assertEquals(valWrapped, sta[0], name);
 
       // Exchange
-      sta[0] = val = 0x12;
-      operand = 0x22 + offset;
+      sta[0] = val = _i(0x12);
+      operand = _i(0x22) + offset;
       valWrapped = t.toRange(operand);
       assertEquals(val, Atomics.exchange(sta, 0, operand), name);
       assertEquals(valWrapped, sta[0], name);
@@ -574,7 +607,7 @@ function clearArray(sab) {
 
     // The index should be checked before calling ToInteger on the value, so
     // valueof_has_been_called should not be modified.
-    sta[0] = 0;
+    sta[0] = MaybeIntToBigInt(sta, 0);
     assertThrows(function() { op(sta, index, value, value); }, RangeError);
     assertEquals(0, valueof_has_been_called);
   };
diff --git a/deps/v8/test/mjsunit/harmony/nullish.js b/deps/v8/test/mjsunit/harmony/nullish.js
new file mode 100644
index 00000000000000..87d35db4bc62e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/nullish.js
@@ -0,0 +1,143 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-nullish
+
+// Basic sanity checks.
+assertTrue(true ?? false);
+assertFalse(false ?? true);
+assertTrue(undefined ?? true);
+assertTrue(null ?? true);
+assertEquals([], [] ?? true);
+
+// Chaining nullish.
+assertTrue(null ?? null ?? true);
+assertTrue(null ?? null ?? true ?? null);
+assertTrue(undefined ?? undefined ?? true);
+assertTrue(undefined ?? undefined ?? true ?? undefined);
+assertFalse(null ?? false ?? null);
+assertFalse(undefined ?? false ?? undefined);
+
+// Nullish and conditionals.
+assertTrue(null ?? true ? true : false);
+assertTrue(null ?? null ?? true ? true : false);
+assertTrue(undefined ?? true ? true : false);
+assertTrue(undefined ?? undefined ?? true ? true : false);
+
+// Nullish mixed expressions.
+assertTrue(null ?? 1 == 1);
+assertTrue(undefined ?? 1 == 1);
+assertTrue(null ?? null ?? 1 == 1);
+assertTrue(undefined ?? undefined ?? 1 == 1);
+assertEquals(1, null ?? 1 | 0);
+assertEquals(1, undefined ?? 1 | 0);
+assertEquals(1, null ?? null ?? 1 | 0);
+assertEquals(1, undefined ?? undefined ?? 1 | 0);
+
+// Short circuit.
+{
+  let ran = false;
+  let never_ran = () => { ran = true; }
+  let value = true ?? never_ran();
+  assertTrue(value);
+  assertFalse(ran);
+}
+
+{
+  let ran = false;
+  let never_ran = () => { ran = true; }
+  let value = undefined ?? true ?? never_ran();
+  assertTrue(value);
+  assertFalse(ran);
+}
+
+{
+  let ran = false;
+  let never_ran = () => { ran = true; }
+  let value = null ?? true ?? never_ran();
+  assertTrue(value);
+  assertFalse(ran);
+}
+
+// Nullish in tests evaluates only once.
+{
+  let run_count = 0;
+  let run = () => { run_count++; return null; }
+  if (run() ?? true) {} else { assertUnreachable(); }
+  assertEquals(1, run_count);
+}
+
+// Nullish may not contain or be contained within || or &&.
+assertThrows("true || true ?? true", SyntaxError);
+assertThrows("true ?? true || true", SyntaxError);
+assertThrows("true && true ?? true", SyntaxError);
+assertThrows("true ?? true && true", SyntaxError);
+
+// Test boolean expressions and nullish.
+assertTrue((false || true) ?? false);
+assertTrue(null ?? (false || true));
+assertTrue((false || null) ?? true);
+assertTrue((false || null) ?? (true && null) ?? true);
+assertTrue((false || undefined) ?? true);
+assertTrue((false || undefined) ?? (true && undefined) ?? true);
+assertTrue(null ?? (false || true));
+assertTrue(undefined ?? (false || true));
+assertTrue(null ?? (false || null) ?? true);
+assertTrue(undefined ?? (false || undefined) ?? true);
+assertTrue(null ?? null ?? (false || true));
+assertTrue(undefined ?? undefined ?? (false || true));
+assertTrue((undefined ?? false) || true);
+assertTrue((null ?? false) || true);
+assertTrue((undefined ?? undefined ?? false) || false || true);
+assertTrue((null ?? null ?? false) || false || true);
+assertTrue(false || (undefined ?? true));
+assertTrue(false || (null ?? true));
+assertTrue(false || false || (undefined ?? undefined ?? true));
+assertTrue(false || false || (null ?? null ?? true));
+
+// Test use when test true.
+if (undefined ?? true) {} else { assertUnreachable(); }
+if (null ?? true) {} else { assertUnreachable(); }
+
+if (undefined ?? undefined ?? true) {} else { assertUnreachable(); }
+if (null ?? null ?? true) {} else { assertUnreachable(); }
+
+// test use when test false
+if (undefined ?? false) { assertUnreachable(); } else {}
+if (null ?? false) { assertUnreachable(); } else {}
+
+if (undefined ?? undefined ?? false) { assertUnreachable(); } else {}
+if (null ?? null ?? false) { assertUnreachable(); } else {}
+
+if (undefined ?? false ?? true) { assertUnreachable(); } else {}
+if (null ?? false ?? true) { assertUnreachable(); } else {}
+
+// Test use with nested boolean.
+if ((false || undefined) ?? true) {} else { assertUnreachable(); }
+if ((false || null) ?? true) {} else { assertUnreachable(); }
+
+if ((false || undefined) ?? undefined ?? true) {} else { assertUnreachable(); }
+if ((false || null) ?? null ?? true) {} else { assertUnreachable(); }
+
+if (undefined ?? (false || true)) {} else { assertUnreachable(); }
+if (null ?? (false || true)) {} else { assertUnreachable(); }
+
+if (undefined ?? undefined ?? (false || true)) {} else { assertUnreachable(); }
+if (null ?? null ?? (false || true)) {} else { assertUnreachable(); }
+
+if (undefined ?? (false || undefined) ?? true) {} else { assertUnreachable(); }
+if (null ?? (false || null) ?? true) {} else { assertUnreachable(); }
+
+// Nested nullish.
+if ((null ?? true) || false) {} else { assertUnreachable(); }
+if ((null ?? null ?? true) || false) {} else { assertUnreachable(); }
+
+if (false || (null ?? true)) {} else { assertUnreachable(); }
+if (false || (null ?? null ?? true)) {} else { assertUnreachable(); }
+
+if ((null ?? false) || false) { assertUnreachable(); } else {}
+if ((null ?? null ?? false) || false) { assertUnreachable(); } else {}
+
+if (false || (null ?? false)) { assertUnreachable(); } else {}
+if (false || (null ?? null ?? false)) { assertUnreachable(); } else {}
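For reference, a minimal standalone sketch of the grammar rule the assertions above exercise: '??' may not be mixed bare with '||' or '&&', but parenthesizing either side makes the precedence explicit and legal. This assumes the mjsunit asserts used throughout these tests and the same --harmony-nullish flag.

    // Unparenthesized mixing is a SyntaxError by design.
    assertThrows("null || undefined ?? 'fallback'", SyntaxError);
    // With parentheses, both groupings are legal and unambiguous.
    assertEquals('fallback', (null || undefined) ?? 'fallback');
    assertEquals('fallback', null || (undefined ?? 'fallback'));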
diff --git a/deps/v8/test/mjsunit/harmony/numeric-separator.js b/deps/v8/test/mjsunit/harmony/numeric-separator.js
index 0ea3ac8f8d0dac..c2c32b7db09717 100644
--- a/deps/v8/test/mjsunit/harmony/numeric-separator.js
+++ b/deps/v8/test/mjsunit/harmony/numeric-separator.js
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-numeric-separator
-
 {
   const basic = 1_0_0_0;
   assertEquals(basic, 1000);
@@ -40,11 +38,6 @@
   const binary = 0b0_1_0_1_0;
   assertEquals(binary, 0b01010);
 }
-{
-  const leadingZeros = 09_1_3;
-  assertEquals(leadingZeros, 0913);
-}
-
 {
   const dot1 = 9_1.1_3;
   assertEquals(dot1, 91.13);
@@ -54,6 +47,9 @@
 
   const dot3 = 1_1.21;
   assertEquals(dot3, 11.21);
+
+  const dot4 = 09.1_2;
+  assertEquals(dot4, 9.12);
 }
 
 {
@@ -114,3 +110,4 @@ assertThrows('0o7__77', SyntaxError);
 assertThrows('0777_', SyntaxError);
 assertThrows('07__77', SyntaxError);
 assertThrows('07_7_7', SyntaxError);
+assertThrows('09_1_3', SyntaxError);
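A short sketch of the behavior change encoded above: numeric separators are now rejected inside legacy leading-zero integer literals such as 09_1_3, while a leading-zero decimal may still carry separators in its fractional part. Assumes sloppy mode (legacy leading-zero literals are illegal in strict mode) and the mjsunit asserts.

    assertThrows('09_1_3', SyntaxError);   // separator in a leading-zero integer
    assertEquals(9.12, eval('09.1_2'));    // separator after the dot is allowed
    assertEquals(1000, 1_0_0_0);           // ordinary decimal literals are unaffected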
diff --git a/deps/v8/test/mjsunit/harmony/optional-chaining.js b/deps/v8/test/mjsunit/harmony/optional-chaining.js
new file mode 100644
index 00000000000000..72b0559e0042ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/optional-chaining.js
@@ -0,0 +1,119 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-optional-chaining
+
+function shouldThrowSyntaxError(script) {
+  let error;
+  try {
+    eval(script);
+  } catch (e) {
+    error = e;
+  }
+
+  if (!(error instanceof SyntaxError)) {
+    throw new Error('Expected SyntaxError!');
+  }
+}
+
+assertEquals(undefined?.valueOf(), undefined);
+assertEquals(null?.valueOf(), undefined);
+assertEquals(true?.valueOf(), true);
+assertEquals(false?.valueOf(), false);
+assertEquals(0?.valueOf(), 0);
+assertEquals(1?.valueOf(), 1);
+assertEquals(''?.valueOf(), '');
+assertEquals('hi'?.valueOf(), 'hi');
+assertEquals(({})?.constructor, Object);
+assertEquals(({ x: 'hi' })?.x, 'hi');
+assertEquals([]?.length, 0);
+assertEquals(['hi']?.length, 1);
+
+assertEquals(undefined?.['valueOf'](), undefined);
+assertEquals(null?.['valueOf'](), undefined);
+assertEquals(true?.['valueOf'](), true);
+assertEquals(false?.['valueOf'](), false);
+assertEquals(0?.['valueOf'](), 0);
+assertEquals(1?.['valueOf'](), 1);
+assertEquals(''?.['valueOf'](), '');
+assertEquals('hi'?.['valueOf'](), 'hi');
+assertEquals(({})?.['constructor'], Object);
+assertEquals(({ x: 'hi' })?.['x'], 'hi');
+assertEquals([]?.['length'], 0);
+assertEquals(['hi']?.[0], 'hi');
+
+assertEquals(undefined?.(), undefined);
+assertEquals(null?.(), undefined);
+assertThrows(() => true?.(), TypeError);
+assertThrows(() => false?.(), TypeError);
+assertThrows(() => 0?.(), TypeError);
+assertThrows(() => 1?.(), TypeError);
+assertThrows(() => ''?.(), TypeError);
+assertThrows(() => 'hi'?.(), TypeError);
+assertThrows(() => ({})?.(), TypeError);
+assertThrows(() => ({ x: 'hi' })?.(), TypeError);
+assertThrows(() => []?.(), TypeError);
+assertThrows(() => ['hi']?.(), TypeError);
+
+assertThrows(() => ({})?.a['b'], TypeError);
+assertEquals(({})?.a?.['b'], undefined);
+assertEquals(null?.a['b']().c, undefined);
+assertThrows(() => ({})?.['a'].b);
+assertEquals(({})?.['a']?.b, undefined);
+assertEquals(null?.['a'].b()['c'], undefined);
+assertThrows(() => (() => {})?.()(), TypeError);
+assertEquals((() => {})?.()?.(), undefined);
+assertEquals(null?.()().a['b'], undefined);
+
+assertEquals(delete undefined?.foo, true);
+assertEquals(delete null?.foo, true);
+assertEquals(delete undefined?.['foo'], true);
+assertEquals(delete null?.['foo'], true);
+assertEquals(delete undefined?.(), true);
+assertEquals(delete null?.(), true);
+
+assertEquals(undefined?.(...a), undefined);
+assertEquals(null?.(1, ...a), undefined);
+assertEquals(({}).a?.(...a), undefined);
+assertEquals(({ a: null }).a?.(...a), undefined);
+assertEquals(undefined?.(...a)?.(1, ...a), undefined);
+assertThrows(() => 5?.(...[]), TypeError);
+
+const o1 = { x: 0, y: 0, z() {} };
+assertEquals(delete o1?.x, true);
+assertEquals(o1.x, undefined);
+assertEquals(delete o1?.x, true);
+assertEquals(delete o1?.['y'], true);
+assertEquals(o1.y, undefined);
+assertEquals(delete o1?.['y'], true);
+assertEquals(delete o1.z?.(), true);
+assertThrows(() => { delete ({})?.foo.bar; });
+
+shouldThrowSyntaxError('class C {} class D extends C { foo() { return super?.bar; } }');
+shouldThrowSyntaxError('class C {} class D extends C { foo() { return super?.["bar"]; } }');
+shouldThrowSyntaxError('class C {} class D extends C { constructor() { super?.(); } }');
+
+shouldThrowSyntaxError('const o = { C: class {} }; new o?.C();');
+shouldThrowSyntaxError('const o = { C: class {} }; new o?.["C"]();');
+shouldThrowSyntaxError('class C {} new C?.();');
+shouldThrowSyntaxError('function foo() { new?.target; }');
+
+shouldThrowSyntaxError('function tag() {} tag?.``;');
+shouldThrowSyntaxError('const o = { tag() {} }; o?.tag``;');
+
+const o2 = {
+  count: 0,
+  get x() {
+    this.count += 1;
+    return () => {};
+  },
+};
+o2.x?.y;
+assertEquals(o2.count, 1);
+o2.x?.['y'];
+assertEquals(o2.count, 2);
+o2.x?.();
+assertEquals(o2.count, 3);
+
+assertEquals(true?.5:5, 0.5);
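A small sketch of the short-circuit rule the mixed-chain assertions above depend on: once a ?. link finds null or undefined, the rest of the chain, including later non-optional links and argument lists, is skipped entirely. Runs under the same --harmony-optional-chaining flag; the probe helper is hypothetical.

    let calls = 0;
    const probe = () => { calls++; return 'x'; };
    // Base is null: neither the keyed access nor probe() runs.
    assertEquals(undefined, null?.[probe()].length);
    assertEquals(0, calls);
    // Base is an object: probe() runs and the chain completes.
    assertEquals(1, ({ x: 'y' })?.[probe()].length);
    assertEquals(1, calls);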
diff --git a/deps/v8/test/mjsunit/harmony/private-accessors.js b/deps/v8/test/mjsunit/harmony/private-accessors.js
new file mode 100644
index 00000000000000..3a828116a1c3db
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-accessors.js
@@ -0,0 +1,91 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+"use strict";
+
+// Complementary private accessors.
+{
+  let store = 1;
+  class C {
+    get #a() { return store; }
+    set #a(val) { store = val; }
+    incA() { this.#a++; }  // CountOperation
+    setA(val) { this.#a = val; }
+    getA() { return this.#a; }
+  }
+
+  const c = new C;
+  assertEquals(store, c.getA());
+  assertEquals(1, c.getA());
+  c.setA(2);
+  assertEquals(store, c.getA());
+  assertEquals(2, c.getA());
+  c.incA();
+  assertEquals(store, c.getA());
+  assertEquals(3, c.getA());
+}
+
+// Compound assignment.
+{
+  let store;
+  class A {
+    get #a() { return store; }
+    set #a(val) { store = val; }
+    getA() { return this.#a; }
+    constructor(val) {
+      ({ y: this.#a } = val);
+    }
+  }
+
+  const a = new A({y: 'test'});
+  assertEquals('test', a.getA());
+}
+
+// Accessing super in private accessors.
+{
+  class A { foo(val) {} }
+  class C extends A {
+    set #a(val) { super.foo(val); }
+  }
+  new C();
+
+  class D extends A {
+    get #a() { return super.foo; }
+  }
+  new D();
+
+  class E extends A {
+    set #a(val) { super.foo(val); }
+    get #a() { return super.foo; }
+  }
+  new E();
+}
+
+// Nested private accessors.
+{
+  class C {
+    get #a() {
+      let storeD = 'd';
+      class D {
+        // Shadows outer #a
+        get #a() { return storeD; }
+        getD() { return this.#a; }
+      }
+      return new D;
+    }
+    getA() {
+      return this.#a;
+    }
+  }
+  assertEquals('d', new C().getA().getD());
+}
+
+// Duplicate private accessors.
+// https://tc39.es/proposal-private-methods/#sec-static-semantics-early-errors
+{
+  assertThrows('class C { get #a() {} get #a() {} }', SyntaxError);
+  assertThrows('class C { set #a(val) {} set #a(val) {} }', SyntaxError);
+}
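A compact sketch of the pattern these tests use, assuming the same --harmony-private-methods flag: a pair of private accessors backs its state with a captured binding and leaves no reflectable property behind on the instance.

    let n = 0;
    class Counter {
      get #value() { return n; }
      set #value(v) { n = v; }
      bump() { this.#value = this.#value + 1; return this.#value; }
    }
    const counter = new Counter();
    assertEquals(1, counter.bump());
    assertEquals(2, counter.bump());
    // Private accessors are invisible to ordinary reflection.
    assertEquals(0, Object.getOwnPropertyNames(counter).length);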
diff --git a/deps/v8/test/mjsunit/harmony/private-methods.js b/deps/v8/test/mjsunit/harmony/private-methods.js
index 360b065f17db62..fcd80823c1c52a 100644
--- a/deps/v8/test/mjsunit/harmony/private-methods.js
+++ b/deps/v8/test/mjsunit/harmony/private-methods.js
@@ -281,3 +281,17 @@
   new D;
   new E;
 }
+
+// Super access within private methods.
+{
+  class A {
+    foo() { return 1; }
+  }
+
+  class C extends A {
+    #m() { return super.foo; }
+    fn() { return this.#m()(); }
+  }
+
+  assertEquals(1, new C().fn());
+}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
index 20726284bb154a..ebc4ebf933f1ed 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
@@ -6,6 +6,7 @@
 
 let cleanup_call_count = 0;
 let cleanup = function(iter) {
+  print("in cleanup");
   if (cleanup_call_count == 0) {
     // First call: iterate 2 of the 3 holdings
     let holdings_list = [];
@@ -74,11 +75,15 @@ let timeout_func_2 = function() {
   assertEquals(1, cleanup_call_count);
 
   // Create a new object and register it.
-  let obj = {};
-  let wc = fg.register(obj, 100);
-  obj = null;
+  (function() {
+    let obj = {};
+    let wc = fg.register(obj, 100);
+    obj = null;
+  })();
 
+  // This GC will reclaim the targets.
   gc();
+  assertEquals(1, cleanup_call_count);
 
   setTimeout(timeout_func_3, 0);
 }
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
similarity index 63%
rename from deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js
rename to deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
index c6b834e8fb9248..077bc21e8273f2 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
 
-// This test asserts that the cleanup function call, scheduled by GC, is a
-// microtask and not a normal task.
+// This test asserts that the cleanup function call, scheduled by GC, is not
+// a microtask but a separate task.
@@ -11,6 +11,8 @@
 // microtask).  Also schedule another microtask. Assert that the cleanup
 // function ran before the other microtask.
 
+let cleanedUp = false;
+
 function scheduleMicrotask(func) {
   Promise.resolve().then(func);
 }
@@ -18,7 +20,7 @@ function scheduleMicrotask(func) {
 let log = [];
 
 let cleanup = (iter) => {
-  log.push("cleanup");
+  cleanedUp = true;
   for (holdings of iter) { }
 }
 
@@ -32,25 +34,29 @@ let o = null;
   fg.register(o, {});
 })();
 
-let microtask_after_cleanup = () => {
-  log.push("microtask_after_cleanup");
-}
-
-let first_microtask = function() {
+let microtask = function() {
   log.push("first_microtask");
 
-  // This schedules the cleanup function as microtask.
+  // Cause GC during a microtask.
   o = null;
   gc();
-
-  // Schedule a microtask which should run after the cleanup microtask.
-  scheduleMicrotask(microtask_after_cleanup);
 }
 
-scheduleMicrotask(first_microtask);
+assertFalse(cleanedUp);
+
+// Enqueue a microtask that triggers GC.
+Promise.resolve().then(microtask);
+
+// The cleanup callback has not been called yet, as we are still in
+// synchronous execution.
+assertFalse(cleanedUp);
+
+// Flush the microtask queue to run the microtask that triggers GC.
+%PerformMicrotaskCheckpoint();
+
+// Still no cleanup callback: it runs later in a separate task.
+assertFalse(cleanedUp);
 
 setTimeout(() => {
-  // Assert that the functions got called in the right order.
-  let wanted_log = ["first_microtask", "cleanup", "microtask_after_cleanup"];
-  assertEquals(wanted_log, log);
+  assertTrue(cleanedUp);
 }, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
index ca156e0574a3b2..9cc548920c3ce0 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
@@ -4,12 +4,13 @@
 
 // Flags: --harmony-weak-refs --expose-gc --noincremental-marking
 
+let cleanedUp = false;
 let r = Realm.create();
 let FG = Realm.eval(r, "FinalizationGroup");
 Realm.detachGlobal(r);
 
 let fg = new FG(()=> {
-  assertUnreachable();
+  cleanedUp = true;
 });
 
 (() => {
@@ -20,3 +21,5 @@ let fg = new FG(()=> {
 })();
 
 gc();
+
+setTimeout(function() { assertTrue(cleanedUp); }, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js
index bd66f1ce1d4931..83de3a838b2b15 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
 
 let cleanup_called = false;
 let cleanup = function(iter) {
@@ -32,13 +32,13 @@ gc();
   assertNotEquals(undefined, weak_ref.deref());
 })();
 
-%PerformMicrotaskCheckpoint();
-// Next turn.
+// Trigger GC in the next task.
+setTimeout(() => {
+  gc();
 
-gc();
-
-%PerformMicrotaskCheckpoint();
-// Next turn.
-
-assertTrue(cleanup_called);
-assertEquals(undefined, weak_ref.deref());
+  // Check that the cleanup callback was called in a follow-up task.
+  setTimeout(() => {
+    assertTrue(cleanup_called);
+    assertEquals(undefined, weak_ref.deref());
+  }, 0);
+}, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
index c3fc9f741c76eb..c17e7aa969d4fb 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
 
 let o1 = {};
 let o2 = {};
@@ -21,28 +21,27 @@ gc();
   assertNotEquals(undefined, wr2.deref());
 })();
 
-%PerformMicrotaskCheckpoint();
-// New turn.
-
-wr1.deref();
-o1 = null;
-gc(); // deref makes sure we don't clean up wr1
-
-%PerformMicrotaskCheckpoint();
-// New turn.
-
-wr2.deref();
-o2 = null;
-gc(); // deref makes sure we don't clean up wr2
-
-%PerformMicrotaskCheckpoint();
-// New turn.
-
-assertEquals(undefined, wr1.deref());
-
-gc();
-
-%PerformMicrotaskCheckpoint();
-// New turn.
-
-assertEquals(undefined, wr2.deref());
+// New task
+setTimeout(function() {
+  wr1.deref();
+  o1 = null;
+  gc(); // deref makes sure we don't clean up wr1
+
+  // New task
+  setTimeout(function() {
+    wr2.deref();
+    o2 = null;
+    gc(); // deref makes sure we don't clean up wr2
+
+    // New task
+    setTimeout(function() {
+      assertEquals(undefined, wr1.deref());
+      gc();
+
+      // New task
+      setTimeout(function() {
+        assertEquals(undefined, wr2.deref());
+      }, 0);
+    }, 0);
+  }, 0);
+}, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
index 18e3af26cec8b6..4c8641d8aa0ff2 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
 
 let wr;
 (function() {
@@ -19,9 +19,8 @@ gc();
   assertNotEquals(undefined, wr.deref());
 })();
 
-%PerformMicrotaskCheckpoint();
-// Next turn.
-
-gc();
-
-assertEquals(undefined, wr.deref());
+// Next task.
+setTimeout(() => {
+  gc();
+  assertEquals(undefined, wr.deref());
+}, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
index c17f0607138bff..eb02290dfdc64f 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
 
 let wr;
 let wr_control; // control WeakRef for testing what happens without deref
@@ -17,29 +17,29 @@ let strong = {a: wr.deref(), b: wr_control.deref()};
 
 gc();
 
-%PerformMicrotaskCheckpoint();
-// Next turn.
+// Next task.
+setTimeout(function() {
+  // Call deref inside a closure, trying to avoid accidentally storing a strong
+  // reference into the object in the stack frame.
+  (function() {
+    wr.deref();
+  })();
 
-// Call deref inside a closure, trying to avoid accidentally storing a strong
-// reference into the object in the stack frame.
-(function() {
-  wr.deref();
-})();
-
-strong = null;
-
-// This GC will clear wr_control.
-gc();
+  strong = null;
 
-(function() {
-  assertNotEquals(undefined, wr.deref());
-  // Now the control WeakRef got cleared, since nothing was keeping it alive.
-  assertEquals(undefined, wr_control.deref());
-})();
+  // This GC will clear wr_control.
+  gc();
 
-%PerformMicrotaskCheckpoint();
-// Next turn.
+  (function() {
+    assertNotEquals(undefined, wr.deref());
+    // Now the control WeakRef got cleared, since nothing was keeping it alive.
+    assertEquals(undefined, wr_control.deref());
+  })();
 
-gc();
+  // Next task.
+  setTimeout(function() {
+    gc();
 
-assertEquals(undefined, wr.deref());
+    assertEquals(undefined, wr.deref());
+  }, 0);
+}, 0);
diff --git a/deps/v8/test/mjsunit/interrupt-budget-override.js b/deps/v8/test/mjsunit/interrupt-budget-override.js
index 6dbf0785a7c3a9..5f83b3ccc5e9dc 100644
--- a/deps/v8/test/mjsunit/interrupt-budget-override.js
+++ b/deps/v8/test/mjsunit/interrupt-budget-override.js
@@ -12,7 +12,8 @@ function f() {
   return s;
 }
 
-%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(f, "allow heuristic optimization");
+f();
 f();
 f();
 assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 8582b38036920c..fd45ebacbd29d4 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -174,6 +174,7 @@ var V8OptimizationStatus = {
   kIsExecuting: 1 << 10,
   kTopmostFrameIsTurboFanned: 1 << 11,
   kLiteMode: 1 << 12,
+  kMarkedForDeoptimization: 1 << 13,
 };
 
 // Returns true if --lite-mode is on and we can't ever turn on optimization.
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index bdcf3cf18d52f1..134a49f7480c87 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -54,9 +54,6 @@
   # Issue 3784: setters-on-elements is flaky
   'setters-on-elements': [PASS, FAIL],
 
-  # Issue 8505: Math.pow is incorrect for asm.js
-  'regress/wasm/regress-8505': [SKIP],
-
   # Issue 9380: Memory leaks of shared WebAssembly.Memory objects
   'wasm/shared-memory-worker-gc': [SKIP],
 
@@ -182,6 +179,7 @@
   'regress/regress-605470': [PASS, SLOW],
   'regress/regress-655573': [PASS, SLOW],
   'regress/regress-1200351': [PASS, SLOW],
+  'regress/regress-crbug-918301': [PASS, SLOW, NO_VARIANTS, ['mode != release or dcheck_always_on', SKIP], ['(arch == arm or arch == arm64) and simulator_run', SKIP], ['tsan', SKIP]],
   'regress/wasm/regress-810973': [PASS, SLOW],
   'string-replace-gc': [PASS, SLOW],
   'wasm/asm-wasm-f32': [PASS, SLOW],
@@ -194,6 +192,9 @@
   'wasm/compare-exchange-stress': [PASS, SLOW, NO_VARIANTS],
   'wasm/compare-exchange64-stress': [PASS, SLOW, NO_VARIANTS],
 
+  # worker creation/shutdown is very slow in debug mode
+  'd8/d8-worker-shutdown*': [PASS, ['mode == debug', SLOW], ['no_snap', SKIP]],
+
   # case-insensitive unicode regexp relies on case mapping provided by ICU.
   'es6/unicode-regexp-ignore-case': [PASS, ['no_i18n == True', FAIL]],
   'es6/unicode-regexp-ignore-case-noi18n': [FAIL, ['no_i18n == True', PASS]],
@@ -391,11 +392,16 @@
   'regress/regress-crbug-721835': [SKIP],
   'regress/regress-crbug-759327': [SKIP],
   'regress/regress-crbug-898974': [SKIP],
+  'regexp-tier-up': [SKIP],
 
   # These tests check that we can trace the compiler.
   'tools/compiler-trace-flags': [SKIP],
   'tools/compiler-trace-flags-wasm': [SKIP],
 
+  # Too slow on arm64 simulator and debug: https://crbug.com/v8/7783
+  'bit-not': [PASS, ['arch == arm64 and mode == debug and simulator_run', SKIP]],
+  'md5': [PASS, ['arch == arm64 and mode == debug and simulator_run', SKIP]],
+
   # Slow with pointer compression.
   'regress/regress-crbug-319860': [PASS, ['pointer_compression', SLOW]],
 }],  # 'lite_mode or variant == jitless'
@@ -405,6 +411,7 @@
   # Tests too slow in non-optimized debug mode.
   'compiler/regress-9017': [SKIP],
   'regress/regress-2790': [SKIP],
+  'regress/regress-331444': [SKIP],
   'regress/regress-740784': [SKIP],
 }],  # 'is_full_debug'
 
@@ -490,6 +497,9 @@
 
   # BUG(v8:9256). Slow with pointer compression.
   'regress/regress-708247': [PASS, ['pointer_compression', SLOW]],
+  'es6/array-concat': [PASS, ['pointer_compression', SLOW]],
+  'non-extensible-array-reduce': [PASS, ['pointer_compression', SLOW]],
+  'regress/regress-454725': [PASS, ['pointer_compression', SLOW]],
 }],  # 'arch == arm64'
 
 ['arch == arm64 and mode == debug and simulator_run', {
@@ -861,6 +871,7 @@
   # Tests that fail some assertions due to checking internal state sensitive
   # to GC.
   'compiler/native-context-specialization-hole-check': [SKIP],
+  'opt-elements-kind': [SKIP],
   'regress/regress-trap-allocation-memento': [SKIP],
   'regress/regress-v8-9267-*': [SKIP],
   'shared-function-tier-up-turbo': [SKIP],
@@ -890,6 +901,8 @@
   # Skip tests that are known to be non-deterministic.
   'd8/d8-worker-sharedarraybuffer': [SKIP],
   'd8/d8-os': [SKIP],
+  'd8/d8-worker-shutdown': [SKIP],
+  'd8/d8-worker-shutdown-gc': [SKIP],
   'harmony/futex': [SKIP],
 
   # BUG(v8:7166).
@@ -949,6 +962,9 @@
   'ignition/regress-599001-verifyheap': [SKIP],
   'unicode-test': [SKIP],
 
+  # The RegExp code cache means running this test multiple times is invalid.
+  'regexp-tier-up': [SKIP],
+
   # Flaky crash on Odroid devices: https://crbug.com/v8/7678
   'regress/regress-336820': [PASS, ['arch == arm and not simulator_run', SKIP]],
 
@@ -1064,14 +1080,6 @@
   'wasm/asm-wasm-f64': [SKIP],
 }], # arch == x64
 
-##############################################################################
-['arch in [arm, android_arm, android_ia32, ia32, ppc, s390, s390x, mipsel, mips]', {
-  # TODO(ssauleau): implement BigInt<>Wasm conversion for other arch -
-  # crbug.com/v8/7741
-  'wasm/bigint': [SKIP],
-  'wasm/bigint-i64-to-imported-js-func': [SKIP],
-}], # arch in [arm, android_arm, android_ia32, ia32, ppc, s390, s390x, mipsel, mips]
-
 ##############################################################################
 ['arch not in [x64, arm, arm64] or system != linux', {
   # Unwinding info writer is only supported on x64, arm, and arm64 Linux
@@ -1083,4 +1091,9 @@
   '*': [SKIP],
 }], # variant == jitless and not embedded_builtins
 
+['not embedded_builtins', {
+  # Explicitly sets jitless flag.
+  'regress/regress-992389': [SKIP],
+}], # not embedded_builtins
+
 ]
diff --git a/deps/v8/test/mjsunit/object-prevent-extensions.js b/deps/v8/test/mjsunit/object-prevent-extensions.js
index 4bda84e2dd5039..2fafcf395d3ab8 100644
--- a/deps/v8/test/mjsunit/object-prevent-extensions.js
+++ b/deps/v8/test/mjsunit/object-prevent-extensions.js
@@ -932,3 +932,110 @@ assertTrue(checkUndefined.apply(this, [...arr]));
   assertEquals(a[0], a[1]);
 
 })();
+
+// Test regression with Object.defineProperty.
+var obj = [];
+obj.propertyA = 42;
+obj[0] = true;
+Object.preventExtensions(obj);
+assertDoesNotThrow(function() {
+  Object.defineProperty(obj, 'propertyA', {
+    value: obj,
+  });
+});
+assertEquals(obj, obj.propertyA);
+assertDoesNotThrow(function() {
+  Object.defineProperty(obj, 'propertyA', {
+    value: obj,
+    writable: false,
+  });
+});
+obj.propertyA = 42;
+assertEquals(obj.propertyA, obj);
+assertThrows(function() {
+  Object.defineProperty(obj, 'abc', {
+    value: obj,
+  });
+}, TypeError);
+
+
+// Handle IC store.
+// For packed sealed object.
+function packedStore() {
+  let a = Object.preventExtensions([""]);
+  a[0] = 0;
+  assertEquals(a[0], 0);
+}
+
+packedStore();
+packedStore();
+
+// For holey sealed object.
+function holeyStore() {
+  let a = Object.preventExtensions([, ""]);
+  a[0] = 0;
+  assertEquals(a[0], undefined);
+}
+
+holeyStore();
+holeyStore();
+
+// Make sure IC store for holey is consistent.
+let a = Object.preventExtensions([, ""]);
+function foo() {
+  a[1] = 0;
+}
+
+foo();
+foo();
+function bar() {
+  a[0] = 1;
+}
+assertEquals(a, [, 0]);
+bar();
+assertEquals(a, [, 0]);
+bar();
+assertEquals(a, [, 0]);
+function baz() {
+  a[2] = 2;
+}
+assertEquals(a, [, 0]);
+baz();
+assertEquals(a, [, 0]);
+baz();
+assertEquals(a, [, 0]);
+
+// Reconfigure data field (e.g. change representation).
+function testReconfig() {
+  var o = ['3'];
+  function foo(i) { o.x = i; }
+  foo("string");
+  Object.preventExtensions(o);
+  Object.seal(o);
+  foo(0);
+  %HeapObjectVerify(o);
+  assertEquals(o.x, 0);
+}
+testReconfig();
+
+// Seal proxy from nonextensible object.
+PI = [];
+PI[250] = PI;
+Object.preventExtensions(PI);
+assertFalse(Object.isExtensible(PI));
+assertFalse(Object.isSealed(PI));
+var proxy = new Proxy(PI, PI);
+Object.seal(proxy);
+assertFalse(Object.isFrozen(proxy));
+assertTrue(Object.isSealed(proxy));
+
+// Freeze proxy from nonextensible object.
+PI = [];
+PI[250] = PI;
+Object.preventExtensions(PI);
+assertFalse(Object.isExtensible(PI));
+assertFalse(Object.isSealed(PI));
+var proxy = new Proxy(PI, PI);
+Object.freeze(proxy);
+assertTrue(Object.isSealed(proxy));
+assertTrue(Object.isFrozen(proxy));
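The proxy assertions above walk the object integrity-level lattice: preventExtensions only blocks adding properties, seal additionally makes them non-configurable, and freeze additionally makes data properties non-writable. A minimal sketch, assuming sloppy mode so failed writes are silently ignored rather than throwing:

    let o = { a: 1 };
    Object.preventExtensions(o);
    o.b = 2;                           // cannot add a property: ignored
    assertFalse('b' in o);
    assertFalse(Object.isSealed(o));   // 'a' is still configurable
    Object.seal(o);                    // also makes properties non-configurable
    o.a = 3;                           // sealed objects stay writable
    assertEquals(3, o.a);
    Object.freeze(o);                  // also makes data properties non-writable
    o.a = 4;                           // ignored
    assertEquals(3, o.a);
    assertTrue(Object.isFrozen(o));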
diff --git a/deps/v8/test/mjsunit/regexp-tier-up.js b/deps/v8/test/mjsunit/regexp-tier-up.js
new file mode 100644
index 00000000000000..e55e87f5938a47
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-tier-up.js
@@ -0,0 +1,92 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tier-up behavior differs between slow and fast paths in functional
+// RegExp.prototype.replace.
+// Flags: --regexp-tier-up --allow-natives-syntax --no-force-slow-path
+
+const kLatin1 = true;
+const kUnicode = false;
+
+function CheckRegexpNotYetCompiled(regexp) {
+  assertFalse(%RegexpHasBytecode(regexp, kLatin1) &&
+              %RegexpHasNativeCode(regexp, kLatin1));
+  assertFalse(%RegexpHasBytecode(regexp, kUnicode) &&
+              %RegexpHasNativeCode(regexp, kUnicode));
+}
+
+// Testing RegExp.test method which calls into Runtime_RegExpExec.
+let re = new RegExp('^.$');
+CheckRegexpNotYetCompiled(re);
+
+// Testing first execution of regexp with one-byte string subject.
+re.test("a");
+assertTrue(%RegexpHasBytecode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+            !%RegexpHasNativeCode(re, kUnicode));
+// Testing second execution of regexp now with a two-byte string subject.
+// This will compile to native code because we have a single tick counter
+// for both string representations.
+re.test("π");
+assertTrue(%RegexpHasBytecode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+            %RegexpHasNativeCode(re, kUnicode));
+// Testing tier-up when we're back to executing the regexp with a one-byte
+// string.
+re.test("6");
+assertTrue(!%RegexpHasBytecode(re, kLatin1) &&
+            %RegexpHasNativeCode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+            %RegexpHasNativeCode(re, kUnicode));
+re.test("7");
+assertTrue(!%RegexpHasBytecode(re, kLatin1) &&
+            %RegexpHasNativeCode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+            %RegexpHasNativeCode(re, kUnicode));
+
+// Testing String.replace method for non-global regexps.
+var subject = "a11";
+re = /\w1/;
+CheckRegexpNotYetCompiled(re);
+
+subject.replace(re, "x");
+assertTrue(%RegexpHasBytecode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+            !%RegexpHasNativeCode(re, kUnicode));
+subject.replace(re, "x");
+assertTrue(!%RegexpHasBytecode(re, kLatin1) &&
+            %RegexpHasNativeCode(re, kLatin1));
+assertTrue(!%RegexpHasBytecode(re, kUnicode) &&
+            !%RegexpHasNativeCode(re, kUnicode));
+
+// Testing String.replace method for global regexps.
+let re_g = /\w111/g;
+CheckRegexpNotYetCompiled(re_g);
+// This regexp will not match, so it will only execute the bytecode once,
+// without tiering up and recompiling to native code.
+subject.replace(re_g, "x");
+assertTrue(%RegexpHasBytecode(re_g, kLatin1));
+assertTrue(!%RegexpHasBytecode(re_g, kUnicode) &&
+            !%RegexpHasNativeCode(re_g, kUnicode));
+
+// This regexp will match, so it will execute twice, and tier-up.
+re_g = /\w1/g;
+CheckRegexpNotYetCompiled(re_g);
+subject.replace(re_g, "x");
+assertTrue(!%RegexpHasBytecode(re_g, kLatin1) &&
+            %RegexpHasNativeCode(re_g, kLatin1));
+assertTrue(!%RegexpHasBytecode(re_g, kUnicode) &&
+            !%RegexpHasNativeCode(re_g, kUnicode));
+
+// Testing String.replace method for global regexps with a function as a
+// parameter. This will tier up eagerly and compile to native code right
+// away, even though the regexp is only executed once.
+function f() { return "x"; }
+re_g = /\w2/g;
+CheckRegexpNotYetCompiled(re_g);
+subject.replace(re_g, f);
+assertTrue(!%RegexpHasBytecode(re_g, kLatin1) &&
+            %RegexpHasNativeCode(re_g, kLatin1));
+assertTrue(!%RegexpHasBytecode(re_g, kUnicode) &&
+            !%RegexpHasNativeCode(re_g, kUnicode));
diff --git a/deps/v8/test/mjsunit/regress/regress-1000635.js b/deps/v8/test/mjsunit/regress/regress-1000635.js
new file mode 100644
index 00000000000000..2a02774f999d2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1000635.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --stress-compaction --detailed-error-stack-trace --gc-interval=1
+
+function add(a, b) {
+  throw new Error();
+}
+for (let i = 0; i < 100; ++i) {
+  try {
+    add(1, 2);
+  } catch (e) {
+  }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-7773.js b/deps/v8/test/mjsunit/regress/regress-7773.js
index 7930ae91063f39..6f047711bf5c31 100644
--- a/deps/v8/test/mjsunit/regress/regress-7773.js
+++ b/deps/v8/test/mjsunit/regress/regress-7773.js
@@ -43,8 +43,8 @@
     configurable: true
   };
 
-  // Anonymous classes do not have a "name" property by default.
-  assertSame(undefined, Object.getOwnPropertyDescriptor(class {}, 'name'));
+  // Anonymous classes do have a "name" property by default with a value of ''.
+  assertEquals(descriptor, Object.getOwnPropertyDescriptor(class {}, 'name'));
   descriptor.value = 'C';
   assertEquals(descriptor, Object.getOwnPropertyDescriptor(class C {}, 'name'));
 
@@ -55,8 +55,9 @@
 
   let b = { __proto__: class {} };
   assertSame('', b.__proto__.name);
-  assertSame(
-      undefined, Object.getOwnPropertyDescriptor(b.__proto__, 'name'));
+  descriptor.value = '';
+  assertEquals(
+      descriptor, Object.getOwnPropertyDescriptor(b.__proto__, 'name'));
 
   let c = { fn: class F {} };
   assertSame('F', c.fn.name);
diff --git a/deps/v8/test/mjsunit/regress/regress-8510-2.js b/deps/v8/test/mjsunit/regress/regress-8510-2.js
new file mode 100644
index 00000000000000..b870a42df0c047
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8510-2.js
@@ -0,0 +1,38 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-lazy-source-positions
+
+try {
+  (function () {
+    eval(`
+      function assertLocation() {}
+      (function foo() {
+        var x = 1;
+        assertLocation();
+        throw new Error();
+      })();
+    `);
+  })();
+} catch (e) {
+  print(e.stack);
+}
+
+try {
+  (function () {
+    var assertLocation = 2;
+    (function () {
+      eval(`
+        function assertLocation() {}
+        (function foo() {
+          var x = 1;
+          assertLocation();
+          throw new Error();
+        })();
+      `);
+    })();
+  })();
+} catch (e) {
+  print(e.stack);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-9546.js b/deps/v8/test/mjsunit/regress/regress-9546.js
new file mode 100644
index 00000000000000..ecea405e984fd8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-9546.js
@@ -0,0 +1,53 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+// Sanity checks.
+assertEquals(Math.hypot(3, 4), 5);
+assertEquals(Math.hypot(1, 2, 3, 4, 5, 27), 28);
+
+// Regress.
+assertEquals(Math.hypot(Infinity, NaN), Infinity);
+assertEquals(Math.hypot(NaN, 0), NaN);
+assertEquals(Math.hypot(NaN, Infinity), Infinity);
+assertEquals(Math.hypot(0, NaN), NaN);
+assertEquals(Math.hypot(NaN, 1, 2, 3, 4, 5, 0), NaN);
+assertEquals(Math.hypot(NaN, Infinity, 2, 3, 4, 5, 0), Infinity);
+
+// Verify optimized code works as intended.
+function two_hypot(a, b) {
+  return Math.hypot(a, b);
+}
+
+function six_hypot(a, b, c, d, e, f) {
+  return Math.hypot(a, b, c, d, e, f);
+}
+
+%PrepareFunctionForOptimization(two_hypot);
+two_hypot(1, 2);
+two_hypot(3, 4);
+two_hypot(5, 6);
+%OptimizeFunctionOnNextCall(two_hypot);
+assertEquals(two_hypot(3, 4), 5);
+
+// Regress 2 parameter case.
+assertEquals(two_hypot(Infinity, NaN), Infinity);
+assertEquals(two_hypot(NaN, 0), NaN);
+assertEquals(two_hypot(NaN, Infinity), Infinity);
+assertEquals(two_hypot(0, NaN), NaN);
+
+// Regress many parameters case.
+%PrepareFunctionForOptimization(six_hypot);
+six_hypot(1, 2, 3, 4, 5, 6);
+six_hypot(3, 4, 5, 6, 7, 8);
+six_hypot(5, 6, 7, 8, 9, 10);
+%OptimizeFunctionOnNextCall(six_hypot);
+assertEquals(six_hypot(1, 2, 3, 4, 5, 27), 28);
+
+assertEquals(six_hypot(0, 0, 0, 0, 0, 0), 0);
+assertEquals(six_hypot(NaN, 1, 2, 3, 4, 5, 0), NaN);
+assertEquals(six_hypot(NaN, Infinity, 2, 3, 4, 5, 0), Infinity);
+assertEquals(six_hypot(1, 2, 3, 4, 5, NaN), NaN);
+assertEquals(six_hypot(Infinity, 2, 3, 4, 5, NaN), Infinity);
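The spec rule these regression checks pin down: any infinite argument forces Math.hypot to return Infinity, even if another argument is NaN; NaN wins only when no infinity is present. A hedged reference sketch of that order of checks (hypotRef is a hypothetical helper, and it ignores the overflow scaling a real implementation needs):

    function hypotRef(...args) {
      const xs = args.map(Number);
      // Infinity is checked before NaN, matching the spec's order.
      if (xs.some(x => x === Infinity || x === -Infinity)) return Infinity;
      if (xs.some(Number.isNaN)) return NaN;
      return Math.sqrt(xs.reduce((sum, x) => sum + x * x, 0));
    }
    assertEquals(Infinity, hypotRef(NaN, Infinity));
    assertEquals(NaN, hypotRef(NaN, 0));
    assertEquals(5, hypotRef(3, 4));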
diff --git a/deps/v8/test/mjsunit/regress/regress-9560.js b/deps/v8/test/mjsunit/regress/regress-9560.js
new file mode 100644
index 00000000000000..71ad9f1eab10ef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-9560.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var value = 0;
+
+[{ set prop(v) { value = v } }.prop = 12 ] = [ 1 ]
+
+assertEquals(1, value);
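Spelled out, the one-liner above: in a destructuring assignment, { set prop(v) {...} }.prop = 12 is an assignment target with a default, and the default applies only when the matching right-hand element is undefined, so the setter observes 1. A small sketch of both cases:

    let got;
    const target = { set prop(v) { got = v; } };
    [target.prop = 12] = [1];   // element present: default skipped
    assertEquals(1, got);
    [target.prop = 12] = [];    // element missing: default used
    assertEquals(12, got);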
diff --git a/deps/v8/test/mjsunit/regress/regress-988973.js b/deps/v8/test/mjsunit/regress/regress-988973.js
new file mode 100644
index 00000000000000..29c8ab43deeb2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-988973.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"".match(/(?:(?=a)b){5}abcde/);
diff --git a/deps/v8/test/mjsunit/regress/regress-989914.js b/deps/v8/test/mjsunit/regress/regress-989914.js
new file mode 100644
index 00000000000000..199fbfdf0187d7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-989914.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy --stress-lazy-source-positions
+
+function foo() {
+    return () => {
+      this.test_;
+      eval();
+  }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-991133.js b/deps/v8/test/mjsunit/regress/regress-991133.js
new file mode 100644
index 00000000000000..78573897238ef3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-991133.js
@@ -0,0 +1,176 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --allow-natives-syntax --gc-interval=416
+// Flags: --no-lazy --no-lazy-feedback-allocation --stress-lazy-source-positions
+
+"use strict";
+function WasmModuleBuilder() {}
+(function () {
+  try {
+    BigIntPrototypeValueOf = BigInt.prototype.valueOf;
+  } catch (e) {}
+  failWithMessage = function failWithMessage(message) {
+    throw new MjsUnitAssertionError(message);
+  }
+  assertSame = function assertSame(expected, found, name_opt) {
+    if (Object.is(expected, found)) return;
+    fail(prettyPrinted(expected), found, name_opt);
+  };
+  assertArrayEquals = function assertArrayEquals(expected, found, name_opt) {
+    var start = "";
+    if (name_opt) {
+      start = name_opt + " - ";
+    }
+    assertEquals(expected.length, found.length, start + "array length");
+    if (expected.length === found.length) {
+      for (var i = 0; i < expected.length; ++i) {
+        assertEquals(expected[i], found[i],
+                     start + "array element at index " + i);
+      }
+    }
+  };
+  assertPropertiesEqual = function assertPropertiesEqual(expected, found,
+                                                         name_opt) {
+    if (!deepObjectEquals(expected, found)) {
+      fail(expected, found, name_opt);
+    }
+  };
+  assertToStringEquals = function assertToStringEquals(expected, found,
+                                                       name_opt) {
+    if (expected !== String(found)) {
+      fail(expected, found, name_opt);
+    }
+  };
+  assertTrue = function assertTrue(value, name_opt) {
+    assertEquals(true, value, name_opt);
+  };
+  assertFalse = function assertFalse(value, name_opt) {
+    assertEquals(false, value, name_opt);
+  };
+  assertNull = function assertNull(value, name_opt) {
+    if (value !== null) {
+      fail("null", value, name_opt);
+    }
+  };
+  assertNotNull = function assertNotNull(value, name_opt) {
+    if (value === null) {
+      fail("not null", value, name_opt);
+    }
+  };
+})();
+
+function getRandomProperty(v, rand) { var properties = Object.getOwnPropertyNames(v); var proto = Object.getPrototypeOf(v); if (proto) { properties = properties.concat(Object.getOwnPropertyNames(proto)); } if (properties.includes("constructor") && v.constructor.hasOwnProperty("__proto__")) { properties = properties.concat(Object.getOwnPropertyNames(v.constructor.__proto__)); } if (properties.length == 0) { return "0"; } return properties[rand % properties.length]; }
+
+
+var __v_11 = { Float32Array() {}, Uint8Array() {} };
+var __v_17 = {};
+try {
+} catch(e) { print("Caught: " + e); }
+function __f_0(){
+}
+function __f_3(a, b) {
+    (a | 0) + (b | 0);
+    return a;
+}
+function __f_23(expected, __f_20, ffi) {
+}
+try {
+(function() {
+  function __f_12(__v_11, foreign, buffer) {
+    "use asm";
+    var __v_18 = new __v_11.Uint8Array(buffer);
+    var __v_8 = new __v_9.Int32Array(buffer);
+    function __f_24(__v_23, __v_28) {
+      __v_23 = __v_23 | 0;
+      __v_28 = __v_28 | 0;
+      __v_16[__v_23 >> 2] = __v_28;
+    }
+    function __f_19(__v_23, __v_28) {
+      __v_21 = __v_19 | 0;
+      __v_28 = __v_28 | 0;
+      __v_18[__v_23 | 0] = __v_28;
+    }
+    function __f_10(__v_23) {
+      __v_0 = __v_10 | 0;
+      return __v_18[__v_23] | 0;
+      gc();
+    }
+    function __f_13(__v_23) {
+      __v_23 = __v_17 | 0;
+      return __v_18[__v_16[__v_23 >> 2] | 0] | 0;
+    }
+    return {__f_10: __f_10, __f_13: __f_13, __f_24: __f_24, __f_19: __f_19};
+  }
+  var __v_15 = new ArrayBuffer(__v_17);
+  var __v_13 = eval('(' + __f_3.toString() + ')');
+  var __v_26 = __v_13(__v_11, null, __v_15);
+  var __v_27 = { __f_10() {} };
+  __f_3(__v_13);
+  assertNotEquals(123, __v_27.__f_10(20));
+  assertNotEquals(42, __v_27.__f_10(21));
+  assertNotEquals(77, __v_27.__f_10(22));
+  __v_26.__p_711994219 = __v_26[getRandomProperty(__v_26, 711994219)];
+  __v_26.__defineGetter__(getRandomProperty(__v_26, 477679072), function() {  gc(); __v_16[getRandomProperty(__v_16, 1106800630)] = __v_1[getRandomProperty(__v_1, 1151799043)]; return __v_26.__p_711994219; });
+  assertNotEquals(123, __v_27.__f_10(0));
+  assertNotEquals(42, __v_27.__f_10(4));
+  assertNotEquals(77, __v_27.__f_10(8));
+})();
+} catch(e) { print("Caught: " + e); }
+function __f_18(__v_11, foreign, heap) {
+  "use asm";
+  var __v_12 = new __v_11.Float32Array(heap);
+  var __v_14 = __v_11.Math.fround;
+  function __f_20() {
+    var __v_21 = 1.23;
+    __v_12[0] = __v_21;
+    return +__v_12[0];
+  }
+  return {__f_14: __f_20};
+}
+try {
+__f_23(Math.fround(1.23), __f_3);
+} catch(e) { print("Caught: " + e); }
+try {
+} catch(e) { print("Caught: " + e); }
+function __f_25(
+    imported_module_name, imported_function_name) {
+  var __v_11 = new WasmModuleBuilder();
+  var __v_25 = new WasmModuleBuilder();
+  let imp = i => i + 3;
+}
+try {
+__f_25('mod', 'foo');
+__f_25('mod', '☺☺happy☺☺');
+__f_25('☺☺happy☺☺', 'foo');
+__f_25('☺☺happy☺☺', '☼+☃=☹');
+} catch(e) { print("Caught: " + e); }
+function __f_26(
+    internal_name_mul, exported_name_mul, internal_name_add,
+    exported_name_add) {
+  var __v_25 = new WasmModuleBuilder();
+}
+try {
+__f_26('☺☺mul☺☺', 'mul', '☺☺add☺☺', 'add');
+__f_26('☺☺mul☺☺', '☺☺mul☺☺', '☺☺add☺☺', '☺☺add☺☺');
+(function __f_27() {
+  var __v_25 = new WasmModuleBuilder();
+  __v_25.addFunction('three snowmen: ☃☃☃').addBody([]).exportFunc();
+  assertThrows( () => __v_25.instantiate(), WebAssembly.CompileError, /Compiling function #0:"three snowmen: ☃☃☃" failed: /);
+});
+(function __f_28() {
+  var __v_25 = new WasmModuleBuilder();
+  __v_25.addImport('three snowmen: ☃☃☃', 'foo');
+  assertThrows( () => __v_25.instantiate({}), TypeError, /WebAssembly.Instance\(\): Import #0 module="three snowmen: ☃☃☃" error: /);
+});
+(function __f_29() {
+  __v_25.__defineGetter__(getRandomProperty(__v_25, 539246294), function() { gc(); return __f_21; });
+  var __v_25 = new WasmModuleBuilder();
+  __v_25.addImport('mod', 'three snowmen: ☃☃☃');
+  assertThrows(
+      () => __v_14.instantiate({mod: {}}), WebAssembly.LinkError,
+      'WebAssembly.Instance\(\): Import #0 module="mod" function="three ' +
+          'snowmen: ☃☃☃" error: function import requires a callable');
+});
+} catch(e) { print("Caught: " + e); }
diff --git a/deps/v8/test/mjsunit/regress/regress-992389.js b/deps/v8/test/mjsunit/regress/regress-992389.js
new file mode 100644
index 00000000000000..66fa9696f67515
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-992389.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --jitless --gc-interval=12 --stack-size=50
+
+__f_0();
+function __f_0() {
+  try {
+    __f_0();
+  } catch(e) {
+    "b".replace(/(b)/g, function() { return "c"; });
+  }
+}
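// Editor's sketch (not part of the patch): the regression test above relies
// on a common mjsunit pattern -- recurse until V8 throws a stack-overflow
// RangeError, then exercise the regexp machinery while almost no stack is
// left. A minimal standalone version of that pattern:
function overflowThenReplace() {
  try {
    return overflowThenReplace();  // recurse until a RangeError is thrown
  } catch (e) {
    // Very little stack remains here; --jitless additionally forces the
    // interpreted (bytecode) regexp path, which is what the bug was about.
    return "b".replace(/(b)/g, () => "c");
  }
}
print(overflowThenReplace());  // "c"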
diff --git a/deps/v8/test/mjsunit/regress/regress-996234.js b/deps/v8/test/mjsunit/regress/regress-996234.js
new file mode 100644
index 00000000000000..e68ef7de3e95c5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-996234.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --regexp-tier-up --print-code --trace-regexp-bytecodes
+
+// Test printing of regexp code and bytecode when tiering up from the
+// interpreter to the compiler.
+function __f_13544(__v_62631) {
+  __v_62631.replace(/\s/g);
+}
+
+__f_13544("No");
+
+let re = /^.$/;
+re.test("a");
+re.test("3");
+re.test("π");
diff --git a/deps/v8/test/mjsunit/regress/regress-996751.js b/deps/v8/test/mjsunit/regress/regress-996751.js
new file mode 100644
index 00000000000000..71a4e329b1f8a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-996751.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --stress-lazy-source-positions
+
+eval(`
+  eval("");
+  (function f() {
+    // This undefined should always be known to be the global undefined value,
+    // even though there is a sloppy eval call inside the top eval scope.
+    return undefined;
+  })();
+`);
+
+// The above logic should work through multiple layers of eval nesting.
+eval(`
+  eval(\`
+    eval(\\\`
+      eval("");
+      (function f() {
+        return undefined;
+      })();
+    \\\`);
+  \`);
+`);
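// Editor's sketch (not part of the patch): the reason `undefined` needs care
// here is that a sloppy direct eval *can* introduce a shadowing binding, so
// the parser must prove none exists before treating it as the global
// constant. For contrast, an eval that really does shadow it:
(function () {
  eval("var undefined = 42;");  // legal in sloppy mode inside a function
  print(undefined);             // 42 -- the eval-introduced binding wins
})();
print(undefined);               // undefined -- the global is untouched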
diff --git a/deps/v8/test/mjsunit/regress/regress-bind-deoptimize.js b/deps/v8/test/mjsunit/regress/regress-bind-deoptimize.js
new file mode 100644
index 00000000000000..a01d150d69cfad
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-bind-deoptimize.js
@@ -0,0 +1,24 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+(function() {
+  function bla(x) {
+    return this[x];
+  }
+
+  function foo(f) {
+    return bla.bind(f())(0);
+  };
+
+  %PrepareFunctionForOptimization(foo);
+  foo(() => { return [true]; });
+  foo(() => { return [true]; });
+  %OptimizeFunctionOnNextCall(foo);
+  foo(() => { return [true]; });
+  assertOptimized(foo);
+  foo(() => { bla.a = 1; return [true]; });
+  assertUnoptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1000094.js b/deps/v8/test/mjsunit/regress/regress-crbug-1000094.js
new file mode 100644
index 00000000000000..40f6799c4e7c58
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1000094.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-lazy-source-positions --stress-lazy-source-positions
+
+var f = (( {a: b} = {
+    a() {
+      return b;
+    }
+}) => b)()();
+
+// b should get assigned to the inner function a, which then ends up returning
+// itself.
+assertEquals(f, f());
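// Editor's sketch (not part of the patch): a rough desugaring of the
// one-liner above, with illustrative names, showing why f() returns f:
var b;                                // the destructured binding
var init = { a() { return b; } };     // method a closes over b
b = init.a;                           // {a: b} = init assigns the method to b
var inner = b;                        // the arrow body just returns b
var g = inner();                      // calling a() returns b, i.e. a itself
print(g === g());                     // true, matching assertEquals(f, f())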
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1000170.js b/deps/v8/test/mjsunit/regress/regress-crbug-1000170.js
new file mode 100644
index 00000000000000..41975c7a72b760
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1000170.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --no-lazy --stress-lazy-source-positions --enable-lazy-source-positions
+
+(function a() {
+  function b() { a(); }
+  function c() { eval(); }
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-109362.js b/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
index cf7cd4e5fa737d..54bae56a1a6178 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
@@ -19,7 +19,7 @@ function test(expectation, f) {
 1 + reference_error //@ sourceURL=evaltest
 })
 */
-test("3:5", new Function(
+test("1:5", new Function(
     '1 + reference_error //@ sourceURL=evaltest'));
 /*
 (function(x
@@ -28,7 +28,7 @@ test("3:5", new Function(
  1 + reference_error //@ sourceURL=evaltest
 })
 */
-test("4:6", new Function(
+test("2:6", new Function(
     'x', '\n 1 + reference_error //@ sourceURL=evaltest'));
 /*
 (function(x
@@ -40,7 +40,7 @@ test("4:6", new Function(
  1 + reference_error //@ sourceURL=evaltest
 })
 */
-test("7:6", new Function(
+test("5:6", new Function(
     'x\n\n', "z//\n", "y", '\n 1 + reference_error //@ sourceURL=evaltest'));
 /*
 (function(x/\*,z//
@@ -49,7 +49,7 @@ test("7:6", new Function(
 1 + reference_error //@ sourceURL=evaltest
 })
 */
-test("4:5", new Function(
+test("2:5", new Function(
     'x/*', "z//\n", "y*/", '1 + reference_error //@ sourceURL=evaltest'));
 /*
 (function () {
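// Editor's note (not part of the patch): new Function compiles its body
// inside a synthesized wrapper, roughly `function anonymous(<args>\n) {\n<body>\n}`.
// Every expectation above drops by exactly two lines, consistent with error
// positions no longer counting the two wrapper lines. The wrapper is
// observable via toString():
let fn = new Function('x', 'return x + 1');
print(fn.toString());
// function anonymous(x
// ) {
// return x + 1
// }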
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-734051.js b/deps/v8/test/mjsunit/regress/regress-crbug-734051.js
index 2655db08a7eab1..7d8aa9cb85be17 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-734051.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-734051.js
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-function TestMutableHeapNumberLiteral() {
+function TestHeapNumberLiteral() {
     var data = { a: 0, b: 0 };
     data.a += 0.1;
     assertEquals(0.1, data.a);
     assertEquals(0, data.b);
 };
-TestMutableHeapNumberLiteral();
-TestMutableHeapNumberLiteral();
-TestMutableHeapNumberLiteral();
-TestMutableHeapNumberLiteral();
-TestMutableHeapNumberLiteral();
+TestHeapNumberLiteral();
+TestHeapNumberLiteral();
+TestHeapNumberLiteral();
+TestHeapNumberLiteral();
+TestHeapNumberLiteral();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-918301.js b/deps/v8/test/mjsunit/regress/regress-crbug-918301.js
new file mode 100644
index 00000000000000..a93ec3e9df8e17
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-918301.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() => Object.getOwnPropertyDescriptors(Array(1e9).join('c')), RangeError);
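// Editor's note (not part of the patch): Object.getOwnPropertyDescriptors on
// a string materializes one descriptor per character plus "length", so the
// ~1e9-character string above asks for roughly a billion descriptors and V8
// bails out with a RangeError. The small-scale behavior it extrapolates:
let descs = Object.getOwnPropertyDescriptors("abc");
print(Object.keys(descs));  // 0,1,2,length
print(descs[1].value);      // "b"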
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-980183.js b/deps/v8/test/mjsunit/regress/regress-crbug-980183.js
new file mode 100644
index 00000000000000..f4b4f5cfce3bf1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-980183.js
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+  const o = {};
+  // The order of the following operations is significant
+  o.a = 0;
+  o[1024] = 1;  // An offset of >=1024 is required
+  delete o.a;
+  o.b = 2;
+  return o.b;
+}
+%PrepareFunctionForOptimization(f);
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+
+function g(o) {
+  o.b = 2;
+}
+function h() {
+  const o = {};
+  o.a = 0;
+  o[1024] = 1;
+  delete o.a;
+  g(o);
+  assertEquals(o.b, 2);
+}
+%NeverOptimizeFunction(g);
+%PrepareFunctionForOptimization(h);
+h();
+h();
+%OptimizeFunctionOnNextCall(h);
+h();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-980422.js b/deps/v8/test/mjsunit/regress/regress-crbug-980422.js
new file mode 100644
index 00000000000000..93ea619afab36a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-980422.js
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-lazy-source-positions --stress-lazy-source-positions
+(function () {
+  ((d, e = d) => {
+    return d * e;
+  })();
+})();
+
+try {
+  (function () {
+    ((d, e = f, f = d) => {
+      // Won't get here as the initializers will cause a ReferenceError
+    })();
+  })();
+  assertUnreachable();
+} catch (ex) {
+  assertInstanceof(ex, ReferenceError);
+  // Not using assertThrows because we need to access ex.stack to force
+  // collection of source positions.
+  print(ex.stack);
+}
+
+// Check that spreads in arrow functions work
+(function () {
+  ((...args) => args)();
+})();
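// Editor's sketch (not part of the patch): parameter defaults are evaluated
// left to right in a TDZ-like scope, so a default may read parameters to its
// left but not to its right -- that is why (d, e = f, f = d) above throws:
((a, b = a) => print(a, b))(1);        // 1 1 -- reading an earlier param is fine
try {
  ((a = b, b) => {})();                // a's default reads b before it exists
} catch (e) {
  print(e instanceof ReferenceError);  // true
}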
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-981701.js b/deps/v8/test/mjsunit/regress/regress-crbug-981701.js
new file mode 100644
index 00000000000000..f38591b600625e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-981701.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --throws --enable-lazy-source-positions --stress-lazy-source-positions
+((...{a: [b, ...{b: [] = b, a: c}] = b}) => b)(-2);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-986187.js b/deps/v8/test/mjsunit/regress/regress-crbug-986187.js
new file mode 100644
index 00000000000000..6fd2ccb5bf4149
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-986187.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+var obj = {}
+obj.__proto__ = null;
+Object.defineProperty(obj, "prop", {
+  set: gc
+});
+for (var i = 0; i < 100 ; ++i) {
+  obj["prop"] = 0;
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-987205.js b/deps/v8/test/mjsunit/regress/regress-crbug-987205.js
new file mode 100644
index 00000000000000..51903919e45368
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-987205.js
@@ -0,0 +1,68 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+  // This used to generate two distinct stores to #undefined, violating the load
+  // elimination invariant that there are no two stores to the same const field:
+  var obj1 = {
+    [undefined]: 1,
+    'undefined': 1
+  };
+  // This one cannot be discharged statically:
+  var obj2 = {
+    [undefined]: x,
+    'undefined': 1
+  };
+  // Some more variations that exercise AllocateFastLiteral:
+  var obj3 = {
+    'undefined': 1,
+    [undefined]: x
+  };
+  var obj4 = {
+    'undefined': x,
+    [undefined]: 1
+  };
+  assertEquals(obj1.undefined, 1);
+  assertEquals(obj2.undefined, 1);
+  assertEquals(obj3.undefined, x);
+  assertEquals(obj4.undefined, 1);
+}
+
+%PrepareFunctionForOptimization(f);
+f(1);
+f(1);
+%OptimizeFunctionOnNextCall(f);
+f(2);
+
+
+function g(x) {
+  var obj1 = {
+    [undefined]: 1,
+    [undefined]: 1
+  };
+  var obj2 = {
+    [undefined]: 1,
+    [undefined]: x
+  };
+  var obj3 = {
+    [undefined]: x,
+    [undefined]: 1
+  };
+  var obj4 = {
+    [undefined]: x,
+    [undefined]: x
+  };
+  assertEquals(obj1.undefined, 1);
+  assertEquals(obj2.undefined, x);
+  assertEquals(obj3.undefined, 1);
+  assertEquals(obj4.undefined, x);
+}
+
+%PrepareFunctionForOptimization(g);
+g(1);
+g(1);
+%OptimizeFunctionOnNextCall(g);
+g(2);
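// Editor's note (not part of the patch): the collisions above work because a
// computed property key is converted to a string, so [undefined] and the
// literal 'undefined' name the same property, with the later initializer
// overwriting the earlier one:
const o = { [undefined]: 1, 'undefined': 2 };
print(Object.keys(o).length);  // 1 -- a single 'undefined' property
print(o.undefined);            // 2 -- the second initializer won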
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-988304.js b/deps/v8/test/mjsunit/regress/regress-crbug-988304.js
new file mode 100644
index 00000000000000..a9ceec4d59f97e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-988304.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-lazy-source-positions --stress-lazy-source-positions
+
+(function() {
+  ((x = 1) => {
+    function foo() {
+      x;
+    }
+    return x;
+  })();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-990582.js b/deps/v8/test/mjsunit/regress/regress-crbug-990582.js
new file mode 100644
index 00000000000000..e78775fdbbfcdb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-990582.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --invoke-weak-callbacks --budget-for-feedback-vector-allocation=0
+
+__v_0 = 0;
+function __f_0() {
+   try {
+     __f_0();
+   } catch(e) {
+     if (__v_0 < 50) {
+       __v_0++;
+/()/g, new [];
+     }
+   }
+}
+  __f_0();
+Realm.shared = this;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-992914.js b/deps/v8/test/mjsunit/regress/regress-crbug-992914.js
new file mode 100644
index 00000000000000..31d0e76c343f0f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-992914.js
@@ -0,0 +1,59 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function mainFreeze() {
+  const v2 = {foo:1.1};
+  Object.freeze(v2);
+  const v12 = {foo:2.2};
+  Object.preventExtensions(v12);
+  Object.freeze(v12);
+  const v18 = {foo:Object};
+  v12.__proto__ = 0;
+  v2[5] = 1;
+}
+mainFreeze();
+
+function mainSeal() {
+  const v2 = {foo:1.1};
+  Object.seal(v2);
+  const v12 = {foo:2.2};
+  Object.preventExtensions(v12);
+  Object.seal(v12);
+  const v18 = {foo:Object};
+  v12.__proto__ = 0;
+  v2[5] = 1;
+}
+mainSeal();
+
+function testSeal() {
+  a = new RangeError(null, null, null);
+  a.toFixed = () => {};
+  e = Object.seal(a);
+  a = new RangeError(null, null, null);
+  a.toFixed = () => {};
+  k = Object.preventExtensions(a);
+  l = Object.seal(a);
+  a.toFixed = () => {};
+  assertThrows(() => {
+    Array.prototype.unshift.call(l, false, Infinity);
+  }, TypeError);
+}
+testSeal();
+
+function testFreeze() {
+  a = new RangeError(null, null, null);
+  a.toFixed = () => {};
+  e = Object.freeze(a);
+  a = new RangeError(null, null, null);
+  a.toFixed = () => {};
+  k = Object.preventExtensions(a);
+  l = Object.freeze(a);
+  a.toFixed = () => {};
+  assertThrows(() => {
+    Array.prototype.unshift.call(l, false, Infinity);
+  }, TypeError);
+}
+testFreeze();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-993980.js b/deps/v8/test/mjsunit/regress/regress-crbug-993980.js
new file mode 100644
index 00000000000000..aea4eeb83eeca2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-993980.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function () {
+    // generate some sample data
+    let data = new Array(1600).fill(null).map((e, i) => ({
+        invariantKey: 'v',
+        ['randomKey' + i]: 'w',
+
+    }));
+
+    // round-trip through the JSON parser
+    data = JSON.parse(JSON.stringify(data))
+
+    // look for undefined values
+    for (const t of data) {
+      assertFalse(Object.keys(t).some(k => !t[k]));
+    }
+})()
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-994041.js b/deps/v8/test/mjsunit/regress/regress-crbug-994041.js
new file mode 100644
index 00000000000000..396cfa2c1ef593
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-994041.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+v0 = Array().join();
+RegExp.prototype.__defineSetter__(0, function() {
+})
+v24 = v0.search();
+assertEquals(v24, 0);
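// Editor's note (not part of the patch): String.prototype.search with no
// argument coerces undefined to an empty regexp, which matches at index 0 --
// that is why v24 above is 0 even though v0 is the empty string:
print("".search());       // 0 -- same as "".search(/(?:)/)
print("abc".search());    // 0
print("abc".search(/c/)); // 2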
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-994719.js b/deps/v8/test/mjsunit/regress/regress-crbug-994719.js
new file mode 100644
index 00000000000000..72a9819331593c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-994719.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy --enable-lazy-source-positions --stress-lazy-source-positions
+
+class C extends Object {
+  constructor() {
+    () => this;
+    super();
+  }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-997056.js b/deps/v8/test/mjsunit/regress/regress-crbug-997056.js
new file mode 100644
index 00000000000000..02e2772ddb829b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-997056.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+for (let i = 0; i < 4; ++i) {
+    var obj1 = {
+        get [obj1]() {},
+        ...obj2,
+    };
+    var obj2 = { [obj1]: 0 };
+    print(obj2);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-997057.js b/deps/v8/test/mjsunit/regress/regress-crbug-997057.js
new file mode 100644
index 00000000000000..8f90b5e05cc131
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-997057.js
@@ -0,0 +1,31 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+arr0 = [];
+
+var obj = {};
+
+Array.prototype[12] = 10;
+arr0 = [];
+Array.prototype[0] = 153;
+
+for (var elem in arr0) {
+  obj.length = {
+    toString: function () {
+    }
+  };
+}
+
+function baz() {
+  obj.length, arr0.length;
+}
+
+var arr = [{}, [], {}];
+for (var i in arr) {
+  baz();
+  for (var j = 0; j < 100000; j++) {
+  }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-997320.js b/deps/v8/test/mjsunit/regress/regress-crbug-997320.js
new file mode 100644
index 00000000000000..5f8dd56200b620
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-997320.js
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy --stress-lazy-source-positions
+// Flags: --enable-lazy-source-positions
+
+async(a, b = a) => {};
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-999450.js b/deps/v8/test/mjsunit/regress/regress-crbug-999450.js
new file mode 100644
index 00000000000000..6d005007b52703
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-999450.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --no-lazy --stress-lazy-source-positions --enable-lazy-source-positions
+
+(function foo() {
+    foo = null;
+    () => foo;
+})
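// Editor's sketch (not part of the patch): inside a named function
// expression the name is an immutable binding, so the `foo = null` above is
// silently ignored in sloppy mode (strict mode would throw a TypeError):
var nfe = (function foo() {
  foo = null;            // no effect: foo is read-only inside the expression
  return foo;
});
print(nfe() === nfe);    // true -- foo still refers to the function itself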
diff --git a/deps/v8/test/mjsunit/regress/regress-inlining-printing.js b/deps/v8/test/mjsunit/regress/regress-inlining-printing.js
new file mode 100644
index 00000000000000..9a574ad308a3c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-inlining-printing.js
@@ -0,0 +1,24 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --trace-turbo-inlining
+// Flags: --max-inlined-bytecode-size-small=0
+
+function f() {}
+function g() {}
+function h() {}
+
+function test(n) {
+  h;
+  (n == 0 ? f : (n > 0 ? g : h))();
+}
+
+%EnsureFeedbackVectorForFunction(f);
+%EnsureFeedbackVectorForFunction(g);
+
+%PrepareFunctionForOptimization(test);
+test(0);
+test(1);
+%OptimizeFunctionOnNextCall(test);
+test(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js b/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js
index 2b34159c14d339..c2e6212eb16f1d 100644
--- a/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js
+++ b/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js
@@ -14,7 +14,6 @@ function foo() {
 let g1 = foo();
 let g2 = foo();
 %PrepareFunctionForOptimization(g1);
-%PrepareFunctionForOptimization(g2);
 
 g1({ f : 1});
 g1({ f : 2});
@@ -22,9 +21,10 @@ g2({ f : 2});
 g2({ f : 2});
 
 %OptimizeFunctionOnNextCall(g1);
-%OptimizeFunctionOnNextCall(g2);
-
 g1({ f : 1});
+
+%PrepareFunctionForOptimization(g2);
+%OptimizeFunctionOnNextCall(g2);
 g2({ f : 2});
 g1({});
 
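// Editor's sketch (not part of the patch): the reordering above follows the
// usual --allow-natives-syntax handshake -- prepare, warm up to collect
// feedback, request optimization, call once more. Minimal form (d8 with the
// mjsunit harness loaded, run with --allow-natives-syntax --opt --no-always-opt):
function add(a, b) { return a + b; }
%PrepareFunctionForOptimization(add);
add(1, 2); add(3, 4);             // collect type feedback
%OptimizeFunctionOnNextCall(add);
add(5, 6);                        // this call runs the optimized code
assertOptimized(add);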
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9511.js b/deps/v8/test/mjsunit/regress/regress-v8-9511.js
new file mode 100644
index 00000000000000..b2bff4161137aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9511.js
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var f = function() { return 1; };
+
+(function func1() {
+  eval("var f = function canary(s) { return 2; }");
+})();
+
+assertEquals(f(), 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9656.js b/deps/v8/test/mjsunit/regress/regress-v8-9656.js
new file mode 100644
index 00000000000000..98779b18f9bf44
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9656.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// Files: test/mjsunit/code-coverage-utils.js
+
+%DebugToggleBlockCoverage(true);
+
+try {
+  throw new Error();
+} catch (e) {
+  e.stack;
+}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8505.js b/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
index 0488723e4f7181..b1fdedfc93c800 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
@@ -150,18 +150,18 @@ function assertBinop(name, math_func, wasm_func) {
 }
 
 let stdlib = this;
-function Module_exp(stdlib) {
+function Module_pow(stdlib) {
   "use asm";
 
-  var Stdlib = stdlib.Math.exp;
+  var Stdlib = stdlib.Math.pow;
 
-  function NAME(a, b) {
+  function pow(a, b) {
     a = +a;
     b = +b;
     return +Stdlib(a, b);
   }
 
-  return {exp: exp};
+  return {pow: pow};
 }
 
 function wasmBinop(name, sig) {
@@ -181,8 +181,8 @@ function wasmBinop(name, sig) {
 }
 
 function asmBinop(name) {
-  let instance = Module_exp(stdlib);
-  assertTrue(%IsAsmWasmCode(Module_exp));
+  let instance = Module_pow(stdlib);
+  assertTrue(%IsAsmWasmCode(Module_pow));
 
   let asm_func = instance[name];
   if (typeof asm_func != "function") throw "asm[" + full_name + "] not found";
@@ -190,7 +190,7 @@ function asmBinop(name) {
 }
 
 (function TestF64() {
-  let name = 'exp';
+  let name = 'pow';
   let math_func = Math[name];
 
   let wasm_func = wasmBinop(name, kSig_d_dd);
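// Editor's note (not part of the patch): the old Module_exp body was a
// leftover macro template -- the inner function was literally named NAME
// while the return statement referenced an undeclared `exp` -- so the fix
// simply makes the names line up. The minimal valid asm.js shape it now has:
function ModulePow(stdlib) {
  "use asm";
  var Stdlib = stdlib.Math.pow;
  function pow(a, b) {
    a = +a;                // parameter type annotations required by asm.js
    b = +b;
    return +Stdlib(a, b);  // +... annotates a double return
  }
  return { pow: pow };     // exports must reference declared functions
}
print(ModulePow(this).pow(2.0, 10.0));  // 1024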
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1002388.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1002388.js
new file mode 100644
index 00000000000000..7ad066a666002d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1002388.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-type-reflection
+
+(function TestTableSetAndGetFunction() {
+  let func = new WebAssembly.Function({ parameters: [], results: [] }, x => x);
+  let table = new WebAssembly.Table({ element: "anyfunc", initial: 1 });
+  table.set(0, func);
+  table.get(0);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-math-intrinsic.js b/deps/v8/test/mjsunit/wasm/asm-wasm-math-intrinsic.js
index f6834362464f28..59fcc66d7e62b6 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-math-intrinsic.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-math-intrinsic.js
@@ -246,7 +246,6 @@ function assertBinop(name, math_func, asm_func) {
   ];
 
   for (name of f64_intrinsics) {
-    if (name == 'pow') continue;  // TODO(8505): asm.js correctness
     let math_func = Math[name];
     let f32 = false;
     print('Testing (f64) Math.' + name);
diff --git a/deps/v8/test/mjsunit/wasm/bigint.js b/deps/v8/test/mjsunit/wasm/bigint.js
index d64c0e06231f52..ff9046e9dcf16b 100644
--- a/deps/v8/test/mjsunit/wasm/bigint.js
+++ b/deps/v8/test/mjsunit/wasm/bigint.js
@@ -7,7 +7,7 @@
 load("test/mjsunit/wasm/wasm-module-builder.js");
 
 (function TestWasmI64ToJSBigInt() {
-  var builder = new WasmModuleBuilder();
+  let builder = new WasmModuleBuilder();
 
   builder
     .addFunction("fn", kSig_l_v) // () -> i64
@@ -16,22 +16,22 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
     ])
     .exportFunc();
 
-  var module = builder.instantiate();
+  let module = builder.instantiate();
 
   assertEquals(typeof module.exports.fn(), "bigint");
   assertEquals(module.exports.fn(), 3n);
 })();
 
 (function TestJSBigIntToWasmI64Global() {
-  var builder = new WasmModuleBuilder();
+  let builder = new WasmModuleBuilder();
 
-  var a_global_index = builder
+  let a_global_index = builder
     .addImportedGlobal("mod", "a", kWasmI64)
 
-  var b_global_index = builder
+  let b_global_index = builder
     .addImportedGlobal("mod", "b", kWasmI64);
 
-  var c_global_index = builder
+  let c_global_index = builder
     .addImportedGlobal("mod", "c", kWasmI64);
 
   builder
@@ -39,7 +39,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
     .addExportOfKind('b', kExternalGlobal, b_global_index)
     .addExportOfKind('c', kExternalGlobal, c_global_index);
 
-  var module = builder.instantiate({
+  let module = builder.instantiate({
     mod: {
       a: 1n,
       b: 2n ** 63n,
@@ -53,16 +53,16 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
 })();
 
 (function TestJSBigIntToWasmI64MutableGlobal() {
-  var builder = new WasmModuleBuilder();
+  let builder = new WasmModuleBuilder();
 
-  var a_global_index = builder
+  let a_global_index = builder
     .addImportedGlobal("mod", "a", kWasmI64, /* mutable = */ true)
 
   builder
     .addExportOfKind('a', kExternalGlobal, a_global_index);
 
   // as a non-object
-  var fn = () => builder.instantiate({
+  let fn = () => builder.instantiate({
     mod: {
       a: 1n,
     }
@@ -71,7 +71,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   assertThrows(fn, WebAssembly.LinkError);
 
   // as WebAssembly.Global object
-  var module = builder.instantiate({
+  let module = builder.instantiate({
     mod: {
       a: new WebAssembly.Global({ value: "i64", mutable: true }, 1n),
     }
@@ -81,20 +81,19 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
 })();
 
 (function TestJSBigIntToWasmI64Identity() {
-  var builder = new WasmModuleBuilder();
+  let builder = new WasmModuleBuilder();
 
   builder
     .addFunction("f", kSig_l_l) // i64 -> i64
     .addBody([
-      kExprGetLocal, 0x0,
+      kExprGetLocal, 0,
     ])
     .exportFunc();
 
-  var module = builder.instantiate();
-  var f = module.exports.f;
+  let module = builder.instantiate();
+  let f = module.exports.f;
 
   assertEquals(f(0n), 0n);
-  assertEquals(f(-0n), -0n);
   assertEquals(f(123n), 123n);
   assertEquals(f(-123n), -123n);
 
@@ -103,9 +102,31 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   assertThrows(() => f(5), TypeError);
 })();
 
+(function TestJSBigIntToWasmI64Projection() {
+  let builder = new WasmModuleBuilder();
+
+  builder
+    .addFunction("f", kSig_l_ll) // i64 -> i64
+    .addBody([
+      kExprGetLocal, 1,
+    ])
+    .exportFunc();
+
+  let module = builder.instantiate();
+  let f = module.exports.f;
+
+  assertEquals(f(1n, 0n), 0n);
+  assertEquals(f(1n, 123n), 123n);
+  assertEquals(f(1n, -123n), -123n);
+
+  assertEquals(f(1n, "5"), 5n);
+
+  assertThrows(() => f(1n, 5), TypeError);
+})();
+
 (function TestI64Global() {
-  var argument = { "value": "i64", "mutable": true };
-  var global = new WebAssembly.Global(argument);
+  let argument = { "value": "i64", "mutable": true };
+  let global = new WebAssembly.Global(argument);
 
   assertEquals(global.value, 0n); // initial value
 
@@ -117,10 +138,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
 })();
 
 (function TestI64GlobalValueOf() {
-  var argument = { "value": "i64" };
+  let argument = { "value": "i64" };
 
   // as literal
-  var global = new WebAssembly.Global(argument, {
+  let global = new WebAssembly.Global(argument, {
     valueOf() {
       return 123n;
     }
@@ -128,7 +149,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   assertEquals(global.value, 123n);
 
   // as string
-  var global2 = new WebAssembly.Global(argument, {
+  let global2 = new WebAssembly.Global(argument, {
     valueOf() {
       return "321";
     }
@@ -137,7 +158,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
 })();
 
 (function TestInvalidValtypeGlobalErrorMessage() {
-  var argument = { "value": "some string" };
+  let argument = { "value": "some string" };
   assertThrows(() => new WebAssembly.Global(argument), TypeError);
 
   try {
@@ -149,26 +170,26 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
 })();
 
 (function TestGlobalI64ValueWrongType() {
-  var argument = { "value": "i64" };
+  let argument = { "value": "i64" };
   assertThrows(() => new WebAssembly.Global(argument, 666), TypeError);
 })();
 
 (function TestGlobalI64SetWrongType() {
-  var argument = { "value": "i64", "mutable": true };
-  var global = new WebAssembly.Global(argument);
+  let argument = { "value": "i64", "mutable": true };
+  let global = new WebAssembly.Global(argument);
 
   assertThrows(() => global.value = 1, TypeError);
 })();
 
 (function TestFuncParamF64PassingBigInt() {
-  var builder = new WasmModuleBuilder();
+  let builder = new WasmModuleBuilder();
 
   builder
       .addFunction("f", kSig_v_d) // f64 -> ()
       .addBody([])
       .exportFunc();
 
-  var module = builder.instantiate();
+  let module = builder.instantiate();
 
   assertThrows(() => module.exports.f(123n), TypeError);
 })();
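// Editor's note (not part of the patch): the JS/wasm i64 boundary applies
// ToBigInt, which accepts BigInts and strings but rejects Numbers -- the
// same conversion BigInt.asIntN performs on its second argument:
print(BigInt.asIntN(64, "5"));        // 5n -- strings convert
print(BigInt.asIntN(64, 2n ** 63n));  // -9223372036854775808n -- wraps to i64
try {
  BigInt.asIntN(64, 5);               // Numbers do not implicitly convert
} catch (e) {
  print(e instanceof TypeError);      // true, matching assertThrows above
}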
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-global.js b/deps/v8/test/mjsunit/wasm/exceptions-global.js
index c3f208ca165dd7..4a74dfb010d35b 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-global.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-global.js
@@ -115,10 +115,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   assertThrowsEquals(() => instance.exports.rethrow_exnref(), exception2);
 })();
 
-// TODO(mstarzinger): Add the following test once proposal makes it clear how
-// far interaction with the mutable globals proposal is intended to go.
 // Test loading an imported mutable "exnref" being changed from the outside.
-/*(function TestGlobalExnRefGetImportedMutableAndRethrow() {
+(function TestGlobalExnRefGetImportedMutableAndRethrow() {
   print(arguments.callee.name);
   let builder = new WasmModuleBuilder();
   let g_index = builder.addImportedGlobal("m", "exn", kWasmExnRef, true);
@@ -135,7 +133,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   assertThrowsEquals(() => instance.exports.rethrow_exnref(), exception1);
   let exception2 = mutable_global.value = "an even fancier exception";
   assertThrowsEquals(() => instance.exports.rethrow_exnref(), exception2);
-})();*/
+})();
 
 // Test custom initialization index for a global "exnref" variable.
 (function TestGlobalExnRefInitIndex() {
diff --git a/deps/v8/test/mjsunit/wasm/multi-value.js b/deps/v8/test/mjsunit/wasm/multi-value.js
index 194880195829af..31f9e8149b130f 100644
--- a/deps/v8/test/mjsunit/wasm/multi-value.js
+++ b/deps/v8/test/mjsunit/wasm/multi-value.js
@@ -319,3 +319,34 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   assertEquals(instance.exports.main(2), 8);
   assertEquals(instance.exports.main(10), 200);
 })();
+
+(function MultiJSReturnTest() {
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  let sig_fi_if = makeSig([kWasmI32, kWasmF32], [kWasmF32, kWasmI32]);
+
+  builder.addFunction("swap", sig_fi_if)
+    .addBody([
+      kExprGetLocal, 1,
+      kExprGetLocal, 0])
+    .exportAs("swap");
+  builder.addFunction("addsubmul", kSig_iii_i)
+      .addBody([
+        kExprGetLocal, 0,
+        kExprGetLocal, 0,
+        kExprI32Add,
+        kExprGetLocal, 0,
+        kExprGetLocal, 0,
+        kExprI32Sub,
+        kExprGetLocal, 0,
+        kExprGetLocal, 0,
+        kExprI32Mul])
+    .exportAs("addsubmul");
+
+  let module = new WebAssembly.Module(builder.toBuffer());
+  let instance = new WebAssembly.Instance(module);
+  assertEquals(instance.exports.swap(0, 1.5), [1.5, 0]);
+  assertEquals(instance.exports.swap(2, 3.75), [3.75, 2]);
+  assertEquals(instance.exports.addsubmul(4), [8, 0, 16]);
+  assertEquals(instance.exports.addsubmul(5), [10, 0, 25]);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
index eb0a95384c644d..96d3a0bac5c72b 100644
--- a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
@@ -13,153 +13,174 @@ function instantiate(buffer, ffi) {
 }
 
 (function BasicTest() {
-  print("BasicTest");
-    let builder = new WasmModuleBuilder();
-    builder.addMemory(1, 2, false);
-    builder.addFunction("foo", kSig_i_v)
-        .addBody([kExprI32Const, 11])
-        .exportAs("blarg");
-
-    var buffer = builder.toBuffer(debug);
-    var instance = instantiate(buffer);
-    assertEquals(11, instance.exports.blarg());
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  builder.addMemory(1, 2, false);
+  builder.addFunction('foo', kSig_i_v)
+      .addBody([kExprI32Const, 11])
+      .exportAs('blarg');
+
+  var buffer = builder.toBuffer(debug);
+  var instance = instantiate(buffer);
+  assertEquals(11, instance.exports.blarg());
 })();
 
 (function ImportTest() {
-  print("ImportTest");
-    let builder = new WasmModuleBuilder();
-  var index = builder.addImport("", "print", makeSig_v_x(kWasmI32));
-    builder.addFunction("foo", kSig_v_v)
-        .addBody([kExprI32Const, 13, kExprCallFunction, index])
-        .exportAs("main");
-
-    var buffer = builder.toBuffer(debug);
-    var instance = instantiate(buffer, {"": {print: print}});
-    print("should print 13! ");
-    instance.exports.main();
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  var index = builder.addImport('', 'print', makeSig_v_x(kWasmI32));
+  builder.addFunction('foo', kSig_v_v)
+      .addBody([kExprI32Const, 13, kExprCallFunction, index])
+      .exportAs('main');
+
+  var buffer = builder.toBuffer(debug);
+  var instance = instantiate(buffer, {'': {print: print}});
+  print('should print 13! ');
+  instance.exports.main();
 })();
 
 (function LocalsTest() {
-  print("LocalsTest");
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  builder.addFunction(undefined, kSig_i_i)
+      .addLocals({i32_count: 1})
+      .addBody([kExprGetLocal, 0, kExprSetLocal, 1, kExprGetLocal, 1])
+      .exportAs('main');
+
+  var buffer = builder.toBuffer(debug);
+  var instance = instantiate(buffer);
+  assertEquals(19, instance.exports.main(19));
+  assertEquals(27777, instance.exports.main(27777));
+})();
+
+(function LocalsTest2() {
+  print(arguments.callee.name);
+  // TODO(titzer): i64 only works on 64-bit platforms.
+  var types = [
+    {locals: {i32_count: 1}, type: kWasmI32},
+    // {locals: {i64_count: 1}, type: kWasmI64},
+    {locals: {f32_count: 1}, type: kWasmF32},
+    {locals: {f64_count: 1}, type: kWasmF64},
+  ];
+
+  for (p of types) {
     let builder = new WasmModuleBuilder();
-    builder.addFunction(undefined, kSig_i_i)
-        .addLocals({i32_count: 1})
+    builder.addFunction(undefined, makeSig_r_x(p.type, p.type))
+        .addLocals(p.locals)
         .addBody([kExprGetLocal, 0, kExprSetLocal, 1, kExprGetLocal, 1])
-        .exportAs("main");
+        .exportAs('main');
 
     var buffer = builder.toBuffer(debug);
     var instance = instantiate(buffer);
     assertEquals(19, instance.exports.main(19));
     assertEquals(27777, instance.exports.main(27777));
-})();
-
-(function LocalsTest2() {
-  print("LocalsTest2");
-    // TODO(titzer): i64 only works on 64-bit platforms.
-    var types = [
-      {locals: {i32_count: 1}, type: kWasmI32},
-//      {locals: {i64_count: 1}, type: kWasmI64},
-      {locals: {f32_count: 1}, type: kWasmF32},
-      {locals: {f64_count: 1}, type: kWasmF64},
-    ];
-
-    for (p of types) {
-      let builder = new WasmModuleBuilder();
-      builder.addFunction(undefined, makeSig_r_x(p.type, p.type))
-        .addLocals(p.locals)
-        .addBody([kExprGetLocal, 0, kExprSetLocal, 1, kExprGetLocal, 1])
-        .exportAs("main");
-
-      var buffer = builder.toBuffer(debug);
-      var instance = instantiate(buffer);
-      assertEquals(19, instance.exports.main(19));
-      assertEquals(27777, instance.exports.main(27777));
-    }
+  }
 })();
 
 (function CallTest() {
-  print("CallTest");
-    let builder = new WasmModuleBuilder();
-    builder.addFunction("add", kSig_i_ii)
-        .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add]);
-    builder.addFunction("main", kSig_i_ii)
-        .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0])
-        .exportAs("main");
-
-    var instance = builder.instantiate();
-    assertEquals(44, instance.exports.main(11, 33));
-    assertEquals(7777, instance.exports.main(2222, 5555));
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  builder.addFunction('add', kSig_i_ii).addBody([
+    kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add
+  ]);
+  builder.addFunction('main', kSig_i_ii)
+      .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0])
+      .exportAs('main');
+
+  var instance = builder.instantiate();
+  assertEquals(44, instance.exports.main(11, 33));
+  assertEquals(7777, instance.exports.main(2222, 5555));
 })();
 
 (function IndirectCallTest() {
-  print("IndirectCallTest");
-    let builder = new WasmModuleBuilder();
-    builder.addFunction("add", kSig_i_ii)
-        .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add]);
-    builder.addFunction("main", kSig_i_iii)
-        .addBody([kExprGetLocal,
-                  1, kExprGetLocal, 2, kExprGetLocal, 0, kExprCallIndirect, 0, kTableZero])
-        .exportAs("main");
-    builder.appendToTable([0]);
-
-    var instance = builder.instantiate();
-    assertEquals(44, instance.exports.main(0, 11, 33));
-    assertEquals(7777, instance.exports.main(0, 2222, 5555));
-    assertThrows(function() { instance.exports.main(1, 1, 1); });
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  builder.addFunction('add', kSig_i_ii).addBody([
+    kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add
+  ]);
+  builder.addFunction('main', kSig_i_iii)
+      .addBody([
+        kExprGetLocal, 1, kExprGetLocal, 2, kExprGetLocal, 0, kExprCallIndirect,
+        0, kTableZero
+      ])
+      .exportAs('main');
+  builder.appendToTable([0]);
+
+  var instance = builder.instantiate();
+  assertEquals(44, instance.exports.main(0, 11, 33));
+  assertEquals(7777, instance.exports.main(0, 2222, 5555));
+  assertThrows(() => instance.exports.main(1, 1, 1));
 })();
 
 (function DataSegmentTest() {
-  print("DataSegmentTest");
-    let builder = new WasmModuleBuilder();
-    builder.addMemory(1, 1, false);
-    builder.addFunction("load", kSig_i_i)
-        .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
-        .exportAs("load");
-    builder.addDataSegment(0, [9, 9, 9, 9]);
-
-    var buffer = builder.toBuffer(debug);
-    var instance = instantiate(buffer);
-    assertEquals(151587081, instance.exports.load(0));
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  builder.addMemory(1, 1, false);
+  builder.addFunction('load', kSig_i_i)
+      .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+      .exportAs('load');
+  builder.addDataSegment(0, [9, 9, 9, 9]);
+
+  var buffer = builder.toBuffer(debug);
+  var instance = instantiate(buffer);
+  assertEquals(151587081, instance.exports.load(0));
 })();
 
-
 (function BasicTestWithUint8Array() {
-  print("BasicTestWithUint8Array");
-    let builder = new WasmModuleBuilder();
-    builder.addMemory(1, 2, false);
-    builder.addFunction("foo", kSig_i_v)
-        .addBody([kExprI32Const, 17])
-        .exportAs("blarg");
-
-    var buffer = builder.toBuffer(debug);
-    var array = new Uint8Array(buffer);
-    var instance = instantiate(array);
-    assertEquals(17, instance.exports.blarg());
-
-    var kPad = 5;
-    var buffer2 = new ArrayBuffer(kPad + buffer.byteLength + kPad);
-    var whole = new Uint8Array(buffer2);
-    for (var i = 0; i < whole.byteLength; i++) {
-      whole[i] = 0xff;
-    }
-    var array2 = new Uint8Array(buffer2, kPad, buffer.byteLength);
-    for (var i = 0; i < array2.byteLength; i++) {
-      array2[i] = array[i];
-    }
-    var instance = instantiate(array2);
-    assertEquals(17, instance.exports.blarg());
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  builder.addMemory(1, 2, false);
+  builder.addFunction('foo', kSig_i_v)
+      .addBody([kExprI32Const, 17])
+      .exportAs('blarg');
+
+  var buffer = builder.toBuffer(debug);
+  var array = new Uint8Array(buffer);
+  var instance = instantiate(array);
+  assertEquals(17, instance.exports.blarg());
+
+  var kPad = 5;
+  var buffer2 = new ArrayBuffer(kPad + buffer.byteLength + kPad);
+  var whole = new Uint8Array(buffer2);
+  for (var i = 0; i < whole.byteLength; i++) {
+    whole[i] = 0xff;
+  }
+  var array2 = new Uint8Array(buffer2, kPad, buffer.byteLength);
+  for (var i = 0; i < array2.byteLength; i++) {
+    array2[i] = array[i];
+  }
+  var instance = instantiate(array2);
+  assertEquals(17, instance.exports.blarg());
 })();
 
 (function ImportTestTwoLevel() {
-  print("ImportTestTwoLevel");
-    let builder = new WasmModuleBuilder();
-    var index = builder.addImport("mod", "print", makeSig_v_x(kWasmI32));
-    builder.addFunction("foo", kSig_v_v)
-        .addBody([kExprI32Const, 19, kExprCallFunction, index])
-        .exportAs("main");
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  var index = builder.addImport('mod', 'print', makeSig_v_x(kWasmI32));
+  builder.addFunction('foo', kSig_v_v)
+      .addBody([kExprI32Const, 19, kExprCallFunction, index])
+      .exportAs('main');
+
+  var buffer = builder.toBuffer(debug);
+  var instance = instantiate(buffer, {mod: {print: print}});
+  print('should print 19! ');
+  instance.exports.main();
+})();
 
-    var buffer = builder.toBuffer(debug);
-    var instance = instantiate(buffer, {mod: {print: print}});
-    print("should print 19! ");
-    instance.exports.main();
+(function TestI32Const() {
+  print(arguments.callee.name);
+  let ints = [
+    // A few negative number of different length.
+    -3 << 28, -20000, -400, -200, -100, -50, -10, -1,
+    // And a few positive number of different length.
+    0, 1, 2, 20, 120, 130, 260, 500, 5000000, 3 << 28
+  ];
+  for (let i of ints) {
+    let builder = new WasmModuleBuilder();
+    builder.addFunction('main', kSig_i_v)
+        .addBody([...wasmI32Const(i)])
+        .exportAs('main');
+    let instance = builder.instantiate();
+    assertEquals(i, instance.exports.main());
+  }
 })();
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js b/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js
index 0b857fb42fd848..b7a7ee7969b64f 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js
@@ -33,6 +33,12 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
   assertEquals("anyref", type.value);
   assertEquals(false, type.mutable);
   assertEquals(2, Object.getOwnPropertyNames(type).length);
+
+  global = new WebAssembly.Global({value: "anyfunc"});
+  type = WebAssembly.Global.type(global);
+  assertEquals("anyfunc", type.value);
+  assertEquals(false, type.mutable);
+  assertEquals(2, Object.getOwnPropertyNames(type).length);
 })();
 
 // This is an extension of "type-reflection.js/TestFunctionTableSetAndCall" to
@@ -65,17 +71,23 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
 
   // Test table #0 first.
   assertEquals(v1, instance.exports.call0(0));
+  assertSame(f1, table.get(0));
   table.set(1, f2);
   assertEquals(v2, instance.exports.call0(1));
+  assertSame(f2, table.get(1));
   table.set(1, f3);
   assertTraps(kTrapFuncSigMismatch, () => instance.exports.call0(1));
+  assertSame(f3, table.get(1));
 
   // Test table #1 next.
   assertTraps(kTrapFuncSigMismatch, () => instance.exports.call1(0));
   instance.exports.tbl.set(0, f1);
   assertEquals(v1, instance.exports.call1(0));
+  assertSame(f1, instance.exports.tbl.get(0));
   instance.exports.tbl.set(0, f2);
   assertEquals(v2, instance.exports.call1(0));
+  assertSame(f2, instance.exports.tbl.get(0));
   instance.exports.tbl.set(0, f3);
   assertTraps(kTrapFuncSigMismatch, () => instance.exports.call1(0));
+  assertSame(f3, instance.exports.tbl.get(0));
 })();
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection-with-exnref.js b/deps/v8/test/mjsunit/wasm/type-reflection-with-exnref.js
new file mode 100644
index 00000000000000..df655f6ce7e057
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/type-reflection-with-exnref.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-type-reflection --experimental-wasm-eh
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function TestGlobalType() {
+  let global = new WebAssembly.Global({value: "exnref", mutable: true});
+  let type = WebAssembly.Global.type(global);
+  assertEquals("exnref", type.value);
+  assertEquals(true, type.mutable);
+  assertEquals(2, Object.getOwnPropertyNames(type).length);
+
+  global = new WebAssembly.Global({value: "exnref"});
+  type = WebAssembly.Global.type(global);
+  assertEquals("exnref", type.value);
+  assertEquals(false, type.mutable);
+  assertEquals(2, Object.getOwnPropertyNames(type).length);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection.js b/deps/v8/test/mjsunit/wasm/type-reflection.js
index da9ef83fdaa131..a9a0b871436053 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --experimental-wasm-type-reflection
+// Flags: --experimental-wasm-type-reflection --expose-gc
 
 load('test/mjsunit/wasm/wasm-module-builder.js');
 
@@ -57,6 +57,52 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
   assertEquals(2, Object.getOwnPropertyNames(type).length);
 })();
 
+(function TestMemoryExports() {
+  let builder = new WasmModuleBuilder();
+  builder.addMemory(1).exportMemoryAs("a")
+  let module = new WebAssembly.Module(builder.toBuffer());
+  let exports = WebAssembly.Module.exports(module);
+
+  assertEquals("a", exports[0].name);
+  assertTrue("type" in exports[0]);
+  assertEquals(1, exports[0].type.minimum);
+  assertFalse("maximum" in exports[0].type);
+
+  builder = new WasmModuleBuilder();
+  builder.addMemory(2, 16).exportMemoryAs("b")
+  module = new WebAssembly.Module(builder.toBuffer());
+  exports = WebAssembly.Module.exports(module);
+
+  assertEquals("b", exports[0].name);
+  assertTrue("type" in exports[0]);
+  assertEquals(2, exports[0].type.minimum);
+  assertEquals(16, exports[0].type.maximum);
+})();
+
+(function TestMemoryImports() {
+  let builder = new WasmModuleBuilder();
+  builder.addImportedMemory("m", "a", 1);
+  let module = new WebAssembly.Module(builder.toBuffer());
+  let imports = WebAssembly.Module.imports(module);
+
+  assertEquals("a", imports[0].name);
+  assertEquals("m", imports[0].module);
+  assertTrue("type" in imports[0]);
+  assertEquals(1, imports[0].type.minimum);
+  assertFalse("maximum" in imports[0].type);
+
+  builder = new WasmModuleBuilder();
+  builder.addImportedMemory("m", "b", 2, 16);
+  module = new WebAssembly.Module(builder.toBuffer());
+  imports = WebAssembly.Module.imports(module);
+
+  assertEquals("b", imports[0].name);
+  assertEquals("m", imports[0].module);
+  assertTrue("type" in imports[0]);
+  assertEquals(2, imports[0].type.minimum);
+  assertEquals(16, imports[0].type.maximum);
+})();
+
 (function TestTableType() {
   let table = new WebAssembly.Table({initial: 1, element: "anyfunc"});
   let type = WebAssembly.Table.type(table);
@@ -73,6 +119,56 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
   assertEquals(3, Object.getOwnPropertyNames(type).length);
 })();
 
+(function TestTableExports() {
+  let builder = new WasmModuleBuilder();
+  builder.addTable(kWasmAnyFunc, 20).exportAs("a");
+  let module = new WebAssembly.Module(builder.toBuffer());
+  let exports = WebAssembly.Module.exports(module);
+
+  assertEquals("a", exports[0].name);
+  assertTrue("type" in exports[0]);
+  assertEquals("anyfunc", exports[0].type.element);
+  assertEquals(20, exports[0].type.minimum);
+  assertFalse("maximum" in exports[0].type);
+
+  builder = new WasmModuleBuilder();
+  builder.addTable(kWasmAnyFunc, 15, 25).exportAs("b");
+  module = new WebAssembly.Module(builder.toBuffer());
+  exports = WebAssembly.Module.exports(module);
+
+  assertEquals("b", exports[0].name);
+  assertTrue("type" in exports[0]);
+  assertEquals("anyfunc", exports[0].type.element);
+  assertEquals(15, exports[0].type.minimum);
+  assertEquals(25, exports[0].type.maximum);
+})();
+
+(function TestTableImports() {
+  let builder = new WasmModuleBuilder();
+  builder.addImportedTable("m", "a", 20, undefined, kWasmAnyFunc);
+  let module = new WebAssembly.Module(builder.toBuffer());
+  let imports = WebAssembly.Module.imports(module);
+
+  assertEquals("a", imports[0].name);
+  assertEquals("m", imports[0].module);
+  assertTrue("type" in imports[0]);
+  assertEquals("anyfunc", imports[0].type.element);
+  assertEquals(20, imports[0].type.minimum);
+  assertFalse("maximum" in imports[0].type);
+
+  builder = new WasmModuleBuilder();
+  builder.addImportedTable("m", "b", 15, 25, kWasmAnyFunc);
+  module = new WebAssembly.Module(builder.toBuffer());
+  imports = WebAssembly.Module.imports(module);
+
+  assertEquals("b", imports[0].name);
+  assertEquals("m", imports[0].module);
+  assertTrue("type" in imports[0]);
+  assertEquals("anyfunc", imports[0].type.element);
+  assertEquals(15, imports[0].type.minimum);
+  assertEquals(25, imports[0].type.maximum);
+})();
+
 (function TestGlobalType() {
   let global = new WebAssembly.Global({value: "i32", mutable: true});
   let type = WebAssembly.Global.type(global);
@@ -105,6 +201,44 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
   assertEquals(2, Object.getOwnPropertyNames(type).length);
 })();
 
+(function TestGlobalExports() {
+  let builder = new WasmModuleBuilder();
+  builder.addGlobal(kWasmI32).exportAs("a");
+  builder.addGlobal(kWasmF64, true).exportAs("b");
+  let module = new WebAssembly.Module(builder.toBuffer());
+  let exports = WebAssembly.Module.exports(module);
+
+  assertEquals("a", exports[0].name);
+  assertTrue("type" in exports[0]);
+  assertEquals("i32", exports[0].type.value);
+  assertEquals(false, exports[0].type.mutable);
+
+  assertEquals("b", exports[1].name);
+  assertTrue("type" in exports[1]);
+  assertEquals("f64", exports[1].type.value);
+  assertEquals(true, exports[1].type.mutable);
+})();
+
+(function TestGlobalImports() {
+  let builder = new WasmModuleBuilder();
+  builder.addImportedGlobal("m", "a", kWasmI32);
+  builder.addImportedGlobal("m", "b", kWasmF64, true);
+  let module = new WebAssembly.Module(builder.toBuffer());
+  let imports = WebAssembly.Module.imports(module);
+
+  assertEquals("a", imports[0].name);
+  assertEquals("m", imports[0].module);
+  assertTrue("type" in imports[0]);
+  assertEquals("i32", imports[0].type.value);
+  assertEquals(false, imports[0].type.mutable);
+
+  assertEquals("b", imports[1].name);
+  assertEquals("m", imports[1].module);
+  assertTrue("type" in imports[1]);
+  assertEquals("f64", imports[1].type.value);
+  assertEquals(true, imports[1].type.mutable);
+})();
+
 (function TestMemoryConstructorWithMinimum() {
   let mem = new WebAssembly.Memory({minimum: 1});
   assertTrue(mem instanceof WebAssembly.Memory);
@@ -209,6 +343,42 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
     () => new WebAssembly.Function({parameters:[], results:[]}, _ => 0));
 })();
 
+(function TestFunctionConstructorNonArray1() {
+  let log = [];  // Populated with a log of accesses.
+  let two = { toString: () => "2" };  // Just a fancy "2".
+  let logger = new Proxy({ length: two, "0": "i32", "1": "f32"}, {
+    get: function(obj, prop) { log.push(prop); return Reflect.get(obj, prop); },
+    set: function(obj, prop, val) { assertUnreachable(); }
+  });
+  let fun = new WebAssembly.Function({parameters:logger, results:[]}, _ => 0);
+  assertArrayEquals(["i32", "f32"], WebAssembly.Function.type(fun).parameters);
+  assertArrayEquals(["length", "0", "1"], log);
+})();
+
+(function TestFunctionConstructorNonArray2() {
+  let throw1 = { get length() { throw new Error("cannot see length"); }};
+  let throw2 = { length: { toString: _ => { throw new Error("no length") } } };
+  let throw3 = { length: "not a length value, this also throws" };
+  assertThrows(
+    () => new WebAssembly.Function({parameters:throw1, results:[]}), Error,
+    /cannot see length/);
+  assertThrows(
+    () => new WebAssembly.Function({parameters:throw2, results:[]}), Error,
+    /no length/);
+  assertThrows(
+    () => new WebAssembly.Function({parameters:throw3, results:[]}), TypeError,
+    /Argument 0 contains parameters without 'length'/);
+  assertThrows(
+    () => new WebAssembly.Function({parameters:[], results:throw1}), Error,
+    /cannot see length/);
+  assertThrows(
+    () => new WebAssembly.Function({parameters:[], results:throw2}), Error,
+    /no length/);
+  assertThrows(
+    () => new WebAssembly.Function({parameters:[], results:throw3}), TypeError,
+    /Argument 0 contains results without 'length'/);
+})();
+
 (function TestFunctionConstructedFunction() {
   let fun = new WebAssembly.Function({parameters:[], results:[]}, _ => 0);
   assertTrue(fun instanceof WebAssembly.Function);
@@ -219,8 +389,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
   assertSame(fun.__proto__.__proto__.__proto__, Object.prototype);
   assertSame(fun.constructor, WebAssembly.Function);
   assertEquals(typeof fun, 'function');
-  // TODO(7742): Enable once it is callable.
-  // assertDoesNotThrow(() => fun());
+  assertDoesNotThrow(() => fun());
 })();
 
 (function TestFunctionExportedFunction() {
@@ -271,6 +440,88 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
   });
 })();
 
+(function TestFunctionExports() {
+  let testcases = [
+    [kSig_v_v, {parameters:[], results:[]}],
+    [kSig_v_i, {parameters:["i32"], results:[]}],
+    [kSig_i_l, {parameters:["i64"], results:["i32"]}],
+    [kSig_v_ddi, {parameters:["f64", "f64", "i32"], results:[]}],
+    [kSig_f_f, {parameters:["f32"], results:["f32"]}],
+  ];
+  testcases.forEach(function([sig, expected]) {
+    let builder = new WasmModuleBuilder();
+    builder.addFunction("fun", sig).addBody([kExprUnreachable]).exportFunc();
+    let module = new WebAssembly.Module(builder.toBuffer());
+    let exports = WebAssembly.Module.exports(module);
+    assertEquals("fun", exports[0].name);
+    assertTrue("type" in exports[0]);
+    assertEquals(expected, exports[0].type);
+  });
+})();
+
+(function TestFunctionImports() {
+  let testcases = [
+    [kSig_v_v, {parameters:[], results:[]}],
+    [kSig_v_i, {parameters:["i32"], results:[]}],
+    [kSig_i_l, {parameters:["i64"], results:["i32"]}],
+    [kSig_v_ddi, {parameters:["f64", "f64", "i32"], results:[]}],
+    [kSig_f_f, {parameters:["f32"], results:["f32"]}],
+  ];
+  testcases.forEach(function([sig, expected]) {
+    let builder = new WasmModuleBuilder();
+    builder.addImport("m", "fun", sig);
+    let module = new WebAssembly.Module(builder.toBuffer());
+    let imports = WebAssembly.Module.imports(module);
+    assertEquals("fun", imports[0].name);
+    assertEquals("m", imports[0].module);
+    assertTrue("type" in imports[0]);
+    assertEquals(expected, imports[0].type);
+  });
+})();
+
+(function TestFunctionConstructedCoercions() {
+  let obj1 = { valueOf: _ => 123.45 };
+  let obj2 = { toString: _ => "456" };
+  let gcer = { valueOf: _ => gc() };
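+  // gc() returns undefined; ToNumber(undefined) is NaN, and converting NaN
+  // to i32 yields 0 (see the third test case below).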
+  let testcases = [
+    { params: { sig: ["i32"],
+                val: [23.5],
+                exp: [23], },
+      result: { sig: ["i32"],
+                val: 42.7,
+                exp: 42, },
+    },
+    { params: { sig: ["i32", "f32", "f64"],
+                val: [obj1,  obj2,  "789"],
+                exp: [123,   456,   789], },
+      result: { sig: [],
+                val: undefined,
+                exp: undefined, },
+    },
+    { params: { sig: ["i32", "f32", "f64"],
+                val: [gcer,  {},    "xyz"],
+                exp: [0,     NaN,   NaN], },
+      result: { sig: ["f64"],
+                val: gcer,
+                exp: NaN, },
+    },
+  ];
+  testcases.forEach(function({params, result}) {
+    let p = params.sig;
+    let r = result.sig;
+    let params_after;
+    function testFun() { params_after = arguments; return result.val; }
+    let fun = new WebAssembly.Function({parameters:p, results:r}, testFun);
+    let result_after = fun.apply(undefined, params.val);
+    assertArrayEquals(params.exp, params_after);
+    assertEquals(result.exp, result_after);
+  });
+})();
+
+(function TestFunctionConstructedIncompatibleSig() {
+  let fun = new WebAssembly.Function({parameters:["i64"], results:[]}, _ => 0);
+  assertThrows(() => fun(), TypeError,
+    /wasm function signature contains illegal type/);
+})();
+
 (function TestFunctionTableSetAndCall() {
   let builder = new WasmModuleBuilder();
   let fun1 = new WebAssembly.Function({parameters:[], results:["i32"]}, _ => 7);
@@ -353,3 +604,14 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
     () => builder.instantiate({ m: { fun: fun3 }}), WebAssembly.LinkError,
     /imported function does not match the expected type/);
 })();
+
+(function TestFunctionModuleImportReExport() {
+  let builder = new WasmModuleBuilder();
+  let fun = new WebAssembly.Function({parameters:[], results:["i32"]}, _ => 7);
+  let fun_index = builder.addImport("m", "fun", kSig_i_v);
+  builder.addExport("fun1", fun_index);
+  builder.addExport("fun2", fun_index);
+  let instance = builder.instantiate({ m: { fun: fun }});
+  assertSame(instance.exports.fun1, instance.exports.fun2);
+  assertSame(fun, instance.exports.fun1);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 8e423bd24f8226..45af969d09fcbb 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -552,7 +552,7 @@ class Binary {
     this.buffer[this.length++] = val >> 24;
   }
 
-  emit_leb(val, max_len) {
+  emit_leb_u(val, max_len) {
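+    // Encodes {val} as unsigned LEB128; signed values go through
+    // wasmSignedLeb() further down in this file.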
     this.ensure_space(max_len);
     for (let i = 0; i < max_len; ++i) {
       let v = val & 0xff;
@@ -567,11 +567,11 @@ class Binary {
   }
 
   emit_u32v(val) {
-    this.emit_leb(val, kMaxVarInt32Size);
+    this.emit_leb_u(val, kMaxVarInt32Size);
   }
 
   emit_u64v(val) {
-    this.emit_leb(val, kMaxVarInt64Size);
+    this.emit_leb_u(val, kMaxVarInt64Size);
   }
 
   emit_bytes(data) {
@@ -1384,13 +1384,24 @@ class WasmModuleBuilder {
   }
 }
 
-function wasmI32Const(val) {
-  let bytes = [kExprI32Const];
-  for (let i = 0; i < 4; ++i) {
-    bytes.push(0x80 | ((val >> (7 * i)) & 0x7f));
+function wasmSignedLeb(val, max_len = 5) {
+  let res = [];
+  for (let i = 0; i < max_len; ++i) {
+    let v = val & 0x7f;
+    // If {v} sign-extended from 7 to 32 bits is equal to val, we are done.
+    if (((v << 25) >> 25) == val) {
+      res.push(v);
+      return res;
+    }
+    res.push(v | 0x80);
+    val = val >> 7;
   }
-  bytes.push((val >> (7 * 4)) & 0x7f);
-  return bytes;
+  throw new Error(
+      'Leb value <' + val + '> exceeds maximum length of ' + max_len);
+}
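+
+// A few worked encodings, as a sanity check:
+//   wasmSignedLeb(0)   == [0x00]
+//   wasmSignedLeb(-1)  == [0x7f]        (sign bit set within a single byte)
+//   wasmSignedLeb(300) == [0xac, 0x02]  (0x2c | 0x80, then 300 >> 7 == 2)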
+
+function wasmI32Const(val) {
+  return [kExprI32Const, ...wasmSignedLeb(val, 5)];
 }
 
 function wasmF32Const(f) {
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 103c1334a102ba..8c07576d3aab0f 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -42,7 +42,7 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
   void Free(void* p, size_t) override {}
 };
 
-static void DumpKnownMap(i::Heap* heap, const char* space_name,
+static void DumpKnownMap(FILE* out, i::Heap* heap, const char* space_name,
                          i::HeapObject object) {
 #define RO_ROOT_LIST_CASE(type, name, CamelName) \
   if (root_name == nullptr && object == roots.name()) root_name = #CamelName;
@@ -59,14 +59,14 @@ static void DumpKnownMap(i::Heap* heap, const char* space_name,
   MUTABLE_ROOT_LIST(MUTABLE_ROOT_LIST_CASE)
 
   if (root_name == nullptr) return;
-  i::PrintF("  (\"%s\", 0x%05" V8PRIxPTR "): (%d, \"%s\"),\n", space_name,
+  i::PrintF(out, "  (\"%s\", 0x%05" V8PRIxPTR "): (%d, \"%s\"),\n", space_name,
             root_ptr, map.instance_type(), root_name);
 
 #undef MUTABLE_ROOT_LIST_CASE
 #undef RO_ROOT_LIST_CASE
 }
 
-static void DumpKnownObject(i::Heap* heap, const char* space_name,
+static void DumpKnownObject(FILE* out, i::Heap* heap, const char* space_name,
                             i::HeapObject object) {
 #define RO_ROOT_LIST_CASE(type, name, CamelName)        \
   if (root_name == nullptr && object == roots.name()) { \
@@ -90,14 +90,14 @@ static void DumpKnownObject(i::Heap* heap, const char* space_name,
   if (root_name == nullptr) return;
   if (!i::RootsTable::IsImmortalImmovable(root_index)) return;
 
-  i::PrintF("  (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", space_name, root_ptr,
-            root_name);
+  i::PrintF(out, "  (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", space_name,
+            root_ptr, root_name);
 
 #undef ROOT_LIST_CASE
 #undef RO_ROOT_LIST_CASE
 }
 
-static int DumpHeapConstants(const char* argv0) {
+static int DumpHeapConstants(FILE* out, const char* argv0) {
   // Start up V8.
   std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
   v8::V8::InitializePlatform(platform.get());
@@ -112,42 +112,42 @@ static int DumpHeapConstants(const char* argv0) {
     i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
     i::ReadOnlyHeap* read_only_heap =
         reinterpret_cast<i::Isolate*>(isolate)->read_only_heap();
-    i::PrintF("%s", kHeader);
-#define DUMP_TYPE(T) i::PrintF("  %d: \"%s\",\n", i::T, #T);
-    i::PrintF("INSTANCE_TYPES = {\n");
+    i::PrintF(out, "%s", kHeader);
+#define DUMP_TYPE(T) i::PrintF(out, "  %d: \"%s\",\n", i::T, #T);
+    i::PrintF(out, "INSTANCE_TYPES = {\n");
     INSTANCE_TYPE_LIST(DUMP_TYPE)
-    i::PrintF("}\n");
+    i::PrintF(out, "}\n");
 #undef DUMP_TYPE
 
     {
       // Dump the KNOWN_MAP table to the console.
-      i::PrintF("\n# List of known V8 maps.\n");
-      i::PrintF("KNOWN_MAPS = {\n");
+      i::PrintF(out, "\n# List of known V8 maps.\n");
+      i::PrintF(out, "KNOWN_MAPS = {\n");
       i::ReadOnlyHeapObjectIterator ro_iterator(read_only_heap);
       for (i::HeapObject object = ro_iterator.Next(); !object.is_null();
            object = ro_iterator.Next()) {
         if (!object.IsMap()) continue;
-        DumpKnownMap(heap, i::Heap::GetSpaceName(i::RO_SPACE), object);
+        DumpKnownMap(out, heap, i::Heap::GetSpaceName(i::RO_SPACE), object);
       }
       i::PagedSpaceObjectIterator iterator(heap->map_space());
       for (i::HeapObject object = iterator.Next(); !object.is_null();
            object = iterator.Next()) {
         if (!object.IsMap()) continue;
-        DumpKnownMap(heap, i::Heap::GetSpaceName(i::MAP_SPACE), object);
+        DumpKnownMap(out, heap, i::Heap::GetSpaceName(i::MAP_SPACE), object);
       }
-      i::PrintF("}\n");
+      i::PrintF(out, "}\n");
     }
 
     {
       // Dump the KNOWN_OBJECTS table to the console.
-      i::PrintF("\n# List of known V8 objects.\n");
-      i::PrintF("KNOWN_OBJECTS = {\n");
+      i::PrintF(out, "\n# List of known V8 objects.\n");
+      i::PrintF(out, "KNOWN_OBJECTS = {\n");
       i::ReadOnlyHeapObjectIterator ro_iterator(read_only_heap);
       for (i::HeapObject object = ro_iterator.Next(); !object.is_null();
            object = ro_iterator.Next()) {
         // Skip read-only heap maps, they will be reported elsewhere.
         if (object.IsMap()) continue;
-        DumpKnownObject(heap, i::Heap::GetSpaceName(i::RO_SPACE), object);
+        DumpKnownObject(out, heap, i::Heap::GetSpaceName(i::RO_SPACE), object);
       }
 
       i::PagedSpaceIterator spit(heap);
@@ -158,22 +158,22 @@ static int DumpHeapConstants(const char* argv0) {
           continue;
         const char* sname = s->name();
         for (i::HeapObject o = it.Next(); !o.is_null(); o = it.Next()) {
-          DumpKnownObject(heap, sname, o);
+          DumpKnownObject(out, heap, sname, o);
         }
       }
-      i::PrintF("}\n");
+      i::PrintF(out, "}\n");
     }
 
     // Dump frame markers
-    i::PrintF("\n# List of known V8 Frame Markers.\n");
-#define DUMP_MARKER(T, class) i::PrintF("  \"%s\",\n", #T);
-    i::PrintF("FRAME_MARKERS = (\n");
+    i::PrintF(out, "\n# List of known V8 Frame Markers.\n");
+#define DUMP_MARKER(T, class) i::PrintF(out, "  \"%s\",\n", #T);
+    i::PrintF(out, "FRAME_MARKERS = (\n");
     STACK_FRAME_TYPE_LIST(DUMP_MARKER)
-    i::PrintF(")\n");
+    i::PrintF(out, ")\n");
 #undef DUMP_MARKER
   }
 
-  i::PrintF("\n# This set of constants is generated from a %s build.\n",
+  i::PrintF(out, "\n# This set of constants is generated from a %s build.\n",
             kBuild);
 
   // Teardown.
@@ -184,4 +184,10 @@ static int DumpHeapConstants(const char* argv0) {
 
 }  // namespace v8
 
-int main(int argc, char* argv[]) { return v8::DumpHeapConstants(argv[0]); }
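+// Writes the tables to stdout, or to a file when invoked as:
+//   mkgrokdump --outfile <path>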
+int main(int argc, char* argv[]) {
+  FILE* out = stdout;
+  if (argc > 2 && strcmp(argv[1], "--outfile") == 0) {
+    out = fopen(argv[2], "wb");
+    if (out == nullptr) {
+      fprintf(stderr, "Failed to open outfile %s for writing.\n", argv[2]);
+      return 1;
+    }
+  }
+  return v8::DumpHeapConstants(out, argv[0]);
+}
diff --git a/deps/v8/test/test262/local-tests/test/language/expressions/class/elements/private-field-on-nested-class.js b/deps/v8/test/test262/local-tests/test/language/expressions/class/elements/private-field-on-nested-class.js
new file mode 100644
index 00000000000000..8aafe6df386c9d
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/expressions/class/elements/private-field-on-nested-class.js
@@ -0,0 +1,45 @@
+// This file was procedurally generated from the following sources:
+// - src/class-elements/private-field-on-nested-class.case
+// - src/class-elements/default/cls-expr.template
+/*---
+description: PrivateName CallExpression usage (private field) (field definitions in a class expression)
+esid: prod-FieldDefinition
+features: [class-fields-private, class-fields-public, class]
+flags: [generated]
+info: |
+    Updated Productions
+
+    CallExpression[Yield, Await]:
+      CoverCallExpressionAndAsyncArrowHead[?Yield, ?Await]
+      SuperCall[?Yield, ?Await]
+      CallExpression[?Yield, ?Await]Arguments[?Yield, ?Await]
+      CallExpression[?Yield, ?Await][Expression[+In, ?Yield, ?Await]]
+      CallExpression[?Yield, ?Await].IdentifierName
+      CallExpression[?Yield, ?Await]TemplateLiteral[?Yield, ?Await]
+      CallExpression[?Yield, ?Await].PrivateName
+
+---*/
+
+
+var C = class {
+  #outer = 'test262';
+
+  B_withoutPrivateField = class {
+    method(o) {
+      return o.#outer;
+    }
+  }
+
+  B_withPrivateField = class {
+    #inner = 42;
+    method(o) {
+      return o.#outer;
+    }
+  }
+}
+
+let c = new C();
+let innerB1 = new c.B_withoutPrivateField();
+assert.sameValue(innerB1.method(c), 'test262');
+let innerB2 = new c.B_withPrivateField();
+assert.sameValue(innerB2.method(c), 'test262');
diff --git a/deps/v8/test/test262/local-tests/test/language/statements/class/elements/private-field-on-nested-class.js b/deps/v8/test/test262/local-tests/test/language/statements/class/elements/private-field-on-nested-class.js
new file mode 100644
index 00000000000000..3f22efeb7cd3f0
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/statements/class/elements/private-field-on-nested-class.js
@@ -0,0 +1,45 @@
+// This file was procedurally generated from the following sources:
+// - src/class-elements/private-field-on-nested-class.case
+// - src/class-elements/default/cls-decl.template
+/*---
+description: PrivateName CallExpression usage (private field) (field definitions in a class declaration)
+esid: prod-FieldDefinition
+features: [class-fields-private, class-fields-public, class]
+flags: [generated]
+info: |
+    Updated Productions
+
+    CallExpression[Yield, Await]:
+      CoverCallExpressionAndAsyncArrowHead[?Yield, ?Await]
+      SuperCall[?Yield, ?Await]
+      CallExpression[?Yield, ?Await]Arguments[?Yield, ?Await]
+      CallExpression[?Yield, ?Await][Expression[+In, ?Yield, ?Await]]
+      CallExpression[?Yield, ?Await].IdentifierName
+      CallExpression[?Yield, ?Await]TemplateLiteral[?Yield, ?Await]
+      CallExpression[?Yield, ?Await].PrivateName
+
+---*/
+
+
+class C {
+  #outer = 'test262';
+
+  B_withoutPrivateField = class {
+    method(o) {
+      return o.#outer;
+    }
+  }
+
+  B_withPrivateField = class {
+    #inner = 42;
+    method(o) {
+      return o.#outer;
+    }
+  }
+}
+
+let c = new C();
+let innerB1 = new c.B_withoutPrivateField();
+assert.sameValue(innerB1.method(c), 'test262');
+let innerB2 = new c.B_withPrivateField();
+assert.sameValue(innerB2.method(c), 'test262');
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 54ba1579cca2c7..7ccb304a0b05a0 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -60,32 +60,13 @@
   'language/expressions/assignment/S11.13.1_A6*': [FAIL],
 
   # https://bugs.chromium.org/p/v8/issues/detail?id=4709
-  'built-ins/Promise/all/resolve-element-function-name': [FAIL],
-  'built-ins/Promise/allSettled/reject-element-function-name': [FAIL],
-  'built-ins/Promise/allSettled/resolve-element-function-name': [FAIL],
-  'built-ins/Promise/executor-function-name': [FAIL],
-  'built-ins/Promise/reject-function-name': [FAIL],
-  'built-ins/Promise/resolve-function-name': [FAIL],
-  'built-ins/Proxy/revocable/revocation-function-name': [FAIL],
   'language/expressions/assignment/fn-name-lhs-cover': [FAIL],
-  'language/expressions/assignment/fn-name-lhs-member': [FAIL],
-  'language/expressions/function/name': [FAIL],
-  'language/expressions/generators/name': [FAIL],
-  'intl402/NumberFormat/prototype/format/format-function-name': [FAIL],
-  'intl402/DateTimeFormat/prototype/format/format-function-name': [FAIL],
-  'intl402/Collator/prototype/compare/compare-function-name': [FAIL],
 
   # Intl tests which require flags.
   # https://bugs.chromium.org/p/v8/issues/detail?id=9154
   'intl402/NumberFormat/numbering-system-options': ['--harmony-intl-add-calendar-numbering-system'],
   'intl402/DateTimeFormat/numbering-system-calendar-options': ['--harmony-intl-add-calendar-numbering-system'],
 
-  # https://bugs.chromium.org/p/v8/issues/detail?id=9319
-  'intl402/NumberFormat/prototype/resolvedOptions/order': [FAIL],
-
-  # crbug.com/v8/9483
-  'intl402/NumberFormat/currencyDisplay-unit': [FAIL],
-
   # https://bugs.chromium.org/p/v8/issues/detail?id=9084
   'intl402/supportedLocalesOf-consistent-with-resolvedOptions': [FAIL],
   'intl402/fallback-locales-are-supported': [FAIL],
@@ -450,10 +431,6 @@
   'built-ins/TypedArrayConstructors/internals/Set/key-is-out-of-bounds': [FAIL],
   'built-ins/TypedArrayConstructors/internals/Set/BigInt/key-is-out-of-bounds': [FAIL],
 
-  # https://bugs.chromium.org/p/v8/issues/detail?id=8100
-  'built-ins/Atomics/notify/bigint/*': [SKIP],
-  'built-ins/Atomics/wait/bigint/*': [SKIP],
-
   # https://bugs.chromium.org/p/v8/issues/detail?id=6049
   'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-caller': [FAIL_SLOPPY],
   'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-arguments': [FAIL_SLOPPY],
@@ -480,25 +457,8 @@
   'language/expressions/async-generator/generator-created-after-decl-inst': [FAIL],
   'language/statements/async-generator/generator-created-after-decl-inst': [FAIL],
 
-  # await tests that require flags
-  'language/expressions/await/async-generator-interleaved': ['--harmony-await-optimization'],
-  'language/expressions/await/await-monkey-patched-promise': ['--harmony-await-optimization'],
-  'language/expressions/await/for-await-of-interleaved': ['--harmony-await-optimization'],
-  'language/expressions/await/async-await-interleaved': ['--harmony-await-optimization'],
-
-  # https://github.com/tc39/test262/issues/2033
-  'language/expressions/class/elements/private-derived-cls-direct-eval-err-contains-supercall': [SKIP],
-  'language/expressions/class/elements/private-derived-cls-direct-eval-err-contains-supercall-1': [SKIP],
-  'language/expressions/class/elements/private-derived-cls-direct-eval-err-contains-supercall-2': [SKIP],
-  'language/expressions/class/elements/private-derived-cls-indirect-eval-err-contains-supercall': [SKIP],
-  'language/expressions/class/elements/private-derived-cls-indirect-eval-err-contains-supercall-1': [SKIP],
-  'language/expressions/class/elements/private-derived-cls-indirect-eval-err-contains-supercall-2': [SKIP],
-  'language/statements/class/elements/private-derived-cls-direct-eval-err-contains-supercall': [SKIP],
-  'language/statements/class/elements/private-derived-cls-direct-eval-err-contains-supercall-1': [SKIP],
-  'language/statements/class/elements/private-derived-cls-direct-eval-err-contains-supercall-2': [SKIP],
-  'language/statements/class/elements/private-derived-cls-indirect-eval-err-contains-supercall': [SKIP],
-  'language/statements/class/elements/private-derived-cls-indirect-eval-err-contains-supercall-1': [SKIP],
-  'language/statements/class/elements/private-derived-cls-indirect-eval-err-contains-supercall-2': [SKIP],
+  # https://bugs.chromium.org/p/v8/issues/detail?id=9611
+  'language/statements/class/elements/private-field-is-visible-in-computed-properties': [SKIP],
 
   # https://github.com/tc39/test262/issues/2034
   'language/expressions/postfix-decrement/arguments': [SKIP],
@@ -565,10 +525,285 @@
   # https://github.com/tc39/test262/issues/2255
   'built-ins/FinalizationGroup/prototype/cleanupSome/iterator-holdings-multiple-values': [FAIL],
 
+  # https://github.com/tc39/test262/issues/2260
+  'built-ins/FinalizationGroup/prototype/cleanupSome/return-undefined-with-gc': [FAIL],
+
+  # https://bugs.chromium.org/p/v8/issues/detail?id=9612
+  'intl402/DateTimeFormat/prototype/formatRange/fractionalSecondDigits': [FAIL],
+
+  # https://bugs.chromium.org/p/v8/issues/detail?id=9613
+  'intl402/Intl/getCanonicalLocales/canonicalized-tags': [FAIL],
+  'intl402/Intl/getCanonicalLocales/grandfathered': [FAIL],
+  'intl402/Intl/getCanonicalLocales/invalid-tags': [FAIL],
+  'intl402/Intl/getCanonicalLocales/non-iana-canon': [FAIL],
+  'intl402/Intl/getCanonicalLocales/preferred-grandfathered': [FAIL],
+  'intl402/Intl/getCanonicalLocales/preferred-variant': [FAIL],
+  'intl402/language-tags-invalid': [FAIL],
+  'intl402/ListFormat/constructor/constructor/locales-valid': [FAIL],
+  'intl402/Locale/constructor-non-iana-canon': [FAIL],
+  'intl402/Locale/constructor-options-region-valid': [FAIL],
+  'intl402/Locale/constructor-tag': [FAIL],
+  'intl402/Locale/getters': [FAIL],
+  'intl402/Locale/likely-subtags-grandfathered': [FAIL],
+  'intl402/PluralRules/prototype/resolvedOptions/order': [FAIL],
+  'intl402/RelativeTimeFormat/constructor/constructor/locales-valid': [FAIL],
+  'intl402/Segmenter/constructor/constructor/locales-valid': [FAIL],
+
+  # https://bugs.chromium.org/p/v8/issues/detail?id=9647
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-break-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-case-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-catch-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-class-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-const-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-continue-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-debugger-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-default-escaped-ext': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-default-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-delete-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-do-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-else-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-enum-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-export-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-extends-escaped-ext': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-extends-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-finally-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-for-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-function-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-if-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-import-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-in-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-instanceof-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-new-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-return-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-super-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-switch-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-this-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-throw-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-try-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-typeof-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-var-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-void-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-while-escaped': [FAIL],
+  'language/expressions/assignment/dstr/ident-name-prop-name-literal-with-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-break-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-case-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-catch-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-class-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-const-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-continue-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-debugger-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-default-escaped-ext': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-default-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-delete-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-do-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-else-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-enum-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-export-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-extends-escaped-ext': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-extends-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-finally-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-for-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-function-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-if-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-import-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-in-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-instanceof-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-new-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-return-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-super-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-switch-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-this-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-throw-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-try-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-typeof-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-var-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-void-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-while-escaped': [FAIL],
+  'language/expressions/assignment/member-expr-ident-name-with-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-break-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-case-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-catch-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-class-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-const-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-continue-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-debugger-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-default-escaped-ext': [FAIL],
+  'language/expressions/class/ident-name-method-def-default-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-delete-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-do-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-else-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-enum-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-export-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-extends-escaped-ext': [FAIL],
+  'language/expressions/class/ident-name-method-def-extends-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-finally-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-for-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-function-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-if-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-import-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-in-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-instanceof-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-new-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-return-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-super-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-switch-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-this-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-throw-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-try-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-typeof-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-var-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-void-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-while-escaped': [FAIL],
+  'language/expressions/class/ident-name-method-def-with-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-break-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-case-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-catch-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-class-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-const-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-continue-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-debugger-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-default-escaped-ext': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-default-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-delete-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-do-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-else-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-enum-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-export-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-extends-escaped-ext': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-extends-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-finally-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-for-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-function-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-if-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-import-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-in-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-instanceof-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-new-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-return-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-super-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-switch-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-this-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-throw-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-try-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-typeof-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-var-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-void-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-while-escaped': [FAIL],
+  'language/expressions/object/covered-ident-name-prop-name-literal-with-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-break-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-case-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-catch-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-class-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-const-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-continue-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-debugger-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-default-escaped-ext': [FAIL],
+  'language/expressions/object/ident-name-method-def-default-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-delete-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-do-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-else-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-enum-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-export-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-extends-escaped-ext': [FAIL],
+  'language/expressions/object/ident-name-method-def-extends-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-finally-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-for-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-function-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-if-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-import-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-in-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-instanceof-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-new-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-return-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-super-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-switch-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-this-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-throw-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-try-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-typeof-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-var-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-void-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-while-escaped': [FAIL],
+  'language/expressions/object/ident-name-method-def-with-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-break-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-case-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-catch-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-class-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-const-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-continue-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-debugger-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-default-escaped-ext': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-default-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-delete-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-do-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-else-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-enum-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-export-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-extends-escaped-ext': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-extends-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-finally-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-for-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-function-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-if-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-import-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-in-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-instanceof-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-new-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-return-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-super-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-switch-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-this-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-throw-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-try-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-typeof-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-var-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-void-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-while-escaped': [FAIL],
+  'language/expressions/object/ident-name-prop-name-literal-with-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-break-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-case-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-catch-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-class-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-const-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-continue-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-debugger-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-default-escaped-ext': [FAIL],
+  'language/statements/class/ident-name-method-def-default-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-delete-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-do-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-else-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-enum-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-export-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-extends-escaped-ext': [FAIL],
+  'language/statements/class/ident-name-method-def-extends-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-finally-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-for-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-function-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-if-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-import-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-in-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-instanceof-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-new-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-return-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-super-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-switch-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-this-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-throw-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-try-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-typeof-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-var-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-void-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-while-escaped': [FAIL],
+  'language/statements/class/ident-name-method-def-with-escaped': [FAIL],
+
   ######################## NEEDS INVESTIGATION ###########################
 
   # https://bugs.chromium.org/p/v8/issues/detail?id=7833
+  #
+  # Test262 needs to expose CanBlock
+  'built-ins/Atomics/wait/bigint/cannot-suspend-throws': [SKIP],
   'built-ins/Atomics/wait/cannot-suspend-throws': [SKIP],
+  # Flaky
   'built-ins/Atomics/wait/undefined-index-defaults-to-zero': [SKIP],
 
   ##################### DELIBERATE INCOMPATIBILITIES #####################
@@ -688,10 +923,6 @@
 ['asan == True', {
   # BUG(v8:4653): Test262 tests which rely on quit() are not compatible with
   # asan's --omit-quit flag.
-  'built-ins/Promise/allSettled/reject-deferred': [FAIL],
-  'built-ins/Promise/allSettled/reject-ignored-deferred': [FAIL],
-  'built-ins/Promise/allSettled/reject-ignored-immed': [FAIL],
-  'built-ins/Promise/allSettled/reject-immed': [FAIL],
   'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
   'language/expressions/dynamic-import/always-create-new-promise': [SKIP],
   'language/expressions/dynamic-import/assign-expr-get-value-abrupt-throws': [SKIP],
@@ -837,8 +1068,6 @@
   'language/expressions/dynamic-import/catch/top-level-import-catch-instn-iee-err-circular': [SKIP],
   'language/expressions/dynamic-import/catch/top-level-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
   'language/expressions/dynamic-import/custom-primitive': [SKIP],
-  'language/expressions/dynamic-import/double-error-resolution': [SKIP],
-  'language/expressions/dynamic-import/double-error-resolution-promise': [SKIP],
   'language/expressions/dynamic-import/escape-sequence-import': [SKIP],
   'language/expressions/dynamic-import/eval-export-dflt-cls-anon': [SKIP],
   'language/expressions/dynamic-import/eval-export-dflt-cls-named': [SKIP],
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index ff866a832f4aae..9aa91dfaef144c 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -44,7 +44,6 @@
 
 # TODO(littledan): move the flag mapping into the status file
 FEATURE_FLAGS = {
-  'numeric-separator-literal': '--harmony-numeric-separator',
   'Intl.DateTimeFormat-datetimestyle': '--harmony-intl-datetime-style',
   'Intl.DateTimeFormat-formatRange': '--harmony-intl-date-format-range',
   'Intl.NumberFormat-unified': '--harmony-intl-numberformat-unified',
@@ -59,10 +58,12 @@
   'FinalizationGroup': '--harmony-weak-refs',
   'WeakRef': '--harmony-weak-refs',
   'host-gc-required': '--expose-gc-as=v8GC',
+  'optional-chaining': '--harmony-optional-chaining',
 }
 
 SKIPPED_FEATURES = set(['class-methods-private',
-                        'class-static-methods-private'])
+                        'class-static-methods-private',
+                        'top-level-await'])
 
 DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
 
diff --git a/deps/v8/test/torque/OWNERS b/deps/v8/test/torque/OWNERS
index 5f8830d55af36d..4e832f8c2595ee 100644
--- a/deps/v8/test/torque/OWNERS
+++ b/deps/v8/test/torque/OWNERS
@@ -1 +1 @@
-file://src/torque/OWNERS
+file:../../src/torque/OWNERS
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 6d2aee1479d863..4fa106b9c55ed9 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -83,11 +83,11 @@ namespace test {
     }
   }
 
-  builtin GenericBuiltinTest<T: type>(_c: Context, _param: T): Object {
+  builtin GenericBuiltinTest<T: type>(_c: Context, _param: T): JSAny {
     return Null;
   }
 
-  GenericBuiltinTest<Object>(_c: Context, param: Object): Object {
+  GenericBuiltinTest<JSAny>(_c: Context, param: JSAny): JSAny {
     return param;
   }
 
@@ -95,8 +95,8 @@ namespace test {
   macro TestBuiltinSpecialization(c: Context) {
     check(GenericBuiltinTest<Smi>(c, 0) == Null);
     check(GenericBuiltinTest<Smi>(c, 1) == Null);
-    check(GenericBuiltinTest<Object>(c, Undefined) == Undefined);
-    check(GenericBuiltinTest<Object>(c, Undefined) == Undefined);
+    check(GenericBuiltinTest<JSAny>(c, Undefined) == Undefined);
+    check(GenericBuiltinTest<JSAny>(c, Undefined) == Undefined);
   }
 
   macro LabelTestHelper4(flag: constexpr bool): never
@@ -202,9 +202,8 @@ namespace test {
 
   @export
   macro TestFunctionPointerToGeneric(c: Context) {
-    const fptr1: builtin(Context, Smi) => Object = GenericBuiltinTest<Smi>;
-    const fptr2: builtin(Context, Object) => Object =
-        GenericBuiltinTest<Object>;
+    const fptr1: builtin(Context, Smi) => JSAny = GenericBuiltinTest<Smi>;
+    const fptr2: builtin(Context, JSAny) => JSAny = GenericBuiltinTest<JSAny>;
 
     check(fptr1(c, 0) == Null);
     check(fptr1(c, 1) == Null);
@@ -212,7 +211,7 @@ namespace test {
     check(fptr2(c, Undefined) == Undefined);
   }
 
-  type ObjectToObject = builtin(Context, Object) => Object;
+  type ObjectToObject = builtin(Context, JSAny) => JSAny;
   @export
   macro TestTypeAlias(x: ObjectToObject): BuiltinPtr {
     return x;
@@ -452,7 +451,7 @@ namespace test {
 
   @export
   macro TestSubtyping(x: Smi) {
-    const _foo: Object = x;
+    const _foo: JSAny = x;
   }
 
   macro IncrementIfSmi<A: type>(x: A): A {
@@ -543,95 +542,34 @@ namespace test {
     check(equal);
   }
 
-  macro BoolToBranch(x: bool): never
-      labels Taken, NotTaken {
-    if (x) {
-      goto Taken;
-    } else {
-      goto NotTaken;
-    }
-  }
-
-  @export
-  macro TestOrAnd1(x: bool, y: bool, z: bool): bool {
-    return BoolToBranch(x) || y && z ? true : false;
-  }
-
-  @export
-  macro TestOrAnd2(x: bool, y: bool, z: bool): bool {
-    return x || BoolToBranch(y) && z ? true : false;
-  }
-
   @export
-  macro TestOrAnd3(x: bool, y: bool, z: bool): bool {
-    return x || y && BoolToBranch(z) ? true : false;
+  macro TestOrAnd(x: bool, y: bool, z: bool): bool {
+    return x || y && z ? true : false;
   }
 
   @export
-  macro TestAndOr1(x: bool, y: bool, z: bool): bool {
-    return BoolToBranch(x) && y || z ? true : false;
-  }
-
-  @export
-  macro TestAndOr2(x: bool, y: bool, z: bool): bool {
-    return x && BoolToBranch(y) || z ? true : false;
-  }
-
-  @export
-  macro TestAndOr3(x: bool, y: bool, z: bool): bool {
-    return x && y || BoolToBranch(z) ? true : false;
+  macro TestAndOr(x: bool, y: bool, z: bool): bool {
+    return x && y || z ? true : false;
   }
 
   @export
   macro TestLogicalOperators() {
-    check(TestAndOr1(true, true, true));
-    check(TestAndOr2(true, true, true));
-    check(TestAndOr3(true, true, true));
-    check(TestAndOr1(true, true, false));
-    check(TestAndOr2(true, true, false));
-    check(TestAndOr3(true, true, false));
-    check(TestAndOr1(true, false, true));
-    check(TestAndOr2(true, false, true));
-    check(TestAndOr3(true, false, true));
-    check(!TestAndOr1(true, false, false));
-    check(!TestAndOr2(true, false, false));
-    check(!TestAndOr3(true, false, false));
-    check(TestAndOr1(false, true, true));
-    check(TestAndOr2(false, true, true));
-    check(TestAndOr3(false, true, true));
-    check(!TestAndOr1(false, true, false));
-    check(!TestAndOr2(false, true, false));
-    check(!TestAndOr3(false, true, false));
-    check(TestAndOr1(false, false, true));
-    check(TestAndOr2(false, false, true));
-    check(TestAndOr3(false, false, true));
-    check(!TestAndOr1(false, false, false));
-    check(!TestAndOr2(false, false, false));
-    check(!TestAndOr3(false, false, false));
-    check(TestOrAnd1(true, true, true));
-    check(TestOrAnd2(true, true, true));
-    check(TestOrAnd3(true, true, true));
-    check(TestOrAnd1(true, true, false));
-    check(TestOrAnd2(true, true, false));
-    check(TestOrAnd3(true, true, false));
-    check(TestOrAnd1(true, false, true));
-    check(TestOrAnd2(true, false, true));
-    check(TestOrAnd3(true, false, true));
-    check(TestOrAnd1(true, false, false));
-    check(TestOrAnd2(true, false, false));
-    check(TestOrAnd3(true, false, false));
-    check(TestOrAnd1(false, true, true));
-    check(TestOrAnd2(false, true, true));
-    check(TestOrAnd3(false, true, true));
-    check(!TestOrAnd1(false, true, false));
-    check(!TestOrAnd2(false, true, false));
-    check(!TestOrAnd3(false, true, false));
-    check(!TestOrAnd1(false, false, true));
-    check(!TestOrAnd2(false, false, true));
-    check(!TestOrAnd3(false, false, true));
-    check(!TestOrAnd1(false, false, false));
-    check(!TestOrAnd2(false, false, false));
-    check(!TestOrAnd3(false, false, false));
+    check(TestAndOr(true, true, true));
+    check(TestAndOr(true, true, false));
+    check(TestAndOr(true, false, true));
+    check(!TestAndOr(true, false, false));
+    check(TestAndOr(false, true, true));
+    check(!TestAndOr(false, true, false));
+    check(TestAndOr(false, false, true));
+    check(!TestAndOr(false, false, false));
+    check(TestOrAnd(true, true, true));
+    check(TestOrAnd(true, true, false));
+    check(TestOrAnd(true, false, true));
+    check(TestOrAnd(true, false, false));
+    check(TestOrAnd(false, true, true));
+    check(!TestOrAnd(false, true, false));
+    check(!TestOrAnd(false, false, true));
+    check(!TestOrAnd(false, false, false));
   }
 
   @export
@@ -688,7 +626,7 @@ namespace test {
   @export
   macro TestQualifiedAccess(implicit context: Context)() {
     const s: Smi = 0;
-    check(!array::IsJSArray(s));
+    check(!Is<JSArray>(s));
   }
 
   @export
@@ -746,14 +684,14 @@ namespace test {
   @export
   macro TestIterator(implicit context: Context)(o: JSReceiver, map: Map) {
     try {
-      const t1: Object = iterator::GetIteratorMethod(o);
+      const t1: JSAny = iterator::GetIteratorMethod(o);
       const t2: iterator::IteratorRecord = iterator::GetIterator(o);
 
-      const _t3: Object = iterator::IteratorStep(t2) otherwise Fail;
-      const _t4: Object = iterator::IteratorStep(t2, map) otherwise Fail;
+      const _t3: JSAny = iterator::IteratorStep(t2) otherwise Fail;
+      const _t4: JSAny = iterator::IteratorStep(t2, map) otherwise Fail;
 
-      const t5: Object = iterator::IteratorValue(o);
-      const _t6: Object = iterator::IteratorValue(o, map);
+      const t5: JSAny = iterator::IteratorValue(o);
+      const _t6: JSAny = iterator::IteratorValue(o, map);
 
       const _t7: JSArray = iterator::IterableToList(t1, t1);
 
@@ -903,6 +841,64 @@ namespace test {
     check(array.b == 9);
   }
 
+  @export
+  macro TestSlices() {
+    const it = TestIterator{count: 3};
+    const a = new FixedArray{map: kFixedArrayMap, length: 3, objects: ...it};
+    check(a.length == 3);
+
+    const oneTwoThree = Convert<Smi>(123);
+    a.objects[0] = oneTwoThree;
+    const firstRef:&Object = & a.objects[0];
+    check(TaggedEqual(* firstRef, oneTwoThree));
+
+    const slice: torque_internal::Slice<Object> = & a.objects;
+    const firstRefAgain:&Object = slice.TryAtIndex(0) otherwise unreachable;
+    check(TaggedEqual(* firstRefAgain, oneTwoThree));
+
+    const threeTwoOne = Convert<Smi>(321);
+    * firstRefAgain = threeTwoOne;
+    check(TaggedEqual(a.objects[0], threeTwoOne));
+
+    // *slice;             // error, not allowed
+    // a.objects;          // error, not allowed
+    // a.objects = slice;  // error, not allowed
+
+    // TODO(gsps): Currently errors, but should be allowed:
+    // const _sameSlice: torque_internal::Slice<Object> = &(*slice);
+    // (*slice)[0] : Smi
+  }
+
+  @export
+  macro TestSliceEnumeration(implicit context: Context)(): Undefined {
+    const fixedArray: FixedArray = AllocateZeroedFixedArray(3);
+    for (let i: intptr = 0; i < 3; i++) {
+      check(UnsafeCast<Smi>(fixedArray.objects[i]) == 0);
+      fixedArray.objects[i] = Convert<Smi>(i) + 3;
+    }
+
+    let slice = & fixedArray.objects;
+    for (let i: intptr = 0; i < slice.length; i++) {
+      let ref = slice.TryAtIndex(i) otherwise unreachable;
+      const value = UnsafeCast<Smi>(* ref);
+      check(value == Convert<Smi>(i) + 3);
+      * ref = value + 4;
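+      // Slot {i} now holds i + 7, which the iterator loop below verifies.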
+    }
+
+    let it = slice.Iterator();
+    let count: Smi = 0;
+    while (true) {
+      let ref = it.Next() otherwise break;
+      const value = UnsafeCast<Smi>(* ref);
+      check(value == count + 7);
+      count++;
+    }
+    check(count == 3);
+    check(it.Empty());
+
+    return Undefined;
+  }
+
   @export
   macro TestStaticAssert() {
     StaticAssert(1 + 2 == 3);
@@ -923,12 +919,12 @@ namespace test {
     const v1 = box.value;
     box.unrelated = 999;
     const v2 = (box.unrelated == 0) ? box.value : box.value;
-    StaticAssert(WordEqual(v1, v2));
+    StaticAssert(TaggedEqual(v1, v2));
 
     box.value = 11;
     const v3 = box.value;
     const eleven: Smi = 11;
-    StaticAssert(WordEqual(v3, eleven));
+    StaticAssert(TaggedEqual(v3, eleven));
   }
 
   @export
@@ -939,8 +935,8 @@ namespace test {
     const u1 = a.objects[box.value + 2];
     const v2 = a.objects[box.value];
     const u2 = a.objects[box.value + 2];
-    StaticAssert(WordEqual(v1, v2));
-    StaticAssert(WordEqual(u1, u2));
+    StaticAssert(TaggedEqual(v1, v2));
+    StaticAssert(TaggedEqual(u1, u2));
   }
 
   @export
@@ -980,8 +976,8 @@ namespace test {
   @export
   macro TestGenericStruct1(): intptr {
     const i: intptr = 123;
-    let box = SBox<intptr>{value: i};
-    let boxbox = SBox<SBox<intptr>>{value: box};
+    let box = SBox{value: i};
+    let boxbox: SBox<SBox<intptr>> = SBox{value: box};
     check(box.value == 123);
     boxbox.value.value *= 2;
     check(boxbox.value.value == 246);
@@ -993,18 +989,43 @@ namespace test {
     const snd: T2;
   }
 
-  macro Swap<T1: type, T2: type>(tuple: TestTuple<T1, T2>):
+  macro TupleSwap<T1: type, T2: type>(tuple: TestTuple<T1, T2>):
       TestTuple<T2, T1> {
-    return TestTuple<T2, T1>{fst: tuple.snd, snd: tuple.fst};
+    return TestTuple{fst: tuple.snd, snd: tuple.fst};
   }
 
   @export
-  macro TestGenericStruct2(): TestTuple<Smi, intptr> {
+  macro TestGenericStruct2():
+      TestTuple<TestTuple<intptr, Smi>, TestTuple<Smi, intptr>> {
     const intptrAndSmi = TestTuple<intptr, Smi>{fst: 1, snd: 2};
-    const smiAndIntptr = Swap<intptr, Smi>(intptrAndSmi);
+    const smiAndIntptr = TupleSwap(intptrAndSmi);
     check(intptrAndSmi.fst == smiAndIntptr.snd);
     check(intptrAndSmi.snd == smiAndIntptr.fst);
-    return smiAndIntptr;
+    const tupleTuple =
+        TestTuple<TestTuple<intptr, Smi>>{fst: intptrAndSmi, snd: smiAndIntptr};
+    return tupleTuple;
+  }
+
+  macro BranchAndWriteResult(x: Smi, box: SmiBox): bool {
+    if (x > 5 || x < 0) {
+      box.value = 1;
+      return true;
+    } else {
+      box.value = 2;
+      return false;
+    }
+  }
+
+  @export
+  macro TestBranchOnBoolOptimization(implicit context: Context)(input: Smi) {
+    const box = NewSmiBox(1);
+    // If the two branches get combined into one, we should be able to determine
+    // the value of {box} statically.
+    if (BranchAndWriteResult(input, box)) {
+      StaticAssert(box.value == 1);
+    } else {
+      StaticAssert(box.value == 2);
+    }
   }
 
 }
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 87013f9fbce6a1..7a379f77e851ac 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -224,6 +224,7 @@ v8_source_set("unittests_sources") {
     "wasm/wasm-compiler-unittest.cc",
     "wasm/wasm-macro-gen-unittest.cc",
     "wasm/wasm-module-builder-unittest.cc",
+    "wasm/wasm-module-sourcemap-unittest.cc",
     "wasm/wasm-opcodes-unittest.cc",
     "wasm/wasm-text-unittest.cc",
     "zone/zone-allocator-unittest.cc",
@@ -301,8 +302,8 @@ v8_source_set("unittests_sources") {
     "../..:v8_for_testing",
     "../..:v8_libbase",
     "../..:v8_libplatform",
-    "../../third_party/inspector_protocol:encoding_test",
     "../../third_party/inspector_protocol:bindings_test",
+    "../../third_party/inspector_protocol:encoding_test",
     "//build/win:default_exe_manifest",
     "//testing/gmock",
     "//testing/gtest",
diff --git a/deps/v8/test/unittests/api/access-check-unittest.cc b/deps/v8/test/unittests/api/access-check-unittest.cc
index 65e20d2510e130..3b63666f4bbd04 100644
--- a/deps/v8/test/unittests/api/access-check-unittest.cc
+++ b/deps/v8/test/unittests/api/access-check-unittest.cc
@@ -27,6 +27,12 @@ MaybeLocal<Value> CompileRun(Isolate* isolate, const char* source) {
   return script->Run(context);
 }
 
+v8::Local<v8::String> v8_str(const char* x) {
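+  // Shorthand for creating a UTF-8 v8::String in the current isolate.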
+  return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x,
+                                 v8::NewStringType::kNormal)
+      .ToLocalChecked();
+}
+
 }  // namespace
 
 TEST_F(AccessCheckTest, GetOwnPropertyDescriptor) {
@@ -42,10 +48,8 @@ TEST_F(AccessCheckTest, GetOwnPropertyDescriptor) {
   Local<FunctionTemplate> setter_template = FunctionTemplate::New(
       isolate(), [](const FunctionCallbackInfo<v8::Value>& info) { FAIL(); });
   setter_template->SetAcceptAnyReceiver(false);
-  global_template->SetAccessorProperty(
-      String::NewFromUtf8(isolate(), "property", NewStringType::kNormal)
-          .ToLocalChecked(),
-      getter_template, setter_template);
+  global_template->SetAccessorProperty(v8_str("property"), getter_template,
+                                       setter_template);
 
   Local<Context> target_context =
       Context::New(isolate(), nullptr, global_template);
@@ -53,10 +57,7 @@ TEST_F(AccessCheckTest, GetOwnPropertyDescriptor) {
       Context::New(isolate(), nullptr, global_template);
 
   accessing_context->Global()
-      ->Set(accessing_context,
-            String::NewFromUtf8(isolate(), "other", NewStringType::kNormal)
-                .ToLocalChecked(),
-            target_context->Global())
+      ->Set(accessing_context, v8_str("other"), target_context->Global())
       .FromJust();
 
   Context::Scope context_scope(accessing_context);
@@ -71,15 +72,118 @@ TEST_F(AccessCheckTest, GetOwnPropertyDescriptor) {
              "    .set.call(other, 42);");
 }
 
-namespace {
-bool failed_access_check_callback_called;
+class AccessRegressionTest : public AccessCheckTest {
+ protected:
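+  // Compiles and runs {script} in {context} and returns the result as a
+  // JSFunction handle; expects the script to evaluate to a function.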
+  i::Handle<i::JSFunction> RetrieveFunctionFrom(Local<Context> context,
+                                                const char* script) {
+    Context::Scope context_scope(context);
+    Local<Value> getter = CompileRun(isolate(), script).ToLocalChecked();
+    EXPECT_TRUE(getter->IsFunction());
+
+    i::Handle<i::JSReceiver> r =
+        Utils::OpenHandle(*Local<Function>::Cast(getter));
+    EXPECT_TRUE(r->IsJSFunction());
+    return i::Handle<i::JSFunction>::cast(r);
+  }
+};
 
-v8::Local<v8::String> v8_str(const char* x) {
-  return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x,
-                                 v8::NewStringType::kNormal)
-      .ToLocalChecked();
+TEST_F(AccessRegressionTest,
+       InstantiatedLazyAccessorPairsHaveCorrectNativeContext) {
+  // The setup creates two contexts and sets an object created
+  // in context 1 on the global of context 2.
+  // The object has an accessor pair {property}. Accessing the
+  // property descriptor of {property} causes instantiation of the
+  // accessor pair. The test checks that the accessor pair has the
+  // correct native context.
+  Local<FunctionTemplate> getter_template = FunctionTemplate::New(
+      isolate(), [](const FunctionCallbackInfo<Value>&) { FAIL(); });
+  Local<FunctionTemplate> setter_template = FunctionTemplate::New(
+      isolate(), [](const FunctionCallbackInfo<v8::Value>&) { FAIL(); });
+
+  Local<ObjectTemplate> object_template = ObjectTemplate::New(isolate());
+  object_template->SetAccessorProperty(v8_str("property"), getter_template,
+                                       setter_template);
+
+  Local<Context> context1 = Context::New(isolate(), nullptr);
+  Local<Context> context2 = Context::New(isolate(), nullptr);
+
+  Local<Object> object =
+      object_template->NewInstance(context1).ToLocalChecked();
+  context2->Global()
+      ->Set(context2, v8_str("object_from_context1"), object)
+      .Check();
+
+  i::Handle<i::JSFunction> getter = RetrieveFunctionFrom(
+      context2,
+      "Object.getOwnPropertyDescriptor(object_from_context1, 'property').get");
+
+  ASSERT_EQ(getter->native_context(), *Utils::OpenHandle(*context1));
+}
+
+// Regression test for https://crbug.com/986063.
+TEST_F(AccessRegressionTest,
+       InstantiatedLazyAccessorPairsHaveCorrectNativeContextDebug) {
+  // The setup creates two contexts and installs an object "object"
+  // on the global this for each context.
+  // The object consists of:
+  //    - an accessor pair "property".
+  //    - a normal function "breakfn".
+  //
+  // The test sets a break point on {object.breakfn} in the first context.
+  // This forces instantiation of the JSFunction for the {object.property}
+  // accessor pair. The test verifies afterwards that the respective
+  // JSFunctions of the getters have the correct native context.
+
+  Local<FunctionTemplate> getter_template = FunctionTemplate::New(
+      isolate(), [](const FunctionCallbackInfo<Value>&) { FAIL(); });
+  Local<FunctionTemplate> setter_template = FunctionTemplate::New(
+      isolate(), [](const FunctionCallbackInfo<v8::Value>&) { FAIL(); });
+  Local<FunctionTemplate> break_template = FunctionTemplate::New(
+      isolate(), [](const FunctionCallbackInfo<v8::Value>&) { FAIL(); });
+
+  Local<Context> context1 = Context::New(isolate(), nullptr);
+  Local<Context> context2 = Context::New(isolate(), nullptr);
+
+  Local<ObjectTemplate> object_template = ObjectTemplate::New(isolate());
+  object_template->Set(isolate(), "breakfn", break_template);
+  object_template->SetAccessorProperty(v8_str("property"), getter_template,
+                                       setter_template);
+
+  Local<Object> object1 =
+      object_template->NewInstance(context1).ToLocalChecked();
+  EXPECT_TRUE(
+      context1->Global()->Set(context1, v8_str("object"), object1).IsJust());
+
+  Local<Object> object2 =
+      object_template->NewInstance(context2).ToLocalChecked();
+  EXPECT_TRUE(
+      context2->Global()->Set(context2, v8_str("object"), object2).IsJust());
+
+  // Force instantiation of the JSFunction for the getter and setter
+  // of {object.property} by setting a break point on {object.breakfn}.
+  {
+    Context::Scope context_scope(context1);
+    i::Isolate* iso = reinterpret_cast<i::Isolate*>(isolate());
+    i::Handle<i::JSFunction> break_fn =
+        RetrieveFunctionFrom(context1, "object.breakfn");
+
+    int id;
+    iso->debug()->SetBreakpointForFunction(i::handle(break_fn->shared(), iso),
+                                           iso->factory()->empty_string(), &id);
+  }
+
+  i::Handle<i::JSFunction> getter_c1 = RetrieveFunctionFrom(
+      context1, "Object.getOwnPropertyDescriptor(object, 'property').get");
+  i::Handle<i::JSFunction> getter_c2 = RetrieveFunctionFrom(
+      context2, "Object.getOwnPropertyDescriptor(object, 'property').get");
+
+  ASSERT_EQ(getter_c1->native_context(), *Utils::OpenHandle(*context1));
+  ASSERT_EQ(getter_c2->native_context(), *Utils::OpenHandle(*context2));
 }
 
+namespace {
+bool failed_access_check_callback_called;
+
 class AccessCheckTestConsoleDelegate : public debug::ConsoleDelegate {
  public:
   void Log(const debug::ConsoleCallArguments& args,
diff --git a/deps/v8/test/unittests/base/atomic-utils-unittest.cc b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
index 442257eff8f7de..7ef0e948d7a2ff 100644
--- a/deps/v8/test/unittests/base/atomic-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
@@ -105,7 +105,7 @@ TEST(AsAtomic8, CompareAndSwap_Concurrent) {
     }
   }
   for (int i = 0; i < kThreadCount; i++) {
-    threads[i].Start();
+    CHECK(threads[i].Start());
   }
 
   for (int i = 0; i < kThreadCount; i++) {
@@ -179,7 +179,7 @@ TEST(AsAtomicWord, SetBits_Concurrent) {
     threads[i].Initialize(&word, i * 2);
   }
   for (int i = 0; i < kThreadCount; i++) {
-    threads[i].Start();
+    CHECK(threads[i].Start());
   }
   for (int i = 0; i < kThreadCount; i++) {
     threads[i].Join();
diff --git a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
index 6206569433428a..375f17ad2fb90c 100644
--- a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
@@ -64,7 +64,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
     MutexGuard lock_guard(&threads[n].mutex_);
     EXPECT_FALSE(threads[n].running_);
     EXPECT_FALSE(threads[n].finished_);
-    threads[n].Start();
+    CHECK(threads[n].Start());
     // Wait for nth thread to start.
     while (!threads[n].running_) {
       threads[n].cv_.Wait(&threads[n].mutex_);
@@ -153,7 +153,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
     for (int n = 0; n < kThreadCount; ++n) {
       EXPECT_FALSE(threads[n].running_);
       EXPECT_FALSE(threads[n].finished_);
-      threads[n].Start();
+      CHECK(threads[n].Start());
     }
   }
 
@@ -281,7 +281,7 @@ TEST(ConditionVariable, LoopIncrement) {
 
     // Start all threads.
     for (int n = thread_count - 1; n >= 0; --n) {
-      threads[n]->Start();
+      CHECK(threads[n]->Start());
     }
 
     // Join and cleanup all threads.
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index d31d85447c0ae3..27154b3c24cfd6 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -79,7 +79,7 @@ class ThreadLocalStorageTest : public Thread, public ::testing::Test {
 
 TEST_F(ThreadLocalStorageTest, DoTest) {
   Run();
-  Start();
+  CHECK(Start());
   Join();
 }
 
diff --git a/deps/v8/test/unittests/base/platform/semaphore-unittest.cc b/deps/v8/test/unittests/base/platform/semaphore-unittest.cc
index bd4a00fe95b815..3cddc565c73999 100644
--- a/deps/v8/test/unittests/base/platform/semaphore-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/semaphore-unittest.cc
@@ -94,8 +94,8 @@ TEST(Semaphore, ProducerConsumer) {
   Semaphore used_space(0);
   ProducerThread producer_thread(buffer, &free_space, &used_space);
   ConsumerThread consumer_thread(buffer, &free_space, &used_space);
-  producer_thread.Start();
-  consumer_thread.Start();
+  CHECK(producer_thread.Start());
+  CHECK(consumer_thread.Start());
   producer_thread.Join();
   consumer_thread.Join();
 }
@@ -106,8 +106,8 @@ TEST(Semaphore, WaitAndSignal) {
   WaitAndSignalThread t1(&semaphore);
   WaitAndSignalThread t2(&semaphore);
 
-  t1.Start();
-  t2.Start();
+  CHECK(t1.Start());
+  CHECK(t2.Start());
 
   // Make something available.
   semaphore.Signal();
diff --git a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
index 287a11442bc853..df387d3d94dde2 100644
--- a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
@@ -14,6 +14,7 @@
 
 using ::testing::_;
 using v8::internal::compiler::Node;
+using v8::internal::compiler::TNode;
 
 namespace c = v8::internal::compiler;
 
@@ -29,11 +30,11 @@ CodeStubAssemblerTestState::CodeStubAssemblerTestState(
 TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) {
   CodeStubAssemblerTestState state(this);
   CodeStubAssemblerForTest m(&state);
-  Node* value = m.Int32Constant(44);
+  TNode<IntPtrT> value = m.IntPtrConstant(44);
   EXPECT_THAT(m.SmiTag(value),
               IsBitcastWordToTaggedSigned(c::IsIntPtrConstant(
                   static_cast<intptr_t>(44) << (kSmiShiftSize + kSmiTagSize))));
-  EXPECT_THAT(m.SmiUntag(value),
+  EXPECT_THAT(m.SmiUntag(m.ReinterpretCast<Smi>(value)),
               c::IsIntPtrConstant(static_cast<intptr_t>(44) >>
                                   (kSmiShiftSize + kSmiTagSize)));
 }
@@ -42,9 +43,9 @@ TARGET_TEST_F(CodeStubAssemblerTest, IntPtrMax) {
   CodeStubAssemblerTestState state(this);
   CodeStubAssemblerForTest m(&state);
   {
-    Node* a = m.IntPtrConstant(100);
-    Node* b = m.IntPtrConstant(1);
-    Node* z = m.IntPtrMax(a, b);
+    TNode<IntPtrT> a = m.IntPtrConstant(100);
+    TNode<IntPtrT> b = m.IntPtrConstant(1);
+    TNode<IntPtrT> z = m.IntPtrMax(a, b);
     EXPECT_THAT(z, c::IsIntPtrConstant(100));
   }
 }
@@ -53,9 +54,9 @@ TARGET_TEST_F(CodeStubAssemblerTest, IntPtrMin) {
   CodeStubAssemblerTestState state(this);
   CodeStubAssemblerForTest m(&state);
   {
-    Node* a = m.IntPtrConstant(100);
-    Node* b = m.IntPtrConstant(1);
-    Node* z = m.IntPtrMin(a, b);
+    TNode<IntPtrT> a = m.IntPtrConstant(100);
+    TNode<IntPtrT> b = m.IntPtrConstant(1);
+    TNode<IntPtrT> z = m.IntPtrMin(a, b);
     EXPECT_THAT(z, c::IsIntPtrConstant(1));
   }
 }
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index bc74e6fe197242..8b15811d3605a4 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -92,7 +92,7 @@ class CompilerDispatcherTest : public TestWithNativeContext {
         ast_node_factory.NewFunctionLiteral(
             function_name, function_scope, statements, -1, -1, -1,
             FunctionLiteral::kNoDuplicateParameters,
-            FunctionLiteral::kAnonymousExpression,
+            FunctionSyntaxKind::kAnonymousExpression,
             FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
             shared->function_literal_id(), nullptr);
 
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index ae2e42b61f85e9..ff59e79a60e0b8 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -27,8 +27,7 @@ namespace {
 class BlockingCompilationJob : public OptimizedCompilationJob {
  public:
   BlockingCompilationJob(Isolate* isolate, Handle<JSFunction> function)
-      : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_,
-                                "BlockingCompilationJob",
+      : OptimizedCompilationJob(&info_, "BlockingCompilationJob",
                                 State::kReadyToExecute),
         shared_(function->shared(), isolate),
         zone_(isolate->allocator(), ZONE_NAME),
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index a26a8d9192d388..bb4848db6c291f 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -3225,54 +3225,6 @@ TEST_F(InstructionSelectorTest, Float64Neg) {
   EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
 }
 
-TEST_F(InstructionSelectorTest, StackCheck0) {
-  StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
-  Node* const sp = m.LoadStackPointer();
-  Node* const stack_limit = m.Load(MachineType::Int32(), m.Parameter(0));
-  Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
-
-  RawMachineLabel if_true, if_false;
-  m.Branch(interrupt, &if_true, &if_false);
-
-  m.Bind(&if_true);
-  m.Return(m.Int32Constant(1));
-
-  m.Bind(&if_false);
-  m.Return(m.Int32Constant(0));
-
-  Stream s = m.Build();
-
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kArmLdr, s[0]->arch_opcode());
-  EXPECT_EQ(kArmCmp, s[1]->arch_opcode());
-  EXPECT_EQ(4U, s[1]->InputCount());
-  EXPECT_EQ(0U, s[1]->OutputCount());
-}
-
-TEST_F(InstructionSelectorTest, StackCheck1) {
-  StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
-  Node* const sp = m.LoadStackPointer();
-  Node* const stack_limit = m.Load(MachineType::Int32(), m.Parameter(0));
-  Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
-
-  RawMachineLabel if_true, if_false;
-  m.Branch(sp_within_limit, &if_true, &if_false);
-
-  m.Bind(&if_true);
-  m.Return(m.Int32Constant(1));
-
-  m.Bind(&if_false);
-  m.Return(m.Int32Constant(0));
-
-  Stream s = m.Build();
-
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kArmLdr, s[0]->arch_opcode());
-  EXPECT_EQ(kArmCmp, s[1]->arch_opcode());
-  EXPECT_EQ(4U, s[1]->InputCount());
-  EXPECT_EQ(0U, s[1]->OutputCount());
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 867f89abfd6b6e..b969d9a278a9aa 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -4571,54 +4571,6 @@ TEST_F(InstructionSelectorTest, CompareFloat64HighGreaterThanOrEqualZero64) {
   EXPECT_EQ(63, s.ToInt32(s[1]->InputAt(1)));
 }
 
-TEST_F(InstructionSelectorTest, StackCheck0) {
-  StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
-  Node* const sp = m.LoadStackPointer();
-  Node* const stack_limit = m.Load(MachineType::Int64(), m.Parameter(0));
-  Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
-
-  RawMachineLabel if_true, if_false;
-  m.Branch(interrupt, &if_true, &if_false);
-
-  m.Bind(&if_true);
-  m.Return(m.Int32Constant(1));
-
-  m.Bind(&if_false);
-  m.Return(m.Int32Constant(0));
-
-  Stream s = m.Build();
-
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kArm64Ldr, s[0]->arch_opcode());
-  EXPECT_EQ(kArm64Cmp, s[1]->arch_opcode());
-  EXPECT_EQ(4U, s[1]->InputCount());
-  EXPECT_EQ(0U, s[1]->OutputCount());
-}
-
-TEST_F(InstructionSelectorTest, StackCheck1) {
-  StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
-  Node* const sp = m.LoadStackPointer();
-  Node* const stack_limit = m.Load(MachineType::Int64(), m.Parameter(0));
-  Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
-
-  RawMachineLabel if_true, if_false;
-  m.Branch(sp_within_limit, &if_true, &if_false);
-
-  m.Bind(&if_true);
-  m.Return(m.Int32Constant(1));
-
-  m.Bind(&if_false);
-  m.Return(m.Int32Constant(0));
-
-  Stream s = m.Build();
-
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kArm64Ldr, s[0]->arch_opcode());
-  EXPECT_EQ(kArm64Cmp, s[1]->arch_opcode());
-  EXPECT_EQ(4U, s[1]->InputCount());
-  EXPECT_EQ(0U, s[1]->OutputCount());
-}
-
 TEST_F(InstructionSelectorTest, ExternalReferenceLoad1) {
   // Test offsets we can use kMode_Root for.
   const int64_t kOffsets[] = {0, 1, 4, INT32_MIN, INT32_MAX};
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index a48ad1b35975ab..c29979c600c112 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -25,7 +25,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
     InstructionSelector::Features features,
     InstructionSelectorTest::StreamBuilderMode mode,
     InstructionSelector::SourcePositionMode source_position_mode) {
-  Schedule* schedule = Export();
+  Schedule* schedule = ExportForTest();
   if (FLAG_trace_turbo) {
     StdoutStream{} << "=== Schedule before instruction selection ==="
                    << std::endl
@@ -40,11 +40,13 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
                                instruction_blocks);
   SourcePositionTable source_position_table(graph());
   TickCounter tick_counter;
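+  // New out-parameter of the InstructionSelector constructor; its value is
+  // not inspected by these tests.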
+  size_t max_unoptimized_frame_height = 0;
   InstructionSelector selector(
       test_->zone(), node_count, &linkage, &sequence, schedule,
       &source_position_table, nullptr,
       InstructionSelector::kEnableSwitchJumpTable, &tick_counter,
-      source_position_mode, features, InstructionSelector::kDisableScheduling,
+      &max_unoptimized_frame_height, source_position_mode, features,
+      InstructionSelector::kDisableScheduling,
       InstructionSelector::kEnableRootsRelativeAddressing,
       PoisoningMitigationLevel::kPoisonAll);
   selector.SelectInstructions();
diff --git a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
index 34fb84957c5c0c..a231539f6fc8d8 100644
--- a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
@@ -39,7 +39,6 @@ class BranchEliminationTest : public GraphTest {
   MachineOperatorBuilder machine_;
 };
 
-
 TEST_F(BranchEliminationTest, NestedBranchSameTrue) {
   // { return (x ? (x ? 1 : 2) : 3; }
   // should be reduced to
@@ -80,7 +79,6 @@ TEST_F(BranchEliminationTest, NestedBranchSameTrue) {
                     IsInt32Constant(2), IsMerge(outer_if_true, IsDead())));
 }
 
-
 TEST_F(BranchEliminationTest, NestedBranchSameFalse) {
   // { return (x ? 1 : (x ? 2 : 3); }
   // should be reduced to
@@ -122,10 +120,9 @@ TEST_F(BranchEliminationTest, NestedBranchSameFalse) {
                     IsInt32Constant(3), IsMerge(IsDead(), outer_if_false)));
 }
 
-
 TEST_F(BranchEliminationTest, BranchAfterDiamond) {
   // { var y = x ? 1 : 2; return y + x ? 3 : 4; }
-  // should not be reduced.
+  // The second branch's condition should be replaced with a phi.
   Node* condition = Parameter(0);
 
   Node* branch1 =
@@ -136,7 +133,7 @@ TEST_F(BranchEliminationTest, BranchAfterDiamond) {
   Node* phi1 =
       graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
                        Int32Constant(1), Int32Constant(2), merge1);
-
+  // The second branch uses the same condition.
   Node* branch2 = graph()->NewNode(common()->Branch(), condition, merge1);
   Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
   Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
@@ -145,7 +142,6 @@ TEST_F(BranchEliminationTest, BranchAfterDiamond) {
       graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
                        Int32Constant(3), Int32Constant(4), merge1);
 
-
   Node* add = graph()->NewNode(machine()->Int32Add(), phi1, phi2);
   Node* zero = graph()->NewNode(common()->Int32Constant(0));
   Node* ret =
@@ -154,13 +150,13 @@ TEST_F(BranchEliminationTest, BranchAfterDiamond) {
 
   Reduce();
 
-  // Outer branch should not be rewritten, the inner branch condition should
-  // be true.
-  EXPECT_THAT(branch1, IsBranch(condition, graph()->start()));
-  EXPECT_THAT(branch2, IsBranch(condition, merge1));
+  // The branch condition for branch2 should be a phi with constants.
+  EXPECT_THAT(branch2,
+              IsBranch(IsPhi(MachineRepresentation::kWord32, IsInt32Constant(1),
+                             IsInt32Constant(0), merge1),
+                       merge1));
 }
 
-
 TEST_F(BranchEliminationTest, BranchInsideLoopSame) {
   // if (x) while (x) { return 2; } else { return 1; }
   // should be rewritten to
@@ -172,7 +168,6 @@ TEST_F(BranchEliminationTest, BranchInsideLoopSame) {
       graph()->NewNode(common()->Branch(), condition, graph()->start());
   Node* outer_if_true = graph()->NewNode(common()->IfTrue(), outer_branch);
 
-
   Node* loop = graph()->NewNode(common()->Loop(1), outer_if_true);
   Node* effect =
       graph()->NewNode(common()->EffectPhi(1), graph()->start(), loop);
diff --git a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
index 0541f68440ef9b..43dfd9876fd9ab 100644
--- a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
@@ -13,6 +13,7 @@
 #include "test/unittests/compiler/node-test-utils.h"
 
 using ::testing::_;
+using ::testing::Eq;
 
 namespace v8 {
 namespace internal {
@@ -28,29 +29,29 @@ TARGET_TEST_F(CodeAssemblerTest, IntPtrAdd) {
   CodeAssemblerForTest m(&state);
   {
     Node* a = m.Parameter(0);
-    Node* b = m.Int32Constant(1);
-    Node* add = m.IntPtrAdd(a, b);
-    EXPECT_THAT(add, IsIntPtrAdd(a, b));
+    TNode<IntPtrT> b = m.IntPtrConstant(1);
+    TNode<WordT> add = m.IntPtrAdd(a, b);
+    EXPECT_THAT(add, IsIntPtrAdd(Eq(a), Eq(b)));
   }
   // x + 0  =>  x
   {
     Node* a = m.Parameter(0);
-    Node* b = m.Int32Constant(0);
-    Node* add = m.IntPtrAdd(a, b);
+    TNode<IntPtrT> b = m.IntPtrConstant(0);
+    TNode<WordT> add = m.IntPtrAdd(a, b);
     EXPECT_THAT(add, a);
   }
   // 0 + x  => x
   {
     Node* a = m.Parameter(0);
-    Node* b = m.Int32Constant(0);
-    Node* add = m.IntPtrAdd(b, a);
+    TNode<IntPtrT> b = m.IntPtrConstant(0);
+    TNode<WordT> add = m.IntPtrAdd(b, a);
     EXPECT_THAT(add, a);
   }
   // CONST_a + CONST_b => CONST_c
   {
-    Node* a = m.IntPtrConstant(22);
-    Node* b = m.IntPtrConstant(33);
-    Node* c = m.IntPtrAdd(a, b);
+    TNode<IntPtrT> a = m.IntPtrConstant(22);
+    TNode<IntPtrT> b = m.IntPtrConstant(33);
+    TNode<IntPtrT> c = m.IntPtrAdd(a, b);
     EXPECT_THAT(c, IsIntPtrConstant(55));
   }
 }
@@ -60,22 +61,22 @@ TARGET_TEST_F(CodeAssemblerTest, IntPtrSub) {
   CodeAssemblerForTest m(&state);
   {
     Node* a = m.Parameter(0);
-    Node* b = m.Int32Constant(1);
-    Node* sub = m.IntPtrSub(a, b);
-    EXPECT_THAT(sub, IsIntPtrSub(a, b));
+    TNode<IntPtrT> b = m.IntPtrConstant(1);
+    TNode<WordT> sub = m.IntPtrSub(a, b);
+    EXPECT_THAT(sub, IsIntPtrSub(Eq(a), Eq(b)));
   }
   // x - 0  => x
   {
     Node* a = m.Parameter(0);
-    Node* b = m.Int32Constant(0);
-    Node* c = m.IntPtrSub(a, b);
+    TNode<IntPtrT> b = m.IntPtrConstant(0);
+    TNode<WordT> c = m.IntPtrSub(a, b);
     EXPECT_THAT(c, a);
   }
   // CONST_a - CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(100);
-    Node* b = m.IntPtrConstant(1);
-    Node* c = m.IntPtrSub(a, b);
+    TNode<IntPtrT> a = m.IntPtrConstant(100);
+    TNode<IntPtrT> b = m.IntPtrConstant(1);
+    TNode<IntPtrT> c = m.IntPtrSub(a, b);
     EXPECT_THAT(c, IsIntPtrConstant(99));
   }
 }
@@ -85,43 +86,43 @@ TARGET_TEST_F(CodeAssemblerTest, IntPtrMul) {
   CodeAssemblerForTest m(&state);
   {
     Node* a = m.Parameter(0);
-    Node* b = m.Int32Constant(100);
-    Node* mul = m.IntPtrMul(a, b);
-    EXPECT_THAT(mul, IsIntPtrMul(a, b));
+    TNode<IntPtrT> b = m.IntPtrConstant(100);
+    TNode<WordT> mul = m.IntPtrMul(a, b);
+    EXPECT_THAT(mul, IsIntPtrMul(Eq(a), Eq(b)));
   }
   // x * 1  => x
   {
     Node* a = m.Parameter(0);
-    Node* b = m.Int32Constant(1);
-    Node* mul = m.IntPtrMul(a, b);
+    TNode<IntPtrT> b = m.IntPtrConstant(1);
+    TNode<WordT> mul = m.IntPtrMul(a, b);
     EXPECT_THAT(mul, a);
   }
   // 1 * x  => x
   {
     Node* a = m.Parameter(0);
-    Node* b = m.Int32Constant(1);
-    Node* mul = m.IntPtrMul(b, a);
+    TNode<IntPtrT> b = m.IntPtrConstant(1);
+    TNode<WordT> mul = m.IntPtrMul(b, a);
     EXPECT_THAT(mul, a);
   }
   // CONST_a * CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(100);
-    Node* b = m.IntPtrConstant(5);
-    Node* c = m.IntPtrMul(a, b);
+    TNode<IntPtrT> a = m.IntPtrConstant(100);
+    TNode<IntPtrT> b = m.IntPtrConstant(5);
+    TNode<IntPtrT> c = m.IntPtrMul(a, b);
     EXPECT_THAT(c, IsIntPtrConstant(500));
   }
   // x * 2^CONST  => x << CONST
   {
     Node* a = m.Parameter(0);
-    Node* b = m.IntPtrConstant(1 << 3);
-    Node* c = m.IntPtrMul(a, b);
+    TNode<IntPtrT> b = m.IntPtrConstant(1 << 3);
+    TNode<WordT> c = m.IntPtrMul(a, b);
     EXPECT_THAT(c, IsWordShl(a, IsIntPtrConstant(3)));
   }
   // 2^CONST * x  => x << CONST
   {
-    Node* a = m.IntPtrConstant(1 << 3);
+    TNode<IntPtrT> a = m.IntPtrConstant(1 << 3);
     Node* b = m.Parameter(0);
-    Node* c = m.IntPtrMul(a, b);
+    TNode<WordT> c = m.IntPtrMul(a, b);
     EXPECT_THAT(c, IsWordShl(b, IsIntPtrConstant(3)));
   }
 }
@@ -169,19 +170,19 @@ TARGET_TEST_F(CodeAssemblerTest, WordShl) {
   CodeAssemblerForTest m(&state);
   {
     Node* a = m.Parameter(0);
-    Node* add = m.WordShl(a, 10);
+    TNode<WordT> add = m.WordShl(a, 10);
     EXPECT_THAT(add, IsWordShl(a, IsIntPtrConstant(10)));
   }
   // x << 0  => x
   {
     Node* a = m.Parameter(0);
-    Node* add = m.WordShl(a, 0);
+    TNode<WordT> add = m.WordShl(a, 0);
     EXPECT_THAT(add, a);
   }
   // CONST_a << CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(1024);
-    Node* shl = m.WordShl(a, 2);
+    TNode<IntPtrT> a = m.IntPtrConstant(1024);
+    TNode<WordT> shl = m.WordShl(a, 2);
     EXPECT_THAT(shl, IsIntPtrConstant(4096));
   }
 }
@@ -191,25 +192,25 @@ TARGET_TEST_F(CodeAssemblerTest, WordShr) {
   CodeAssemblerForTest m(&state);
   {
     Node* a = m.Parameter(0);
-    Node* shr = m.WordShr(a, 10);
+    TNode<WordT> shr = m.WordShr(a, 10);
     EXPECT_THAT(shr, IsWordShr(a, IsIntPtrConstant(10)));
   }
   // x >> 0  => x
   {
     Node* a = m.Parameter(0);
-    Node* shr = m.WordShr(a, 0);
+    TNode<WordT> shr = m.WordShr(a, 0);
     EXPECT_THAT(shr, a);
   }
   // +CONST_a >> CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(4096);
-    Node* shr = m.WordShr(a, 2);
+    TNode<IntPtrT> a = m.IntPtrConstant(4096);
+    TNode<IntPtrT> shr = m.WordShr(a, 2);
     EXPECT_THAT(shr, IsIntPtrConstant(1024));
   }
   // -CONST_a >> CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(-1234);
-    Node* shr = m.WordShr(a, 2);
+    TNode<IntPtrT> a = m.IntPtrConstant(-1234);
+    TNode<IntPtrT> shr = m.WordShr(a, 2);
     EXPECT_THAT(shr, IsIntPtrConstant(static_cast<uintptr_t>(-1234) >> 2));
   }
 }
@@ -219,25 +220,25 @@ TARGET_TEST_F(CodeAssemblerTest, WordSar) {
   CodeAssemblerForTest m(&state);
   {
     Node* a = m.Parameter(0);
-    Node* sar = m.WordSar(a, m.IntPtrConstant(10));
+    TNode<WordT> sar = m.WordSar(a, m.IntPtrConstant(10));
     EXPECT_THAT(sar, IsWordSar(a, IsIntPtrConstant(10)));
   }
   // x >>> 0  => x
   {
     Node* a = m.Parameter(0);
-    Node* sar = m.WordSar(a, m.IntPtrConstant(0));
+    TNode<WordT> sar = m.WordSar(a, m.IntPtrConstant(0));
     EXPECT_THAT(sar, a);
   }
   // +CONST_a >>> CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(4096);
-    Node* sar = m.WordSar(a, m.IntPtrConstant(2));
+    TNode<IntPtrT> a = m.IntPtrConstant(4096);
+    TNode<IntPtrT> sar = m.WordSar(a, m.IntPtrConstant(2));
     EXPECT_THAT(sar, IsIntPtrConstant(1024));
   }
   // -CONST_a >>> CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(-1234);
-    Node* sar = m.WordSar(a, m.IntPtrConstant(2));
+    TNode<IntPtrT> a = m.IntPtrConstant(-1234);
+    TNode<IntPtrT> sar = m.WordSar(a, m.IntPtrConstant(2));
     EXPECT_THAT(sar, IsIntPtrConstant(static_cast<intptr_t>(-1234) >> 2));
   }
 }
@@ -247,25 +248,25 @@ TARGET_TEST_F(CodeAssemblerTest, WordOr) {
   CodeAssemblerForTest m(&state);
   {
     Node* a = m.Parameter(0);
-    Node* z = m.WordOr(a, m.IntPtrConstant(8));
+    TNode<WordT> z = m.WordOr(a, m.IntPtrConstant(8));
     EXPECT_THAT(z, IsWordOr(a, IsIntPtrConstant(8)));
   }
   // x | 0  => x
   {
     Node* a = m.Parameter(0);
-    Node* z = m.WordOr(a, m.IntPtrConstant(0));
+    TNode<WordT> z = m.WordOr(a, m.IntPtrConstant(0));
     EXPECT_THAT(z, a);
   }
   // 0 | x  => x
   {
     Node* a = m.Parameter(0);
-    Node* z = m.WordOr(m.IntPtrConstant(0), a);
+    TNode<WordT> z = m.WordOr(m.IntPtrConstant(0), a);
     EXPECT_THAT(z, a);
   }
   // CONST_a | CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(3);
-    Node* b = m.WordOr(a, m.IntPtrConstant(7));
+    TNode<IntPtrT> a = m.IntPtrConstant(3);
+    TNode<WordT> b = m.WordOr(a, m.IntPtrConstant(7));
     EXPECT_THAT(b, IsIntPtrConstant(7));
   }
 }
@@ -275,13 +276,13 @@ TARGET_TEST_F(CodeAssemblerTest, WordAnd) {
   CodeAssemblerForTest m(&state);
   {
     Node* a = m.Parameter(0);
-    Node* z = m.WordAnd(a, m.IntPtrConstant(8));
+    TNode<WordT> z = m.WordAnd(a, m.IntPtrConstant(8));
     EXPECT_THAT(z, IsWordAnd(a, IsIntPtrConstant(8)));
   }
   // CONST_a & CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(3);
-    Node* b = m.WordAnd(a, m.IntPtrConstant(7));
+    TNode<IntPtrT> a = m.IntPtrConstant(3);
+    TNode<IntPtrT> b = m.WordAnd(a, m.IntPtrConstant(7));
     EXPECT_THAT(b, IsIntPtrConstant(3));
   }
 }
@@ -291,13 +292,13 @@ TARGET_TEST_F(CodeAssemblerTest, WordXor) {
   CodeAssemblerForTest m(&state);
   {
     Node* a = m.Parameter(0);
-    Node* z = m.WordXor(a, m.IntPtrConstant(8));
+    TNode<WordT> z = m.WordXor(a, m.IntPtrConstant(8));
     EXPECT_THAT(z, IsWordXor(a, IsIntPtrConstant(8)));
   }
   // CONST_a ^ CONST_b  => CONST_c
   {
-    Node* a = m.IntPtrConstant(3);
-    Node* b = m.WordXor(a, m.IntPtrConstant(7));
+    TNode<IntPtrT> a = m.IntPtrConstant(3);
+    TNode<WordT> b = m.WordXor(a, m.IntPtrConstant(7));
     EXPECT_THAT(b, IsIntPtrConstant(4));
   }
 }
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 485df8e4014880..ee6b7c02a3e92f 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -23,7 +23,7 @@ GraphTest::GraphTest(int num_parameters)
       node_origins_(&graph_) {
   graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
   graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
-  broker()->SetNativeContextRef();
+  broker()->SetTargetNativeContextRef(isolate()->native_context());
 }
 
 GraphTest::~GraphTest() = default;
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 8851a6a2dfa869..ba6d3f299eb8e4 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -836,58 +836,6 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
   EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
 }
 
-TEST_F(InstructionSelectorTest, StackCheck0) {
-  ExternalReference js_stack_limit =
-      ExternalReference::Create(isolate()->stack_guard()->address_of_jslimit());
-  StreamBuilder m(this, MachineType::Int32());
-  Node* const sp = m.LoadStackPointer();
-  Node* const stack_limit =
-      m.Load(MachineType::Pointer(), m.ExternalConstant(js_stack_limit));
-  Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
-
-  RawMachineLabel if_true, if_false;
-  m.Branch(interrupt, &if_true, &if_false);
-
-  m.Bind(&if_true);
-  m.Return(m.Int32Constant(1));
-
-  m.Bind(&if_false);
-  m.Return(m.Int32Constant(0));
-
-  Stream s = m.Build();
-
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kIA32Cmp, s[0]->arch_opcode());
-  EXPECT_EQ(4U, s[0]->InputCount());
-  EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
-TEST_F(InstructionSelectorTest, StackCheck1) {
-  ExternalReference js_stack_limit =
-      ExternalReference::Create(isolate()->stack_guard()->address_of_jslimit());
-  StreamBuilder m(this, MachineType::Int32());
-  Node* const sp = m.LoadStackPointer();
-  Node* const stack_limit =
-      m.Load(MachineType::Pointer(), m.ExternalConstant(js_stack_limit));
-  Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
-
-  RawMachineLabel if_true, if_false;
-  m.Branch(sp_within_limit, &if_true, &if_false);
-
-  m.Bind(&if_true);
-  m.Return(m.Int32Constant(1));
-
-  m.Bind(&if_false);
-  m.Return(m.Int32Constant(0));
-
-  Stream s = m.Build();
-
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kIA32StackCheck, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 84d42b31d06457..52769b09ded304 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -3,6 +3,9 @@
 // found in the LICENSE file.
 
 #include "src/compiler/int64-lowering.h"
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
 #include "src/codegen/signature.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/linkage.h"
@@ -10,7 +13,6 @@
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
 #include "src/compiler/wasm-compiler.h"
-#include "src/objects/objects-inl.h"
 #include "src/wasm/value-type.h"
 #include "src/wasm/wasm-module.h"
 #include "test/unittests/compiler/graph-unittest.h"
@@ -48,6 +50,25 @@ class Int64LoweringTest : public GraphTest {
     lowering.LowerGraph();
   }
 
+  void LowerGraphWithSpecialCase(
+      Node* node, std::unique_ptr<Int64LoweringSpecialCase> special_case,
+      MachineRepresentation rep) {
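+    // Wraps {node} in a Return and runs Int64Lowering with the provided
+    // call-descriptor overrides installed.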
+    Node* zero = graph()->NewNode(common()->Int32Constant(0));
+    Node* ret = graph()->NewNode(common()->Return(), zero, node,
+                                 graph()->start(), graph()->start());
+    NodeProperties::MergeControlToEnd(graph(), common(), ret);
+
+    // Create a signature for the outer wasm<>js call; for these tests we focus
+    // on lowering the special cases rather than the wrapper node at the
+    // JavaScript boundaries.
+    Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
+    sig_builder.AddReturn(rep);
+
+    Int64Lowering lowering(graph(), machine(), common(), zone(),
+                           sig_builder.Build(), std::move(special_case));
+    lowering.LowerGraph();
+  }
+
   void LowerGraph(Node* node, MachineRepresentation return_type,
                   MachineRepresentation rep = MachineRepresentation::kWord32,
                   int num_params = 0) {
@@ -968,6 +989,100 @@ TEST_F(Int64LoweringTest, LoopCycle) {
 
   LowerGraph(load, MachineRepresentation::kWord64);
 }
+
+TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
+  Node* target = Int32Constant(1);
+  Node* context = Int32Constant(2);
+  Node* bigint = Int32Constant(4);
+
+  CallDescriptor* bigint_to_i64_call_descriptor =
+      Linkage::GetStubCallDescriptor(
+          zone(),                   // zone
+          BigIntToI64Descriptor(),  // descriptor
+          BigIntToI64Descriptor()
+              .GetStackParameterCount(),   // stack parameter count
+          CallDescriptor::kNoFlags,        // flags
+          Operator::kNoProperties,         // properties
+          StubCallMode::kCallCodeObject);  // stub call mode
+
+  CallDescriptor* bigint_to_i32_pair_call_descriptor =
+      Linkage::GetStubCallDescriptor(
+          zone(),                       // zone
+          BigIntToI32PairDescriptor(),  // descriptor
+          BigIntToI32PairDescriptor()
+              .GetStackParameterCount(),   // stack parameter count
+          CallDescriptor::kNoFlags,        // flags
+          Operator::kNoProperties,         // properties
+          StubCallMode::kCallCodeObject);  // stub call mode
+
+  auto lowering_special_case = base::make_unique<Int64LoweringSpecialCase>();
+  lowering_special_case->bigint_to_i64_call_descriptor =
+      bigint_to_i64_call_descriptor;
+  lowering_special_case->bigint_to_i32_pair_call_descriptor =
+      bigint_to_i32_pair_call_descriptor;
+
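+  // When lowering to 32 bits, this call should be retargeted at the i32-pair
+  // descriptor, with the two result halves returned as projections.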
+  Node* call_node =
+      graph()->NewNode(common()->Call(bigint_to_i64_call_descriptor), target,
+                       bigint, context, start(), start());
+
+  LowerGraphWithSpecialCase(call_node, std::move(lowering_special_case),
+                            MachineRepresentation::kWord64);
+
+  Capture<Node*> call;
+  Matcher<Node*> call_matcher =
+      IsCall(bigint_to_i32_pair_call_descriptor, target, bigint, context,
+             start(), start());
+
+  EXPECT_THAT(graph()->end()->InputAt(1),
+              IsReturn2(IsProjection(0, AllOf(CaptureEq(&call), call_matcher)),
+                        IsProjection(1, AllOf(CaptureEq(&call), call_matcher)),
+                        start(), start()));
+}
+
+TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseI64ToBigInt) {
+  Node* target = Int32Constant(1);
+  Node* i64 = Int64Constant(value(0));
+
+  CallDescriptor* i64_to_bigint_call_descriptor =
+      Linkage::GetStubCallDescriptor(
+          zone(),                   // zone
+          I64ToBigIntDescriptor(),  // descriptor
+          I64ToBigIntDescriptor()
+              .GetStackParameterCount(),   // stack parameter count
+          CallDescriptor::kNoFlags,        // flags
+          Operator::kNoProperties,         // properties
+          StubCallMode::kCallCodeObject);  // stub call mode
+
+  CallDescriptor* i32_pair_to_bigint_call_descriptor =
+      Linkage::GetStubCallDescriptor(
+          zone(),                       // zone
+          I32PairToBigIntDescriptor(),  // descriptor
+          I32PairToBigIntDescriptor()
+              .GetStackParameterCount(),   // stack parameter count
+          CallDescriptor::kNoFlags,        // flags
+          Operator::kNoProperties,         // properties
+          StubCallMode::kCallCodeObject);  // stub call mode
+
+  auto lowering_special_case = base::make_unique<Int64LoweringSpecialCase>();
+  lowering_special_case->i64_to_bigint_call_descriptor =
+      i64_to_bigint_call_descriptor;
+  lowering_special_case->i32_pair_to_bigint_call_descriptor =
+      i32_pair_to_bigint_call_descriptor;
+
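+  // Lowering should split the i64 argument into its low and high words and
+  // retarget the call at the i32-pair-to-BigInt descriptor.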
+  Node* call = graph()->NewNode(common()->Call(i64_to_bigint_call_descriptor),
+                                target, i64, start(), start());
+
+  LowerGraphWithSpecialCase(call, std::move(lowering_special_case),
+                            MachineRepresentation::kTaggedPointer);
+
+  EXPECT_THAT(
+      graph()->end()->InputAt(1),
+      IsReturn(IsCall(i32_pair_to_bigint_call_descriptor, target,
+                      IsInt32Constant(low_word_value(0)),
+                      IsInt32Constant(high_word_value(0)), start(), start()),
+               start(), start()));
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 3d4e16ac682006..7c062698c41010 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -6,6 +6,7 @@
 
 #include "src/codegen/tick-counter.h"
 #include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/feedback-source.h"
 #include "src/compiler/js-call-reducer.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/simplified-operator.h"
@@ -23,7 +24,6 @@ class JSCallReducerTest : public TypedGraphTest {
  public:
   JSCallReducerTest()
       : TypedGraphTest(3), javascript_(zone()), deps_(broker(), zone()) {
-    broker()->SerializeStandardObjects();
   }
   ~JSCallReducerTest() override = default;
 
@@ -113,7 +113,7 @@ class JSCallReducerTest : public TypedGraphTest {
         ClosureFeedbackCellArray::New(isolate(), shared);
     Handle<FeedbackVector> vector =
         FeedbackVector::New(isolate(), shared, closure_feedback_cell_array);
-    VectorSlotPair feedback(vector, FeedbackSlot(0), UNINITIALIZED);
+    FeedbackSource feedback(vector, FeedbackSlot(0));
     return javascript()->Call(arity, CallFrequency(), feedback,
                               ConvertReceiverMode::kAny,
                               SpeculationMode::kAllowSpeculation);
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 95c03e543f91ab..fb5254903d75c2 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -172,7 +172,8 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
 // JSCreateWithContext
 
 TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
-  Handle<ScopeInfo> scope_info = ScopeInfo::CreateForEmptyFunction(isolate());
+  Handle<ScopeInfo> scope_info =
+      ReadOnlyRoots(isolate()).empty_function_scope_info_handle();
   Node* const object = Parameter(Type::Receiver());
   Node* const context = Parameter(Type::Any());
   Node* const effect = graph()->start();
@@ -192,7 +193,8 @@ TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
 // JSCreateCatchContext
 
 TEST_F(JSCreateLoweringTest, JSCreateCatchContext) {
-  Handle<ScopeInfo> scope_info = ScopeInfo::CreateForEmptyFunction(isolate());
+  Handle<ScopeInfo> scope_info =
+      ReadOnlyRoots(isolate()).empty_function_scope_info_handle();
   Node* const exception = Parameter(Type::Receiver());
   Node* const context = Parameter(Type::Any());
   Node* const effect = graph()->start();
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 3510cd4b74005d..5b4088f28e7ac9 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -37,7 +37,7 @@ class JSIntrinsicLoweringTest : public GraphTest {
                     &machine);
     // TODO(titzer): mock the GraphReducer here for better unit testing.
     GraphReducer graph_reducer(zone(), graph(), tick_counter());
-    JSIntrinsicLowering reducer(&graph_reducer, &jsgraph);
+    JSIntrinsicLowering reducer(&graph_reducer, &jsgraph, broker());
     return reducer.Reduce(node);
   }
 
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 0d7bb946e3e012..0d85253847c130 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -322,12 +322,13 @@ TEST_F(JSTypedLoweringTest, JSLoadContext) {
       Reduction const r2 = Reduce(graph()->NewNode(
           javascript()->LoadContext(1, index, immutable), context, effect));
       ASSERT_TRUE(r2.Changed());
-      EXPECT_THAT(r2.replacement(),
-                  IsLoadField(AccessBuilder::ForContextSlot(index),
-                              IsLoadField(AccessBuilder::ForContextSlot(
-                                              Context::PREVIOUS_INDEX),
-                                          context, effect, graph()->start()),
-                              _, graph()->start()));
+      EXPECT_THAT(
+          r2.replacement(),
+          IsLoadField(AccessBuilder::ForContextSlot(index),
+                      IsLoadField(AccessBuilder::ForContextSlotKnownPointer(
+                                      Context::PREVIOUS_INDEX),
+                                  context, effect, graph()->start()),
+                      _, graph()->start()));
     }
   }
 }
@@ -357,12 +358,13 @@ TEST_F(JSTypedLoweringTest, JSStoreContext) {
           Reduce(graph()->NewNode(javascript()->StoreContext(1, index), value,
                                   context, effect, control));
       ASSERT_TRUE(r2.Changed());
-      EXPECT_THAT(r2.replacement(),
-                  IsStoreField(AccessBuilder::ForContextSlot(index),
-                               IsLoadField(AccessBuilder::ForContextSlot(
-                                               Context::PREVIOUS_INDEX),
-                                           context, effect, graph()->start()),
-                               value, _, control));
+      EXPECT_THAT(
+          r2.replacement(),
+          IsStoreField(AccessBuilder::ForContextSlot(index),
+                       IsLoadField(AccessBuilder::ForContextSlotKnownPointer(
+                                       Context::PREVIOUS_INDEX),
+                                   context, effect, graph()->start()),
+                       value, _, control));
     }
   }
 }
@@ -373,7 +375,7 @@ TEST_F(JSTypedLoweringTest, JSStoreContext) {
 
 
 TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
-  VectorSlotPair feedback;
+  FeedbackSource feedback;
   Handle<Name> name = factory()->length_string();
   Node* const receiver = Parameter(Type::String(), 0);
   Node* const context = UndefinedConstant();
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index 17dc998f6dc285..d86771a8c36648 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -55,8 +55,8 @@ TEST_F(LinkageTailCall, EmptyToEmpty) {
   CommonOperatorBuilder common(zone());
   const Operator* op = common.Call(desc);
   Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
-  EXPECT_TRUE(desc->CanTailCall(node));
   const CallDescriptor* callee = CallDescriptorOf(node->op());
+  EXPECT_TRUE(desc->CanTailCall(callee));
   int stack_param_delta = callee->GetStackParameterDelta(desc);
   EXPECT_EQ(0, stack_param_delta);
 }
@@ -74,7 +74,7 @@ TEST_F(LinkageTailCall, SameReturn) {
   CommonOperatorBuilder common(zone());
   const Operator* op = common.Call(desc2);
   Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   EXPECT_EQ(0, stack_param_delta);
 }
@@ -94,7 +94,7 @@ TEST_F(LinkageTailCall, DifferingReturn) {
   CommonOperatorBuilder common(zone());
   const Operator* op = common.Call(desc2);
   Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
-  EXPECT_TRUE(!desc1->CanTailCall(node));
+  EXPECT_FALSE(desc1->CanTailCall(CallDescriptorOf(node->op())));
 }
 
 
@@ -113,7 +113,7 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCallee) {
   CommonOperatorBuilder common(zone());
   const Operator* op = common.Call(desc2);
   Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   EXPECT_EQ(0, stack_param_delta);
 }
@@ -134,7 +134,7 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCaller) {
   CommonOperatorBuilder common(zone());
   const Operator* op = common.Call(desc2);
   Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   EXPECT_EQ(0, stack_param_delta);
 }
@@ -155,7 +155,7 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
   CommonOperatorBuilder common(zone());
   const Operator* op = common.Call(desc2);
   Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   // We might need to add one slot of padding to the callee arguments.
   int expected = kPadArguments ? 2 : 1;
@@ -178,7 +178,7 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
   CommonOperatorBuilder common(zone());
   const Operator* op = common.Call(desc2);
   Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   // We might need to drop one slot of padding from the caller's arguments.
   int expected = kPadArguments ? -2 : -1;
@@ -206,7 +206,7 @@ TEST_F(LinkageTailCall, MatchingStackParameters) {
   const Operator* op = common.Call(desc2);
   Node* const node =
       Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   EXPECT_EQ(0, stack_param_delta);
 }
@@ -232,7 +232,7 @@ TEST_F(LinkageTailCall, NonMatchingStackParameters) {
   const Operator* op = common.Call(desc2);
   Node* const node =
       Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   EXPECT_EQ(0, stack_param_delta);
 }
@@ -259,7 +259,7 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegisters) {
   const Operator* op = common.Call(desc2);
   Node* const node =
       Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   EXPECT_EQ(0, stack_param_delta);
 }
@@ -287,7 +287,7 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegisters) {
   const Operator* op = common.Call(desc2);
   Node* const node =
       Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   EXPECT_EQ(0, stack_param_delta);
 }
@@ -315,7 +315,7 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
   const Operator* op = common.Call(desc2);
   Node* const node =
       Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   // We might need to add one slot of padding to the callee arguments.
   int expected = kPadArguments ? 0 : -1;
@@ -345,7 +345,7 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
   const Operator* op = common.Call(desc2);
   Node* const node =
       Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
-  EXPECT_TRUE(desc1->CanTailCall(node));
+  EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
   int stack_param_delta = desc2->GetStackParameterDelta(desc1);
   // We might need to drop one slot of padding from the caller's arguments.
   int expected = kPadArguments ? 0 : 1;
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index 24fc6a31c7dea8..d0acbf341c37ae 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -245,7 +245,7 @@ const PureOperator kPureOperators[] = {
     PURE(Float64Equal, 2, 0, 1),              // --
     PURE(Float64LessThan, 2, 0, 1),           // --
     PURE(Float64LessThanOrEqual, 2, 0, 1),    // --
-    PURE(LoadStackPointer, 0, 0, 1),          // --
+    PURE(StackPointerGreaterThan, 1, 0, 1),   // --
     PURE(Float64ExtractLowWord32, 1, 0, 1),   // --
     PURE(Float64ExtractHighWord32, 1, 0, 1),  // --
     PURE(Float64InsertLowWord32, 2, 0, 1),    // --
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index fc6f1d550099d5..6fa4ce0cf07981 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -2204,6 +2204,7 @@ IS_UNOP_MATCHER(ChangeInt32ToInt64)
 IS_UNOP_MATCHER(ChangeUint32ToFloat64)
 IS_UNOP_MATCHER(ChangeUint32ToUint64)
 IS_UNOP_MATCHER(ChangeCompressedToTagged)
+IS_UNOP_MATCHER(ChangeCompressedPointerToTaggedPointer)
 IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
 IS_UNOP_MATCHER(TruncateInt64ToInt32)
 IS_UNOP_MATCHER(Float32Abs)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index be8d67cb354aae..a71aff913b3fdd 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -427,6 +427,8 @@ Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsChangeCompressedToTagged(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeCompressedPointerToTaggedPointer(
+    const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsFloat32Abs(const Matcher<Node*>& input_matcher);
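
The new IsChangeCompressedPointerToTaggedPointer matcher follows the existing IS_UNOP_MATCHER stamp-out pattern: one macro invocation per opcode, yielding a factory that matches a node's opcode plus a matcher for its single input. A simplified, self-contained rendition (the real helpers build gmock matchers, not lambdas):

```cpp
#include <functional>
#include <string>

struct Node {
  std::string opcode;
  Node* input;
};
using NodeMatcher = std::function<bool(Node*)>;

// Simplified stamp-out: match the opcode name, then delegate to the
// matcher for the node's single input.
#define IS_UNOP_MATCHER(Name)                                   \
  NodeMatcher Is##Name(NodeMatcher input_matcher) {             \
    return [input_matcher](Node* n) {                           \
      return n != nullptr && n->opcode == #Name &&              \
             input_matcher(n->input);                           \
    };                                                          \
  }

IS_UNOP_MATCHER(ChangeCompressedPointerToTaggedPointer)
```
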
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index 76fbc4a368f21d..9655fc70b2cc95 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/redundancy-elimination.h"
 #include "src/codegen/tick-counter.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/feedback-source.h"
 #include "test/unittests/compiler/graph-reducer-unittest.h"
 #include "test/unittests/compiler/graph-unittest.h"
 #include "test/unittests/compiler/node-test-utils.h"
@@ -40,26 +41,24 @@ class RedundancyEliminationTest : public GraphTest {
         ClosureFeedbackCellArray::New(isolate(), shared);
     Handle<FeedbackVector> feedback_vector =
         FeedbackVector::New(isolate(), shared, closure_feedback_cell_array);
-    vector_slot_pairs_.push_back(VectorSlotPair());
-    vector_slot_pairs_.push_back(
-        VectorSlotPair(feedback_vector, slot1, UNINITIALIZED));
-    vector_slot_pairs_.push_back(
-        VectorSlotPair(feedback_vector, slot2, UNINITIALIZED));
+    vector_slot_pairs_.push_back(FeedbackSource());
+    vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot1));
+    vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot2));
   }
   ~RedundancyEliminationTest() override = default;
 
  protected:
   Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
 
-  std::vector<VectorSlotPair> const& vector_slot_pairs() const {
+  std::vector<FeedbackSource> const& vector_slot_pairs() const {
     return vector_slot_pairs_;
   }
   SimplifiedOperatorBuilder* simplified() { return &simplified_; }
 
  private:
   NiceMock<MockAdvancedReducerEditor> editor_;
-  std::vector<VectorSlotPair> vector_slot_pairs_;
-  VectorSlotPair feedback2_;
+  std::vector<FeedbackSource> vector_slot_pairs_;
+  FeedbackSource feedback2_;
   RedundancyElimination reducer_;
   SimplifiedOperatorBuilder simplified_;
 };
@@ -88,8 +87,8 @@ const NumberOperationHint kNumberOperationHints[] = {
 // CheckBounds
 
 TEST_F(RedundancyEliminationTest, CheckBounds) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* index = Parameter(0);
       Node* length = Parameter(1);
       Node* effect = graph()->start();
@@ -114,8 +113,8 @@ TEST_F(RedundancyEliminationTest, CheckBounds) {
 // CheckNumber
 
 TEST_F(RedundancyEliminationTest, CheckNumberSubsumedByCheckSmi) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -201,7 +200,7 @@ TEST_F(RedundancyEliminationTest,
 
 TEST_F(RedundancyEliminationTest,
        CheckStringSubsumedByCheckInternalizedString) {
-  TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
     Node* value = Parameter(0);
     Node* effect = graph()->start();
     Node* control = graph()->start();
@@ -245,8 +244,8 @@ TEST_F(RedundancyEliminationTest, CheckSymbol) {
 // CheckedFloat64ToInt32
 
 TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt32) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
         Node* value = Parameter(0);
         Node* effect = graph()->start();
@@ -274,8 +273,8 @@ TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt32) {
 // CheckedFloat64ToInt64
 
 TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt64) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
         Node* value = Parameter(0);
         Node* effect = graph()->start();
@@ -306,8 +305,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt32ToCompressedSigned) {
   if (!COMPRESS_POINTERS_BOOL) {
     return;
   }
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -333,8 +332,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt32ToCompressedSigned) {
 // CheckedInt32ToTaggedSigned
 
 TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -360,8 +359,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
 // CheckedInt64ToInt32
 
 TEST_F(RedundancyEliminationTest, CheckedInt64ToInt32) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -385,8 +384,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt64ToInt32) {
 // CheckedInt64ToTaggedSigned
 
 TEST_F(RedundancyEliminationTest, CheckedInt64ToTaggedSigned) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -412,8 +411,8 @@ TEST_F(RedundancyEliminationTest, CheckedInt64ToTaggedSigned) {
 // CheckedTaggedSignedToInt32
 
 TEST_F(RedundancyEliminationTest, CheckedTaggedSignedToInt32) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -439,8 +438,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedSignedToInt32) {
 // CheckedTaggedToFloat64
 
 TEST_F(RedundancyEliminationTest, CheckedTaggedToFloat64) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
         Node* value = Parameter(0);
         Node* effect = graph()->start();
@@ -466,8 +465,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToFloat64) {
 
 TEST_F(RedundancyEliminationTest,
        CheckedTaggedToFloat64SubsubmedByCheckedTaggedToFloat64) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -497,8 +496,8 @@ TEST_F(RedundancyEliminationTest,
 // CheckedTaggedToInt32
 
 TEST_F(RedundancyEliminationTest, CheckedTaggedToInt32) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
         Node* value = Parameter(0);
         Node* effect = graph()->start();
@@ -524,8 +523,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToInt32) {
 
 TEST_F(RedundancyEliminationTest,
        CheckedTaggedToInt32SubsumedByCheckedTaggedSignedToInt32) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
         Node* value = Parameter(0);
         Node* effect = graph()->start();
@@ -553,8 +552,8 @@ TEST_F(RedundancyEliminationTest,
 // CheckedTaggedToInt64
 
 TEST_F(RedundancyEliminationTest, CheckedTaggedToInt64) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
         Node* value = Parameter(0);
         Node* effect = graph()->start();
@@ -582,8 +581,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToInt64) {
 // CheckedTaggedToTaggedPointer
 
 TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedPointer) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -609,8 +608,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedPointer) {
 // CheckedTaggedToTaggedSigned
 
 TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedSigned) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -636,8 +635,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedSigned) {
 // CheckedCompressedToTaggedPointer
 
 TEST_F(RedundancyEliminationTest, CheckedCompressedToTaggedPointer) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -663,8 +662,8 @@ TEST_F(RedundancyEliminationTest, CheckedCompressedToTaggedPointer) {
 // CheckedCompressedToTaggedSigned
 
 TEST_F(RedundancyEliminationTest, CheckedCompressedToTaggedSigned) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -690,8 +689,8 @@ TEST_F(RedundancyEliminationTest, CheckedCompressedToTaggedSigned) {
 // CheckedTaggedToCompressedPointer
 
 TEST_F(RedundancyEliminationTest, CheckedTaggedToCompressedPointer) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -717,8 +716,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToCompressedPointer) {
 // CheckedTaggedToCompressedSigned
 
 TEST_F(RedundancyEliminationTest, CheckedTaggedToCompressedSigned) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -744,8 +743,8 @@ TEST_F(RedundancyEliminationTest, CheckedTaggedToCompressedSigned) {
 // CheckedTruncateTaggedToWord32
 
 TEST_F(RedundancyEliminationTest, CheckedTruncateTaggedToWord32) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
         Node* value = Parameter(0);
         Node* effect = graph()->start();
@@ -771,8 +770,8 @@ TEST_F(RedundancyEliminationTest, CheckedTruncateTaggedToWord32) {
 
 TEST_F(RedundancyEliminationTest,
        CheckedTruncateTaggedToWord32SubsumedByCheckedTruncateTaggedToWord32) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -802,8 +801,8 @@ TEST_F(RedundancyEliminationTest,
 // CheckedUint32Bounds
 
 TEST_F(RedundancyEliminationTest, CheckedUint32Bounds) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* index = Parameter(0);
       Node* length = Parameter(1);
       Node* effect = graph()->start();
@@ -832,8 +831,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint32Bounds) {
 // CheckedUint32ToInt32
 
 TEST_F(RedundancyEliminationTest, CheckedUint32ToInt32) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -859,8 +858,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint32ToInt32) {
 // CheckedUint32ToTaggedSigned
 
 TEST_F(RedundancyEliminationTest, CheckedUint32ToTaggedSigned) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -886,8 +885,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint32ToTaggedSigned) {
 // CheckedUint64Bounds
 
 TEST_F(RedundancyEliminationTest, CheckedUint64Bounds) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* index = Parameter(0);
       Node* length = Parameter(1);
       Node* effect = graph()->start();
@@ -914,8 +913,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint64Bounds) {
 // CheckedUint64ToInt32
 
 TEST_F(RedundancyEliminationTest, CheckedUint64ToInt32) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -941,8 +940,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint64ToInt32) {
 // CheckedUint64ToTaggedSigned
 
 TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* value = Parameter(0);
       Node* effect = graph()->start();
       Node* control = graph()->start();
@@ -970,8 +969,8 @@ TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
 TEST_F(RedundancyEliminationTest,
        SpeculativeNumberEqualWithCheckBoundsBetterType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* lhs = Parameter(Type::Any(), 0);
       Node* rhs = Parameter(Type::Any(), 1);
       Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1006,8 +1005,8 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeNumberEqualWithCheckBoundsSameType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* lhs = Parameter(Type::UnsignedSmall(), 0);
       Node* rhs = Parameter(Type::UnsignedSmall(), 1);
       Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1045,8 +1044,8 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeNumberLessThanWithCheckBoundsBetterType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* lhs = Parameter(Type::Any(), 0);
       Node* rhs = Parameter(Type::Any(), 1);
       Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1081,8 +1080,8 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeNumberLessThanWithCheckBoundsSameType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* lhs = Parameter(Type::UnsignedSmall(), 0);
       Node* rhs = Parameter(Type::UnsignedSmall(), 1);
       Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1120,8 +1119,8 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeNumberLessThanOrEqualWithCheckBoundsBetterType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* lhs = Parameter(Type::Any(), 0);
       Node* rhs = Parameter(Type::Any(), 1);
       Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1156,8 +1155,8 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeNumberLessThanOrEqualWithCheckBoundsSameType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       Node* lhs = Parameter(Type::UnsignedSmall(), 0);
       Node* rhs = Parameter(Type::UnsignedSmall(), 1);
       Node* length = Parameter(Type::Unsigned31(), 2);
@@ -1195,7 +1194,7 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeNumberAddWithCheckBoundsBetterType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
     TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
       Node* lhs = Parameter(Type::Any(), 0);
       Node* rhs = Parameter(Type::Any(), 1);
@@ -1221,7 +1220,7 @@ TEST_F(RedundancyEliminationTest,
 
 TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
     TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
       Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
       Node* rhs = Parameter(Type::Any(), 0);
@@ -1251,7 +1250,7 @@ TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
 TEST_F(RedundancyEliminationTest,
        SpeculativeNumberSubtractWithCheckBoundsBetterType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
     TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
       Node* lhs = Parameter(Type::Any(), 0);
       Node* rhs = Parameter(Type::Any(), 1);
@@ -1279,7 +1278,7 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeNumberSubtractWithCheckBoundsSameType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
     TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
       Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
       Node* rhs = Parameter(Type::Any(), 0);
@@ -1310,7 +1309,7 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeSafeIntegerAddWithCheckBoundsBetterType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
     TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
       Node* lhs = Parameter(Type::Any(), 0);
       Node* rhs = Parameter(Type::Any(), 1);
@@ -1338,7 +1337,7 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeSafeIntegerAddWithCheckBoundsSameType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
     TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
       Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
       Node* rhs = Parameter(Type::Any(), 0);
@@ -1369,7 +1368,7 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeSafeIntegerSubtractWithCheckBoundsBetterType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
     TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
       Node* lhs = Parameter(Type::Any(), 0);
       Node* rhs = Parameter(Type::Any(), 1);
@@ -1397,7 +1396,7 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeSafeIntegerSubtractWithCheckBoundsSameType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
     TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
       Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
       Node* rhs = Parameter(Type::Any(), 0);
@@ -1428,8 +1427,8 @@ TEST_F(RedundancyEliminationTest,
 TEST_F(RedundancyEliminationTest,
        SpeculativeToNumberWithCheckBoundsBetterType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
         Node* index = Parameter(Type::Any(), 0);
         Node* length = Parameter(Type::Unsigned31(), 1);
@@ -1456,8 +1455,8 @@ TEST_F(RedundancyEliminationTest,
 
 TEST_F(RedundancyEliminationTest, SpeculativeToNumberWithCheckBoundsSameType) {
   Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
-  TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
-    TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+  TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
+    TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
       TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
         Node* index = Parameter(Type::Range(42.0, 42.0, zone()), 0);
         Node* length = Parameter(Type::Unsigned31(), 1);
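
Throughout these tests, VectorSlotPair gives way to FeedbackSource; notably, construction no longer takes an InlineCacheState such as UNINITIALIZED. A minimal stand-in inferred from the hunks alone (the real class holds a Handle<FeedbackVector>, not a raw pointer):

```cpp
struct FeedbackVector;  // opaque placeholder
struct FeedbackSlot {
  int id = -1;
};

struct FeedbackSource {
  FeedbackSource() = default;  // invalid source, like VectorSlotPair()
  FeedbackSource(FeedbackVector* vector, FeedbackSlot slot)
      : vector(vector), slot(slot) {}
  bool IsValid() const { return vector != nullptr; }

  FeedbackVector* vector = nullptr;
  FeedbackSlot slot;
};
```

Operator factories such as CheckSmi and InstanceOf accordingly take a default FeedbackSource() where they used to take VectorSlotPair(), as the later hunks in this file show.
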
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index b198592ddd7098..e2d4f080f5c219 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -360,7 +360,7 @@ TEST_F(SimplifiedOperatorReducerTest, CheckedFloat64ToInt32WithConstant) {
   TRACED_FOREACH(int32_t, n, kInt32Values) {
     Reduction r = Reduce(graph()->NewNode(
         simplified()->CheckedFloat64ToInt32(
-            CheckForMinusZeroMode::kDontCheckForMinusZero, VectorSlotPair()),
+            CheckForMinusZeroMode::kDontCheckForMinusZero, FeedbackSource()),
         Float64Constant(n), effect, control));
     ASSERT_TRUE(r.Changed());
     EXPECT_THAT(r.replacement(), IsInt32Constant(n));
@@ -418,7 +418,7 @@ TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithChangeInt31ToTaggedSigned) {
   Node* value =
       graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0);
   Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->CheckSmi(VectorSlotPair()), value, effect, control));
+      simplified()->CheckSmi(FeedbackSource()), value, effect, control));
   ASSERT_TRUE(reduction.Changed());
   EXPECT_EQ(value, reduction.replacement());
 }
@@ -428,7 +428,7 @@ TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithNumberConstant) {
   Node* control = graph()->start();
   Node* value = NumberConstant(1.0);
   Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->CheckSmi(VectorSlotPair()), value, effect, control));
+      simplified()->CheckSmi(FeedbackSource()), value, effect, control));
   ASSERT_TRUE(reduction.Changed());
   EXPECT_EQ(value, reduction.replacement());
 }
@@ -438,9 +438,9 @@ TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithCheckSmi) {
   Node* effect = graph()->start();
   Node* control = graph()->start();
   Node* value = effect = graph()->NewNode(
-      simplified()->CheckSmi(VectorSlotPair()), param0, effect, control);
+      simplified()->CheckSmi(FeedbackSource()), param0, effect, control);
   Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->CheckSmi(VectorSlotPair()), value, effect, control));
+      simplified()->CheckSmi(FeedbackSource()), value, effect, control));
   ASSERT_TRUE(reduction.Changed());
   EXPECT_EQ(value, reduction.replacement());
 }
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index ec689932132414..23e4dbe5ae1a65 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -463,7 +463,7 @@ TEST_MONOTONICITY(Add)
 #undef TEST_MONOTONICITY
 
 TEST_F(TyperTest, Monotonicity_InstanceOf) {
-  TestBinaryMonotonicity(javascript_.InstanceOf(VectorSlotPair()));
+  TestBinaryMonotonicity(javascript_.InstanceOf(FeedbackSource()));
 }
 
 // JS BINOPS without hint
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index f8e3e26aa91fe7..a62e8246008056 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -1718,58 +1718,6 @@ TEST_F(InstructionSelectorTest, LoadAndWord64ShiftRight32) {
   }
 }
 
-TEST_F(InstructionSelectorTest, StackCheck0) {
-  ExternalReference js_stack_limit =
-      ExternalReference::Create(isolate()->stack_guard()->address_of_jslimit());
-  StreamBuilder m(this, MachineType::Int32());
-  Node* const sp = m.LoadStackPointer();
-  Node* const stack_limit =
-      m.Load(MachineType::Pointer(), m.ExternalConstant(js_stack_limit));
-  Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
-
-  RawMachineLabel if_true, if_false;
-  m.Branch(interrupt, &if_true, &if_false);
-
-  m.Bind(&if_true);
-  m.Return(m.Int32Constant(1));
-
-  m.Bind(&if_false);
-  m.Return(m.Int32Constant(0));
-
-  Stream s = m.Build();
-
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kX64Cmp, s[0]->arch_opcode());
-  EXPECT_EQ(4U, s[0]->InputCount());
-  EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
-TEST_F(InstructionSelectorTest, StackCheck1) {
-  ExternalReference js_stack_limit =
-      ExternalReference::Create(isolate()->stack_guard()->address_of_jslimit());
-  StreamBuilder m(this, MachineType::Int32());
-  Node* const sp = m.LoadStackPointer();
-  Node* const stack_limit =
-      m.Load(MachineType::Pointer(), m.ExternalConstant(js_stack_limit));
-  Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
-
-  RawMachineLabel if_true, if_false;
-  m.Branch(sp_within_limit, &if_true, &if_false);
-
-  m.Bind(&if_true);
-  m.Return(m.Int32Constant(1));
-
-  m.Bind(&if_false);
-  m.Return(m.Int32Constant(0));
-
-  Stream s = m.Build();
-
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kX64StackCheck, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
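
The two deleted tests pinned down an x64-only pattern match: a UintPtrLessThan against a freshly loaded stack pointer was fused into a kX64StackCheck instruction. With the dedicated StackPointerGreaterThan operator, no such pattern remains to match, so the tests go away. A toy rendition of the retired special case (matching logic invented; opcode names taken from the deleted expectations):

```cpp
#include <cassert>
#include <string>

struct Node {
  std::string op;
  Node* left = nullptr;
  Node* right = nullptr;
};

std::string SelectX64(const Node& n) {
  // Old special case: "limit < stack pointer" became kX64StackCheck.
  if (n.op == "UintPtrLessThan" && n.right &&
      n.right->op == "LoadStackPointer") {
    return "kX64StackCheck";
  }
  return "kX64Cmp";
}

int main() {
  Node sp{"LoadStackPointer"};
  Node limit{"Load"};
  Node check{"UintPtrLessThan", &limit, &sp};
  assert(SelectX64(check) == "kX64StackCheck");
}
```
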
diff --git a/deps/v8/test/unittests/date/date-cache-unittest.cc b/deps/v8/test/unittests/date/date-cache-unittest.cc
index 2ceaaebabb7155..2c252424e730db 100644
--- a/deps/v8/test/unittests/date/date-cache-unittest.cc
+++ b/deps/v8/test/unittests/date/date-cache-unittest.cc
@@ -69,12 +69,12 @@ TEST(DateCache, AdoptDefaultFirst) {
   // We finish all the operations of AdoptDefaultThread before
   // running all the other threads, so it won't show the problem of
   // AdoptDefault trashing a newly created default.
-  t1.Start();
+  CHECK(t1.Start());
   t1.Join();
 
-  t2.Start();
-  t3.Start();
-  t4.Start();
+  CHECK(t2.Start());
+  CHECK(t3.Start());
+  CHECK(t4.Start());
 
   t2.Join();
   t3.Join();
@@ -92,10 +92,10 @@ TEST(DateCache, AdoptDefaultMixed) {
   // it will cause a crash in another thread, because the TimeZone
   // newly created by createDefault could be trashed by AdoptDefault
   // while a deleted DEFAULT_ZONE gets cloned.
-  t1.Start();
-  t2.Start();
-  t3.Start();
-  t4.Start();
+  CHECK(t1.Start());
+  CHECK(t2.Start());
+  CHECK(t3.Start());
+  CHECK(t4.Start());
 
   t1.Join();
   t2.Join();
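
These hunks adapt to base::Thread::Start() now reporting success instead of returning void, so every call site wraps it in CHECK(...). A sketch of the shape, modeling CHECK with assert and the platform thread with std::thread:

```cpp
#include <cassert>
#include <system_error>
#include <thread>

class Thread {
 public:
  // Returns false instead of crashing when the OS cannot spawn a thread.
  bool Start() {
    try {
      worker_ = std::thread([] { /* Run() body */ });
      return true;
    } catch (const std::system_error&) {
      return false;
    }
  }
  void Join() {
    if (worker_.joinable()) worker_.join();
  }

 private:
  std::thread worker_;
};

int main() {
  Thread t;
  assert(t.Start());  // mirrors CHECK(t.Start()) in the updated tests
  t.Join();
}
```
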
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index 37b037147b3d86..6cb9df0895b3cc 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -377,32 +377,6 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_Run) {
   }
 }
 
-TEST_F(MicrotaskQueueTest, DetachGlobal_FinalizationGroup) {
-  // Enqueue an FinalizationGroupCleanupTask.
-  Handle<JSArray> ran = RunJS<JSArray>(
-      "var ran = [false];"
-      "var wf = new FinalizationGroup(() => { ran[0] = true; });"
-      "(function() { wf.register({}, {}); })();"
-      "gc();"
-      "ran");
-
-  EXPECT_TRUE(
-      Object::GetElement(isolate(), ran, 0).ToHandleChecked()->IsFalse());
-  EXPECT_EQ(1, microtask_queue()->size());
-
-  // Detach MicrotaskQueue from the current context.
-  context()->DetachGlobal();
-
-  microtask_queue()->RunMicrotasks(isolate());
-
-  // RunMicrotasks processes the pending Microtask, but Microtasks that are
-  // associated to a detached context should be cancelled and should not take
-  // effect.
-  EXPECT_EQ(0, microtask_queue()->size());
-  EXPECT_TRUE(
-      Object::GetElement(isolate(), ran, 0).ToHandleChecked()->IsFalse());
-}
-
 namespace {
 
 void DummyPromiseHook(PromiseHookType type, Local<Promise> promise,
diff --git a/deps/v8/test/unittests/heap/barrier-unittest.cc b/deps/v8/test/unittests/heap/barrier-unittest.cc
index 07906b20c18527..99cf5d897868a9 100644
--- a/deps/v8/test/unittests/heap/barrier-unittest.cc
+++ b/deps/v8/test/unittests/heap/barrier-unittest.cc
@@ -57,7 +57,7 @@ TEST(OneshotBarrier, DoneAfterWait_Concurrent) {
     barrier.Start();
   }
   for (int i = 0; i < kThreadCount; i++) {
-    threads[i].Start();
+    CHECK(threads[i].Start());
   }
   for (int i = 0; i < kThreadCount; i++) {
     threads[i].Join();
@@ -80,7 +80,7 @@ TEST(OneshotBarrier, EarlyFinish_Concurrent) {
     barrier.Start();
   }
   for (int i = 0; i < kThreadCount; i++) {
-    threads[i].Start();
+    CHECK(threads[i].Start());
   }
   for (int i = 0; i < kThreadCount; i++) {
     threads[i].Join();
@@ -133,7 +133,7 @@ TEST(OneshotBarrier, Processing_Concurrent) {
   barrier.Start();
   barrier.Start();
   EXPECT_FALSE(barrier.DoneForTesting());
-  counting_thread.Start();
+  CHECK(counting_thread.Start());
 
   for (size_t i = 0; i < kWorkCounter; i++) {
     {
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 53b919a8604887..742e86c357e258 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -355,11 +355,9 @@ TEST_F(GCTracerTest, BackgroundScavengerScope) {
   tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
                 "collector unittest");
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 10,
-      nullptr);
+      GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 10);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 1,
-      nullptr);
+      GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 1);
   tracer->Stop(SCAVENGER);
   EXPECT_DOUBLE_EQ(
       11, tracer->current_
@@ -372,20 +370,19 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
   tracer->Start(MINOR_MARK_COMPACTOR, GarbageCollectionReason::kTesting,
                 "collector unittest");
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 10, nullptr);
+      GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 10);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 1, nullptr);
+      GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 1);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 20,
-      nullptr);
+      GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 20);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 2, nullptr);
+      GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 2);
   tracer->AddBackgroundScopeSample(
       GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS,
-      30, nullptr);
+      30);
   tracer->AddBackgroundScopeSample(
       GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS,
-      3, nullptr);
+      3);
   tracer->Stop(MINOR_MARK_COMPACTOR);
   EXPECT_DOUBLE_EQ(
       11,
@@ -402,33 +399,31 @@ TEST_F(GCTracerTest, BackgroundMajorMCScope) {
   GCTracer* tracer = i_isolate()->heap()->tracer();
   tracer->ResetForTesting();
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 100, nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 100);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 200, nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 200);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 10, nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 10);
   // Scavenger should not affect the major mark-compact scopes.
   tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
                 "collector unittest");
   tracer->Stop(SCAVENGER);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 20, nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 20);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 1, nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 1);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 2, nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 2);
   tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
                 "collector unittest");
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 30, nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 30);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 3, nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 3);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 40,
-      nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 40);
   tracer->AddBackgroundScopeSample(
-      GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 4,
-      nullptr);
+      GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 4);
   tracer->Stop(MARK_COMPACTOR);
   EXPECT_DOUBLE_EQ(
       111, tracer->current_.scopes[GCTracer::Scope::MC_BACKGROUND_MARKING]);
@@ -448,7 +443,7 @@ class ThreadWithBackgroundScope final : public base::Thread {
       : Thread(Options("ThreadWithBackgroundScope")), tracer_(tracer) {}
   void Run() override {
     GCTracer::BackgroundScope scope(
-        tracer_, GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
+        tracer_, GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, nullptr);
   }
 
  private:
@@ -460,8 +455,8 @@ TEST_F(GCTracerTest, MultithreadedBackgroundScope) {
   ThreadWithBackgroundScope thread1(tracer);
   ThreadWithBackgroundScope thread2(tracer);
   tracer->ResetForTesting();
-  thread1.Start();
-  thread2.Start();
+  CHECK(thread1.Start());
+  CHECK(thread2.Start());
   tracer->FetchBackgroundMarkCompactCounters();
   thread1.Join();
   thread2.Join();
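
Two coordinated signature changes run through this file: AddBackgroundScopeSample drops its trailing RuntimeCallStats* (every call site loses the nullptr), while the BackgroundScope RAII constructor gains one. A skeleton of the new shapes, inferred from these hunks rather than the full V8 API:

```cpp
class RuntimeCallStats;  // opaque here

class GCTracer {
 public:
  enum class ScopeId { MC_BACKGROUND_MARKING /* ... */ };

  // Was: AddBackgroundScopeSample(ScopeId, double, RuntimeCallStats*).
  void AddBackgroundScopeSample(ScopeId id, double duration_ms) {
    (void)id;
    (void)duration_ms;  // record the sample
  }

  class BackgroundScope {
   public:
    // The stats pointer now travels with the scope, not with each sample.
    BackgroundScope(GCTracer* tracer, ScopeId id, RuntimeCallStats* stats)
        : tracer_(tracer), id_(id), stats_(stats) {}

   private:
    GCTracer* tracer_;
    ScopeId id_;
    RuntimeCallStats* stats_;
  };
};
```
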
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index 140d3d45b3aea0..c5acc6c43e17dd 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -143,5 +143,146 @@ TEST_F(SpacesTest, CodeRangeAddressReuse) {
   EXPECT_EQ(code_range6, code_range3);
 }
 
+// Tests that FreeListMany::SelectFreeListCategoryType returns what it should.
+TEST_F(SpacesTest, FreeListManySelectFreeListCategoryType) {
+  FreeListMany free_list;
+
+  // Testing that all sizes below 256 bytes get assigned the correct category
+  for (size_t size = 0; size <= FreeListMany::kPreciseCategoryMaxSize; size++) {
+    FreeListCategoryType cat = free_list.SelectFreeListCategoryType(size);
+    if (cat == 0) {
+      // If cat == 0, then we make sure that |size| doesn't fit in the 2nd
+      // category.
+      EXPECT_LT(size, free_list.categories_min[1]);
+    } else {
+      // Otherwise, size should fit in |cat|, but not in |cat+1|.
+      EXPECT_LE(free_list.categories_min[cat], size);
+      EXPECT_LT(size, free_list.categories_min[cat + 1]);
+    }
+  }
+
+  // Testing every size above 256 would take a long time, so test only some
+  // "interesting cases": numbers in the middle of the categories, as well as
+  // at the categories' bounds.
+  for (int cat = kFirstCategory + 1; cat <= free_list.last_category_; cat++) {
+    std::vector<size_t> sizes;
+    // Adding size less than this category's minimum
+    sizes.push_back(free_list.categories_min[cat] - 8);
+    // Adding size equal to this category's minimum
+    sizes.push_back(free_list.categories_min[cat]);
+    // Adding size greater than this category's minimum
+    sizes.push_back(free_list.categories_min[cat] + 8);
+    // Adding size between this category's minimum and the next category
+    if (cat != free_list.last_category_) {
+      sizes.push_back(
+          (free_list.categories_min[cat] + free_list.categories_min[cat + 1]) /
+          2);
+    }
+
+    for (size_t size : sizes) {
+      FreeListCategoryType cat = free_list.SelectFreeListCategoryType(size);
+      if (cat == free_list.last_category_) {
+        // If cat == last_category, then we make sure that |size| indeed fits
+        // in the last category.
+        EXPECT_LE(free_list.categories_min[cat], size);
+      } else {
+        // Otherwise, size should fit in |cat|, but not in |cat+1|.
+        EXPECT_LE(free_list.categories_min[cat], size);
+        EXPECT_LT(size, free_list.categories_min[cat + 1]);
+      }
+    }
+  }
+}
+
+// Tests that FreeListMany::GuaranteedAllocatable returns what it should.
+TEST_F(SpacesTest, FreeListManyGuaranteedAllocatable) {
+  FreeListMany free_list;
+
+  for (int cat = kFirstCategory; cat < free_list.last_category_; cat++) {
+    std::vector<size_t> sizes;
+    // Adding size less than this category's minimum
+    sizes.push_back(free_list.categories_min[cat] - 8);
+    // Adding size equal to this category's minimum
+    sizes.push_back(free_list.categories_min[cat]);
+    // Adding size greater than this category's minimum
+    sizes.push_back(free_list.categories_min[cat] + 8);
+    if (cat != free_list.last_category_) {
+      // Adding size between this category's minimum and the next category
+      sizes.push_back(
+          (free_list.categories_min[cat] + free_list.categories_min[cat + 1]) /
+          2);
+    }
+
+    for (size_t size : sizes) {
+      FreeListCategoryType cat_free =
+          free_list.SelectFreeListCategoryType(size);
+      size_t guaranteed_allocatable = free_list.GuaranteedAllocatable(size);
+      if (cat_free == free_list.last_category_) {
+        // If |cat_free| == last_category, then the size returned by
+        // GuaranteedAllocatable must still map to the last category, because
+        // when allocating, the last category is searched entirely.
+        EXPECT_EQ(free_list.SelectFreeListCategoryType(guaranteed_allocatable),
+                  free_list.last_category_);
+      } else if (size < free_list.categories_min[0]) {
+        // If size < free_list.categories_min[0], then the bytes are wasted, and
+        // guaranteed_allocatable should return 0.
+        EXPECT_EQ(guaranteed_allocatable, 0ul);
+      } else {
+        // Otherwise, |guaranteed_allocatable| is equal to the minimum of
+        // |size|'s category (|cat_free|).
+        EXPECT_EQ(free_list.categories_min[cat_free], guaranteed_allocatable);
+      }
+    }
+  }
+}
+
+// Tests that
+// FreeListManyCachedFastPath::SelectFastAllocationFreeListCategoryType returns
+// what it should.
+TEST_F(SpacesTest,
+       FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType) {
+  FreeListManyCachedFastPath free_list;
+
+  for (int cat = kFirstCategory; cat <= free_list.last_category_; cat++) {
+    std::vector<size_t> sizes;
+    // Adding size less than this category's minimum
+    sizes.push_back(free_list.categories_min[cat] - 8);
+    // Adding size equal to this category's minimum
+    sizes.push_back(free_list.categories_min[cat]);
+    // Adding size greater than this category's minimum
+    sizes.push_back(free_list.categories_min[cat] + 8);
+    // Adding size between this category's minimum and the next category
+    if (cat != free_list.last_category_) {
+      sizes.push_back(
+          (free_list.categories_min[cat] + free_list.categories_min[cat + 1]) /
+          2);
+    }
+
+    for (size_t size : sizes) {
+      FreeListCategoryType cat =
+          free_list.SelectFastAllocationFreeListCategoryType(size);
+      if (size <= FreeListManyCachedFastPath::kTinyObjectMaxSize) {
+        // For tiny objects, the first category of the fast path should be
+        // chosen.
+        EXPECT_TRUE(cat == FreeListManyCachedFastPath::kFastPathFirstCategory);
+      } else if (size >= free_list.categories_min[free_list.last_category_] -
+                             FreeListManyCachedFastPath::kFastPathOffset) {
+        // For objects close to the minimum of the last category, the last
+        // category is chosen.
+        EXPECT_EQ(cat, free_list.last_category_);
+      } else {
+        // For other objects, the chosen category's minimum must be at
+        // least |size|+1.85k.
+        EXPECT_GE(free_list.categories_min[cat],
+                  size + FreeListManyCachedFastPath::kFastPathOffset);
+        // And the smaller category's minimum is less than |size|+1.85k
+        // (otherwise it would have been chosen instead).
+        EXPECT_LT(free_list.categories_min[cat - 1],
+                  size + FreeListManyCachedFastPath::kFastPathOffset);
+      }
+    }
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
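
The invariant the new tests pin down is simple: a size selects the largest category whose minimum does not exceed it, and the last category is open-ended. A toy model with made-up bounds (V8's real categories_min table differs):

```cpp
#include <cassert>
#include <cstddef>

// Illustrative category minimums only; not V8's actual table.
constexpr size_t kCategoriesMin[] = {24, 32, 48, 64, 128, 256, 2048};
constexpr int kLastCategory = 6;

int SelectFreeListCategoryType(size_t size) {
  int cat = 0;
  // Climb while the next category's minimum still fits under |size|.
  while (cat < kLastCategory && size >= kCategoriesMin[cat + 1]) cat++;
  return cat;
}

int main() {
  assert(SelectFreeListCategoryType(24) == 0);
  assert(SelectFreeListCategoryType(47) == 1);    // fits cat 1, not cat 2
  assert(SelectFreeListCategoryType(4096) == 6);  // last category is unbounded
}
```
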
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 3d02db74130697..a9c631f8d2202c 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -151,6 +151,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
       .StoreNamedOwnProperty(reg, name, store_own_slot.ToInt())
       .StoreInArrayLiteral(reg, reg, store_array_element_slot.ToInt());
 
+  // Emit Iterator-protocol operations.
+  builder.GetIterator(reg, load_slot.ToInt());
+
   // Emit load / store lookup slots.
   builder.LoadLookupSlot(name, TypeofMode::NOT_INSIDE_TYPEOF)
       .LoadLookupSlot(name, TypeofMode::INSIDE_TYPEOF)
@@ -283,7 +286,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
     BytecodeLoopHeader loop_header;
     BytecodeLabel after_jump1, after_jump2, after_jump3, after_jump4,
         after_jump5, after_jump6, after_jump7, after_jump8, after_jump9,
-        after_jump10, after_loop;
+        after_jump10, after_jump11, after_loop;
     builder.JumpIfNull(&after_loop)
         .Bind(&loop_header)
         .Jump(&after_jump1)
@@ -296,21 +299,23 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
         .Bind(&after_jump4)
         .JumpIfNotUndefined(&after_jump5)
         .Bind(&after_jump5)
-        .JumpIfJSReceiver(&after_jump6)
+        .JumpIfUndefinedOrNull(&after_jump6)
         .Bind(&after_jump6)
-        .JumpIfTrue(ToBooleanMode::kConvertToBoolean, &after_jump7)
+        .JumpIfJSReceiver(&after_jump7)
         .Bind(&after_jump7)
-        .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &after_jump8)
+        .JumpIfTrue(ToBooleanMode::kConvertToBoolean, &after_jump8)
         .Bind(&after_jump8)
-        .JumpIfFalse(ToBooleanMode::kConvertToBoolean, &after_jump9)
+        .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &after_jump9)
         .Bind(&after_jump9)
-        .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &after_jump10)
+        .JumpIfFalse(ToBooleanMode::kConvertToBoolean, &after_jump10)
         .Bind(&after_jump10)
+        .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &after_jump11)
+        .Bind(&after_jump11)
         .JumpLoop(&loop_header, 0)
         .Bind(&after_loop);
   }
 
-  BytecodeLabel end[10];
+  BytecodeLabel end[11];
   {
     // Longer jumps with constant operands
     BytecodeLabel after_jump;
@@ -325,8 +330,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
         .JumpIfNotNull(&end[6])
         .JumpIfUndefined(&end[7])
         .JumpIfNotUndefined(&end[8])
+        .JumpIfUndefinedOrNull(&end[9])
         .LoadLiteral(ast_factory.prototype_string())
-        .JumpIfJSReceiver(&end[9]);
+        .JumpIfJSReceiver(&end[10]);
   }
 
   // Emit Smi table switch bytecode.
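
The builder test now exercises a new conditional jump, JumpIfUndefinedOrNull (hence the label arrays growing from 10 to 11 entries). Judging by its name, it merges what previously needed separate JumpIfUndefined and JumpIfNull bytecodes; a semantics sketch:

```cpp
#include <cassert>

enum class Value { kUndefined, kNull, kSmi };

// Assumed semantics: branch when the accumulator is undefined *or* null.
bool JumpIfUndefinedOrNull(Value acc) {
  return acc == Value::kUndefined || acc == Value::kNull;
}

int main() {
  assert(JumpIfUndefinedOrNull(Value::kUndefined));
  assert(JumpIfUndefinedOrNull(Value::kNull));
  assert(!JumpIfUndefinedOrNull(Value::kSmi));
}
```
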
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 6e7b9452315b42..339fc331783bdd 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -146,6 +146,8 @@ TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
 
   Handle<BytecodeArray> bytecode_array =
       writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
+  bytecode_array->set_source_position_table(
+      *writer()->ToSourcePositionTable(isolate()));
   CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
 
   PositionTableEntry expected_positions[] = {
@@ -236,6 +238,8 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
 
   Handle<BytecodeArray> bytecode_array =
       writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
+  bytecode_array->set_source_position_table(
+      *writer()->ToSourcePositionTable(isolate()));
   SourcePositionTableIterator source_iterator(
       bytecode_array->SourcePositionTable());
   for (size_t i = 0; i < arraysize(expected_positions); ++i) {
@@ -288,6 +292,8 @@ TEST_F(BytecodeArrayWriterUnittest, ElideNoneffectfulBytecodes) {
 
   Handle<BytecodeArray> bytecode_array =
       writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
+  bytecode_array->set_source_position_table(
+      *writer()->ToSourcePositionTable(isolate()));
   SourcePositionTableIterator source_iterator(
       bytecode_array->SourcePositionTable());
   for (size_t i = 0; i < arraysize(expected_positions); ++i) {
@@ -356,6 +362,8 @@ TEST_F(BytecodeArrayWriterUnittest, DeadcodeElimination) {
 
   Handle<BytecodeArray> bytecode_array =
       writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
+  bytecode_array->set_source_position_table(
+      *writer()->ToSourcePositionTable(isolate()));
   SourcePositionTableIterator source_iterator(
       bytecode_array->SourcePositionTable());
   for (size_t i = 0; i < arraysize(expected_positions); ++i) {
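
Each of these tests now attaches the source-position table explicitly after building the bytecode array, which suggests ToBytecodeArray no longer produces the table as a side effect. A stand-in sketch of the two-step finalization (types invented; Handle-based plumbing omitted):

```cpp
#include <memory>
#include <vector>

struct ByteArray {
  std::vector<int> entries;
};

struct BytecodeArray {
  void set_source_position_table(std::shared_ptr<ByteArray> table) {
    source_positions = std::move(table);
  }
  std::shared_ptr<ByteArray> source_positions;
};

struct Writer {
  std::shared_ptr<BytecodeArray> ToBytecodeArray() {
    return std::make_shared<BytecodeArray>();
  }
  std::shared_ptr<ByteArray> ToSourcePositionTable() {
    return std::make_shared<ByteArray>();
  }
};

int main() {
  Writer writer;
  auto bytecodes = writer.ToBytecodeArray();
  // The caller now stitches the two artifacts together explicitly.
  bytecodes->set_source_position_table(writer.ToSourcePositionTable());
}
```
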
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index cb9a83997e6e66..a8ff9981073db5 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -14,7 +14,9 @@
 #include "test/unittests/compiler/node-test-utils.h"
 
 using ::testing::_;
+using ::testing::Eq;
 using v8::internal::compiler::Node;
+using v8::internal::compiler::TNode;
 
 namespace c = v8::internal::compiler;
 
@@ -441,7 +443,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
     InterpreterAssemblerTestState state(this, bytecode);
     InterpreterAssemblerForTest m(&state, bytecode);
     {
-      Node* index = m.IntPtrConstant(2);
+      TNode<IntPtrT> index = m.IntPtrConstant(2);
       Node* load_constant = m.LoadConstantPoolEntry(index);
 #ifdef V8_COMPRESS_POINTERS
       Matcher<Node*> constant_pool_matcher =
@@ -511,16 +513,17 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
   TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
     InterpreterAssemblerTestState state(this, bytecode);
     InterpreterAssemblerForTest m(&state, bytecode);
-    Node* object = m.IntPtrConstant(0xDEADBEEF);
+    TNode<HeapObject> object =
+        m.ReinterpretCast<HeapObject>(m.IntPtrConstant(0xDEADBEEF));
     int offset = 16;
-    Node* load_field = m.LoadObjectField(object, offset);
+    TNode<Object> load_field = m.LoadObjectField(object, offset);
 #ifdef V8_COMPRESS_POINTERS
     EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoadFromObject(
-                                MachineType::AnyCompressed(), object,
+                                MachineType::AnyCompressed(), Eq(object),
                                 c::IsIntPtrConstant(offset - kHeapObjectTag))));
 #else
     EXPECT_THAT(load_field, m.IsLoadFromObject(
-                                MachineType::AnyTagged(), object,
+                                MachineType::AnyTagged(), Eq(object),
                                 c::IsIntPtrConstant(offset - kHeapObjectTag)));
 #endif
   }
@@ -530,12 +533,14 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
   TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
     InterpreterAssemblerTestState state(this, bytecode);
     InterpreterAssemblerForTest m(&state, bytecode);
-    Node* arg1 = m.Int32Constant(2);
-    Node* arg2 = m.Int32Constant(3);
-    Node* context = m.Int32Constant(4);
-    Node* call_runtime = m.CallRuntime(Runtime::kAdd, context, arg1, arg2);
-    EXPECT_THAT(call_runtime, c::IsCall(_, _, arg1, arg2, _,
-                                        c::IsInt32Constant(2), context, _, _));
+    TNode<Object> arg1 = m.ReinterpretCast<Object>(m.Int32Constant(2));
+    TNode<Object> arg2 = m.ReinterpretCast<Object>(m.Int32Constant(3));
+    TNode<Object> context = m.ReinterpretCast<Object>(m.Int32Constant(4));
+    TNode<Object> call_runtime =
+        m.CallRuntime(Runtime::kAdd, context, arg1, arg2);
+    EXPECT_THAT(call_runtime,
+                c::IsCall(_, _, Eq(arg1), Eq(arg2), _, c::IsInt32Constant(2),
+                          Eq(context), _, _));
   }
 }
 
@@ -549,29 +554,30 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
         Callable builtin =
             CodeFactory::InterpreterCEntry(isolate(), result_size);
 
-        Node* function_id = m.Int32Constant(0);
+        TNode<Int32T> function_id = m.Int32Constant(0);
         InterpreterAssembler::RegListNodePair registers(m.IntPtrConstant(1),
                                                         m.Int32Constant(2));
-        Node* context = m.IntPtrConstant(4);
+        TNode<Object> context = m.ReinterpretCast<Object>(m.Int32Constant(4));
 
         Matcher<Node*> function_table = c::IsExternalConstant(
             ExternalReference::runtime_function_table_address_for_unittests(
                 isolate()));
-        Matcher<Node*> function = c::IsIntPtrAdd(
-            function_table,
-            c::IsChangeUint32ToWord(c::IsInt32Mul(
-                function_id, c::IsInt32Constant(sizeof(Runtime::Function)))));
+        Matcher<Node*> function =
+            c::IsIntPtrAdd(function_table,
+                           c::IsChangeUint32ToWord(c::IsInt32Mul(
+                               Eq(function_id),
+                               c::IsInt32Constant(sizeof(Runtime::Function)))));
         Matcher<Node*> function_entry =
             m.IsLoad(MachineType::Pointer(), function,
                      c::IsIntPtrConstant(offsetof(Runtime::Function, entry)));
 
         Node* call_runtime =
             m.CallRuntimeN(function_id, context, registers, result_size);
-        EXPECT_THAT(
-            call_runtime,
-            c::IsCall(_, c::IsHeapConstant(builtin.code()),
-                      registers.reg_count(), registers.base_reg_location(),
-                      function_entry, context, _, _));
+        EXPECT_THAT(call_runtime,
+                    c::IsCall(_, c::IsHeapConstant(builtin.code()),
+                              Eq(registers.reg_count()),
+                              Eq(registers.base_reg_location()), function_entry,
+                              Eq(context), _, _));
       }
     }
   }
@@ -581,12 +587,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
   TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
     InterpreterAssemblerTestState state(this, bytecode);
     InterpreterAssemblerForTest m(&state, bytecode);
-    Node* feedback_vector = m.LoadFeedbackVector();
+    TNode<HeapObject> feedback_vector = m.LoadFeedbackVector();
 
     // The feedback vector is a phi node with two inputs: one loads the
     // feedback vector; the other is the undefined constant, used when feedback
     // vectors aren't allocated. Find the input that loads the feedback vector.
-    CHECK(feedback_vector->opcode() == i::compiler::IrOpcode::kPhi);
+    CHECK_EQ(static_cast<Node*>(feedback_vector)->opcode(),
+             i::compiler::IrOpcode::kPhi);
     Node* value0 =
         i::compiler::NodeProperties::GetValueInput(feedback_vector, 0);
     Node* value1 =
@@ -601,21 +608,22 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
                  c::IsIntPtrConstant(Register::function_closure().ToOperand() *
                                      kSystemPointerSize)));
 #ifdef V8_COMPRESS_POINTERS
-    Matcher<Node*> load_vector_cell_matcher = IsChangeCompressedToTagged(
-        m.IsLoadFromObject(MachineType::AnyCompressed(), load_function_matcher,
-                           c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
-                                               kHeapObjectTag)));
+    Matcher<Node*> load_vector_cell_matcher =
+        IsChangeCompressedPointerToTaggedPointer(m.IsLoadFromObject(
+            MachineType::CompressedPointer(), load_function_matcher,
+            c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
+                                kHeapObjectTag)));
     EXPECT_THAT(load_feedback_vector,
-                IsChangeCompressedToTagged(m.IsLoadFromObject(
-                    MachineType::AnyCompressed(), load_vector_cell_matcher,
+                IsChangeCompressedPointerToTaggedPointer(m.IsLoadFromObject(
+                    MachineType::CompressedPointer(), load_vector_cell_matcher,
                     c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag))));
 #else
     Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
-        MachineType::AnyTagged(), load_function_matcher,
+        MachineType::TaggedPointer(), load_function_matcher,
         c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
     EXPECT_THAT(load_feedback_vector,
                 m.IsLoadFromObject(
-                    MachineType::AnyTagged(), load_vector_cell_matcher,
+                    MachineType::TaggedPointer(), load_vector_cell_matcher,
                     c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
 #endif
   }
diff --git a/deps/v8/test/unittests/libplatform/task-queue-unittest.cc b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
index 4001048a8e90f0..1ae440d0c0a840 100644
--- a/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
@@ -51,8 +51,8 @@ TEST(TaskQueueTest, TerminateMultipleReaders) {
   TaskQueue queue;
   TaskQueueThread thread1(&queue);
   TaskQueueThread thread2(&queue);
-  thread1.Start();
-  thread2.Start();
+  CHECK(thread1.Start());
+  CHECK(thread2.Start());
   queue.Terminate();
   thread1.Join();
   thread2.Join();
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index 8b425542c188af..8c3fb017a4ec49 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -71,7 +71,7 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
         ast_node_factory.NewFunctionLiteral(
             function_name, function_scope, statements, -1, -1, -1,
             FunctionLiteral::kNoDuplicateParameters,
-            FunctionLiteral::kAnonymousExpression,
+            FunctionSyntaxKind::kAnonymousExpression,
             FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
             shared->function_literal_id(), nullptr);
 
diff --git a/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc b/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
index b3843db46d0139..2a0e7d7f90fe2f 100644
--- a/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
+++ b/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
@@ -160,8 +160,8 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksStarted) {
   ResultType result2{0};
   ThreadedRunner runner1(NewTask(&result1, TestTask::kWaitTillCancelTriggered));
   ThreadedRunner runner2(NewTask(&result2, TestTask::kWaitTillCancelTriggered));
-  runner1.Start();
-  runner2.Start();
+  CHECK(runner1.Start());
+  CHECK(runner2.Start());
   // Busy wait on result to make sure both tasks are done.
   while (result1.load() == 0 || result2.load() == 0) {
   }
@@ -179,8 +179,8 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksNotRun) {
   ThreadedRunner runner2(NewTask(&result2, TestTask::kCheckNotRun));
   CancelAndWait();
   // Tasks are canceled, hence the runner will bail out and not update result.
-  runner1.Start();
-  runner2.Start();
+  CHECK(runner1.Start());
+  CHECK(runner2.Start());
   runner1.Join();
   runner2.Join();
   EXPECT_EQ(0u, result1);
@@ -193,7 +193,7 @@ TEST_F(CancelableTaskManagerTest, RemoveBeforeCancelAndWait) {
   CancelableTaskManager::Id id = runner1.task_id();
   EXPECT_EQ(1u, id);
   EXPECT_EQ(TryAbortResult::kTaskAborted, manager()->TryAbort(id));
-  runner1.Start();
+  CHECK(runner1.Start());
   runner1.Join();
   CancelAndWait();
   EXPECT_EQ(0u, result1);
@@ -204,7 +204,7 @@ TEST_F(CancelableTaskManagerTest, RemoveAfterCancelAndWait) {
   ThreadedRunner runner1(NewTask(&result1));
   CancelableTaskManager::Id id = runner1.task_id();
   EXPECT_EQ(1u, id);
-  runner1.Start();
+  CHECK(runner1.Start());
   runner1.Join();
   CancelAndWait();
   EXPECT_EQ(TryAbortResult::kTaskRemoved, manager()->TryAbort(id));
@@ -231,8 +231,8 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksNotRunTryAbortAll) {
   ThreadedRunner runner2(NewTask(&result2, TestTask::kCheckNotRun));
   EXPECT_EQ(TryAbortResult::kTaskAborted, TryAbortAll());
   // Tasks are canceled, hence the runner will bail out and not update result.
-  runner1.Start();
-  runner2.Start();
+  CHECK(runner1.Start());
+  CHECK(runner2.Start());
   runner1.Join();
   runner2.Join();
   EXPECT_EQ(0u, result1);
@@ -245,7 +245,7 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksStartedTryAbortAll) {
   ResultType result2{0};
   ThreadedRunner runner1(NewTask(&result1, TestTask::kWaitTillCancelTriggered));
   ThreadedRunner runner2(NewTask(&result2, TestTask::kWaitTillCancelTriggered));
-  runner1.Start();
+  CHECK(runner1.Start());
   // Busy wait on result to make sure task1 is done.
   while (result1.load() == 0) {
   }
@@ -255,7 +255,7 @@ TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksStartedTryAbortAll) {
   EXPECT_THAT(TryAbortAll(),
               testing::AnyOf(testing::Eq(TryAbortResult::kTaskAborted),
                              testing::Eq(TryAbortResult::kTaskRunning)));
-  runner2.Start();
+  CHECK(runner2.Start());
   runner1.Join();
   runner2.Join();
   EXPECT_EQ(1u, result1);
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index 1366f86ce76ae1..22ee7543213f09 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -20,6 +20,13 @@ constexpr const char* kTestTorquePrelude = R"(
 type void;
 type never;
 
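+// Minimal torque_internal::Reference<T> declaration; the compiler expects
+// it to exist in the prelude.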
+namespace torque_internal {
+  struct Reference<T: type> {
+    const object: HeapObject;
+    const offset: intptr;
+  }
+}
+
 type Tagged generates 'TNode<Object>' constexpr 'ObjectPtr';
 type Smi extends Tagged generates 'TNode<Smi>' constexpr 'Smi';
 
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index c36a0b70f846f9..def90fc3b5b2d7 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -39,4 +39,17 @@
   'Torque*': [SKIP],
 }],  # 'system == windows and asan'
 
+['system == windows and arch == x64 and mode == release', {
+  # BUG(992783).
+  'Torque.ConditionalFields': [SKIP],
+  'Torque.UsingUnderscorePrefixedIdentifierError': [SKIP],
+}],  # 'system == windows and arch == x64 and mode == release'
+
+##############################################################################
+['tsan == True', {
+  # https://crbug.com/v8/9380
+  # The test is broken and needs to be fixed to use separate isolates.
+  'BackingStoreTest.RacyGrowWasmMemoryInPlace': [SKIP],
+}],  # 'tsan == True'
+
 ]
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 725f7f4a592c9a..791770ee948998 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -1448,28 +1448,18 @@ TEST_F(FunctionBodyDecoderTest, StoreMemOffset_void) {
                                                    WASM_ZERO, WASM_ZERO)});
 }
 
-#define BYTE0(x) ((x)&0x7F)
-#define BYTE1(x) ((x >> 7) & 0x7F)
-#define BYTE2(x) ((x >> 14) & 0x7F)
-#define BYTE3(x) ((x >> 21) & 0x7F)
-
-#define VARINT1(x) BYTE0(x)
-#define VARINT2(x) BYTE0(x) | 0x80, BYTE1(x)
-#define VARINT3(x) BYTE0(x) | 0x80, BYTE1(x) | 0x80, BYTE2(x)
-#define VARINT4(x) BYTE0(x) | 0x80, BYTE1(x) | 0x80, BYTE2(x) | 0x80, BYTE3(x)
-
 TEST_F(FunctionBodyDecoderTest, LoadMemOffset_varint) {
   TestModuleBuilder builder;
   module = builder.module();
   builder.InitializeMemory();
   ExpectValidates(sigs.i_i(),
-                  {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT, VARINT1(0x45)});
+                  {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT, U32V_1(0x45)});
   ExpectValidates(sigs.i_i(), {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
-                               VARINT2(0x3999)});
+                               U32V_2(0x3999)});
   ExpectValidates(sigs.i_i(), {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
-                               VARINT3(0x344445)});
+                               U32V_3(0x344445)});
   ExpectValidates(sigs.i_i(), {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
-                               VARINT4(0x36666667)});
+                               U32V_4(0x36666667)});
 }
 
 TEST_F(FunctionBodyDecoderTest, StoreMemOffset_varint) {
@@ -1477,25 +1467,15 @@ TEST_F(FunctionBodyDecoderTest, StoreMemOffset_varint) {
   module = builder.module();
   builder.InitializeMemory();
   ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
-                               ZERO_ALIGNMENT, VARINT1(0x33)});
+                               ZERO_ALIGNMENT, U32V_1(0x33)});
   ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
-                               ZERO_ALIGNMENT, VARINT2(0x1111)});
+                               ZERO_ALIGNMENT, U32V_2(0x1111)});
   ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
-                               ZERO_ALIGNMENT, VARINT3(0x222222)});
+                               ZERO_ALIGNMENT, U32V_3(0x222222)});
   ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
-                               ZERO_ALIGNMENT, VARINT4(0x44444444)});
+                               ZERO_ALIGNMENT, U32V_4(0x44444444)});
 }
 
-#undef BYTE0
-#undef BYTE1
-#undef BYTE2
-#undef BYTE3
-
-#undef VARINT1
-#undef VARINT2
-#undef VARINT3
-#undef VARINT4
-
 TEST_F(FunctionBodyDecoderTest, AllLoadMemCombinations) {
   TestModuleBuilder builder;
   module = builder.module();
@@ -2104,6 +2084,9 @@ TEST_F(FunctionBodyDecoderTest, TableGet) {
   ExpectValidates(
       &sig,
       {WASM_SET_LOCAL(local_func, WASM_TABLE_GET(tab_func2, WASM_I32V(7)))});
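+  // A table.get whose table index is encoded as a 2-byte varint must also
+  // validate.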
+  ExpectValidates(
+      &sig, {WASM_SET_LOCAL(local_ref, WASM_SEQ(WASM_I32V(6), kExprTableGet,
+                                                U32V_2(tab_ref1)))});
 
   // We can store funcref values as anyref, but not the other way around.
   ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
@@ -3542,6 +3525,24 @@ TEST_F(WasmOpcodeLengthTest, VariableLength) {
   ExpectLength(4, kExprRefFunc, U32V_3(44));
   ExpectLength(5, kExprRefFunc, U32V_4(66));
   ExpectLength(6, kExprRefFunc, U32V_5(77));
+
+  ExpectLength(2, kExprTableGet, U32V_1(1));
+  ExpectLength(3, kExprTableGet, U32V_2(33));
+  ExpectLength(4, kExprTableGet, U32V_3(44));
+  ExpectLength(5, kExprTableGet, U32V_4(66));
+  ExpectLength(6, kExprTableGet, U32V_5(77));
+
+  ExpectLength(2, kExprTableSet, U32V_1(1));
+  ExpectLength(3, kExprTableSet, U32V_2(33));
+  ExpectLength(4, kExprTableSet, U32V_3(44));
+  ExpectLength(5, kExprTableSet, U32V_4(66));
+  ExpectLength(6, kExprTableSet, U32V_5(77));
+
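+  // kExprCallIndirect carries two varint operands: a signature index and a
+  // table index.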
+  ExpectLength(3, kExprCallIndirect, U32V_1(1), U32V_1(1));
+  ExpectLength(4, kExprCallIndirect, U32V_1(1), U32V_2(33));
+  ExpectLength(5, kExprCallIndirect, U32V_1(1), U32V_3(44));
+  ExpectLength(6, kExprCallIndirect, U32V_1(1), U32V_4(66));
+  ExpectLength(7, kExprCallIndirect, U32V_1(1), U32V_5(77));
 }
 
 TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
index 16593709995009..9f7cfc6b1d284d 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
@@ -465,7 +465,7 @@ TEST_P(TrapHandlerTest, TestCrashInOtherThread) {
   CHECK(!GetThreadInWasmFlag());
   // Set the thread-in-wasm flag manually in this thread.
   *trap_handler::GetThreadInWasmThreadLocalAddress() = 1;
-  runner.Start();
+  CHECK(runner.Start());
   runner.Join();
   CHECK(GetThreadInWasmFlag());
   // Reset the thread-in-wasm flag.
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index eea1f8208d7da6..a6b29ffc6c93d0 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -156,8 +156,6 @@ class WasmCodeManagerTest : public TestWithContext,
                             public ::testing::WithParamInterface<ModuleStyle> {
  public:
   static constexpr uint32_t kNumFunctions = 10;
-  static constexpr uint32_t kJumpTableSize = RoundUp<kCodeAlignment>(
-      JumpTableAssembler::SizeForNumberOfSlots(kNumFunctions));
   static size_t allocate_page_size;
   static size_t commit_page_size;
 
@@ -169,6 +167,7 @@ class WasmCodeManagerTest : public TestWithContext,
     }
     CHECK_NE(0, allocate_page_size);
     CHECK_NE(0, commit_page_size);
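+    // Disable implicit allocations (such as jump tables) so the tests below
+    // can predict the exact amount of committed code space.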
+    manager()->DisableImplicitAllocationsForTesting();
   }
 
   using NativeModulePtr = std::shared_ptr<NativeModule>;
@@ -199,12 +198,6 @@ class WasmCodeManagerTest : public TestWithContext,
   void SetMaxCommittedMemory(size_t limit) {
     manager()->SetMaxCommittedMemoryForTesting(limit);
   }
-
-  void DisableWin64UnwindInfoForTesting() {
-#if defined(V8_OS_WIN_X64)
-    manager()->DisableWin64UnwindInfoForTesting();
-#endif
-  }
 };
 
 // static
@@ -219,18 +212,18 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
   SetMaxCommittedMemory(0);
   CHECK_EQ(0, manager()->committed_code_space());
 
-  ASSERT_DEATH_IF_SUPPORTED(AllocModule(allocate_page_size, GetParam()),
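+  // With implicit allocations disabled, reserving a module commits no code
+  // space; only adding code runs into the commit limit.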
+  NativeModulePtr native_module = AllocModule(allocate_page_size, GetParam());
+  ASSERT_DEATH_IF_SUPPORTED(AddCode(native_module.get(), 0, kCodeAlignment),
                             "OOM in wasm code commit");
 }
 
 TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
   SetMaxCommittedMemory(allocate_page_size);
-  DisableWin64UnwindInfoForTesting();
 
   CHECK_EQ(0, manager()->committed_code_space());
   NativeModulePtr native_module = AllocModule(allocate_page_size, GetParam());
   CHECK(native_module);
-  CHECK_EQ(commit_page_size, manager()->committed_code_space());
+  CHECK_EQ(0, manager()->committed_code_space());
   WasmCodeRefScope code_ref_scope;
   uint32_t index = 0;
   WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
@@ -242,7 +235,7 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
   CHECK_EQ(commit_page_size, manager()->committed_code_space());
 
   code = AddCode(native_module.get(), index++,
-                 allocate_page_size - 4 * kCodeAlignment - kJumpTableSize);
+                 allocate_page_size - 4 * kCodeAlignment);
   CHECK_NOT_NULL(code);
   CHECK_EQ(allocate_page_size, manager()->committed_code_space());
 
@@ -256,29 +249,25 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
 
 TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
   SetMaxCommittedMemory(3 * allocate_page_size);
-  DisableWin64UnwindInfoForTesting();
 
   NativeModulePtr nm1 = AllocModule(2 * allocate_page_size, GetParam());
   NativeModulePtr nm2 = AllocModule(2 * allocate_page_size, GetParam());
   CHECK(nm1);
   CHECK(nm2);
   WasmCodeRefScope code_ref_scope;
-  WasmCode* code =
-      AddCode(nm1.get(), 0, 2 * allocate_page_size - kJumpTableSize);
+  WasmCode* code = AddCode(nm1.get(), 0, 2 * allocate_page_size);
   CHECK_NOT_NULL(code);
-  ASSERT_DEATH_IF_SUPPORTED(
-      AddCode(nm2.get(), 0, 2 * allocate_page_size - kJumpTableSize),
-      "OOM in wasm code commit");
+  ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * allocate_page_size),
+                            "OOM in wasm code commit");
 }
 
 TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
   SetMaxCommittedMemory(3 * allocate_page_size);
-  DisableWin64UnwindInfoForTesting();
 
   NativeModulePtr nm = AllocModule(allocate_page_size, GetParam());
   size_t module_size =
       GetParam() == Fixed ? kMaxWasmCodeMemory : allocate_page_size;
-  size_t remaining_space_in_module = module_size - kJumpTableSize;
+  size_t remaining_space_in_module = module_size;
   if (GetParam() == Fixed) {
     // Requesting more than the remaining space fails because the module cannot
     // grow.
@@ -297,7 +286,6 @@ TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
 
 TEST_P(WasmCodeManagerTest, CommitIncrements) {
   SetMaxCommittedMemory(10 * allocate_page_size);
-  DisableWin64UnwindInfoForTesting();
 
   NativeModulePtr nm = AllocModule(3 * allocate_page_size, GetParam());
   WasmCodeRefScope code_ref_scope;
@@ -308,15 +296,13 @@ TEST_P(WasmCodeManagerTest, CommitIncrements) {
   CHECK_NOT_NULL(code);
   CHECK_EQ(commit_page_size + 2 * allocate_page_size,
            manager()->committed_code_space());
-  code = AddCode(nm.get(), 2,
-                 allocate_page_size - kCodeAlignment - kJumpTableSize);
+  code = AddCode(nm.get(), 2, allocate_page_size - kCodeAlignment);
   CHECK_NOT_NULL(code);
   CHECK_EQ(3 * allocate_page_size, manager()->committed_code_space());
 }
 
 TEST_P(WasmCodeManagerTest, Lookup) {
   SetMaxCommittedMemory(2 * allocate_page_size);
-  DisableWin64UnwindInfoForTesting();
 
   NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
   NativeModulePtr nm2 = AllocModule(allocate_page_size, GetParam());
@@ -362,7 +348,6 @@ TEST_P(WasmCodeManagerTest, Lookup) {
 
 TEST_P(WasmCodeManagerTest, LookupWorksAfterRewrite) {
   SetMaxCommittedMemory(2 * allocate_page_size);
-  DisableWin64UnwindInfoForTesting();
 
   NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
 
diff --git a/deps/v8/test/unittests/wasm/wasm-module-sourcemap-unittest.cc b/deps/v8/test/unittests/wasm/wasm-module-sourcemap-unittest.cc
new file mode 100644
index 00000000000000..04c611e1dedb71
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-module-sourcemap-unittest.cc
@@ -0,0 +1,224 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-module-sourcemap.h"
+#include <memory>
+
+#include "src/api/api.h"
+#include "test/common/wasm/flag-utils.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmModuleSourceMapTest : public TestWithIsolateAndZone {};
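+
+// The byte offsets asserted below are specific to the module for which the
+// "mappings" strings were generated (sources "./test.h" and "main.cpp").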
+
+TEST_F(WasmModuleSourceMapTest, InvalidSourceMap) {
+  auto i_isolate = isolate();
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+
+  // Incomplete source map without a "sources" entry.
+  char incomplete_src_map[] =
+      "{\"version\":3,\"names\":[],\"mappings\":\"6/"
+      "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto incomplete_src_map_str =
+      v8::String::NewFromUtf8(v8_isolate, incomplete_src_map).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> incomplete_src_map_ptr(
+      new WasmModuleSourceMap(v8_isolate, incomplete_src_map_str));
+  EXPECT_FALSE(incomplete_src_map_ptr->IsValid());
+
+  // Misspell the key "mappings" as "mapping".
+  char wrong_key[] =
+      "{\"version\":3,\"sources\":[\"./"
+      "test.h\",\"main.cpp\"],\"names\":[],\"mapping\":\"6/"
+      "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto wrong_key_str =
+      v8::String::NewFromUtf8(v8_isolate, wrong_key).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> wrong_key_ptr(
+      new WasmModuleSourceMap(v8_isolate, wrong_key_str));
+  EXPECT_FALSE(wrong_key_ptr->IsValid());
+
+  // Wrong version number.
+  char wrong_ver[] =
+      "{\"version\":2,\"sources\":[\"./"
+      "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+      "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto wrong_ver_str =
+      v8::String::NewFromUtf8(v8_isolate, wrong_ver).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> wrong_ver_ptr(
+      new WasmModuleSourceMap(v8_isolate, wrong_ver_str));
+  EXPECT_FALSE(wrong_ver_ptr->IsValid());
+
+  // Wrong type of "version" entry.
+  char ver_as_arr[] =
+      "{\"version\":[3],\"sources\":[\"./"
+      "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+      "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto ver_as_arr_str =
+      v8::String::NewFromUtf8(v8_isolate, ver_as_arr).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> ver_as_arr_ptr(
+      new WasmModuleSourceMap(v8_isolate, ver_as_arr_str));
+  EXPECT_FALSE(ver_as_arr_ptr->IsValid());
+
+  // Wrong type of "sources" entry.
+  char sources_as_str[] =
+      "{\"version\":3,\"sources\":\"./"
+      "test.h,main.cpp\",\"names\":[],\"mappings\":\"6/"
+      "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto sources_as_str_str =
+      v8::String::NewFromUtf8(v8_isolate, sources_as_str).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> sources_as_str_ptr(
+      new WasmModuleSourceMap(v8_isolate, sources_as_str_str));
+  EXPECT_FALSE(sources_as_str_ptr->IsValid());
+
+  // Invalid "mappings" entry.
+  char wrong_mappings[] =
+      "{\"version\":3,\"sources\":[\"./"
+      "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+      "&BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto wrong_mappings_str =
+      v8::String::NewFromUtf8(v8_isolate, wrong_mappings).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> wrong_mappings_ptr(
+      new WasmModuleSourceMap(v8_isolate, wrong_mappings_str));
+  EXPECT_FALSE(wrong_mappings_ptr->IsValid());
+}
+
+TEST_F(WasmModuleSourceMapTest, HasSource) {
+  char src_map[] =
+      "{\"version\":3,\"sources\":[\"./"
+      "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+      "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto i_isolate = isolate();
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+  auto src_map_str =
+      v8::String::NewFromUtf8(v8_isolate, src_map).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> src_map_ptr(
+      new WasmModuleSourceMap(v8_isolate, src_map_str));
+  EXPECT_TRUE(src_map_ptr->IsValid());
+
+  EXPECT_FALSE(src_map_ptr->HasSource(0x387, 0x3AF));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x3B0, 0x3B5));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x3B6, 0x3BC));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x3BD, 0x3C7));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x3C8, 0x3DA));
+  EXPECT_TRUE(src_map_ptr->HasSource(0x3DB, 0x414));
+  EXPECT_TRUE(src_map_ptr->HasSource(0x415, 0x44E));
+  EXPECT_TRUE(src_map_ptr->HasSource(0x450, 0x4DC));
+  EXPECT_TRUE(src_map_ptr->HasSource(0x4DE, 0x5F1));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x5F3, 0x437A));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x437C, 0x5507));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x5508, 0x5557));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x5559, 0x5609));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x560A, 0x563D));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x563E, 0x564A));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x564B, 0x5656));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x5658, 0x5713));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x5715, 0x59B0));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x59B1, 0x59BC));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x59BD, 0x59C6));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x59C7, 0x59D8));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x59D9, 0x59E7));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x59E9, 0x5B50));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x5B52, 0x5C53));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x5C54, 0x5C57));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x5C59, 0x5EBD));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x5EBF, 0x6030));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x6031, 0x608D));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x608E, 0x609E));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x609F, 0x60B3));
+  EXPECT_FALSE(src_map_ptr->HasSource(0x60B4, 0x60BD));
+}
+
+TEST_F(WasmModuleSourceMapTest, HasValidEntry) {
+  char src_map[] =
+      "{\"version\":3,\"sources\":[\"./"
+      "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+      "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto i_isolate = isolate();
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+  auto src_map_str =
+      v8::String::NewFromUtf8(v8_isolate, src_map).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> src_map_ptr(
+      new WasmModuleSourceMap(v8_isolate, src_map_str));
+  EXPECT_TRUE(src_map_ptr->IsValid());
+
+  EXPECT_FALSE(src_map_ptr->HasValidEntry(0x450, 0x467));
+  EXPECT_FALSE(src_map_ptr->HasValidEntry(0x450, 0x450));
+  EXPECT_TRUE(src_map_ptr->HasValidEntry(0x450, 0x47A));
+  EXPECT_TRUE(src_map_ptr->HasValidEntry(0x450, 0x4A9));
+  EXPECT_FALSE(src_map_ptr->HasValidEntry(0x4DE, 0x4F5));
+  EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x541));
+  EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x57D));
+  EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x5B7));
+  EXPECT_FALSE(src_map_ptr->HasValidEntry(0x4DE, 0x4DE));
+  EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x500));
+  EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x521));
+  EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x560));
+  EXPECT_TRUE(src_map_ptr->HasValidEntry(0x4DE, 0x597));
+}
+
+TEST_F(WasmModuleSourceMapTest, GetFilename) {
+  char src_map[] =
+      "{\"version\":3,\"sources\":[\"./"
+      "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+      "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto i_isolate = isolate();
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+  auto src_map_str =
+      v8::String::NewFromUtf8(v8_isolate, src_map).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> src_map_ptr(
+      new WasmModuleSourceMap(v8_isolate, src_map_str));
+  EXPECT_TRUE(src_map_ptr->IsValid());
+
+  EXPECT_STREQ("./test.h", src_map_ptr->GetFilename(0x47A).c_str());
+  EXPECT_STREQ("./test.h", src_map_ptr->GetFilename(0x4A9).c_str());
+  EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x500).c_str());
+  EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x521).c_str());
+  EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x541).c_str());
+  EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x560).c_str());
+  EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x57D).c_str());
+  EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x597).c_str());
+  EXPECT_STREQ("main.cpp", src_map_ptr->GetFilename(0x5B7).c_str());
+}
+
+TEST_F(WasmModuleSourceMapTest, SourceLine) {
+  char src_map[] =
+      "{\"version\":3,\"sources\":[\"./"
+      "test.h\",\"main.cpp\"],\"names\":[],\"mappings\":\"6/"
+      "BAGA,0DAIA,2DAIA,IAEA,+BACA,wCADA,mBAGA,4CCXA,6BACA,IACA,4BACA,gBADA,"
+      "mBAIA,4BACA,QADA,mBAIA,4BACA,gBADA,mBAVA,mBAcA\"}";
+  auto i_isolate = isolate();
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+  auto src_map_str =
+      v8::String::NewFromUtf8(v8_isolate, src_map).ToLocalChecked();
+  std::unique_ptr<WasmModuleSourceMap> src_map_ptr(
+      new WasmModuleSourceMap(v8_isolate, src_map_str));
+  EXPECT_TRUE(src_map_ptr->IsValid());
+
+  EXPECT_EQ(13ul, src_map_ptr->GetSourceLine(0x47A));
+  EXPECT_EQ(14ul, src_map_ptr->GetSourceLine(0x4A9));
+  EXPECT_EQ(5ul, src_map_ptr->GetSourceLine(0x500));
+  EXPECT_EQ(7ul, src_map_ptr->GetSourceLine(0x521));
+  EXPECT_EQ(8ul, src_map_ptr->GetSourceLine(0x541));
+  EXPECT_EQ(11ul, src_map_ptr->GetSourceLine(0x560));
+  EXPECT_EQ(12ul, src_map_ptr->GetSourceLine(0x57D));
+  EXPECT_EQ(15ul, src_map_ptr->GetSourceLine(0x597));
+  EXPECT_EQ(16ul, src_map_ptr->GetSourceLine(0x5B7));
+}
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/BUILD.gn b/deps/v8/test/wasm-api-tests/BUILD.gn
index d0e2c01ac9916b..5bc48f57da3daa 100644
--- a/deps/v8/test/wasm-api-tests/BUILD.gn
+++ b/deps/v8/test/wasm-api-tests/BUILD.gn
@@ -32,10 +32,13 @@ v8_executable("wasm_api_tests") {
     "callbacks.cc",
     "finalize.cc",
     "globals.cc",
+    "hostref.cc",
     "memory.cc",
+    "multi-return.cc",
     "reflect.cc",
     "run-all-wasm-api-tests.cc",
     "serialize.cc",
+    "startup-errors.cc",
     "table.cc",
     "threads.cc",
     "traps.cc",
diff --git a/deps/v8/test/wasm-api-tests/callbacks.cc b/deps/v8/test/wasm-api-tests/callbacks.cc
index 960fa726dd5be6..350a425d47456a 100644
--- a/deps/v8/test/wasm-api-tests/callbacks.cc
+++ b/deps/v8/test/wasm-api-tests/callbacks.cc
@@ -14,11 +14,11 @@ namespace wasm {
 
 namespace {
 
-own<Trap*> Stage2(void* env, const Val args[], Val results[]) {
+own<Trap> Stage2(void* env, const Val args[], Val results[]) {
   printf("Stage2...\n");
   WasmCapiTest* self = reinterpret_cast<WasmCapiTest*>(env);
   Func* stage3 = self->GetExportedFunction(1);
-  own<Trap*> trap = stage3->call(args, results);
+  own<Trap> trap = stage3->call(args, results);
   if (trap) {
     printf("Stage2: got exception: %s\n", trap->message().get());
   } else {
@@ -27,7 +27,7 @@ own<Trap*> Stage2(void* env, const Val args[], Val results[]) {
   return trap;
 }
 
-own<Trap*> Stage4_GC(void* env, const Val args[], Val results[]) {
+own<Trap> Stage4_GC(void* env, const Val args[], Val results[]) {
   printf("Stage4...\n");
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env);
   isolate->heap()->PreciseCollectAllGarbage(
@@ -57,7 +57,7 @@ class WasmCapiCallbacksTest : public WasmCapiTest {
   }
 
  private:
-  own<Func*> stage2_;
+  own<Func> stage2_;
 };
 
 }  // namespace
@@ -72,7 +72,7 @@ TEST_F(WasmCapiCallbacksTest, Trap) {
   Instantiate(imports);
   Val args[] = {Val::i32(42)};
   Val results[1];
-  own<Trap*> trap = GetExportedFunction(0)->call(args, results);
+  own<Trap> trap = GetExportedFunction(0)->call(args, results);
   EXPECT_NE(trap, nullptr);
   printf("Stage0: Got trap as expected: %s\n", trap->message().get());
 }
@@ -87,21 +87,21 @@ TEST_F(WasmCapiCallbacksTest, GC) {
 
   i::Isolate* isolate =
       reinterpret_cast<::wasm::StoreImpl*>(store())->i_isolate();
-  own<Func*> stage4 = Func::make(store(), cpp_i_i_sig(), Stage4_GC, isolate);
+  own<Func> stage4 = Func::make(store(), cpp_i_i_sig(), Stage4_GC, isolate);
   EXPECT_EQ(cpp_i_i_sig()->params().size(), stage4->type()->params().size());
   EXPECT_EQ(cpp_i_i_sig()->results().size(), stage4->type()->results().size());
   Extern* imports[] = {stage2(), stage4.get()};
   Instantiate(imports);
   Val args[] = {Val::i32(42)};
   Val results[1];
-  own<Trap*> trap = GetExportedFunction(0)->call(args, results);
+  own<Trap> trap = GetExportedFunction(0)->call(args, results);
   EXPECT_EQ(trap, nullptr);
   EXPECT_EQ(43, results[0].i32());
 }
 
 namespace {
 
-own<Trap*> FibonacciC(void* env, const Val args[], Val results[]) {
+own<Trap> FibonacciC(void* env, const Val args[], Val results[]) {
   int32_t x = args[0].i32();
   if (x == 0 || x == 1) {
     results[0] = Val::i32(x);
@@ -113,7 +113,7 @@ own<Trap*> FibonacciC(void* env, const Val args[], Val results[]) {
   // style, but this test intentionally ensures that it works if someone
   // insists on doing it.
   Val recursive_args[] = {Val::i32(x - 1)};
-  own<Trap*> trap = fibo_wasm->call(recursive_args, results);
+  own<Trap> trap = fibo_wasm->call(recursive_args, results);
   DCHECK_NULL(trap);
   int32_t x1 = results[0].i32();
   recursive_args[0] = Val::i32(x - 2);
@@ -148,20 +148,20 @@ TEST_F(WasmCapiTest, Recursion) {
   AddExportedFunction(CStrVector("fibonacci_wasm"), code_fibo,
                       sizeof(code_fibo), wasm_i_i_sig());
 
-  own<Func*> fibonacci = Func::make(store(), cpp_i_i_sig(), FibonacciC, this);
+  own<Func> fibonacci = Func::make(store(), cpp_i_i_sig(), FibonacciC, this);
   Extern* imports[] = {fibonacci.get()};
   Instantiate(imports);
   // Enough iterations to make it interesting, few enough to keep it fast.
   Val args[] = {Val::i32(15)};
   Val results[1];
-  own<Trap*> result = GetExportedFunction(0)->call(args, results);
+  own<Trap> result = GetExportedFunction(0)->call(args, results);
   EXPECT_EQ(result, nullptr);
   EXPECT_EQ(610, results[0].i32());
 }
 
 namespace {
 
-own<Trap*> PlusOne(const Val args[], Val results[]) {
+own<Trap> PlusOne(const Val args[], Val results[]) {
   int32_t a0 = args[0].i32();
   results[0] = Val::i32(a0 + 1);
   int64_t a1 = args[1].i64();
@@ -177,16 +177,16 @@ own<Trap*> PlusOne(const Val args[], Val results[]) {
 }  // namespace
 
 TEST_F(WasmCapiTest, DirectCallCapiFunction) {
-  own<FuncType*> cpp_sig =
-      FuncType::make(vec<ValType*>::make(
+  own<FuncType> cpp_sig =
+      FuncType::make(ownvec<ValType>::make(
                          ValType::make(::wasm::I32), ValType::make(::wasm::I64),
                          ValType::make(::wasm::F32), ValType::make(::wasm::F64),
                          ValType::make(::wasm::ANYREF)),
-                     vec<ValType*>::make(
+                     ownvec<ValType>::make(
                          ValType::make(::wasm::I32), ValType::make(::wasm::I64),
                          ValType::make(::wasm::F32), ValType::make(::wasm::F64),
                          ValType::make(::wasm::ANYREF)));
-  own<Func*> func = Func::make(store(), cpp_sig.get(), PlusOne);
+  own<Func> func = Func::make(store(), cpp_sig.get(), PlusOne);
   Extern* imports[] = {func.get()};
   ValueType wasm_types[] = {kWasmI32,    kWasmI64,   kWasmF32, kWasmF64,
                             kWasmAnyRef, kWasmI32,   kWasmI64, kWasmF32,
@@ -203,14 +203,13 @@ TEST_F(WasmCapiTest, DirectCallCapiFunction) {
                 Val::ref(func->copy())};
   Val results[5];
   // Test that {func} can be called directly.
-  own<Trap*> trap = func->call(args, results);
+  own<Trap> trap = func->call(args, results);
   EXPECT_EQ(nullptr, trap);
   EXPECT_EQ(a0 + 1, results[0].i32());
   EXPECT_EQ(a1 + 1, results[1].i64());
   EXPECT_EQ(a2 + 1, results[2].f32());
   EXPECT_EQ(a3 + 1, results[3].f64());
-  // TODO(jkummerow): Check that func == results[4] when we have a way
-  // to do so.
+  EXPECT_TRUE(func->same(results[4].ref()));
 
   // Test that {func} can be called after import/export round-tripping.
   trap = GetExportedFunction(0)->call(args, results);
@@ -219,8 +218,7 @@ TEST_F(WasmCapiTest, DirectCallCapiFunction) {
   EXPECT_EQ(a1 + 1, results[1].i64());
   EXPECT_EQ(a2 + 1, results[2].f32());
   EXPECT_EQ(a3 + 1, results[3].f64());
-  // TODO(jkummerow): Check that func == results[4] when we have a way
-  // to do so.
+  EXPECT_TRUE(func->same(results[4].ref()));
 }
 
 }  // namespace wasm
diff --git a/deps/v8/test/wasm-api-tests/finalize.cc b/deps/v8/test/wasm-api-tests/finalize.cc
index 7b3b976ca75af2..93da85460e1960 100644
--- a/deps/v8/test/wasm-api-tests/finalize.cc
+++ b/deps/v8/test/wasm-api-tests/finalize.cc
@@ -15,6 +15,8 @@ int g_functions_finalized = 0;
 int g_foreigns_finalized = 0;
 int g_modules_finalized = 0;
 
+const int kModuleMagic = 42;
+
 void FinalizeInstance(void* data) {
   int iteration = static_cast<int>(reinterpret_cast<intptr_t>(data));
   g_instances_finalized += iteration;
@@ -34,6 +36,31 @@ void FinalizeModule(void* data) {
   g_modules_finalized += static_cast<int>(reinterpret_cast<intptr_t>(data));
 }
 
+void RunInStore(Store* store, ZoneBuffer* wire_bytes, int iterations) {
+  size_t size = wire_bytes->end() - wire_bytes->begin();
+  vec<byte_t> binary = vec<byte_t>::make(
+      size, reinterpret_cast<byte_t*>(const_cast<byte*>(wire_bytes->begin())));
+  own<Module> module = Module::make(store, binary);
+  module->set_host_info(reinterpret_cast<void*>(kModuleMagic), &FinalizeModule);
+  for (int iteration = 0; iteration < iterations; iteration++) {
+    void* finalizer_data = reinterpret_cast<void*>(iteration);
+    own<Instance> instance = Instance::make(store, module.get(), nullptr);
+    EXPECT_NE(nullptr, instance.get());
+    instance->set_host_info(finalizer_data, &FinalizeInstance);
+
+    own<Func> func = instance->exports()[0]->func()->copy();
+    ASSERT_NE(func, nullptr);
+    func->set_host_info(finalizer_data, &FinalizeFunction);
+    Val args[] = {Val::i32(iteration)};
+    Val results[1];
+    func->call(args, results);
+    EXPECT_EQ(iteration, results[0].i32());
+
+    own<Foreign> foreign = Foreign::make(store);
+    foreign->set_host_info(finalizer_data, &FinalizeForeign);
+  }
+}
+
 }  // namespace
 
 TEST_F(WasmCapiTest, InstanceFinalization) {
@@ -45,31 +72,114 @@ TEST_F(WasmCapiTest, InstanceFinalization) {
   g_functions_finalized = 0;
   g_foreigns_finalized = 0;
   g_modules_finalized = 0;
-  module()->set_host_info(reinterpret_cast<void*>(42), &FinalizeModule);
+  module()->set_host_info(reinterpret_cast<void*>(kModuleMagic),
+                          &FinalizeModule);
   static const int kIterations = 10;
-  for (int iteration = 0; iteration < kIterations; iteration++) {
-    void* finalizer_data = reinterpret_cast<void*>(iteration);
-    own<Instance*> instance = Instance::make(store(), module(), nullptr);
-    EXPECT_NE(nullptr, instance.get());
-    instance->set_host_info(finalizer_data, &FinalizeInstance);
-
-    own<Func*> func = instance->exports()[0]->func()->copy();
-    ASSERT_NE(func, nullptr);
-    func->set_host_info(finalizer_data, &FinalizeFunction);
-
-    own<Foreign*> foreign = Foreign::make(store());
-    foreign->set_host_info(finalizer_data, &FinalizeForeign);
+  RunInStore(store(), wire_bytes(), kIterations);
+  {
+    own<Store> store2 = Store::make(engine());
+    RunInStore(store2.get(), wire_bytes(), kIterations);
   }
+  RunInStore(store(), wire_bytes(), kIterations);
   Shutdown();
   // Verify that (1) all finalizers were called, and (2) they passed the
   // correct host data: the loop above sets {iteration} as data, and the finalizer
-  // callbacks add them all up, so the expected value is
-  // sum([0, 1, ..., kIterations - 1]), which per Gauss's formula is:
-  static const int kExpected = (kIterations * (kIterations - 1)) / 2;
+  // callbacks add them all up, so the expected value after three rounds is
+  // 3 * sum([0, 1, ..., kIterations - 1]), which per Gauss's formula is:
+  static const int kExpected = 3 * ((kIterations * (kIterations - 1)) / 2);
   EXPECT_EQ(g_instances_finalized, kExpected);
+  // There are two functions per iteration.
   EXPECT_EQ(g_functions_finalized, kExpected);
   EXPECT_EQ(g_foreigns_finalized, kExpected);
-  EXPECT_EQ(g_modules_finalized, 42);
+  EXPECT_EQ(g_modules_finalized, 4 * kModuleMagic);
+}
+
+namespace {
+
+own<Trap> CapiFunction(void* env, const Val args[], Val results[]) {
+  int offset = static_cast<int>(reinterpret_cast<intptr_t>(env));
+  results[0] = Val::i32(offset + args[0].i32());
+  return nullptr;
+}
+
+int g_host_data_finalized = 0;
+int g_capi_function_finalized = 0;
+
+void FinalizeCapiFunction(void* data) {
+  int value = static_cast<int>(reinterpret_cast<intptr_t>(data));
+  g_capi_function_finalized += value;
+}
+
+void FinalizeHostData(void* data) {
+  g_host_data_finalized += static_cast<int>(reinterpret_cast<intptr_t>(data));
+}
+
+}  // namespace
+
+TEST_F(WasmCapiTest, CapiFunctionLifetimes) {
+  uint32_t func_index = builder()->AddImport(CStrVector("f"), wasm_i_i_sig());
+  builder()->ExportImportedFunction(CStrVector("f"), func_index);
+  Compile();
+  own<Instance> instance;
+  void* kHostData = reinterpret_cast<void*>(1234);
+  int base_summand = 1000;
+  {
+    // Test that the own<> pointers for Func and FuncType can go out of scope
+    // without affecting the ability of the Func to be called later.
+    own<FuncType> capi_func_type =
+        FuncType::make(ownvec<ValType>::make(ValType::make(::wasm::I32)),
+                       ownvec<ValType>::make(ValType::make(::wasm::I32)));
+    own<Func> capi_func =
+        Func::make(store(), capi_func_type.get(), &CapiFunction,
+                   reinterpret_cast<void*>(base_summand));
+    Extern* imports[] = {capi_func.get()};
+    instance = Instance::make(store(), module(), imports);
+    // TODO(jkummerow): It may or may not be desirable to be able to set
+    // host data even here and have it survive the import/export dance.
+    // We are awaiting resolution of the discussion at:
+    // https://github.com/WebAssembly/wasm-c-api/issues/100
+  }
+  {
+    ownvec<Extern> exports = instance->exports();
+    Func* exported_func = exports[0]->func();
+    constexpr int kArg = 123;
+    Val args[] = {Val::i32(kArg)};
+    Val results[1];
+    exported_func->call(args, results);
+    EXPECT_EQ(base_summand + kArg, results[0].i32());
+    // Host data should survive destruction of the own<> pointer.
+    exported_func->set_host_info(kHostData);
+  }
+  {
+    ownvec<Extern> exports = instance->exports();
+    Func* exported_func = exports[0]->func();
+    EXPECT_EQ(kHostData, exported_func->get_host_info());
+  }
+  // Test that a Func can have its own internal metadata, an {env}, and
+  // separate {host info}, without any of that interfering with each other.
+  g_host_data_finalized = 0;
+  g_capi_function_finalized = 0;
+  base_summand = 23;
+  constexpr int kFinalizerData = 345;
+  {
+    own<FuncType> capi_func_type =
+        FuncType::make(ownvec<ValType>::make(ValType::make(::wasm::I32)),
+                       ownvec<ValType>::make(ValType::make(::wasm::I32)));
+    own<Func> capi_func = Func::make(
+        store(), capi_func_type.get(), &CapiFunction,
+        reinterpret_cast<void*>(base_summand), &FinalizeCapiFunction);
+    capi_func->set_host_info(reinterpret_cast<void*>(kFinalizerData),
+                             &FinalizeHostData);
+    constexpr int kArg = 19;
+    Val args[] = {Val::i32(kArg)};
+    Val results[1];
+    capi_func->call(args, results);
+    EXPECT_EQ(base_summand + kArg, results[0].i32());
+  }
+  instance.reset();
+  Shutdown();
+  EXPECT_EQ(base_summand, g_capi_function_finalized);
+  EXPECT_EQ(kFinalizerData, g_host_data_finalized);
 }
 
 }  // namespace wasm
diff --git a/deps/v8/test/wasm-api-tests/globals.cc b/deps/v8/test/wasm-api-tests/globals.cc
index d47f326cc4a50e..a41cf4c9bce8c5 100644
--- a/deps/v8/test/wasm-api-tests/globals.cc
+++ b/deps/v8/test/wasm-api-tests/globals.cc
@@ -90,21 +90,21 @@ TEST_F(WasmCapiTest, Globals) {
                       &param_i64);
 
   // Create imported globals.
-  own<GlobalType*> const_f32_type =
+  own<GlobalType> const_f32_type =
       GlobalType::make(ValType::make(::wasm::F32), ::wasm::CONST);
-  own<GlobalType*> const_i64_type =
+  own<GlobalType> const_i64_type =
       GlobalType::make(ValType::make(::wasm::I64), ::wasm::CONST);
-  own<GlobalType*> var_f32_type =
+  own<GlobalType> var_f32_type =
       GlobalType::make(ValType::make(::wasm::F32), ::wasm::VAR);
-  own<GlobalType*> var_i64_type =
+  own<GlobalType> var_i64_type =
       GlobalType::make(ValType::make(::wasm::I64), ::wasm::VAR);
-  own<Global*> const_f32_import =
+  own<Global> const_f32_import =
       Global::make(store(), const_f32_type.get(), Val::f32(1));
-  own<Global*> const_i64_import =
+  own<Global> const_i64_import =
       Global::make(store(), const_i64_type.get(), Val::i64(2));
-  own<Global*> var_f32_import =
+  own<Global> var_f32_import =
       Global::make(store(), var_f32_type.get(), Val::f32(3));
-  own<Global*> var_i64_import =
+  own<Global> var_i64_import =
       Global::make(store(), var_i64_type.get(), Val::i64(4));
   Extern* imports[] = {const_f32_import.get(), const_i64_import.get(),
                        var_f32_import.get(), var_i64_import.get()};
@@ -130,6 +130,9 @@ TEST_F(WasmCapiTest, Globals) {
   Func* set_var_f32_export = GetExportedFunction(i++);
   Func* set_var_i64_export = GetExportedFunction(i++);
 
+  // Try cloning.
+  EXPECT_TRUE(var_f32_import->copy()->same(var_f32_import.get()));
+
   // Check initial values.
   EXPECT_EQ(1.f, const_f32_import->get().f32());
   EXPECT_EQ(2, const_i64_import->get().i64());
diff --git a/deps/v8/test/wasm-api-tests/hostref.cc b/deps/v8/test/wasm-api-tests/hostref.cc
new file mode 100644
index 00000000000000..d40cb25d8feca6
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/hostref.cc
@@ -0,0 +1,178 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+#include <iostream>
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using ::wasm::Frame;
+using ::wasm::Message;
+
+namespace {
+
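+// Returns its single anyref argument unchanged.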
+own<Trap> IdentityCallback(const Val args[], Val results[]) {
+  results[0] = args[0].copy();
+  return nullptr;
+}
+
+}  // namespace
+
+TEST_F(WasmCapiTest, HostRef) {
+  ValueType rr_reps[] = {kWasmAnyRef, kWasmAnyRef};
+  ValueType ri_reps[] = {kWasmAnyRef, kWasmI32};
+  ValueType ir_reps[] = {kWasmI32, kWasmAnyRef};
+  // Naming convention: result_params_sig.
+  FunctionSig r_r_sig(1, 1, rr_reps);
+  FunctionSig v_r_sig(0, 1, rr_reps);
+  FunctionSig r_v_sig(1, 0, rr_reps);
+  FunctionSig v_ir_sig(0, 2, ir_reps);
+  FunctionSig r_i_sig(1, 1, ri_reps);
+  uint32_t func_index = builder()->AddImport(CStrVector("f"), &r_r_sig);
+  const bool kMutable = true;
+  const WasmInitExpr global_init(WasmInitExpr::kRefNullConst, 0);
+  uint32_t global_index = builder()->AddExportedGlobal(
+      kWasmAnyRef, kMutable, global_init, CStrVector("global"));
+  uint32_t table_index = builder()->AddTable(kWasmAnyRef, 10);
+  builder()->AddExport(CStrVector("table"), kExternalTable, table_index);
+  byte global_set_code[] = {WASM_SET_GLOBAL(global_index, WASM_GET_LOCAL(0))};
+  AddExportedFunction(CStrVector("global.set"), global_set_code,
+                      sizeof(global_set_code), &v_r_sig);
+  byte global_get_code[] = {WASM_GET_GLOBAL(global_index)};
+  AddExportedFunction(CStrVector("global.get"), global_get_code,
+                      sizeof(global_get_code), &r_v_sig);
+  byte table_set_code[] = {
+      WASM_TABLE_SET(table_index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
+  AddExportedFunction(CStrVector("table.set"), table_set_code,
+                      sizeof(table_set_code), &v_ir_sig);
+  byte table_get_code[] = {WASM_TABLE_GET(table_index, WASM_GET_LOCAL(0))};
+  AddExportedFunction(CStrVector("table.get"), table_get_code,
+                      sizeof(table_get_code), &r_i_sig);
+  byte func_call_code[] = {WASM_CALL_FUNCTION(func_index, WASM_GET_LOCAL(0))};
+  AddExportedFunction(CStrVector("func.call"), func_call_code,
+                      sizeof(func_call_code), &r_r_sig);
+
+  own<FuncType> func_type =
+      FuncType::make(ownvec<ValType>::make(ValType::make(::wasm::ANYREF)),
+                     ownvec<ValType>::make(ValType::make(::wasm::ANYREF)));
+  own<Func> callback = Func::make(store(), func_type.get(), IdentityCallback);
+  Extern* imports[] = {callback.get()};
+  Instantiate(imports);
+
+  Global* global = GetExportedGlobal(0);
+  Table* table = GetExportedTable(1);
+  const Func* global_set = GetExportedFunction(2);
+  const Func* global_get = GetExportedFunction(3);
+  const Func* table_set = GetExportedFunction(4);
+  const Func* table_get = GetExportedFunction(5);
+  const Func* func_call = GetExportedFunction(6);
+
+  own<Foreign> host1 = Foreign::make(store());
+  own<Foreign> host2 = Foreign::make(store());
+  host1->set_host_info(reinterpret_cast<void*>(1));
+  host2->set_host_info(reinterpret_cast<void*>(2));
+
+  // Basic checks.
+  EXPECT_TRUE(host1->copy()->same(host1.get()));
+  EXPECT_TRUE(host2->copy()->same(host2.get()));
+  Val val = Val::ref(host1->copy());
+  EXPECT_TRUE(val.ref()->copy()->same(host1.get()));
+  own<Ref> ref = val.release_ref();
+  EXPECT_EQ(nullptr, val.ref());
+  EXPECT_TRUE(ref->copy()->same(host1.get()));
+
+  // Interact with the Global.
+  Val args[2];
+  Val results[1];
+  own<Trap> trap = global_get->call(nullptr, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_EQ(nullptr, results[0].release_ref());
+  args[0] = Val::ref(host1.get()->copy());
+  trap = global_set->call(args, nullptr);
+  EXPECT_EQ(nullptr, trap);
+  trap = global_get->call(nullptr, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_TRUE(results[0].release_ref()->same(host1.get()));
+  args[0] = Val::ref(host2.get()->copy());
+  trap = global_set->call(args, nullptr);
+  EXPECT_EQ(nullptr, trap);
+  trap = global_get->call(nullptr, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_TRUE(results[0].release_ref()->same(host2.get()));
+  args[0] = Val::ref(own<Ref>());
+  trap = global_set->call(args, nullptr);
+  EXPECT_EQ(nullptr, trap);
+  trap = global_get->call(nullptr, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_EQ(nullptr, results[0].release_ref());
+
+  EXPECT_EQ(nullptr, global->get().release_ref());
+  global->set(Val(host2->copy()));
+  trap = global_get->call(nullptr, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_TRUE(results[0].release_ref()->same(host2.get()));
+  EXPECT_TRUE(global->get().release_ref()->same(host2.get()));
+
+  // Interact with the Table.
+  args[0] = Val::i32(0);
+  trap = table_get->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_EQ(nullptr, results[0].release_ref());
+  args[0] = Val::i32(1);
+  trap = table_get->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_EQ(nullptr, results[0].release_ref());
+  args[0] = Val::i32(0);
+  args[1] = Val::ref(host1.get()->copy());
+  trap = table_set->call(args, nullptr);
+  EXPECT_EQ(nullptr, trap);
+  args[0] = Val::i32(1);
+  args[1] = Val::ref(host2.get()->copy());
+  trap = table_set->call(args, nullptr);
+  EXPECT_EQ(nullptr, trap);
+  args[0] = Val::i32(0);
+  trap = table_get->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_TRUE(results[0].release_ref()->same(host1.get()));
+  args[0] = Val::i32(1);
+  trap = table_get->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_TRUE(results[0].release_ref()->same(host2.get()));
+  args[0] = Val::i32(0);
+  args[1] = Val::ref(own<Ref>());
+  trap = table_set->call(args, nullptr);
+  EXPECT_EQ(nullptr, trap);
+  trap = table_get->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_EQ(nullptr, results[0].release_ref());
+
+  EXPECT_EQ(nullptr, table->get(2));
+  table->set(2, host1.get());
+  args[0] = Val::i32(2);
+  trap = table_get->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_TRUE(results[0].release_ref()->same(host1.get()));
+  EXPECT_TRUE(table->get(2)->same(host1.get()));
+
+  // Interact with the Function.
+  args[0] = Val::ref(own<Ref>());
+  trap = func_call->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_EQ(nullptr, results[0].release_ref());
+  args[0] = Val::ref(host1.get()->copy());
+  trap = func_call->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_TRUE(results[0].release_ref()->same(host1.get()));
+  args[0] = Val::ref(host2.get()->copy());
+  trap = func_call->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_TRUE(results[0].release_ref()->same(host2.get()));
+}
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/memory.cc b/deps/v8/test/wasm-api-tests/memory.cc
index aec4cf8cccfdd3..131bdb96e3173f 100644
--- a/deps/v8/test/wasm-api-tests/memory.cc
+++ b/deps/v8/test/wasm-api-tests/memory.cc
@@ -41,6 +41,9 @@ TEST_F(WasmCapiTest, Memory) {
   Func* load_func = GetExportedFunction(2);
   Func* store_func = GetExportedFunction(3);
 
+  // Try cloning.
+  EXPECT_TRUE(memory->copy()->same(memory));
+
   // Check initial state.
   EXPECT_EQ(2u, memory->size());
   EXPECT_EQ(0x20000u, memory->data_size());
@@ -70,7 +73,7 @@ TEST_F(WasmCapiTest, Memory) {
   EXPECT_EQ(0, result[0].i32());
   // load(0x20000) -> trap
   args[0] = Val::i32(0x20000);
-  own<Trap*> trap = load_func->call(args, result);
+  own<Trap> trap = load_func->call(args, result);
   EXPECT_NE(nullptr, trap.get());
 
   // Mutate memory.
@@ -111,8 +114,8 @@ TEST_F(WasmCapiTest, Memory) {
 
   // Create standalone memory.
   // TODO(wasm): Once Wasm allows multiple memories, turn this into an import.
-  own<MemoryType*> mem_type = MemoryType::make(Limits(5, 5));
-  own<Memory*> memory2 = Memory::make(store(), mem_type.get());
+  own<MemoryType> mem_type = MemoryType::make(Limits(5, 5));
+  own<Memory> memory2 = Memory::make(store(), mem_type.get());
   EXPECT_EQ(5u, memory2->size());
   EXPECT_EQ(false, memory2->grow(1));
   EXPECT_EQ(true, memory2->grow(0));
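A minimal sketch of the fixed-size standalone memory created above, assuming only the wasm.hh calls already shown: with `Limits(5, 5)` the minimum equals the maximum, so any non-zero growth must fail.

```cpp
#include <cassert>
#include "third_party/wasm-api/wasm.hh"

using ::wasm::Limits;
using ::wasm::Memory;
using ::wasm::MemoryType;
using ::wasm::own;
using ::wasm::Store;

// Limits(5, 5) pins both minimum and maximum at 5 wasm pages (64 KiB each).
void FixedSizeMemoryDemo(Store* store) {
  own<MemoryType> type = MemoryType::make(Limits(5, 5));
  own<Memory> memory = Memory::make(store, type.get());
  assert(memory->size() == 5);                  // size() counts wasm pages
  assert(memory->data_size() == 5 * 0x10000u);  // bytes = pages * 64 KiB
  assert(!memory->grow(1));                     // would exceed the maximum
  assert(memory->grow(0));                      // zero growth is a no-op
}
```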
diff --git a/deps/v8/test/wasm-api-tests/multi-return.cc b/deps/v8/test/wasm-api-tests/multi-return.cc
new file mode 100644
index 00000000000000..2cd624d05346f4
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/multi-return.cc
@@ -0,0 +1,58 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using ::wasm::I32;
+using ::wasm::I64;
+
+namespace {
+
+own<Trap> Callback(const Val args[], Val results[]) {
+  results[0] = args[3].copy();
+  results[1] = args[1].copy();
+  results[2] = args[2].copy();
+  results[3] = args[0].copy();
+  return nullptr;
+}
+
+}  // namespace
+
+TEST_F(WasmCapiTest, MultiReturn) {
+  ValueType reps[] = {kWasmI32, kWasmI64, kWasmI64, kWasmI32,
+                      kWasmI32, kWasmI64, kWasmI64, kWasmI32};
+  FunctionSig sig(4, 4, reps);
+  uint32_t func_index = builder()->AddImport(CStrVector("f"), &sig);
+  byte code[] = {WASM_CALL_FUNCTION(func_index, WASM_GET_LOCAL(0),
+                                    WASM_GET_LOCAL(2), WASM_GET_LOCAL(1),
+                                    WASM_GET_LOCAL(3))};
+  AddExportedFunction(CStrVector("g"), code, sizeof(code), &sig);
+
+  ownvec<ValType> types =
+      ownvec<ValType>::make(ValType::make(I32), ValType::make(I64),
+                            ValType::make(I64), ValType::make(I32));
+  own<FuncType> func_type =
+      FuncType::make(types.deep_copy(), types.deep_copy());
+  own<Func> callback = Func::make(store(), func_type.get(), Callback);
+  Extern* imports[] = {callback.get()};
+  Instantiate(imports);
+
+  Func* run_func = GetExportedFunction(0);
+  Val args[] = {Val::i32(1), Val::i64(2), Val::i64(3), Val::i32(4)};
+  Val results[4];
+  own<Trap> trap = run_func->call(args, results);
+  EXPECT_EQ(nullptr, trap);
+  EXPECT_EQ(4, results[0].i32());
+  EXPECT_EQ(3, results[1].i64());
+  EXPECT_EQ(2, results[2].i64());
+  EXPECT_EQ(1, results[3].i32());
+}
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
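As a sanity check on the expectations above: the wasm function "g" forwards its locals in the order (0, 2, 1, 3), and the host Callback then returns (args[3], args[1], args[2], args[0]); the two permutations compose to a full reversal. A standalone sketch of that composition:

```cpp
#include <array>
#include <cassert>

int main() {
  const std::array<int, 4> in = {1, 2, 3, 4};  // the i32/i64 values passed in
  // wasm "g" forwards locals (0, 2, 1, 3) to the imported function.
  const std::array<int, 4> to_import = {in[0], in[2], in[1], in[3]};
  // The host Callback returns (args[3], args[1], args[2], args[0]).
  const std::array<int, 4> out = {to_import[3], to_import[1], to_import[2],
                                  to_import[0]};
  assert((out == std::array<int, 4>{4, 3, 2, 1}));  // matches the EXPECT_EQs
  return 0;
}
```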
diff --git a/deps/v8/test/wasm-api-tests/reflect.cc b/deps/v8/test/wasm-api-tests/reflect.cc
index a7def627db4616..dee3334155f152 100644
--- a/deps/v8/test/wasm-api-tests/reflect.cc
+++ b/deps/v8/test/wasm-api-tests/reflect.cc
@@ -51,8 +51,8 @@ TEST_F(WasmCapiTest, Reflect) {
 
   Instantiate(nullptr);
 
-  vec<ExportType*> export_types = module()->exports();
-  const vec<Extern*>& exports = this->exports();
+  ownvec<ExportType> export_types = module()->exports();
+  const ownvec<Extern>& exports = this->exports();
   EXPECT_EQ(exports.size(), export_types.size());
   EXPECT_EQ(4u, exports.size());
   for (size_t i = 0; i < exports.size(); i++) {
@@ -62,13 +62,13 @@ TEST_F(WasmCapiTest, Reflect) {
     if (kind == ::wasm::EXTERN_FUNC) {
       ExpectName(kFuncName, export_types[i]->name());
       const FuncType* type = extern_type->func();
-      const vec<ValType*>& params = type->params();
+      const ownvec<ValType>& params = type->params();
       EXPECT_EQ(4u, params.size());
       EXPECT_EQ(::wasm::I32, params[0]->kind());
       EXPECT_EQ(::wasm::I64, params[1]->kind());
       EXPECT_EQ(::wasm::F32, params[2]->kind());
       EXPECT_EQ(::wasm::F64, params[3]->kind());
-      const vec<ValType*>& results = type->results();
+      const ownvec<ValType>& results = type->results();
       EXPECT_EQ(2u, results.size());
       EXPECT_EQ(::wasm::I32, results[0]->kind());
       EXPECT_EQ(::wasm::ANYREF, results[1]->kind());
diff --git a/deps/v8/test/wasm-api-tests/serialize.cc b/deps/v8/test/wasm-api-tests/serialize.cc
index 5f10980cf2ac16..710f1236256581 100644
--- a/deps/v8/test/wasm-api-tests/serialize.cc
+++ b/deps/v8/test/wasm-api-tests/serialize.cc
@@ -12,7 +12,7 @@ namespace {
 
 bool g_callback_called;
 
-own<Trap*> Callback(const Val args[], Val results[]) {
+own<Trap> Callback(const Val args[], Val results[]) {
   g_callback_called = true;
   return nullptr;
 }
@@ -27,16 +27,15 @@ TEST_F(WasmCapiTest, Serialize) {
   Compile();
 
   vec<byte_t> serialized = module()->serialize();
-  own<Module*> deserialized = Module::deserialize(store(), serialized);
+  own<Module> deserialized = Module::deserialize(store(), serialized);
 
-  own<FuncType*> callback_type =
-      FuncType::make(vec<ValType*>::make(), vec<ValType*>::make());
-  own<Func*> callback = Func::make(store(), callback_type.get(), Callback);
+  own<FuncType> callback_type =
+      FuncType::make(ownvec<ValType>::make(), ownvec<ValType>::make());
+  own<Func> callback = Func::make(store(), callback_type.get(), Callback);
   Extern* imports[] = {callback.get()};
 
-  own<Instance*> instance =
-      Instance::make(store(), deserialized.get(), imports);
-  vec<Extern*> exports = instance->exports();
+  own<Instance> instance = Instance::make(store(), deserialized.get(), imports);
+  ownvec<Extern> exports = instance->exports();
   Func* run = exports[0]->func();
   g_callback_called = false;
   run->call();
diff --git a/deps/v8/test/wasm-api-tests/startup-errors.cc b/deps/v8/test/wasm-api-tests/startup-errors.cc
new file mode 100644
index 00000000000000..d2187e569a6c49
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/startup-errors.cc
@@ -0,0 +1,62 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+
+own<Trap> DummyCallback(const Val args[], Val results[]) { return nullptr; }
+
+}  // namespace
+
+TEST_F(WasmCapiTest, StartupErrors) {
+  FunctionSig sig(0, 0, nullptr);
+  byte code[] = {WASM_UNREACHABLE};
+  WasmFunctionBuilder* start_func = builder()->AddFunction(&sig);
+  start_func->EmitCode(code, static_cast<uint32_t>(sizeof(code)));
+  start_func->Emit(kExprEnd);
+  builder()->MarkStartFunction(start_func);
+  builder()->AddImport(CStrVector("dummy"), &sig);
+  Compile();
+  own<Trap> trap;
+
+  // Try to make an Instance with non-matching imports.
+  own<Func> bad_func = Func::make(store(), cpp_i_i_sig(), DummyCallback);
+  Extern* bad_imports[] = {bad_func.get()};
+  own<Instance> instance =
+      Instance::make(store(), module(), bad_imports, &trap);
+  EXPECT_EQ(nullptr, instance);
+  EXPECT_NE(nullptr, trap);
+  EXPECT_STREQ(
+      "Uncaught LinkError: instantiation: Import #0 module=\"\" "
+      "function=\"dummy\" "
+      "error: imported function does not match the expected type",
+      trap->message().get());
+  EXPECT_EQ(nullptr, trap->origin());
+  // Don't crash if there is no {trap}.
+  instance = Instance::make(store(), module(), bad_imports, nullptr);
+  EXPECT_EQ(nullptr, instance);
+
+  // Try to make an instance with a {start} function that traps.
+  own<FuncType> good_sig =
+      FuncType::make(ownvec<ValType>::make(), ownvec<ValType>::make());
+  own<Func> good_func = Func::make(store(), good_sig.get(), DummyCallback);
+  Extern* good_imports[] = {good_func.get()};
+  instance = Instance::make(store(), module(), good_imports, &trap);
+  EXPECT_EQ(nullptr, instance);
+  EXPECT_NE(nullptr, trap);
+  EXPECT_STREQ("Uncaught RuntimeError: unreachable", trap->message().get());
+  EXPECT_NE(nullptr, trap->origin());
+  // Don't crash if there is no {trap}.
+  instance = Instance::make(store(), module(), good_imports, nullptr);
+  EXPECT_EQ(nullptr, instance);
+}
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
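A minimal sketch of the error-reporting contract this test exercises, assuming the wasm.hh API shown above: instantiation failures surface through the optional `own<Trap>*` out-parameter, and `origin()` distinguishes link errors from a trapping start function.

```cpp
#include <iostream>
#include "third_party/wasm-api/wasm.hh"

using ::wasm::Extern;
using ::wasm::Instance;
using ::wasm::Module;
using ::wasm::own;
using ::wasm::Store;
using ::wasm::Trap;

// Instance::make reports both link errors and trapping start functions
// through the optional own<Trap>* out-parameter; passing nullptr instead
// simply drops the error details.
own<Instance> TryInstantiate(Store* store, Module* module, Extern* imports[]) {
  own<Trap> trap;
  own<Instance> instance = Instance::make(store, module, imports, &trap);
  if (!instance && trap) {
    // e.g. "Uncaught LinkError: ..." or "Uncaught RuntimeError: unreachable".
    std::cerr << trap->message().get() << "\n";
    // trap->origin() is null for link errors but set for a trapping start.
  }
  return instance;
}
```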
diff --git a/deps/v8/test/wasm-api-tests/table.cc b/deps/v8/test/wasm-api-tests/table.cc
index 17ddfa2f0b9764..bfd5e14a8db460 100644
--- a/deps/v8/test/wasm-api-tests/table.cc
+++ b/deps/v8/test/wasm-api-tests/table.cc
@@ -14,7 +14,7 @@ using ::wasm::TableType;
 
 namespace {
 
-own<Trap*> Negate(const Val args[], Val results[]) {
+own<Trap> Negate(const Val args[], Val results[]) {
   results[0] = Val(-args[0].i32());
   return nullptr;
 }
@@ -22,14 +22,14 @@ own<Trap*> Negate(const Val args[], Val results[]) {
 void ExpectTrap(const Func* func, int arg1, int arg2) {
   Val args[2] = {Val::i32(arg1), Val::i32(arg2)};
   Val results[1];
-  own<Trap*> trap = func->call(args, results);
+  own<Trap> trap = func->call(args, results);
   EXPECT_NE(nullptr, trap);
 }
 
 void ExpectResult(int expected, const Func* func, int arg1, int arg2) {
   Val args[2] = {Val::i32(arg1), Val::i32(arg2)};
   Val results[1];
-  own<Trap*> trap = func->call(args, results);
+  own<Trap> trap = func->call(args, results);
   EXPECT_EQ(nullptr, trap);
   EXPECT_EQ(expected, results[0].i32());
 }
@@ -60,7 +60,10 @@ TEST_F(WasmCapiTest, Table) {
   Func* call_indirect = GetExportedFunction(1);
   Func* f = GetExportedFunction(2);
   Func* g = GetExportedFunction(3);
-  own<Func*> h = Func::make(store(), cpp_i_i_sig(), Negate);
+  own<Func> h = Func::make(store(), cpp_i_i_sig(), Negate);
+
+  // Try cloning.
+  EXPECT_TRUE(table->copy()->same(table));
 
   // Check initial table state.
   EXPECT_EQ(2u, table->size());
@@ -103,9 +106,9 @@ TEST_F(WasmCapiTest, Table) {
 
   // Create standalone table.
   // TODO(wasm+): Once Wasm allows multiple tables, turn this into an import.
-  own<TableType*> tabletype =
+  own<TableType> tabletype =
       TableType::make(ValType::make(FUNCREF), Limits(5, 5));
-  own<Table*> table2 = Table::make(store(), tabletype.get());
+  own<Table> table2 = Table::make(store(), tabletype.get());
   EXPECT_EQ(5u, table2->size());
   EXPECT_FALSE(table2->grow(1));
   EXPECT_TRUE(table2->grow(0));
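The standalone table mirrors the fixed-size memory case; a minimal sketch under the same wasm.hh assumptions:

```cpp
#include <cassert>
#include "third_party/wasm-api/wasm.hh"

using ::wasm::FUNCREF;
using ::wasm::Limits;
using ::wasm::own;
using ::wasm::Store;
using ::wasm::Table;
using ::wasm::TableType;
using ::wasm::ValType;

// As with memory, (5, 5) limits pin the table size.
void FixedSizeTableDemo(Store* store) {
  own<TableType> type = TableType::make(ValType::make(FUNCREF), Limits(5, 5));
  own<Table> table = Table::make(store, type.get());
  assert(table->size() == 5);
  assert(!table->grow(1));  // fixed-size table cannot grow
  assert(table->grow(0));   // growing by zero is always allowed
}
```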
diff --git a/deps/v8/test/wasm-api-tests/threads.cc b/deps/v8/test/wasm-api-tests/threads.cc
index c93afc4a89579e..5fb4309c4be89e 100644
--- a/deps/v8/test/wasm-api-tests/threads.cc
+++ b/deps/v8/test/wasm-api-tests/threads.cc
@@ -19,26 +19,27 @@ const int kNumThreads = 10;
 const int kIterationsPerThread = 3;
 int g_traces;
 
-own<Trap*> Callback(void* env, const Val args[], Val results[]) {
+own<Trap> Callback(void* env, const Val args[], Val results[]) {
   std::lock_guard<std::mutex> lock(*reinterpret_cast<std::mutex*>(env));
   g_traces += args[0].i32();
   return nullptr;
 }
 
 void Main(Engine* engine, Shared<Module>* shared, std::mutex* mutex, int id) {
-  own<Store*> store = Store::make(engine);
-  own<Module*> module = Module::obtain(store.get(), shared);
+  own<Store> store = Store::make(engine);
+  own<Module> module = Module::obtain(store.get(), shared);
   EXPECT_NE(nullptr, module.get());
   for (int i = 0; i < kIterationsPerThread; i++) {
     std::this_thread::sleep_for(std::chrono::microseconds(100));
 
     // Create imports.
-    own<FuncType*> func_type = FuncType::make(
-        vec<ValType*>::make(ValType::make(::wasm::I32)), vec<ValType*>::make());
-    own<Func*> func = Func::make(store.get(), func_type.get(), Callback, mutex);
-    own<::wasm::GlobalType*> global_type =
+    own<FuncType> func_type =
+        FuncType::make(ownvec<ValType>::make(ValType::make(::wasm::I32)),
+                       ownvec<ValType>::make());
+    own<Func> func = Func::make(store.get(), func_type.get(), Callback, mutex);
+    own<::wasm::GlobalType> global_type =
         ::wasm::GlobalType::make(ValType::make(::wasm::I32), ::wasm::CONST);
-    own<Global*> global =
+    own<Global> global =
         Global::make(store.get(), global_type.get(), Val::i32(id));
 
     // Instantiate and run.
@@ -46,9 +47,8 @@ void Main(Engine* engine, Shared<Module>* shared, std::mutex* mutex, int id) {
     // imports always come before function imports, regardless of the
     // order of builder()->Add*Import() calls below.
     Extern* imports[] = {global.get(), func.get()};
-    own<Instance*> instance =
-        Instance::make(store.get(), module.get(), imports);
-    vec<Extern*> exports = instance->exports();
+    own<Instance> instance = Instance::make(store.get(), module.get(), imports);
+    ownvec<Extern> exports = instance->exports();
     Func* run_func = exports[0]->func();
     run_func->call();
   }
@@ -70,7 +70,7 @@ TEST_F(WasmCapiTest, Threads) {
   FunctionSig empty_sig(0, 0, nullptr);
   AddExportedFunction(CStrVector("run"), code, sizeof(code), &empty_sig);
   Compile();
-  own<Shared<Module>*> shared = module()->share();
+  own<Shared<Module>> shared = module()->share();
 
   // Spawn threads.
   g_traces = 0;
@@ -92,9 +92,9 @@ TEST_F(WasmCapiTest, Threads) {
 
 TEST_F(WasmCapiTest, MultiStoresOneThread) {
   // These Stores intentionally have overlapping, but non-nested lifetimes.
-  own<Store*> store1 = Store::make(engine());
-  own<Store*> store2 = Store::make(engine());
-  own<Store*> store3 = Store::make(engine());
+  own<Store> store1 = Store::make(engine());
+  own<Store> store2 = Store::make(engine());
+  own<Store> store3 = Store::make(engine());
   store1.reset();
   store2.reset();
   store3.reset();
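A minimal sketch of the sharing pattern Main() implements above, assuming the wasm.hh threading contract shown here (one Store per thread, one Shared<Module> handle for all of them):

```cpp
#include <thread>
#include "third_party/wasm-api/wasm.hh"

using ::wasm::Engine;
using ::wasm::Module;
using ::wasm::own;
using ::wasm::Shared;
using ::wasm::Store;

// A compiled Module is share()d once; each thread then creates its own
// Store and obtain()s a thread-local Module from the shared handle.
void RunOnWorker(Engine* engine, Shared<Module>* shared) {
  std::thread worker([engine, shared] {
    own<Store> store = Store::make(engine);
    own<Module> module = Module::obtain(store.get(), shared);
    // ... instantiate against the thread-local store and call exports ...
  });
  worker.join();
}
```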
diff --git a/deps/v8/test/wasm-api-tests/traps.cc b/deps/v8/test/wasm-api-tests/traps.cc
index b049d09330056e..e6e425fc4c6bcf 100644
--- a/deps/v8/test/wasm-api-tests/traps.cc
+++ b/deps/v8/test/wasm-api-tests/traps.cc
@@ -4,17 +4,23 @@
 
 #include "test/wasm-api-tests/wasm-api-test.h"
 
+#include "src/execution/isolate.h"
+#include "src/wasm/c-api.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
+
 #include <iostream>
 
 namespace v8 {
 namespace internal {
 namespace wasm {
 
+using ::wasm::Frame;
 using ::wasm::Message;
 
 namespace {
 
-own<Trap*> FailCallback(void* env, const Val args[], Val results[]) {
+own<Trap> FailCallback(void* env, const Val args[], Val results[]) {
   Store* store = reinterpret_cast<Store*>(env);
   Message message = Message::make(std::string("callback abort"));
   return Trap::make(store, message);
@@ -34,25 +40,66 @@ TEST_F(WasmCapiTest, Traps) {
   uint32_t callback_index = builder()->AddImport(CStrVector("callback"), &sig);
   byte code[] = {WASM_CALL_FUNCTION0(callback_index)};
   AddExportedFunction(CStrVector("callback"), code, sizeof(code), &sig);
-  byte code2[] = {WASM_UNREACHABLE, WASM_I32V_1(1)};
+  // The first constant is a 4-byte dummy so that the {unreachable} trap
+  // has a more interesting offset.
+  byte code2[] = {WASM_I32V_3(0), WASM_UNREACHABLE, WASM_I32V_1(1)};
   AddExportedFunction(CStrVector("unreachable"), code2, sizeof(code2), &sig);
 
-  own<FuncType*> func_type = FuncType::make(
-      vec<ValType*>::make(), vec<ValType*>::make(ValType::make(::wasm::I32)));
-  own<Func*> cpp_callback = Func::make(store(), func_type.get(), FailCallback,
-                                       reinterpret_cast<void*>(store()));
+  own<FuncType> func_type =
+      FuncType::make(ownvec<ValType>::make(),
+                     ownvec<ValType>::make(ValType::make(::wasm::I32)));
+  own<Func> cpp_callback = Func::make(store(), func_type.get(), FailCallback,
+                                      reinterpret_cast<void*>(store()));
   Extern* imports[] = {cpp_callback.get()};
   Instantiate(imports);
 
+  // Use internal machinery to parse the module to find the function offsets.
+  // This makes the test more robust than hardcoding them.
+  i::Isolate* isolate =
+      reinterpret_cast<::wasm::StoreImpl*>(store())->i_isolate();
+  ModuleResult result = DecodeWasmModule(
+      kAllWasmFeatures, wire_bytes()->begin(), wire_bytes()->end(), false,
+      ModuleOrigin::kWasmOrigin, isolate->counters(),
+      isolate->wasm_engine()->allocator());
+  ASSERT_TRUE(result.ok());
+  const WasmFunction* func1 = &result.value()->functions[1];
+  const WasmFunction* func2 = &result.value()->functions[2];
+  const uint32_t func1_offset = func1->code.offset();
+  const uint32_t func2_offset = func2->code.offset();
+
   Func* cpp_trapping_func = GetExportedFunction(0);
-  own<Trap*> cpp_trap = cpp_trapping_func->call();
+  own<Trap> cpp_trap = cpp_trapping_func->call();
   EXPECT_NE(nullptr, cpp_trap.get());
   ExpectMessage("Uncaught Error: callback abort", cpp_trap->message());
+  own<Frame> frame = cpp_trap->origin();
+  EXPECT_TRUE(frame->instance()->same(instance()));
+  EXPECT_EQ(1u, frame->func_index());
+  EXPECT_EQ(1u, frame->func_offset());
+  EXPECT_EQ(func1_offset + frame->func_offset(), frame->module_offset());
+  ownvec<Frame> trace = cpp_trap->trace();
+  EXPECT_EQ(1u, trace.size());
+  frame.reset(trace[0].release());
+  EXPECT_TRUE(frame->instance()->same(instance()));
+  EXPECT_EQ(1u, frame->func_index());
+  EXPECT_EQ(1u, frame->func_offset());
+  EXPECT_EQ(func1_offset + frame->func_offset(), frame->module_offset());
 
   Func* wasm_trapping_func = GetExportedFunction(1);
-  own<Trap*> wasm_trap = wasm_trapping_func->call();
+  own<Trap> wasm_trap = wasm_trapping_func->call();
   EXPECT_NE(nullptr, wasm_trap.get());
   ExpectMessage("Uncaught RuntimeError: unreachable", wasm_trap->message());
+  frame = wasm_trap->origin();
+  EXPECT_TRUE(frame->instance()->same(instance()));
+  EXPECT_EQ(2u, frame->func_index());
+  EXPECT_EQ(5u, frame->func_offset());
+  EXPECT_EQ(func2_offset + frame->func_offset(), frame->module_offset());
+  trace = wasm_trap->trace();
+  EXPECT_EQ(1u, trace.size());
+  frame.reset(trace[0].release());
+  EXPECT_TRUE(frame->instance()->same(instance()));
+  EXPECT_EQ(2u, frame->func_index());
+  EXPECT_EQ(5u, frame->func_offset());
+  EXPECT_EQ(func2_offset + frame->func_offset(), frame->module_offset());
 }
 
 }  // namespace wasm
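The offset expectations above all check a single invariant; a small hedged helper stating it explicitly (the uint32_t width is assumed from the decoder types used above):

```cpp
#include <cstdint>
#include "third_party/wasm-api/wasm.hh"

using ::wasm::Frame;

// The invariant behind the EXPECT_EQs above: a frame's module_offset is the
// function's code offset in the wire bytes plus the frame's byte offset
// within that function's body.
bool FrameOffsetsConsistent(const Frame* frame, uint32_t func_code_offset) {
  return frame->module_offset() == func_code_offset + frame->func_offset();
}
```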
diff --git a/deps/v8/test/wasm-api-tests/wasm-api-test.h b/deps/v8/test/wasm-api-tests/wasm-api-test.h
index cb1d9301e1cad1..6fc70ef5f429a3 100644
--- a/deps/v8/test/wasm-api-tests/wasm-api-test.h
+++ b/deps/v8/test/wasm-api-tests/wasm-api-test.h
@@ -13,19 +13,6 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "third_party/wasm-api/wasm.hh"
 
-namespace wasm {
-
-// TODO(jkummerow): Drop these from the API.
-#ifdef DEBUG
-template <class T>
-void vec<T>::make_data() {}
-
-template <class T>
-void vec<T>::free_data() {}
-#endif
-
-}  // namespace wasm
-
 namespace v8 {
 namespace internal {
 namespace wasm {
@@ -40,6 +27,7 @@ using ::wasm::Instance;
 using ::wasm::Memory;
 using ::wasm::Module;
 using ::wasm::own;
+using ::wasm::ownvec;
 using ::wasm::Ref;
 using ::wasm::Store;
 using ::wasm::Table;
@@ -53,22 +41,23 @@ class WasmCapiTest : public ::testing::Test {
   WasmCapiTest()
       : Test(),
         zone_(&allocator_, ZONE_NAME),
+        wire_bytes_(&zone_),
         builder_(&zone_),
-        exports_(vec<Extern*>::make()),
+        exports_(ownvec<Extern>::make()),
         wasm_i_i_sig_(1, 1, wasm_i_i_sig_types_) {
     engine_ = Engine::make();
     store_ = Store::make(engine_.get());
     cpp_i_i_sig_ =
-        FuncType::make(vec<ValType*>::make(ValType::make(::wasm::I32)),
-                       vec<ValType*>::make(ValType::make(::wasm::I32)));
+        FuncType::make(ownvec<ValType>::make(ValType::make(::wasm::I32)),
+                       ownvec<ValType>::make(ValType::make(::wasm::I32)));
   }
 
   void Compile() {
-    ZoneBuffer buffer(&zone_);
-    builder_.WriteTo(&buffer);
-    size_t size = buffer.end() - buffer.begin();
+    builder_.WriteTo(&wire_bytes_);
+    size_t size = wire_bytes_.end() - wire_bytes_.begin();
     vec<byte_t> binary = vec<byte_t>::make(
-        size, reinterpret_cast<byte_t*>(const_cast<byte*>(buffer.begin())));
+        size,
+        reinterpret_cast<byte_t*>(const_cast<byte*>(wire_bytes_.begin())));
 
     module_ = Module::make(store_.get(), binary);
     DCHECK_NE(module_.get(), nullptr);
@@ -91,7 +80,7 @@ class WasmCapiTest : public ::testing::Test {
 
   Func* GetExportedFunction(size_t index) {
     DCHECK_GT(exports_.size(), index);
-    Extern* exported = exports_[index];
+    Extern* exported = exports_[index].get();
     DCHECK_EQ(exported->kind(), ::wasm::EXTERN_FUNC);
     Func* func = exported->func();
     DCHECK_NE(func, nullptr);
@@ -100,7 +89,7 @@ class WasmCapiTest : public ::testing::Test {
 
   Global* GetExportedGlobal(size_t index) {
     DCHECK_GT(exports_.size(), index);
-    Extern* exported = exports_[index];
+    Extern* exported = exports_[index].get();
     DCHECK_EQ(exported->kind(), ::wasm::EXTERN_GLOBAL);
     Global* global = exported->global();
     DCHECK_NE(global, nullptr);
@@ -109,7 +98,7 @@ class WasmCapiTest : public ::testing::Test {
 
   Memory* GetExportedMemory(size_t index) {
     DCHECK_GT(exports_.size(), index);
-    Extern* exported = exports_[index];
+    Extern* exported = exports_[index].get();
     DCHECK_EQ(exported->kind(), ::wasm::EXTERN_MEMORY);
     Memory* memory = exported->memory();
     DCHECK_NE(memory, nullptr);
@@ -118,7 +107,7 @@ class WasmCapiTest : public ::testing::Test {
 
   Table* GetExportedTable(size_t index) {
     DCHECK_GT(exports_.size(), index);
-    Extern* exported = exports_[index];
+    Extern* exported = exports_[index].get();
     DCHECK_EQ(exported->kind(), ::wasm::EXTERN_TABLE);
     Table* table = exported->table();
     DCHECK_NE(table, nullptr);
@@ -126,6 +115,7 @@ class WasmCapiTest : public ::testing::Test {
   }
 
   void Shutdown() {
+    exports_.reset();
     instance_.reset();
     module_.reset();
     store_.reset();
@@ -136,7 +126,9 @@ class WasmCapiTest : public ::testing::Test {
   Engine* engine() { return engine_.get(); }
   Store* store() { return store_.get(); }
   Module* module() { return module_.get(); }
-  const vec<Extern*>& exports() { return exports_; }
+  Instance* instance() { return instance_.get(); }
+  const ownvec<Extern>& exports() { return exports_; }
+  ZoneBuffer* wire_bytes() { return &wire_bytes_; }
 
   FunctionSig* wasm_i_i_sig() { return &wasm_i_i_sig_; }
   FuncType* cpp_i_i_sig() { return cpp_i_i_sig_.get(); }
@@ -144,13 +136,14 @@ class WasmCapiTest : public ::testing::Test {
  private:
   AccountingAllocator allocator_;
   Zone zone_;
+  ZoneBuffer wire_bytes_;
   WasmModuleBuilder builder_;
-  own<Engine*> engine_;
-  own<Store*> store_;
-  own<Module*> module_;
-  own<Instance*> instance_;
-  vec<Extern*> exports_;
-  own<FuncType*> cpp_i_i_sig_;
+  own<Engine> engine_;
+  own<Store> store_;
+  own<Module> module_;
+  own<Instance> instance_;
+  ownvec<Extern> exports_;
+  own<FuncType> cpp_i_i_sig_;
   ValueType wasm_i_i_sig_types_[2] = {kWasmI32, kWasmI32};
   FunctionSig wasm_i_i_sig_;
 };
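The mechanical change running through all of these test files is visible in this header: a hedged sketch of the old and new spellings, assuming wasm.hh's `own<T>` is a unique-ownership smart pointer and `ownvec<T>` an owning vector.

```cpp
#include "third_party/wasm-api/wasm.hh"

using ::wasm::Engine;
using ::wasm::Extern;
using ::wasm::own;
using ::wasm::ownvec;
using ::wasm::Store;

// own<T> replaces own<T*>, and ownvec<T> replaces vec<T*> for owning
// vectors; elements are now owned too, so raw access goes through .get().
void OwnershipSpelling() {
  own<Engine> engine = Engine::make();           // was: own<Engine*>
  own<Store> store = Store::make(engine.get());  // was: own<Store*>
  ownvec<Extern> externs = ownvec<Extern>::make();  // was: vec<Extern*>
  if (externs.size() > 0) {
    Extern* first = externs[0].get();  // element is an own<Extern> now
    (void)first;
  }
}
```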
diff --git a/deps/v8/test/wasm-js/OWNERS b/deps/v8/test/wasm-js/OWNERS
index 4b6b34d24a7129..32941e6257c7d1 100644
--- a/deps/v8/test/wasm-js/OWNERS
+++ b/deps/v8/test/wasm-js/OWNERS
@@ -1 +1 @@
-file://src/wasm/OWNERS
+file:../../src/wasm/OWNERS
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index 3f3c67ac6a09a9..197d9195f11c83 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -13,6 +13,21 @@
 META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
 META_TIMEOUT_REGEXP = re.compile(r"META:\s*timeout=(.*)")
 
+proposal_flags = [{
+                    'name': 'reference-types',
+                    'flags': ['--experimental-wasm-anyref',
+                              '--no-experimental-wasm-bulk-memory']
+                  },
+                  {
+                    'name': 'bulk-memory-operations',
+                    'flags': ['--experimental-wasm-bulk-memory']
+                  },
+                  {
+                    'name': 'js-types',
+                    'flags': ['--experimental-wasm-type-reflection',
+                              '--no-experimental-wasm-bulk-memory']
+                  }]
+
 
 class TestLoader(testsuite.JSTestLoader):
   @property
@@ -25,7 +40,7 @@ def __init__(self, *args, **kwargs):
     super(TestSuite, self).__init__(*args, **kwargs)
     self.mjsunit_js = os.path.join(os.path.dirname(self.root), "mjsunit",
                                    "mjsunit.js")
-    self.test_root = os.path.join(self.root, "data", "test", "js-api")
+    self.test_root = os.path.join(self.root, "tests")
     self._test_loader.test_root = self.test_root
 
   def _test_loader_class(self):
@@ -34,6 +49,8 @@ def _test_loader_class(self):
   def _test_class(self):
     return TestCase
 
+def get_proposal_path_identifier(proposal):
+  return os.sep.join(['proposals', proposal['name']])
 
 class TestCase(testcase.D8TestCase):
   def _get_timeout_param(self):
@@ -50,19 +67,27 @@ def _get_timeout_param(self):
       return None
 
   def _get_files_params(self):
-    files = [os.path.join(self.suite.mjsunit_js),
+    files = [self.suite.mjsunit_js,
              os.path.join(self.suite.root, "testharness.js")]
 
     source = self.get_source()
+    current_dir = os.path.dirname(self._get_source_path())
     for script in META_SCRIPT_REGEXP.findall(source):
       if script.startswith(WPT_ROOT):
         # Matched an absolute path, strip the root and replace it with our
         # local root.
-        script = os.path.join(self.suite.test_root, script[len(WPT_ROOT):])
+        found = False
+        for proposal in proposal_flags:
+          if get_proposal_path_identifier(proposal) in current_dir:
+            found = True
+            script = os.path.join(self.suite.test_root,
+                                  os.sep.join(['proposals', proposal['name']]),
+                                  script[len(WPT_ROOT):])
+        if not found:
+          script = os.path.join(self.suite.test_root, script[len(WPT_ROOT):])
       elif not script.startswith("/"):
         # Matched a relative path, prepend this test's directory.
-        thisdir = os.path.dirname(self._get_source_path())
-        script = os.path.join(thisdir, script)
+        script = os.path.join(current_dir, script)
       else:
        raise Exception("Unexpected absolute path for script: \"%s\"" % script)
 
@@ -74,6 +99,12 @@ def _get_files_params(self):
     ])
     return files
 
+  def _get_source_flags(self):
+    for proposal in proposal_flags:
+      if get_proposal_path_identifier(proposal) in self.path:
+        return proposal['flags']
+    return []
+
   def _get_source_path(self):
     # All tests are named `path/name.any.js`
     return os.path.join(self.suite.test_root, self.path + ANY_JS)
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
new file mode 100644
index 00000000000000..ec8be70e2aa179
--- /dev/null
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -0,0 +1 @@
+26e59563060bd6de4adbb4021684e8cf38fe71c8
\ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index 51961fd46dd348..42ad2a4152cec3 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -6,6 +6,7 @@
 [ALWAYS, {
   # https://bugs.chromium.org/p/v8/issues/detail?id=8633
   'limits': [SKIP],
+  'proposals/reference-types/limits': [SKIP],
 }], # ALWAYS
 
 ['arch == s390 or arch == s390x or system == aix', {
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index 34230b07d0bce6..e0bd19f2685d04 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -10,27 +10,36 @@
 proposal_flags = [{
                     'name': 'reference-types',
                     'flags': ['--experimental-wasm-anyref',
-                               '--no-experimental-wasm-bulk-memory']
+                              '--no-experimental-wasm-bulk-memory']
                   },
                   {
                     'name': 'bulk-memory-operations',
                     'flags': ['--experimental-wasm-bulk-memory']
+                  },
+                  {
+                    'name': 'js-types',
+                    'flags': ['--experimental-wasm-type-reflection',
+                              '--no-experimental-wasm-bulk-memory']
                   }]
 
 class TestLoader(testsuite.JSTestLoader):
   pass
 
 class TestSuite(testsuite.TestSuite):
+  def __init__(self, *args, **kwargs):
+    super(TestSuite, self).__init__(*args, **kwargs)
+    self.test_root = os.path.join(self.root, "tests")
+    self._test_loader.test_root = self.test_root
+
   def _test_loader_class(self):
     return TestLoader
 
   def _test_class(self):
     return TestCase
 
-
 class TestCase(testcase.D8TestCase):
   def _get_files_params(self):
-    return [os.path.join(self.suite.root, self.path + self._get_suffix())]
+    return [os.path.join(self.suite.test_root, self.path + self._get_suffix())]
 
   def _get_source_flags(self):
     for proposal in proposal_flags:
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 0b068afe0a3f30..36c9e08ac1effc 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-b02f00e24b28ad76537a10a788a8be966c3577bd
\ No newline at end of file
+d9e649f4ea6da6bd18999795201c2bd138c0d786
\ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index e61f5ceb261143..16faaaed35e827 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -5,56 +5,75 @@
 [
 [ALWAYS, {
   # TODO(ahaas): Add additional stack checks on mips.
-  'tests/skip-stack-guard-page': [PASS, ['arch == mipsel or arch == mips64el or ((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
+  'skip-stack-guard-page': [PASS, ['arch == mipsel or arch == mips64el or ((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
   # TODO(v8:9144): The MVP behavior when bounds-checking segments changed in
   # the bulk-memory proposal. Since we've enabled bulk-memory by default, we
   # need to update to use its testsuite.
-  'tests/linking': [FAIL],
-
-  # TODO(ahaas): Needs investigation, I disable the test for now.
-  'tests/conversions': [PASS, ['system == windows and arch == ia32', FAIL]],
-
-  # TODO(ahaas): Incorporate recent changes to the bulk-memory-operations
-  # proposal.
-  'tests/proposals/bulk-memory-operations/elem': [FAIL],
-  'tests/proposals/bulk-memory-operations/data': [FAIL],
+  'linking': [FAIL],
+  'elem': [FAIL],
+  'data': [FAIL],
+  # TODO(v8:9658): The encoding of element segments changed in the bulk memory
+  # proposal.
+  'proposals/bulk-memory-operations/bulk': [FAIL],
+  'proposals/bulk-memory-operations/table_init': [FAIL],
+  'proposals/bulk-memory-operations/table_copy': [FAIL],
+  'proposals/bulk-memory-operations/elem': [FAIL],
+  'proposals/bulk-memory-operations/binary': [FAIL],
+  # TODO(mstarzinger): Roll newest tests into "js-types" repository.
+  'proposals/js-types/exports': [FAIL],
+  'proposals/js-types/globals': [FAIL],
+  'proposals/js-types/linking': [FAIL],
 }],  # ALWAYS
 
 ['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
   # These tests fail because mips does not support the correct NaN bit patterns.
-  'tests/float_misc': [SKIP],
-  'tests/float_exprs': [SKIP],
-  'tests/f32': [SKIP],
-  'tests/f64': [SKIP],
-  'tests/f32_bitwise': [SKIP],
-  'tests/f64_bitwise': [SKIP],
+  'float_misc': [SKIP],
+  'float_exprs': [SKIP],
+  'f32': [SKIP],
+  'f64': [SKIP],
+  'f32_bitwise': [SKIP],
+  'f64_bitwise': [SKIP],
+  'proposals/reference-types/conversions': [SKIP],
+  'proposals/bulk-memory-operations/conversions': [SKIP],
+  'proposals/js-types/f32': [SKIP],
+  'proposals/js-types/f64': [SKIP],
+  'proposals/js-types/f32_bitwise': [SKIP],
+  'proposals/js-types/f64_bitwise': [SKIP],
+  'proposals/js-types/float_exprs': [SKIP],
+  'proposals/js-types/float_misc': [SKIP],
+  'proposals/js-types/conversions': [SKIP],
 }],  # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64'
 
 ['(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simulator_run', {
   # This test fails because mips does not support the correct NaN bit patterns.
   # But it doesn't fail in the simulator.
-  'tests/conversions': [SKIP],
+  'conversions': [SKIP],
 }],  # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simulator_run'
 
 ['arch == arm and not simulator_run', {
   # Too slow on chromebooks.
-  'tests/br_table': [SKIP],
+  'br_table': [SKIP],
 }],  # 'arch == arm and not simulator_run'
 
 ['arch == ppc or arch == ppc64', {
   # These tests fail because ppc float min and max don't convert sNaN to qNaN.
-  'tests/f32': [SKIP],
-  'tests/f64': [SKIP],
+  'f32': [SKIP],
+  'f64': [SKIP],
+  'proposals/js-types/f32': [SKIP],
+  'proposals/js-types/f64': [SKIP],
   # This test fails because ppc float to double doesn't convert sNaN to qNaN.
-  'tests/conversions': [SKIP],
-   'tests/proposals/reference-types/conversions':  [SKIP],
-   'tests/proposals/bulk-memory-operations/conversions': [SKIP],
+  'conversions': [SKIP],
+  'proposals/reference-types/conversions': [SKIP],
+  'proposals/bulk-memory-operations/conversions': [SKIP],
+  'proposals/js-types/conversions': [SKIP],
 }],  # 'arch == ppc or arch == ppc64'
 
 ['arch == s390 or arch == s390x', {
   # These tests fail because s390 float min and max don't convert sNaN to qNaN.
-  'tests/f32': [SKIP],
-  'tests/f64': [SKIP],
+  'f32': [SKIP],
+  'f64': [SKIP],
+  'proposals/js-types/f32': [SKIP],
+  'proposals/js-types/f64': [SKIP],
 }],  # 'arch == s390 or arch == s390x'
 
 ##############################################################################
diff --git a/deps/v8/testing/OWNERS b/deps/v8/testing/OWNERS
index bdb1d555a4fb98..cb04fa0838fbb5 100644
--- a/deps/v8/testing/OWNERS
+++ b/deps/v8/testing/OWNERS
@@ -1 +1 @@
-file://INFRA_OWNERS
+file:../INFRA_OWNERS
diff --git a/deps/v8/third_party/binutils/OWNERS b/deps/v8/third_party/binutils/OWNERS
index bdb1d555a4fb98..09e0096a2ee4cd 100644
--- a/deps/v8/third_party/binutils/OWNERS
+++ b/deps/v8/third_party/binutils/OWNERS
@@ -1 +1 @@
-file://INFRA_OWNERS
+file:../../INFRA_OWNERS
diff --git a/deps/v8/third_party/colorama/OWNERS b/deps/v8/third_party/colorama/OWNERS
index bdb1d555a4fb98..09e0096a2ee4cd 100644
--- a/deps/v8/third_party/colorama/OWNERS
+++ b/deps/v8/third_party/colorama/OWNERS
@@ -1 +1 @@
-file://INFRA_OWNERS
+file:../../INFRA_OWNERS
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index 622ff72f7454d3..a092e3e7a1dff3 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
 Short Name: inspector_protocol
 URL: https://chromium.googlesource.com/deps/inspector_protocol/
 Version: 0
-Revision: 373efb7fe33a7ae84038868ed08b9f1bd328b55d
+Revision: d114a62e144cdfdae697fe0af6581ce39a31af37
 License: BSD
 License File: LICENSE
 Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/code_generator.py b/deps/v8/third_party/inspector_protocol/code_generator.py
index 5cf5a308c5e6a0..7c72cc70e4fbeb 100755
--- a/deps/v8/third_party/inspector_protocol/code_generator.py
+++ b/deps/v8/third_party/inspector_protocol/code_generator.py
@@ -43,9 +43,6 @@ def json_object_hook(object_dict):
       items = [(k, os.path.join(output_base, v) if k == "output" else v)
                for (k, v) in items]
       keys, values = list(zip(*items))
-      # 'async' is a keyword since Python 3.7.
-      # Avoid namedtuple(rename=True) for compatibility with Python 2.X.
-      keys = tuple('async_' if k == 'async' else k for k in keys)
       return collections.namedtuple('X', keys)(*values)
     return json.loads(data, object_hook=json_object_hook)
 
@@ -558,7 +555,7 @@ def is_async_command(self, domain, command):
     if not self.config.protocol.options:
       return False
     return self.check_options(self.config.protocol.options, domain, command,
-                              "async_", None, False)
+                              "async", None, False)
 
   def is_exported(self, domain, name):
     if not self.config.protocol.options:
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding.cc b/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
index 1da7c45aa39623..6e5619d00e1c7c 100644
--- a/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
@@ -53,6 +53,10 @@ std::string Status::ToASCIIString() const {
       return ToASCIIString("CBOR: invalid double");
     case Error::CBOR_INVALID_ENVELOPE:
       return ToASCIIString("CBOR: invalid envelope");
+    case Error::CBOR_ENVELOPE_CONTENTS_LENGTH_MISMATCH:
+      return ToASCIIString("CBOR: envelope contents length mismatch");
+    case Error::CBOR_MAP_OR_ARRAY_EXPECTED_IN_ENVELOPE:
+      return ToASCIIString("CBOR: map or array expected in envelope");
     case Error::CBOR_INVALID_STRING8:
       return ToASCIIString("CBOR: invalid string8");
     case Error::CBOR_INVALID_STRING16:
@@ -929,6 +933,9 @@ bool ParseArray(int32_t stack_depth,
 bool ParseValue(int32_t stack_depth,
                 CBORTokenizer* tokenizer,
                 StreamingParserHandler* out);
+bool ParseEnvelope(int32_t stack_depth,
+                   CBORTokenizer* tokenizer,
+                   StreamingParserHandler* out);
 
 void ParseUTF16String(CBORTokenizer* tokenizer, StreamingParserHandler* out) {
   std::vector<uint16_t> value;
@@ -946,6 +953,52 @@ bool ParseUTF8String(CBORTokenizer* tokenizer, StreamingParserHandler* out) {
   return true;
 }
 
+bool ParseEnvelope(int32_t stack_depth,
+                   CBORTokenizer* tokenizer,
+                   StreamingParserHandler* out) {
+  assert(tokenizer->TokenTag() == CBORTokenTag::ENVELOPE);
+  // Before we enter the envelope, we save the position that we
+  // expect to see after we're done parsing the envelope contents.
+  // This way we can compare and produce an error if the contents
+  // didn't fit exactly into the envelope length.
+  size_t pos_past_envelope = tokenizer->Status().pos +
+                             kEncodedEnvelopeHeaderSize +
+                             tokenizer->GetEnvelopeContents().size();
+  tokenizer->EnterEnvelope();
+  switch (tokenizer->TokenTag()) {
+    case CBORTokenTag::ERROR_VALUE:
+      out->HandleError(tokenizer->Status());
+      return false;
+    case CBORTokenTag::MAP_START:
+      if (!ParseMap(stack_depth + 1, tokenizer, out))
+        return false;
+      break;  // Continue to check pos_past_envelope below.
+    case CBORTokenTag::ARRAY_START:
+      if (stack_depth == 0) {  // Not allowed at the top level.
+        out->HandleError(
+            Status{Error::CBOR_MAP_START_EXPECTED, tokenizer->Status().pos});
+        return false;
+      }
+      if (!ParseArray(stack_depth + 1, tokenizer, out))
+        return false;
+      break;  // Continue to check pos_past_envelope below.
+    default:
+      out->HandleError(Status{
+          stack_depth == 0 ? Error::CBOR_MAP_START_EXPECTED
+                           : Error::CBOR_MAP_OR_ARRAY_EXPECTED_IN_ENVELOPE,
+          tokenizer->Status().pos});
+      return false;
+  }
+  // The contents of the envelope parsed OK, now check that we're at
+  // the expected position.
+  if (pos_past_envelope != tokenizer->Status().pos) {
+    out->HandleError(Status{Error::CBOR_ENVELOPE_CONTENTS_LENGTH_MISMATCH,
+                            tokenizer->Status().pos});
+    return false;
+  }
+  return true;
+}
+
 bool ParseValue(int32_t stack_depth,
                 CBORTokenizer* tokenizer,
                 StreamingParserHandler* out) {
@@ -954,9 +1007,6 @@ bool ParseValue(int32_t stack_depth,
         Status{Error::CBOR_STACK_LIMIT_EXCEEDED, tokenizer->Status().pos});
     return false;
   }
-  // Skip past the envelope to get to what's inside.
-  if (tokenizer->TokenTag() == CBORTokenTag::ENVELOPE)
-    tokenizer->EnterEnvelope();
   switch (tokenizer->TokenTag()) {
     case CBORTokenTag::ERROR_VALUE:
       out->HandleError(tokenizer->Status());
@@ -965,6 +1015,8 @@ bool ParseValue(int32_t stack_depth,
       out->HandleError(Status{Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE,
                               tokenizer->Status().pos});
       return false;
+    case CBORTokenTag::ENVELOPE:
+      return ParseEnvelope(stack_depth, tokenizer, out);
     case CBORTokenTag::TRUE_VALUE:
       out->HandleBool(true);
       tokenizer->Next();
@@ -1091,13 +1143,7 @@ void ParseCBOR(span<uint8_t> bytes, StreamingParserHandler* out) {
   // We checked for the envelope start byte above, so the tokenizer
   // must agree here, since it's not an error.
   assert(tokenizer.TokenTag() == CBORTokenTag::ENVELOPE);
-  tokenizer.EnterEnvelope();
-  if (tokenizer.TokenTag() != CBORTokenTag::MAP_START) {
-    out->HandleError(
-        Status{Error::CBOR_MAP_START_EXPECTED, tokenizer.Status().pos});
-    return;
-  }
-  if (!ParseMap(/*stack_depth=*/1, &tokenizer, out))
+  if (!ParseEnvelope(/*stack_depth=*/0, &tokenizer, out))
     return;
   if (tokenizer.TokenTag() == CBORTokenTag::DONE)
     return;
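For orientation, the envelope wire format that ParseEnvelope validates, reconstructed from the byte patterns in the tests below (0xd8 0x5a tag bytes, then a 32-bit big-endian contents length, with kEncodedEnvelopeHeaderSize == 6). A minimal sketch building such a header:

```cpp
#include <cstdint>
#include <vector>

// Envelope header: 0xd8 0x5a, then the contents length as 4 big-endian
// bytes (6 bytes total). The contents must parse as a map or an array and
// must end exactly at header + length, or the parser reports
// CBOR_ENVELOPE_CONTENTS_LENGTH_MISMATCH.
std::vector<uint8_t> MakeEnvelopeHeader(uint32_t contents_length) {
  return {0xd8, 0x5a,
          static_cast<uint8_t>(contents_length >> 24),
          static_cast<uint8_t>(contents_length >> 16),
          static_cast<uint8_t>(contents_length >> 8),
          static_cast<uint8_t>(contents_length)};
}
```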
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding.h b/deps/v8/third_party/inspector_protocol/encoding/encoding.h
index 340667f6046556..c9ddd3a9becdf5 100644
--- a/deps/v8/third_party/inspector_protocol/encoding/encoding.h
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding.h
@@ -125,21 +125,23 @@ enum class Error {
   CBOR_INVALID_INT32 = 0x0e,
   CBOR_INVALID_DOUBLE = 0x0f,
   CBOR_INVALID_ENVELOPE = 0x10,
-  CBOR_INVALID_STRING8 = 0x11,
-  CBOR_INVALID_STRING16 = 0x12,
-  CBOR_INVALID_BINARY = 0x13,
-  CBOR_UNSUPPORTED_VALUE = 0x14,
-  CBOR_NO_INPUT = 0x15,
-  CBOR_INVALID_START_BYTE = 0x16,
-  CBOR_UNEXPECTED_EOF_EXPECTED_VALUE = 0x17,
-  CBOR_UNEXPECTED_EOF_IN_ARRAY = 0x18,
-  CBOR_UNEXPECTED_EOF_IN_MAP = 0x19,
-  CBOR_INVALID_MAP_KEY = 0x1a,
-  CBOR_STACK_LIMIT_EXCEEDED = 0x1b,
-  CBOR_TRAILING_JUNK = 0x1c,
-  CBOR_MAP_START_EXPECTED = 0x1d,
-  CBOR_MAP_STOP_EXPECTED = 0x1e,
-  CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x1f,
+  CBOR_ENVELOPE_CONTENTS_LENGTH_MISMATCH = 0x11,
+  CBOR_MAP_OR_ARRAY_EXPECTED_IN_ENVELOPE = 0x12,
+  CBOR_INVALID_STRING8 = 0x13,
+  CBOR_INVALID_STRING16 = 0x14,
+  CBOR_INVALID_BINARY = 0x15,
+  CBOR_UNSUPPORTED_VALUE = 0x16,
+  CBOR_NO_INPUT = 0x17,
+  CBOR_INVALID_START_BYTE = 0x18,
+  CBOR_UNEXPECTED_EOF_EXPECTED_VALUE = 0x19,
+  CBOR_UNEXPECTED_EOF_IN_ARRAY = 0x1a,
+  CBOR_UNEXPECTED_EOF_IN_MAP = 0x1b,
+  CBOR_INVALID_MAP_KEY = 0x1c,
+  CBOR_STACK_LIMIT_EXCEEDED = 0x1d,
+  CBOR_TRAILING_JUNK = 0x1e,
+  CBOR_MAP_START_EXPECTED = 0x1f,
+  CBOR_MAP_STOP_EXPECTED = 0x20,
+  CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x21,
 };
 
 // A status value with position that can be copied. The default status
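Because the two new enumerators are inserted mid-enum, every later error code shifts up by two, so persisted numeric codes are not stable across this change. A hedged compile-time check of the new numbering (enclosing namespace elided in this sketch):

```cpp
// Assumed visible via encoding.h; compare against enumerators, not numbers.
static_assert(
    static_cast<int>(Error::CBOR_ENVELOPE_CONTENTS_LENGTH_MISMATCH) == 0x11 &&
        static_cast<int>(Error::CBOR_TRAILING_JUNK) == 0x1e,
    "inserting the envelope errors renumbered all later CBOR error codes");
```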
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc b/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
index 768e19bb9eeed1..a36b200bad1053 100644
--- a/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
@@ -979,6 +979,72 @@ TEST(ParseCBORTest, UnexpectedEofInMapError) {
   EXPECT_EQ("", out);
 }
 
+TEST(ParseCBORTest, TopLevelCantBeEmptyEnvelope) {
+  // An empty envelope cannot satisfy the requirement that the top-level
+  // envelope contain a map, so parsing fails with CBOR_MAP_START_EXPECTED.
+  std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, 0};  // envelope
+  std::string out;
+  Status status;
+  std::unique_ptr<StreamingParserHandler> json_writer =
+      NewJSONEncoder(&GetTestPlatform(), &out, &status);
+  ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+  EXPECT_EQ(Error::CBOR_MAP_START_EXPECTED, status.error);
+  EXPECT_EQ(bytes.size(), status.pos);
+  EXPECT_EQ("", out);
+}
+
+TEST(ParseCBORTest, MapStartExpectedAtTopLevel) {
+  // Normally, an array would be allowed inside an envelope, but
+  // the top-level envelope is required to contain a map.
+  constexpr uint8_t kPayloadLen = 1;
+  std::vector<uint8_t> bytes = {0xd8,
+                                0x5a,
+                                0,
+                                0,
+                                0,
+                                kPayloadLen,  // envelope
+                                EncodeIndefiniteLengthArrayStart()};
+  EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+  std::string out;
+  Status status;
+  std::unique_ptr<StreamingParserHandler> json_writer =
+      NewJSONEncoder(&GetTestPlatform(), &out, &status);
+  ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+  EXPECT_EQ(Error::CBOR_MAP_START_EXPECTED, status.error);
+  EXPECT_EQ(6u, status.pos);
+  EXPECT_EQ("", out);
+}
+
+TEST(ParseCBORTest, OnlyMapsAndArraysSupportedInsideEnvelopes) {
+  // The top level is a map with key "foo", and the value
+  // is an envelope that contains just a number (1). We don't
+  // allow numbers to be contained in an envelope though, only
+  // maps and arrays.
+  constexpr uint8_t kPayloadLen = 1;
+  std::vector<uint8_t> bytes = {0xd8,
+                                0x5a,
+                                0,
+                                0,
+                                0,
+                                kPayloadLen,  // envelope
+                                EncodeIndefiniteLengthMapStart()};
+  EncodeString8(SpanFrom("foo"), &bytes);
+  for (uint8_t byte : {0xd8, 0x5a, 0, 0, 0, /*payload_len*/ 1})
+    bytes.emplace_back(byte);
+  size_t error_pos = bytes.size();
+  bytes.push_back(1);  // Envelope contents / payload = number 1.
+  bytes.emplace_back(EncodeStop());
+
+  std::string out;
+  Status status;
+  std::unique_ptr<StreamingParserHandler> json_writer =
+      NewJSONEncoder(&GetTestPlatform(), &out, &status);
+  ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+  EXPECT_EQ(Error::CBOR_MAP_OR_ARRAY_EXPECTED_IN_ENVELOPE, status.error);
+  EXPECT_EQ(error_pos, status.pos);
+  EXPECT_EQ("", out);
+}
+
 TEST(ParseCBORTest, InvalidMapKeyError) {
   constexpr uint8_t kPayloadLen = 2;
   std::vector<uint8_t> bytes = {0xd8,       0x5a, 0,
@@ -1195,18 +1261,18 @@ TEST(ParseCBORTest, InvalidSignedError) {
 }
 
 TEST(ParseCBORTest, TrailingJunk) {
-  constexpr uint8_t kPayloadLen = 35;
+  constexpr uint8_t kPayloadLen = 12;
   std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen,  // envelope
                                 0xbf};                             // map start
   EncodeString8(SpanFrom("key"), &bytes);
   EncodeString8(SpanFrom("value"), &bytes);
   bytes.push_back(0xff);  // Up to here, it's a perfectly fine msg.
+  ASSERT_EQ(kPayloadLen, bytes.size() - 6);
   size_t error_pos = bytes.size();
+  // Now write some trailing junk after the message.
   EncodeString8(SpanFrom("trailing junk"), &bytes);
-
   internals::WriteTokenStart(MajorType::UNSIGNED,
                              std::numeric_limits<uint64_t>::max(), &bytes);
-  EXPECT_EQ(kPayloadLen, bytes.size() - 6);
   std::string out;
   Status status;
   std::unique_ptr<StreamingParserHandler> json_writer =
@@ -1217,6 +1283,29 @@ TEST(ParseCBORTest, TrailingJunk) {
   EXPECT_EQ("", out);
 }
 
+TEST(ParseCBORTest, EnvelopeContentsLengthMismatch) {
+  constexpr uint8_t kPartialPayloadLen = 5;
+  std::vector<uint8_t> bytes = {0xd8, 0x5a, 0,
+                                0,    0,    kPartialPayloadLen,  // envelope
+                                0xbf};                           // map start
+  EncodeString8(SpanFrom("key"), &bytes);
+  // kPartialPayloadLen would need to indicate the length of the entire map,
+  // all the way past the 0xff map stop character. Instead, it only covers
+  // a portion of the map.
+  EXPECT_EQ(bytes.size() - 6, kPartialPayloadLen);
+  EncodeString8(SpanFrom("value"), &bytes);
+  bytes.push_back(0xff);  // map stop
+
+  std::string out;
+  Status status;
+  std::unique_ptr<StreamingParserHandler> json_writer =
+      NewJSONEncoder(&GetTestPlatform(), &out, &status);
+  ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+  EXPECT_EQ(Error::CBOR_ENVELOPE_CONTENTS_LENGTH_MISMATCH, status.error);
+  EXPECT_EQ(bytes.size(), status.pos);
+  EXPECT_EQ("", out);
+}
+
 // =============================================================================
 // cbor::AppendString8EntryToMap - for limited in-place editing of messages
 // =============================================================================
@@ -1376,7 +1465,7 @@ TEST(JsonEncoder, IncompleteUtf8Sequence) {
 
   {  // 🌎 takes four bytes to encode in UTF-8. We test with the first three;
     // This means we're trying to emit a string that consists solely of an
-    // incomplete UTF-8 sequence. So the string in the JSON output is emtpy.
+    // incomplete UTF-8 sequence. So the string in the JSON output is empty.
     std::string world_utf8 = "🌎";
     ASSERT_EQ(4u, world_utf8.size());
     std::vector<uint8_t> chars(world_utf8.begin(), world_utf8.begin() + 3);
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index 530ed4faf91d0b..04184d967661ce 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -15,7 +15,7 @@
 
 namespace array {
   class SortState extends Struct {
-    Compare(implicit context: Context)(x: Object, y: Object): Number {
+    Compare(implicit context: Context)(x: JSAny, y: JSAny): Number {
       const sortCompare: CompareBuiltinFn = this.sortComparePtr;
       return sortCompare(context, this.userCmpFn, x, y);
     }
@@ -143,7 +143,7 @@ namespace array {
     let canUseSameAccessorFn: CanUseSameAccessorFn;
 
     try {
-      GotoIfForceSlowPath() otherwise Slow;
+      if (IsForceSlowPath()) goto Slow;
       const a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
 
       // Copy copy-on-write (COW) arrays.
@@ -214,12 +214,12 @@ namespace array {
   // it is first requested, but it always has at least this size.
   const kSortStateTempSize: Smi = 32;
 
-  type LoadFn = builtin(Context, SortState, Smi) => Object;
-  type StoreFn = builtin(Context, SortState, Smi, Object) => Smi;
+  type LoadFn = builtin(Context, SortState, Smi) => (JSAny | TheHole);
+  type StoreFn = builtin(Context, SortState, Smi, JSAny) => Smi;
   type DeleteFn = builtin(Context, SortState, Smi) => Smi;
-  type CanUseSameAccessorFn = builtin(Context, JSReceiver, Object, Number) =>
+  type CanUseSameAccessorFn = builtin(Context, JSReceiver, Map, Number) =>
       Boolean;
-  type CompareBuiltinFn = builtin(Context, Object, Object, Object) => Number;
+  type CompareBuiltinFn = builtin(Context, JSAny, JSAny, JSAny) => Number;
 
   // The following builtins implement Load/Store for all the Accessors.
   // The most generic baseline version uses Get-/SetProperty. We do not need
@@ -228,28 +228,28 @@ namespace array {
   // through a hole.
 
   transitioning builtin Load<ElementsAccessor: type>(
-      context: Context, sortState: SortState, index: Smi): Object {
+      context: Context, sortState: SortState, index: Smi): JSAny | TheHole {
     const receiver = sortState.receiver;
     if (!HasProperty_Inline(receiver, index)) return TheHole;
     return GetProperty(receiver, index);
   }
 
   Load<FastSmiElements>(context: Context, sortState: SortState, index: Smi):
-      Object {
+      JSAny | TheHole {
     const object = UnsafeCast<JSObject>(sortState.receiver);
     const elements = UnsafeCast<FixedArray>(object.elements);
-    return elements.objects[index];
+    return UnsafeCast<(JSAny | TheHole)>(elements.objects[index]);
   }
 
   Load<FastObjectElements>(context: Context, sortState: SortState, index: Smi):
-      Object {
+      JSAny | TheHole {
     const object = UnsafeCast<JSObject>(sortState.receiver);
     const elements = UnsafeCast<FixedArray>(object.elements);
-    return elements.objects[index];
+    return UnsafeCast<(JSAny | TheHole)>(elements.objects[index]);
   }
 
   Load<FastDoubleElements>(context: Context, sortState: SortState, index: Smi):
-      Object {
+      JSAny | TheHole {
     try {
       const object = UnsafeCast<JSObject>(sortState.receiver);
       const elements = UnsafeCast<FixedDoubleArray>(object.elements);
@@ -262,13 +262,13 @@ namespace array {
   }
 
   transitioning builtin Store<ElementsAccessor: type>(
-      context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+      context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
     SetProperty(sortState.receiver, index, value);
     return kSuccess;
   }
 
   Store<FastSmiElements>(
-      context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+      context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
     const object = UnsafeCast<JSObject>(sortState.receiver);
     const elements = UnsafeCast<FixedArray>(object.elements);
     const value = UnsafeCast<Smi>(value);
@@ -277,7 +277,7 @@ namespace array {
   }
 
   Store<FastObjectElements>(
-      context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+      context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
     const object = UnsafeCast<JSObject>(sortState.receiver);
     const elements = UnsafeCast<FixedArray>(object.elements);
     elements.objects[index] = value;
@@ -285,7 +285,7 @@ namespace array {
   }
 
   Store<FastDoubleElements>(
-      context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+      context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
     const object = UnsafeCast<JSObject>(sortState.receiver);
     const elements = UnsafeCast<FixedDoubleArray>(object.elements);
     const heapVal = UnsafeCast<HeapNumber>(value);
@@ -333,7 +333,7 @@ namespace array {
   }
 
   transitioning builtin SortCompareDefault(
-      context: Context, comparefn: Object, x: Object, y: Object): Number {
+      context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
     assert(comparefn == Undefined);
 
     if (TaggedIsSmi(x) && TaggedIsSmi(y)) {
@@ -361,7 +361,7 @@ namespace array {
   }
 
   transitioning builtin SortCompareUserFn(
-      context: Context, comparefn: Object, x: Object, y: Object): Number {
+      context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
     assert(comparefn != Undefined);
     const cmpfn = UnsafeCast<Callable>(comparefn);
 
@@ -376,7 +376,7 @@ namespace array {
   }
 
   builtin CanUseSameAccessor<ElementsAccessor: type>(
-      context: Context, receiver: JSReceiver, initialReceiverMap: Object,
+      context: Context, receiver: JSReceiver, initialReceiverMap: Map,
       initialReceiverLength: Number): Boolean {
     if (receiver.map != initialReceiverMap) return False;
 
@@ -389,7 +389,7 @@ namespace array {
   }
 
   CanUseSameAccessor<GenericElementsAccessor>(
-      _context: Context, _receiver: JSReceiver, _initialReceiverMap: Object,
+      _context: Context, _receiver: JSReceiver, _initialReceiverMap: Map,
       _initialReceiverLength: Number): Boolean {
     // Do nothing. We are already on the slow path.
     return True;
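The Torque typing change above narrows element loads from Object to JSAny|TheHole, so callers must acknowledge holes before treating a slot as a JavaScript value. As a rough C++ analogy only (this is not V8 code), a hole-aware load behaves like an optional:

```cpp
#include <cstddef>
#include <optional>
#include <vector>

// std::nullopt stands in for Torque's TheHole sentinel: a load may yield
// "no JavaScript value here", and callers must check before using it.
std::optional<int> LoadElement(const std::vector<std::optional<int>>& elements,
                               std::size_t index) {
  return elements.at(index);  // JSAny|TheHole, modeled as optional<int>
}
```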
@@ -456,7 +456,7 @@ namespace array {
   transitioning builtin
   Copy(implicit context: Context)(
       source: FixedArray, srcPos: Smi, target: FixedArray, dstPos: Smi,
-      length: Smi): Object {
+      length: Smi): JSAny {
     assert(srcPos >= 0);
     assert(dstPos >= 0);
     assert(srcPos <= source.length - length);
@@ -509,7 +509,7 @@ namespace array {
       let left: Smi = low;
       let right: Smi = start;
 
-      const pivot = workArray.objects[right];
+      const pivot = UnsafeCast<JSAny>(workArray.objects[right]);
 
       // Invariants:
       //   pivot >= all in [low, left).
@@ -519,7 +519,8 @@ namespace array {
       // Find pivot insertion point.
       while (left < right) {
         const mid: Smi = left + ((right - left) >> 1);
-        const order = sortState.Compare(pivot, workArray.objects[mid]);
+        const order =
+            sortState.Compare(pivot, UnsafeCast<JSAny>(workArray.objects[mid]));
 
         if (order < 0) {
           right = mid;
@@ -571,8 +572,8 @@ namespace array {
 
     let runLength: Smi = 2;
 
-    const elementLow = workArray.objects[low];
-    const elementLowPred = workArray.objects[low - 1];
+    const elementLow = UnsafeCast<JSAny>(workArray.objects[low]);
+    const elementLowPred = UnsafeCast<JSAny>(workArray.objects[low - 1]);
     let order = sortState.Compare(elementLow, elementLowPred);
 
     // TODO(szuend): Replace with "order < 0" once Torque supports it.
@@ -580,9 +581,9 @@ namespace array {
     //               'never' and uses two labels to branch.
     const isDescending: bool = order < 0 ? true : false;
 
-    let previousElement: Object = elementLow;
+    let previousElement: JSAny = elementLow;
     for (let idx: Smi = low + 1; idx < high; ++idx) {
-      const currentElement = workArray.objects[idx];
+      const currentElement = UnsafeCast<JSAny>(workArray.objects[idx]);
       order = sortState.Compare(currentElement, previousElement);
 
       if (isDescending) {
@@ -650,7 +651,7 @@ namespace array {
 
     // Where does b start in a? Elements in a before that can be ignored,
     // because they are already in place.
-    const keyRight = workArray.objects[baseB];
+    const keyRight = UnsafeCast<JSAny>(workArray.objects[baseB]);
     const k: Smi = GallopRight(workArray, keyRight, baseA, lengthA, 0);
     assert(k >= 0);
 
@@ -661,7 +662,7 @@ namespace array {
 
     // Where does a end in b? Elements in b after that can be ignored,
     // because they are already in place.
-    const keyLeft = workArray.objects[baseA + lengthA - 1];
+    const keyLeft = UnsafeCast<JSAny>(workArray.objects[baseA + lengthA - 1]);
     lengthB = GallopLeft(workArray, keyLeft, baseB, lengthB, lengthB - 1);
     assert(lengthB >= 0);
     if (lengthB == 0) return kSuccess;
@@ -695,14 +696,14 @@ namespace array {
   // pretending that array[base - 1] is minus infinity and array[base + len]
   // is plus infinity. In other words, key belongs at index base + k.
   builtin GallopLeft(implicit context: Context, sortState: SortState)(
-      array: FixedArray, key: Object, base: Smi, length: Smi, hint: Smi): Smi {
+      array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
     assert(length > 0 && base >= 0);
     assert(0 <= hint && hint < length);
 
     let lastOfs: Smi = 0;
     let offset: Smi = 1;
 
-    const baseHintElement = array.objects[base + hint];
+    const baseHintElement = UnsafeCast<JSAny>(array.objects[base + hint]);
     let order = sortState.Compare(baseHintElement, key);
 
     if (order < 0) {
@@ -712,7 +713,8 @@ namespace array {
       // a[base + length - 1] is highest.
       const maxOfs: Smi = length - hint;
       while (offset < maxOfs) {
-        const offsetElement = array.objects[base + hint + offset];
+        const offsetElement =
+            UnsafeCast<JSAny>(array.objects[base + hint + offset]);
         order = sortState.Compare(offsetElement, key);
 
         // a[base + hint + offset] >= key? Break.
@@ -738,7 +740,8 @@ namespace array {
       // a[base + hint] is lowest.
       const maxOfs: Smi = hint + 1;
       while (offset < maxOfs) {
-        const offsetElement = array.objects[base + hint - offset];
+        const offsetElement =
+            UnsafeCast<JSAny>(array.objects[base + hint - offset]);
         order = sortState.Compare(offsetElement, key);
 
         if (order < 0) break;
@@ -768,7 +771,8 @@ namespace array {
     while (lastOfs < offset) {
       const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
 
-      order = sortState.Compare(array.objects[base + m], key);
+      order =
+          sortState.Compare(UnsafeCast<JSAny>(array.objects[base + m]), key);
 
       if (order < 0) {
         lastOfs = m + 1;  // a[base + m] < key.
@@ -792,14 +796,14 @@ namespace array {
   //
   // or kFailure on error.
   builtin GallopRight(implicit context: Context, sortState: SortState)(
-      array: FixedArray, key: Object, base: Smi, length: Smi, hint: Smi): Smi {
+      array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
     assert(length > 0 && base >= 0);
     assert(0 <= hint && hint < length);
 
     let lastOfs: Smi = 0;
     let offset: Smi = 1;
 
-    const baseHintElement = array.objects[base + hint];
+    const baseHintElement = UnsafeCast<JSAny>(array.objects[base + hint]);
     let order = sortState.Compare(key, baseHintElement);
 
     if (order < 0) {
@@ -809,7 +813,8 @@ namespace array {
       // a[base + hint] is lowest.
       const maxOfs: Smi = hint + 1;
       while (offset < maxOfs) {
-        const offsetElement = array.objects[base + hint - offset];
+        const offsetElement =
+            UnsafeCast<JSAny>(array.objects[base + hint - offset]);
         order = sortState.Compare(key, offsetElement);
 
         if (order >= 0) break;
@@ -834,7 +839,8 @@ namespace array {
       // a[base + length - 1] is highest.
       const maxOfs: Smi = length - hint;
       while (offset < maxOfs) {
-        const offsetElement = array.objects[base + hint + offset];
+        const offsetElement =
+            UnsafeCast<JSAny>(array.objects[base + hint + offset]);
         order = sortState.Compare(key, offsetElement);
 
         // a[base + hint + ofs] <= key.
@@ -863,7 +869,8 @@ namespace array {
     while (lastOfs < offset) {
       const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
 
-      order = sortState.Compare(key, array.objects[base + m]);
+      order =
+          sortState.Compare(key, UnsafeCast<JSAny>(array.objects[base + m]));
 
       if (order < 0) {
         offset = m;  // key < a[base + m].
@@ -921,7 +928,8 @@ namespace array {
           assert(lengthA > 1 && lengthB > 0);
 
           const order = sortState.Compare(
-              workArray.objects[cursorB], tempArray.objects[cursorTemp]);
+              UnsafeCast<JSAny>(workArray.objects[cursorB]),
+              UnsafeCast<JSAny>(tempArray.objects[cursorTemp]));
 
           if (order < 0) {
             workArray.objects[dest++] = workArray.objects[cursorB++];
@@ -958,7 +966,8 @@ namespace array {
           sortState.minGallop = minGallop;
 
           nofWinsA = GallopRight(
-              tempArray, workArray.objects[cursorB], cursorTemp, lengthA, 0);
+              tempArray, UnsafeCast<JSAny>(workArray.objects[cursorB]),
+              cursorTemp, lengthA, 0);
           assert(nofWinsA >= 0);
 
           if (nofWinsA > 0) {
@@ -977,7 +986,8 @@ namespace array {
           if (--lengthB == 0) goto Succeed;
 
           nofWinsB = GallopLeft(
-              workArray, tempArray.objects[cursorTemp], cursorB, lengthB, 0);
+              workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
+              cursorB, lengthB, 0);
           assert(nofWinsB >= 0);
           if (nofWinsB > 0) {
             Copy(workArray, cursorB, workArray, dest, nofWinsB);
@@ -1053,7 +1063,8 @@ namespace array {
           assert(lengthA > 0 && lengthB > 1);
 
           const order = sortState.Compare(
-              tempArray.objects[cursorTemp], workArray.objects[cursorA]);
+              UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
+              UnsafeCast<JSAny>(workArray.objects[cursorA]));
 
           if (order < 0) {
             workArray.objects[dest--] = workArray.objects[cursorA--];
@@ -1091,8 +1102,8 @@ namespace array {
           sortState.minGallop = minGallop;
 
           let k: Smi = GallopRight(
-              workArray, tempArray.objects[cursorTemp], baseA, lengthA,
-              lengthA - 1);
+              workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
+              baseA, lengthA, lengthA - 1);
           assert(k >= 0);
           nofWinsA = lengthA - k;
 
@@ -1108,7 +1119,8 @@ namespace array {
           if (--lengthB == 1) goto CopyA;
 
           k = GallopLeft(
-              tempArray, workArray.objects[cursorA], 0, lengthB, lengthB - 1);
+              tempArray, UnsafeCast<JSAny>(workArray.objects[cursorA]), 0,
+              lengthB, lengthB - 1);
           assert(k >= 0);
           nofWinsB = lengthB - k;
 
@@ -1295,7 +1307,7 @@ namespace array {
     // are ignored.
     let numberOfUndefined: Smi = 0;
     for (let i: Smi = 0; i < receiverLength; ++i) {
-      const element: Object = loadFn(context, sortState, i);
+      const element: JSAny | TheHole = loadFn(context, sortState, i);
 
       if (element == TheHole) {
         // Do nothing for holes. The result is that elements are
@@ -1333,7 +1345,9 @@ namespace array {
     //      set them to TheHole up to {sortState.sortLength}.
     let index: Smi = 0;
     for (; index < numberOfNonUndefined; ++index) {
-      storeFn(context, sortState, index, workArray.objects[index]);
+      storeFn(
+          context, sortState, index,
+          UnsafeCast<JSAny>(workArray.objects[index]));
     }
 
     const numberOfUndefinedEnd: Smi =
@@ -1350,7 +1364,7 @@ namespace array {
   }
 
   transitioning builtin
-  ArrayTimSort(context: Context, sortState: SortState): Object {
+  ArrayTimSort(context: Context, sortState: SortState): JSAny {
     const numberOfNonUndefined: Smi = CompactReceiverElementsIntoWorkArray();
     ArrayTimSortImpl(context, sortState, numberOfNonUndefined);
 
@@ -1369,11 +1383,11 @@ namespace array {
 
   // https://tc39.github.io/ecma262/#sec-array.prototype.sort
   transitioning javascript builtin
-  ArrayPrototypeSort(js-implicit context: Context, receiver: Object)(
-      ...arguments): Object {
+  ArrayPrototypeSort(js-implicit context: Context, receiver: JSAny)(
+      ...arguments): JSAny {
     // 1. If comparefn is not undefined and IsCallable(comparefn) is false,
     //    throw a TypeError exception.
-    const comparefnObj: Object = arguments[0];
+    const comparefnObj: JSAny = arguments[0];
     const comparefn = Cast<(Undefined | Callable)>(comparefnObj) otherwise
     ThrowTypeError(kBadSortComparisonFunction, comparefnObj);
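
Aside: GallopLeft and GallopRight above are TimSort's exponential ("galloping") search. Starting from a hint index, the probe offset roughly doubles until the key is bracketed, then a binary search over the final bracket finds the exact slot; this costs O(log d) comparisons when the insertion point is d elements from the hint. A standalone C sketch of the same idea over a plain int array (illustrative only, not V8 code):

```c
/* Illustrative sketch of the galloping search used by GallopLeft above:
 * exponential probing from a hint, then a binary search. Not V8 code. */
static int gallop_left(const int* a, int key, int base, int length, int hint) {
  int last_ofs = 0, ofs = 1;
  if (a[base + hint] < key) {
    /* Gallop right until a[base+hint+last_ofs] < key <= a[base+hint+ofs]. */
    int max_ofs = length - hint;
    while (ofs < max_ofs && a[base + hint + ofs] < key) {
      last_ofs = ofs;
      ofs = (ofs << 1) + 1;
      if (ofs <= 0) ofs = max_ofs;  /* overflow guard */
    }
    if (ofs > max_ofs) ofs = max_ofs;
    last_ofs += hint;
    ofs += hint;
  } else {
    /* Gallop left until a[base+hint-ofs] < key <= a[base+hint-last_ofs]. */
    int max_ofs = hint + 1;
    while (ofs < max_ofs && a[base + hint - ofs] >= key) {
      last_ofs = ofs;
      ofs = (ofs << 1) + 1;
      if (ofs <= 0) ofs = max_ofs;
    }
    if (ofs > max_ofs) ofs = max_ofs;
    int tmp = last_ofs;
    last_ofs = hint - ofs;
    ofs = hint - tmp;
  }
  /* Binary search in (last_ofs, ofs]: find first index with a[...] >= key. */
  ++last_ofs;
  while (last_ofs < ofs) {
    int m = last_ofs + ((ofs - last_ofs) >> 1);
    if (a[base + m] < key) last_ofs = m + 1;
    else ofs = m;
  }
  return ofs;  /* key belongs at index base + ofs */
}
```
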
 
diff --git a/deps/v8/third_party/wasm-api/README.v8 b/deps/v8/third_party/wasm-api/README.v8
index ea957620b0c004..bebe47b665f7f4 100644
--- a/deps/v8/third_party/wasm-api/README.v8
+++ b/deps/v8/third_party/wasm-api/README.v8
@@ -2,8 +2,8 @@ Name: Wasm C/C++ API
 Short Name: wasm-c-api
 URL: https://github.com/WebAssembly/wasm-c-api/
 Version: 0
-Revision: 5c742b048f7766a0c00be3a7af23fb71ba816026
-Date: 2019-03-18
+Revision: 6db391ee7121a0695602945d11001ea3e00b0afb
+Date: 2019-08-08
 License: Apache 2.0
 License File: LICENSE
 Security Critical: yes
diff --git a/deps/v8/third_party/wasm-api/example/callback.c b/deps/v8/third_party/wasm-api/example/callback.c
index f3b90185940aa1..e17429bdd24688 100644
--- a/deps/v8/third_party/wasm-api/example/callback.c
+++ b/deps/v8/third_party/wasm-api/example/callback.c
@@ -111,7 +111,8 @@ int main(int argc, const char* argv[]) {
   const wasm_extern_t* imports[] = {
     wasm_func_as_extern(print_func), wasm_func_as_extern(closure_func)
   };
-  own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, module, imports, NULL);
   if (!instance) {
     printf("> Error instantiating module!\n");
     return 1;
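
The extra NULL here reflects the new fourth parameter of wasm_instance_new in this wasm-c-api revision, which appears to be an optional out-parameter for a trap raised during instantiation (for example by a trapping start function). A sketch of actually capturing it, assuming that signature:

```c
/* Sketch; assumes the fourth wasm_instance_new parameter is an optional
   `own wasm_trap_t**` out-parameter, as the NULL above suggests. */
own wasm_trap_t* trap = NULL;
own wasm_instance_t* instance =
  wasm_instance_new(store, module, imports, &trap);
if (!instance) {
  if (trap) {
    own wasm_byte_vec_t message;
    wasm_trap_message(trap, &message);
    printf("> Instantiation trapped: %.*s\n", (int)message.size, message.data);
    wasm_byte_vec_delete(&message);
    wasm_trap_delete(trap);
  }
  return 1;
}
```
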
diff --git a/deps/v8/third_party/wasm-api/example/callback.cc b/deps/v8/third_party/wasm-api/example/callback.cc
index a9ee9aa9199456..d9f8751ea656c4 100644
--- a/deps/v8/third_party/wasm-api/example/callback.cc
+++ b/deps/v8/third_party/wasm-api/example/callback.cc
@@ -36,7 +36,7 @@ auto operator<<(std::ostream& out, const wasm::Val& val) -> std::ostream& {
 // A function to be called from Wasm code.
 auto print_callback(
   const wasm::Val args[], wasm::Val results[]
-) -> wasm::own<wasm::Trap*> {
+) -> wasm::own<wasm::Trap> {
   std::cout << "Calling back..." << std::endl << "> " << args[0] << std::endl;
   results[0] = args[0].copy();
   return nullptr;
@@ -46,7 +46,7 @@ auto print_callback(
 // A function closure.
 auto closure_callback(
   void* env, const wasm::Val args[], wasm::Val results[]
-) -> wasm::own<wasm::Trap*> {
+) -> wasm::own<wasm::Trap> {
   auto i = *reinterpret_cast<int*>(env);
   std::cout << "Calling back closure..." << std::endl;
   std::cout << "> " << i << std::endl;
@@ -73,7 +73,7 @@ void run() {
   file.close();
   if (file.fail()) {
     std::cout << "> Error loading module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Compile.
@@ -81,14 +81,14 @@ void run() {
   auto module = wasm::Module::make(store, binary);
   if (!module) {
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Create external print functions.
   std::cout << "Creating callback..." << std::endl;
   auto print_type = wasm::FuncType::make(
-    wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32)),
-    wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32))
+    wasm::ownvec<wasm::ValType>::make(wasm::ValType::make(wasm::I32)),
+    wasm::ownvec<wasm::ValType>::make(wasm::ValType::make(wasm::I32))
   );
   auto print_func = wasm::Func::make(store, print_type.get(), print_callback);
 
@@ -96,8 +96,8 @@ void run() {
   std::cout << "Creating closure..." << std::endl;
   int i = 42;
   auto closure_type = wasm::FuncType::make(
-    wasm::vec<wasm::ValType*>::make(),
-    wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32))
+    wasm::ownvec<wasm::ValType>::make(),
+    wasm::ownvec<wasm::ValType>::make(wasm::ValType::make(wasm::I32))
   );
   auto closure_func = wasm::Func::make(store, closure_type.get(), closure_callback, &i);
 
@@ -107,7 +107,7 @@ void run() {
   auto instance = wasm::Instance::make(store, module.get(), imports);
   if (!instance) {
     std::cout << "> Error instantiating module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Extract export.
@@ -115,7 +115,7 @@ void run() {
   auto exports = instance->exports();
   if (exports.size() == 0 || exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func()) {
     std::cout << "> Error accessing export!" << std::endl;
-    return;
+    exit(1);
   }
   auto run_func = exports[0]->func();
 
@@ -125,7 +125,7 @@ void run() {
   wasm::Val results[1];
   if (run_func->call(args, results)) {
     std::cout << "> Error calling function!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Print result.
diff --git a/deps/v8/third_party/wasm-api/example/finalize.c b/deps/v8/third_party/wasm-api/example/finalize.c
index 6841617262bc1c..247368f28eb01f 100644
--- a/deps/v8/third_party/wasm-api/example/finalize.c
+++ b/deps/v8/third_party/wasm-api/example/finalize.c
@@ -9,23 +9,21 @@
 
 const int iterations = 100000;
 
+int live_count = 0;
+
 void finalize(void* data) {
   int i = (int)data;
   if (i % (iterations / 10) == 0) printf("Finalizing #%d...\n", i);
+  --live_count;
 }
 
-int main(int argc, const char* argv[]) {
-  // Initialize.
-  printf("Initializing...\n");
-  wasm_engine_t* engine = wasm_engine_new();
-  wasm_store_t* store = wasm_store_new(engine);
-
+void run_in_store(wasm_store_t* store) {
   // Load binary.
   printf("Loading binary...\n");
   FILE* file = fopen("finalize.wasm", "r");
   if (!file) {
     printf("> Error loading module!\n");
-    return 1;
+    exit(1);
   }
   fseek(file, 0L, SEEK_END);
   size_t file_size = ftell(file);
@@ -34,7 +32,7 @@ int main(int argc, const char* argv[]) {
   wasm_byte_vec_new_uninitialized(&binary, file_size);
   if (fread(binary.data, file_size, 1, file) != 1) {
     printf("> Error loading module!\n");
-    return 1;
+    exit(1);
   }
   fclose(file);
 
@@ -43,7 +41,7 @@ int main(int argc, const char* argv[]) {
   own wasm_module_t* module = wasm_module_new(store, &binary);
   if (!module) {
     printf("> Error compiling module!\n");
-    return 1;
+    exit(1);
   }
 
   wasm_byte_vec_delete(&binary);
@@ -52,21 +50,57 @@ int main(int argc, const char* argv[]) {
   printf("Instantiating modules...\n");
   for (int i = 0; i <= iterations; ++i) {
     if (i % (iterations / 10) == 0) printf("%d\n", i);
-    own wasm_instance_t* instance = wasm_instance_new(store, module, NULL);
+    own wasm_instance_t* instance =
+      wasm_instance_new(store, module, NULL, NULL);
     if (!instance) {
       printf("> Error instantiating module %d!\n", i);
-      return 1;
+      exit(1);
     }
     void* data = (void*)(intptr_t)i;
     wasm_instance_set_host_info_with_finalizer(instance, data, &finalize);
     wasm_instance_delete(instance);
+    ++live_count;
   }
 
   wasm_module_delete(module);
+}
+
+int main(int argc, const char* argv[]) {
+  // Initialize.
+  printf("Initializing...\n");
+  wasm_engine_t* engine = wasm_engine_new();
+
+  printf("Live count %d\n", live_count);
+  printf("Creating store 1...\n");
+  wasm_store_t* store1 = wasm_store_new(engine);
+
+  printf("Running in store 1...\n");
+  run_in_store(store1);
+  printf("Live count %d\n", live_count);
+
+  printf("Creating store 2...\n");
+  wasm_store_t* store2 = wasm_store_new(engine);
+
+  printf("Running in store 2...\n");
+  run_in_store(store2);
+  printf("Live count %d\n", live_count);
+
+  printf("Deleting store 2...\n");
+  wasm_store_delete(store2);
+  printf("Live count %d\n", live_count);
+
+  printf("Running in store 1...\n");
+  run_in_store(store1);
+  printf("Live count %d\n", live_count);
+
+  printf("Deleting store 1...\n");
+  wasm_store_delete(store1);
+  printf("Live count %d\n", live_count);
+
+  assert(live_count == 0);
 
   // Shut down.
   printf("Shutting down...\n");
-  wasm_store_delete(store);
   wasm_engine_delete(engine);
 
   // All done.
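
The restructured test pins down finalizer timing: run_in_store registers a host-info finalizer per instance and bumps live_count, and main's final assert demands that the count has returned to zero once both stores are gone. Restated as a helper (slightly stronger than the single final assert, but it is what the printed live counts are meant to show):

```c
/* Restatement of the invariant the test above encodes: deleting a store
   runs every host-info finalizer registered against objects in that store. */
void check_store_cleanup(wasm_engine_t* engine) {
  int before = live_count;
  wasm_store_t* store = wasm_store_new(engine);
  run_in_store(store);       /* ++live_count per instance, finalizers pending */
  wasm_store_delete(store);  /* finalizers fire, decrementing live_count */
  assert(live_count == before);
}
```
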
diff --git a/deps/v8/third_party/wasm-api/example/finalize.cc b/deps/v8/third_party/wasm-api/example/finalize.cc
index a354a2601d31e8..64e134b8d87844 100644
--- a/deps/v8/third_party/wasm-api/example/finalize.cc
+++ b/deps/v8/third_party/wasm-api/example/finalize.cc
@@ -9,20 +9,17 @@
 
 const int iterations = 100000;
 
+int live_count = 0;
+
 void finalize(void* data) {
   intptr_t i = reinterpret_cast<intptr_t>(data);
   if (i % (iterations / 10) == 0) {
     std::cout << "Finalizing #" << i << "..." << std::endl;
   }
+  --live_count;
 }
 
-void run() {
-  // Initialize.
-  std::cout << "Initializing..." << std::endl;
-  auto engine = wasm::Engine::make();
-  auto store_ = wasm::Store::make(engine.get());
-  auto store = store_.get();
-
+void run_in_store(wasm::Store* store) {
   // Load binary.
   std::cout << "Loading binary..." << std::endl;
   std::ifstream file("finalize.wasm");
@@ -34,7 +31,7 @@ void run() {
   file.close();
   if (file.fail()) {
     std::cout << "> Error loading module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Compile.
@@ -42,7 +39,7 @@ void run() {
   auto module = wasm::Module::make(store, binary);
   if (!module) {
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Instantiate.
@@ -52,9 +49,10 @@ void run() {
     auto instance = wasm::Instance::make(store, module.get(), nullptr);
     if (!instance) {
       std::cout << "> Error instantiating module " << i << "!" << std::endl;
-      return;
+      exit(1);
     }
     instance->set_host_info(reinterpret_cast<void*>(i), &finalize);
+    ++live_count;
   }
 
   // Shut down.
@@ -62,8 +60,43 @@ void run() {
 }
 
 
+void run() {
+  // Initialize.
+  std::cout << "Initializing..." << std::endl;
+  auto engine = wasm::Engine::make();
+
+  std::cout << "Live count " << live_count << std::endl;
+  std::cout << "Creating store 1..." << std::endl;
+  auto store1 = wasm::Store::make(engine.get());
+
+  std::cout << "Running in store 1..." << std::endl;
+  run_in_store(store1.get());
+  std::cout << "Live count " << live_count << std::endl;
+
+  {
+    std::cout << "Creating store 2..." << std::endl;
+    auto store2 = wasm::Store::make(engine.get());
+
+    std::cout << "Running in store 2..." << std::endl;
+    run_in_store(store2.get());
+    std::cout << "Live count " << live_count << std::endl;
+
+    std::cout << "Deleting store 2..." << std::endl;
+    std::cout << "Live count " << live_count << std::endl;
+  }
+
+  std::cout << "Running in store 1..." << std::endl;
+  run_in_store(store1.get());
+  std::cout << "Live count " << live_count << std::endl;
+
+  std::cout << "Deleting store 1..." << std::endl;
+}
+
+
 int main(int argc, const char* argv[]) {
   run();
+  std::cout << "Live count " << live_count << std::endl;
+  assert(live_count == 0);
   std::cout << "Done." << std::endl;
   return 0;
 }
diff --git a/deps/v8/third_party/wasm-api/example/global.c b/deps/v8/third_party/wasm-api/example/global.c
index b82d86242e9459..5fe357cd4c190d 100644
--- a/deps/v8/third_party/wasm-api/example/global.c
+++ b/deps/v8/third_party/wasm-api/example/global.c
@@ -91,13 +91,17 @@ int main(int argc, const char* argv[]) {
     wasm_valtype_new(WASM_I64), WASM_VAR);
 
   wasm_val_t val_f32_1 = {.kind = WASM_F32, .of = {.f32 = 1}};
-  own wasm_global_t* const_f32_import = wasm_global_new(store, const_f32_type, &val_f32_1);
+  own wasm_global_t* const_f32_import =
+    wasm_global_new(store, const_f32_type, &val_f32_1);
   wasm_val_t val_i64_2 = {.kind = WASM_I64, .of = {.i64 = 2}};
-  own wasm_global_t* const_i64_import = wasm_global_new(store, const_i64_type, &val_i64_2);
+  own wasm_global_t* const_i64_import =
+    wasm_global_new(store, const_i64_type, &val_i64_2);
   wasm_val_t val_f32_3 = {.kind = WASM_F32, .of = {.f32 = 3}};
-  own wasm_global_t* var_f32_import = wasm_global_new(store, var_f32_type, &val_f32_3);
+  own wasm_global_t* var_f32_import =
+    wasm_global_new(store, var_f32_type, &val_f32_3);
   wasm_val_t val_i64_4 = {.kind = WASM_I64, .of = {.i64 = 4}};
-  own wasm_global_t* var_i64_import = wasm_global_new(store, var_i64_type, &val_i64_4);
+  own wasm_global_t* var_i64_import =
+    wasm_global_new(store, var_i64_type, &val_i64_4);
 
   wasm_globaltype_delete(const_f32_type);
   wasm_globaltype_delete(const_i64_type);
@@ -112,7 +116,8 @@ int main(int argc, const char* argv[]) {
     wasm_global_as_extern(var_f32_import),
     wasm_global_as_extern(var_i64_import)
   };
-  own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, module, imports, NULL);
   if (!instance) {
     printf("> Error instantiating module!\n");
     return 1;
@@ -142,6 +147,11 @@ int main(int argc, const char* argv[]) {
   wasm_func_t* set_var_f32_export = get_export_func(&exports, i++);
   wasm_func_t* set_var_i64_export = get_export_func(&exports, i++);
 
+  // Try cloning.
+  own wasm_global_t* copy = wasm_global_copy(var_f32_import);
+  assert(wasm_global_same(var_f32_import, copy));
+  wasm_global_delete(copy);
+
   // Interact.
   printf("Accessing globals...\n");
 
diff --git a/deps/v8/third_party/wasm-api/example/global.cc b/deps/v8/third_party/wasm-api/example/global.cc
index 75a2513c822d3b..811024e65da56f 100644
--- a/deps/v8/third_party/wasm-api/example/global.cc
+++ b/deps/v8/third_party/wasm-api/example/global.cc
@@ -7,7 +7,7 @@
 #include "wasm.hh"
 
 
-auto get_export_global(wasm::vec<wasm::Extern*>& exports, size_t i) -> wasm::Global* {
+auto get_export_global(wasm::ownvec<wasm::Extern>& exports, size_t i) -> wasm::Global* {
   if (exports.size() <= i || !exports[i]->global()) {
     std::cout << "> Error accessing global export " << i << "!" << std::endl;
     exit(1);
@@ -15,7 +15,7 @@ auto get_export_global(wasm::vec<wasm::Extern*>& exports, size_t i) -> wasm::Glo
   return exports[i]->global();
 }
 
-auto get_export_func(const wasm::vec<wasm::Extern*>& exports, size_t i) -> const wasm::Func* {
+auto get_export_func(const wasm::ownvec<wasm::Extern>& exports, size_t i) -> const wasm::Func* {
   if (exports.size() <= i || !exports[i]->func()) {
     std::cout << "> Error accessing function export " << i << "!" << std::endl;
     exit(1);
@@ -67,7 +67,7 @@ void run() {
   file.close();
   if (file.fail()) {
     std::cout << "> Error loading module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Compile.
@@ -75,7 +75,7 @@ void run() {
   auto module = wasm::Module::make(store, binary);
   if (!module) {
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Create external globals.
@@ -102,7 +102,7 @@ void run() {
   auto instance = wasm::Instance::make(store, module.get(), imports);
   if (!instance) {
     std::cout << "> Error instantiating module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Extract export.
@@ -126,6 +126,9 @@ void run() {
   auto set_var_f32_export = get_export_func(exports, i++);
   auto set_var_i64_export = get_export_func(exports, i++);
 
+  // Try cloning.
+  assert(var_f32_import->copy()->same(var_f32_import.get()));
+
   // Interact.
   std::cout << "Accessing globals..." << std::endl;
 
diff --git a/deps/v8/third_party/wasm-api/example/hello.c b/deps/v8/third_party/wasm-api/example/hello.c
index b1c8c5fee5f34d..e4ef9837ffc08c 100644
--- a/deps/v8/third_party/wasm-api/example/hello.c
+++ b/deps/v8/third_party/wasm-api/example/hello.c
@@ -62,7 +62,8 @@ int main(int argc, const char* argv[]) {
   // Instantiate.
   printf("Instantiating module...\n");
   const wasm_extern_t* imports[] = { wasm_func_as_extern(hello_func) };
-  own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, module, imports, NULL);
   if (!instance) {
     printf("> Error instantiating module!\n");
     return 1;
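
A caveat these C examples share (it predates this change): the module file is opened in text mode. On platforms that translate line endings this can corrupt the binary, so a portable embedder would open it in binary mode:

```c
/* Portability sketch: Wasm binaries should be read in binary mode. */
FILE* file = fopen("hello.wasm", "rb");
```
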
diff --git a/deps/v8/third_party/wasm-api/example/hello.cc b/deps/v8/third_party/wasm-api/example/hello.cc
index 4956be885ffb4c..e009b3b50d5054 100644
--- a/deps/v8/third_party/wasm-api/example/hello.cc
+++ b/deps/v8/third_party/wasm-api/example/hello.cc
@@ -10,7 +10,7 @@
 // A function to be called from Wasm code.
 auto hello_callback(
   const wasm::Val args[], wasm::Val results[]
-) -> wasm::own<wasm::Trap*> {
+) -> wasm::own<wasm::Trap> {
   std::cout << "Calling back..." << std::endl;
   std::cout << "> Hello world!" << std::endl;
   return nullptr;
@@ -35,7 +35,7 @@ void run() {
   file.close();
   if (file.fail()) {
     std::cout << "> Error loading module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Compile.
@@ -43,13 +43,13 @@ void run() {
   auto module = wasm::Module::make(store, binary);
   if (!module) {
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Create external print functions.
   std::cout << "Creating callback..." << std::endl;
   auto hello_type = wasm::FuncType::make(
-    wasm::vec<wasm::ValType*>::make(), wasm::vec<wasm::ValType*>::make()
+    wasm::ownvec<wasm::ValType>::make(), wasm::ownvec<wasm::ValType>::make()
   );
   auto hello_func = wasm::Func::make(store, hello_type.get(), hello_callback);
 
@@ -59,7 +59,7 @@ void run() {
   auto instance = wasm::Instance::make(store, module.get(), imports);
   if (!instance) {
     std::cout << "> Error instantiating module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Extract export.
@@ -67,7 +67,7 @@ void run() {
   auto exports = instance->exports();
   if (exports.size() == 0 || exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func()) {
     std::cout << "> Error accessing export!" << std::endl;
-    return;
+    exit(1);
   }
   auto run_func = exports[0]->func();
 
@@ -75,7 +75,7 @@ void run() {
   std::cout << "Calling export..." << std::endl;
   if (run_func->call()) {
     std::cout << "> Error calling function!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Shut down.
diff --git a/deps/v8/third_party/wasm-api/example/hostref.c b/deps/v8/third_party/wasm-api/example/hostref.c
new file mode 100644
index 00000000000000..b70218e6105921
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/hostref.c
@@ -0,0 +1,269 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+
+// A function to be called from Wasm code.
+own wasm_trap_t* callback(
+  const wasm_val_t args[], wasm_val_t results[]
+) {
+  printf("Calling back...\n> ");
+  printf("> %p\n",
+    args[0].of.ref ? wasm_ref_get_host_info(args[0].of.ref) : NULL);
+  wasm_val_copy(&results[0], &args[0]);
+  return NULL;
+}
+
+
+wasm_func_t* get_export_func(const wasm_extern_vec_t* exports, size_t i) {
+  if (exports->size <= i || !wasm_extern_as_func(exports->data[i])) {
+    printf("> Error accessing function export %zu!\n", i);
+    exit(1);
+  }
+  return wasm_extern_as_func(exports->data[i]);
+}
+
+wasm_global_t* get_export_global(const wasm_extern_vec_t* exports, size_t i) {
+  if (exports->size <= i || !wasm_extern_as_global(exports->data[i])) {
+    printf("> Error accessing global export %zu!\n", i);
+    exit(1);
+  }
+  return wasm_extern_as_global(exports->data[i]);
+}
+
+wasm_table_t* get_export_table(const wasm_extern_vec_t* exports, size_t i) {
+  if (exports->size <= i || !wasm_extern_as_table(exports->data[i])) {
+    printf("> Error accessing table export %zu!\n", i);
+    exit(1);
+  }
+  return wasm_extern_as_table(exports->data[i]);
+}
+
+
+own wasm_ref_t* call_v_r(const wasm_func_t* func) {
+  printf("call_v_r... "); fflush(stdout);
+  wasm_val_t results[1];
+  if (wasm_func_call(func, NULL, results)) {
+    printf("> Error calling function!\n");
+    exit(1);
+  }
+  printf("okay\n");
+  return results[0].of.ref;
+}
+
+void call_r_v(const wasm_func_t* func, wasm_ref_t* ref) {
+  printf("call_r_v... "); fflush(stdout);
+  wasm_val_t args[1];
+  args[0].kind = WASM_ANYREF;
+  args[0].of.ref = ref;
+  if (wasm_func_call(func, args, NULL)) {
+    printf("> Error calling function!\n");
+    exit(1);
+  }
+  printf("okay\n");
+}
+
+own wasm_ref_t* call_r_r(const wasm_func_t* func, wasm_ref_t* ref) {
+  printf("call_r_r... "); fflush(stdout);
+  wasm_val_t args[1];
+  args[0].kind = WASM_ANYREF;
+  args[0].of.ref = ref;
+  wasm_val_t results[1];
+  if (wasm_func_call(func, args, results)) {
+    printf("> Error calling function!\n");
+    exit(1);
+  }
+  printf("okay\n");
+  return results[0].of.ref;
+}
+
+void call_ir_v(const wasm_func_t* func, int32_t i, wasm_ref_t* ref) {
+  printf("call_ir_v... "); fflush(stdout);
+  wasm_val_t args[2];
+  args[0].kind = WASM_I32;
+  args[0].of.i32 = i;
+  args[1].kind = WASM_ANYREF;
+  args[1].of.ref = ref;
+  if (wasm_func_call(func, args, NULL)) {
+    printf("> Error calling function!\n");
+    exit(1);
+  }
+  printf("okay\n");
+}
+
+own wasm_ref_t* call_i_r(const wasm_func_t* func, int32_t i) {
+  printf("call_i_r... "); fflush(stdout);
+  wasm_val_t args[1];
+  args[0].kind = WASM_I32;
+  args[0].of.i32 = i;
+  wasm_val_t results[1];
+  if (wasm_func_call(func, args, results)) {
+    printf("> Error calling function!\n");
+    exit(1);
+  }
+  printf("okay\n");
+  return results[0].of.ref;
+}
+
+void check(own wasm_ref_t* actual, const wasm_ref_t* expected) {
+  if (actual != expected &&
+      !(actual && expected && wasm_ref_same(actual, expected))) {
+    printf("> Error reading reference, expected %p, got %p\n",
+      expected ? wasm_ref_get_host_info(expected) : NULL,
+      actual ? wasm_ref_get_host_info(actual) : NULL);
+    exit(1);
+  }
+  if (actual) wasm_ref_delete(actual);
+}
+
+
+int main(int argc, const char* argv[]) {
+  // Initialize.
+  printf("Initializing...\n");
+  wasm_engine_t* engine = wasm_engine_new();
+  wasm_store_t* store = wasm_store_new(engine);
+
+  // Load binary.
+  printf("Loading binary...\n");
+  FILE* file = fopen("hostref.wasm", "r");
+  if (!file) {
+    printf("> Error loading module!\n");
+    return 1;
+  }
+  fseek(file, 0L, SEEK_END);
+  size_t file_size = ftell(file);
+  fseek(file, 0L, SEEK_SET);
+  wasm_byte_vec_t binary;
+  wasm_byte_vec_new_uninitialized(&binary, file_size);
+  if (fread(binary.data, file_size, 1, file) != 1) {
+    printf("> Error loading module!\n");
+    return 1;
+  }
+  fclose(file);
+
+  // Compile.
+  printf("Compiling module...\n");
+  own wasm_module_t* module = wasm_module_new(store, &binary);
+  if (!module) {
+    printf("> Error compiling module!\n");
+    return 1;
+  }
+
+  wasm_byte_vec_delete(&binary);
+
+  // Create external callback function.
+  printf("Creating callback...\n");
+  own wasm_functype_t* callback_type = wasm_functype_new_1_1(
+    wasm_valtype_new(WASM_ANYREF), wasm_valtype_new(WASM_ANYREF));
+  own wasm_func_t* callback_func =
+    wasm_func_new(store, callback_type, callback);
+
+  wasm_functype_delete(callback_type);
+
+  // Instantiate.
+  printf("Instantiating module...\n");
+  const wasm_extern_t* imports[] = { wasm_func_as_extern(callback_func) };
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, module, imports, NULL);
+  if (!instance) {
+    printf("> Error instantiating module!\n");
+    return 1;
+  }
+
+  wasm_func_delete(callback_func);
+  wasm_module_delete(module);
+
+  // Extract export.
+  printf("Extracting exports...\n");
+  own wasm_extern_vec_t exports;
+  wasm_instance_exports(instance, &exports);
+  size_t i = 0;
+  wasm_global_t* global = get_export_global(&exports, i++);
+  wasm_table_t* table = get_export_table(&exports, i++);
+  wasm_func_t* global_set = get_export_func(&exports, i++);
+  wasm_func_t* global_get = get_export_func(&exports, i++);
+  wasm_func_t* table_set = get_export_func(&exports, i++);
+  wasm_func_t* table_get = get_export_func(&exports, i++);
+  wasm_func_t* func_call = get_export_func(&exports, i++);
+
+  wasm_instance_delete(instance);
+
+  // Create host references.
+  printf("Creating host references...\n");
+  own wasm_ref_t* host1 = wasm_foreign_as_ref(wasm_foreign_new(store));
+  own wasm_ref_t* host2 = wasm_foreign_as_ref(wasm_foreign_new(store));
+  wasm_ref_set_host_info(host1, (void*)1);
+  wasm_ref_set_host_info(host2, (void*)2);
+
+  // Some sanity checks.
+  check(NULL, NULL);
+  check(wasm_ref_copy(host1), host1);
+  check(wasm_ref_copy(host2), host2);
+
+  own wasm_val_t val;
+  val.kind = WASM_ANYREF;
+  val.of.ref = wasm_ref_copy(host1);
+  check(wasm_ref_copy(val.of.ref), host1);
+  own wasm_ref_t* ref = val.of.ref;
+  check(wasm_ref_copy(ref), host1);
+  wasm_val_delete(&val);
+
+  // Interact.
+  printf("Accessing global...\n");
+  check(call_v_r(global_get), NULL);
+  call_r_v(global_set, host1);
+  check(call_v_r(global_get), host1);
+  call_r_v(global_set, host2);
+  check(call_v_r(global_get), host2);
+  call_r_v(global_set, NULL);
+  check(call_v_r(global_get), NULL);
+
+  wasm_global_get(global, &val);
+  assert(val.kind == WASM_ANYREF);
+  check(val.of.ref, NULL);
+  val.of.ref = host2;
+  wasm_global_set(global, &val);
+  check(call_v_r(global_get), host2);
+  wasm_global_get(global, &val);
+  assert(val.kind == WASM_ANYREF);
+  check(val.of.ref, host2);
+
+  printf("Accessing table...\n");
+  check(call_i_r(table_get, 0), NULL);
+  check(call_i_r(table_get, 1), NULL);
+  call_ir_v(table_set, 0, host1);
+  call_ir_v(table_set, 1, host2);
+  check(call_i_r(table_get, 0), host1);
+  check(call_i_r(table_get, 1), host2);
+  call_ir_v(table_set, 0, NULL);
+  check(call_i_r(table_get, 0), NULL);
+
+  check(wasm_table_get(table, 2), NULL);
+  wasm_table_set(table, 2, host1);
+  check(call_i_r(table_get, 2), host1);
+  check(wasm_table_get(table, 2), host1);
+
+  printf("Accessing function...\n");
+  check(call_r_r(func_call, NULL), NULL);
+  check(call_r_r(func_call, host1), host1);
+  check(call_r_r(func_call, host2), host2);
+
+  wasm_ref_delete(host1);
+  wasm_ref_delete(host2);
+
+  wasm_extern_vec_delete(&exports);
+
+  // Shut down.
+  printf("Shutting down...\n");
+  wasm_store_delete(store);
+  wasm_engine_delete(engine);
+
+  // All done.
+  printf("Done.\n");
+  return 0;
+}
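
The check helper above encodes the identity rule this example relies on: wasm_ref_same compares the referenced host object, not the wasm_ref_t pointers, so a wasm_ref_copy may be a distinct handle that still compares as the same reference. Condensed:

```c
/* Condensed form of the identity rule exercised above. */
own wasm_ref_t* r1 = wasm_foreign_as_ref(wasm_foreign_new(store));
wasm_ref_set_host_info(r1, (void*)42);
own wasm_ref_t* r2 = wasm_ref_copy(r1);  /* possibly a different C pointer */
assert(wasm_ref_same(r1, r2));           /* but the same host object */
assert(wasm_ref_get_host_info(r2) == (void*)42);  /* assumes host info lives
                                                     on the object, not the
                                                     handle */
wasm_ref_delete(r2);
wasm_ref_delete(r1);
```
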
diff --git a/deps/v8/third_party/wasm-api/example/hostref.cc b/deps/v8/third_party/wasm-api/example/hostref.cc
new file mode 100644
index 00000000000000..74e1f119d343dd
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/hostref.cc
@@ -0,0 +1,232 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+
+// A function to be called from Wasm code.
+auto callback(
+  const wasm::Val args[], wasm::Val results[]
+) -> wasm::own<wasm::Trap> {
+  std::cout << "Calling back..." << std::endl;
+  std::cout << "> " << (args[0].ref() ? args[0].ref()->get_host_info() : nullptr) << std::endl;
+  results[0] = args[0].copy();
+  return nullptr;
+}
+
+
+auto get_export_func(const wasm::ownvec<wasm::Extern>& exports, size_t i) -> const wasm::Func* {
+  if (exports.size() <= i || !exports[i]->func()) {
+    std::cout << "> Error accessing function export " << i << "/" << exports.size() << "!" << std::endl;
+    exit(1);
+  }
+  return exports[i]->func();
+}
+
+auto get_export_global(wasm::ownvec<wasm::Extern>& exports, size_t i) -> wasm::Global* {
+  if (exports.size() <= i || !exports[i]->global()) {
+    std::cout << "> Error accessing global export " << i << "!" << std::endl;
+    exit(1);
+  }
+  return exports[i]->global();
+}
+
+auto get_export_table(wasm::ownvec<wasm::Extern>& exports, size_t i) -> wasm::Table* {
+  if (exports.size() <= i || !exports[i]->table()) {
+    std::cout << "> Error accessing table export " << i << "!" << std::endl;
+    exit(1);
+  }
+  return exports[i]->table();
+}
+
+
+void call_r_v(const wasm::Func* func, const wasm::Ref* ref) {
+  std::cout << "call_r_v... " << std::flush;
+  wasm::Val args[1] = {wasm::Val::ref(ref ? ref->copy() : wasm::own<wasm::Ref>())};
+  if (func->call(args, nullptr)) {
+    std::cout << "> Error calling function!" << std::endl;
+    exit(1);
+  }
+  std::cout << "okay" << std::endl;
+}
+
+auto call_v_r(const wasm::Func* func) -> wasm::own<wasm::Ref> {
+  std::cout << "call_v_r... " << std::flush;
+  wasm::Val results[1];
+  if (func->call(nullptr, results)) {
+    std::cout << "> Error calling function!" << std::endl;
+    exit(1);
+  }
+  std::cout << "okay" << std::endl;
+  return results[0].release_ref();
+}
+
+auto call_r_r(const wasm::Func* func, const wasm::Ref* ref) -> wasm::own<wasm::Ref> {
+  std::cout << "call_r_r... " << std::flush;
+  wasm::Val args[1] = {wasm::Val::ref(ref ? ref->copy() : wasm::own<wasm::Ref>())};
+  wasm::Val results[1];
+  if (func->call(args, results)) {
+    std::cout << "> Error calling function!" << std::endl;
+    exit(1);
+  }
+  std::cout << "okay" << std::endl;
+  return results[0].release_ref();
+}
+
+void call_ir_v(const wasm::Func* func, int32_t i, const wasm::Ref* ref) {
+  std::cout << "call_ir_v... " << std::flush;
+  wasm::Val args[2] = {wasm::Val::i32(i), wasm::Val::ref(ref ? ref->copy() : wasm::own<wasm::Ref>())};
+  if (func->call(args, nullptr)) {
+    std::cout << "> Error calling function!" << std::endl;
+    exit(1);
+  }
+  std::cout << "okay" << std::endl;
+}
+
+auto call_i_r(const wasm::Func* func, int32_t i) -> wasm::own<wasm::Ref> {
+  std::cout << "call_i_r... " << std::flush;
+  wasm::Val args[1] = {wasm::Val::i32(i)};
+  wasm::Val results[1];
+  if (func->call(args, results)) {
+    std::cout << "> Error calling function!" << std::endl;
+    exit(1);
+  }
+  std::cout << "okay" << std::endl;
+  return results[0].release_ref();
+}
+
+void check(wasm::own<wasm::Ref> actual, const wasm::Ref* expected) {
+  if (actual.get() != expected &&
+      !(actual && expected && actual->same(expected))) {
+    std::cout << "> Error reading reference, expected "
+      << (expected ? expected->get_host_info() : nullptr) << ", got "
+      << (actual ? actual->get_host_info() : nullptr) << std::endl;
+    exit(1);
+  }
+}
+
+void run() {
+  // Initialize.
+  std::cout << "Initializing..." << std::endl;
+  auto engine = wasm::Engine::make();
+  auto store_ = wasm::Store::make(engine.get());
+  auto store = store_.get();
+
+  // Load binary.
+  std::cout << "Loading binary..." << std::endl;
+  std::ifstream file("hostref.wasm");
+  file.seekg(0, std::ios_base::end);
+  auto file_size = file.tellg();
+  file.seekg(0);
+  auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+  file.read(binary.get(), file_size);
+  file.close();
+  if (file.fail()) {
+    std::cout << "> Error loading module!" << std::endl;
+    return;
+  }
+
+  // Compile.
+  std::cout << "Compiling module..." << std::endl;
+  auto module = wasm::Module::make(store, binary);
+  if (!module) {
+    std::cout << "> Error compiling module!" << std::endl;
+    return;
+  }
+
+  // Create external callback function.
+  std::cout << "Creating callback..." << std::endl;
+  auto callback_type = wasm::FuncType::make(
+    wasm::ownvec<wasm::ValType>::make(wasm::ValType::make(wasm::ANYREF)),
+    wasm::ownvec<wasm::ValType>::make(wasm::ValType::make(wasm::ANYREF))
+  );
+  auto callback_func = wasm::Func::make(store, callback_type.get(), callback);
+
+  // Instantiate.
+  std::cout << "Instantiating module..." << std::endl;
+  wasm::Extern* imports[] = {callback_func.get()};
+  auto instance = wasm::Instance::make(store, module.get(), imports);
+  if (!instance) {
+    std::cout << "> Error instantiating module!" << std::endl;
+    return;
+  }
+
+  // Extract export.
+  std::cout << "Extracting exports..." << std::endl;
+  auto exports = instance->exports();
+  size_t i = 0;
+  auto global = get_export_global(exports, i++);
+  auto table = get_export_table(exports, i++);
+  auto global_set = get_export_func(exports, i++);
+  auto global_get = get_export_func(exports, i++);
+  auto table_set = get_export_func(exports, i++);
+  auto table_get = get_export_func(exports, i++);
+  auto func_call = get_export_func(exports, i++);
+
+  // Create host references.
+  std::cout << "Creating host references..." << std::endl;
+  auto host1 = wasm::Foreign::make(store);
+  auto host2 = wasm::Foreign::make(store);
+  host1->set_host_info(reinterpret_cast<void*>(1));
+  host2->set_host_info(reinterpret_cast<void*>(2));
+
+  // Some sanity checks.
+  check(nullptr, nullptr);
+  check(host1->copy(), host1.get());
+  check(host2->copy(), host2.get());
+
+  wasm::Val val = wasm::Val::ref(host1->copy());
+  check(val.ref()->copy(), host1.get());
+  auto ref = val.release_ref();
+  assert(val.ref() == nullptr);
+  check(ref->copy(), host1.get());
+
+  // Interact.
+  std::cout << "Accessing global..." << std::endl;
+  check(call_v_r(global_get), nullptr);
+  call_r_v(global_set, host1.get());
+  check(call_v_r(global_get), host1.get());
+  call_r_v(global_set, host2.get());
+  check(call_v_r(global_get), host2.get());
+  call_r_v(global_set, nullptr);
+  check(call_v_r(global_get), nullptr);
+
+  check(global->get().release_ref(), nullptr);
+  global->set(wasm::Val(host2->copy()));
+  check(call_v_r(global_get), host2.get());
+  check(global->get().release_ref(), host2.get());
+
+  std::cout << "Accessing table..." << std::endl;
+  check(call_i_r(table_get, 0), nullptr);
+  check(call_i_r(table_get, 1), nullptr);
+  call_ir_v(table_set, 0, host1.get());
+  call_ir_v(table_set, 1, host2.get());
+  check(call_i_r(table_get, 0), host1.get());
+  check(call_i_r(table_get, 1), host2.get());
+  call_ir_v(table_set, 0, nullptr);
+  check(call_i_r(table_get, 0), nullptr);
+
+  check(table->get(2), nullptr);
+  table->set(2, host1.get());
+  check(call_i_r(table_get, 2), host1.get());
+  check(table->get(2), host1.get());
+
+  std::cout << "Accessing function..." << std::endl;
+  check(call_r_r(func_call, nullptr), nullptr);
+  check(call_r_r(func_call, host1.get()), host1.get());
+  check(call_r_r(func_call, host2.get()), host2.get());
+
+  // Shut down.
+  std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+  run();
+  std::cout << "Done." << std::endl;
+  return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/hostref.wasm b/deps/v8/third_party/wasm-api/example/hostref.wasm
new file mode 100644
index 00000000000000..7bfc7288e9b46f
Binary files /dev/null and b/deps/v8/third_party/wasm-api/example/hostref.wasm differ
diff --git a/deps/v8/third_party/wasm-api/example/hostref.wat b/deps/v8/third_party/wasm-api/example/hostref.wat
new file mode 100644
index 00000000000000..4d14ba6ae81687
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/hostref.wat
@@ -0,0 +1,24 @@
+(module
+  (import "" "f" (func $fun (param anyref) (result anyref)))
+
+  (global $glob (export "global") (mut anyref) (ref.null))
+  (table $tab (export "table") 10 anyref)
+
+  (func (export "global.set") (param $r anyref)
+    (global.set $glob (local.get $r))
+  )
+  (func (export "global.get") (result anyref)
+    (global.get $glob)
+  )
+
+  (func (export "table.set") (param $i i32) (param $r anyref)
+    (table.set $tab (local.get $i) (local.get $r))
+  )
+  (func (export "table.get") (param $i i32) (result anyref)
+    (table.get $tab (local.get $i))
+  )
+
+  (func (export "func.call") (param $r anyref) (result anyref)
+    (call $fun (local.get $r))
+  )
+)
diff --git a/deps/v8/third_party/wasm-api/example/memory.c b/deps/v8/third_party/wasm-api/example/memory.c
index 64b0f86b51ebc3..2c020c459740ba 100644
--- a/deps/v8/third_party/wasm-api/example/memory.c
+++ b/deps/v8/third_party/wasm-api/example/memory.c
@@ -132,7 +132,8 @@ int main(int argc, const char* argv[]) {
 
   // Instantiate.
   printf("Instantiating module...\n");
-  own wasm_instance_t* instance = wasm_instance_new(store, module, NULL);
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, module, NULL, NULL);
   if (!instance) {
     printf("> Error instantiating module!\n");
     return 1;
@@ -150,6 +151,11 @@ int main(int argc, const char* argv[]) {
 
   wasm_module_delete(module);
 
+  // Try cloning.
+  own wasm_memory_t* copy = wasm_memory_copy(memory);
+  assert(wasm_memory_same(memory, copy));
+  wasm_memory_delete(copy);
+
   // Check initial memory.
   printf("Checking memory...\n");
   check(wasm_memory_size(memory) == 2);
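
As with globals, the added cloning check treats a wasm_memory_copy as another handle onto the same linear memory. Under that reading, either handle sees the same bytes:

```c
/* Sketch; assumes the copy aliases the same linear memory, as the
   wasm_memory_same assertion above indicates. */
own wasm_memory_t* alias = wasm_memory_copy(memory);
assert(wasm_memory_same(memory, alias));
assert(wasm_memory_size(alias) == wasm_memory_size(memory));
assert(wasm_memory_data(alias)[0] == wasm_memory_data(memory)[0]);
wasm_memory_delete(alias);
```
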
diff --git a/deps/v8/third_party/wasm-api/example/memory.cc b/deps/v8/third_party/wasm-api/example/memory.cc
index fb50565c8518c5..4094accd8d07ff 100644
--- a/deps/v8/third_party/wasm-api/example/memory.cc
+++ b/deps/v8/third_party/wasm-api/example/memory.cc
@@ -7,7 +7,7 @@
 #include "wasm.hh"
 
 
-auto get_export_memory(wasm::vec<wasm::Extern*>& exports, size_t i) -> wasm::Memory* {
+auto get_export_memory(wasm::ownvec<wasm::Extern>& exports, size_t i) -> wasm::Memory* {
   if (exports.size() <= i || !exports[i]->memory()) {
     std::cout << "> Error accessing memory export " << i << "!" << std::endl;
     exit(1);
@@ -15,7 +15,7 @@ auto get_export_memory(wasm::vec<wasm::Extern*>& exports, size_t i) -> wasm::Mem
   return exports[i]->memory();
 }
 
-auto get_export_func(const wasm::vec<wasm::Extern*>& exports, size_t i) -> const wasm::Func* {
+auto get_export_func(const wasm::ownvec<wasm::Extern>& exports, size_t i) -> const wasm::Func* {
   if (exports.size() <= i || !exports[i]->func()) {
     std::cout << "> Error accessing function export " << i << "!" << std::endl;
     exit(1);
@@ -79,7 +79,7 @@ void run() {
   file.close();
   if (file.fail()) {
     std::cout << "> Error loading module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Compile.
@@ -87,7 +87,7 @@ void run() {
   auto module = wasm::Module::make(store, binary);
   if (!module) {
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Instantiate.
@@ -95,7 +95,7 @@ void run() {
   auto instance = wasm::Instance::make(store, module.get(), nullptr);
   if (!instance) {
     std::cout << "> Error instantiating module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Extract export.
@@ -107,6 +107,9 @@ void run() {
   auto load_func = get_export_func(exports, i++);
   auto store_func = get_export_func(exports, i++);
 
+  // Try cloning.
+  assert(memory->copy()->same(memory));
+
   // Check initial memory.
   std::cout << "Checking memory..." << std::endl;
   check(memory->size(), 2u);
diff --git a/deps/v8/third_party/wasm-api/example/multi.c b/deps/v8/third_party/wasm-api/example/multi.c
new file mode 100644
index 00000000000000..7bd4676bae76dd
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/multi.c
@@ -0,0 +1,154 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+// A function to be called from Wasm code.
+own wasm_trap_t* callback(
+  const wasm_val_t args[], wasm_val_t results[]
+) {
+  printf("Calling back...\n> ");
+  printf("> %"PRIu32" %"PRIu64" %"PRIu64" %"PRIu32"\n",
+    args[0].of.i32, args[1].of.i64, args[2].of.i64, args[3].of.i32);
+  printf("\n");
+
+  wasm_val_copy(&results[0], &args[0]);
+  return NULL;
+}
+
+
+// A function closure.
+own wasm_trap_t* closure_callback(
+  void* env, const wasm_val_t args[], wasm_val_t results[]
+) {
+  int i = *(int*)env;
+  printf("Calling back closure...\n");
+  printf("> %d\n", i);
+
+  results[0].kind = WASM_I32;
+  results[0].of.i32 = (int32_t)i;
+  return NULL;
+}
+
+
+int main(int argc, const char* argv[]) {
+  // Initialize.
+  printf("Initializing...\n");
+  wasm_engine_t* engine = wasm_engine_new();
+  wasm_store_t* store = wasm_store_new(engine);
+
+  // Load binary.
+  printf("Loading binary...\n");
+  FILE* file = fopen("multi.wasm", "r");
+  if (!file) {
+    printf("> Error loading module!\n");
+    return 1;
+  }
+  fseek(file, 0L, SEEK_END);
+  size_t file_size = ftell(file);
+  fseek(file, 0L, SEEK_SET);
+  wasm_byte_vec_t binary;
+  wasm_byte_vec_new_uninitialized(&binary, file_size);
+  if (fread(binary.data, file_size, 1, file) != 1) {
+    printf("> Error loading module!\n");
+    return 1;
+  }
+  fclose(file);
+
+  // Compile.
+  printf("Compiling module...\n");
+  own wasm_module_t* module = wasm_module_new(store, &binary);
+  if (!module) {
+    printf("> Error compiling module!\n");
+    return 1;
+  }
+
+  wasm_byte_vec_delete(&binary);
+
+  // Create external print functions.
+  printf("Creating callback...\n");
+  wasm_valtype_t* types[4] = {
+    wasm_valtype_new_i32(), wasm_valtype_new_i64(),
+    wasm_valtype_new_i64(), wasm_valtype_new_i32()
+  };
+  own wasm_valtype_vec_t tuple1, tuple2;
+  wasm_valtype_vec_new(&tuple1, 4, types);
+  wasm_valtype_vec_copy(&tuple2, &tuple1);
+  own wasm_functype_t* callback_type = wasm_functype_new(&tuple1, &tuple2);
+  own wasm_func_t* callback_func =
+    wasm_func_new(store, callback_type, callback);
+
+  wasm_functype_delete(callback_type);
+
+  // Instantiate.
+  printf("Instantiating module...\n");
+  const wasm_extern_t* imports[] = {wasm_func_as_extern(callback_func)};
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, module, imports, NULL);
+  if (!instance) {
+    printf("> Error instantiating module!\n");
+    return 1;
+  }
+
+  wasm_func_delete(callback_func);
+
+  // Extract export.
+  printf("Extracting export...\n");
+  own wasm_extern_vec_t exports;
+  wasm_instance_exports(instance, &exports);
+  if (exports.size == 0) {
+    printf("> Error accessing exports!\n");
+    return 1;
+  }
+  const wasm_func_t* run_func = wasm_extern_as_func(exports.data[0]);
+  if (run_func == NULL) {
+    printf("> Error accessing export!\n");
+    return 1;
+  }
+
+  wasm_module_delete(module);
+  wasm_instance_delete(instance);
+
+  // Call.
+  printf("Calling export...\n");
+  wasm_val_t args[4];
+  args[0].kind = WASM_I32;
+  args[0].of.i32 = 1;
+  args[1].kind = WASM_I64;
+  args[1].of.i64 = 2;
+  args[2].kind = WASM_I64;
+  args[2].of.i64 = 3;
+  args[3].kind = WASM_I32;
+  args[3].of.i32 = 4;
+  wasm_val_t results[4];
+  if (wasm_func_call(run_func, args, results)) {
+    printf("> Error calling function!\n");
+    return 1;
+  }
+
+  wasm_extern_vec_delete(&exports);
+
+  // Print result.
+  printf("Printing result...\n");
+  printf("> %"PRIu32" %"PRIu64" %"PRIu64" %"PRIu32"\n",
+    results[0].of.i32, results[1].of.i64,
+    results[2].of.i64, results[3].of.i32);
+
+  assert(results[0].of.i32 == 4);
+  assert(results[1].of.i64 == 3);
+  assert(results[2].of.i64 == 2);
+  assert(results[3].of.i32 == 1);
+
+  // Shut down.
+  printf("Shutting down...\n");
+  wasm_store_delete(store);
+  wasm_engine_delete(engine);
+
+  // All done.
+  printf("Done.\n");
+  return 0;
+}
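
The expected values follow from two swaps: the exported g (see multi.wat below) exchanges its two middle arguments before calling the import, and the callback exchanges the first and last values, so g(1, 2, 3, 4) returns (4, 3, 2, 1), exactly what the asserts check. The tuple-building pattern also scales down; a hypothetical nullary function returning an (i32, i64) pair would be typed the same way:

```c
/* Hypothetical smaller variant: a () -> (i32, i64) function type built
   with the same valtype-vec API as the 4-tuple above. */
wasm_valtype_t* ts[2] = {wasm_valtype_new_i32(), wasm_valtype_new_i64()};
wasm_valtype_vec_t params, results;
wasm_valtype_vec_new_empty(&params);
wasm_valtype_vec_new(&results, 2, ts);
own wasm_functype_t* pair_type = wasm_functype_new(&params, &results);
/* ... pass pair_type to wasm_func_new ... */
wasm_functype_delete(pair_type);
```
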
diff --git a/deps/v8/third_party/wasm-api/example/multi.cc b/deps/v8/third_party/wasm-api/example/multi.cc
new file mode 100644
index 00000000000000..5ed4c9b7717fa0
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/multi.cc
@@ -0,0 +1,118 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+// A function to be called from Wasm code.
+auto callback(
+  const wasm::Val args[], wasm::Val results[]
+) -> wasm::own<wasm::Trap> {
+  std::cout << "Calling back..." << std::endl;
+  std::cout << "> " << args[0].i32();
+  std::cout << " " << args[1].i64();
+  std::cout << " " << args[2].i64();
+  std::cout << " " << args[3].i32() << std::endl;
+  results[0] = args[3].copy();
+  results[1] = args[1].copy();
+  results[2] = args[2].copy();
+  results[3] = args[0].copy();
+  return nullptr;
+}
+
+
+void run() {
+  // Initialize.
+  std::cout << "Initializing..." << std::endl;
+  auto engine = wasm::Engine::make();
+  auto store_ = wasm::Store::make(engine.get());
+  auto store = store_.get();
+
+  // Load binary.
+  std::cout << "Loading binary..." << std::endl;
+  std::ifstream file("multi.wasm");
+  file.seekg(0, std::ios_base::end);
+  auto file_size = file.tellg();
+  file.seekg(0);
+  auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+  file.read(binary.get(), file_size);
+  file.close();
+  if (file.fail()) {
+    std::cout << "> Error loading module!" << std::endl;
+    exit(1);
+  }
+
+  // Compile.
+  std::cout << "Compiling module..." << std::endl;
+  auto module = wasm::Module::make(store, binary);
+  if (!module) {
+    std::cout << "> Error compiling module!" << std::endl;
+    exit(1);
+  }
+
+  // Create external print functions.
+  std::cout << "Creating callback..." << std::endl;
+  auto tuple = wasm::ownvec<wasm::ValType>::make(
+    wasm::ValType::make(wasm::I32),
+    wasm::ValType::make(wasm::I64),
+    wasm::ValType::make(wasm::I64),
+    wasm::ValType::make(wasm::I32)
+  );
+  auto callback_type =
+    wasm::FuncType::make(tuple.deep_copy(), tuple.deep_copy());
+  auto callback_func = wasm::Func::make(store, callback_type.get(), callback);
+
+  // Instantiate.
+  std::cout << "Instantiating module..." << std::endl;
+  wasm::Extern* imports[] = {callback_func.get()};
+  auto instance = wasm::Instance::make(store, module.get(), imports);
+  if (!instance) {
+    std::cout << "> Error instantiating module!" << std::endl;
+    exit(1);
+  }
+
+  // Extract export.
+  std::cout << "Extracting export..." << std::endl;
+  auto exports = instance->exports();
+  if (exports.size() == 0 || exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func()) {
+    std::cout << "> Error accessing export!" << std::endl;
+    exit(1);
+  }
+  auto run_func = exports[0]->func();
+
+  // Call.
+  std::cout << "Calling export..." << std::endl;
+  wasm::Val args[] = {
+    wasm::Val::i32(1), wasm::Val::i64(2), wasm::Val::i64(3), wasm::Val::i32(4)
+  };
+  wasm::Val results[4];
+  if (wasm::own<wasm::Trap> trap = run_func->call(args, results)) {
+    std::cout << "> Error calling function! " << trap->message().get() << std::endl;
+    exit(1);
+  }
+
+  // Print result.
+  std::cout << "Printing result..." << std::endl;
+  std::cout << "> " << results[0].i32();
+  std::cout << " " << results[1].i64();
+  std::cout << " " << results[2].i64();
+  std::cout << " " << results[3].i32() << std::endl;
+
+  assert(results[0].i32() == 4);
+  assert(results[1].i64() == 3);
+  assert(results[2].i64() == 2);
+  assert(results[3].i32() == 1);
+
+  // Shut down.
+  std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+  run();
+  std::cout << "Done." << std::endl;
+  return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/multi.wasm b/deps/v8/third_party/wasm-api/example/multi.wasm
new file mode 100644
index 00000000000000..bff0143f3f1608
Binary files /dev/null and b/deps/v8/third_party/wasm-api/example/multi.wasm differ
diff --git a/deps/v8/third_party/wasm-api/example/multi.wat b/deps/v8/third_party/wasm-api/example/multi.wat
new file mode 100644
index 00000000000000..e7fb3311259cce
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/multi.wat
@@ -0,0 +1,7 @@
+(module
+  (func $f (import "" "f") (param i32 i64 i64 i32) (result i32 i64 i64 i32))
+
+  (func $g (export "g") (param i32 i64 i64 i32) (result i32 i64 i64 i32)
+    (call $f (local.get 0) (local.get 2) (local.get 1) (local.get 3))
+  )
+)
diff --git a/deps/v8/third_party/wasm-api/example/reflect.c b/deps/v8/third_party/wasm-api/example/reflect.c
index a210a85c153b52..15e0165d1916ba 100644
--- a/deps/v8/third_party/wasm-api/example/reflect.c
+++ b/deps/v8/third_party/wasm-api/example/reflect.c
@@ -45,28 +45,32 @@ void print_valtypes(const wasm_valtype_vec_t* types) {
 void print_externtype(const wasm_externtype_t* type) {
   switch (wasm_externtype_kind(type)) {
     case WASM_EXTERN_FUNC: {
-      const wasm_functype_t* functype = wasm_externtype_as_functype_const(type);
+      const wasm_functype_t* functype =
+        wasm_externtype_as_functype_const(type);
       printf("func ");
       print_valtypes(wasm_functype_params(functype));
       printf(" -> ");
       print_valtypes(wasm_functype_results(functype));
     } break;
     case WASM_EXTERN_GLOBAL: {
-      const wasm_globaltype_t* globaltype = wasm_externtype_as_globaltype_const(type);
+      const wasm_globaltype_t* globaltype =
+        wasm_externtype_as_globaltype_const(type);
       printf("global ");
       print_mutability(wasm_globaltype_mutability(globaltype));
       printf(" ");
       print_valtype(wasm_globaltype_content(globaltype));
     } break;
     case WASM_EXTERN_TABLE: {
-      const wasm_tabletype_t* tabletype = wasm_externtype_as_tabletype_const(type);
+      const wasm_tabletype_t* tabletype =
+        wasm_externtype_as_tabletype_const(type);
       printf("table ");
       print_limits(wasm_tabletype_limits(tabletype));
       printf(" ");
       print_valtype(wasm_tabletype_element(tabletype));
     } break;
     case WASM_EXTERN_MEMORY: {
-      const wasm_memorytype_t* memorytype = wasm_externtype_as_memorytype_const(type);
+      const wasm_memorytype_t* memorytype =
+        wasm_externtype_as_memorytype_const(type);
       printf("memory ");
       print_limits(wasm_memorytype_limits(memorytype));
     } break;
@@ -114,7 +118,7 @@ int main(int argc, const char* argv[]) {
 
   // Instantiate.
   printf("Instantiating module...\n");
-  own wasm_instance_t* instance = wasm_instance_new(store, module, NULL);
+  own wasm_instance_t* instance = wasm_instance_new(store, module, NULL, NULL);
   if (!instance) {
     printf("> Error instantiating module!\n");
     return 1;
@@ -129,7 +133,8 @@ int main(int argc, const char* argv[]) {
   assert(exports.size == export_types.size);
 
   for (size_t i = 0; i < exports.size; ++i) {
-    assert(wasm_extern_kind(exports.data[i]) == wasm_externtype_kind(wasm_exporttype_type(export_types.data[i])));
+    assert(wasm_extern_kind(exports.data[i]) ==
+      wasm_externtype_kind(wasm_exporttype_type(export_types.data[i])));
     printf("> export %zu ", i);
     print_name(wasm_exporttype_name(export_types.data[i]));
     printf("\n");
diff --git a/deps/v8/third_party/wasm-api/example/reflect.cc b/deps/v8/third_party/wasm-api/example/reflect.cc
index c04b9e4d4ee1fb..e0f8ba6856857b 100644
--- a/deps/v8/third_party/wasm-api/example/reflect.cc
+++ b/deps/v8/third_party/wasm-api/example/reflect.cc
@@ -33,7 +33,7 @@ auto operator<<(std::ostream& out, const wasm::ValType& type) -> std::ostream& {
   return out;
 }
 
-auto operator<<(std::ostream& out, const wasm::vec<wasm::ValType*>& types) -> std::ostream& {
+auto operator<<(std::ostream& out, const wasm::ownvec<wasm::ValType>& types) -> std::ostream& {
   bool first = true;
   for (size_t i = 0; i < types.size(); ++i) {
     if (first) {
@@ -88,7 +88,7 @@ void run() {
   file.close();
   if (file.fail()) {
     std::cout << "> Error loading module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Compile.
@@ -96,7 +96,7 @@ void run() {
   auto module = wasm::Module::make(store, binary);
   if (!module) {
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Instantiate.
@@ -104,7 +104,7 @@ void run() {
   auto instance = wasm::Instance::make(store, module.get(), nullptr);
   if (!instance) {
     std::cout << "> Error instantiating module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Extract exports.
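With `FuncType::params()` and `FuncType::results()` now returning `const wasm::ownvec<wasm::ValType>&`, the rewritten `operator<<` above can print a signature directly. A minimal usage sketch, assuming a `const wasm::FuncType* functype` is in scope:

```cpp
// Prints the parameter and result types via the ownvec-aware operator<<.
std::cout << "func " << functype->params()
          << " -> " << functype->results() << std::endl;
```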
diff --git a/deps/v8/third_party/wasm-api/example/serialize.c b/deps/v8/third_party/wasm-api/example/serialize.c
index 8c7efc2ee91151..4522c00dff5e4b 100644
--- a/deps/v8/third_party/wasm-api/example/serialize.c
+++ b/deps/v8/third_party/wasm-api/example/serialize.c
@@ -77,7 +77,8 @@ int main(int argc, const char* argv[]) {
   // Instantiate.
   printf("Instantiating deserialized module...\n");
   const wasm_extern_t* imports[] = { wasm_func_as_extern(hello_func) };
-  own wasm_instance_t* instance = wasm_instance_new(store, deserialized, imports);
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, deserialized, imports, NULL);
   if (!instance) {
     printf("> Error instantiating module!\n");
     return 1;
diff --git a/deps/v8/third_party/wasm-api/example/serialize.cc b/deps/v8/third_party/wasm-api/example/serialize.cc
index 895ef396e0cff7..7f74edba76a286 100644
--- a/deps/v8/third_party/wasm-api/example/serialize.cc
+++ b/deps/v8/third_party/wasm-api/example/serialize.cc
@@ -10,7 +10,7 @@
 // A function to be called from Wasm code.
 auto hello_callback(
   const wasm::Val args[], wasm::Val results[]
-) -> wasm::own<wasm::Trap*> {
+) -> wasm::own<wasm::Trap> {
   std::cout << "Calling back..." << std::endl;
   std::cout << "> Hello world!" << std::endl;
   return nullptr;
@@ -35,7 +35,7 @@ void run() {
   file.close();
   if (file.fail()) {
     std::cout << "> Error loading module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Compile.
@@ -43,7 +43,7 @@ void run() {
   auto module = wasm::Module::make(store, binary);
   if (!module) {
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Serialize module.
@@ -55,13 +55,13 @@ void run() {
   auto deserialized = wasm::Module::deserialize(store, serialized);
   if (!deserialized) {
     std::cout << "> Error deserializing module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Create external print functions.
   std::cout << "Creating callback..." << std::endl;
   auto hello_type = wasm::FuncType::make(
-    wasm::vec<wasm::ValType*>::make(), wasm::vec<wasm::ValType*>::make()
+    wasm::ownvec<wasm::ValType>::make(), wasm::ownvec<wasm::ValType>::make()
   );
   auto hello_func = wasm::Func::make(store, hello_type.get(), hello_callback);
 
@@ -71,7 +71,7 @@ void run() {
   auto instance = wasm::Instance::make(store, deserialized.get(), imports);
   if (!instance) {
     std::cout << "> Error instantiating module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Extract export.
@@ -79,15 +79,15 @@ void run() {
   auto exports = instance->exports();
   if (exports.size() == 0 || exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func()) {
     std::cout << "> Error accessing export!" << std::endl;
-    return;
+    exit(1);
   }
   auto run_func = exports[0]->func();
 
   // Call.
   std::cout << "Calling export..." << std::endl;
-  if (! run_func->call()) {
+  if (run_func->call()) {
     std::cout << "> Error calling function!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Shut down.
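The flipped condition above is the substantive fix in this hunk: `Func::call` returns `wasm::own<wasm::Trap>`, where null means success, so the old `if (! run_func->call())` treated successful calls as errors. The idiomatic check, as a sketch:

```cpp
// call() yields an owning Trap pointer; a non-null result signals a trap.
if (wasm::own<wasm::Trap> trap = run_func->call()) {
  std::cout << "> Trap: " << trap->message().get() << std::endl;
  exit(1);
}
```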
diff --git a/deps/v8/third_party/wasm-api/example/start.c b/deps/v8/third_party/wasm-api/example/start.c
new file mode 100644
index 00000000000000..42fa317490ba4b
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/start.c
@@ -0,0 +1,105 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+
+void print_frame(wasm_frame_t* frame) {
+  printf("> %p @ 0x%zx = %"PRIu32".0x%zx\n",
+    wasm_frame_instance(frame),
+    wasm_frame_module_offset(frame),
+    wasm_frame_func_index(frame),
+    wasm_frame_func_offset(frame)
+  );
+}
+
+
+int main(int argc, const char* argv[]) {
+  // Initialize.
+  printf("Initializing...\n");
+  wasm_engine_t* engine = wasm_engine_new();
+  wasm_store_t* store = wasm_store_new(engine);
+
+  // Load binary.
+  printf("Loading binary...\n");
+  FILE* file = fopen("start.wasm", "r");
+  if (!file) {
+    printf("> Error loading module!\n");
+    return 1;
+  }
+  fseek(file, 0L, SEEK_END);
+  size_t file_size = ftell(file);
+  fseek(file, 0L, SEEK_SET);
+  wasm_byte_vec_t binary;
+  wasm_byte_vec_new_uninitialized(&binary, file_size);
+  if (fread(binary.data, file_size, 1, file) != 1) {
+    printf("> Error loading module!\n");
+    return 1;
+  }
+  fclose(file);
+
+  // Compile.
+  printf("Compiling module...\n");
+  own wasm_module_t* module = wasm_module_new(store, &binary);
+  if (!module) {
+    printf("> Error compiling module!\n");
+    return 1;
+  }
+
+  wasm_byte_vec_delete(&binary);
+
+  // Instantiate.
+  printf("Instantiating module...\n");
+  own wasm_trap_t* trap = NULL;
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, module, NULL, &trap);
+  if (instance || !trap) {
+    printf("> Error instantiating module, expected trap!\n");
+    return 1;
+  }
+
+  wasm_module_delete(module);
+
+  // Print result.
+  printf("Printing message...\n");
+  own wasm_name_t message;
+  wasm_trap_message(trap, &message);
+  printf("> %s\n", message.data);
+
+  printf("Printing origin...\n");
+  own wasm_frame_t* frame = wasm_trap_origin(trap);
+  if (frame) {
+    print_frame(frame);
+    wasm_frame_delete(frame);
+  } else {
+    printf("> Empty origin.\n");
+  }
+
+  printf("Printing trace...\n");
+  own wasm_frame_vec_t trace;
+  wasm_trap_trace(trap, &trace);
+  if (trace.size > 0) {
+    for (size_t i = 0; i < trace.size; ++i) {
+      print_frame(trace.data[i]);
+    }
+  } else {
+    printf("> Empty trace.\n");
+  }
+
+  wasm_frame_vec_delete(&trace);
+  wasm_trap_delete(trap);
+  wasm_name_delete(&message);
+
+  // Shut down.
+  printf("Shutting down...\n");
+  wasm_store_delete(store);
+  wasm_engine_delete(engine);
+
+  // All done.
+  printf("Done.\n");
+  return 0;
+}
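Note that the instantiation check in this example is intentionally inverted: start.wat (below) declares a start function that executes `unreachable`, so a correct run produces `instance == NULL` with `trap` populated. For an ordinary module, the conventional pattern with the new out-parameter would be the following sketch (`imports` assumed in scope):

```cpp
wasm_trap_t* trap = NULL;
wasm_instance_t* instance = wasm_instance_new(store, module, imports, &trap);
if (!instance) {
  if (trap) {
    // The start function trapped: inspect the message/trace, then free it.
    wasm_trap_delete(trap);
  }
  return 1;
}
```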
diff --git a/deps/v8/third_party/wasm-api/example/start.cc b/deps/v8/third_party/wasm-api/example/start.cc
new file mode 100644
index 00000000000000..71d6fd25add820
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/start.cc
@@ -0,0 +1,88 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+
+void print_frame(const wasm::Frame* frame) {
+  std::cout << "> " << frame->instance();
+  std::cout << " @ 0x" << std::hex << frame->module_offset();
+  std::cout << " = " << frame->func_index();
+  std::cout << ".0x" << std::hex << frame->func_offset() << std::endl;
+}
+
+
+void run() {
+  // Initialize.
+  std::cout << "Initializing..." << std::endl;
+  auto engine = wasm::Engine::make();
+  auto store_ = wasm::Store::make(engine.get());
+  auto store = store_.get();
+
+  // Load binary.
+  std::cout << "Loading binary..." << std::endl;
+  std::ifstream file("start.wasm");
+  file.seekg(0, std::ios_base::end);
+  auto file_size = file.tellg();
+  file.seekg(0);
+  auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+  file.read(binary.get(), file_size);
+  file.close();
+  if (file.fail()) {
+    std::cout << "> Error loading module!" << std::endl;
+    exit(1);
+  }
+
+  // Compile.
+  std::cout << "Compiling module..." << std::endl;
+  auto module = wasm::Module::make(store, binary);
+  if (!module) {
+    std::cout << "> Error compiling module!" << std::endl;
+    exit(1);
+  }
+
+  // Instantiate.
+  std::cout << "Instantiating module..." << std::endl;
+  wasm::own<wasm::Trap> trap;
+  auto instance = wasm::Instance::make(store, module.get(), nullptr, &trap);
+  if (instance || !trap) {
+    std::cout << "> Error instantiating module, expected trap!" << std::endl;
+    exit(1);
+  }
+
+  // Print result.
+  std::cout << "Printing message..." << std::endl;
+  std::cout << "> " << trap->message().get() << std::endl;
+
+  std::cout << "Printing origin..." << std::endl;
+  auto frame = trap->origin();
+  if (frame) {
+    print_frame(frame.get());
+  } else {
+    std::cout << "> Empty origin." << std::endl;
+  }
+
+  std::cout << "Printing trace..." << std::endl;
+  auto trace = trap->trace();
+  if (trace.size() > 0) {
+    for (size_t i = 0; i < trace.size(); ++i) {
+      print_frame(trace[i].get());
+    }
+  } else {
+    std::cout << "> Empty trace." << std::endl;
+  }
+
+  // Shut down.
+  std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+  run();
+  std::cout << "Done." << std::endl;
+  return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/start.wasm b/deps/v8/third_party/wasm-api/example/start.wasm
new file mode 100644
index 00000000000000..90cba2107d6ea9
Binary files /dev/null and b/deps/v8/third_party/wasm-api/example/start.wasm differ
diff --git a/deps/v8/third_party/wasm-api/example/start.wat b/deps/v8/third_party/wasm-api/example/start.wat
new file mode 100644
index 00000000000000..eb95116a49139a
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/start.wat
@@ -0,0 +1,4 @@
+(module
+  (func $start (unreachable))
+  (start $start)
+)
diff --git a/deps/v8/third_party/wasm-api/example/table.c b/deps/v8/third_party/wasm-api/example/table.c
index 8fec71f23faf92..0ff6617f729e8d 100644
--- a/deps/v8/third_party/wasm-api/example/table.c
+++ b/deps/v8/third_party/wasm-api/example/table.c
@@ -110,7 +110,8 @@ int main(int argc, const char* argv[]) {
 
   // Instantiate.
   printf("Instantiating module...\n");
-  own wasm_instance_t* instance = wasm_instance_new(store, module, NULL);
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, module, NULL, NULL);
   if (!instance) {
     printf("> Error instantiating module!\n");
     return 1;
@@ -135,6 +136,11 @@ int main(int argc, const char* argv[]) {
 
   wasm_functype_delete(neg_type);
 
+  // Try cloning.
+  own wasm_table_t* copy = wasm_table_copy(table);
+  assert(wasm_table_same(table, copy));
+  wasm_table_delete(copy);
+
   // Check initial table.
   printf("Checking table...\n");
   check(wasm_table_size(table) == 2);
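The new cloning check documents reference semantics: `wasm_table_copy` clones the handle, not the table, and `wasm_table_same` confirms both handles refer to one underlying object. A sketch of what that implies, assuming `table` as above:

```cpp
wasm_table_t* alias = wasm_table_copy(table);
assert(wasm_table_same(table, alias));                     // one underlying table
assert(wasm_table_size(alias) == wasm_table_size(table));  // shared state
wasm_table_delete(alias);  // frees only the handle; 'table' remains valid
```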
diff --git a/deps/v8/third_party/wasm-api/example/table.cc b/deps/v8/third_party/wasm-api/example/table.cc
index cb669cdb160888..b19d37763b02c5 100644
--- a/deps/v8/third_party/wasm-api/example/table.cc
+++ b/deps/v8/third_party/wasm-api/example/table.cc
@@ -10,14 +10,14 @@
 // A function to be called from Wasm code.
 auto neg_callback(
   const wasm::Val args[], wasm::Val results[]
-) -> wasm::own<wasm::Trap*> {
+) -> wasm::own<wasm::Trap> {
   std::cout << "Calling back..." << std::endl;
   results[0] = wasm::Val(-args[0].i32());
   return nullptr;
 }
 
 
-auto get_export_table(wasm::vec<wasm::Extern*>& exports, size_t i) -> wasm::Table* {
+auto get_export_table(wasm::ownvec<wasm::Extern>& exports, size_t i) -> wasm::Table* {
   if (exports.size() <= i || !exports[i]->table()) {
     std::cout << "> Error accessing table export " << i << "!" << std::endl;
     exit(1);
@@ -25,7 +25,7 @@ auto get_export_table(wasm::vec<wasm::Extern*>& exports, size_t i) -> wasm::Tabl
   return exports[i]->table();
 }
 
-auto get_export_func(const wasm::vec<wasm::Extern*>& exports, size_t i) -> const wasm::Func* {
+auto get_export_func(const wasm::ownvec<wasm::Extern>& exports, size_t i) -> const wasm::Func* {
   if (exports.size() <= i || !exports[i]->func()) {
     std::cout << "> Error accessing function export " << i << "!" << std::endl;
     exit(1);
@@ -87,7 +87,7 @@ void run() {
   file.close();
   if (file.fail()) {
     std::cout << "> Error loading module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Compile.
@@ -95,7 +95,7 @@ void run() {
   auto module = wasm::Module::make(store, binary);
   if (!module) {
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Instantiate.
@@ -103,7 +103,7 @@ void run() {
   auto instance = wasm::Instance::make(store, module.get(), nullptr);
   if (!instance) {
     std::cout << "> Error instantiating module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Extract export.
@@ -118,11 +118,14 @@ void run() {
   // Create external function.
   std::cout << "Creating callback..." << std::endl;
   auto neg_type = wasm::FuncType::make(
-    wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32)),
-    wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32))
+    wasm::ownvec<wasm::ValType>::make(wasm::ValType::make(wasm::I32)),
+    wasm::ownvec<wasm::ValType>::make(wasm::ValType::make(wasm::I32))
   );
   auto h = wasm::Func::make(store, neg_type.get(), neg_callback);
 
+  // Try cloning.
+  assert(table->copy()->same(table));
+
   // Check initial table.
   std::cout << "Checking table..." << std::endl;
   check(table->size(), 2u);
diff --git a/deps/v8/third_party/wasm-api/example/threads.c b/deps/v8/third_party/wasm-api/example/threads.c
index 2f5b0f3c1fedee..9f9d5894a668c7 100644
--- a/deps/v8/third_party/wasm-api/example/threads.c
+++ b/deps/v8/third_party/wasm-api/example/threads.c
@@ -52,7 +52,8 @@ void* run(void* args_abs) {
     const wasm_extern_t* imports[] = {
       wasm_func_as_extern(func), wasm_global_as_extern(global),
     };
-    own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+    own wasm_instance_t* instance =
+      wasm_instance_new(store, module, imports, NULL);
     if (!instance) {
       printf("> Error instantiating module!\n");
       return NULL;
diff --git a/deps/v8/third_party/wasm-api/example/threads.cc b/deps/v8/third_party/wasm-api/example/threads.cc
index 48b4fcd4862aa8..3fdaded6cd0185 100644
--- a/deps/v8/third_party/wasm-api/example/threads.cc
+++ b/deps/v8/third_party/wasm-api/example/threads.cc
@@ -11,7 +11,7 @@ const int N_REPS = 3;
 // A function to be called from Wasm code.
 auto callback(
   void* env, const wasm::Val args[], wasm::Val results[]
-) -> wasm::own<wasm::Trap*> {
+) -> wasm::own<wasm::Trap> {
   assert(args[0].kind() == wasm::I32);
   std::lock_guard<std::mutex>(*reinterpret_cast<std::mutex*>(env));
   std::cout << "Thread " << args[0].i32() << " running..." << std::endl;
@@ -33,7 +33,7 @@ void run(
   if (!module) {
     std::lock_guard<std::mutex> lock(*mutex);
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Run the example N times.
@@ -42,8 +42,8 @@ void run(
 
     // Create imports.
     auto func_type = wasm::FuncType::make(
-      wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32)),
-      wasm::vec<wasm::ValType*>::make()
+      wasm::ownvec<wasm::ValType>::make(wasm::ValType::make(wasm::I32)),
+      wasm::ownvec<wasm::ValType>::make()
     );
     auto func = wasm::Func::make(store, func_type.get(), callback, mutex);
 
@@ -58,7 +58,7 @@ void run(
     if (!instance) {
       std::lock_guard<std::mutex> lock(*mutex);
       std::cout << "> Error instantiating module!" << std::endl;
-      return;
+      exit(1);
     }
 
     // Extract export.
@@ -66,7 +66,7 @@ void run(
     if (exports.size() == 0 || exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func()) {
       std::lock_guard<std::mutex> lock(*mutex);
       std::cout << "> Error accessing export!" << std::endl;
-      return;
+      exit(1);
     }
     auto run_func = exports[0]->func();
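One context line in this file deserves a note: in the callback above, `std::lock_guard<std::mutex>(*reinterpret_cast<std::mutex*>(env));` constructs an unnamed temporary that is destroyed at the end of that statement, so the `std::cout` line after it runs unlocked. The named form used at the other call sites in this file holds the mutex for the whole scope (sketch):

```cpp
// Naming the guard keeps the mutex held until the end of the enclosing scope.
std::lock_guard<std::mutex> lock(*reinterpret_cast<std::mutex*>(env));
std::cout << "Thread " << args[0].i32() << " running..." << std::endl;
```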
 
diff --git a/deps/v8/third_party/wasm-api/example/trap.c b/deps/v8/third_party/wasm-api/example/trap.c
index 74620dce3bc5f5..975d6f8599f425 100644
--- a/deps/v8/third_party/wasm-api/example/trap.c
+++ b/deps/v8/third_party/wasm-api/example/trap.c
@@ -20,6 +20,16 @@ own wasm_trap_t* fail_callback(
 }
 
 
+void print_frame(wasm_frame_t* frame) {
+  printf("> %p @ 0x%zx = %"PRIu32".0x%zx\n",
+    wasm_frame_instance(frame),
+    wasm_frame_module_offset(frame),
+    wasm_frame_func_index(frame),
+    wasm_frame_func_offset(frame)
+  );
+}
+
+
 int main(int argc, const char* argv[]) {
   // Initialize.
   printf("Initializing...\n");
@@ -56,15 +66,18 @@ int main(int argc, const char* argv[]) {
 
   // Create external print functions.
   printf("Creating callback...\n");
-  own wasm_functype_t* fail_type = wasm_functype_new_0_1(wasm_valtype_new_i32());
-  own wasm_func_t* fail_func = wasm_func_new_with_env(store, fail_type, fail_callback, store, NULL);
+  own wasm_functype_t* fail_type =
+    wasm_functype_new_0_1(wasm_valtype_new_i32());
+  own wasm_func_t* fail_func =
+    wasm_func_new_with_env(store, fail_type, fail_callback, store, NULL);
 
   wasm_functype_delete(fail_type);
 
   // Instantiate.
   printf("Instantiating module...\n");
   const wasm_extern_t* imports[] = { wasm_func_as_extern(fail_func) };
-  own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+  own wasm_instance_t* instance =
+    wasm_instance_new(store, module, imports, NULL);
   if (!instance) {
     printf("> Error instantiating module!\n");
     return 1;
@@ -94,8 +107,8 @@ int main(int argc, const char* argv[]) {
 
     printf("Calling export %d...\n", i);
     own wasm_trap_t* trap = wasm_func_call(func, NULL, NULL);
-    if (! trap) {
-      printf("> Error calling function!\n");
+    if (!trap) {
+      printf("> Error calling function, expected trap!\n");
       return 1;
     }
 
@@ -104,6 +117,27 @@ int main(int argc, const char* argv[]) {
     wasm_trap_message(trap, &message);
     printf("> %s\n", message.data);
 
+    printf("Printing origin...\n");
+    own wasm_frame_t* frame = wasm_trap_origin(trap);
+    if (frame) {
+      print_frame(frame);
+      wasm_frame_delete(frame);
+    } else {
+      printf("> Empty origin.\n");
+    }
+
+    printf("Printing trace...\n");
+    own wasm_frame_vec_t trace;
+    wasm_trap_trace(trap, &trace);
+    if (trace.size > 0) {
+      for (size_t i = 0; i < trace.size; ++i) {
+        print_frame(trace.data[i]);
+      }
+    } else {
+      printf("> Empty trace.\n");
+    }
+
+    wasm_frame_vec_delete(&trace);
     wasm_trap_delete(trap);
     wasm_name_delete(&message);
   }
diff --git a/deps/v8/third_party/wasm-api/example/trap.cc b/deps/v8/third_party/wasm-api/example/trap.cc
index 33116217241b16..3a7dcc6cff0212 100644
--- a/deps/v8/third_party/wasm-api/example/trap.cc
+++ b/deps/v8/third_party/wasm-api/example/trap.cc
@@ -9,7 +9,7 @@
 // A function to be called from Wasm code.
 auto fail_callback(
   void* env, const wasm::Val args[], wasm::Val results[]
-) -> wasm::own<wasm::Trap*> {
+) -> wasm::own<wasm::Trap> {
   std::cout << "Calling back..." << std::endl;
   auto store = reinterpret_cast<wasm::Store*>(env);
   auto message = wasm::Name::make(std::string("callback abort"));
@@ -17,6 +17,14 @@ auto fail_callback(
 }
 
 
+void print_frame(const wasm::Frame* frame) {
+  std::cout << "> " << frame->instance();
+  std::cout << " @ 0x" << std::hex << frame->module_offset();
+  std::cout << " = " << frame->func_index();
+  std::cout << ".0x" << std::hex << frame->func_offset() << std::endl;
+}
+
+
 void run() {
   // Initialize.
   std::cout << "Initializing..." << std::endl;
@@ -35,7 +43,7 @@ void run() {
   file.close();
   if (file.fail()) {
     std::cout << "> Error loading module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Compile.
@@ -43,14 +51,14 @@ void run() {
   auto module = wasm::Module::make(store, binary);
   if (!module) {
     std::cout << "> Error compiling module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Create external print functions.
   std::cout << "Creating callback..." << std::endl;
   auto fail_type = wasm::FuncType::make(
-    wasm::vec<wasm::ValType*>::make(),
-    wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32))
+    wasm::ownvec<wasm::ValType>::make(),
+    wasm::ownvec<wasm::ValType>::make(wasm::ValType::make(wasm::I32))
   );
   auto fail_func =
     wasm::Func::make(store, fail_type.get(), fail_callback, store);
@@ -61,7 +69,7 @@ void run() {
   auto instance = wasm::Instance::make(store, module.get(), imports);
   if (!instance) {
     std::cout << "> Error instantiating module!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Extract export.
@@ -71,7 +79,7 @@ void run() {
       exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func() ||
       exports[1]->kind() != wasm::EXTERN_FUNC || !exports[1]->func()) {
     std::cout << "> Error accessing exports!" << std::endl;
-    return;
+    exit(1);
   }
 
   // Call.
@@ -79,12 +87,30 @@ void run() {
     std::cout << "Calling export " << i << "..." << std::endl;
     auto trap = exports[i]->func()->call();
     if (!trap) {
-      std::cout << "> Error calling function!" << std::endl;
-      return;
+      std::cout << "> Error calling function, expected trap!" << std::endl;
+      exit(1);
     }
 
     std::cout << "Printing message..." << std::endl;
     std::cout << "> " << trap->message().get() << std::endl;
+
+    std::cout << "Printing origin..." << std::endl;
+    auto frame = trap->origin();
+    if (frame) {
+      print_frame(frame.get());
+    } else {
+      std::cout << "> Empty origin." << std::endl;
+    }
+
+    std::cout << "Printing trace..." << std::endl;
+    auto trace = trap->trace();
+    if (trace.size() > 0) {
+      for (size_t i = 0; i < trace.size(); ++i) {
+        print_frame(trace[i].get());
+      }
+    } else {
+      std::cout << "> Empty trace." << std::endl;
+    }
   }
 
   // Shut down.
diff --git a/deps/v8/third_party/wasm-api/wasm.h b/deps/v8/third_party/wasm-api/wasm.h
index bb66c042d9840f..e343c55fae4818 100644
--- a/deps/v8/third_party/wasm-api/wasm.h
+++ b/deps/v8/third_party/wasm-api/wasm.h
@@ -136,10 +136,11 @@ own wasm_store_t* wasm_store_new(wasm_engine_t*);
 
 // Type attributes
 
-typedef enum wasm_mutability_t {
+typedef uint8_t wasm_mutability_t;
+enum wasm_mutability_enum {
   WASM_CONST,
-  WASM_VAR
-} wasm_mutability_t;
+  WASM_VAR,
+};
 
 typedef struct wasm_limits_t {
   uint32_t min;
@@ -162,14 +163,15 @@ static const uint32_t wasm_limits_max_default = 0xffffffff;
 
 WASM_DECLARE_TYPE(valtype)
 
-typedef enum wasm_valkind_t {
+typedef uint8_t wasm_valkind_t;
+enum wasm_valkind_enum {
   WASM_I32,
   WASM_I64,
   WASM_F32,
   WASM_F64,
-  WASM_ANYREF,
-  WASM_FUNCREF
-} wasm_valkind_t;
+  WASM_ANYREF = 128,
+  WASM_FUNCREF,
+};
 
 own wasm_valtype_t* wasm_valtype_new(wasm_valkind_t);
 
@@ -236,12 +238,13 @@ const wasm_limits_t* wasm_memorytype_limits(const wasm_memorytype_t*);
 
 WASM_DECLARE_TYPE(externtype)
 
-typedef enum wasm_externkind_t {
+typedef uint8_t wasm_externkind_t;
+enum wasm_externkind_enum {
   WASM_EXTERN_FUNC,
   WASM_EXTERN_GLOBAL,
   WASM_EXTERN_TABLE,
-  WASM_EXTERN_MEMORY
-} wasm_externkind_t;
+  WASM_EXTERN_MEMORY,
+};
 
 wasm_externkind_t wasm_externtype_kind(const wasm_externtype_t*);
 
@@ -315,15 +318,16 @@ WASM_DECLARE_VEC(val, )
 
 // References
 
-#define WASM_DECLARE_REF_BASE(name) \
-  WASM_DECLARE_OWN(name) \
-  \
-  own wasm_##name##_t* wasm_##name##_copy(const wasm_##name##_t*); \
-  \
-  void* wasm_##name##_get_host_info(const wasm_##name##_t*); \
-  void wasm_##name##_set_host_info(wasm_##name##_t*, void*); \
-  void wasm_##name##_set_host_info_with_finalizer( \
-    wasm_##name##_t*, void*, void (*)(void*));
+#define WASM_DECLARE_REF_BASE(name)                                        \
+  WASM_DECLARE_OWN(name)                                                   \
+                                                                           \
+  own wasm_##name##_t* wasm_##name##_copy(const wasm_##name##_t*);         \
+  bool wasm_##name##_same(const wasm_##name##_t*, const wasm_##name##_t*); \
+                                                                           \
+  void* wasm_##name##_get_host_info(const wasm_##name##_t*);               \
+  void wasm_##name##_set_host_info(wasm_##name##_t*, void*);               \
+  void wasm_##name##_set_host_info_with_finalizer(wasm_##name##_t*, void*, \
+                                                  void (*)(void*));
 
 #define WASM_DECLARE_REF(name) \
   WASM_DECLARE_REF_BASE(name) \
@@ -344,6 +348,18 @@ WASM_DECLARE_VEC(val, )
 WASM_DECLARE_REF_BASE(ref)
 
 
+// Frames
+
+WASM_DECLARE_OWN(frame)
+WASM_DECLARE_VEC(frame, *)
+own wasm_frame_t* wasm_frame_copy(const wasm_frame_t*);
+
+struct wasm_instance_t* wasm_frame_instance(const wasm_frame_t*);
+uint32_t wasm_frame_func_index(const wasm_frame_t*);
+size_t wasm_frame_func_offset(const wasm_frame_t*);
+size_t wasm_frame_module_offset(const wasm_frame_t*);
+
+
 // Traps
 
 typedef wasm_name_t wasm_message_t;  // null terminated
@@ -353,6 +369,8 @@ WASM_DECLARE_REF(trap)
 own wasm_trap_t* wasm_trap_new(wasm_store_t* store, const wasm_message_t*);
 
 void wasm_trap_message(const wasm_trap_t*, own wasm_message_t* out);
+own wasm_frame_t* wasm_trap_origin(const wasm_trap_t*);
+void wasm_trap_trace(const wasm_trap_t*, own wasm_frame_vec_t* out);
 
 
 // Foreign Objects
@@ -485,7 +503,9 @@ const wasm_memory_t* wasm_extern_as_memory_const(const wasm_extern_t*);
 WASM_DECLARE_REF(instance)
 
 own wasm_instance_t* wasm_instance_new(
-  wasm_store_t*, const wasm_module_t*, const wasm_extern_t* const imports[]);
+  wasm_store_t*, const wasm_module_t*, const wasm_extern_t* const imports[],
+  own wasm_trap_t**
+);
 
 void wasm_instance_exports(const wasm_instance_t*, own wasm_extern_vec_t* out);
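The widened `wasm_instance_new` signature is the source of the four-argument call sites patched throughout the examples above: the final parameter is an optional out-parameter that receives ownership of a trap raised by the module's start function. A migration sketch (names assumed in scope):

```cpp
// Callers that don't need trap details simply pass NULL:
wasm_instance_t* instance = wasm_instance_new(store, module, imports, NULL);

// Callers that do, capture the trap and release it when done:
wasm_trap_t* trap = NULL;
instance = wasm_instance_new(store, module, imports, &trap);
if (!instance && trap) {
  // report the trap message/trace ...
  wasm_trap_delete(trap);
}
```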
 
diff --git a/deps/v8/third_party/wasm-api/wasm.hh b/deps/v8/third_party/wasm-api/wasm.hh
index c153d4b9df20d9..8def22428b98c6 100644
--- a/deps/v8/third_party/wasm-api/wasm.hh
+++ b/deps/v8/third_party/wasm-api/wasm.hh
@@ -29,81 +29,8 @@ using float64_t = double;
 
 namespace wasm {
 
-// Ownership
-
-template<class T> struct owner { using type = T; };
-template<class T> struct owner<T*> { using type = std::unique_ptr<T>; };
-
-template<class T>
-using own = typename owner<T>::type;
-
-template<class T>
-auto make_own(T x) -> own<T> { return own<T>(std::move(x)); }
-
-
 // Vectors
 
-template<class T>
-struct vec_traits {
-  static void construct(size_t size, T data[]) {}
-  static void destruct(size_t size, T data[]) {}
-  static void move(size_t size, T* data, T init[]) {
-    for (size_t i = 0; i < size; ++i) data[i] = std::move(init[i]);
-  }
-  static void copy(size_t size, T data[], const T init[]) {
-    for (size_t i = 0; i < size; ++i) data[i] = init[i];
-  }
-
-  using proxy = T&;
-};
-
-template<class T>
-struct vec_traits<T*> {
-  static void construct(size_t size, T* data[]) {
-    for (size_t i = 0; i < size; ++i) data[i] = nullptr;
-  }
-  static void destruct(size_t size, T* data[]) {
-    for (size_t i = 0; i < size; ++i) {
-      if (data[i]) delete data[i];
-    }
-  }
-  static void move(size_t size, T* data[], own<T*> init[]) {
-    for (size_t i = 0; i < size; ++i) data[i] = init[i].release();
-  }
-  static void copy(size_t size, T* data[], const T* const init[]) {
-    for (size_t i = 0; i < size; ++i) {
-      if (init[i]) data[i] = init[i]->copy().release();
-    }
-  }
-
-  class proxy {
-    T*& elem_;
-  public:
-    proxy(T*& elem) : elem_(elem) {}
-    operator T*() { return elem_; }
-    operator const T*() const { return elem_; }
-    auto operator=(own<T*>&& elem) -> proxy& {
-      reset(std::move(elem));
-      return *this;
-    }
-    void reset(own<T*>&& val = own<T*>()) {
-      if (elem_) delete elem_;
-      elem_ = val.release();
-    }
-    auto release() -> T* {
-      auto elem = elem_;
-      elem_ = nullptr;
-      return elem;
-    }
-    auto move() -> own<T*> { return make_own(release()); }
-    auto get() -> T* { return elem_; }
-    auto get() const -> const T* { return elem_; }
-    auto operator->() -> T* { return elem_; }
-    auto operator->() const -> const T* { return elem_; }
-  };
-};
-
-
 template<class T>
 class vec {
   static const size_t invalid_size = SIZE_MAX;
@@ -111,7 +38,7 @@ class vec {
   size_t size_;
   std::unique_ptr<T[]> data_;
 
-#ifdef DEBUG
+#ifdef WASM_API_DEBUG
   void make_data();
   void free_data();
 #else
@@ -128,11 +55,11 @@ class vec {
   }
 
 public:
-  template<class U>
-  vec(vec<U>&& that) : vec(that.size_, that.data_.release()) {}
+  using elem_type = T;
+
+  vec(vec<T>&& that) : vec(that.size_, that.data_.release()) {}
 
   ~vec() {
-    if (data_) vec_traits<T>::destruct(size_, data_.get());
     free_data();
   }
 
@@ -157,14 +84,13 @@ public:
   }
 
   void reset() {
-    if (data_) vec_traits<T>::destruct(size_, data_.get());
     free_data();
-    size_ = 0;
+    size_ = invalid_size;
     data_.reset();
   }
 
   void reset(vec& that) {
-    reset();
+    free_data();
     size_ = that.size_;
     data_.reset(that.data_.release());
   }
@@ -174,31 +100,36 @@ public:
     return *this;
   }
 
-  auto operator[](size_t i) -> typename vec_traits<T>::proxy {
+  auto operator[](size_t i) -> T& {
     assert(i < size_);
-    return typename vec_traits<T>::proxy(data_[i]);
+    return data_[i];
   }
 
-  auto operator[](size_t i) const -> const typename vec_traits<T>::proxy {
+  auto operator[](size_t i) const -> const T& {
     assert(i < size_);
-    return typename vec_traits<T>::proxy(data_[i]);
+    return data_[i];
   }
 
   auto copy() const -> vec {
     auto v = vec(size_);
-    if (v) vec_traits<T>::copy(size_, v.data_.get(), data_.get());
+    if (v) for (size_t i = 0; i < size_; i++) v.data_[i] = data_[i];
     return v;
   }
 
-  static auto make_uninitialized(size_t size = 0) -> vec {
-    auto v = vec(size);
-    if (v) vec_traits<T>::construct(size, v.data_.get());
+  // TODO: This can't be used for e.g. vec<Val>, since it requires
+  // pointer-like elements that support ->copy().
+  auto deep_copy() const -> vec {
+    auto v = vec(size_);
+    if (v) for (size_t i = 0; i < size_; ++i) v.data_[i] = data_[i]->copy();
     return v;
   }
 
-  static auto make(size_t size, own<T> init[]) -> vec {
+  static auto make_uninitialized(size_t size = 0) -> vec {
+    return vec(size);
+  }
+
+  static auto make(size_t size, T init[]) -> vec {
     auto v = vec(size);
-    if (v) vec_traits<T>::move(size, v.data_.get(), init);
+    if (v) for (size_t i = 0; i < size; ++i) v.data_[i] = std::move(init[i]);
     return v;
   }
 
@@ -208,13 +139,14 @@ public:
     return v;
   }
 
+  // TODO(msvc): MSVC requires this special case:
   static auto make() -> vec {
     return vec(0);
   }
 
   template<class... Ts>
   static auto make(Ts&&... args) -> vec {
-    own<T> data[] = { make_own(std::move(args))... };
+    T data[] = { std::move(args)... };
     return make(sizeof...(Ts), data);
   }
 
@@ -228,6 +160,15 @@ public:
 };
 
 
+// Ownership
+
+template<class T> using own = std::unique_ptr<T>;
+template<class T> using ownvec = vec<own<T>>;
+
+template<class T>
+auto make_own(T* x) -> own<T> { return own<T>(x); }
+
+
 ///////////////////////////////////////////////////////////////////////////////
 // Runtime Environment
 
@@ -239,7 +180,7 @@ public:
   ~Config();
   void operator delete(void*);
 
-  static auto make() -> own<Config*>;
+  static auto make() -> own<Config>;
 
   // Implementations may provide custom methods for manipulating Configs.
 };
@@ -253,7 +194,7 @@ public:
   ~Engine();
   void operator delete(void*);
 
-  static auto make(own<Config*>&& = Config::make()) -> own<Engine*>;
+  static auto make(own<Config>&& = Config::make()) -> own<Engine>;
 };
 
 
@@ -265,7 +206,7 @@ public:
   ~Store();
   void operator delete(void*);
 
-  static auto make(Engine*) -> own<Store*>;
+  static auto make(Engine*) -> own<Store>;
 };
 
 
@@ -274,7 +215,7 @@ public:
 
 // Type attributes
 
-enum Mutability { CONST, VAR };
+enum Mutability : uint8_t { CONST, VAR };
 
 struct Limits {
   uint32_t min;
@@ -287,7 +228,10 @@ struct Limits {
 
 // Value Types
 
-enum ValKind { I32, I64, F32, F64, ANYREF, FUNCREF };
+enum ValKind : uint8_t {
+  I32, I64, F32, F64,
+  ANYREF = 128, FUNCREF,
+};
 
 inline bool is_num(ValKind k) { return k < ANYREF; }
 inline bool is_ref(ValKind k) { return k >= ANYREF; }
@@ -299,8 +243,8 @@ public:
   ~ValType();
   void operator delete(void*);
 
-  static auto make(ValKind) -> own<ValType*>;
-  auto copy() const -> own<ValType*>;
+  static auto make(ValKind) -> own<ValType>;
+  auto copy() const -> own<ValType>;
 
   auto kind() const -> ValKind;
   auto is_num() const -> bool { return wasm::is_num(kind()); }
@@ -310,7 +254,7 @@ public:
 
 // External Types
 
-enum ExternKind {
+enum ExternKind : uint8_t {
   EXTERN_FUNC, EXTERN_GLOBAL, EXTERN_TABLE, EXTERN_MEMORY
 };
 
@@ -325,7 +269,7 @@ public:
   ~ExternType();
   void operator delete(void*);
 
-  auto copy() const-> own<ExternType*>;
+  auto copy() const-> own<ExternType>;
 
   auto kind() const -> ExternKind;
 
@@ -343,22 +287,20 @@ public:
 
 // Function Types
 
-enum class arrow { ARROW };
-
 class FuncType : public ExternType {
 public:
   FuncType() = delete;
   ~FuncType();
 
   static auto make(
-    vec<ValType*>&& params = vec<ValType*>::make(),
-    vec<ValType*>&& results = vec<ValType*>::make()
-  ) -> own<FuncType*>;
+    ownvec<ValType>&& params = ownvec<ValType>::make(),
+    ownvec<ValType>&& results = ownvec<ValType>::make()
+  ) -> own<FuncType>;
 
-  auto copy() const -> own<FuncType*>;
+  auto copy() const -> own<FuncType>;
 
-  auto params() const -> const vec<ValType*>&;
-  auto results() const -> const vec<ValType*>&;
+  auto params() const -> const ownvec<ValType>&;
+  auto results() const -> const ownvec<ValType>&;
 };
 
 
@@ -369,8 +311,8 @@ public:
   GlobalType() = delete;
   ~GlobalType();
 
-  static auto make(own<ValType*>&&, Mutability) -> own<GlobalType*>;
-  auto copy() const -> own<GlobalType*>;
+  static auto make(own<ValType>&&, Mutability) -> own<GlobalType>;
+  auto copy() const -> own<GlobalType>;
 
   auto content() const -> const ValType*;
   auto mutability() const -> Mutability;
@@ -384,8 +326,8 @@ public:
   TableType() = delete;
   ~TableType();
 
-  static auto make(own<ValType*>&&, Limits) -> own<TableType*>;
-  auto copy() const -> own<TableType*>;
+  static auto make(own<ValType>&&, Limits) -> own<TableType>;
+  auto copy() const -> own<TableType>;
 
   auto element() const -> const ValType*;
   auto limits() const -> const Limits&;
@@ -399,8 +341,8 @@ public:
   MemoryType() = delete;
   ~MemoryType();
 
-  static auto make(Limits) -> own<MemoryType*>;
-  auto copy() const -> own<MemoryType*>;
+  static auto make(Limits) -> own<MemoryType>;
+  auto copy() const -> own<MemoryType>;
 
   auto limits() const -> const Limits&;
 };
@@ -416,9 +358,9 @@ public:
   ~ImportType();
   void operator delete(void*);
 
-  static auto make(Name&& module, Name&& name, own<ExternType*>&&) ->
-    own<ImportType*>;
-  auto copy() const -> own<ImportType*>;
+  static auto make(Name&& module, Name&& name, own<ExternType>&&) ->
+    own<ImportType>;
+  auto copy() const -> own<ImportType>;
 
   auto module() const -> const Name&;
   auto name() const -> const Name&;
@@ -434,8 +376,8 @@ public:
   ~ExportType();
   void operator delete(void*);
 
-  static auto make(Name&&, own<ExternType*>&&) -> own<ExportType*>;
-  auto copy() const -> own<ExportType*>;
+  static auto make(Name&&, own<ExternType>&&) -> own<ExportType>;
+  auto copy() const -> own<ExportType>;
 
   auto name() const -> const Name&;
   auto type() const -> const ExternType*;
@@ -453,7 +395,8 @@ public:
   ~Ref();
   void operator delete(void*);
 
-  auto copy() const -> own<Ref*>;
+  auto copy() const -> own<Ref>;
+  auto same(const Ref*) const -> bool;
 
   auto get_host_info() const -> void*;
   void set_host_info(void* info, void (*finalizer)(void*) = nullptr);
@@ -480,7 +423,7 @@ public:
   Val(int64_t i) : kind_(I64) { impl_.i64 = i; }
   Val(float32_t z) : kind_(F32) { impl_.f32 = z; }
   Val(float64_t z) : kind_(F64) { impl_.f64 = z; }
-  Val(own<Ref*>&& r) : kind_(ANYREF) { impl_.ref = r.release(); }
+  Val(own<Ref>&& r) : kind_(ANYREF) { impl_.ref = r.release(); }
 
   Val(Val&& that) : kind_(that.kind_), impl_(that.impl_) {
     if (is_ref()) that.impl_.ref = nullptr;
@@ -497,7 +440,7 @@ public:
   static auto i64(int64_t x) -> Val { return Val(x); }
   static auto f32(float32_t x) -> Val { return Val(x); }
   static auto f64(float64_t x) -> Val { return Val(x); }
-  static auto ref(own<Ref*>&& x) -> Val { return Val(std::move(x)); }
+  static auto ref(own<Ref>&& x) -> Val { return Val(std::move(x)); }
   template<class T> inline static auto make(T x) -> Val;
   template<class T> inline static auto make(own<T>&& x) -> Val;
 
@@ -528,15 +471,17 @@ public:
   auto ref() const -> Ref* { assert(is_ref()); return impl_.ref; }
   template<class T> inline auto get() const -> T;
 
-  auto release_ref() -> own<Ref*> {
+  auto release_ref() -> own<Ref> {
     assert(is_ref());
     auto ref = impl_.ref;
-    ref = nullptr;
-    return own<Ref*>(ref);
+    impl_.ref = nullptr;
+    return own<Ref>(ref);
   }
 
   auto copy() const -> Val {
     if (is_ref() && impl_.ref != nullptr) {
+      // TODO(msvc): MSVC cannot handle this:
+      // impl impl = {.ref = impl_.ref->copy().release()};
       impl impl;
       impl.ref = impl_.ref->copy().release();
       return Val(kind_, impl);
@@ -551,7 +496,7 @@ template<> inline auto Val::make<int32_t>(int32_t x) -> Val { return Val(x); }
 template<> inline auto Val::make<int64_t>(int64_t x) -> Val { return Val(x); }
 template<> inline auto Val::make<float32_t>(float32_t x) -> Val { return Val(x); }
 template<> inline auto Val::make<float64_t>(float64_t x) -> Val { return Val(x); }
-template<> inline auto Val::make<Ref*>(own<Ref*>&& x) -> Val {
+template<> inline auto Val::make<Ref>(own<Ref>&& x) -> Val {
   return Val(std::move(x));
 }
 
@@ -580,15 +525,33 @@ template<> inline auto Val::get<uint64_t>() const -> uint64_t {
 
 using Message = vec<byte_t>;  // null terminated
 
+class Instance;
+
+class Frame {
+public:
+  Frame() = delete;
+  ~Frame();
+  void operator delete(void*);
+
+  auto copy() const -> own<Frame>;
+
+  auto instance() const -> Instance*;
+  auto func_index() const -> uint32_t;
+  auto func_offset() const -> size_t;
+  auto module_offset() const -> size_t;
+};
+
 class Trap : public Ref {
 public:
   Trap() = delete;
   ~Trap();
 
-  static auto make(Store*, const Message& msg) -> own<Trap*>;
-  auto copy() const -> own<Trap*>;
+  static auto make(Store*, const Message& msg) -> own<Trap>;
+  auto copy() const -> own<Trap>;
 
   auto message() const -> Message;
+  auto origin() const -> own<Frame>;  // may be null
+  auto trace() const -> ownvec<Frame>;  // may be empty, origin first
 };
 
 
@@ -611,17 +574,17 @@ public:
   ~Module();
 
   static auto validate(Store*, const vec<byte_t>& binary) -> bool;
-  static auto make(Store*, const vec<byte_t>& binary) -> own<Module*>;
-  auto copy() const -> own<Module*>;
+  static auto make(Store*, const vec<byte_t>& binary) -> own<Module>;
+  auto copy() const -> own<Module>;
 
-  auto imports() const -> vec<ImportType*>;
-  auto exports() const -> vec<ExportType*>;
+  auto imports() const -> ownvec<ImportType>;
+  auto exports() const -> ownvec<ExportType>;
 
-  auto share() const -> own<Shared<Module>*>;
-  static auto obtain(Store*, const Shared<Module>*) -> own<Module*>;
+  auto share() const -> own<Shared<Module>>;
+  static auto obtain(Store*, const Shared<Module>*) -> own<Module>;
 
   auto serialize() const -> vec<byte_t>;
-  static auto deserialize(Store*, const vec<byte_t>&) -> own<Module*>;
+  static auto deserialize(Store*, const vec<byte_t>&) -> own<Module>;
 };
 
 
@@ -632,8 +595,8 @@ public:
   Foreign() = delete;
   ~Foreign();
 
-  static auto make(Store*) -> own<Foreign*>;
-  auto copy() const -> own<Foreign*>;
+  static auto make(Store*) -> own<Foreign>;
+  auto copy() const -> own<Foreign>;
 };
 
 
@@ -649,10 +612,10 @@ public:
   Extern() = delete;
   ~Extern();
 
-  auto copy() const -> own<Extern*>;
+  auto copy() const -> own<Extern>;
 
   auto kind() const -> ExternKind;
-  auto type() const -> own<ExternType*>;
+  auto type() const -> own<ExternType>;
 
   auto func() -> Func*;
   auto global() -> Global*;
@@ -673,19 +636,19 @@ public:
   Func() = delete;
   ~Func();
 
-  using callback = auto (*)(const Val[], Val[]) -> own<Trap*>;
-  using callback_with_env = auto (*)(void*, const Val[], Val[]) -> own<Trap*>;
+  using callback = auto (*)(const Val[], Val[]) -> own<Trap>;
+  using callback_with_env = auto (*)(void*, const Val[], Val[]) -> own<Trap>;
 
-  static auto make(Store*, const FuncType*, callback) -> own<Func*>;
+  static auto make(Store*, const FuncType*, callback) -> own<Func>;
   static auto make(Store*, const FuncType*, callback_with_env,
-    void*, void (*finalizer)(void*) = nullptr) -> own<Func*>;
-  auto copy() const -> own<Func*>;
+    void*, void (*finalizer)(void*) = nullptr) -> own<Func>;
+  auto copy() const -> own<Func>;
 
-  auto type() const -> own<FuncType*>;
+  auto type() const -> own<FuncType>;
   auto param_arity() const -> size_t;
   auto result_arity() const -> size_t;
 
-  auto call(const Val[] = nullptr, Val[] = nullptr) const -> own<Trap*>;
+  auto call(const Val[] = nullptr, Val[] = nullptr) const -> own<Trap>;
 };
 
 
@@ -696,10 +659,10 @@ public:
   Global() = delete;
   ~Global();
 
-  static auto make(Store*, const GlobalType*, const Val&) -> own<Global*>;
-  auto copy() const -> own<Global*>;
+  static auto make(Store*, const GlobalType*, const Val&) -> own<Global>;
+  auto copy() const -> own<Global>;
 
-  auto type() const -> own<GlobalType*>;
+  auto type() const -> own<GlobalType>;
   auto get() const -> Val;
   void set(const Val&);
 };
@@ -715,11 +678,11 @@ public:
   using size_t = uint32_t;
 
   static auto make(
-    Store*, const TableType*, const Ref* init = nullptr) -> own<Table*>;
-  auto copy() const -> own<Table*>;
+    Store*, const TableType*, const Ref* init = nullptr) -> own<Table>;
+  auto copy() const -> own<Table>;
 
-  auto type() const -> own<TableType*>;
-  auto get(size_t index) const -> own<Ref*>;
+  auto type() const -> own<TableType>;
+  auto get(size_t index) const -> own<Ref>;
   auto set(size_t index, const Ref*) -> bool;
   auto size() const -> size_t;
   auto grow(size_t delta, const Ref* init = nullptr) -> bool;
@@ -733,14 +696,14 @@ public:
   Memory() = delete;
   ~Memory();
 
-  static auto make(Store*, const MemoryType*) -> own<Memory*>;
-  auto copy() const -> own<Memory*>;
+  static auto make(Store*, const MemoryType*) -> own<Memory>;
+  auto copy() const -> own<Memory>;
 
   using pages_t = uint32_t;
 
   static const size_t page_size = 0x10000;
 
-  auto type() const -> own<MemoryType*>;
+  auto type() const -> own<MemoryType>;
   auto data() const -> byte_t*;
   auto data_size() const -> size_t;
   auto size() const -> pages_t;
@@ -756,15 +719,16 @@ public:
   ~Instance();
 
   static auto make(
-    Store*, const Module*, const Extern* const[]) -> own<Instance*>;
-  auto copy() const -> own<Instance*>;
+    Store*, const Module*, const Extern* const[], own<Trap>* = nullptr
+  ) -> own<Instance>;
+  auto copy() const -> own<Instance>;
 
-  auto exports() const -> vec<Extern*>;
+  auto exports() const -> ownvec<Extern>;
 };
 
 
 ///////////////////////////////////////////////////////////////////////////////
 
-}  // namespave wasm
+}  // namespace wasm
 
 #endif  // #ifdef __WASM_HH
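The mechanical change running through this header: ownership is now expressed directly as `own<T> = std::unique_ptr<T>` and `ownvec<T> = vec<own<T>>`, replacing the old `own<T*>` trait and the `vec<T*>` proxy machinery deleted above. In caller code the star simply moves out of the type (sketch):

```cpp
// old: wasm::own<wasm::ValType*> t = wasm::ValType::make(wasm::I32);
// new:
wasm::own<wasm::ValType> t = wasm::ValType::make(wasm::I32);
wasm::ownvec<wasm::ValType> params =
    wasm::ownvec<wasm::ValType>::make(std::move(t));  // vector of unique_ptrs
auto functype = wasm::FuncType::make(
    std::move(params), wasm::ownvec<wasm::ValType>::make());
```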
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index e6fd7437158422..8d18f4df754368 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -10,6 +10,7 @@ group("gn_all") {
 
   data_deps = [
     ":v8_check_static_initializers",
+    "debug_helper:v8_debug_helper",
     "gcmole:v8_run_gcmole",
     "jsfunfuzz:v8_jsfunfuzz",
   ]
diff --git a/deps/v8/tools/OWNERS b/deps/v8/tools/OWNERS
index bd9cea5b3ebf73..89ee345b007455 100644
--- a/deps/v8/tools/OWNERS
+++ b/deps/v8/tools/OWNERS
@@ -1,2 +1,2 @@
-file://COMMON_OWNERS
+file:../COMMON_OWNERS
 
diff --git a/deps/v8/tools/clusterfuzz/BUILD.gn b/deps/v8/tools/clusterfuzz/BUILD.gn
index 88219600a28fa9..e0c4531555e914 100644
--- a/deps/v8/tools/clusterfuzz/BUILD.gn
+++ b/deps/v8/tools/clusterfuzz/BUILD.gn
@@ -13,6 +13,7 @@ if (v8_correctness_fuzzer) {
       "v8_fuzz_config.py",
       "v8_mock.js",
       "v8_mock_archs.js",
+      "v8_sanity_checks.js",
       "v8_suppressions.js",
       "v8_suppressions.py",
     ]
diff --git a/deps/v8/tools/clusterfuzz/OWNERS b/deps/v8/tools/clusterfuzz/OWNERS
index bdb1d555a4fb98..09e0096a2ee4cd 100644
--- a/deps/v8/tools/clusterfuzz/OWNERS
+++ b/deps/v8/tools/clusterfuzz/OWNERS
@@ -1 +1 @@
-file://INFRA_OWNERS
+file:../../INFRA_OWNERS
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
index fe94bb9ecca80f..de3c15eab22523 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
@@ -9,9 +9,9 @@
 # Compared x64,ignition with x64,ignition_turbo
 #
 # Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up --flag1 --flag2=0
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up --flag1 --flag2=0
 # Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
 #
 # Difference:
 - unknown
diff --git a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
index 636f4c9d9e0847..1443c61f2b510d 100644
--- a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
@@ -9,9 +9,9 @@
 # Compared x64,ignition with x64,ignition_turbo
 #
 # Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
 # Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
 #
 # Difference:
 - unknown
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 55f76e8bc6073b..ff481e93703734 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -105,7 +105,7 @@
 
 FLAGS = ['--correctness-fuzzer-suppressions', '--expose-gc',
          '--allow-natives-syntax', '--invoke-weak-callbacks', '--omit-quit',
-         '--es-staging', '--no-wasm-async-compilation',
+         '--es-staging', '--wasm-staging', '--no-wasm-async-compilation',
          '--suppress-asm-messages']
 
 SUPPORTED_ARCHS = ['ia32', 'x64', 'arm', 'arm64']
diff --git a/deps/v8/tools/clusterfuzz/v8_suppressions.py b/deps/v8/tools/clusterfuzz/v8_suppressions.py
index 04f67b2cf941a1..f1aaa6448a8762 100644
--- a/deps/v8/tools/clusterfuzz/v8_suppressions.py
+++ b/deps/v8/tools/clusterfuzz/v8_suppressions.py
@@ -101,6 +101,7 @@
   r'^(.*)TypeError: .* is not a function$',
   r'^(.*)TypeError: .* is not a constructor$',
   r'^(.*)TypeError: (.*) is not .*$',
+  r'^(.*):\d+: TypeError: Message suppressed for fuzzers.*$',
   r'^(.*)ReferenceError: .* is not defined$',
   r'^(.*):\d+: ReferenceError: .* is not defined$',
 
diff --git a/deps/v8/tools/debug_helper/BUILD.gn b/deps/v8/tools/debug_helper/BUILD.gn
new file mode 100644
index 00000000000000..c81fddc9e5769a
--- /dev/null
+++ b/deps/v8/tools/debug_helper/BUILD.gn
@@ -0,0 +1,104 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/snapshot_toolchain.gni")
+import("../../gni/v8.gni")
+
+config("internal_config") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
+
+  if (is_component_build) {
+    defines = [ "BUILDING_V8_DEBUG_HELPER" ]
+  }
+
+  include_dirs = [
+    ".",
+    "../..",
+    "$target_gen_dir",
+    "$target_gen_dir/../..",
+  ]
+}
+
+# This config should be applied to code using v8_debug_helper.
+config("external_config") {
+  if (is_component_build) {
+    defines = [ "USING_V8_DEBUG_HELPER" ]
+  }
+  include_dirs = [ "." ]
+}
+
+action("run_mkgrokdump") {
+  testonly = true
+  visibility = [ ":*" ]
+
+  deps = [
+    "../../test/mkgrokdump:mkgrokdump($v8_generator_toolchain)",
+  ]
+
+  script = "../run.py"
+
+  outputs = [
+    "$target_gen_dir/v8heapconst.py",
+  ]
+
+  args = [
+    "./" + rebase_path(
+            get_label_info(
+                    "../../test/mkgrokdump:mkgrokdump($v8_generator_toolchain)",
+                    "root_out_dir") + "/mkgrokdump",
+            root_build_dir),
+    "--outfile",
+    rebase_path("$target_gen_dir/v8heapconst.py", root_build_dir),
+  ]
+}
+
+action("gen_heap_constants") {
+  testonly = true
+  visibility = [ ":*" ]
+  deps = [
+    ":run_mkgrokdump",
+  ]
+  script = "gen-heap-constants.py"
+  outputs = [
+    "$target_gen_dir/heap-constants-gen.cc",
+  ]
+  args = [
+    rebase_path(target_gen_dir, root_build_dir),
+    rebase_path("$target_gen_dir/heap-constants-gen.cc", root_build_dir),
+  ]
+}
+
+v8_component("v8_debug_helper") {
+  testonly = true
+
+  public = [
+    "debug-helper.h",
+  ]
+
+  sources = [
+    "$target_gen_dir/../../torque-generated/class-debug-readers-tq.cc",
+    "$target_gen_dir/../../torque-generated/class-debug-readers-tq.h",
+    "$target_gen_dir/heap-constants-gen.cc",
+    "debug-helper-internal.cc",
+    "debug-helper-internal.h",
+    "debug-helper.h",
+    "get-object-properties.cc",
+    "heap-constants.cc",
+    "heap-constants.h",
+  ]
+
+  deps = [
+    ":gen_heap_constants",
+    "../..:run_torque",
+    "../..:v8_headers",
+    "../..:v8_libbase",
+  ]
+
+  configs = [ ":internal_config" ]
+  if (v8_enable_i18n_support) {
+    configs += [ "//third_party/icu:icu_config" ]
+  }
+
+  public_configs = [ ":external_config" ]
+}
diff --git a/deps/v8/tools/debug_helper/DEPS b/deps/v8/tools/debug_helper/DEPS
new file mode 100644
index 00000000000000..2c6adb4df5f2ee
--- /dev/null
+++ b/deps/v8/tools/debug_helper/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+torque-generated"
+]
diff --git a/deps/v8/tools/debug_helper/README.md b/deps/v8/tools/debug_helper/README.md
new file mode 100644
index 00000000000000..bc99569c430207
--- /dev/null
+++ b/deps/v8/tools/debug_helper/README.md
@@ -0,0 +1,6 @@
+# V8 debug helper
+
+This library is for debugging V8 itself, not for debugging JavaScript running
+within V8. It is designed to be called from a debugger extension running
+within a native debugger such as WinDbg or LLDB. Because it works on crash
+dumps as well as live processes, it cannot assume that all memory is
+available; a dump may not contain every page.
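That constraint shows up directly in the internal API: every value read from the debuggee is paired with a validity code rather than assumed readable (see `Value<TValue>` in debug-helper-internal.h below). A sketch of the pattern, where `ReadWord` stands in for a hypothetical memory reader:

```cpp
// Each read reports whether the bytes were actually available.
Value<uintptr_t> word = ReadWord(address);
if (word.validity == d::MemoryAccessResult::kOk) {
  // word.value is trustworthy.
} else {
  // Memory was missing (e.g. not captured in the dump); degrade gracefully.
}
```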
diff --git a/deps/v8/tools/debug_helper/debug-helper-internal.cc b/deps/v8/tools/debug_helper/debug-helper-internal.cc
new file mode 100644
index 00000000000000..ee5629b4383f7a
--- /dev/null
+++ b/deps/v8/tools/debug_helper/debug-helper-internal.cc
@@ -0,0 +1,58 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "debug-helper-internal.h"
+#include "src/common/ptr-compr-inl.h"
+#include "torque-generated/class-debug-readers-tq.h"
+
+namespace i = v8::internal;
+
+namespace v8_debug_helper_internal {
+
+bool IsPointerCompressed(uintptr_t address) {
+#if COMPRESS_POINTERS_BOOL
+  STATIC_ASSERT(i::kPtrComprHeapReservationSize == uintptr_t{1} << 32);
+  intptr_t signed_address = static_cast<intptr_t>(address);
+  return signed_address >= INT32_MIN && signed_address <= INT32_MAX;
+#else
+  return false;
+#endif
+}
+
+uintptr_t Decompress(uintptr_t address, uintptr_t any_uncompressed_ptr) {
+  if (!COMPRESS_POINTERS_BOOL || !IsPointerCompressed(address)) return address;
+  return i::DecompressTaggedAny(any_uncompressed_ptr,
+                                static_cast<i::Tagged_t>(address));
+}
+
+d::PropertyKind GetArrayKind(d::MemoryAccessResult mem_result) {
+  d::PropertyKind indexed_field_kind{};
+  switch (mem_result) {
+    case d::MemoryAccessResult::kOk:
+      indexed_field_kind = d::PropertyKind::kArrayOfKnownSize;
+      break;
+    case d::MemoryAccessResult::kAddressNotValid:
+      indexed_field_kind =
+          d::PropertyKind::kArrayOfUnknownSizeDueToInvalidMemory;
+      break;
+    default:
+      indexed_field_kind =
+          d::PropertyKind::kArrayOfUnknownSizeDueToValidButInaccessibleMemory;
+      break;
+  }
+  return indexed_field_kind;
+}
+
+std::vector<std::unique_ptr<ObjectProperty>> TqObject::GetProperties(
+    d::MemoryAccessor accessor) const {
+  return std::vector<std::unique_ptr<ObjectProperty>>();
+}
+
+const char* TqObject::GetName() const { return "v8::internal::Object"; }
+
+void TqObject::Visit(TqObjectVisitor* visitor) const {
+  visitor->VisitObject(this);
+}
+
+}  // namespace v8_debug_helper_internal
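
To make the sign-extension check in IsPointerCompressed above concrete: an
address counts as compressed exactly when its upper 33 bits are all equal,
i.e. when it is a 32-bit value sign-extended to 64 bits. A few invented
example addresses:

    // 0x00000000075bcd15 -> compressed   (fits in a positive int32)
    // 0xffffffffdeadbeef -> compressed   (sign-extended 32-bit value)
    // 0x0000265240042168 -> uncompressed (genuine 64-bit heap address)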
diff --git a/deps/v8/tools/debug_helper/debug-helper-internal.h b/deps/v8/tools/debug_helper/debug-helper-internal.h
new file mode 100644
index 00000000000000..82506c0941015d
--- /dev/null
+++ b/deps/v8/tools/debug_helper/debug-helper-internal.h
@@ -0,0 +1,130 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines internal versions of the public API structs. These should
+// all be tidy and simple classes which maintain proper ownership (unique_ptr)
+// of each other. Each contains an instance of its corresponding public type,
+// which can be filled out with GetPublicView.
+
+#ifndef V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_INTERNAL_H_
+#define V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_INTERNAL_H_
+
+#include <string>
+#include <vector>
+
+#include "debug-helper.h"
+#include "src/objects/instance-type.h"
+
+namespace d = v8::debug_helper;
+
+namespace v8_debug_helper_internal {
+
+// A value that was read from the debuggee's memory.
+template <typename TValue>
+struct Value {
+  d::MemoryAccessResult validity;
+  TValue value;
+};
+
+class ObjectProperty {
+ public:
+  inline ObjectProperty(std::string name, std::string type,
+                        std::string decompressed_type, uintptr_t address,
+                        size_t num_values = 1,
+                        d::PropertyKind kind = d::PropertyKind::kSingle)
+      : name_(name),
+        type_(type),
+        decompressed_type_(decompressed_type),
+        address_(address),
+        num_values_(num_values),
+        kind_(kind) {}
+
+  inline d::ObjectProperty* GetPublicView() {
+    public_view_.name = name_.c_str();
+    public_view_.type = type_.c_str();
+    public_view_.decompressed_type = decompressed_type_.c_str();
+    public_view_.address = address_;
+    public_view_.num_values = num_values_;
+    public_view_.kind = kind_;
+    return &public_view_;
+  }
+
+ private:
+  std::string name_;
+  std::string type_;
+  std::string decompressed_type_;
+  uintptr_t address_;
+  size_t num_values_;
+  d::PropertyKind kind_;
+
+  d::ObjectProperty public_view_;
+};
+
+class ObjectPropertiesResult;
+using ObjectPropertiesResultInternal = ObjectPropertiesResult;
+
+struct ObjectPropertiesResultExtended : public d::ObjectPropertiesResult {
+  ObjectPropertiesResultInternal* base;  // Back reference for cleanup
+};
+
+class ObjectPropertiesResult {
+ public:
+  inline ObjectPropertiesResult(
+      d::TypeCheckResult type_check_result, std::string brief, std::string type,
+      std::vector<std::unique_ptr<ObjectProperty>> properties)
+      : type_check_result_(type_check_result),
+        brief_(brief),
+        type_(type),
+        properties_(std::move(properties)) {}
+
+  inline void Prepend(const char* prefix) { brief_ = prefix + brief_; }
+
+  inline d::ObjectPropertiesResult* GetPublicView() {
+    public_view_.type_check_result = type_check_result_;
+    public_view_.brief = brief_.c_str();
+    public_view_.type = type_.c_str();
+    public_view_.num_properties = properties_.size();
+    properties_raw_.resize(0);
+    for (const auto& property : properties_) {
+      properties_raw_.push_back(property->GetPublicView());
+    }
+    public_view_.properties = properties_raw_.data();
+    public_view_.base = this;
+    return &public_view_;
+  }
+
+ private:
+  d::TypeCheckResult type_check_result_;
+  std::string brief_;
+  std::string type_;
+  std::vector<std::unique_ptr<ObjectProperty>> properties_;
+
+  ObjectPropertiesResultExtended public_view_;
+  std::vector<d::ObjectProperty*> properties_raw_;
+};
+
+class TqObjectVisitor;
+
+// Base class representing a V8 object in the debuggee's address space.
+// Subclasses for specific object types are generated by the Torque compiler.
+class TqObject {
+ public:
+  inline TqObject(uintptr_t address) : address_(address) {}
+  virtual ~TqObject() = default;
+  virtual std::vector<std::unique_ptr<ObjectProperty>> GetProperties(
+      d::MemoryAccessor accessor) const;
+  virtual const char* GetName() const;
+  virtual void Visit(TqObjectVisitor* visitor) const;
+
+ protected:
+  uintptr_t address_;
+};
+
+bool IsPointerCompressed(uintptr_t address);
+uintptr_t Decompress(uintptr_t address, uintptr_t any_uncompressed_address);
+d::PropertyKind GetArrayKind(d::MemoryAccessResult mem_result);
+
+}  // namespace v8_debug_helper_internal
+
+#endif
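
A note on the ownership pattern this header sets up: callers get a pointer
into the internal object's own public_view_, and the base back-pointer in
ObjectPropertiesResultExtended is what lets the C-style free function recover
ownership. A sketch of the flow, using only names defined in this header and
in get-object-properties.cc below:

    // Creation (inside _v8_debug_helper_GetObjectProperties):
    //   d::ObjectPropertiesResult* view =
    //       internal_result.release()->GetPublicView();
    //   // GetPublicView stores 'this' in view->base.
    //
    // Cleanup (inside _v8_debug_helper_Free_ObjectPropertiesResult):
    //   std::unique_ptr<ObjectPropertiesResult> owner(
    //       static_cast<ObjectPropertiesResultExtended*>(view)->base);
    //   // 'owner' deletes the internal object, and with it the public view.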
diff --git a/deps/v8/tools/debug_helper/debug-helper.h b/deps/v8/tools/debug_helper/debug-helper.h
new file mode 100644
index 00000000000000..9bbec76c7cfb98
--- /dev/null
+++ b/deps/v8/tools/debug_helper/debug-helper.h
@@ -0,0 +1,177 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines the public interface to v8_debug_helper.
+
+#ifndef V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_H_
+#define V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_H_
+
+#include <cstdint>
+#include <memory>
+
+#if defined(_WIN32)
+
+#ifdef BUILDING_V8_DEBUG_HELPER
+#define V8_DEBUG_HELPER_EXPORT __declspec(dllexport)
+#elif USING_V8_DEBUG_HELPER
+#define V8_DEBUG_HELPER_EXPORT __declspec(dllimport)
+#else
+#define V8_DEBUG_HELPER_EXPORT
+#endif
+
+#else  // defined(_WIN32)
+
+#ifdef BUILDING_V8_DEBUG_HELPER
+#define V8_DEBUG_HELPER_EXPORT __attribute__((visibility("default")))
+#else
+#define V8_DEBUG_HELPER_EXPORT
+#endif
+
+#endif  // defined(_WIN32)
+
+namespace v8 {
+namespace debug_helper {
+
+// Possible results when attempting to fetch memory from the debuggee.
+enum class MemoryAccessResult {
+  kOk,
+  kAddressNotValid,
+  kAddressValidButInaccessible,  // Possible in incomplete dump.
+};
+
+// Information about how this tool discovered the type of the object.
+enum class TypeCheckResult {
+  // Success cases:
+  kSmi,
+  kWeakRef,
+  kUsedMap,
+  kUsedTypeHint,
+
+  // Failure cases:
+  kUnableToDecompress,  // Caller must provide the heap range somehow.
+  kObjectPointerInvalid,
+  kObjectPointerValidButInaccessible,  // Possible in incomplete dump.
+  kMapPointerInvalid,
+  kMapPointerValidButInaccessible,  // Possible in incomplete dump.
+  kUnknownInstanceType,
+  kUnknownTypeHint,
+};
+
+enum class PropertyKind {
+  kSingle,
+  kArrayOfKnownSize,
+  kArrayOfUnknownSizeDueToInvalidMemory,
+  kArrayOfUnknownSizeDueToValidButInaccessibleMemory,
+};
+
+struct ObjectProperty {
+  const char* name;
+
+  // Statically-determined type, such as from .tq definition.
+  const char* type;
+
+  // In some cases, |type| may be a simple type representing a compressed
+  // pointer such as v8::internal::TaggedValue. In those cases,
+  // |decompressed_type| will contain the type of the object when decompressed.
+  // Otherwise, |decompressed_type| will match |type|. In any case, it is safe
+  // to pass the |decompressed_type| value as the type_hint on a subsequent call
+  // to GetObjectProperties.
+  const char* decompressed_type;
+
+  // The address where the property value can be found in the debuggee's address
+  // space, or the address of the first value for an array.
+  uintptr_t address;
+
+  // If kind indicates an array of unknown size, num_values will be 0 and debug
+  // tools should display this property as a raw pointer. Note that there is a
+  // semantic difference between num_values=1 and kind=kSingle (normal property)
+  // versus num_values=1 and kind=kArrayOfKnownSize (one-element array).
+  size_t num_values;
+
+  PropertyKind kind;
+};
+
+struct ObjectPropertiesResult {
+  TypeCheckResult type_check_result;
+  const char* brief;
+  const char* type;  // Runtime type of the object.
+  size_t num_properties;
+  ObjectProperty** properties;
+};
+
+// Copies byte_count bytes of memory from the given address in the debuggee to
+// the destination buffer.
+typedef MemoryAccessResult (*MemoryAccessor)(uintptr_t address,
+                                             uint8_t* destination,
+                                             size_t byte_count);
+
+// Additional data that can help GetObjectProperties to be more accurate. Any
+// fields you don't know can be set to zero and this library will do the best it
+// can with the information available.
+struct Roots {
+  // Beginning of allocated space for various kinds of data. These can help us
+  // to detect certain common objects that are placed in memory during startup.
+  // These values might be provided via name-value pairs in CrashPad dumps.
+  // Otherwise, they can be obtained as follows:
+  // 1. Get the Isolate pointer for the current thread. It might be somewhere on
+  //    the stack, or it might be accessible from thread-local storage with the
+  //    key stored in v8::internal::Isolate::isolate_key_.
+  // 2. Get isolate->heap_.map_space_->memory_chunk_list_.front_ and similar for
+  //    old_space_ and read_only_space_.
+  uintptr_t map_space;
+  uintptr_t old_space;
+  uintptr_t read_only_space;
+
+  // Any valid heap pointer address. On platforms where pointer compression is
+  // enabled, this can allow us to get data from compressed pointers even if the
+  // other data above is not provided. The Isolate pointer is valid for this
+  // purpose if you have it.
+  uintptr_t any_heap_pointer;
+};
+
+}  // namespace debug_helper
+}  // namespace v8
+
+extern "C" {
+// Raw library interface. If possible, use functions in v8::debug_helper
+// namespace instead because they use smart pointers to prevent leaks.
+V8_DEBUG_HELPER_EXPORT v8::debug_helper::ObjectPropertiesResult*
+_v8_debug_helper_GetObjectProperties(
+    uintptr_t object, v8::debug_helper::MemoryAccessor memory_accessor,
+    const v8::debug_helper::Roots& heap_roots, const char* type_hint);
+V8_DEBUG_HELPER_EXPORT void _v8_debug_helper_Free_ObjectPropertiesResult(
+    v8::debug_helper::ObjectPropertiesResult* result);
+}
+
+namespace v8 {
+namespace debug_helper {
+
+struct DebugHelperObjectPropertiesResultDeleter {
+  void operator()(v8::debug_helper::ObjectPropertiesResult* ptr) {
+    _v8_debug_helper_Free_ObjectPropertiesResult(ptr);
+  }
+};
+using ObjectPropertiesResultPtr =
+    std::unique_ptr<ObjectPropertiesResult,
+                    DebugHelperObjectPropertiesResultDeleter>;
+
+// Get information about the given object pointer, which could be:
+// - A tagged pointer, strong or weak
+// - A cleared weak pointer
+// - A compressed tagged pointer, sign-extended to 64 bits
+// - A tagged small integer
+// The type hint is only used if the object's Map is missing or corrupt. It
+// should be the fully-qualified name of a class that inherits from
+// v8::internal::Object.
+inline ObjectPropertiesResultPtr GetObjectProperties(
+    uintptr_t object, v8::debug_helper::MemoryAccessor memory_accessor,
+    const Roots& heap_roots, const char* type_hint = nullptr) {
+  return ObjectPropertiesResultPtr(_v8_debug_helper_GetObjectProperties(
+      object, memory_accessor, heap_roots, type_hint));
+}
+
+}  // namespace debug_helper
+}  // namespace v8
+
+#endif
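
To make the calling convention concrete, here is a hedged usage sketch. It
zero-initializes Roots, which the struct's comment explicitly allows, and
relies on ObjectPropertiesResultPtr to free the result; read_memory stands in
for a debugger-provided accessor such as the one sketched after the README
above.

    #include <cstddef>
    #include <cstdio>

    #include "tools/debug_helper/debug-helper.h"

    namespace d = v8::debug_helper;

    void DescribeObject(uintptr_t tagged_ptr, d::MemoryAccessor read_memory) {
      d::Roots roots = {};  // Unknown fields may be zero (see struct Roots).
      // If the Isolate pointer is available, passing it as
      // roots.any_heap_pointer helps decompress compressed pointers.
      d::ObjectPropertiesResultPtr result =
          d::GetObjectProperties(tagged_ptr, read_memory, roots);
      std::printf("%s: %s\n", result->type, result->brief);
      for (size_t i = 0; i < result->num_properties; ++i) {
        const d::ObjectProperty* prop = result->properties[i];
        std::printf("  %s %s @ 0x%llx\n", prop->type, prop->name,
                    static_cast<unsigned long long>(prop->address));
      }
    }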
diff --git a/deps/v8/tools/debug_helper/gen-heap-constants.py b/deps/v8/tools/debug_helper/gen-heap-constants.py
new file mode 100644
index 00000000000000..0fd575a994d336
--- /dev/null
+++ b/deps/v8/tools/debug_helper/gen-heap-constants.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This program writes a C++ file that can be used to look up whether a given
+address matches known object locations. The first argument is the directory
+containing the file v8heapconst.py; the second argument is the output .cc file.
+"""
+
+import sys
+sys.path.insert(0, sys.argv[1])
+import v8heapconst
+
+out = """
+#include <cstdint>
+#include <string>
+
+namespace v8_debug_helper_internal {
+"""
+
+def iterate_objects(target_space, camel_space_name):
+  global out
+  result = []
+  for (space, offset), (instance_type, name) in v8heapconst.KNOWN_MAPS.items():
+    if space == target_space:
+      result.append((offset, name))
+  for (space, offset), name in v8heapconst.KNOWN_OBJECTS.items():
+    if space == target_space:
+      result.append((offset, name))
+  out = out + '\nstd::string FindKnownObjectIn' + camel_space_name \
+      + '(uintptr_t offset) {\n  switch (offset) {\n'
+  for offset, name in result:
+    out = out + '    case ' + str(offset) + ': return "' + name + '";\n'
+  out = out + '    default: return "";\n  }\n}\n'
+
+iterate_objects('map_space', 'MapSpace')
+iterate_objects('read_only_space', 'ReadOnlySpace')
+iterate_objects('old_space', 'OldSpace')
+
+def iterate_maps(target_space, camel_space_name):
+  global out
+  out = out + '\nint FindKnownMapInstanceTypeIn' + camel_space_name \
+      + '(uintptr_t offset) {\n  switch (offset) {\n'
+  for (space, offset), (instance_type, name) in v8heapconst.KNOWN_MAPS.items():
+    if space == target_space:
+      out = out + '    case ' + str(offset) + ': return ' + str(instance_type) \
+          + ';\n'
+  out = out + '    default: return -1;\n  }\n}\n'
+
+iterate_maps('map_space', 'MapSpace')
+iterate_maps('read_only_space', 'ReadOnlySpace')
+
+out = out + '\n}\n'
+
+try:
+  with open(sys.argv[2], "r") as out_file:
+    if out == out_file.read():
+      sys.exit(0)  # No modification needed.
+except IOError:  # A bare except would also swallow the SystemExit above.
+  pass  # File probably doesn't exist; write it.
+with open(sys.argv[2], "w") as out_file:
+  out_file.write(out)
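
For orientation, the emitted .cc file has roughly the following shape; the
offsets and names here are invented, real entries come from v8heapconst.py,
and the FindKnownMapInstanceTypeIn* variants return an int instance type (or
-1) instead of a string.

    #include <cstdint>
    #include <string>

    namespace v8_debug_helper_internal {

    // One function per space; maps a page offset to a known object name or "".
    std::string FindKnownObjectInMapSpace(uintptr_t offset) {
      switch (offset) {
        case 2320: return "MetaMap";  // invented example entry
        case 2768: return "NullMap";  // invented example entry
        default: return "";
      }
    }

    }  // namespace v8_debug_helper_internal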
diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc
new file mode 100644
index 00000000000000..fbe992c40ee3ea
--- /dev/null
+++ b/deps/v8/tools/debug_helper/get-object-properties.cc
@@ -0,0 +1,535 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+
+#include "debug-helper-internal.h"
+#include "heap-constants.h"
+#include "include/v8-internal.h"
+#include "src/common/ptr-compr-inl.h"
+#include "src/objects/string-inl.h"
+#include "src/strings/unicode-inl.h"
+#include "torque-generated/class-debug-readers-tq.h"
+
+namespace i = v8::internal;
+
+namespace v8_debug_helper_internal {
+
+// INSTANCE_TYPE_CHECKERS_SINGLE_BASE, trimmed down to only classes that have
+// layouts defined in .tq files (this subset relationship is asserted below).
+// For now, this is a hand-maintained list.
+// TODO(v8:7793): Torque should know enough about instance types to generate
+// this list.
+#define TQ_INSTANCE_TYPES_SINGLE_BASE(V)                       \
+  V(ByteArray, BYTE_ARRAY_TYPE)                                \
+  V(BytecodeArray, BYTECODE_ARRAY_TYPE)                        \
+  V(CallHandlerInfo, CALL_HANDLER_INFO_TYPE)                   \
+  V(Cell, CELL_TYPE)                                           \
+  V(DescriptorArray, DESCRIPTOR_ARRAY_TYPE)                    \
+  V(EmbedderDataArray, EMBEDDER_DATA_ARRAY_TYPE)               \
+  V(FeedbackCell, FEEDBACK_CELL_TYPE)                          \
+  V(FeedbackVector, FEEDBACK_VECTOR_TYPE)                      \
+  V(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)                 \
+  V(Foreign, FOREIGN_TYPE)                                     \
+  V(FreeSpace, FREE_SPACE_TYPE)                                \
+  V(HeapNumber, HEAP_NUMBER_TYPE)                              \
+  V(JSArgumentsObject, JS_ARGUMENTS_TYPE)                      \
+  V(JSArray, JS_ARRAY_TYPE)                                    \
+  V(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)                       \
+  V(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE)                   \
+  V(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+  V(JSAsyncFunctionObject, JS_ASYNC_FUNCTION_OBJECT_TYPE)      \
+  V(JSAsyncGeneratorObject, JS_ASYNC_GENERATOR_OBJECT_TYPE)    \
+  V(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)                   \
+  V(JSDataView, JS_DATA_VIEW_TYPE)                             \
+  V(JSDate, JS_DATE_TYPE)                                      \
+  V(JSFunction, JS_FUNCTION_TYPE)                              \
+  V(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)                     \
+  V(JSGlobalProxy, JS_GLOBAL_PROXY_TYPE)                       \
+  V(JSMap, JS_MAP_TYPE)                                        \
+  V(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)                   \
+  V(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)               \
+  V(JSPromise, JS_PROMISE_TYPE)                                \
+  V(JSProxy, JS_PROXY_TYPE)                                    \
+  V(JSRegExp, JS_REGEXP_TYPE)                                  \
+  V(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE)    \
+  V(JSSet, JS_SET_TYPE)                                        \
+  V(JSStringIterator, JS_STRING_ITERATOR_TYPE)                 \
+  V(JSTypedArray, JS_TYPED_ARRAY_TYPE)                         \
+  V(JSPrimitiveWrapper, JS_PRIMITIVE_WRAPPER_TYPE)             \
+  V(JSFinalizationGroup, JS_FINALIZATION_GROUP_TYPE)           \
+  V(JSFinalizationGroupCleanupIterator,                        \
+    JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE)               \
+  V(JSWeakMap, JS_WEAK_MAP_TYPE)                               \
+  V(JSWeakRef, JS_WEAK_REF_TYPE)                               \
+  V(JSWeakSet, JS_WEAK_SET_TYPE)                               \
+  V(Map, MAP_TYPE)                                             \
+  V(Oddball, ODDBALL_TYPE)                                     \
+  V(PreparseData, PREPARSE_DATA_TYPE)                          \
+  V(PropertyArray, PROPERTY_ARRAY_TYPE)                        \
+  V(PropertyCell, PROPERTY_CELL_TYPE)                          \
+  V(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)             \
+  V(Symbol, SYMBOL_TYPE)                                       \
+  V(WasmExceptionObject, WASM_EXCEPTION_TYPE)                  \
+  V(WasmGlobalObject, WASM_GLOBAL_TYPE)                        \
+  V(WasmMemoryObject, WASM_MEMORY_TYPE)                        \
+  V(WasmModuleObject, WASM_MODULE_TYPE)                        \
+  V(WasmTableObject, WASM_TABLE_TYPE)                          \
+  V(WeakArrayList, WEAK_ARRAY_LIST_TYPE)                       \
+  V(WeakCell, WEAK_CELL_TYPE)
+#ifdef V8_INTL_SUPPORT
+
+#define TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS(V)                \
+  TQ_INSTANCE_TYPES_SINGLE_BASE(V)                           \
+  V(JSV8BreakIterator, JS_INTL_V8_BREAK_ITERATOR_TYPE)       \
+  V(JSCollator, JS_INTL_COLLATOR_TYPE)                       \
+  V(JSDateTimeFormat, JS_INTL_DATE_TIME_FORMAT_TYPE)         \
+  V(JSListFormat, JS_INTL_LIST_FORMAT_TYPE)                  \
+  V(JSLocale, JS_INTL_LOCALE_TYPE)                           \
+  V(JSNumberFormat, JS_INTL_NUMBER_FORMAT_TYPE)              \
+  V(JSPluralRules, JS_INTL_PLURAL_RULES_TYPE)                \
+  V(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
+  V(JSSegmentIterator, JS_INTL_SEGMENT_ITERATOR_TYPE)        \
+  V(JSSegmenter, JS_INTL_SEGMENTER_TYPE)
+
+#else
+
+#define TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS(V) TQ_INSTANCE_TYPES_SINGLE_BASE(V)
+
+#endif  // V8_INTL_SUPPORT
+
+enum class InstanceTypeCheckersSingle {
+#define ENUM_VALUE(ClassName, INSTANCE_TYPE) k##ClassName = i::INSTANCE_TYPE,
+  INSTANCE_TYPE_CHECKERS_SINGLE(ENUM_VALUE)
+#undef ENUM_VALUE
+};
+
+#define CHECK_VALUE(ClassName, INSTANCE_TYPE)                            \
+  static_assert(                                                         \
+      static_cast<i::InstanceType>(                                      \
+          InstanceTypeCheckersSingle::k##ClassName) == i::INSTANCE_TYPE, \
+      "TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS must be subset of "            \
+      "INSTANCE_TYPE_CHECKERS_SINGLE. Invalid class: " #ClassName);
+TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS(CHECK_VALUE)
+#undef CHECK_VALUE
+
+// Adapts one STRUCT_LIST_GENERATOR entry to (Name, NAME) format.
+#define STRUCT_INSTANCE_TYPE_ADAPTER(V, NAME, Name, name) V(Name, NAME)
+
+#define TQ_INSTANCE_TYPES_SINGLE(V)     \
+  TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS(V) \
+  STRUCT_LIST_GENERATOR(STRUCT_INSTANCE_TYPE_ADAPTER, V)
+
+// Likewise, these are the subset of INSTANCE_TYPE_CHECKERS_RANGE that have
+// definitions in .tq files, rearranged with more specific things first. Also
+// includes JSObject and JSReceiver, which in the runtime are optimized to use
+// a one-sided check.
+#define TQ_INSTANCE_TYPES_RANGE(V)                                           \
+  V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)                          \
+  V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE)               \
+  V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE)                    \
+  V(String, FIRST_STRING_TYPE, LAST_STRING_TYPE)                             \
+  V(Name, FIRST_NAME_TYPE, LAST_NAME_TYPE)                                   \
+  V(WeakFixedArray, FIRST_WEAK_FIXED_ARRAY_TYPE, LAST_WEAK_FIXED_ARRAY_TYPE) \
+  V(JSObject, FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE)                     \
+  V(JSReceiver, FIRST_JS_RECEIVER_TYPE, LAST_JS_RECEIVER_TYPE)
+
+std::string AppendAddressAndType(const std::string& brief, uintptr_t address,
+                                 const char* type) {
+  std::stringstream brief_stream;
+  brief_stream << "0x" << std::hex << address << " <" << type << ">";
+  return brief.empty() ? brief_stream.str()
+                       : brief + " (" + brief_stream.str() + ")";
+}
+
+struct TypedObject {
+  TypedObject(d::TypeCheckResult type_check_result,
+              std::unique_ptr<TqObject> object)
+      : type_check_result(type_check_result), object(std::move(object)) {}
+  d::TypeCheckResult type_check_result;
+  std::unique_ptr<TqObject> object;
+};
+
+TypedObject GetTypedObjectByHint(uintptr_t address,
+                                 std::string type_hint_string) {
+#define TYPE_NAME_CASE(ClassName, ...)                      \
+  if (type_hint_string == "v8::internal::" #ClassName) {    \
+    return {d::TypeCheckResult::kUsedTypeHint,              \
+            v8::base::make_unique<Tq##ClassName>(address)}; \
+  }
+
+  TQ_INSTANCE_TYPES_SINGLE(TYPE_NAME_CASE)
+  TQ_INSTANCE_TYPES_RANGE(TYPE_NAME_CASE)
+
+#undef TYPE_NAME_CASE
+
+  return {d::TypeCheckResult::kUnknownTypeHint,
+          v8::base::make_unique<TqHeapObject>(address)};
+}
+
+TypedObject GetTypedObjectForString(uintptr_t address, i::InstanceType type) {
+  class StringGetDispatcher : public i::AllStatic {
+   public:
+#define DEFINE_METHOD(ClassName)                                   \
+  static inline TypedObject Handle##ClassName(uintptr_t address) { \
+    return {d::TypeCheckResult::kUsedMap,                          \
+            v8::base::make_unique<Tq##ClassName>(address)};        \
+  }
+    STRING_CLASS_TYPES(DEFINE_METHOD)
+#undef DEFINE_METHOD
+    static inline TypedObject HandleInvalidString(uintptr_t address) {
+      return {d::TypeCheckResult::kUnknownInstanceType,
+              v8::base::make_unique<TqString>(address)};
+    }
+  };
+
+  return i::StringShape(type)
+      .DispatchToSpecificTypeWithoutCast<StringGetDispatcher, TypedObject>(
+          address);
+}
+
+TypedObject GetTypedHeapObject(uintptr_t address, d::MemoryAccessor accessor,
+                               const char* type_hint) {
+  auto heap_object = v8::base::make_unique<TqHeapObject>(address);
+  Value<uintptr_t> map_ptr = heap_object->GetMapValue(accessor);
+
+  if (map_ptr.validity != d::MemoryAccessResult::kOk) {
+    return {map_ptr.validity == d::MemoryAccessResult::kAddressNotValid
+                ? d::TypeCheckResult::kObjectPointerInvalid
+                : d::TypeCheckResult::kObjectPointerValidButInaccessible,
+            std::move(heap_object)};
+  }
+  Value<i::InstanceType> type =
+      TqMap(map_ptr.value).GetInstanceTypeValue(accessor);
+
+  if (type.validity == d::MemoryAccessResult::kOk) {
+    // Dispatch to the appropriate method for each instance type. After calling
+    // the generated method to fetch properties, we can add custom properties.
+    switch (type.value) {
+#define INSTANCE_TYPE_CASE(ClassName, INSTANCE_TYPE) \
+  case i::INSTANCE_TYPE:                             \
+    return {d::TypeCheckResult::kUsedMap,            \
+            v8::base::make_unique<Tq##ClassName>(address)};
+      TQ_INSTANCE_TYPES_SINGLE(INSTANCE_TYPE_CASE)
+#undef INSTANCE_TYPE_CASE
+
+      default:
+
+        // Special case: concrete subtypes of String are not included in the
+        // main instance type list because they use the low bits of the instance
+        // type enum as flags.
+        if (type.value <= i::LAST_STRING_TYPE) {
+          return GetTypedObjectForString(address, type.value);
+        }
+
+#define INSTANCE_RANGE_CASE(ClassName, FIRST_TYPE, LAST_TYPE)      \
+  if (type.value >= i::FIRST_TYPE && type.value <= i::LAST_TYPE) { \
+    return {d::TypeCheckResult::kUsedMap,                          \
+            v8::base::make_unique<Tq##ClassName>(address)};        \
+  }
+        TQ_INSTANCE_TYPES_RANGE(INSTANCE_RANGE_CASE)
+#undef INSTANCE_RANGE_CASE
+
+        return {d::TypeCheckResult::kUnknownInstanceType,
+                std::move(heap_object)};
+        break;
+    }
+  } else if (type_hint != nullptr) {
+    // Try to use the provided type hint, since the real instance type is
+    // unavailable.
+    return GetTypedObjectByHint(address, type_hint);
+  } else {
+    // TODO(v8:9376): Use known maps here. If known map is just a guess (because
+    // root pointers weren't provided), then create a synthetic property with
+    // the more specific type. Then the caller could presumably ask us again
+    // with the type hint we provided. Otherwise, just go ahead and use it to
+    // generate properties.
+    return {type.validity == d::MemoryAccessResult::kAddressNotValid
+                ? d::TypeCheckResult::kMapPointerInvalid
+                : d::TypeCheckResult::kMapPointerValidButInaccessible,
+            std::move(heap_object)};
+  }
+}
+
+#undef STRUCT_INSTANCE_TYPE_ADAPTER
+#undef TQ_INSTANCE_TYPES_SINGLE_BASE
+#undef TQ_INSTANCE_TYPES_SINGLE
+#undef TQ_INSTANCE_TYPES_SINGLE_NOSTRUCTS
+#undef TQ_INSTANCE_TYPES_RANGE
+
+// An object visitor that accumulates the first few characters of a string.
+class ReadStringVisitor : public TqObjectVisitor {
+ public:
+  ReadStringVisitor(d::MemoryAccessor accessor)
+      : accessor_(accessor), index_(0), limit_(INT32_MAX), done_(false) {}
+
+  // Returns the result as UTF-8 once visiting is complete.
+  std::string GetString() {
+    std::vector<char> result(
+        string_.size() * unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit);
+    unsigned write_index = 0;
+    int prev_char = unibrow::Utf16::kNoPreviousCharacter;
+    for (size_t read_index = 0; read_index < string_.size(); ++read_index) {
+      uint16_t character = string_[read_index];
+      write_index +=
+          unibrow::Utf8::Encode(result.data() + write_index, character,
+                                prev_char, /*replace_invalid=*/true);
+      prev_char = character;
+    }
+    return {result.data(), write_index};
+  }
+
+  template <typename T>
+  void ReadSeqString(const T* object) {
+    int32_t length = GetOrFinish(object->GetLengthValue(accessor_));
+    for (; index_ < length && index_ < limit_ && !done_; ++index_) {
+      char16_t c = static_cast<char16_t>(
+          GetOrFinish(object->GetCharsValue(accessor_, index_)));
+      if (!done_) AddCharacter(c);
+    }
+  }
+
+  void VisitSeqOneByteString(const TqSeqOneByteString* object) override {
+    ReadSeqString(object);
+  }
+
+  void VisitSeqTwoByteString(const TqSeqTwoByteString* object) override {
+    ReadSeqString(object);
+  }
+
+  void VisitConsString(const TqConsString* object) override {
+    uintptr_t first_address = GetOrFinish(object->GetFirstValue(accessor_));
+    if (done_) return;
+    auto first = GetTypedHeapObject(first_address, accessor_, nullptr).object;
+    first->Visit(this);
+    if (done_) return;
+    int32_t first_length = GetOrFinish(
+        static_cast<TqString*>(first.get())->GetLengthValue(accessor_));
+    uintptr_t second = GetOrFinish(object->GetSecondValue(accessor_));
+    if (done_) return;
+    IndexModifier modifier(this, -first_length, -first_length);
+    GetTypedHeapObject(second, accessor_, nullptr).object->Visit(this);
+  }
+
+  void VisitSlicedString(const TqSlicedString* object) override {
+    uintptr_t parent = GetOrFinish(object->GetParentValue(accessor_));
+    int32_t length = GetOrFinish(object->GetLengthValue(accessor_));
+    int32_t offset = i::PlatformSmiTagging::SmiToInt(
+        GetOrFinish(object->GetOffsetValue(accessor_)));
+    if (done_) return;
+    int32_t limit_adjust = offset + length - limit_;
+    IndexModifier modifier(this, offset, limit_adjust < 0 ? limit_adjust : 0);
+    GetTypedHeapObject(parent, accessor_, nullptr).object->Visit(this);
+  }
+
+  void VisitThinString(const TqThinString* object) override {
+    uintptr_t actual = GetOrFinish(object->GetActualValue(accessor_));
+    if (done_) return;
+    GetTypedHeapObject(actual, accessor_, nullptr).object->Visit(this);
+  }
+
+  void VisitExternalString(const TqExternalString* object) override {
+    // TODO(v8:9376): External strings are very common and important when
+    // attempting to print the source of a function in the browser. For now
+    // we're just ignoring them, but eventually we'll want some kind of
+    // mechanism where the user of this library can provide a callback function
+    // that fetches data from external strings.
+    AddEllipsisAndFinish();
+  }
+
+  void VisitObject(const TqObject* object) override {
+    // If we fail to find a specific type for a sub-object within a cons string,
+    // sliced string, or thin string, we will end up here.
+    AddEllipsisAndFinish();
+  }
+
+ private:
+  // Unpacks a value that was fetched from the debuggee. If the fetch failed,
+  // halts further work by appending an ellipsis and marking the visit done.
+  template <typename T>
+  T GetOrFinish(Value<T> value) {
+    if (value.validity != d::MemoryAccessResult::kOk) {
+      AddEllipsisAndFinish();
+    }
+    return value.value;
+  }
+
+  void AddEllipsisAndFinish() {
+    if (!done_) {
+      string_ += u"...";
+      done_ = true;
+    }
+  }
+
+  void AddCharacter(char16_t c) {
+    if (string_.size() >= kMaxCharacters) {
+      AddEllipsisAndFinish();
+    } else {
+      string_.push_back(c);
+    }
+  }
+
+  // Temporarily adds offsets to both index_ and limit_, to handle ConsString
+  // and SlicedString.
+  class IndexModifier {
+   public:
+    IndexModifier(ReadStringVisitor* that, int32_t index_adjust,
+                  int32_t limit_adjust)
+        : that_(that),
+          index_adjust_(index_adjust),
+          limit_adjust_(limit_adjust) {
+      that_->index_ += index_adjust_;
+      that_->limit_ += limit_adjust_;
+    }
+    ~IndexModifier() {
+      that_->index_ -= index_adjust_;
+      that_->limit_ -= limit_adjust_;
+    }
+
+   private:
+    ReadStringVisitor* that_;
+    int32_t index_adjust_;
+    int32_t limit_adjust_;
+    DISALLOW_COPY_AND_ASSIGN(IndexModifier);
+  };
+
+  static constexpr int kMaxCharacters = 80;  // How many characters to print.
+
+  std::u16string string_;  // Result string.
+  d::MemoryAccessor accessor_;
+  int32_t index_;  // Index of next char to read.
+  int32_t limit_;  // Don't read past this index (set by SlicedString).
+  bool done_;      // Whether to stop further work.
+};
+
+// An object visitor that adds extra debugging information for some types.
+class AddInfoVisitor : public TqObjectVisitor {
+ public:
+  AddInfoVisitor(const std::string& brief, d::MemoryAccessor accessor)
+      : accessor_(accessor), brief_(brief) {}
+
+  // Returns the brief object description, once visiting is complete.
+  const std::string& GetBrief() { return brief_; }
+
+  void VisitString(const TqString* object) override {
+    ReadStringVisitor visitor(accessor_);
+    object->Visit(&visitor);
+    if (!brief_.empty()) brief_ += " ";
+    brief_ += "\"" + visitor.GetString() + "\"";
+  }
+
+ private:
+  d::MemoryAccessor accessor_;
+  std::string brief_;
+};
+
+std::unique_ptr<ObjectPropertiesResult> GetHeapObjectProperties(
+    uintptr_t address, d::MemoryAccessor accessor, const char* type_hint,
+    std::string brief) {
+  TypedObject typed = GetTypedHeapObject(address, accessor, type_hint);
+
+  // TODO(v8:9376): Many object types need additional data that is not included
+  // in their Torque layout definitions. For example, JSObject has an array of
+  // in-object properties after its Torque-defined fields, which at a minimum
+  // should be represented as an array in this response. If the relevant memory
+  // is available, we should instead represent those properties (and any out-of-
+  // object properties) using their JavaScript property names.
+  AddInfoVisitor visitor(brief, accessor);
+  typed.object->Visit(&visitor);
+  brief = visitor.GetBrief();
+
+  brief = AppendAddressAndType(brief, address, typed.object->GetName());
+
+  return v8::base::make_unique<ObjectPropertiesResult>(
+      typed.type_check_result, brief, typed.object->GetName(),
+      typed.object->GetProperties(accessor));
+}
+
+std::unique_ptr<ObjectPropertiesResult> GetHeapObjectProperties(
+    uintptr_t address, d::MemoryAccessor memory_accessor, const d::Roots& roots,
+    const char* type_hint) {
+  // Try to figure out the heap range, for pointer compression (this is unused
+  // if pointer compression is disabled).
+  uintptr_t any_uncompressed_ptr = 0;
+  if (!IsPointerCompressed(address)) any_uncompressed_ptr = address;
+  if (any_uncompressed_ptr == 0) any_uncompressed_ptr = roots.any_heap_pointer;
+  if (any_uncompressed_ptr == 0) any_uncompressed_ptr = roots.map_space;
+  if (any_uncompressed_ptr == 0) any_uncompressed_ptr = roots.old_space;
+  if (any_uncompressed_ptr == 0) any_uncompressed_ptr = roots.read_only_space;
+  if (any_uncompressed_ptr == 0) {
+    // We can't figure out the heap range. Just check for known objects.
+    std::string brief = FindKnownObject(address, roots);
+    brief = AppendAddressAndType(brief, address, "v8::internal::TaggedValue");
+    return v8::base::make_unique<ObjectPropertiesResult>(
+        d::TypeCheckResult::kUnableToDecompress, brief,
+        "v8::internal::TaggedValue",
+        std::vector<std::unique_ptr<ObjectProperty>>());
+  }
+
+  // TODO(v8:9376): It seems that the space roots are at predictable offsets
+  // within the heap reservation block when pointer compression is enabled, so
+  // we should be able to set those here.
+
+  address = Decompress(address, any_uncompressed_ptr);
+  // From here on all addresses should be decompressed.
+
+  // Regardless of whether we can read the object itself, maybe we can find its
+  // pointer in the list of known objects.
+  std::string brief = FindKnownObject(address, roots);
+  return GetHeapObjectProperties(address, memory_accessor, type_hint, brief);
+}
+
+std::unique_ptr<ObjectPropertiesResult> GetObjectPropertiesImpl(
+    uintptr_t address, d::MemoryAccessor memory_accessor, const d::Roots& roots,
+    const char* type_hint) {
+  std::vector<std::unique_ptr<ObjectProperty>> props;
+  if (static_cast<uint32_t>(address) == i::kClearedWeakHeapObjectLower32) {
+    return v8::base::make_unique<ObjectPropertiesResult>(
+        d::TypeCheckResult::kWeakRef, "cleared weak ref",
+        "v8::internal::HeapObject", std::move(props));
+  }
+  bool is_weak = (address & i::kHeapObjectTagMask) == i::kWeakHeapObjectTag;
+  if (is_weak) {
+    address &= ~i::kWeakHeapObjectMask;
+  }
+  if (i::Internals::HasHeapObjectTag(address)) {
+    std::unique_ptr<ObjectPropertiesResult> result =
+        GetHeapObjectProperties(address, memory_accessor, roots, type_hint);
+    if (is_weak) {
+      result->Prepend("weak ref to ");
+    }
+    return result;
+  }
+
+  // For smi values, construct a response with a description representing the
+  // untagged value.
+  int32_t value = i::PlatformSmiTagging::SmiToInt(address);
+  std::stringstream stream;
+  stream << value << " (0x" << std::hex << value << ")";
+  return v8::base::make_unique<ObjectPropertiesResult>(
+      d::TypeCheckResult::kSmi, stream.str(), "v8::internal::Smi",
+      std::move(props));
+}
+
+}  // namespace v8_debug_helper_internal
+
+namespace di = v8_debug_helper_internal;
+
+extern "C" {
+V8_DEBUG_HELPER_EXPORT d::ObjectPropertiesResult*
+_v8_debug_helper_GetObjectProperties(uintptr_t object,
+                                     d::MemoryAccessor memory_accessor,
+                                     const d::Roots& heap_roots,
+                                     const char* type_hint) {
+  return di::GetObjectPropertiesImpl(object, memory_accessor, heap_roots,
+                                     type_hint)
+      .release()
+      ->GetPublicView();
+}
+V8_DEBUG_HELPER_EXPORT void _v8_debug_helper_Free_ObjectPropertiesResult(
+    d::ObjectPropertiesResult* result) {
+  std::unique_ptr<di::ObjectPropertiesResult> ptr(
+      static_cast<di::ObjectPropertiesResultExtended*>(result)->base);
+}
+}
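
A side note on the list macros in get-object-properties.cc above:
TQ_INSTANCE_TYPES_SINGLE(V) invokes V once per (ClassName, INSTANCE_TYPE)
pair, so a use site like TYPE_NAME_CASE in GetTypedObjectByHint expands, for
the ByteArray entry alone, roughly to:

    if (type_hint_string == "v8::internal::ByteArray") {
      return {d::TypeCheckResult::kUsedTypeHint,
              v8::base::make_unique<TqByteArray>(address)};
    }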
diff --git a/deps/v8/tools/debug_helper/heap-constants.cc b/deps/v8/tools/debug_helper/heap-constants.cc
new file mode 100644
index 00000000000000..2bd04206900a41
--- /dev/null
+++ b/deps/v8/tools/debug_helper/heap-constants.cc
@@ -0,0 +1,51 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "heap-constants.h"
+#include "src/common/globals.h"
+
+namespace d = v8::debug_helper;
+
+namespace v8_debug_helper_internal {
+
+std::string FindKnownObject(uintptr_t address, const d::Roots& roots) {
+  uintptr_t containing_page = address & ~i::kPageAlignmentMask;
+  uintptr_t offset_in_page = address & i::kPageAlignmentMask;
+
+  // If there's a match with a known root, then search only that page.
+  if (containing_page == roots.map_space) {
+    return FindKnownObjectInMapSpace(offset_in_page);
+  }
+  if (containing_page == roots.old_space) {
+    return FindKnownObjectInOldSpace(offset_in_page);
+  }
+  if (containing_page == roots.read_only_space) {
+    return FindKnownObjectInReadOnlySpace(offset_in_page);
+  }
+
+  // For any unknown roots, compile a list of things this object might be.
+  std::string result;
+  if (roots.map_space == 0) {
+    std::string sub_result = FindKnownObjectInMapSpace(offset_in_page);
+    if (!sub_result.empty()) {
+      result += "maybe " + sub_result;
+    }
+  }
+  if (roots.old_space == 0) {
+    std::string sub_result = FindKnownObjectInOldSpace(offset_in_page);
+    if (!sub_result.empty()) {
+      result = (result.empty() ? "" : result + ", ") + "maybe " + sub_result;
+    }
+  }
+  if (roots.read_only_space == 0) {
+    std::string sub_result = FindKnownObjectInReadOnlySpace(offset_in_page);
+    if (!sub_result.empty()) {
+      result = (result.empty() ? "" : result + ", ") + "maybe " + sub_result;
+    }
+  }
+
+  return result;
+}
+
+}  // namespace v8_debug_helper_internal
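
A quick worked example of the page-mask split in FindKnownObject, assuming a
64-bit build with 256 KB pages (so kPageAlignmentMask would be 0x3ffff; the
real constant comes from src/common/globals.h, and the address below is
invented):

    #include <cstdint>

    constexpr uintptr_t kAssumedPageAlignmentMask = 0x3ffff;  // 256 KB pages
    constexpr uintptr_t kExampleAddress = 0x265240042168;     // invented
    static_assert((kExampleAddress & ~kAssumedPageAlignmentMask) ==
                      0x265240040000,
                  "containing page");
    static_assert((kExampleAddress & kAssumedPageAlignmentMask) == 0x2168,
                  "offset within page");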
diff --git a/deps/v8/tools/debug_helper/heap-constants.h b/deps/v8/tools/debug_helper/heap-constants.h
new file mode 100644
index 00000000000000..f3149bbb47802c
--- /dev/null
+++ b/deps/v8/tools/debug_helper/heap-constants.h
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_DEBUG_HELPER_HEAP_CONSTANTS_H_
+#define V8_TOOLS_DEBUG_HELPER_HEAP_CONSTANTS_H_
+
+#include <cstdint>
+#include <string>
+
+#include "debug-helper.h"
+
+namespace d = v8::debug_helper;
+
+namespace v8_debug_helper_internal {
+
+// Functions generated by mkgrokdump:
+std::string FindKnownObjectInOldSpace(uintptr_t offset);
+std::string FindKnownObjectInReadOnlySpace(uintptr_t offset);
+std::string FindKnownObjectInMapSpace(uintptr_t offset);
+std::string FindKnownMapInstanceTypeInMapSpace(uintptr_t offset);
+std::string FindKnownMapInstanceTypeInReadOnlySpace(uintptr_t offset);
+
+std::string FindKnownObject(uintptr_t address, const d::Roots& roots);
+
+}  // namespace v8_debug_helper_internal
+
+#endif
diff --git a/deps/v8/tools/gcmole/BUILD.gn b/deps/v8/tools/gcmole/BUILD.gn
index 0434a64ff5a017..51b9ef527f7961 100644
--- a/deps/v8/tools/gcmole/BUILD.gn
+++ b/deps/v8/tools/gcmole/BUILD.gn
@@ -9,12 +9,16 @@ group("v8_run_gcmole") {
 
   data = [
     "gccause.lua",
+    "GCMOLE.gn",
     "gcmole.lua",
     "gcmole-tools/",
     "parallel.py",
     "run-gcmole.py",
+    "suspects.whitelist",
+    "test-expectations.txt",
 
     # The following contains all relevant source and build files.
+    "../debug_helper/debug-helper.h",
     "../../BUILD.gn",
     "../../base/",
     "../../include/",
diff --git a/deps/v8/tools/gcmole/GCMOLE.gn b/deps/v8/tools/gcmole/GCMOLE.gn
new file mode 100644
index 00000000000000..62da0a084ba3c3
--- /dev/null
+++ b/deps/v8/tools/gcmole/GCMOLE.gn
@@ -0,0 +1,6 @@
+action("gcmole") {
+  sources = [
+    ### gcmole(all) ###
+    "tools/gcmole/gcmole-test.cc",
+  ]
+}
diff --git a/deps/v8/tools/gcmole/README b/deps/v8/tools/gcmole/README
index 578ea56219bd9c..48785b871af354 100644
--- a/deps/v8/tools/gcmole/README
+++ b/deps/v8/tools/gcmole/README
@@ -71,6 +71,12 @@ can be ignored.
 
 If any errors were found driver exits with non-zero status.
 
+TESTING -----------------------------------------------------------------------
+
+Tests are automatically run by the main lua runner. Expectations live in
+test-expectations.txt and must be updated whenever gcmole-test.cc is modified,
+including changes that only shift line numbers, since expectations include them.
+
 PACKAGING ---------------------------------------------------------------------
 
 gcmole is deployed on V8's buildbot infrastructure to run it as part of the
diff --git a/deps/v8/tools/gcmole/gcmole-test.cc b/deps/v8/tools/gcmole/gcmole-test.cc
index c00c6e5539d787..3af1bac3b50b38 100644
--- a/deps/v8/tools/gcmole/gcmole-test.cc
+++ b/deps/v8/tools/gcmole/gcmole-test.cc
@@ -5,26 +5,43 @@
 #include "src/execution/isolate.h"
 #include "src/handles/handles-inl.h"
 #include "src/handles/handles.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/managed.h"
 #include "src/objects/maybe-object.h"
 #include "src/objects/object-macros.h"
 
 namespace v8 {
 namespace internal {
 
+// ------- Test simple argument evaluation order problems ---------
+
 Handle<Object> CauseGC(Handle<Object> obj, Isolate* isolate) {
   isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
 
   return obj;
 }
 
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+  isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+
+  return obj;
+}
+
+Managed<Smi> CauseGCManaged(int i, Isolate* isolate) {
+  isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+
+  return Managed<Smi>::cast(Smi::FromInt(i));
+}
+
 void TwoArgumentsFunction(Object a, Object b) {
-  a->Print();
-  b->Print();
+  a.Print();
+  b.Print();
 }
 
 void TestTwoArguments(Isolate* isolate) {
   Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
   Handle<JSObject> obj2 = isolate->factory()->NewJSObjectWithNullProto();
+  // Should cause warning.
   TwoArgumentsFunction(*CauseGC(obj1, isolate), *CauseGC(obj2, isolate));
 }
 
@@ -36,13 +53,16 @@ void TwoSizeTArgumentsFunction(size_t a, size_t b) {
 void TestTwoSizeTArguments(Isolate* isolate) {
   Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
   Handle<JSObject> obj2 = isolate->factory()->NewJSObjectWithNullProto();
+  // Should cause warning.
   TwoSizeTArgumentsFunction(sizeof(*CauseGC(obj1, isolate)),
                             sizeof(*CauseGC(obj2, isolate)));
 }
 
+// --------- Test problems with method arguments ----------
+
 class SomeObject : public Object {
  public:
-  void Method(Object a) { a->Print(); }
+  void Method(Object a) { a.Print(); }
 
   SomeObject& operator=(const Object& b) {
     this->Print();
@@ -58,14 +78,57 @@ void TestMethodCall(Isolate* isolate) {
   SomeObject obj;
   Handle<SomeObject> so = handle(obj, isolate);
   Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  // Should cause warning.
   so->Method(*CauseGC(obj1, isolate));
+  // Should cause warning.
+  so->Method(CauseGCRaw(*obj1, isolate));
 }
 
 void TestOperatorCall(Isolate* isolate) {
   SomeObject obj;
   Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  // Should not cause warning.
   obj = *CauseGC(obj1, isolate);
 }
 
+// --------- Test for templated sub-classes of Object ----------
+
+void TestFollowingTemplates(Isolate* isolate) {
+  // Should cause warning.
+  CauseGCManaged(42, isolate);
+}
+
+// --------- Test for correctly resolving virtual methods ----------
+
+class BaseObject {
+ public:
+  virtual Handle<Object> VirtualCauseGC(Handle<Object> obj, Isolate* isolate) {
+    return obj;
+  }
+};
+
+class DerivedObject : public BaseObject {
+ public:
+  Handle<Object> VirtualCauseGC(Handle<Object> obj, Isolate* isolate) override {
+    isolate->heap()->CollectGarbage(OLD_SPACE,
+                                    GarbageCollectionReason::kTesting);
+
+    return obj;
+  }
+};
+
+void TestFollowingVirtualFunctions(Isolate* isolate) {
+  DerivedObject derived;
+  BaseObject* base = &derived;
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+
+  SomeObject so;
+  Handle<SomeObject> so_handle = handle(so, isolate);
+  // Should cause warning.
+  so_handle->Method(*derived.VirtualCauseGC(obj1, isolate));
+  // Should cause warning.
+  so_handle->Method(*base->VirtualCauseGC(obj1, isolate));
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1 b/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
index 718e967e3b7c47..8f7876fa519908 100644
--- a/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
+++ b/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
@@ -1 +1 @@
-3d4ba1759c3d5bc7e98c466d24fa0c43f186ba79
\ No newline at end of file
+d2f949820bf1ee7343a7b5f46987a3657aaea2e9
\ No newline at end of file
diff --git a/deps/v8/tools/gcmole/gcmole.cc b/deps/v8/tools/gcmole/gcmole.cc
index 6631583478867a..806100d381f077 100644
--- a/deps/v8/tools/gcmole/gcmole.cc
+++ b/deps/v8/tools/gcmole/gcmole.cc
@@ -47,6 +47,7 @@ namespace {
 
 typedef std::string MangledName;
 typedef std::set<MangledName> CalleesSet;
+typedef std::map<MangledName, MangledName> CalleesMap;
 
 static bool GetMangledName(clang::MangleContext* ctx,
                            const clang::NamedDecl* decl,
@@ -138,14 +139,16 @@ class CalleesPrinter : public clang::RecursiveASTVisitor<CalleesPrinter> {
   virtual bool VisitDeclRefExpr(clang::DeclRefExpr* expr) {
     // If function mentions EXTERNAL VMState add artificial garbage collection
     // mark.
-    if (IsExternalVMState(expr->getDecl())) AddCallee("CollectGarbage");
+    if (IsExternalVMState(expr->getDecl()))
+      AddCallee("CollectGarbage", "CollectGarbage");
     return true;
   }
 
   void AnalyzeFunction(const clang::FunctionDecl* f) {
     MangledName name;
     if (InV8Namespace(f) && GetMangledName(ctx_, f, &name)) {
-      AddCallee(name);
+      const std::string& function = f->getNameAsString();
+      AddCallee(name, function);
 
       const clang::FunctionDecl* body = NULL;
       if (f->hasBody(body) && !Analyzed(name)) {
@@ -176,21 +179,22 @@ class CalleesPrinter : public clang::RecursiveASTVisitor<CalleesPrinter> {
     scopes_.pop();
   }
 
-  void AddCallee(const MangledName& name) {
+  void AddCallee(const MangledName& name, const MangledName& function) {
     if (!scopes_.empty()) scopes_.top()->insert(name);
+    mangled_to_function_[name] = function;
   }
 
   void PrintCallGraph() {
     for (Callgraph::const_iterator i = callgraph_.begin(), e = callgraph_.end();
          i != e;
          ++i) {
-      std::cout << i->first << "\n";
+      std::cout << i->first << "," << mangled_to_function_[i->first] << "\n";
 
       CalleesSet* callees = i->second;
       for (CalleesSet::const_iterator j = callees->begin(), e = callees->end();
            j != e;
            ++j) {
-        std::cout << "\t" << *j << "\n";
+        std::cout << "\t" << *j << "," << mangled_to_function_[*j] << "\n";
       }
     }
   }
@@ -200,6 +204,7 @@ class CalleesPrinter : public clang::RecursiveASTVisitor<CalleesPrinter> {
 
   std::stack<CalleesSet* > scopes_;
   Callgraph callgraph_;
+  CalleesMap mangled_to_function_;
 };
 
 
@@ -234,23 +239,40 @@ class FunctionDeclarationFinder
   CalleesPrinter* callees_printer_;
 };
 
-
-static bool loaded = false;
+static bool gc_suspects_loaded = false;
 static CalleesSet gc_suspects;
-
+static CalleesSet gc_functions;
+static bool whitelist_loaded = false;
+static CalleesSet suspects_whitelist;
 
 static void LoadGCSuspects() {
-  if (loaded) return;
+  if (gc_suspects_loaded) return;
 
   std::ifstream fin("gcsuspects");
-  std::string s;
+  std::string mangled, function;
 
-  while (fin >> s) gc_suspects.insert(s);
+  // Each line is "mangled,function"; checking getline directly avoids the
+  // spurious empty entry a !fin.eof() loop would insert at end of file.
+  while (std::getline(fin, mangled, ',') && std::getline(fin, function)) {
+    gc_suspects.insert(mangled);
+    gc_functions.insert(function);
+  }
 
-  loaded = true;
+  gc_suspects_loaded = true;
 }
 
+static void LoadSuspectsWhitelist() {
+  if (whitelist_loaded) return;
+
+  std::ifstream fin("tools/gcmole/suspects.whitelist");
+  std::string s;
+
+  while (fin >> s) suspects_whitelist.insert(s);
 
+  whitelist_loaded = true;
+}
+
+// Looks for an exact match of the mangled name.
 static bool KnownToCauseGC(clang::MangleContext* ctx,
                            const clang::FunctionDecl* decl) {
   LoadGCSuspects();
@@ -265,6 +287,25 @@ static bool KnownToCauseGC(clang::MangleContext* ctx,
   return false;
 }
 
+// Looks for a partial match: only the unmangled function name has to match.
+static bool SuspectedToCauseGC(clang::MangleContext* ctx,
+                               const clang::FunctionDecl* decl) {
+  LoadGCSuspects();
+
+  if (!InV8Namespace(decl)) return false;
+
+  LoadSuspectsWhitelist();
+  if (suspects_whitelist.find(decl->getNameAsString()) !=
+      suspects_whitelist.end()) {
+    return false;
+  }
+
+  if (gc_functions.find(decl->getNameAsString()) != gc_functions.end()) {
+    return true;
+  }
+
+  return false;
+}
 
 static const int kNoEffect = 0;
 static const int kCausesGC = 1;
@@ -910,8 +951,30 @@ class FunctionAnalyzer {
         RepresentsRawPointerType(call->getType()));
 
     clang::FunctionDecl* callee = call->getDirectCallee();
-    if ((callee != NULL) && KnownToCauseGC(ctx_, callee)) {
-      out.setGC();
+    if (callee != NULL) {
+      if (KnownToCauseGC(ctx_, callee)) {
+        out.setGC();
+      }
+
+      clang::CXXMethodDecl* method =
+          llvm::dyn_cast_or_null<clang::CXXMethodDecl>(callee);
+      if (method != NULL && method->isVirtual()) {
+        clang::CXXMemberCallExpr* memcall =
+            llvm::dyn_cast_or_null<clang::CXXMemberCallExpr>(call);
+        if (memcall != NULL) {
+          clang::CXXMethodDecl* target = method->getDevirtualizedMethod(
+              memcall->getImplicitObjectArgument(), false);
+          if (target != NULL) {
+            if (KnownToCauseGC(ctx_, target)) {
+              out.setGC();
+            }
+          } else {
+            if (SuspectedToCauseGC(ctx_, method)) {
+              out.setGC();
+            }
+          }
+        }
+      }
     }
 
     return out;
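
With the changes above, each line of the gcsuspects file read by
LoadGCSuspects carries a mangled name and its unmangled function name,
separated by a comma; PrintCallGraph emits the same pairing. A hedged sample
(entries invented):

    // _ZN2v88internal4Heap14CollectGarbageEv,CollectGarbage
    // _ZN2v88internal7Factory11NewJSObjectEv,NewJSObject
    //
    // SuspectedToCauseGC matches only the part after the comma, unless that
    // name also appears in tools/gcmole/suspects.whitelist.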
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index ae17fdc5f6ec7c..6758973457ef6d 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -183,12 +183,19 @@ end
 
 -------------------------------------------------------------------------------
 
-local function ParseGNFile()
+local function ParseGNFile(for_test)
    local result = {}
-   local gn_files = {
-       { "BUILD.gn",             '"([^"]-%.cc)"',      ""         },
-       { "test/cctest/BUILD.gn", '"(test-[^"]-%.cc)"', "test/cctest/" }
-   }
+   local gn_files
+   if for_test then
+      gn_files = {
+         { "tools/gcmole/GCMOLE.gn",             '"([^"]-%.cc)"',      ""         }
+      }
+   else
+      gn_files = {
+         { "BUILD.gn",             '"([^"]-%.cc)"',      ""         },
+         { "test/cctest/BUILD.gn", '"(test-[^"]-%.cc)"', "test/cctest/" }
+      }
+   end
 
    for i = 1, #gn_files do
       local filename = gn_files[i][1]
@@ -231,7 +238,8 @@ local function BuildFileList(sources, props)
 end
 
 
-local gn_sources = ParseGNFile()
+local gn_sources = ParseGNFile(false)
+local gn_test_sources = ParseGNFile(true)
 
 local function FilesForArch(arch)
    return BuildFileList(gn_sources, { os = 'linux',
@@ -240,6 +248,13 @@ local function FilesForArch(arch)
                                       simulator = ''})
 end
 
+local function FilesForTest(arch)
+   return BuildFileList(gn_test_sources, { os = 'linux',
+                                      arch = arch,
+                                      mode = 'debug',
+                                      simulator = ''})
+end
+
 local mtConfig = {}
 
 mtConfig.__index = mtConfig
@@ -393,8 +408,13 @@ end
 --------------------------------------------------------------------------------
 -- Analysis
 
-local function CheckCorrectnessForArch(arch)
-   local files = FilesForArch(arch)
+local function CheckCorrectnessForArch(arch, for_test)
+   local files
+   if for_test then
+      files = FilesForTest(arch)
+   else
+      files = FilesForArch(arch)
+   end
    local cfg = ARCHITECTURES[arch]
 
    if not FLAGS.reuse_gcsuspects then
@@ -403,6 +423,7 @@ local function CheckCorrectnessForArch(arch)
 
    local processed_files = 0
    local errors_found = false
+   local output = ""
    local function SearchForErrors(filename, lines)
       processed_files = processed_files + 1
       for l in lines do
@@ -410,7 +431,11 @@ local function CheckCorrectnessForArch(arch)
             l:match "^[^:]+:%d+:%d+:" or
             l:match "error" or
             l:match "warning"
-         print(l)
+         if for_test then
+            output = output.."\n"..l
+         else
+            print(l)
+         end
       end
    end
 
@@ -427,18 +452,34 @@ local function CheckCorrectnessForArch(arch)
        processed_files,
        errors_found and "Errors found" or "No errors found")
 
-   return errors_found
+   return errors_found, output
 end
 
-local function SafeCheckCorrectnessForArch(arch)
-   local status, errors = pcall(CheckCorrectnessForArch, arch)
+local function SafeCheckCorrectnessForArch(arch, for_test)
+   local status, errors, output = pcall(CheckCorrectnessForArch, arch, for_test)
    if not status then
       print(string.format("There was an error: %s", errors))
       errors = true
    end
-   return errors
+   return errors, output
+end
+
+local function TestRun()
+   local errors, output = SafeCheckCorrectnessForArch('x64', true)
+
+   local filename = "tools/gcmole/test-expectations.txt"
+   local exp_file = assert(io.open(filename), "failed to open test expectations file")
+   local expectations = exp_file:read('*all')
+
+   if output ~= expectations then
+      log("** Output mismatch from running tests. Please run them manually.")
+   else
+      log("** Tests ran successfully")
+   end
 end
 
+TestRun()
+
 local errors = false
 
 for _, arch in ipairs(ARCHS) do
@@ -446,7 +487,7 @@ for _, arch in ipairs(ARCHS) do
       error ("Unknown arch: " .. arch)
    end
 
-   errors = SafeCheckCorrectnessForArch(arch, report) or errors
+   errors = SafeCheckCorrectnessForArch(arch, false) or errors
 end
 
 os.exit(errors and 1 or 0)
diff --git a/deps/v8/tools/gcmole/suspects.whitelist b/deps/v8/tools/gcmole/suspects.whitelist
new file mode 100644
index 00000000000000..01db7401f22fcf
--- /dev/null
+++ b/deps/v8/tools/gcmole/suspects.whitelist
@@ -0,0 +1,4 @@
+IsConstructor
+IsEval
+IsAsync
+IsPromiseAll
diff --git a/deps/v8/tools/gcmole/test-expectations.txt b/deps/v8/tools/gcmole/test-expectations.txt
new file mode 100644
index 00000000000000..36a026a12eb36e
--- /dev/null
+++ b/deps/v8/tools/gcmole/test-expectations.txt
@@ -0,0 +1,20 @@
+
+tools/gcmole/gcmole-test.cc:45:3: warning: Possible problem with evaluation order.
+  TwoArgumentsFunction(*CauseGC(obj1, isolate), *CauseGC(obj2, isolate));
+  ^
+tools/gcmole/gcmole-test.cc:57:3: warning: Possible problem with evaluation order.
+  TwoSizeTArgumentsFunction(sizeof(*CauseGC(obj1, isolate)),
+  ^
+tools/gcmole/gcmole-test.cc:82:7: warning: Possible problem with evaluation order.
+  so->Method(*CauseGC(obj1, isolate));
+      ^
+tools/gcmole/gcmole-test.cc:84:7: warning: Possible problem with evaluation order.
+  so->Method(CauseGCRaw(*obj1, isolate));
+      ^
+tools/gcmole/gcmole-test.cc:128:14: warning: Possible problem with evaluation order.
+  so_handle->Method(*derived.VirtualCauseGC(obj1, isolate));
+             ^
+tools/gcmole/gcmole-test.cc:130:14: warning: Possible problem with evaluation order.
+  so_handle->Method(*base->VirtualCauseGC(obj1, isolate));
+             ^
+6 warnings generated.
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index b5aba23220bac8..0715b07dc14af7 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -166,7 +166,7 @@
     { 'name': 'bit_field3_number_of_own_descriptors_shift',
         'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
     { 'name': 'class_Map__instance_descriptors_offset',
-       'value': 'Map::kInstanceDescriptorsOffset' },
+        'value': 'Map::kInstanceDescriptorsOffset' },
 
     { 'name': 'off_fp_context_or_frame_type',
         'value': 'CommonFrameConstants::kContextOrFrameTypeOffset'},
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index e02571b4f8e0dc..6560758f3eced3 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -77,7 +77,7 @@ const CATEGORIES = new Map([
       'JS_TO_WASM_FUNCTION',
       'JS_TYPED_ARRAY_TYPE',
       'JS_WEAK_MAP_TYPE',
-      'MUTABLE_HEAP_NUMBER_TYPE',
+      'HEAP_NUMBER_TYPE',
       'NATIVE_CONTEXT_TYPE',
       'OBJECT_PROPERTY_DICTIONARY_TYPE',
       'ONE_BYTE_INTERNALIZED_STRING_TYPE',
diff --git a/deps/v8/tools/heap-stats/global-timeline.js b/deps/v8/tools/heap-stats/global-timeline.js
index 3830b7ca33ba58..c34ba2b9138d98 100644
--- a/deps/v8/tools/heap-stats/global-timeline.js
+++ b/deps/v8/tools/heap-stats/global-timeline.js
@@ -63,9 +63,12 @@ class GlobalTimeline extends HTMLElement {
       {type: 'number', label: 'Ptr compression benefit'},
       {type: 'string', role: 'tooltip'},
       {type: 'number', label: 'Embedder fields'},
-      {type: 'number', label: 'Tagged fields'},
+      {type: 'number', label: 'Tagged fields (excl. in-object Smis)'},
+      {type: 'number', label: 'In-object Smi-only fields'},
       {type: 'number', label: 'Other raw fields'},
-      {type: 'number', label: 'Unboxed doubles'}
+      {type: 'number', label: 'Unboxed doubles'},
+      {type: 'number', label: 'Boxed doubles'},
+      {type: 'number', label: 'String data'}
     ];
     const chart_data = [labels];
     const isolate_data = this.data[this.selection.isolate];
@@ -78,10 +81,14 @@ class GlobalTimeline extends HTMLElement {
       const data = [];
       data.push(gc_data.time * kMillis2Seconds);
       const total = data_set.tagged_fields +
+                    data_set.inobject_smi_fields +
                     data_set.embedder_fields +
                     data_set.other_raw_fields +
-                    data_set.unboxed_double_fields;
-      const ptr_compr_benefit = data_set.tagged_fields / 2;
+                    data_set.unboxed_double_fields +
+                    data_set.boxed_double_fields +
+                    data_set.string_data;
+      const ptr_compr_benefit =
+          (data_set.inobject_smi_fields + data_set.tagged_fields) / 2;
       const ptr_compr_benefit_perc = ptr_compr_benefit / total * 100;
       sum_total += total;
       sum_ptr_compr_benefit_perc += ptr_compr_benefit_perc;
@@ -93,8 +100,11 @@ class GlobalTimeline extends HTMLElement {
       data.push(tooltip);
       data.push(data_set.embedder_fields / KB);
       data.push(data_set.tagged_fields / KB);
+      data.push(data_set.inobject_smi_fields / KB);
       data.push(data_set.other_raw_fields / KB);
       data.push(data_set.unboxed_double_fields / KB);
+      data.push(data_set.boxed_double_fields / KB);
+      data.push(data_set.string_data / KB);
       chart_data.push(data);
     });
     const avg_ptr_compr_benefit_perc =
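The benefit estimate above assumes every tagged slot, including the new in-object Smi-only fields, shrinks from 8 to 4 bytes under pointer compression, so half of those bytes are saved. A worked Python sketch with made-up byte counts:

```python
KB = 1024

# Hypothetical per-GC byte counts, mirroring the data_set fields above.
data_set = {
  "tagged_fields": 600 * KB,
  "inobject_smi_fields": 200 * KB,
  "embedder_fields": 50 * KB,
  "other_raw_fields": 100 * KB,
  "unboxed_double_fields": 30 * KB,
  "boxed_double_fields": 10 * KB,
  "string_data": 110 * KB,
}

total = sum(data_set.values())
# Tagged slots (incl. in-object Smis) halve from 8 to 4 bytes when compressed.
benefit = (data_set["inobject_smi_fields"] + data_set["tagged_fields"]) / 2
print("benefit: %d KB (%.1f%% of %d KB)"
      % (benefit / KB, benefit / total * 100, total / KB))
# -> benefit: 400 KB (36.4% of 1100 KB)
```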
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.js b/deps/v8/tools/heap-stats/trace-file-reader.js
index 4fec9a1cb917ff..86d9d7d55105f4 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.js
+++ b/deps/v8/tools/heap-stats/trace-file-reader.js
@@ -137,11 +137,15 @@ class TraceFileReader extends HTMLElement {
   }
 
   addFieldTypeData(data, isolate, gc_id, data_set, tagged_fields,
-                   embedder_fields, unboxed_double_fields, other_raw_fields) {
+                   inobject_smi_fields, embedder_fields, unboxed_double_fields,
+                   boxed_double_fields, string_data, other_raw_fields) {
     data[isolate].gcs[gc_id][data_set].field_data = {
       tagged_fields,
+      inobject_smi_fields,
       embedder_fields,
       unboxed_double_fields,
+      boxed_double_fields,
+      string_data,
       other_raw_fields
     };
   }
@@ -217,8 +221,12 @@ class TraceFileReader extends HTMLElement {
 
                 const field_data = entry.field_data;
                 this.addFieldTypeData(data, isolate, gc_id, data_set,
-                  field_data.tagged_fields, field_data.embedder_fields,
+                  field_data.tagged_fields,
+                  field_data.inobject_smi_fields,
+                  field_data.embedder_fields,
                   field_data.unboxed_double_fields,
+                  field_data.boxed_double_fields,
+                  field_data.string_data,
                   field_data.other_raw_fields);
 
                 data[isolate].gcs[gc_id][data_set].bucket_sizes =
@@ -282,8 +290,9 @@ class TraceFileReader extends HTMLElement {
         this.createOrUpdateEntryIfNeeded(data, entry);
         this.createDatasetIfNeeded(data, entry, entry.key);
         this.addFieldTypeData(data, entry.isolate, entry.id, entry.key,
-          entry.tagged_fields, entry.embedder_fields,
-          entry.unboxed_double_fields, entry.other_raw_fields);
+          entry.tagged_fields, entry.inobject_smi_fields, entry.embedder_fields,
+          entry.unboxed_double_fields, entry.boxed_double_fields,
+          entry.string_data, entry.other_raw_fields);
       } else if (entry.type === 'instance_type_data') {
         if (entry.id in data[entry.isolate].gcs) {
           this.createOrUpdateEntryIfNeeded(data, entry);
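With seven positional counters, every call site of addFieldTypeData must match the parameter order exactly; keyword arguments make such calls robust to reordering. A hedged Python sketch of that alternative (function and field names are simplified stand-ins):

```python
def add_field_type_data(store, tagged_fields, inobject_smi_fields,
                        embedder_fields, unboxed_double_fields,
                        boxed_double_fields, string_data, other_raw_fields):
  """Simplified stand-in for addFieldTypeData's seven counters."""
  store["field_data"] = {
    "tagged_fields": tagged_fields,
    "inobject_smi_fields": inobject_smi_fields,
    "embedder_fields": embedder_fields,
    "unboxed_double_fields": unboxed_double_fields,
    "boxed_double_fields": boxed_double_fields,
    "string_data": string_data,
    "other_raw_fields": other_raw_fields,
  }

store = {}
# Keyword arguments keep each value bound to the right counter even if the
# parameter order in the signature changes later.
add_field_type_data(store, tagged_fields=600, inobject_smi_fields=200,
                    embedder_fields=50, unboxed_double_fields=30,
                    boxed_double_fields=10, string_data=110,
                    other_raw_fields=100)
```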
diff --git a/deps/v8/tools/run-wasm-api-tests.py b/deps/v8/tools/run-wasm-api-tests.py
index 79f53cb927784c..ff37c8a465a921 100755
--- a/deps/v8/tools/run-wasm-api-tests.py
+++ b/deps/v8/tools/run-wasm-api-tests.py
@@ -38,7 +38,8 @@
                           "Release+Asserts", "bin")
 
 EXAMPLES = ["hello", "callback", "trap", "reflect", "global", "table",
-            "memory", "finalize", "serialize", "threads"]
+            "memory", "finalize", "serialize", "threads", "hostref", "multi",
+            "start"]
 
 CLANG = {
   "name": "Clang",
diff --git a/deps/v8/tools/testrunner/OWNERS b/deps/v8/tools/testrunner/OWNERS
index bdb1d555a4fb98..09e0096a2ee4cd 100644
--- a/deps/v8/tools/testrunner/OWNERS
+++ b/deps/v8/tools/testrunner/OWNERS
@@ -1 +1 @@
-file://INFRA_OWNERS
+file:../../INFRA_OWNERS
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 15c53358786a0c..7f9b43435ff15f 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -349,9 +349,6 @@ def _add_parser_default_options(self, parser):
                            "color, mono)")
     parser.add_option("--json-test-results",
                       help="Path to a file for storing json results.")
-    parser.add_option("--junitout", help="File name of the JUnit output")
-    parser.add_option("--junittestsuite", default="v8tests",
-                      help="The testsuite name in the JUnit output file")
     parser.add_option("--exit-after-n-failures", type="int", default=100,
                       help="Exit after the first N failures instead of "
                            "running all tests. Pass 0 to disable this feature.")
@@ -794,9 +791,6 @@ def _get_shard_info(self, options):
 
   def _create_progress_indicators(self, test_count, options):
     procs = [PROGRESS_INDICATORS[options.progress]()]
-    if options.junitout:
-      procs.append(progress.JUnitTestProgressIndicator(options.junitout,
-                                                       options.junittestsuite))
     if options.json_test_results:
       procs.append(progress.JsonTestProgressIndicator(
         self.framework_name,
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422a69b..00000000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
-  def __init__(self, test_suite_name):
-    self.root = xml.Element("testsuite")
-    self.root.attrib["name"] = test_suite_name
-
-  def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
-    testCaseElement = xml.Element("testcase")
-    testCaseElement.attrib["name"] = test_name
-    testCaseElement.attrib["cmd"] = test_cmd
-    testCaseElement.attrib["time"] = str(round(test_duration, 3))
-    if len(test_failure):
-      failureElement = xml.Element("failure")
-      failureElement.text = test_failure
-      testCaseElement.append(failureElement)
-    self.root.append(testCaseElement)
-
-  def FinishAndWrite(self, f):
-    xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index e0b0ec41c788d6..9defdd30eefca1 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -115,7 +115,15 @@ class Pool():
   # Necessary to not overflow the queue's pipe if a keyboard interrupt happens.
   BUFFER_FACTOR = 4
 
-  def __init__(self, num_workers, heartbeat_timeout=1):
+  def __init__(self, num_workers, heartbeat_timeout=1, notify_fun=None):
+    """
+    Args:
+      num_workers: Number of worker processes to run in parallel.
+      heartbeat_timeout: Timeout in seconds for waiting for results. Each time
+          the timeout is reached, a heartbeat is signalled and the timeout reset.
+      notify_fun: Callable invoked to signal events such as termination. The
+          event name is passed as a string.
+    """
     self.num_workers = num_workers
     self.processes = []
     self.terminated = False
@@ -130,6 +138,7 @@ def __init__(self, num_workers, heartbeat_timeout=1):
     # work_queue.
     self.processing_count = 0
     self.heartbeat_timeout = heartbeat_timeout
+    self.notify = notify_fun or (lambda x: x)
 
     # Disable sigint and sigterm to prevent subprocesses from capturing the
     # signals.
@@ -261,11 +270,13 @@ def _terminate(self):
       for p in self.processes:
         os.kill(p.pid, signal.SIGTERM)
 
+    self.notify("Joining workers")
     for p in self.processes:
       p.join()
 
     # Drain the queues to prevent stderr chatter when queues are garbage
     # collected.
+    self.notify("Draining queues")
     try:
       while True: self.work_queue.get(False)
     except:
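The notify_fun hook above defaults to a no-op and is called with a short event string at the milestones of pool shutdown ("Joining workers", "Draining queues"). A minimal Python model of the calling convention, with worker management elided (MiniPool is a hypothetical class):

```python
class MiniPool:
  """Trimmed-down model of Pool's notification hook."""

  def __init__(self, num_workers, notify_fun=None):
    self.num_workers = num_workers
    # Default to a harmless no-op so callers may omit the hook.
    self.notify = notify_fun or (lambda event: event)

  def _terminate(self):
    self.notify("Joining workers")
    # ... join worker processes here ...
    self.notify("Draining queues")
    # ... drain work/result queues here ...

pool = MiniPool(4, notify_fun=lambda event: print(">>> %s" % event))
pool._terminate()  # prints both shutdown milestones
```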
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 4b0cf1553b39c8..fe63d0b93549ea 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -23,7 +23,7 @@
   "nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up"]],
   "slow_path": [["--force-slow-path"]],
   "stress": [["--stress-opt", "--always-opt", "--no-liftoff",
-              "--no-wasm-tier-up"]],
+              "--no-wasm-tier-up", "--stress-lazy-source-positions"]],
   "stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
                                          "--wasm-code-gc",
                                          "--stress-wasm-code-gc"]],
diff --git a/deps/v8/tools/testrunner/testproc/base.py b/deps/v8/tools/testrunner/testproc/base.py
index c52c7797521fdf..6048ef5d15ee41 100644
--- a/deps/v8/tools/testrunner/testproc/base.py
+++ b/deps/v8/tools/testrunner/testproc/base.py
@@ -109,6 +109,19 @@ def is_stopped(self):
 
   ### Communication
 
+  def notify_previous(self, event):
+    self._on_event(event)
+    if self._prev_proc:
+      self._prev_proc.notify_previous(event)
+
+  def _on_event(self, event):
+    """Called when processors to the right signal events, e.g. termination.
+
+    Args:
+      event: A string describing the signalled event.
+    """
+    pass
+
   def _send_test(self, test):
     """Helper method for sending test to the next processor."""
     return self._next_proc.next_test(test)
@@ -120,7 +133,6 @@ def _send_result(self, test, result):
     self._prev_proc.result_for(test, result)
 
 
-
 class TestProcObserver(TestProc):
   """Processor used for observing the data."""
   def __init__(self):
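notify_previous walks the processor chain right to left: each processor sees the event via its _on_event hook, then forwards it upstream. A sketch of the traversal, assuming a simple singly linked chain of processors (names are illustrative):

```python
class Proc:
  """Minimal model of the TestProc event chain."""

  def __init__(self, name, prev_proc=None):
    self.name = name
    self._prev_proc = prev_proc

  def notify_previous(self, event):
    # Every processor sees the event, then forwards it upstream.
    self._on_event(event)
    if self._prev_proc:
      self._prev_proc.notify_previous(event)

  def _on_event(self, event):
    print("%s: %s" % (self.name, event))

loader = Proc("loader")
filter_proc = Proc("filter", prev_proc=loader)
execution = Proc("execution", prev_proc=filter_proc)
execution.notify_previous("Joining workers")  # printed by all three procs
```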
diff --git a/deps/v8/tools/testrunner/testproc/execution.py b/deps/v8/tools/testrunner/testproc/execution.py
index 68ecf45a374f71..aaf0db1f8f2c1f 100644
--- a/deps/v8/tools/testrunner/testproc/execution.py
+++ b/deps/v8/tools/testrunner/testproc/execution.py
@@ -45,7 +45,7 @@ class ExecutionProc(base.TestProc):
 
   def __init__(self, jobs, outproc_factory=None):
     super(ExecutionProc, self).__init__()
-    self._pool = pool.Pool(jobs)
+    self._pool = pool.Pool(jobs, notify_fun=self.notify_previous)
     self._outproc_factory = outproc_factory or (lambda t: t.output_proc)
     self._tests = {}
 
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 3ba10f9528c577..8826c36ea49948 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -13,7 +13,6 @@
 import time
 
 from . import base
-from ..local import junit_output
 
 
 # Base dir of the build products for Release and Debug.
@@ -150,6 +149,10 @@ def _on_heartbeat(self):
       self._print('Still working...')
       self._print_processes_linux()
 
+  def _on_event(self, event):
+    self._print(event)
+    self._print_processes_linux()
+
 
 class DotsProgressIndicator(SimpleProgressIndicator):
   def __init__(self):
@@ -282,45 +285,6 @@ def _clear_line(self, last_length):
     print(("\r" + (" " * last_length) + "\r"), end='')
 
 
-class JUnitTestProgressIndicator(ProgressIndicator):
-  def __init__(self, junitout, junittestsuite):
-    super(JUnitTestProgressIndicator, self).__init__()
-    self._requirement = base.DROP_PASS_STDOUT
-
-    self.outputter = junit_output.JUnitTestOutput(junittestsuite)
-    if junitout:
-      self.outfile = open(junitout, "w")
-    else:
-      self.outfile = sys.stdout
-
-  def _on_result_for(self, test, result):
-    # TODO(majeski): Support for dummy/grouped results
-    fail_text = ""
-    output = result.output
-    if result.has_unexpected_output:
-      stdout = output.stdout.strip()
-      if len(stdout):
-        fail_text += "stdout:\n%s\n" % stdout
-      stderr = output.stderr.strip()
-      if len(stderr):
-        fail_text += "stderr:\n%s\n" % stderr
-      fail_text += "Command: %s" % result.cmd.to_string()
-      if output.HasCrashed():
-        fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
-      if output.HasTimedOut():
-        fail_text += "--- TIMEOUT ---"
-    self.outputter.HasRunTest(
-        test_name=str(test),
-        test_cmd=result.cmd.to_string(relative=True),
-        test_duration=output.duration,
-        test_failure=fail_text)
-
-  def finished(self):
-    self.outputter.FinishAndWrite(self.outfile)
-    if self.outfile != sys.stdout:
-      self.outfile.close()
-
-
 class JsonTestProgressIndicator(ProgressIndicator):
   def __init__(self, framework_name, json_test_results, arch, mode):
     super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/testrunner/testproc/timeout.py b/deps/v8/tools/testrunner/testproc/timeout.py
index 54dc60e9b42d8c..9a4e88c8f057ba 100644
--- a/deps/v8/tools/testrunner/testproc/timeout.py
+++ b/deps/v8/tools/testrunner/testproc/timeout.py
@@ -14,15 +14,15 @@ def __init__(self, duration_sec):
     self._start = time.time()
 
   def _on_next_test(self, test):
-    self._on_event()
+    self.__on_event()
 
   def _on_result_for(self, test, result):
-    self._on_event()
+    self.__on_event()
 
   def _on_heartbeat(self):
-    self._on_event()
+    self.__on_event()
 
-  def _on_event(self):
+  def __on_event(self):
     if not self.is_stopped:
       if time.time() - self._start > self._duration_sec:
         print('>>> Total timeout reached.')
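The rename from _on_event to __on_event matters because the TestProc base class now defines an _on_event hook of its own; the double underscore triggers Python name mangling, so the timeout processor's private helper can no longer shadow the base hook by accident. A quick demonstration (class names are illustrative):

```python
class Base:
  def _on_event(self, event):
    print("base hook: %s" % event)

class TimeoutLike(Base):
  def __on_event(self):    # stored as _TimeoutLike__on_event
    print("private timeout check")

  def tick(self):
    self.__on_event()      # resolves to the mangled name

t = TimeoutLike()
t.tick()                   # -> private timeout check
t._on_event("stop")        # base hook untouched -> base hook: stop
print("_TimeoutLike__on_event" in dir(t))  # True: the mangled attribute
```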
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index 2150d7e0cc15aa..2e04e659c1305e 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -20,18 +20,9 @@
 def preprocess(input):
   input = re.sub(r'(if\s+)constexpr(\s*\()', r'\1/*COxp*/\2', input)
   input = re.sub(r'(\s+)operator\s*(\'[^\']+\')', r'\1/*_OPE \2*/', input)
-
-  # Mangle typeswitches to look like switch statements with the extra type
-  # information and syntax encoded in comments.
-  input = re.sub(r'(\s+)typeswitch\s*\(', r'\1/*_TYPE*/switch (', input)
-  input = re.sub(r'(\s+)case\s*\(\s*([^\:]+)\s*\)(\s*)\:\s*deferred',
-      r'\1case \2: /*_TSXDEFERRED_*/', input)
-  input = re.sub(r'(\s+)case\s*\(\s*([^\:]+)\s*\)(\s*)\:',
-      r'\1case \2: /*_TSX*/', input)
-  input = re.sub(r'(\s+)case\s*\(\s*([^\s]+)\s*\:\s*([^\:]+)\s*\)(\s*)\:\s*deferred',
-      r'\1case \3: /*_TSVDEFERRED_\2:*/', input)
-  input = re.sub(r'(\s+)case\s*\(\s*([^\s]+)\s*\:\s*([^\:]+)\s*\)(\s*)\:',
-      r'\1case \3: /*_TSV\2:*/', input)
+  input = re.sub(r'\btypeswitch\s*(\([^{]*\))\s{', r' if /*tPsW*/ \1 {', input)
+  input = re.sub(r'\bcase\s*(\([^{]*\))\s*:\s*deferred\s*{', r' if /*cAsEdEfF*/ \1 {', input)
+  input = re.sub(r'\bcase\s*(\([^{]*\))\s*:\s*{', r' if /*cA*/ \1 {', input)
 
   # Add extra space around | operators to fix union types later.
   while True:
@@ -65,15 +56,9 @@ def postprocess(output):
   output = re.sub(r'(\S+)\s*: type([,>])', r'\1: type\2', output)
   output = re.sub(r'(\n\s*)labels( [A-Z])', r'\1    labels\2', output)
   output = re.sub(r'\/\*_OPE \'([^\']+)\'\*\/', r"operator '\1'", output)
-  output = re.sub(r'\/\*_TYPE\*\/(\s*)switch', r'typeswitch', output)
-  output = re.sub(r'case (\w+)\:\s*\/\*_TSXDEFERRED_\*\/',
-      r'case (\1): deferred', output)
-  output = re.sub(r'case (\w+)\:\s*\/\*_TSX\*\/',
-      r'case (\1):', output)
-  output = re.sub(r'case (\w+)\:\s*\/\*_TSVDEFERRED_([^\:]+)\:\*\/',
-      r'case (\2: \1): deferred', output)
-  output = re.sub(r'case (\w+)\:\s*\/\*_TSV([^\:]+)\:\*\/',
-      r'case (\2: \1):', output)
+  output = re.sub(r'\bif\s*\/\*tPsW\*\/', r'typeswitch', output)
+  output = re.sub(r'\bif\s*\/\*cA\*\/\s*(\([^{]*\))\s*{', r'case \1: {', output)
+  output = re.sub(r'\bif\s*\/\*cAsEdEfF\*\/\s*(\([^{]*\))\s*{', r'case \1: deferred {', output)
   output = re.sub(r'\n_GeNeRaTeS00_\s*\/\*([^@]+)@\*\/',
       r"\n    generates '\1'", output)
   output = re.sub(r'_GeNeRaTeS00_\s*\/\*([^@]+)@\*\/',
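The rewritten rules replace typeswitch and case headers with plain if blocks tagged by sentinel comments (tPsW, cA, cAsEdEfF) so clang-format can process the file, and postprocess restores the keywords afterwards. A round-trip sketch reusing the same substitutions; the sample Torque snippet is made up, and leading whitespace may differ slightly after the round trip, which clang-format would normalize anyway:

```python
import re

def mangle(src):
  # Same substitutions as preprocess() above.
  src = re.sub(r'\btypeswitch\s*(\([^{]*\))\s{', r' if /*tPsW*/ \1 {', src)
  src = re.sub(r'\bcase\s*(\([^{]*\))\s*:\s*deferred\s*{',
               r' if /*cAsEdEfF*/ \1 {', src)
  src = re.sub(r'\bcase\s*(\([^{]*\))\s*:\s*{', r' if /*cA*/ \1 {', src)
  return src

def unmangle(src):
  # Same substitutions as postprocess() above.
  src = re.sub(r'\bif\s*\/\*tPsW\*\/', r'typeswitch', src)
  src = re.sub(r'\bif\s*\/\*cA\*\/\s*(\([^{]*\))\s*{', r'case \1: {', src)
  src = re.sub(r'\bif\s*\/\*cAsEdEfF\*\/\s*(\([^{]*\))\s*{',
               r'case \1: deferred {', src)
  return src

torque = ("typeswitch (obj) {\n"
          "  case (Smi): {\n  }\n"
          "  case (HeapNumber): deferred {\n  }\n"
          "}\n")
restored = unmangle(mangle(torque))
assert "typeswitch (obj) {" in restored
assert "case (Smi): {" in restored
assert "case (HeapNumber): deferred {" in restored
```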
diff --git a/deps/v8/tools/turbolizer/info-view.html b/deps/v8/tools/turbolizer/info-view.html
index b523e655aaf9d5..dc9c177f6020eb 100644
--- a/deps/v8/tools/turbolizer/info-view.html
+++ b/deps/v8/tools/turbolizer/info-view.html
@@ -107,7 +107,7 @@
       </tr>
       <tr>
         <td>^42:</td>
-        <td>Select exactly the node with id 14.</td>
+        <td>Select exactly the node with id 42.</td>
       </tr>
       <tr>
         <td>Origin:&nbsp;#42&nbsp;</td>
diff --git a/deps/v8/tools/turbolizer/package-lock.json b/deps/v8/tools/turbolizer/package-lock.json
index 9c8049fdb541a7..e30838aa3b292c 100644
--- a/deps/v8/tools/turbolizer/package-lock.json
+++ b/deps/v8/tools/turbolizer/package-lock.json
@@ -1687,9 +1687,9 @@
       "dev": true
     },
     "js-yaml": {
-      "version": "3.12.1",
-      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.1.tgz",
-      "integrity": "sha512-um46hB9wNOKlwkHgiuyEVAybXBjwFUV0Z/RaHJblRd9DXltue9FTYvzCr9ErQrK9Adz5MU4gHWVaNUfdmrC8qA==",
+      "version": "3.13.1",
+      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz",
+      "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==",
       "dev": true,
       "requires": {
         "argparse": "^1.0.7",
@@ -2344,9 +2344,9 @@
       "dev": true
     },
     "mixin-deep": {
-      "version": "1.3.1",
-      "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.1.tgz",
-      "integrity": "sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ==",
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
+      "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
       "requires": {
         "for-in": "^1.0.2",
         "is-extendable": "^1.0.1"
@@ -2872,9 +2872,9 @@
       }
     },
     "set-value": {
-      "version": "2.0.0",
-      "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz",
-      "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==",
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+      "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
       "requires": {
         "extend-shallow": "^2.0.1",
         "is-extendable": "^0.1.1",
@@ -3407,35 +3407,14 @@
       "dev": true
     },
     "union-value": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.0.tgz",
-      "integrity": "sha1-XHHDTLW61dzr4+oM0IIHulqhrqQ=",
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+      "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
       "requires": {
         "arr-union": "^3.1.0",
         "get-value": "^2.0.6",
         "is-extendable": "^0.1.1",
-        "set-value": "^0.4.3"
-      },
-      "dependencies": {
-        "extend-shallow": {
-          "version": "2.0.1",
-          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
-          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
-          "requires": {
-            "is-extendable": "^0.1.0"
-          }
-        },
-        "set-value": {
-          "version": "0.4.3",
-          "resolved": "https://registry.npmjs.org/set-value/-/set-value-0.4.3.tgz",
-          "integrity": "sha1-fbCPnT0i3H945Trzw79GZuzfzPE=",
-          "requires": {
-            "extend-shallow": "^2.0.1",
-            "is-extendable": "^0.1.1",
-            "is-plain-object": "^2.0.1",
-            "to-object-path": "^0.3.0"
-          }
-        }
+        "set-value": "^2.0.1"
       }
     },
     "universalify": {
diff --git a/deps/v8/tools/turbolizer/src/disassembly-view.ts b/deps/v8/tools/turbolizer/src/disassembly-view.ts
index 4b8fc6ea2db234..0455437002ff63 100644
--- a/deps/v8/tools/turbolizer/src/disassembly-view.ts
+++ b/deps/v8/tools/turbolizer/src/disassembly-view.ts
@@ -13,6 +13,7 @@ const toolboxHTML = `<div id="disassembly-toolbox">
 <form>
   <label><input id="show-instruction-address" type="checkbox" name="instruction-address">Show addresses</label>
   <label><input id="show-instruction-binary" type="checkbox" name="instruction-binary">Show binary literal</label>
+  <label><input id="highlight-gap-instructions" type="checkbox" name="instruction-binary">Highlight gap instructions</label>
 </form>
 </div>`;
 
@@ -26,13 +27,14 @@ export class DisassemblyView extends TextView {
   offsetSelection: MySelection;
   showInstructionAddressHandler: () => void;
   showInstructionBinaryHandler: () => void;
+  highlightGapInstructionsHandler: () => void;
 
   createViewElement() {
     const pane = document.createElement('div');
     pane.setAttribute('id', "disassembly");
     pane.innerHTML =
       `<pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
-       <ul id='disassembly-list' class='nolinenums noindent'>
+       <ul class='disassembly-list nolinenums noindent'>
        </ul>
      </pre>`;
 
@@ -46,6 +48,19 @@ export class DisassemblyView extends TextView {
       associateData: (text, fragment: HTMLElement) => {
         const matches = text.match(/(?<address>0?x?[0-9a-fA-F]{8,16})(?<addressSpace>\s+)(?<offset>[0-9a-f]+)(?<offsetSpace>\s*)/);
         const offset = Number.parseInt(matches.groups["offset"], 16);
+        const instructionKind = view.sourceResolver.getInstructionKindForPCOffset(offset);
+        fragment.dataset.instructionKind = instructionKind;
+        fragment.title = view.sourceResolver.instructionKindToReadableName(instructionKind);
+        const blockIds = view.sourceResolver.getBlockIdsForOffset(offset);
+        const blockIdElement = document.createElement("SPAN");
+        blockIdElement.className = "block-id com linkable-text";
+        blockIdElement.innerText = "";
+        if (blockIds && blockIds.length > 0) {
+          blockIds.forEach(blockId => view.addHtmlElementForBlockId(blockId, fragment));
+          blockIdElement.innerText = `B${blockIds.join(",")}:`;
+          blockIdElement.dataset.blockId = `${blockIds.join(",")}`;
+        }
+        fragment.appendChild(blockIdElement);
         const addressElement = document.createElement("SPAN");
         addressElement.className = "instruction-address";
         addressElement.innerText = matches.groups["address"];
@@ -58,11 +73,13 @@ export class DisassemblyView extends TextView {
         fragment.classList.add('tag');
 
         if (!Number.isNaN(offset)) {
-          const pcOffset = view.sourceResolver.getKeyPcOffset(offset);
+          let pcOffset = view.sourceResolver.getKeyPcOffset(offset);
+          if (pcOffset == -1) pcOffset = Number(offset);
           fragment.dataset.pcOffset = `${pcOffset}`;
           addressElement.classList.add('linkable-text');
           offsetElement.classList.add('linkable-text');
         }
+        return true;
       }
     };
     const UNCLASSIFIED_STYLE = {
@@ -79,11 +96,20 @@ export class DisassemblyView extends TextView {
         fragment.innerHTML = text;
         const replacer = (match, hexOffset) => {
           const offset = Number.parseInt(hexOffset, 16);
-          const keyOffset = view.sourceResolver.getKeyPcOffset(offset);
-          return `<span class="tag linkable-text" data-pc-offset="${keyOffset}">${match}</span>`;
+          let keyOffset = view.sourceResolver.getKeyPcOffset(offset);
+          if (keyOffset == -1) keyOffset = Number(offset);
+          const blockIds = view.sourceResolver.getBlockIdsForOffset(offset);
+          let block = "";
+          let blockIdData = "";
+          if (blockIds && blockIds.length > 0) {
+            block = `B${blockIds.join(",")} `;
+            blockIdData = `data-block-id="${blockIds.join(",")}"`;
+          }
+          return `<span class="tag linkable-text" data-pc-offset="${keyOffset}" ${blockIdData}>${block}${match}</span>`;
         };
         const html = text.replace(/<.0?x?([0-9a-fA-F]+)>/g, replacer);
         fragment.innerHTML = html;
+        return true;
       }
     };
     const OPCODE_STYLE = {
@@ -91,12 +117,14 @@ export class DisassemblyView extends TextView {
     };
     const BLOCK_HEADER_STYLE = {
       associateData: function (text, fragment) {
+        if (view.sourceResolver.hasBlockStartInfo()) return false;
         const matches = /\d+/.exec(text);
-        if (!matches) return;
+        if (!matches) return true;
         const blockId = matches[0];
         fragment.dataset.blockId = blockId;
         fragment.innerHTML = text;
         fragment.className = "com block";
+        return true;
       }
     };
     const SOURCE_POSITION_HEADER_STYLE = {
@@ -135,7 +163,7 @@ export class DisassemblyView extends TextView {
 
     const linkHandler = (e: MouseEvent) => {
       if (!(e.target instanceof HTMLElement)) return;
-      const offsetAsString = e.target.dataset.pcOffset ? e.target.dataset.pcOffset : e.target.parentElement.dataset.pcOffset;
+      const offsetAsString = typeof e.target.dataset.pcOffset != "undefined" ? e.target.dataset.pcOffset : e.target.parentElement.dataset.pcOffset;
       const offset = Number.parseInt(offsetAsString, 10);
       if ((typeof offsetAsString) != "undefined" && !Number.isNaN(offset)) {
         view.offsetSelection.select([offset], true);
@@ -156,12 +184,12 @@ export class DisassemblyView extends TextView {
 
     const linkHandlerBlock = e => {
       const blockId = e.target.dataset.blockId;
-      if (typeof blockId != "undefined" && !Number.isNaN(blockId)) {
-        e.stopPropagation();
+      if (typeof blockId != "undefined") {
+        const blockIds = blockId.split(",");
         if (!e.shiftKey) {
           view.selectionHandler.clear();
         }
-        view.blockSelectionHandler.select([blockId], true);
+        view.blockSelectionHandler.select(blockIds, true);
       }
     };
     view.divNode.addEventListener('click', linkHandlerBlock);
@@ -219,6 +247,17 @@ export class DisassemblyView extends TextView {
     };
     instructionBinaryInput.addEventListener("change", showInstructionBinaryHandler);
     this.showInstructionBinaryHandler = showInstructionBinaryHandler;
+
+    const highlightGapInstructionsInput: HTMLInputElement = view.divNode.querySelector("#highlight-gap-instructions");
+    const lastHighlightGapInstructions = window.sessionStorage.getItem("highlight-gap-instructions");
+    highlightGapInstructionsInput.checked = lastHighlightGapInstructions == 'true';
+    const highlightGapInstructionsHandler = () => {
+      window.sessionStorage.setItem("highlight-gap-instructions", `${highlightGapInstructionsInput.checked}`);
+      view.divNode.classList.toggle("highlight-gap-instructions", highlightGapInstructionsInput.checked);
+    };
+
+    highlightGapInstructionsInput.addEventListener("change", highlightGapInstructionsHandler);
+    this.highlightGapInstructionsHandler = highlightGapInstructionsHandler;
   }
 
   updateSelection(scrollIntoView: boolean = false) {
@@ -285,6 +324,7 @@ export class DisassemblyView extends TextView {
     super.initializeContent(data, null);
     this.showInstructionAddressHandler();
     this.showInstructionBinaryHandler();
+    this.highlightGapInstructionsHandler();
     console.timeEnd("disassembly-view");
   }
 
diff --git a/deps/v8/tools/turbolizer/src/sequence-view.ts b/deps/v8/tools/turbolizer/src/sequence-view.ts
index e7691c688f03ca..1319f3ae1e2dc5 100644
--- a/deps/v8/tools/turbolizer/src/sequence-view.ts
+++ b/deps/v8/tools/turbolizer/src/sequence-view.ts
@@ -98,8 +98,10 @@ export class SequenceView extends TextView {
       const instNodeEl = createElement("div", "instruction-node");
 
       const instId = createElement("div", "instruction-id", instruction.id);
+      const offsets = view.sourceResolver.instructionToPcOffsets(instruction.id);
       instId.classList.add("clickable");
       instId.dataset.instructionId = instruction.id;
+      instId.setAttribute("title", `This instruction generated gap code at pc-offset 0x${offsets.gap.toString(16)}, code at pc-offset 0x${offsets.arch.toString(16)}, condition handling at pc-offset 0x${offsets.condition.toString(16)}.`);
       instNodeEl.appendChild(instId);
 
       const instContentsEl = createElement("div", "instruction-contents");
diff --git a/deps/v8/tools/turbolizer/src/source-resolver.ts b/deps/v8/tools/turbolizer/src/source-resolver.ts
index 67f9c088a225d8..588eea5b9955cc 100644
--- a/deps/v8/tools/turbolizer/src/source-resolver.ts
+++ b/deps/v8/tools/turbolizer/src/source-resolver.ts
@@ -83,6 +83,7 @@ interface InstructionsPhase {
   instructionOffsetToPCOffset?: any;
   blockIdtoInstructionRange?: any;
   nodeIdToInstructionRange?: any;
+  codeOffsetsInfo?: CodeOffsetsInfo;
 }
 
 interface GraphPhase {
@@ -103,6 +104,22 @@ export interface Sequence {
   blocks: Array<any>;
 }
 
+class CodeOffsetsInfo {
+  codeStartRegisterCheck: number;
+  deoptCheck: number;
+  initPoison: number;
+  blocksStart: number;
+  outOfLineCode: number;
+  deoptimizationExits: number;
+  pools: number;
+  jumpTables: number;
+}
+export class TurbolizerInstructionStartInfo {
+  gap: number;
+  arch: number;
+  condition: number;
+}
+
 export class SourceResolver {
   nodePositionMap: Array<AnyPosition>;
   sources: Array<Source>;
@@ -115,9 +132,12 @@ export class SourceResolver {
   lineToSourcePositions: Map<string, Array<AnyPosition>>;
   nodeIdToInstructionRange: Array<[number, number]>;
   blockIdToInstructionRange: Array<[number, number]>;
-  instructionToPCOffset: Array<number>;
+  instructionToPCOffset: Array<TurbolizerInstructionStartInfo>;
   pcOffsetToInstructions: Map<number, Array<number>>;
   pcOffsets: Array<number>;
+  blockIdToPCOffset: Array<number>;
+  blockStartPCtoBlockIds: Map<number, Array<number>>;
+  codeOffsetsInfo: CodeOffsetsInfo;
 
   constructor() {
     // Maps node ids to source positions.
@@ -147,6 +167,17 @@ export class SourceResolver {
     // Maps PC offsets to instructions.
     this.pcOffsetToInstructions = new Map();
     this.pcOffsets = [];
+    this.blockIdToPCOffset = [];
+    this.blockStartPCtoBlockIds = new Map();
+    this.codeOffsetsInfo = null;
+  }
+
+  getBlockIdsForOffset(offset): Array<number> {
+    return this.blockStartPCtoBlockIds.get(offset);
+  }
+
+  hasBlockStartInfo() {
+    return this.blockIdToPCOffset.length > 0;
   }
 
   setSources(sources, mainBackup) {
@@ -369,12 +400,18 @@ export class SourceResolver {
   }
 
   readInstructionOffsetToPCOffset(instructionToPCOffset) {
-    for (const [instruction, offset] of Object.entries<number>(instructionToPCOffset)) {
-      this.instructionToPCOffset[instruction] = offset;
-      if (!this.pcOffsetToInstructions.has(offset)) {
-        this.pcOffsetToInstructions.set(offset, []);
+    for (const [instruction, numberOrInfo] of Object.entries<number | TurbolizerInstructionStartInfo>(instructionToPCOffset)) {
+      let info: TurbolizerInstructionStartInfo;
+      if (typeof numberOrInfo == "number") {
+        info = { gap: numberOrInfo, arch: numberOrInfo, condition: numberOrInfo };
+      } else {
+        info = numberOrInfo;
+      }
+      this.instructionToPCOffset[instruction] = info;
+      if (!this.pcOffsetToInstructions.has(info.gap)) {
+        this.pcOffsetToInstructions.set(info.gap, []);
       }
-      this.pcOffsetToInstructions.get(offset).push(Number(instruction));
+      this.pcOffsetToInstructions.get(info.gap).push(Number(instruction));
     }
     this.pcOffsets = Array.from(this.pcOffsetToInstructions.keys()).sort((a, b) => b - a);
   }
@@ -393,15 +430,67 @@ export class SourceResolver {
     return -1;
   }
 
-  instructionRangeToKeyPcOffsets([start, end]: [number, number]) {
+  getInstructionKindForPCOffset(offset: number) {
+    if (this.codeOffsetsInfo) {
+      if (offset >= this.codeOffsetsInfo.deoptimizationExits) {
+        if (offset >= this.codeOffsetsInfo.pools) {
+          return "pools";
+        } else if (offset >= this.codeOffsetsInfo.jumpTables) {
+          return "jump-tables";
+        } else {
+          return "deoptimization-exits";
+        }
+      }
+      if (offset < this.codeOffsetsInfo.deoptCheck) {
+        return "code-start-register";
+      } else if (offset < this.codeOffsetsInfo.initPoison) {
+        return "deopt-check";
+      } else if (offset < this.codeOffsetsInfo.blocksStart) {
+        return "init-poison";
+      }
+    }
+    const keyOffset = this.getKeyPcOffset(offset);
+    if (keyOffset != -1) {
+      const infos = this.pcOffsetToInstructions.get(keyOffset).map(instrId => this.instructionToPCOffset[instrId]).filter(info => info.gap != info.condition);
+      if (infos.length > 0) {
+        const info = infos[0];
+        if (!info || info.gap == info.condition) return "unknown";
+        if (offset < info.arch) return "gap";
+        if (offset < info.condition) return "arch";
+        return "condition";
+      }
+    }
+    return "unknown";
+  }
+
+  instructionKindToReadableName(instructionKind) {
+    switch (instructionKind) {
+      case "code-start-register": return "Check code register for right value";
+      case "deopt-check": return "Check if function was marked for deoptimization";
+      case "init-poison": return "Initialization of poison register";
+      case "gap": return "Instruction implementing a gap move";
+      case "arch": return "Instruction implementing the actual machine operation";
+      case "condition": return "Code implementing conditional after instruction";
+      case "pools": return "Data in a pool (e.g. constant pool)";
+      case "jump-tables": return "Part of a jump table";
+      case "deoptimization-exits": return "Jump to deoptimization exit";
+    }
+    return null;
+  }
+
+  instructionRangeToKeyPcOffsets([start, end]: [number, number]): Array<TurbolizerInstructionStartInfo> {
     if (start == end) return [this.instructionToPCOffset[start]];
     return this.instructionToPCOffset.slice(start, end);
   }
 
-  instructionsToKeyPcOffsets(instructionIds: Iterable<number>) {
+  instructionToPcOffsets(instr: number): TurbolizerInstructionStartInfo {
+    return this.instructionToPCOffset[instr];
+  }
+
+  instructionsToKeyPcOffsets(instructionIds: Iterable<number>): Array<number> {
     const keyPcOffsets = [];
     for (const instructionId of instructionIds) {
-      keyPcOffsets.push(this.instructionToPCOffset[instructionId]);
+      keyPcOffsets.push(this.instructionToPCOffset[instructionId].gap);
     }
     return keyPcOffsets;
   }
@@ -447,6 +536,15 @@ export class SourceResolver {
       switch (phase.type) {
         case 'disassembly':
           this.disassemblyPhase = phase;
+          if (phase['blockIdToOffset']) {
+            for (const [blockId, pc] of Object.entries<number>(phase['blockIdToOffset'])) {
+              this.blockIdToPCOffset[blockId] = pc;
+              if (!this.blockStartPCtoBlockIds.has(pc)) {
+                this.blockStartPCtoBlockIds.set(pc, []);
+              }
+              this.blockStartPCtoBlockIds.get(pc).push(Number(blockId));
+            }
+          }
           break;
         case 'schedule':
           this.phaseNames.set(phase.name, this.phases.length);
@@ -466,6 +564,9 @@ export class SourceResolver {
           if (phase.instructionOffsetToPCOffset) {
             this.readInstructionOffsetToPCOffset(phase.instructionOffsetToPCOffset);
           }
+          if (phase.codeOffsetsInfo) {
+            this.codeOffsetsInfo = phase.codeOffsetsInfo;
+          }
           break;
         case 'graph':
           const graphPhase: GraphPhase = Object.assign(phase, { highestNodeId: 0 });
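getInstructionKindForPCOffset first classifies an offset against the code-layout boundaries in codeOffsetsInfo, then falls back to the per-instruction gap/arch/condition start offsets. A Python sketch of the same cascade, with hypothetical offset tables and a slightly simplified fallback:

```python
def classify_offset(offset, code_offsets, instr_info):
  """Classify a pc offset the way getInstructionKindForPCOffset does.

  code_offsets: section boundaries (hypothetical values below).
  instr_info: maps an instruction id to its gap/arch/condition start offsets.
  """
  if offset >= code_offsets["deoptimization_exits"]:
    if offset >= code_offsets["pools"]:
      return "pools"
    if offset >= code_offsets["jump_tables"]:
      return "jump-tables"
    return "deoptimization-exits"
  if offset < code_offsets["deopt_check"]:
    return "code-start-register"
  if offset < code_offsets["init_poison"]:
    return "deopt-check"
  if offset < code_offsets["blocks_start"]:
    return "init-poison"
  # Fall back to the instruction whose key (gap) offset is closest below.
  candidates = [i for i in instr_info.values() if i["gap"] <= offset]
  if candidates:
    info = max(candidates, key=lambda i: i["gap"])
    if offset < info["arch"]:
      return "gap"
    if offset < info["condition"]:
      return "arch"
    return "condition"
  return "unknown"

code_offsets = {"deopt_check": 8, "init_poison": 16, "blocks_start": 24,
                "deoptimization_exits": 200, "jump_tables": 220, "pools": 240}
instrs = {7: {"gap": 24, "arch": 32, "condition": 40}}
print(classify_offset(28, code_offsets, instrs))   # gap
print(classify_offset(36, code_offsets, instrs))   # arch
print(classify_offset(230, code_offsets, instrs))  # jump-tables
```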
diff --git a/deps/v8/tools/turbolizer/src/text-view.ts b/deps/v8/tools/turbolizer/src/text-view.ts
index 41a06eae77b934..761a16bff41a3f 100644
--- a/deps/v8/tools/turbolizer/src/text-view.ts
+++ b/deps/v8/tools/turbolizer/src/text-view.ts
@@ -129,6 +129,10 @@ export abstract class TextView extends PhaseView {
     if (this.divNode.parentNode == null) return;
     const mkVisible = new ViewElements(this.divNode.parentNode as HTMLElement);
     const view = this;
+    const elementsToSelect = view.divNode.querySelectorAll(`[data-pc-offset]`);
+    for (const el of elementsToSelect) {
+      el.classList.toggle("selected", false);
+    }
     for (const [blockId, elements] of this.blockIdToHtmlElementsMap.entries()) {
       const isSelected = view.blockSelection.isSelected(blockId);
       for (const element of elements) {
@@ -136,10 +140,6 @@ export abstract class TextView extends PhaseView {
         element.classList.toggle("selected", isSelected);
       }
     }
-    const elementsToSelect = view.divNode.querySelectorAll(`[data-pc-offset]`);
-    for (const el of elementsToSelect) {
-      el.classList.toggle("selected", false);
-    }
     for (const key of this.nodeIdToHtmlElementsMap.keys()) {
       for (const element of this.nodeIdToHtmlElementsMap.get(key)) {
         element.classList.toggle("selected", false);
@@ -170,7 +170,9 @@ export abstract class TextView extends PhaseView {
     const fragment = document.createElement("SPAN");
 
     if (typeof style.associateData == 'function') {
-      style.associateData(text, fragment);
+      if (style.associateData(text, fragment) === false) {
+        return null;
+      }
     } else {
       if (style.css != undefined) {
         const css = isIterable(style.css) ? style.css : [style.css];
@@ -198,7 +200,7 @@ export abstract class TextView extends PhaseView {
             const text = matches[0];
             if (text != '') {
               const fragment = view.createFragment(matches[0], style);
-              result.push(fragment);
+              if (fragment !== null) result.push(fragment);
             }
             line = line.substr(matches[0].length);
           }
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
index 216ca13d04b3a4..f89e716ce9f16b 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -696,3 +696,61 @@ text {
   padding-left: .5ex;
   outline: 1px dotted grey;
 }
+
+ul.disassembly-list .block-id {
+  width: 4ex;
+  display: block;
+  padding-top: 2px;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="gap"]+span+span {
+  background-color: #FAEEEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="arch"]+span+span {
+  background-color: #EEFFEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="condition"]+span+span {
+  background-color: #FFFFEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="gap"] {
+  background-color: #FAEEEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="arch"] {
+  background-color: #EEFFEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="condition"] {
+  background-color: #FFFFEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="deopt-check"] {
+  background-color: #FAEEFA;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="init-poison"] {
+  background-color: #EEFFAA;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="pools"] {
+  background-color: #6AA84F;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="code-start-register"] {
+  background-color: #FFCCCC;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="deoptimization-exits"] {
+  background-color: #CCCCFF;
+}
+
+[data-instruction-kind].selected {
+  background-color: yellow;
+}
+
+div.highlight-gap-instructions [data-instruction-kind].selected {
+  background-color: yellow;
+}
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index c6c98c04c3649b..53aaaf74dfbba8 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -31,107 +31,105 @@
   67: "ODDBALL_TYPE",
   68: "MAP_TYPE",
   69: "CODE_TYPE",
-  70: "MUTABLE_HEAP_NUMBER_TYPE",
-  71: "FOREIGN_TYPE",
-  72: "BYTE_ARRAY_TYPE",
-  73: "BYTECODE_ARRAY_TYPE",
-  74: "FREE_SPACE_TYPE",
-  75: "FIXED_DOUBLE_ARRAY_TYPE",
-  76: "FEEDBACK_METADATA_TYPE",
-  77: "FILLER_TYPE",
-  78: "ACCESS_CHECK_INFO_TYPE",
-  79: "ACCESSOR_INFO_TYPE",
-  80: "ACCESSOR_PAIR_TYPE",
-  81: "ALIASED_ARGUMENTS_ENTRY_TYPE",
-  82: "ALLOCATION_MEMENTO_TYPE",
-  83: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
-  84: "ASM_WASM_DATA_TYPE",
-  85: "ASYNC_GENERATOR_REQUEST_TYPE",
-  86: "CLASS_POSITIONS_TYPE",
-  87: "DEBUG_INFO_TYPE",
-  88: "ENUM_CACHE_TYPE",
-  89: "FUNCTION_TEMPLATE_INFO_TYPE",
-  90: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
-  91: "INTERCEPTOR_INFO_TYPE",
-  92: "INTERPRETER_DATA_TYPE",
-  93: "OBJECT_TEMPLATE_INFO_TYPE",
-  94: "PROMISE_CAPABILITY_TYPE",
-  95: "PROMISE_REACTION_TYPE",
-  96: "PROTOTYPE_INFO_TYPE",
-  97: "SCRIPT_TYPE",
-  98: "SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE",
-  99: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
-  100: "STACK_FRAME_INFO_TYPE",
-  101: "STACK_TRACE_FRAME_TYPE",
-  102: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
-  103: "TUPLE2_TYPE",
-  104: "TUPLE3_TYPE",
-  105: "WASM_CAPI_FUNCTION_DATA_TYPE",
-  106: "WASM_DEBUG_INFO_TYPE",
-  107: "WASM_EXCEPTION_TAG_TYPE",
-  108: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
-  109: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
-  110: "WASM_JS_FUNCTION_DATA_TYPE",
-  111: "CALLABLE_TASK_TYPE",
-  112: "CALLBACK_TASK_TYPE",
-  113: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
-  114: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
-  115: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
-  116: "FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE",
-  117: "INTERNAL_CLASS_TYPE",
-  118: "SMI_PAIR_TYPE",
-  119: "SMI_BOX_TYPE",
-  120: "SORT_STATE_TYPE",
-  121: "SOURCE_TEXT_MODULE_TYPE",
-  122: "SYNTHETIC_MODULE_TYPE",
-  123: "ALLOCATION_SITE_TYPE",
-  124: "EMBEDDER_DATA_ARRAY_TYPE",
-  125: "FIXED_ARRAY_TYPE",
-  126: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
-  127: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
-  128: "HASH_TABLE_TYPE",
-  129: "ORDERED_HASH_MAP_TYPE",
-  130: "ORDERED_HASH_SET_TYPE",
-  131: "ORDERED_NAME_DICTIONARY_TYPE",
-  132: "NAME_DICTIONARY_TYPE",
-  133: "GLOBAL_DICTIONARY_TYPE",
-  134: "NUMBER_DICTIONARY_TYPE",
-  135: "SIMPLE_NUMBER_DICTIONARY_TYPE",
-  136: "STRING_TABLE_TYPE",
-  137: "EPHEMERON_HASH_TABLE_TYPE",
-  138: "SCOPE_INFO_TYPE",
-  139: "SCRIPT_CONTEXT_TABLE_TYPE",
-  140: "AWAIT_CONTEXT_TYPE",
-  141: "BLOCK_CONTEXT_TYPE",
-  142: "CATCH_CONTEXT_TYPE",
-  143: "DEBUG_EVALUATE_CONTEXT_TYPE",
-  144: "EVAL_CONTEXT_TYPE",
-  145: "FUNCTION_CONTEXT_TYPE",
-  146: "MODULE_CONTEXT_TYPE",
-  147: "NATIVE_CONTEXT_TYPE",
-  148: "SCRIPT_CONTEXT_TYPE",
-  149: "WITH_CONTEXT_TYPE",
-  150: "WEAK_FIXED_ARRAY_TYPE",
-  151: "TRANSITION_ARRAY_TYPE",
-  152: "CALL_HANDLER_INFO_TYPE",
-  153: "CELL_TYPE",
-  154: "CODE_DATA_CONTAINER_TYPE",
-  155: "DESCRIPTOR_ARRAY_TYPE",
-  156: "FEEDBACK_CELL_TYPE",
-  157: "FEEDBACK_VECTOR_TYPE",
-  158: "LOAD_HANDLER_TYPE",
-  159: "PREPARSE_DATA_TYPE",
-  160: "PROPERTY_ARRAY_TYPE",
-  161: "PROPERTY_CELL_TYPE",
-  162: "SHARED_FUNCTION_INFO_TYPE",
-  163: "SMALL_ORDERED_HASH_MAP_TYPE",
-  164: "SMALL_ORDERED_HASH_SET_TYPE",
-  165: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
-  166: "STORE_HANDLER_TYPE",
-  167: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
-  168: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
-  169: "WEAK_ARRAY_LIST_TYPE",
-  170: "WEAK_CELL_TYPE",
+  70: "FOREIGN_TYPE",
+  71: "BYTE_ARRAY_TYPE",
+  72: "BYTECODE_ARRAY_TYPE",
+  73: "FREE_SPACE_TYPE",
+  74: "FIXED_DOUBLE_ARRAY_TYPE",
+  75: "FEEDBACK_METADATA_TYPE",
+  76: "FILLER_TYPE",
+  77: "ACCESS_CHECK_INFO_TYPE",
+  78: "ACCESSOR_INFO_TYPE",
+  79: "ACCESSOR_PAIR_TYPE",
+  80: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+  81: "ALLOCATION_MEMENTO_TYPE",
+  82: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+  83: "ASM_WASM_DATA_TYPE",
+  84: "ASYNC_GENERATOR_REQUEST_TYPE",
+  85: "CLASS_POSITIONS_TYPE",
+  86: "DEBUG_INFO_TYPE",
+  87: "ENUM_CACHE_TYPE",
+  88: "FUNCTION_TEMPLATE_INFO_TYPE",
+  89: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+  90: "INTERCEPTOR_INFO_TYPE",
+  91: "INTERPRETER_DATA_TYPE",
+  92: "OBJECT_TEMPLATE_INFO_TYPE",
+  93: "PROMISE_CAPABILITY_TYPE",
+  94: "PROMISE_REACTION_TYPE",
+  95: "PROTOTYPE_INFO_TYPE",
+  96: "SCRIPT_TYPE",
+  97: "SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE",
+  98: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+  99: "STACK_FRAME_INFO_TYPE",
+  100: "STACK_TRACE_FRAME_TYPE",
+  101: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+  102: "TUPLE2_TYPE",
+  103: "TUPLE3_TYPE",
+  104: "WASM_CAPI_FUNCTION_DATA_TYPE",
+  105: "WASM_DEBUG_INFO_TYPE",
+  106: "WASM_EXCEPTION_TAG_TYPE",
+  107: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+  108: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+  109: "WASM_JS_FUNCTION_DATA_TYPE",
+  110: "CALLABLE_TASK_TYPE",
+  111: "CALLBACK_TASK_TYPE",
+  112: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+  113: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+  114: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+  115: "INTERNAL_CLASS_TYPE",
+  116: "SMI_PAIR_TYPE",
+  117: "SMI_BOX_TYPE",
+  118: "SORT_STATE_TYPE",
+  119: "SOURCE_TEXT_MODULE_TYPE",
+  120: "SYNTHETIC_MODULE_TYPE",
+  121: "ALLOCATION_SITE_TYPE",
+  122: "EMBEDDER_DATA_ARRAY_TYPE",
+  123: "FIXED_ARRAY_TYPE",
+  124: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+  125: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+  126: "HASH_TABLE_TYPE",
+  127: "ORDERED_HASH_MAP_TYPE",
+  128: "ORDERED_HASH_SET_TYPE",
+  129: "ORDERED_NAME_DICTIONARY_TYPE",
+  130: "NAME_DICTIONARY_TYPE",
+  131: "GLOBAL_DICTIONARY_TYPE",
+  132: "NUMBER_DICTIONARY_TYPE",
+  133: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+  134: "STRING_TABLE_TYPE",
+  135: "EPHEMERON_HASH_TABLE_TYPE",
+  136: "SCOPE_INFO_TYPE",
+  137: "SCRIPT_CONTEXT_TABLE_TYPE",
+  138: "AWAIT_CONTEXT_TYPE",
+  139: "BLOCK_CONTEXT_TYPE",
+  140: "CATCH_CONTEXT_TYPE",
+  141: "DEBUG_EVALUATE_CONTEXT_TYPE",
+  142: "EVAL_CONTEXT_TYPE",
+  143: "FUNCTION_CONTEXT_TYPE",
+  144: "MODULE_CONTEXT_TYPE",
+  145: "NATIVE_CONTEXT_TYPE",
+  146: "SCRIPT_CONTEXT_TYPE",
+  147: "WITH_CONTEXT_TYPE",
+  148: "WEAK_FIXED_ARRAY_TYPE",
+  149: "TRANSITION_ARRAY_TYPE",
+  150: "CALL_HANDLER_INFO_TYPE",
+  151: "CELL_TYPE",
+  152: "CODE_DATA_CONTAINER_TYPE",
+  153: "DESCRIPTOR_ARRAY_TYPE",
+  154: "FEEDBACK_CELL_TYPE",
+  155: "FEEDBACK_VECTOR_TYPE",
+  156: "LOAD_HANDLER_TYPE",
+  157: "PREPARSE_DATA_TYPE",
+  158: "PROPERTY_ARRAY_TYPE",
+  159: "PROPERTY_CELL_TYPE",
+  160: "SHARED_FUNCTION_INFO_TYPE",
+  161: "SMALL_ORDERED_HASH_MAP_TYPE",
+  162: "SMALL_ORDERED_HASH_SET_TYPE",
+  163: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+  164: "STORE_HANDLER_TYPE",
+  165: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+  166: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+  167: "WEAK_ARRAY_LIST_TYPE",
+  168: "WEAK_CELL_TYPE",
   1024: "JS_PROXY_TYPE",
   1025: "JS_GLOBAL_OBJECT_TYPE",
   1026: "JS_GLOBAL_PROXY_TYPE",
@@ -192,237 +190,237 @@
 
 # List of known V8 maps.
 KNOWN_MAPS = {
-  ("read_only_space", 0x00111): (74, "FreeSpaceMap"),
-  ("read_only_space", 0x00161): (68, "MetaMap"),
-  ("read_only_space", 0x001e1): (67, "NullMap"),
-  ("read_only_space", 0x00249): (155, "DescriptorArrayMap"),
-  ("read_only_space", 0x002a9): (150, "WeakFixedArrayMap"),
-  ("read_only_space", 0x002f9): (77, "OnePointerFillerMap"),
-  ("read_only_space", 0x00349): (77, "TwoPointerFillerMap"),
-  ("read_only_space", 0x003c9): (67, "UninitializedMap"),
-  ("read_only_space", 0x00439): (8, "OneByteInternalizedStringMap"),
-  ("read_only_space", 0x004d9): (67, "UndefinedMap"),
-  ("read_only_space", 0x00539): (65, "HeapNumberMap"),
-  ("read_only_space", 0x005b9): (67, "TheHoleMap"),
-  ("read_only_space", 0x00661): (67, "BooleanMap"),
-  ("read_only_space", 0x00739): (72, "ByteArrayMap"),
-  ("read_only_space", 0x00789): (125, "FixedArrayMap"),
-  ("read_only_space", 0x007d9): (125, "FixedCOWArrayMap"),
-  ("read_only_space", 0x00829): (128, "HashTableMap"),
-  ("read_only_space", 0x00879): (64, "SymbolMap"),
-  ("read_only_space", 0x008c9): (40, "OneByteStringMap"),
-  ("read_only_space", 0x00919): (138, "ScopeInfoMap"),
-  ("read_only_space", 0x00969): (162, "SharedFunctionInfoMap"),
-  ("read_only_space", 0x009b9): (69, "CodeMap"),
-  ("read_only_space", 0x00a09): (145, "FunctionContextMap"),
-  ("read_only_space", 0x00a59): (153, "CellMap"),
-  ("read_only_space", 0x00aa9): (161, "GlobalPropertyCellMap"),
-  ("read_only_space", 0x00af9): (71, "ForeignMap"),
-  ("read_only_space", 0x00b49): (151, "TransitionArrayMap"),
-  ("read_only_space", 0x00b99): (157, "FeedbackVectorMap"),
-  ("read_only_space", 0x00c39): (67, "ArgumentsMarkerMap"),
-  ("read_only_space", 0x00cd9): (67, "ExceptionMap"),
-  ("read_only_space", 0x00d79): (67, "TerminationExceptionMap"),
-  ("read_only_space", 0x00e21): (67, "OptimizedOutMap"),
-  ("read_only_space", 0x00ec1): (67, "StaleRegisterMap"),
-  ("read_only_space", 0x00f31): (147, "NativeContextMap"),
-  ("read_only_space", 0x00f81): (146, "ModuleContextMap"),
-  ("read_only_space", 0x00fd1): (144, "EvalContextMap"),
-  ("read_only_space", 0x01021): (148, "ScriptContextMap"),
-  ("read_only_space", 0x01071): (140, "AwaitContextMap"),
-  ("read_only_space", 0x010c1): (141, "BlockContextMap"),
-  ("read_only_space", 0x01111): (142, "CatchContextMap"),
-  ("read_only_space", 0x01161): (149, "WithContextMap"),
-  ("read_only_space", 0x011b1): (143, "DebugEvaluateContextMap"),
-  ("read_only_space", 0x01201): (139, "ScriptContextTableMap"),
-  ("read_only_space", 0x01251): (127, "ClosureFeedbackCellArrayMap"),
-  ("read_only_space", 0x012a1): (76, "FeedbackMetadataArrayMap"),
-  ("read_only_space", 0x012f1): (125, "ArrayListMap"),
-  ("read_only_space", 0x01341): (66, "BigIntMap"),
-  ("read_only_space", 0x01391): (126, "ObjectBoilerplateDescriptionMap"),
-  ("read_only_space", 0x013e1): (73, "BytecodeArrayMap"),
-  ("read_only_space", 0x01431): (154, "CodeDataContainerMap"),
-  ("read_only_space", 0x01481): (75, "FixedDoubleArrayMap"),
-  ("read_only_space", 0x014d1): (133, "GlobalDictionaryMap"),
-  ("read_only_space", 0x01521): (156, "ManyClosuresCellMap"),
-  ("read_only_space", 0x01571): (125, "ModuleInfoMap"),
-  ("read_only_space", 0x015c1): (70, "MutableHeapNumberMap"),
-  ("read_only_space", 0x01611): (132, "NameDictionaryMap"),
-  ("read_only_space", 0x01661): (156, "NoClosuresCellMap"),
-  ("read_only_space", 0x016b1): (134, "NumberDictionaryMap"),
-  ("read_only_space", 0x01701): (156, "OneClosureCellMap"),
-  ("read_only_space", 0x01751): (129, "OrderedHashMapMap"),
-  ("read_only_space", 0x017a1): (130, "OrderedHashSetMap"),
-  ("read_only_space", 0x017f1): (131, "OrderedNameDictionaryMap"),
-  ("read_only_space", 0x01841): (159, "PreparseDataMap"),
-  ("read_only_space", 0x01891): (160, "PropertyArrayMap"),
-  ("read_only_space", 0x018e1): (152, "SideEffectCallHandlerInfoMap"),
-  ("read_only_space", 0x01931): (152, "SideEffectFreeCallHandlerInfoMap"),
-  ("read_only_space", 0x01981): (152, "NextCallSideEffectFreeCallHandlerInfoMap"),
-  ("read_only_space", 0x019d1): (135, "SimpleNumberDictionaryMap"),
-  ("read_only_space", 0x01a21): (125, "SloppyArgumentsElementsMap"),
-  ("read_only_space", 0x01a71): (163, "SmallOrderedHashMapMap"),
-  ("read_only_space", 0x01ac1): (164, "SmallOrderedHashSetMap"),
-  ("read_only_space", 0x01b11): (165, "SmallOrderedNameDictionaryMap"),
-  ("read_only_space", 0x01b61): (121, "SourceTextModuleMap"),
-  ("read_only_space", 0x01bb1): (136, "StringTableMap"),
-  ("read_only_space", 0x01c01): (122, "SyntheticModuleMap"),
-  ("read_only_space", 0x01c51): (167, "UncompiledDataWithoutPreparseDataMap"),
-  ("read_only_space", 0x01ca1): (168, "UncompiledDataWithPreparseDataMap"),
-  ("read_only_space", 0x01cf1): (169, "WeakArrayListMap"),
-  ("read_only_space", 0x01d41): (137, "EphemeronHashTableMap"),
-  ("read_only_space", 0x01d91): (124, "EmbedderDataArrayMap"),
-  ("read_only_space", 0x01de1): (170, "WeakCellMap"),
-  ("read_only_space", 0x01e31): (58, "NativeSourceStringMap"),
-  ("read_only_space", 0x01e81): (32, "StringMap"),
-  ("read_only_space", 0x01ed1): (41, "ConsOneByteStringMap"),
-  ("read_only_space", 0x01f21): (33, "ConsStringMap"),
-  ("read_only_space", 0x01f71): (45, "ThinOneByteStringMap"),
-  ("read_only_space", 0x01fc1): (37, "ThinStringMap"),
-  ("read_only_space", 0x02011): (35, "SlicedStringMap"),
-  ("read_only_space", 0x02061): (43, "SlicedOneByteStringMap"),
-  ("read_only_space", 0x020b1): (34, "ExternalStringMap"),
-  ("read_only_space", 0x02101): (42, "ExternalOneByteStringMap"),
-  ("read_only_space", 0x02151): (50, "UncachedExternalStringMap"),
-  ("read_only_space", 0x021a1): (0, "InternalizedStringMap"),
-  ("read_only_space", 0x021f1): (2, "ExternalInternalizedStringMap"),
-  ("read_only_space", 0x02241): (10, "ExternalOneByteInternalizedStringMap"),
-  ("read_only_space", 0x02291): (18, "UncachedExternalInternalizedStringMap"),
-  ("read_only_space", 0x022e1): (26, "UncachedExternalOneByteInternalizedStringMap"),
-  ("read_only_space", 0x02331): (58, "UncachedExternalOneByteStringMap"),
-  ("read_only_space", 0x02381): (67, "SelfReferenceMarkerMap"),
-  ("read_only_space", 0x023e9): (88, "EnumCacheMap"),
-  ("read_only_space", 0x02489): (83, "ArrayBoilerplateDescriptionMap"),
-  ("read_only_space", 0x02679): (91, "InterceptorInfoMap"),
-  ("read_only_space", 0x04e59): (78, "AccessCheckInfoMap"),
-  ("read_only_space", 0x04ea9): (79, "AccessorInfoMap"),
-  ("read_only_space", 0x04ef9): (80, "AccessorPairMap"),
-  ("read_only_space", 0x04f49): (81, "AliasedArgumentsEntryMap"),
-  ("read_only_space", 0x04f99): (82, "AllocationMementoMap"),
-  ("read_only_space", 0x04fe9): (84, "AsmWasmDataMap"),
-  ("read_only_space", 0x05039): (85, "AsyncGeneratorRequestMap"),
-  ("read_only_space", 0x05089): (86, "ClassPositionsMap"),
-  ("read_only_space", 0x050d9): (87, "DebugInfoMap"),
-  ("read_only_space", 0x05129): (89, "FunctionTemplateInfoMap"),
-  ("read_only_space", 0x05179): (90, "FunctionTemplateRareDataMap"),
-  ("read_only_space", 0x051c9): (92, "InterpreterDataMap"),
-  ("read_only_space", 0x05219): (93, "ObjectTemplateInfoMap"),
-  ("read_only_space", 0x05269): (94, "PromiseCapabilityMap"),
-  ("read_only_space", 0x052b9): (95, "PromiseReactionMap"),
-  ("read_only_space", 0x05309): (96, "PrototypeInfoMap"),
-  ("read_only_space", 0x05359): (97, "ScriptMap"),
-  ("read_only_space", 0x053a9): (98, "SourcePositionTableWithFrameCacheMap"),
-  ("read_only_space", 0x053f9): (99, "SourceTextModuleInfoEntryMap"),
-  ("read_only_space", 0x05449): (100, "StackFrameInfoMap"),
-  ("read_only_space", 0x05499): (101, "StackTraceFrameMap"),
-  ("read_only_space", 0x054e9): (102, "TemplateObjectDescriptionMap"),
-  ("read_only_space", 0x05539): (103, "Tuple2Map"),
-  ("read_only_space", 0x05589): (104, "Tuple3Map"),
-  ("read_only_space", 0x055d9): (105, "WasmCapiFunctionDataMap"),
-  ("read_only_space", 0x05629): (106, "WasmDebugInfoMap"),
-  ("read_only_space", 0x05679): (107, "WasmExceptionTagMap"),
-  ("read_only_space", 0x056c9): (108, "WasmExportedFunctionDataMap"),
-  ("read_only_space", 0x05719): (109, "WasmIndirectFunctionTableMap"),
-  ("read_only_space", 0x05769): (110, "WasmJSFunctionDataMap"),
-  ("read_only_space", 0x057b9): (111, "CallableTaskMap"),
-  ("read_only_space", 0x05809): (112, "CallbackTaskMap"),
-  ("read_only_space", 0x05859): (113, "PromiseFulfillReactionJobTaskMap"),
-  ("read_only_space", 0x058a9): (114, "PromiseRejectReactionJobTaskMap"),
-  ("read_only_space", 0x058f9): (115, "PromiseResolveThenableJobTaskMap"),
-  ("read_only_space", 0x05949): (116, "FinalizationGroupCleanupJobTaskMap"),
-  ("read_only_space", 0x05999): (117, "InternalClassMap"),
-  ("read_only_space", 0x059e9): (118, "SmiPairMap"),
-  ("read_only_space", 0x05a39): (119, "SmiBoxMap"),
-  ("read_only_space", 0x05a89): (120, "SortStateMap"),
-  ("read_only_space", 0x05ad9): (123, "AllocationSiteWithWeakNextMap"),
-  ("read_only_space", 0x05b29): (123, "AllocationSiteWithoutWeakNextMap"),
-  ("read_only_space", 0x05b79): (158, "LoadHandler1Map"),
-  ("read_only_space", 0x05bc9): (158, "LoadHandler2Map"),
-  ("read_only_space", 0x05c19): (158, "LoadHandler3Map"),
-  ("read_only_space", 0x05c69): (166, "StoreHandler0Map"),
-  ("read_only_space", 0x05cb9): (166, "StoreHandler1Map"),
-  ("read_only_space", 0x05d09): (166, "StoreHandler2Map"),
-  ("read_only_space", 0x05d59): (166, "StoreHandler3Map"),
-  ("map_space", 0x00111): (1057, "ExternalMap"),
-  ("map_space", 0x00161): (1073, "JSMessageObjectMap"),
+  ("read_only_space", 0x00119): (73, "FreeSpaceMap"),
+  ("read_only_space", 0x00169): (68, "MetaMap"),
+  ("read_only_space", 0x001e9): (67, "NullMap"),
+  ("read_only_space", 0x00251): (153, "DescriptorArrayMap"),
+  ("read_only_space", 0x002b1): (148, "WeakFixedArrayMap"),
+  ("read_only_space", 0x00301): (76, "OnePointerFillerMap"),
+  ("read_only_space", 0x00351): (76, "TwoPointerFillerMap"),
+  ("read_only_space", 0x003d1): (67, "UninitializedMap"),
+  ("read_only_space", 0x00441): (8, "OneByteInternalizedStringMap"),
+  ("read_only_space", 0x004e1): (67, "UndefinedMap"),
+  ("read_only_space", 0x00541): (65, "HeapNumberMap"),
+  ("read_only_space", 0x005c1): (67, "TheHoleMap"),
+  ("read_only_space", 0x00669): (67, "BooleanMap"),
+  ("read_only_space", 0x00741): (71, "ByteArrayMap"),
+  ("read_only_space", 0x00791): (123, "FixedArrayMap"),
+  ("read_only_space", 0x007e1): (123, "FixedCOWArrayMap"),
+  ("read_only_space", 0x00831): (126, "HashTableMap"),
+  ("read_only_space", 0x00881): (64, "SymbolMap"),
+  ("read_only_space", 0x008d1): (40, "OneByteStringMap"),
+  ("read_only_space", 0x00921): (136, "ScopeInfoMap"),
+  ("read_only_space", 0x00971): (160, "SharedFunctionInfoMap"),
+  ("read_only_space", 0x009c1): (69, "CodeMap"),
+  ("read_only_space", 0x00a11): (143, "FunctionContextMap"),
+  ("read_only_space", 0x00a61): (151, "CellMap"),
+  ("read_only_space", 0x00ab1): (159, "GlobalPropertyCellMap"),
+  ("read_only_space", 0x00b01): (70, "ForeignMap"),
+  ("read_only_space", 0x00b51): (149, "TransitionArrayMap"),
+  ("read_only_space", 0x00ba1): (155, "FeedbackVectorMap"),
+  ("read_only_space", 0x00c41): (67, "ArgumentsMarkerMap"),
+  ("read_only_space", 0x00ce1): (67, "ExceptionMap"),
+  ("read_only_space", 0x00d81): (67, "TerminationExceptionMap"),
+  ("read_only_space", 0x00e29): (67, "OptimizedOutMap"),
+  ("read_only_space", 0x00ec9): (67, "StaleRegisterMap"),
+  ("read_only_space", 0x00f39): (145, "NativeContextMap"),
+  ("read_only_space", 0x00f89): (144, "ModuleContextMap"),
+  ("read_only_space", 0x00fd9): (142, "EvalContextMap"),
+  ("read_only_space", 0x01029): (146, "ScriptContextMap"),
+  ("read_only_space", 0x01079): (138, "AwaitContextMap"),
+  ("read_only_space", 0x010c9): (139, "BlockContextMap"),
+  ("read_only_space", 0x01119): (140, "CatchContextMap"),
+  ("read_only_space", 0x01169): (147, "WithContextMap"),
+  ("read_only_space", 0x011b9): (141, "DebugEvaluateContextMap"),
+  ("read_only_space", 0x01209): (137, "ScriptContextTableMap"),
+  ("read_only_space", 0x01259): (125, "ClosureFeedbackCellArrayMap"),
+  ("read_only_space", 0x012a9): (75, "FeedbackMetadataArrayMap"),
+  ("read_only_space", 0x012f9): (123, "ArrayListMap"),
+  ("read_only_space", 0x01349): (66, "BigIntMap"),
+  ("read_only_space", 0x01399): (124, "ObjectBoilerplateDescriptionMap"),
+  ("read_only_space", 0x013e9): (72, "BytecodeArrayMap"),
+  ("read_only_space", 0x01439): (152, "CodeDataContainerMap"),
+  ("read_only_space", 0x01489): (74, "FixedDoubleArrayMap"),
+  ("read_only_space", 0x014d9): (131, "GlobalDictionaryMap"),
+  ("read_only_space", 0x01529): (154, "ManyClosuresCellMap"),
+  ("read_only_space", 0x01579): (123, "ModuleInfoMap"),
+  ("read_only_space", 0x015c9): (130, "NameDictionaryMap"),
+  ("read_only_space", 0x01619): (154, "NoClosuresCellMap"),
+  ("read_only_space", 0x01669): (132, "NumberDictionaryMap"),
+  ("read_only_space", 0x016b9): (154, "OneClosureCellMap"),
+  ("read_only_space", 0x01709): (127, "OrderedHashMapMap"),
+  ("read_only_space", 0x01759): (128, "OrderedHashSetMap"),
+  ("read_only_space", 0x017a9): (129, "OrderedNameDictionaryMap"),
+  ("read_only_space", 0x017f9): (157, "PreparseDataMap"),
+  ("read_only_space", 0x01849): (158, "PropertyArrayMap"),
+  ("read_only_space", 0x01899): (150, "SideEffectCallHandlerInfoMap"),
+  ("read_only_space", 0x018e9): (150, "SideEffectFreeCallHandlerInfoMap"),
+  ("read_only_space", 0x01939): (150, "NextCallSideEffectFreeCallHandlerInfoMap"),
+  ("read_only_space", 0x01989): (133, "SimpleNumberDictionaryMap"),
+  ("read_only_space", 0x019d9): (123, "SloppyArgumentsElementsMap"),
+  ("read_only_space", 0x01a29): (161, "SmallOrderedHashMapMap"),
+  ("read_only_space", 0x01a79): (162, "SmallOrderedHashSetMap"),
+  ("read_only_space", 0x01ac9): (163, "SmallOrderedNameDictionaryMap"),
+  ("read_only_space", 0x01b19): (119, "SourceTextModuleMap"),
+  ("read_only_space", 0x01b69): (134, "StringTableMap"),
+  ("read_only_space", 0x01bb9): (120, "SyntheticModuleMap"),
+  ("read_only_space", 0x01c09): (165, "UncompiledDataWithoutPreparseDataMap"),
+  ("read_only_space", 0x01c59): (166, "UncompiledDataWithPreparseDataMap"),
+  ("read_only_space", 0x01ca9): (167, "WeakArrayListMap"),
+  ("read_only_space", 0x01cf9): (135, "EphemeronHashTableMap"),
+  ("read_only_space", 0x01d49): (122, "EmbedderDataArrayMap"),
+  ("read_only_space", 0x01d99): (168, "WeakCellMap"),
+  ("read_only_space", 0x01de9): (58, "NativeSourceStringMap"),
+  ("read_only_space", 0x01e39): (32, "StringMap"),
+  ("read_only_space", 0x01e89): (41, "ConsOneByteStringMap"),
+  ("read_only_space", 0x01ed9): (33, "ConsStringMap"),
+  ("read_only_space", 0x01f29): (45, "ThinOneByteStringMap"),
+  ("read_only_space", 0x01f79): (37, "ThinStringMap"),
+  ("read_only_space", 0x01fc9): (35, "SlicedStringMap"),
+  ("read_only_space", 0x02019): (43, "SlicedOneByteStringMap"),
+  ("read_only_space", 0x02069): (34, "ExternalStringMap"),
+  ("read_only_space", 0x020b9): (42, "ExternalOneByteStringMap"),
+  ("read_only_space", 0x02109): (50, "UncachedExternalStringMap"),
+  ("read_only_space", 0x02159): (0, "InternalizedStringMap"),
+  ("read_only_space", 0x021a9): (2, "ExternalInternalizedStringMap"),
+  ("read_only_space", 0x021f9): (10, "ExternalOneByteInternalizedStringMap"),
+  ("read_only_space", 0x02249): (18, "UncachedExternalInternalizedStringMap"),
+  ("read_only_space", 0x02299): (26, "UncachedExternalOneByteInternalizedStringMap"),
+  ("read_only_space", 0x022e9): (58, "UncachedExternalOneByteStringMap"),
+  ("read_only_space", 0x02339): (67, "SelfReferenceMarkerMap"),
+  ("read_only_space", 0x023a1): (87, "EnumCacheMap"),
+  ("read_only_space", 0x02441): (82, "ArrayBoilerplateDescriptionMap"),
+  ("read_only_space", 0x02631): (90, "InterceptorInfoMap"),
+  ("read_only_space", 0x04eb1): (77, "AccessCheckInfoMap"),
+  ("read_only_space", 0x04f01): (78, "AccessorInfoMap"),
+  ("read_only_space", 0x04f51): (79, "AccessorPairMap"),
+  ("read_only_space", 0x04fa1): (80, "AliasedArgumentsEntryMap"),
+  ("read_only_space", 0x04ff1): (81, "AllocationMementoMap"),
+  ("read_only_space", 0x05041): (83, "AsmWasmDataMap"),
+  ("read_only_space", 0x05091): (84, "AsyncGeneratorRequestMap"),
+  ("read_only_space", 0x050e1): (85, "ClassPositionsMap"),
+  ("read_only_space", 0x05131): (86, "DebugInfoMap"),
+  ("read_only_space", 0x05181): (88, "FunctionTemplateInfoMap"),
+  ("read_only_space", 0x051d1): (89, "FunctionTemplateRareDataMap"),
+  ("read_only_space", 0x05221): (91, "InterpreterDataMap"),
+  ("read_only_space", 0x05271): (92, "ObjectTemplateInfoMap"),
+  ("read_only_space", 0x052c1): (93, "PromiseCapabilityMap"),
+  ("read_only_space", 0x05311): (94, "PromiseReactionMap"),
+  ("read_only_space", 0x05361): (95, "PrototypeInfoMap"),
+  ("read_only_space", 0x053b1): (96, "ScriptMap"),
+  ("read_only_space", 0x05401): (97, "SourcePositionTableWithFrameCacheMap"),
+  ("read_only_space", 0x05451): (98, "SourceTextModuleInfoEntryMap"),
+  ("read_only_space", 0x054a1): (99, "StackFrameInfoMap"),
+  ("read_only_space", 0x054f1): (100, "StackTraceFrameMap"),
+  ("read_only_space", 0x05541): (101, "TemplateObjectDescriptionMap"),
+  ("read_only_space", 0x05591): (102, "Tuple2Map"),
+  ("read_only_space", 0x055e1): (103, "Tuple3Map"),
+  ("read_only_space", 0x05631): (104, "WasmCapiFunctionDataMap"),
+  ("read_only_space", 0x05681): (105, "WasmDebugInfoMap"),
+  ("read_only_space", 0x056d1): (106, "WasmExceptionTagMap"),
+  ("read_only_space", 0x05721): (107, "WasmExportedFunctionDataMap"),
+  ("read_only_space", 0x05771): (108, "WasmIndirectFunctionTableMap"),
+  ("read_only_space", 0x057c1): (109, "WasmJSFunctionDataMap"),
+  ("read_only_space", 0x05811): (110, "CallableTaskMap"),
+  ("read_only_space", 0x05861): (111, "CallbackTaskMap"),
+  ("read_only_space", 0x058b1): (112, "PromiseFulfillReactionJobTaskMap"),
+  ("read_only_space", 0x05901): (113, "PromiseRejectReactionJobTaskMap"),
+  ("read_only_space", 0x05951): (114, "PromiseResolveThenableJobTaskMap"),
+  ("read_only_space", 0x059a1): (115, "InternalClassMap"),
+  ("read_only_space", 0x059f1): (116, "SmiPairMap"),
+  ("read_only_space", 0x05a41): (117, "SmiBoxMap"),
+  ("read_only_space", 0x05a91): (118, "SortStateMap"),
+  ("read_only_space", 0x05ae1): (121, "AllocationSiteWithWeakNextMap"),
+  ("read_only_space", 0x05b31): (121, "AllocationSiteWithoutWeakNextMap"),
+  ("read_only_space", 0x05b81): (156, "LoadHandler1Map"),
+  ("read_only_space", 0x05bd1): (156, "LoadHandler2Map"),
+  ("read_only_space", 0x05c21): (156, "LoadHandler3Map"),
+  ("read_only_space", 0x05c71): (164, "StoreHandler0Map"),
+  ("read_only_space", 0x05cc1): (164, "StoreHandler1Map"),
+  ("read_only_space", 0x05d11): (164, "StoreHandler2Map"),
+  ("read_only_space", 0x05d61): (164, "StoreHandler3Map"),
+  ("map_space", 0x00119): (1057, "ExternalMap"),
+  ("map_space", 0x00169): (1073, "JSMessageObjectMap"),
 }
 
 # List of known V8 objects.
 KNOWN_OBJECTS = {
-  ("read_only_space", 0x001b1): "NullValue",
-  ("read_only_space", 0x00231): "EmptyDescriptorArray",
-  ("read_only_space", 0x00299): "EmptyWeakFixedArray",
-  ("read_only_space", 0x00399): "UninitializedValue",
-  ("read_only_space", 0x004a9): "UndefinedValue",
-  ("read_only_space", 0x00529): "NanValue",
-  ("read_only_space", 0x00589): "TheHoleValue",
-  ("read_only_space", 0x00621): "HoleNanValue",
-  ("read_only_space", 0x00631): "TrueValue",
-  ("read_only_space", 0x006e1): "FalseValue",
-  ("read_only_space", 0x00729): "empty_string",
-  ("read_only_space", 0x00be9): "EmptyScopeInfo",
-  ("read_only_space", 0x00bf9): "EmptyFixedArray",
-  ("read_only_space", 0x00c09): "ArgumentsMarker",
-  ("read_only_space", 0x00ca9): "Exception",
-  ("read_only_space", 0x00d49): "TerminationException",
-  ("read_only_space", 0x00df1): "OptimizedOut",
-  ("read_only_space", 0x00e91): "StaleRegister",
-  ("read_only_space", 0x023d1): "EmptyEnumCache",
-  ("read_only_space", 0x02439): "EmptyPropertyArray",
-  ("read_only_space", 0x02449): "EmptyByteArray",
-  ("read_only_space", 0x02459): "EmptyObjectBoilerplateDescription",
-  ("read_only_space", 0x02471): "EmptyArrayBoilerplateDescription",
-  ("read_only_space", 0x024d9): "EmptyClosureFeedbackCellArray",
-  ("read_only_space", 0x024e9): "EmptySloppyArgumentsElements",
-  ("read_only_space", 0x02509): "EmptySlowElementDictionary",
-  ("read_only_space", 0x02551): "EmptyOrderedHashMap",
-  ("read_only_space", 0x02579): "EmptyOrderedHashSet",
-  ("read_only_space", 0x025a1): "EmptyFeedbackMetadata",
-  ("read_only_space", 0x025b1): "EmptyPropertyCell",
-  ("read_only_space", 0x025d9): "EmptyPropertyDictionary",
-  ("read_only_space", 0x02629): "NoOpInterceptorInfo",
-  ("read_only_space", 0x026c9): "EmptyWeakArrayList",
-  ("read_only_space", 0x026e1): "InfinityValue",
-  ("read_only_space", 0x026f1): "MinusZeroValue",
-  ("read_only_space", 0x02701): "MinusInfinityValue",
-  ("read_only_space", 0x02711): "SelfReferenceMarker",
-  ("read_only_space", 0x02769): "OffHeapTrampolineRelocationInfo",
-  ("read_only_space", 0x02781): "TrampolineTrivialCodeDataContainer",
-  ("read_only_space", 0x02799): "TrampolinePromiseRejectionCodeDataContainer",
-  ("read_only_space", 0x027b1): "HashSeed",
-  ("old_space", 0x00111): "ArgumentsIteratorAccessor",
-  ("old_space", 0x00181): "ArrayLengthAccessor",
-  ("old_space", 0x001f1): "BoundFunctionLengthAccessor",
-  ("old_space", 0x00261): "BoundFunctionNameAccessor",
-  ("old_space", 0x002d1): "ErrorStackAccessor",
-  ("old_space", 0x00341): "FunctionArgumentsAccessor",
-  ("old_space", 0x003b1): "FunctionCallerAccessor",
-  ("old_space", 0x00421): "FunctionNameAccessor",
-  ("old_space", 0x00491): "FunctionLengthAccessor",
-  ("old_space", 0x00501): "FunctionPrototypeAccessor",
-  ("old_space", 0x00571): "StringLengthAccessor",
-  ("old_space", 0x005e1): "InvalidPrototypeValidityCell",
-  ("old_space", 0x005f1): "EmptyScript",
-  ("old_space", 0x00671): "ManyClosuresCell",
-  ("old_space", 0x00689): "ArrayConstructorProtector",
-  ("old_space", 0x00699): "NoElementsProtector",
-  ("old_space", 0x006c1): "IsConcatSpreadableProtector",
-  ("old_space", 0x006d1): "ArraySpeciesProtector",
-  ("old_space", 0x006f9): "TypedArraySpeciesProtector",
-  ("old_space", 0x00721): "PromiseSpeciesProtector",
-  ("old_space", 0x00749): "StringLengthProtector",
-  ("old_space", 0x00759): "ArrayIteratorProtector",
-  ("old_space", 0x00781): "ArrayBufferDetachingProtector",
-  ("old_space", 0x007a9): "PromiseHookProtector",
-  ("old_space", 0x007d1): "PromiseResolveProtector",
-  ("old_space", 0x007e1): "MapIteratorProtector",
-  ("old_space", 0x00809): "PromiseThenProtector",
-  ("old_space", 0x00831): "SetIteratorProtector",
-  ("old_space", 0x00859): "StringIteratorProtector",
-  ("old_space", 0x00881): "SingleCharacterStringCache",
-  ("old_space", 0x01091): "StringSplitCache",
-  ("old_space", 0x018a1): "RegExpMultipleCache",
-  ("old_space", 0x020b1): "BuiltinsConstantsTable",
+  ("read_only_space", 0x001b9): "NullValue",
+  ("read_only_space", 0x00239): "EmptyDescriptorArray",
+  ("read_only_space", 0x002a1): "EmptyWeakFixedArray",
+  ("read_only_space", 0x003a1): "UninitializedValue",
+  ("read_only_space", 0x004b1): "UndefinedValue",
+  ("read_only_space", 0x00531): "NanValue",
+  ("read_only_space", 0x00591): "TheHoleValue",
+  ("read_only_space", 0x00629): "HoleNanValue",
+  ("read_only_space", 0x00639): "TrueValue",
+  ("read_only_space", 0x006e9): "FalseValue",
+  ("read_only_space", 0x00731): "empty_string",
+  ("read_only_space", 0x00bf1): "EmptyScopeInfo",
+  ("read_only_space", 0x00c01): "EmptyFixedArray",
+  ("read_only_space", 0x00c11): "ArgumentsMarker",
+  ("read_only_space", 0x00cb1): "Exception",
+  ("read_only_space", 0x00d51): "TerminationException",
+  ("read_only_space", 0x00df9): "OptimizedOut",
+  ("read_only_space", 0x00e99): "StaleRegister",
+  ("read_only_space", 0x02389): "EmptyEnumCache",
+  ("read_only_space", 0x023f1): "EmptyPropertyArray",
+  ("read_only_space", 0x02401): "EmptyByteArray",
+  ("read_only_space", 0x02411): "EmptyObjectBoilerplateDescription",
+  ("read_only_space", 0x02429): "EmptyArrayBoilerplateDescription",
+  ("read_only_space", 0x02491): "EmptyClosureFeedbackCellArray",
+  ("read_only_space", 0x024a1): "EmptySloppyArgumentsElements",
+  ("read_only_space", 0x024c1): "EmptySlowElementDictionary",
+  ("read_only_space", 0x02509): "EmptyOrderedHashMap",
+  ("read_only_space", 0x02531): "EmptyOrderedHashSet",
+  ("read_only_space", 0x02559): "EmptyFeedbackMetadata",
+  ("read_only_space", 0x02569): "EmptyPropertyCell",
+  ("read_only_space", 0x02591): "EmptyPropertyDictionary",
+  ("read_only_space", 0x025e1): "NoOpInterceptorInfo",
+  ("read_only_space", 0x02681): "EmptyWeakArrayList",
+  ("read_only_space", 0x02699): "InfinityValue",
+  ("read_only_space", 0x026a9): "MinusZeroValue",
+  ("read_only_space", 0x026b9): "MinusInfinityValue",
+  ("read_only_space", 0x026c9): "SelfReferenceMarker",
+  ("read_only_space", 0x02721): "OffHeapTrampolineRelocationInfo",
+  ("read_only_space", 0x02739): "TrampolineTrivialCodeDataContainer",
+  ("read_only_space", 0x02751): "TrampolinePromiseRejectionCodeDataContainer",
+  ("read_only_space", 0x02769): "GlobalThisBindingScopeInfo",
+  ("read_only_space", 0x027d1): "EmptyFunctionScopeInfo",
+  ("read_only_space", 0x02821): "HashSeed",
+  ("old_space", 0x00119): "ArgumentsIteratorAccessor",
+  ("old_space", 0x00189): "ArrayLengthAccessor",
+  ("old_space", 0x001f9): "BoundFunctionLengthAccessor",
+  ("old_space", 0x00269): "BoundFunctionNameAccessor",
+  ("old_space", 0x002d9): "ErrorStackAccessor",
+  ("old_space", 0x00349): "FunctionArgumentsAccessor",
+  ("old_space", 0x003b9): "FunctionCallerAccessor",
+  ("old_space", 0x00429): "FunctionNameAccessor",
+  ("old_space", 0x00499): "FunctionLengthAccessor",
+  ("old_space", 0x00509): "FunctionPrototypeAccessor",
+  ("old_space", 0x00579): "StringLengthAccessor",
+  ("old_space", 0x005e9): "InvalidPrototypeValidityCell",
+  ("old_space", 0x005f9): "EmptyScript",
+  ("old_space", 0x00679): "ManyClosuresCell",
+  ("old_space", 0x00691): "ArrayConstructorProtector",
+  ("old_space", 0x006a1): "NoElementsProtector",
+  ("old_space", 0x006c9): "IsConcatSpreadableProtector",
+  ("old_space", 0x006d9): "ArraySpeciesProtector",
+  ("old_space", 0x00701): "TypedArraySpeciesProtector",
+  ("old_space", 0x00729): "PromiseSpeciesProtector",
+  ("old_space", 0x00751): "StringLengthProtector",
+  ("old_space", 0x00761): "ArrayIteratorProtector",
+  ("old_space", 0x00789): "ArrayBufferDetachingProtector",
+  ("old_space", 0x007b1): "PromiseHookProtector",
+  ("old_space", 0x007d9): "PromiseResolveProtector",
+  ("old_space", 0x007e9): "MapIteratorProtector",
+  ("old_space", 0x00811): "PromiseThenProtector",
+  ("old_space", 0x00839): "SetIteratorProtector",
+  ("old_space", 0x00861): "StringIteratorProtector",
+  ("old_space", 0x00889): "SingleCharacterStringCache",
+  ("old_space", 0x01099): "StringSplitCache",
+  ("old_space", 0x018a9): "RegExpMultipleCache",
+  ("old_space", 0x020b9): "BuiltinsConstantsTable",
 }
 
 # List of known V8 Frame Markers.
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index d029ffe6046e1f..01688648eb0315 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -30,6 +30,8 @@ V8_DIR="${TOOLS_WASM_DIR}/../.."
 SPEC_TEST_DIR=${V8_DIR}/test/wasm-spec-tests
 TMP_DIR=${SPEC_TEST_DIR}/tmp
 
+JS_API_TEST_DIR=${V8_DIR}/test/wasm-js
+
 log_and_run cd ${V8_DIR}
 
 log_and_run rm -rf ${SPEC_TEST_DIR}/tests
@@ -40,29 +42,40 @@ log_and_run mkdir ${SPEC_TEST_DIR}/tests/proposals
 log_and_run rm -rf ${TMP_DIR}
 log_and_run mkdir ${TMP_DIR}
 
+log_and_run rm -rf ${JS_API_TEST_DIR}/tests
+log_and_run mkdir ${JS_API_TEST_DIR}/tests
+log_and_run mkdir ${JS_API_TEST_DIR}/tests/proposals
+
 ###############################################################################
 # Generate the spec tests.
 ###############################################################################
 
-log_and_run cd ${V8_DIR}/test/wasm-js/data/interpreter
+echo "Process spec"
+log_and_run cd ${TMP_DIR}
+log_and_run git clone https://github.com/WebAssembly/spec
+log_and_run cd spec/interpreter
+
 # The next step requires that ocaml is installed. See the README.md in
-# ${V8_DIR}/test/wasm-js/data/interpreter/.
+# https://github.com/WebAssembly/spec/tree/master/interpreter/.
 log_and_run make clean opt
 
-log_and_run cd ${V8_DIR}/test/wasm-js/data/test/core
+log_and_run cd ${TMP_DIR}/spec/test/core
 log_and_run cp *.wast ${SPEC_TEST_DIR}/tests/
 
-log_and_run ./run.py --wasm ${V8_DIR}/test/wasm-js/data/interpreter/wasm --out ${TMP_DIR}
+log_and_run ./run.py --wasm ${TMP_DIR}/spec/interpreter/wasm --out ${TMP_DIR}
 log_and_run cp ${TMP_DIR}/*.js ${SPEC_TEST_DIR}/tests/
 
+log_and_run cp -r ${TMP_DIR}/spec/test/js-api/* ${JS_API_TEST_DIR}/tests
+
 ###############################################################################
 # Generate the proposal tests.
 ###############################################################################
 
-repos='bulk-memory-operations reference-types'
+repos='bulk-memory-operations reference-types js-types'
 
 for repo in ${repos}; do
   echo "Process ${repo}"
+  echo ">> Process core tests"
   log_and_run cd ${TMP_DIR}
   log_and_run git clone https://github.com/WebAssembly/${repo}
   # Compile the spec interpreter to generate the .js test cases later.
@@ -76,13 +89,27 @@ for repo in ${repos}; do
   for abs_filename in ${TMP_DIR}/${repo}/test/core/*.wast; do
     rel_filename="$(basename -- $abs_filename)"
     test_name=${rel_filename%.wast}
-    spec_filename=${V8_DIR}/test/wasm-js/data/test/core/${rel_filename}
+    spec_filename=${TMP_DIR}/spec/test/core/${rel_filename}
     if [ ! -f "$spec_filename" ] || ! cmp -s $abs_filename $spec_filename ; then
       log_and_run cp ${rel_filename} ${SPEC_TEST_DIR}/tests/proposals/${repo}/
       log_and_run ./run.py --wasm ../../interpreter/wasm ${rel_filename} --out _build 2> /dev/null
     fi
   done
   log_and_run cp _build/*.js ${SPEC_TEST_DIR}/tests/proposals/${repo}/
+
+  echo ">> Process js-api tests"
+  log_and_run mkdir ${JS_API_TEST_DIR}/tests/proposals/${repo}
+  log_and_run cp -r ${TMP_DIR}/${repo}/test/js-api/* ${JS_API_TEST_DIR}/tests/proposals/${repo}
+  # Delete duplicate tests
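+  # Each proposal repo vendors a full copy of the upstream js-api tests; drop
+  # the ones identical to the spec copy so only genuinely modified tests remain.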
+  log_and_run cd ${JS_API_TEST_DIR}/tests
+  for spec_test_name in $(find ./ -name '*.any.js' -not -wholename '*/proposals/*'); do
+    proposal_test_name="proposals/${repo}/${spec_test_name}"
+    if [ -f "$proposal_test_name" ] && cmp -s $spec_test_name $proposal_test_name ; then
+      log_and_run rm $proposal_test_name
+    elif [ -f "$proposal_test_name" ]; then
+      echo "keep" $proposal_test_name
+    fi
+  done
 done
 
 ###############################################################################
@@ -95,6 +122,10 @@ echo "The following files will get uploaded:"
 ls -R tests
 echo
 
+cd ${JS_API_TEST_DIR}
+ls -R tests
+echo
+
 log_and_run rm -rf ${TMP_DIR}
 
 ###############################################################################
@@ -111,3 +142,6 @@ echo "* When the script asks you for your project-id, use 0."
 echo "****************************************************************************"
 log_and_run cd ${SPEC_TEST_DIR}
 log_and_run upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
+
+log_and_run cd ${JS_API_TEST_DIR}
+log_and_run upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 9a80a32344e663..1540f5f52a5dfb 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,4 +7,6 @@ A Smi balks into a war and says:
 The doubles heard this and started to unbox.
 The Smi looked at them when a crazy v8-autoroll account showed up...
 The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles..........
+The bartender starts to shake the bottles..............
+I can't add trailing whitespaces, so I'm adding this line.
+I'm starting to think that just adding trailing whitespaces might not be bad.
diff --git a/deps/v8/tools/windbg.js b/deps/v8/tools/windbg.js
index 3df14f4a2e812f..91877b4c616370 100644
--- a/deps/v8/tools/windbg.js
+++ b/deps/v8/tools/windbg.js
@@ -1,4 +1,4 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
+// Copyright 2019 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -11,6 +11,7 @@
 =============================================================================*/
 
 function help() {
+  if (supports_call_command()) {
   print("--------------------------------------------------------------------");
   print("  LIVE debugging only");
   print("--------------------------------------------------------------------");
@@ -27,11 +28,22 @@ function help() {
   print("  !jsbp() or !jsbp");
   print("      sets bp in v8::internal::Execution::Call");
   print("");
+  }
+
   print("--------------------------------------------------------------------");
-  print("  Managed heap");
+  print("  Setup of the script");
   print("--------------------------------------------------------------------");
+  print("  !set_module(\"module_name_no_extension\")");
+  print("      we'll try the usual suspects for where v8's code might have");
+  print("      been linked into, but you can also set it manually,");
+  print("      e.g. !set_module(\"v8_for_testing\")");
   print("  !set_iso(isolate_address)");
   print("      call this function before using !mem or other heap routines");
+  print("");
+
+  print("--------------------------------------------------------------------");
+  print("  Managed heap");
+  print("--------------------------------------------------------------------");
   print("  !mem or !mem(\"space1[ space2 ...]\")");
   print("      prints memory chunks from the 'space' owned by the heap in the");
   print("      isolate set by !set_iso; valid values for 'space' are:");
@@ -42,15 +54,59 @@ function help() {
   print("      prints name of the space and address of the MemoryChunk the");
   print("      'address' is from, e.g. !where(0x235cb869f9)");
   print("");
+
+  print("--------------------------------------------------------------------");
+  print("  Managed objects");
+  print("--------------------------------------------------------------------");
+  print("  !jot(tagged_addr, depth)");
+  print("      dumps the tree of objects using 'tagged_addr' as a root,");
+  print("      assumes that pointer fields are aligned at ptr_size boundary,");
+  print("      unspecified depth means 'unlimited',");
+  print("      e.g. !jot(0x235cb869f9, 2), !jot 0x235cb869f9");
+  print("  !jo_in_range(start_addr, end_addr)");
+  print("      prints address/map pointers of objects found inside the range");
+  print("      specified by 'start_addr' and 'end_addr', assumes the object");
+  print("      pointers to be aligned at ptr_size boundary,");
+  print("      e.g. !jo_in_range(0x235cb869f8 - 0x100, 0x235cb869f8 + 0x1a0");
+  print("  !jo_prev(address, max_slots = 100)");
+  print("      prints address and map pointer of the nearest object within");
+  print("      'max_slots' before the given 'address', assumes the object");
+  print("      pointers to be aligned at ptr_size boundary,");
+  print("      e.g. !jo_prev 0x235cb869f8, !jo_prev(0x235cb869f9, 16)");
+  print("  !jo_next(address, max_slots = 100)");
+  print("      prints address and map pointer of the nearest object within");
+  print("      'max_slots' following the given 'address', assumes the object");
+  print("      pointers to be aligned at ptr_size boundary,");
+  print("      e.g. !jo_next 0x235cb869f8, !jo_next(0x235cb869f9, 20)");
+  print("");
+
+  print("--------------------------------------------------------------------");
+  print("  Miscellaneous");
+  print("--------------------------------------------------------------------");
+  print("  !dp(address, count = 10)");
+  print("      similar to the built-in 'dp' command but augments output with");
+  print("      more data for values that are managed pointers, note that it");
+  print("      aligns the given 'address' at ptr_sized boundary,");
+  print("      e.g. !dp 0x235cb869f9, !dp(0x235cb869f9, 500), !dp @rsp");
+  print("  !handles(print_handles = false)");
+  print("      prints stats for handles, if 'print_handles' is true will");
+  print("      output all handles as well,");
+  print("      e.g. !handles, !handles(), !handles(true)");
+  print("");
+
   print("--------------------------------------------------------------------");
   print("  To run any function from this script (live or postmortem):");
   print("");
   print("  dx @$scriptContents.function_name(args)");
   print("      e.g. dx @$scriptContents.pointer_size()");
-  print("      e.g. dx @$scriptContents.module_name(\"v8_for_test\")");
+  print("      e.g. dx @$scriptContents.is_map(0x235cb869f9)");
   print("--------------------------------------------------------------------");
 }
 
+/*=============================================================================
+  On script load
+=============================================================================*/
+
 /*=============================================================================
   Output
 =============================================================================*/
@@ -67,27 +123,60 @@ function print_filtered(obj, filter) {
 }
 
 function inspect(s) {
-  for (var k of Reflect.ownKeys(s)) {
-    print(k + " => " + Reflect.get(s, k));
+  for (let k of Reflect.ownKeys(s)) {
+    // Attempting to print either of:
+    // 'Reflect.get(s, k)', 'typeof Reflect.get(s, k)', 's[k]'
+    // might throw: "Error: Object does not have a size",
+    // while 'typeof s[k]' just returns 'undefined', which at least lets the
+    // loop print the full list of properties. Oh well...
+    print(`${k} => ${typeof s[k]}`);
   }
 }
 
+function hex(number) {
+  return `0x${number.toString(16)}`;
+}
 
 /*=============================================================================
   Utils (postmortem and live)
 =============================================================================*/
+// WinDbg wraps large integers into objects that fail the isInteger test (and,
+// consequently, the isSafeInteger test, even if the original value was a safe
+// integer). I cannot figure out how to extract the original value from the
+// wrapper object, so we do it via conversion to a string. Brrr. Ugly.
+function int(val) {
+  if (typeof val === 'number') {
+    return Number.isInteger(val) ? val : undefined;
+  }
+  if (typeof val === 'object') {
+    let n = parseInt(val.toString());
+    return isNaN(n) ? undefined : n;
+  }
+  return undefined;
+}
+
+function is_live_session() {
+  // Assume that there is a single session (not sure how to get multiple ones
+  // going; maybe in kernel debugging?).
+  return (host.namespace.Debugger.Sessions[0].Attributes.Target.IsLiveTarget);
+}
+
+function is_TTD_session() {
+  // Assume that there is a single session (not sure how to get multiple ones
+  // going; maybe in kernel debugging?).
+  return (host.namespace.Debugger.Sessions[0].Attributes.Target.IsTTDTarget);
+}
+
+function supports_call_command() {
+  return is_live_session() && !is_TTD_session();
+}
+
 function cast(address, type_name) {
   return host.createTypedObject(address, module_name(), type_name);
 }
 
-// Failed to figure out how to get pointer size from the debugger's data model,
-// so we parse it out from sizeof(void*) output.
 function pointer_size() {
-  let ctl = host.namespace.Debugger.Utility.Control;
-  let sizeof = ctl.ExecuteCommand("?? sizeof(void*)");
-  let output = "";
-  for (output of sizeof) {} // unsigned int64 8
-  return parseInt(output.trim().split(" ").pop());
+  return host.namespace.Debugger.Sessions[0].Attributes.Machine.PointerSize;
 }
 
 function poi(address) {
@@ -105,8 +194,7 @@ function get_register(name) {
 
 // In debug builds v8 code is compiled into v8.dll, and in release builds
 // the code is compiled directly into the executable. If you are debugging some
-// other embedder, invoke module_name explicitly from the debugger and provide
-// the module name to use.
+// other embedder, run !set_module and provide the module name to use.
 const known_exes = ["d8", "unittests", "mksnapshot", "chrome", "chromium"];
 let module_name_cache;
 function module_name(use_this_module) {
@@ -138,10 +226,20 @@ function module_name(use_this_module) {
       }
     }
   }
+
+  if (!module_name_cache) {
+    print(`ERROR: Couldn't determine module name for v8's symbols.`);
+    print(`Please run !set_module (e.g. "!set_module \"v8_for_testing\"")`);
+  }
   return module_name_cache;
 };
 
 function make_call(fn) {
+  if (!supports_call_command()) {
+    print("ERROR: This command is supported in live sessions only!");
+    return;
+  }
+
   // .call resets current frame to the top one, so have to manually remember
   // and restore it after making the call.
   let curframe = host.namespace.Debugger.State.DebuggerVariables.curframe;
@@ -151,21 +249,6 @@ function make_call(fn) {
   return output;
 }
 
-// Skips the meta output about the .call invocation.
-function make_call_and_print_return(fn) {
-  let output = make_call(fn);
-  let print_line = false;
-  for (let line of output) {
-    if (print_line) {
-      print(line);
-      break;
-    }
-    if (line.includes(".call returns")) {
-      print_line = true;
-    }
-  }
-}
-
 
 /*=============================================================================
   Wrappers around V8's printing functions and other utils for live-debugging
@@ -206,11 +289,11 @@ function print_object_from_handle(handle_to_object) {
   point at any continuous memory that contains Object pointers.
 -----------------------------------------------------------------------------*/
 function print_objects_array(start_address, count) {
+  const ptr_size = pointer_size();
   let ctl = host.namespace.Debugger.Utility.Control;
-  let psize = pointer_size();
   let addr_int = start_address;
   for (let i = 0; i < count; i++) {
-    const addr_hex = `0x${addr_int.toString(16)}`;
+    const addr_hex = hex(addr_int);
 
     // TODO: Tried using createPointerObject but it throws unknown exception
     // from ChakraCore. Why?
@@ -223,7 +306,7 @@ function print_objects_array(start_address, count) {
     print(`${addr_hex} -> ${deref}`);
     print_object(deref);
 
-    addr_int += psize;
+    addr_int += ptr_size;
   }
 }
 
@@ -245,6 +328,168 @@ function set_isolate_address(addr) {
   isolate_address = addr;
 }
 
+function is_map(addr) {
+  let address = int(addr);
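+  // Tagged HeapObject pointers in V8 have the low bit set, so a valid tagged
+  // pointer is always odd; even values are Smis or raw words.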
+  if (!Number.isSafeInteger(address) || address % 2 == 0) return false;
+
+  // the first field in all objects, including maps, is a map pointer, but for
+  // maps the pointer is always the same - the meta map that points to itself.
+  const map_addr = int(poi(address - 1));
+  if (!Number.isSafeInteger(map_addr)) return false;
+
+  const map_map_addr = int(poi(map_addr - 1));
+  if (!Number.isSafeInteger(map_map_addr)) return false;
+
+  return (map_addr === map_map_addr);
+}
+
+function is_likely_object(addr) {
+  let address = int(addr);
+  if (!Number.isSafeInteger(address) || address % 2 == 0) return false;
+
+  // the first field in all objects must be a map pointer
+  return is_map(poi(address - 1));
+}
+
+function find_object_near(aligned_addr, max_distance, step_op) {
+  if (!step_op) {
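+    // No direction given: probe both directions and return the closer hit.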
+    const step = pointer_size();
+    const prev =
+      find_object_near(aligned_addr, max_distance, x => x - step);
+    const next =
+      find_object_near(aligned_addr, max_distance, x => x + step);
+
+    if (!prev) return next;
+    if (!next) return prev;
+    return (aligned_addr - prev <= next - aligned_addr) ? prev : next;
+  }
+
+  let maybe_map_addr = poi(aligned_addr);
+  let iters = 0;
+  while (maybe_map_addr && iters < max_distance) {
+    if (is_map(maybe_map_addr)) {
+      return aligned_addr;
+    }
+    aligned_addr = step_op(aligned_addr);
+    maybe_map_addr = poi(aligned_addr);
+    iters++;
+  }
+}
+
+function find_object_prev(addr, max_distance) {
+  if (!Number.isSafeInteger(int(addr))) return;
+
+  const ptr_size = pointer_size();
+  const aligned_addr = addr - (addr % ptr_size);
+  return find_object_near(aligned_addr, max_distance, x => x - ptr_size);
+}
+
+function find_object_next(addr, max_distance) {
+  if (!Number.isSafeInteger(int(addr))) return;
+
+  const ptr_size = pointer_size();
+  const aligned_addr = addr - (addr % ptr_size) + ptr_size;
+  return find_object_near(aligned_addr, max_distance, x => x + ptr_size);
+}
+
+function print_object_prev(addr, max_slots = 100) {
+  let obj_addr = find_object_prev(addr, max_slots);
+  if (!obj_addr) {
+    print(
+      `No object found within ${max_slots} slots prior to ${hex(addr)}`);
+  }
+  else {
+    print(
+      `found object: ${hex(obj_addr + 1)} : ${hex(poi(obj_addr))}`);
+  }
+}
+
+function print_object_next(addr, max_slots = 100) {
+  let obj_addr = find_object_next(addr, max_slots);
+  if (!obj_addr) {
+    print(
+      `No object found within ${max_slots} slots following ${hex(addr)}`);
+  }
+  else {
+    print(
+      `found object: ${hex(obj_addr + 1)} : ${hex(poi(obj_addr))}`);
+  }
+}
+
+// This function assumes that pointers to objects are stored at ptr-size aligned
+// boundaries.
+function print_objects_in_range(start, end) {
+  if (!Number.isSafeInteger(int(start)) || !Number.isSafeInteger(int(end))) {
+    return;
+  }
+
+  const ptr_size = pointer_size();
+  let iters = (end - start) / ptr_size;
+  let cur = start;
+  print(`===============================================`);
+  print(`objects in range ${hex(start)} - ${hex(end)}`);
+  print(`===============================================`);
+  let count = 0;
+  while (cur && cur < end) {
+    let obj = find_object_next(cur, iters);
+    if (obj) {
+      count++;
+      print(`${hex(obj + 1)} : ${hex(poi(obj))}`);
+      iters = (end - cur) / ptr_size;
+    }
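+    // If nothing was found, 'obj' is undefined, 'cur' becomes NaN, and the
+    // falsy NaN terminates the loop above.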
+    cur = obj + ptr_size;
+  }
+  print(`===============================================`);
+  print(`found ${count} objects in range ${hex(start)} - ${hex(end)}`);
+  print(`===============================================`);
+}
+
+// This function assumes the pointer fields to be ptr-size aligned.
+function print_objects_tree(root, depth_limit) {
+  if (!is_likely_object(root)) {
+    print(`${hex(root)} doesn't look like an object`);
+    return;
+  }
+
+  let path = [];
+
+  function impl(obj, depth, depth_limit) {
+    const ptr_size = pointer_size();
+    // print the current object and its map pointer
+    const this_obj =
+      `${" ".repeat(2 * depth)}${hex(obj)} : ${hex(poi(obj - 1))}`;
+    const cutoff = depth_limit && depth == depth_limit - 1;
+    print(`${this_obj}${cutoff ? " (...)" : ""}`);
+    if (cutoff) return;
+
+    path[depth] = obj;
+    path.length = depth + 1;
+    let cur = obj - 1 + ptr_size;
+
+    // Scan downwards until an address that is likely to be at the start of
+    // another object, in which case it's time to pop out from the recursion.
+    let iter = 0; // an arbitrary guard to avoid hanging the debugger
+    let seen = new Set(path);
+    while (!is_likely_object(cur + 1) && iter < 100) {
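+      // Treat each ptr_size slot as a potential tagged pointer; recurse into
+      // fields that look like objects, flagging cycles instead of recursing.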
+      iter++;
+      let field = poi(cur);
+      if (is_likely_object(field)) {
+        if (seen.has(field)) {
+          print(
+            `${" ".repeat(2 * depth + 2)}cycle: ${hex(cur)}->${hex(field)}`);
+        }
+        else {
+          impl(field, depth + 1, depth_limit);
+        }
+      }
+      cur += ptr_size;
+    }
+  }
+  print(`===============================================`);
+  impl(root, 0, depth_limit);
+  print(`===============================================`);
+}
+
 /*-----------------------------------------------------------------------------
     Memory in each Space is organized into a linked list of memory chunks
 -----------------------------------------------------------------------------*/
@@ -262,11 +507,10 @@ function print_memory_chunk_list(space_type, front, top, age_mark) {
   let cur = front;
   while (!cur.isNull) {
     let imm = cur.flags_ & NEVER_EVACUATE ? "*" : " ";
-    let addr = `0x${cur.address.toString(16)}`;
-    let area =
-      `0x${cur.area_start_.toString(16)} - 0x${cur.area_end_.toString(16)}`;
+    let addr = hex(cur.address);
+    let area = `${hex(cur.area_start_)} - ${hex(cur.area_end_)}`;
     let dt = `dt ${addr} ${module_name()}!v8::internal::MemoryChunk`;
-    print(`${imm}    ${addr}:\t ${area} (0x${cur.size_.toString(16)}) : ${dt}`);
+    print(`${imm}    ${addr}:\t ${area} (${hex(cur.size_)}) : ${dt}`);
     cur = cur.list_node_.next_;
   }
   print("");
@@ -307,18 +551,16 @@ function get_chunks() {
 }
 
 function find_chunk(address) {
-  // if 'address' is greater than Number.MAX_SAFE_INTEGER, comparison ops on it
-  // throw  "Error: 64 bit value loses precision on conversion to number"
-  try {
-    let chunks = get_chunks(isolate_address);
-    for (let c of chunks) {
-      let chunk = cast(c.address, "v8::internal::MemoryChunk");
-      if (address >= chunk.area_start_ && address < chunk.area_end_) {
-        return c;
-      }
+  if (!Number.isSafeInteger(int(address))) return undefined;
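+  // (Comparison ops on values above Number.MAX_SAFE_INTEGER would throw
+  // "Error: 64 bit value loses precision on conversion to number"; int()
+  // guards against that.)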
+
+  let chunks = get_chunks(isolate_address);
+  for (let c of chunks) {
+    let chunk = cast(c.address, "v8::internal::MemoryChunk");
+    if (address >= chunk.area_start_ && address < chunk.area_end_) {
+      return c;
     }
   }
-  catch (e) { }
+
   return undefined;
 }
 
@@ -391,12 +633,111 @@ function print_owning_space(address) {
   }
 
   let c = find_chunk(address);
-  let addr = `0x${address.toString(16)}`;
   if (c) {
-      print(`${addr} is in ${c.space} (chunk: 0x${c.address.toString(16)})`);
+      print(`${hex(address)} is in ${c.space} (chunk: ${hex(c.address)})`);
   }
   else {
-      print(`Address ${addr} is not in managed heap`);
+      print(`Address ${hex(address)} is not in managed heap`);
+  }
+}
+
+/*-----------------------------------------------------------------------------
+    Local handles and handle scopes
+-----------------------------------------------------------------------------*/
+function print_handles_data(print_handles = false) {
+  if (isolate_address == 0) {
+    print("Please call !set_iso(isolate_address) first.");
+    return;
+  }
+
+  let iso = cast(isolate_address, "v8::internal::Isolate");
+  let hsd = iso.handle_scope_data_;
+  let hsimpl = iso.handle_scope_implementer_;
+
+  // depth level
+  print(`Nested depth level: ${hsd.level}`);
+
+  // count of handles
+  const ptr_size = pointer_size();
+  let blocks = hsimpl.blocks_;
+  const block_size = 1022; // v8::internal::KB - 2
+  const first_block = blocks.data_.address;
+  const last_block = (blocks.size_ == 0)
+                     ? first_block
+                     : first_block + ptr_size * (blocks.size_ - 1);
+
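+  // All blocks but the last are full; the last is filled up to hsd.next, so
+  // convert its used byte span into a slot count.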
+  const count = (blocks.size_ == 0)
+              ? 0
+              : (blocks.size_ - 1) * block_size +
+                (hsd.next.address - poi(last_block)) / ptr_size;
+  print(`Currently tracking ${count} local handles`);
+
+  // print the handles
+  if (print_handles && count > 0) {
+    for (let block = first_block; block < last_block;
+         block += block_size * ptr_size) {
+      print(`Handles in block at ${hex(block)}`);
+      for (let i = 0; i < block_size; i++) {
+        const location = poi(block + i * ptr_size);
+        print(`  ${hex(location)}->${hex(poi(location))}`);
+      }
+    }
+
+    print(`Handles in block at ${hex(last_block)}`);
+    for (let location = poi(last_block); location < hsd.next.address;
+         location += ptr_size) {
+      print(`  ${hex(location)}->${hex(poi(location))}`);
+    }
+  }
+
+  // where will the next handle be allocated?
+  const prefix = "Next handle's location will be";
+  if (hsd.next.address < hsd.limit.address) {
+    print(`${prefix} at ${hex(hsd.next.address)}`);
+  }
+  else if (hsimpl.spare_) {
+    const location = hsimpl.spare_.address;
+    print(`${prefix} from the spare block at ${hex(location)}`);
+  }
+  else {
+    print(`${prefix} from a new block to be allocated`);
+  }
+}
+
+function pad_right(addr) {
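+  // Pad the hex string to a full pointer width (2 chars per byte) plus the
+  // "0x" prefix so columns line up.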
+  let addr_hex = hex(addr);
+  return `${addr_hex}${" ".repeat(pointer_size() * 2 + 2 - addr_hex.length)}`;
+}
+
+// TODO irinayat: would be nice to identify handles and smi as well
+function dp(addr, count = 10) {
+  if (isolate_address == 0) {
+    print(`To see where objects are located, run !set_iso.`);
+  }
+
+  if (!Number.isSafeInteger(int(addr))) {
+    print(`${hex(addr)} doesn't look like a valid address`);
+    return;
+  }
+
+  const ptr_size = pointer_size();
+  let aligned_addr = addr - (addr % ptr_size);
+  let val = poi(aligned_addr);
+  let iter = 0;
+  while (val && iter < count) {
+    const augm_map = is_map(val) ? "map" : "";
+    const augm_obj = is_likely_object(val) && !is_map(val) ? "obj" : "";
+    const augm_other = !is_map(val) && !is_likely_object(val) ? "val" : "";
+    let c = find_chunk(val);
+    const augm_space = c ? ` in ${c.space}` : "";
+    const augm = `${augm_map}${augm_obj}${augm_other}${augm_space}`;
+
+    print(`${pad_right(aligned_addr)} ${pad_right(val)}   ${augm}`);
+
+    aligned_addr += ptr_size;
+    val = poi(aligned_addr);
+    iter++;
   }
 }
 
@@ -412,8 +753,17 @@ function initializeScript() {
       new host.functionAlias(print_js_stack, "jst"),
 
       new host.functionAlias(set_isolate_address, "set_iso"),
+      new host.functionAlias(module_name, "set_module"),
       new host.functionAlias(print_memory, "mem"),
       new host.functionAlias(print_owning_space, "where"),
+      new host.functionAlias(print_handles_data, "handles"),
+
+      new host.functionAlias(print_object_prev, "jo_prev"),
+      new host.functionAlias(print_object_next, "jo_next"),
+      new host.functionAlias(print_objects_in_range, "jo_in_range"),
+      new host.functionAlias(print_objects_tree, "jot"),
+
+      new host.functionAlias(dp, "dp"),
 
       new host.functionAlias(set_user_js_bp, "jsbp"),
   ]