diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index a95f3cc34adf..99495dd46b6c 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,74 +1,3 @@ -2011-10-13: Version 3.7.0 - - Fixed array handling for Object.defineOwnProperty (ES5 conformance). - - Fixed issue 1757 (string slices of external strings). - - Fixed issue 1759 (ARM). - - Added flag --noclever-optimizations to disable some things that - caused trouble in the past. - - Added flag --stress-compaction for testing. - - Added flag --harmony to activate all experimental Harmony features. - - -2011-10-10: Version 3.6.6 - - Added a GC pause visualization tool. - - Added presubmit=no and werror=no flags to Makefile. - - ES5/Test262 conformance improvements. - - Fixed compilation issues with GCC 4.5.x (issue 1743). - - Bug fixes and performance improvements on all platforms. - - -2011-10-05: Version 3.6.5 - - New incremental garbage collector. - - Removed the hard heap size limit (soft heap size limit is still - 700/1400Mbytes by default). - - Implemented ES5 generic Array.prototype.toString (Issue 1361). - - V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415). - - Fixed x64 RegExp start-of-string bug (Issues 1746, 1748). - - Fixed propertyIsEnumerable for numeric properties (Issue 1692). - - Fixed the MinGW and Windows 2000 builds. - - Fixed "Prototype chain is not searched if named property handler does - not set a property" (Issue 1636). - - Made the RegExp.prototype object be a RegExp object (Issue 1217). - - Disallowed future reserved words as labels in strict mode. - - Fixed string split to correctly coerce the separator to a string - (Issue 1711). - - API: Added an optional source length field to the Extension - constructor. - - API: Added Debug::DisableAgent to match existing Debug::EnableAgent - (Issue 1573). - - Added "native" target to Makefile for the benefit of Linux distros. - - Fixed: debugger stops stepping outside evaluate (Issue 1639). - - More work on ES-Harmony proxies. Still hidden behind a flag. - - Bug fixes and performance improvements on all platforms. - - 2011-09-15: Version 3.6.4 Fixed d8's broken readline history. diff --git a/deps/v8/Makefile b/deps/v8/Makefile index 76f45d7f2e77..a7b27317a3ad 100644 --- a/deps/v8/Makefile +++ b/deps/v8/Makefile @@ -32,7 +32,6 @@ LINK ?= "g++" OUTDIR ?= out TESTJOBS ?= -j16 GYPFLAGS ?= -TESTFLAGS ?= # Special build flags. Use them like this: "make library=shared" @@ -51,10 +50,6 @@ endif ifeq ($(disassembler), on) GYPFLAGS += -Dv8_enable_disassembler=1 endif -# objectprint=on -ifeq ($(objectprint), on) - GYPFLAGS += -Dv8_object_print=1 -endif # snapshot=off ifeq ($(snapshot), off) GYPFLAGS += -Dv8_use_snapshot='false' @@ -77,21 +72,12 @@ endif ifdef soname_version GYPFLAGS += -Dsoname_version=$(soname_version) endif -# werror=no -ifeq ($(werror), no) - GYPFLAGS += -Dwerror='' -endif -# presubmit=no -ifeq ($(presubmit), no) - TESTFLAGS += --no-presubmit -endif # ----------------- available targets: -------------------- # - "dependencies": pulls in external dependencies (currently: GYP) # - any arch listed in ARCHES (see below) # - any mode listed in MODES # - every combination ., e.g. "ia32.release" -# - "native": current host's architecture, release mode # - any of the above with .check appended, e.g. "ia32.release.check" # - default (no target specified): build all ARCHES and MODES # - "check": build all targets and run all tests @@ -117,7 +103,7 @@ CHECKS = $(addsuffix .check,$(BUILDS)) # File where previously used GYPFLAGS are stored. 
ENVFILE = $(OUTDIR)/environment -.PHONY: all check clean dependencies $(ENVFILE).new native \ +.PHONY: all check clean dependencies $(ENVFILE).new \ $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \ $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) @@ -138,31 +124,21 @@ $(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@) python -c "print raw_input().capitalize()") \ builddir="$(shell pwd)/$(OUTDIR)/$@" -native: $(OUTDIR)/Makefile-native - @$(MAKE) -C "$(OUTDIR)" -f Makefile-native \ - CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \ - builddir="$(shell pwd)/$(OUTDIR)/$@" - # Test targets. check: all - @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ - $(TESTFLAGS) + @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) $(addsuffix .check,$(MODES)): $$(basename $$@) @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ - --mode=$(basename $@) $(TESTFLAGS) + --mode=$(basename $@) $(addsuffix .check,$(ARCHES)): $$(basename $$@) @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ - --arch=$(basename $@) $(TESTFLAGS) + --arch=$(basename $@) $(CHECKS): $$(basename $$@) @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ - --arch-and-mode=$(basename $@) $(TESTFLAGS) - -native.check: native - @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \ - --arch-and-mode=. $(TESTFLAGS) + --arch-and-mode=$(basename $@) # Clean targets. You can clean each architecture individually, or everything. $(addsuffix .clean,$(ARCHES)): @@ -171,12 +147,7 @@ $(addsuffix .clean,$(ARCHES)): rm -rf $(OUTDIR)/$(basename $@).debug find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete -native.clean: - rm -f $(OUTDIR)/Makefile-native - rm -rf $(OUTDIR)/native - find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete - -clean: $(addsuffix .clean,$(ARCHES)) native.clean +clean: $(addsuffix .clean,$(ARCHES)) # GYP file generation targets. $(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE) @@ -194,10 +165,6 @@ $(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE) -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \ -S-arm $(GYPFLAGS) -$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE) - build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ - -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS) - # Replaces the old with the new environment file if they're different, which # will trigger GYP to regenerate Makefiles. $(ENVFILE): $(ENVFILE).new diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index 1dcdce4a8ce6..f9c33caae5d9 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -288,6 +288,7 @@ V8_EXTRA_FLAGS = { 'gcc': { 'all': { 'WARNINGFLAGS': ['-Wall', + '-Werror', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor'] @@ -381,7 +382,7 @@ MKSNAPSHOT_EXTRA_FLAGS = { DTOA_EXTRA_FLAGS = { 'gcc': { 'all': { - 'WARNINGFLAGS': ['-Wno-uninitialized'], + 'WARNINGFLAGS': ['-Werror', '-Wno-uninitialized'], 'CCFLAGS': GCC_DTOA_EXTRA_CCFLAGS } }, diff --git a/deps/v8/benchmarks/spinning-balls/index.html b/deps/v8/benchmarks/spinning-balls/index.html deleted file mode 100644 index d01f31f37308..000000000000 --- a/deps/v8/benchmarks/spinning-balls/index.html +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - diff --git a/deps/v8/benchmarks/spinning-balls/splay-tree.js b/deps/v8/benchmarks/spinning-balls/splay-tree.js deleted file mode 100644 index a88e4cbce160..000000000000 --- a/deps/v8/benchmarks/spinning-balls/splay-tree.js +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2011 the V8 project authors. 
All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/** - * Constructs a Splay tree. A splay tree is a self-balancing binary - * search tree with the additional property that recently accessed - * elements are quick to access again. It performs basic operations - * such as insertion, look-up and removal in O(log(n)) amortized time. - * - * @constructor - */ -function SplayTree() { -}; - - -/** - * Pointer to the root node of the tree. - * - * @type {SplayTree.Node} - * @private - */ -SplayTree.prototype.root_ = null; - - -/** - * @return {boolean} Whether the tree is empty. - */ -SplayTree.prototype.isEmpty = function() { - return !this.root_; -}; - - -/** - * Inserts a node into the tree with the specified key and value if - * the tree does not already contain a node with the specified key. If - * the value is inserted, it becomes the root of the tree. - * - * @param {number} key Key to insert into the tree. - * @param {*} value Value to insert into the tree. - */ -SplayTree.prototype.insert = function(key, value) { - if (this.isEmpty()) { - this.root_ = new SplayTree.Node(key, value); - return; - } - // Splay on the key to move the last node on the search path for - // the key to the root of the tree. - this.splay_(key); - if (this.root_.key == key) { - return; - } - var node = new SplayTree.Node(key, value); - if (key > this.root_.key) { - node.left = this.root_; - node.right = this.root_.right; - this.root_.right = null; - } else { - node.right = this.root_; - node.left = this.root_.left; - this.root_.left = null; - } - this.root_ = node; -}; - - -/** - * Removes a node with the specified key from the tree if the tree - * contains a node with this key. The removed node is returned. If the - * key is not found, an exception is thrown. - * - * @param {number} key Key to find and remove from the tree. - * @return {SplayTree.Node} The removed node. 
- */ -SplayTree.prototype.remove = function(key) { - if (this.isEmpty()) { - throw Error('Key not found: ' + key); - } - this.splay_(key); - if (this.root_.key != key) { - throw Error('Key not found: ' + key); - } - var removed = this.root_; - if (!this.root_.left) { - this.root_ = this.root_.right; - } else { - var right = this.root_.right; - this.root_ = this.root_.left; - // Splay to make sure that the new root has an empty right child. - this.splay_(key); - // Insert the original right child as the right child of the new - // root. - this.root_.right = right; - } - return removed; -}; - - -/** - * Returns the node having the specified key or null if the tree doesn't contain - * a node with the specified key. - * - * @param {number} key Key to find in the tree. - * @return {SplayTree.Node} Node having the specified key. - */ -SplayTree.prototype.find = function(key) { - if (this.isEmpty()) { - return null; - } - this.splay_(key); - return this.root_.key == key ? this.root_ : null; -}; - - -/** - * @return {SplayTree.Node} Node having the maximum key value. - */ -SplayTree.prototype.findMax = function(opt_startNode) { - if (this.isEmpty()) { - return null; - } - var current = opt_startNode || this.root_; - while (current.right) { - current = current.right; - } - return current; -}; - - -/** - * @return {SplayTree.Node} Node having the maximum key value that - * is less than the specified key value. - */ -SplayTree.prototype.findGreatestLessThan = function(key) { - if (this.isEmpty()) { - return null; - } - // Splay on the key to move the node with the given key or the last - // node on the search path to the top of the tree. - this.splay_(key); - // Now the result is either the root node or the greatest node in - // the left subtree. - if (this.root_.key < key) { - return this.root_; - } else if (this.root_.left) { - return this.findMax(this.root_.left); - } else { - return null; - } -}; - - -/** - * @return {Array<*>} An array containing all the keys of tree's nodes. - */ -SplayTree.prototype.exportKeys = function() { - var result = []; - if (!this.isEmpty()) { - this.root_.traverse_(function(node) { result.push(node.key); }); - } - return result; -}; - - -/** - * Perform the splay operation for the given key. Moves the node with - * the given key to the top of the tree. If no node has the given - * key, the last node on the search path is moved to the top of the - * tree. This is the simplified top-down splaying algorithm from: - * "Self-adjusting Binary Search Trees" by Sleator and Tarjan - * - * @param {number} key Key to splay the tree on. - * @private - */ -SplayTree.prototype.splay_ = function(key) { - if (this.isEmpty()) { - return; - } - // Create a dummy node. The use of the dummy node is a bit - // counter-intuitive: The right child of the dummy node will hold - // the L tree of the algorithm. The left child of the dummy node - // will hold the R tree of the algorithm. Using a dummy node, left - // and right will always be nodes and we avoid special cases. - var dummy, left, right; - dummy = left = right = new SplayTree.Node(null, null); - var current = this.root_; - while (true) { - if (key < current.key) { - if (!current.left) { - break; - } - if (key < current.left.key) { - // Rotate right. - var tmp = current.left; - current.left = tmp.right; - tmp.right = current; - current = tmp; - if (!current.left) { - break; - } - } - // Link right. 
- right.left = current; - right = current; - current = current.left; - } else if (key > current.key) { - if (!current.right) { - break; - } - if (key > current.right.key) { - // Rotate left. - var tmp = current.right; - current.right = tmp.left; - tmp.left = current; - current = tmp; - if (!current.right) { - break; - } - } - // Link left. - left.right = current; - left = current; - current = current.right; - } else { - break; - } - } - // Assemble. - left.right = current.left; - right.left = current.right; - current.left = dummy.right; - current.right = dummy.left; - this.root_ = current; -}; - - -/** - * Constructs a Splay tree node. - * - * @param {number} key Key. - * @param {*} value Value. - */ -SplayTree.Node = function(key, value) { - this.key = key; - this.value = value; -}; - - -/** - * @type {SplayTree.Node} - */ -SplayTree.Node.prototype.left = null; - - -/** - * @type {SplayTree.Node} - */ -SplayTree.Node.prototype.right = null; - - -/** - * Performs an ordered traversal of the subtree starting at - * this SplayTree.Node. - * - * @param {function(SplayTree.Node)} f Visitor function. - * @private - */ -SplayTree.Node.prototype.traverse_ = function(f) { - var current = this; - while (current) { - var left = current.left; - if (left) left.traverse_(f); - f(current); - current = current.right; - } -}; - -SplayTree.prototype.traverseBreadthFirst = function (f) { - if (f(this.root_.value)) return; - - var stack = [this.root_]; - var length = 1; - - while (length > 0) { - var new_stack = new Array(stack.length * 2); - var new_length = 0; - for (var i = 0; i < length; i++) { - var n = stack[i]; - var l = n.left; - var r = n.right; - if (l) { - if (f(l.value)) return; - new_stack[new_length++] = l; - } - if (r) { - if (f(r.value)) return; - new_stack[new_length++] = r; - } - } - stack = new_stack; - length = new_length; - } -}; diff --git a/deps/v8/benchmarks/spinning-balls/v.js b/deps/v8/benchmarks/spinning-balls/v.js deleted file mode 100644 index 87366d939303..000000000000 --- a/deps/v8/benchmarks/spinning-balls/v.js +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -/** - * This function provides requestAnimationFrame in a cross browser way. - * http://paulirish.com/2011/requestanimationframe-for-smart-animating/ - */ -if ( !window.requestAnimationFrame ) { - window.requestAnimationFrame = ( function() { - return window.webkitRequestAnimationFrame || - window.mozRequestAnimationFrame || - window.oRequestAnimationFrame || - window.msRequestAnimationFrame || - function(callback, element) { - window.setTimeout( callback, 1000 / 60 ); - }; - } )(); -} - -var kNPoints = 8000; -var kNModifications = 20; -var kNVisiblePoints = 200; -var kDecaySpeed = 20; - -var kPointRadius = 4; -var kInitialLifeForce = 100; - -var livePoints = void 0; -var dyingPoints = void 0; -var scene = void 0; -var renderingStartTime = void 0; -var scene = void 0; -var pausePlot = void 0; -var splayTree = void 0; - - -function Point(x, y, z, payload) { - this.x = x; - this.y = y; - this.z = z; - - this.next = null; - this.prev = null; - this.payload = payload; - this.lifeForce = kInitialLifeForce; -} - - -Point.prototype.color = function () { - return "rgba(0, 0, 0, " + (this.lifeForce / kInitialLifeForce) + ")"; -}; - - -Point.prototype.decay = function () { - this.lifeForce -= kDecaySpeed; - return this.lifeForce <= 0; -}; - - -function PointsList() { - this.head = null; - this.count = 0; -} - - -PointsList.prototype.add = function (point) { - if (this.head !== null) this.head.prev = point; - point.next = this.head; - this.head = point; - this.count++; -} - - -PointsList.prototype.remove = function (point) { - if (point.next !== null) { - point.next.prev = point.prev; - } - if (point.prev !== null) { - point.prev.next = point.next; - } else { - this.head = point.next; - } - this.count--; -} - - -function GeneratePayloadTree(depth, tag) { - if (depth == 0) { - return { - array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], - string : 'String for key ' + tag + ' in leaf node' - }; - } else { - return { - left: GeneratePayloadTree(depth - 1, tag), - right: GeneratePayloadTree(depth - 1, tag) - }; - } -} - - -// To make the benchmark results predictable, we replace Math.random -// with a 100% deterministic alternative. -Math.random = (function() { - var seed = 49734321; - return function() { - // Robert Jenkins' 32 bit integer hash function. - seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff; - seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff; - seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff; - seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff; - seed = ((seed + 0xfd7046c5) + (seed << 3)) & 0xffffffff; - seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff; - return (seed & 0xfffffff) / 0x10000000; - }; -})(); - - -function GenerateKey() { - // The benchmark framework guarantees that Math.random is - // deterministic; see base.js. - return Math.random(); -} - -function CreateNewPoint() { - // Insert new node with a unique key. 
- var key; - do { key = GenerateKey(); } while (splayTree.find(key) != null); - - var point = new Point(Math.random() * 40 - 20, - Math.random() * 40 - 20, - Math.random() * 40 - 20, - GeneratePayloadTree(5, "" + key)); - - livePoints.add(point); - - splayTree.insert(key, point); - return key; -} - -function ModifyPointsSet() { - if (livePoints.count < kNPoints) { - for (var i = 0; i < kNModifications; i++) { - CreateNewPoint(); - } - } else if (kNModifications === 20) { - kNModifications = 80; - kDecay = 30; - } - - for (var i = 0; i < kNModifications; i++) { - var key = CreateNewPoint(); - var greatest = splayTree.findGreatestLessThan(key); - if (greatest == null) { - var point = splayTree.remove(key).value; - } else { - var point = splayTree.remove(greatest.key).value; - } - livePoints.remove(point); - point.payload = null; - dyingPoints.add(point); - } -} - - -function PausePlot(width, height, size) { - var canvas = document.createElement("canvas"); - canvas.width = this.width = width; - canvas.height = this.height = height; - document.body.appendChild(canvas); - - this.ctx = canvas.getContext('2d'); - - this.maxPause = 0; - this.size = size; - - // Initialize cyclic buffer for pauses. - this.pauses = new Array(this.size); - this.start = this.size; - this.idx = 0; -} - - -PausePlot.prototype.addPause = function (p) { - if (this.idx === this.size) { - this.idx = 0; - } - - if (this.idx === this.start) { - this.start++; - } - - if (this.start === this.size) { - this.start = 0; - } - - this.pauses[this.idx++] = p; -}; - - -PausePlot.prototype.iteratePauses = function (f) { - if (this.start < this.idx) { - for (var i = this.start; i < this.idx; i++) { - f.call(this, i - this.start, this.pauses[i]); - } - } else { - for (var i = this.start; i < this.size; i++) { - f.call(this, i - this.start, this.pauses[i]); - } - - var offs = this.size - this.start; - for (var i = 0; i < this.idx; i++) { - f.call(this, i + offs, this.pauses[i]); - } - } -}; - - -PausePlot.prototype.draw = function () { - var first = null; - this.iteratePauses(function (i, v) { - if (first === null) { - first = v; - } - this.maxPause = Math.max(v, this.maxPause); - }); - - var dx = this.width / this.size; - var dy = this.height / this.maxPause; - - this.ctx.save(); - this.ctx.clearRect(0, 0, 480, 240); - this.ctx.beginPath(); - this.ctx.moveTo(1, dy * this.pauses[this.start]); - var p = first; - this.iteratePauses(function (i, v) { - var delta = v - p; - var x = 1 + dx * i; - var y = dy * v; - this.ctx.lineTo(x, y); - if (delta > 2 * (p / 3)) { - this.ctx.font = "bold 12px sans-serif"; - this.ctx.textBaseline = "bottom"; - this.ctx.fillText(v + "ms", x + 2, y); - } - p = v; - }); - this.ctx.strokeStyle = "black"; - this.ctx.stroke(); - this.ctx.restore(); -} - - -function Scene(width, height) { - var canvas = document.createElement("canvas"); - canvas.width = width; - canvas.height = height; - document.body.appendChild(canvas); - - this.ctx = canvas.getContext('2d'); - this.width = canvas.width; - this.height = canvas.height; - - // Projection configuration. - this.x0 = canvas.width / 2; - this.y0 = canvas.height / 2; - this.z0 = 100; - this.f = 1000; // Focal length. - - // Camera is rotating around y-axis. - this.angle = 0; -} - - -Scene.prototype.drawPoint = function (x, y, z, color) { - // Rotate the camera around y-axis. - var rx = x * Math.cos(this.angle) - z * Math.sin(this.angle); - var ry = y; - var rz = x * Math.sin(this.angle) + z * Math.cos(this.angle); - - // Perform perspective projection. 
- var px = (this.f * rx) / (rz - this.z0) + this.x0; - var py = (this.f * ry) / (rz - this.z0) + this.y0; - - this.ctx.save(); - this.ctx.fillStyle = color - this.ctx.beginPath(); - this.ctx.arc(px, py, kPointRadius, 0, 2 * Math.PI, true); - this.ctx.fill(); - this.ctx.restore(); -}; - - -Scene.prototype.drawDyingPoints = function () { - var point_next = null; - for (var point = dyingPoints.head; point !== null; point = point_next) { - // Rotate the scene around y-axis. - scene.drawPoint(point.x, point.y, point.z, point.color()); - - point_next = point.next; - - // Decay the current point and remove it from the list - // if it's life-force ran out. - if (point.decay()) { - dyingPoints.remove(point); - } - } -}; - - -Scene.prototype.draw = function () { - this.ctx.save(); - this.ctx.clearRect(0, 0, this.width, this.height); - this.drawDyingPoints(); - this.ctx.restore(); - - this.angle += Math.PI / 90.0; -}; - - -function render() { - if (typeof renderingStartTime === 'undefined') { - renderingStartTime = Date.now(); - } - - ModifyPointsSet(); - - scene.draw(); - - var renderingEndTime = Date.now(); - var pause = renderingEndTime - renderingStartTime; - pausePlot.addPause(pause); - renderingStartTime = renderingEndTime; - - pausePlot.draw(); - - div.innerHTML = - livePoints.count + "/" + dyingPoints.count + " " + - pause + "(max = " + pausePlot.maxPause + ") ms" ; - - // Schedule next frame. - requestAnimationFrame(render); -} - - -function init() { - livePoints = new PointsList; - dyingPoints = new PointsList; - - splayTree = new SplayTree(); - - scene = new Scene(640, 480); - - div = document.createElement("div"); - document.body.appendChild(div); - - pausePlot = new PausePlot(480, 240, 160); -} - - -init(); -render(); diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi index a6579ed9e1bb..4e896e019a85 100644 --- a/deps/v8/build/common.gypi +++ b/deps/v8/build/common.gypi @@ -60,8 +60,6 @@ 'v8_enable_disassembler%': 0, - 'v8_object_print%': 0, - 'v8_enable_gdbjit%': 0, # Enable profiling support. Only required on Windows. @@ -74,7 +72,6 @@ 'v8_use_snapshot%': 'true', 'host_os%': '<(OS)', 'v8_use_liveobjectlist%': 'false', - 'werror%': '-Werror', # For a shared library build, results in "libv8-<(soname_version).so". 'soname_version%': '', @@ -87,9 +84,6 @@ ['v8_enable_disassembler==1', { 'defines': ['ENABLE_DISASSEMBLER',], }], - ['v8_object_print==1', { - 'defines': ['OBJECT_PRINT',], - }], ['v8_enable_gdbjit==1', { 'defines': ['ENABLE_GDB_JIT_INTERFACE',], }], @@ -190,9 +184,6 @@ }], ], }], - ['OS=="solaris"', { - 'defines': [ '__C99FEATURES__=1' ], # isinf() etc. 
- }], ], 'configurations': { 'Debug': { @@ -227,7 +218,7 @@ 'cflags': [ '-I/usr/local/include' ], }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', { - 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', + 'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor' ], }], ], @@ -270,6 +261,7 @@ }], ['OS=="win"', { 'msvs_configuration_attributes': { + 'OutputDirectory': '$(SolutionDir)$(ConfigurationName)', 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)', 'CharacterSet': '1', }, diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi index f24d9f834105..cb5e1330397b 100644 --- a/deps/v8/build/standalone.gypi +++ b/deps/v8/build/standalone.gypi @@ -35,30 +35,25 @@ 'msvs_multi_core_compile%': '1', 'variables': { 'variables': { - 'variables': { - 'conditions': [ - [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', { - # This handles the Linux platforms we generally deal with. Anything - # else gets passed through, which probably won't work very well; such - # hosts should pass an explicit target_arch to gyp. - 'host_arch%': - ' GetNativeFunction(v8::Handle name) { return v8::Handle(); } - const char* name() const { return name_; } - size_t source_length() const { return source_length_; } - const String::ExternalAsciiStringResource* source() const { - return &source_; } + const char* name() { return name_; } + const char* source() { return source_; } int dependency_count() { return dep_count_; } const char** dependencies() { return deps_; } void set_auto_enable(bool value) { auto_enable_ = value; } @@ -2495,8 +2476,7 @@ class V8EXPORT Extension { // NOLINT private: const char* name_; - size_t source_length_; // expected to initialize before source_ - ExternalAsciiStringResourceImpl source_; + const char* source_; int dep_count_; const char** deps_; bool auto_enable_; @@ -3518,9 +3498,9 @@ class V8EXPORT Context { * * v8::Locker is a scoped lock object. While it's * active (i.e. between its construction and destruction) the current thread is - * allowed to use the locked isolate. V8 guarantees that an isolate can be - * locked by at most one thread at any time. In other words, the scope of a - * v8::Locker is a critical section. + * allowed to use the locked isolate. V8 guarantees that an isolate can be locked + * by at most one thread at any time. In other words, the scope of a v8::Locker is + * a critical section. * * Sample usage: * \code @@ -3622,8 +3602,8 @@ class V8EXPORT Locker { static void StopPreemption(); /** - * Returns whether or not the locker for a given isolate, or default isolate - * if NULL is given, is locked by the current thread. + * Returns whether or not the locker for a given isolate, or default isolate if NULL is given, + * is locked by the current thread. 
*/ static bool IsLocked(Isolate* isolate = NULL); @@ -3789,7 +3769,7 @@ class Internals { static const int kFullStringRepresentationMask = 0x07; static const int kExternalTwoByteRepresentationTag = 0x02; - static const int kJSObjectType = 0xa6; + static const int kJSObjectType = 0xa3; static const int kFirstNonstringType = 0x80; static const int kForeignType = 0x85; diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript old mode 100755 new mode 100644 index f3ae8078ba90..52607f15c5e4 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -84,7 +84,6 @@ SOURCES = { hydrogen.cc hydrogen-instructions.cc ic.cc - incremental-marking.cc inspector.cc interpreter-irregexp.cc isolate.cc @@ -134,7 +133,6 @@ SOURCES = { v8utils.cc variables.cc version.cc - store-buffer.cc zone.cc extensions/gc-extension.cc extensions/externalize-string-extension.cc diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index a03b7411c3cd..479be5af15b1 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -185,10 +185,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) { int end_marker; heap_stats.end_marker = &end_marker; i::Isolate* isolate = i::Isolate::Current(); - // BUG(1718): - // Don't use the take_snapshot since we don't support HeapIterator here - // without doing a special GC. - isolate->heap()->RecordStats(&heap_stats, false); + isolate->heap()->RecordStats(&heap_stats, take_snapshot); i::V8::SetFatalError(); FatalErrorCallback callback = GetFatalErrorHandler(); { @@ -504,12 +501,9 @@ void RegisterExtension(Extension* that) { Extension::Extension(const char* name, const char* source, int dep_count, - const char** deps, - int source_length) + const char** deps) : name_(name), - source_length_(source_length >= 0 ? - source_length : (source ? 
strlen(source) : 0)), - source_(source, source_length_), + source_(source), dep_count_(dep_count), deps_(deps), auto_enable_(false) { } @@ -1413,7 +1407,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) { ScriptData* ScriptData::PreCompile(const char* input, int length) { i::Utf8ToUC16CharacterStream stream( reinterpret_cast(input), length); - return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping); + return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping); } @@ -1422,10 +1416,10 @@ ScriptData* ScriptData::PreCompile(v8::Handle source) { if (str->IsExternalTwoByteString()) { i::ExternalTwoByteStringUC16CharacterStream stream( i::Handle::cast(str), 0, str->length()); - return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping); + return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping); } else { i::GenericStringUC16CharacterStream stream(str, 0, str->length()); - return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping); + return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping); } } @@ -1787,7 +1781,7 @@ v8::Handle Message::GetStackTrace() const { static i::Handle CallV8HeapFunction(const char* name, i::Handle recv, int argc, - i::Handle argv[], + i::Object** argv[], bool* has_pending_exception) { i::Isolate* isolate = i::Isolate::Current(); i::Handle fmt_str = isolate->factory()->LookupAsciiSymbol(name); @@ -1804,10 +1798,10 @@ static i::Handle CallV8HeapFunction(const char* name, static i::Handle CallV8HeapFunction(const char* name, i::Handle data, bool* has_pending_exception) { - i::Handle argv[] = { data }; + i::Object** argv[1] = { data.location() }; return CallV8HeapFunction(name, i::Isolate::Current()->js_builtins_object(), - ARRAY_SIZE(argv), + 1, argv, has_pending_exception); } @@ -2627,11 +2621,10 @@ bool Value::Equals(Handle that) const { if (obj->IsJSObject() && other->IsJSObject()) { return *obj == *other; } - i::Handle args[] = { other }; + i::Object** args[1] = { other.location() }; EXCEPTION_PREAMBLE(isolate); i::Handle result = - CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args, - &has_pending_exception); + CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception); EXCEPTION_BAILOUT_CHECK(isolate, false); return *result == i::Smi::FromInt(i::EQUAL); } @@ -3211,10 +3204,21 @@ bool v8::Object::SetHiddenValue(v8::Handle key, ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle self = Utils::OpenHandle(this); - i::Handle key_obj = Utils::OpenHandle(*key); + i::Handle hidden_props(i::GetHiddenProperties( + self, + i::JSObject::ALLOW_CREATION)); + i::Handle key_obj = Utils::OpenHandle(*key); i::Handle value_obj = Utils::OpenHandle(*value); - i::Handle result = i::SetHiddenProperty(self, key_obj, value_obj); - return *result == *self; + EXCEPTION_PREAMBLE(isolate); + i::Handle obj = i::SetProperty( + hidden_props, + key_obj, + value_obj, + static_cast(None), + i::kNonStrictMode); + has_pending_exception = obj.is_null(); + EXCEPTION_BAILOUT_CHECK(isolate, false); + return true; } @@ -3224,9 +3228,20 @@ v8::Local v8::Object::GetHiddenValue(v8::Handle key) { return Local()); ENTER_V8(isolate); i::Handle self = Utils::OpenHandle(this); + i::Handle hidden_props(i::GetHiddenProperties( + self, + i::JSObject::OMIT_CREATION)); + if (hidden_props->IsUndefined()) { + return v8::Local(); + } i::Handle key_obj = Utils::OpenHandle(*key); - i::Handle result(self->GetHiddenProperty(*key_obj)); - if (result->IsUndefined()) return v8::Local(); + 
EXCEPTION_PREAMBLE(isolate); + i::Handle result = i::GetProperty(hidden_props, key_obj); + has_pending_exception = result.is_null(); + EXCEPTION_BAILOUT_CHECK(isolate, v8::Local()); + if (result->IsUndefined()) { + return v8::Local(); + } return Utils::ToLocal(result); } @@ -3237,9 +3252,15 @@ bool v8::Object::DeleteHiddenValue(v8::Handle key) { ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle self = Utils::OpenHandle(this); + i::Handle hidden_props(i::GetHiddenProperties( + self, + i::JSObject::OMIT_CREATION)); + if (hidden_props->IsUndefined()) { + return true; + } + i::Handle js_obj(i::JSObject::cast(*hidden_props)); i::Handle key_obj = Utils::OpenHandle(*key); - self->DeleteHiddenProperty(*key_obj); - return true; + return i::DeleteProperty(js_obj, key_obj)->IsTrue(); } @@ -3289,12 +3310,22 @@ void PrepareExternalArrayElements(i::Handle object, i::Handle array = isolate->factory()->NewExternalArray(length, array_type, data); - i::Handle external_array_map = - isolate->factory()->GetElementsTransitionMap( - object, - GetElementsKindFromExternalArrayType(array_type)); - - object->set_map(*external_array_map); + // If the object already has external elements, create a new, unique + // map if the element type is now changing, because assumptions about + // generated code based on the receiver's map will be invalid. + i::Handle elements(object->elements()); + bool cant_reuse_map = + elements->map()->IsUndefined() || + !elements->map()->has_external_array_elements() || + elements->map() != isolate->heap()->MapForExternalArrayType(array_type); + if (cant_reuse_map) { + i::Handle external_array_map = + isolate->factory()->GetElementsTransitionMap( + i::Handle(object->map()), + GetElementsKindFromExternalArrayType(array_type), + object->HasFastProperties()); + object->set_map(*external_array_map); + } object->set_elements(*array); } @@ -3453,8 +3484,7 @@ bool v8::Object::IsCallable() { } -Local Object::CallAsFunction(v8::Handle recv, - int argc, +Local Object::CallAsFunction(v8::Handle recv, int argc, v8::Handle argv[]) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::CallAsFunction()", @@ -3465,7 +3495,7 @@ Local Object::CallAsFunction(v8::Handle recv, i::Handle obj = Utils::OpenHandle(this); i::Handle recv_obj = Utils::OpenHandle(*recv); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); - i::Handle* args = reinterpret_cast*>(argv); + i::Object*** args = reinterpret_cast(argv); i::Handle fun = i::Handle(); if (obj->IsJSFunction()) { fun = i::Handle::cast(obj); @@ -3495,7 +3525,7 @@ Local Object::CallAsConstructor(int argc, i::HandleScope scope(isolate); i::Handle obj = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); - i::Handle* args = reinterpret_cast*>(argv); + i::Object*** args = reinterpret_cast(argv); if (obj->IsJSFunction()) { i::Handle fun = i::Handle::cast(obj); EXCEPTION_PREAMBLE(isolate); @@ -3537,7 +3567,7 @@ Local Function::NewInstance(int argc, HandleScope scope; i::Handle function = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); - i::Handle* args = reinterpret_cast*>(argv); + i::Object*** args = reinterpret_cast(argv); EXCEPTION_PREAMBLE(isolate); i::Handle returned = i::Execution::New(function, argc, args, &has_pending_exception); @@ -3558,7 +3588,7 @@ Local Function::Call(v8::Handle recv, int argc, i::Handle fun = Utils::OpenHandle(this); i::Handle recv_obj = Utils::OpenHandle(*recv); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); - 
i::Handle* args = reinterpret_cast*>(argv); + i::Object*** args = reinterpret_cast(argv); EXCEPTION_PREAMBLE(isolate); i::Handle returned = i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception); @@ -3769,11 +3799,10 @@ bool v8::String::IsExternalAscii() const { void v8::String::VerifyExternalStringResource( v8::String::ExternalStringResource* value) const { i::Handle str = Utils::OpenHandle(this); - const v8::String::ExternalStringResource* expected; + v8::String::ExternalStringResource* expected; if (i::StringShape(*str).IsExternalTwoByte()) { - const void* resource = - i::Handle::cast(str)->resource(); - expected = reinterpret_cast(resource); + void* resource = i::Handle::cast(str)->resource(); + expected = reinterpret_cast(resource); } else { expected = NULL; } @@ -3781,7 +3810,7 @@ void v8::String::VerifyExternalStringResource( } -const v8::String::ExternalAsciiStringResource* +v8::String::ExternalAsciiStringResource* v8::String::GetExternalAsciiStringResource() const { i::Handle str = Utils::OpenHandle(this); if (IsDeadCheck(str->GetIsolate(), @@ -3789,9 +3818,8 @@ const v8::String::ExternalAsciiStringResource* return NULL; } if (i::StringShape(*str).IsExternalAscii()) { - const void* resource = - i::Handle::cast(str)->resource(); - return reinterpret_cast(resource); + void* resource = i::Handle::cast(str)->resource(); + return reinterpret_cast(resource); } else { return NULL; } @@ -3981,7 +4009,7 @@ bool v8::V8::IdleNotification() { void v8::V8::LowMemoryNotification() { i::Isolate* isolate = i::Isolate::Current(); if (!isolate->IsInitialized()) return; - isolate->heap()->CollectAllAvailableGarbage(); + isolate->heap()->CollectAllGarbage(true); } @@ -4500,7 +4528,6 @@ bool v8::String::MakeExternal( bool v8::String::CanMakeExternal() { - if (!internal::FLAG_clever_optimizations) return false; i::Handle obj = Utils::OpenHandle(this); i::Isolate* isolate = obj->GetIsolate(); if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false; @@ -5453,12 +5480,6 @@ bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) { wait_for_connection); } - -void Debug::DisableAgent() { - return i::Isolate::Current()->debugger()->StopAgent(); -} - - void Debug::ProcessDebugMessages() { i::Execution::ProcessDebugMesssages(true); } diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 93cecf52b68c..3e19a45385b3 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -77,11 +77,6 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); Assembler::set_target_address_at(pc_, target); - if (host() != NULL && IsCodeTarget(rmode_)) { - Object* target_code = Code::GetCodeFromTargetAddress(target); - host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( - host(), this, HeapObject::cast(target_code)); - } } @@ -106,10 +101,6 @@ Object** RelocInfo::target_object_address() { void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, reinterpret_cast
(target)); - if (host() != NULL && target->IsHeapObject()) { - host()->GetHeap()->incremental_marking()->RecordWrite( - host(), &Memory::Object_at(pc_), HeapObject::cast(target)); - } } @@ -140,12 +131,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; - if (host() != NULL) { - // TODO(1550) We are passing NULL as a slot because cell can never be on - // evacuation candidate. - host()->GetHeap()->incremental_marking()->RecordWrite( - host(), NULL, cell); - } } @@ -162,11 +147,6 @@ void RelocInfo::set_call_address(Address target) { ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target; - if (host() != NULL) { - Object* target_code = Code::GetCodeFromTargetAddress(target); - host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( - host(), this, HeapObject::cast(target_code)); - } } @@ -215,7 +195,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() { void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - visitor->VisitEmbeddedPointer(this); + visitor->VisitPointer(target_object_address()); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { @@ -241,7 +221,7 @@ template void RelocInfo::Visit(Heap* heap) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - StaticVisitor::VisitEmbeddedPointer(heap, this); + StaticVisitor::VisitPointer(heap, target_object_address()); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 329493a340db..0ec36921ab77 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -78,9 +78,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() { void CpuFeatures::Probe() { - unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() | - CpuFeaturesImpliedByCompiler()); - ASSERT(supported_ == 0 || supported_ == standard_features); + ASSERT(!initialized_); #ifdef DEBUG initialized_ = true; #endif @@ -88,7 +86,8 @@ void CpuFeatures::Probe() { // Get the features implied by the OS and the compiler settings. This is the // minimal set of features which is also alowed for generated code in the // snapshot. - supported_ |= standard_features; + supported_ |= OS::CpuFeaturesImpliedByPlatform(); + supported_ |= CpuFeaturesImpliedByCompiler(); if (Serializer::enabled()) { // No probing for features if we might serialize (generate snapshot). @@ -2506,8 +2505,7 @@ void Assembler::dd(uint32_t data) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - // We do not try to reuse pool constants. - RelocInfo rinfo(pc_, rmode, data, NULL); + RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { // Adjust code for new modes. 
ASSERT(RelocInfo::IsDebugBreakSlot(rmode) @@ -2539,7 +2537,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { } ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { - RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL); + RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId()); ClearRecordedAstId(); reloc_info_writer.Write(&reloc_info_with_ast_id); } else { diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index d19b64da54a6..9a586936fe0c 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -1209,10 +1209,6 @@ class Assembler : public AssemblerBase { PositionsRecorder* positions_recorder() { return &positions_recorder_; } // Read/patch instructions - Instr instr_at(int pos) { return *reinterpret_cast(buffer_ + pos); } - void instr_at_put(int pos, Instr instr) { - *reinterpret_cast(buffer_ + pos) = instr; - } static Instr instr_at(byte* pc) { return *reinterpret_cast(pc); } static void instr_at_put(byte* pc, Instr instr) { *reinterpret_cast(pc) = instr; @@ -1267,6 +1263,12 @@ class Assembler : public AssemblerBase { int buffer_space() const { return reloc_info_writer.pos() - pc_; } + // Read/patch instructions + Instr instr_at(int pos) { return *reinterpret_cast(buffer_ + pos); } + void instr_at_put(int pos, Instr instr) { + *reinterpret_cast(buffer_ + pos) = instr; + } + // Decode branch instruction at pos and return branch target pos int target_at(int pos); diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 32b7896a525d..60d2081c2937 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -582,11 +582,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ bind(&convert_argument); __ push(function); // Preserve the function. __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); - __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); - } + __ EnterInternalFrame(); + __ push(r0); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + __ LeaveInternalFrame(); __ pop(function); __ mov(argument, r0); __ b(&argument_is_string); @@ -602,11 +601,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // create a string wrapper. __ bind(&gc_required); __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(argument); - __ CallRuntime(Runtime::kNewStringWrapper, 1); - } + __ EnterInternalFrame(); + __ push(argument); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + __ LeaveInternalFrame(); __ Ret(); } @@ -619,12 +617,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // -- sp[...]: constructor arguments // ----------------------------------- - Label slow, non_function_call; + Label non_function_call; // Check that the function is not a smi. __ JumpIfSmi(r1, &non_function_call); // Check that the function is a JSFunction. __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &slow); + __ b(ne, &non_function_call); // Jump to the function-specific construct stub. 
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); @@ -633,19 +631,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // r0: number of arguments // r1: called object - // r2: object type - Label do_call; - __ bind(&slow); - __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE)); - __ b(ne, &non_function_call); - __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); - __ jmp(&do_call); - __ bind(&non_function_call); - __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); - __ bind(&do_call); // Set expected number of arguments to zero (not changing r0). __ mov(r2, Operand(0, RelocInfo::NONE)); + __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ SetCallKind(r5, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); @@ -661,329 +650,321 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, Isolate* isolate = masm->isolate(); // Enter a construct frame. - { - FrameScope scope(masm, StackFrame::CONSTRUCT); - - // Preserve the two incoming parameters on the stack. - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); - __ push(r0); // Smi-tagged arguments count. - __ push(r1); // Constructor function. - - // Try to allocate the object without transitioning into C code. If any of - // the preconditions is not met, the code bails out to the runtime call. - Label rt_call, allocated; - if (FLAG_inline_new) { - Label undo_allocation; + __ EnterConstructFrame(); + + // Preserve the two incoming parameters on the stack. + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ push(r0); // Smi-tagged arguments count. + __ push(r1); // Constructor function. + + // Try to allocate the object without transitioning into C code. If any of the + // preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(isolate); - __ mov(r2, Operand(debug_step_in_fp)); - __ ldr(r2, MemOperand(r2)); - __ tst(r2, r2); - __ b(ne, &rt_call); + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(isolate); + __ mov(r2, Operand(debug_step_in_fp)); + __ ldr(r2, MemOperand(r2)); + __ tst(r2, r2); + __ b(ne, &rt_call); #endif - // Load the initial map and verify that it is in fact a map. - // r1: constructor function - __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ JumpIfSmi(r2, &rt_call); - __ CompareObjectType(r2, r3, r4, MAP_TYPE); - __ b(ne, &rt_call); - - // Check that the constructor is not constructing a JSFunction (see - // comments in Runtime_NewObject in runtime.cc). In which case the - // initial map's instance type would be JS_FUNCTION_TYPE. - // r1: constructor function - // r2: initial map - __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); - __ b(eq, &rt_call); + // Load the initial map and verify that it is in fact a map. + // r1: constructor function + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ JumpIfSmi(r2, &rt_call); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ b(ne, &rt_call); - if (count_constructions) { - Label allocate; - // Decrease generous allocation count. 
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - MemOperand constructor_count = - FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset); - __ ldrb(r4, constructor_count); - __ sub(r4, r4, Operand(1), SetCC); - __ strb(r4, constructor_count); - __ b(ne, &allocate); - - __ Push(r1, r2); - - __ push(r1); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); - - __ pop(r2); - __ pop(r1); - - __ bind(&allocate); - } + // Check that the constructor is not constructing a JSFunction (see comments + // in Runtime_NewObject in runtime.cc). In which case the initial map's + // instance type would be JS_FUNCTION_TYPE. + // r1: constructor function + // r2: initial map + __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); + __ b(eq, &rt_call); + + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + MemOperand constructor_count = + FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset); + __ ldrb(r4, constructor_count); + __ sub(r4, r4, Operand(1), SetCC); + __ strb(r4, constructor_count); + __ b(ne, &allocate); + + __ Push(r1, r2); + + __ push(r1); // constructor + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + + __ pop(r2); + __ pop(r1); + + __ bind(&allocate); + } - // Now allocate the JSObject on the heap. - // r1: constructor function - // r2: initial map - __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); - __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); - - // Allocated the JSObject, now initialize the fields. Map is set to - // initial map and properties and elements are set to empty fixed array. - // r1: constructor function - // r2: initial map - // r3: object size - // r4: JSObject (not tagged) - __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); - __ mov(r5, r4); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - - // Fill all the in-object properties with the appropriate filler. - // r1: constructor function - // r2: initial map - // r3: object size (in words) - // r4: JSObject (not tagged) - // r5: First in-object property of JSObject (not tagged) - __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. - ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + // Now allocate the JSObject on the heap. + // r1: constructor function + // r2: initial map + __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); + __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); + + // Allocated the JSObject, now initialize the fields. Map is set to initial + // map and properties and elements are set to empty fixed array. 
+ // r1: constructor function + // r2: initial map + // r3: object size + // r4: JSObject (not tagged) + __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); + __ mov(r5, r4); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + + // Fill all the in-object properties with the appropriate filler. + // r1: constructor function + // r2: initial map + // r3: object size (in words) + // r4: JSObject (not tagged) + // r5: First in-object property of JSObject (not tagged) + __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + { Label loop, entry; if (count_constructions) { - __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); - __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, - kBitsPerByte); - __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2)); - // r0: offset of first field after pre-allocated fields - if (FLAG_debug_code) { - __ cmp(r0, r6); - __ Assert(le, "Unexpected number of pre-allocated property fields."); - } - __ InitializeFieldsWithFiller(r5, r0, r7); // To allow for truncation. __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex); + } else { + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); } - __ InitializeFieldsWithFiller(r5, r6, r7); - - // Add the object tag to make the JSObject real, so that we can continue - // and jump into the continuation code at any time from now on. Any - // failures need to undo the allocation, so that the heap is in a - // consistent state and verifiable. - __ add(r4, r4, Operand(kHeapObjectTag)); - - // Check if a non-empty properties array is needed. Continue with - // allocated object if not fall through to runtime call if it is. - // r1: constructor function - // r4: JSObject - // r5: start of next object (not tagged) - __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); - // The field instance sizes contains both pre-allocated property fields - // and in-object properties. - __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); - __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, - kBitsPerByte); - __ add(r3, r3, Operand(r6)); - __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte, - kBitsPerByte); - __ sub(r3, r3, Operand(r6), SetCC); - - // Done if no extra properties are to be allocated. - __ b(eq, &allocated); - __ Assert(pl, "Property allocation count failed."); - - // Scale the number of elements by pointer size and add the header for - // FixedArrays to the start of the next object calculation from above. - // r1: constructor - // r3: number of elements in properties array - // r4: JSObject - // r5: start of next object - __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); - __ AllocateInNewSpace( - r0, - r5, - r6, - r2, - &undo_allocation, - static_cast(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); - - // Initialize the FixedArray. 
- // r1: constructor - // r3: number of elements in properties array - // r4: JSObject - // r5: FixedArray (not tagged) - __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); - __ mov(r2, r5); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); - __ mov(r0, Operand(r3, LSL, kSmiTagSize)); - __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); - - // Initialize the fields to undefined. - // r1: constructor function - // r2: First element of FixedArray (not tagged) - // r3: number of elements in properties array - // r4: JSObject - // r5: FixedArray (not tagged) - __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. - ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); - { Label loop, entry; - if (count_constructions) { - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); - } else if (FLAG_debug_code) { - __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); - __ cmp(r7, r8); - __ Assert(eq, "Undefined value not loaded."); - } - __ b(&entry); - __ bind(&loop); - __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); - __ bind(&entry); - __ cmp(r2, r6); - __ b(lt, &loop); - } - - // Store the initialized FixedArray into the properties field of - // the JSObject - // r1: constructor function - // r4: JSObject - // r5: FixedArray (not tagged) - __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. - __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); - - // Continue with JSObject being successfully allocated - // r1: constructor function - // r4: JSObject - __ jmp(&allocated); - - // Undo the setting of the new top so that the heap is verifiable. For - // example, the map's unused properties potentially do not match the - // allocated objects unused properties. - // r4: JSObject (previous new top) - __ bind(&undo_allocation); - __ UndoAllocationInNewSpace(r4, r5); + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r5, r6); + __ b(lt, &loop); } - // Allocate the new receiver object using the runtime call. - // r1: constructor function - __ bind(&rt_call); - __ push(r1); // argument for Runtime_NewObject - __ CallRuntime(Runtime::kNewObject, 1); - __ mov(r4, r0); + // Add the object tag to make the JSObject real, so that we can continue and + // jump into the continuation code at any time from now on. Any failures + // need to undo the allocation, so that the heap is in a consistent state + // and verifiable. + __ add(r4, r4, Operand(kHeapObjectTag)); - // Receiver for constructor call allocated. + // Check if a non-empty properties array is needed. Continue with allocated + // object if not fall through to runtime call if it is. + // r1: constructor function + // r4: JSObject + // r5: start of next object (not tagged) + __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); + // The field instance sizes contains both pre-allocated property fields and + // in-object properties. + __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); + __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8); + __ add(r3, r3, Operand(r6)); + __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8); + __ sub(r3, r3, Operand(r6), SetCC); + + // Done if no extra properties are to be allocated. 
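The check above decides whether a separate properties backing store is needed: the map's unused property fields plus its pre-allocated property fields, minus the in-object properties, gives the number of slots that do not fit in the object itself, and the byte-packed instance-sizes word is picked apart with Ubfx. A small model of that arithmetic, assuming a hypothetical byte layout:

// Illustrative model only: the byte-packed instance-sizes word and the
// "extra properties" calculation performed with Ubfx above. The packing used
// in main() is hypothetical, not V8's actual encoding.
#include <cassert>
#include <cstdint>

static int ByteField(uint32_t instance_sizes, int byte_index) {
  // Ubfx(dst, src, byte_index * 8, 8): extract one unsigned byte.
  return (instance_sizes >> (byte_index * 8)) & 0xFF;
}

int ExtraPropertySlots(uint32_t instance_sizes,
                       int unused_property_fields,
                       int pre_allocated_byte,
                       int in_object_byte) {
  int pre_allocated = ByteField(instance_sizes, pre_allocated_byte);
  int in_object = ByteField(instance_sizes, in_object_byte);
  // Slots that do not fit in the object go into a separate FixedArray; if the
  // result is zero the backing store can stay the empty fixed array.
  int extra = unused_property_fields + pre_allocated - in_object;
  assert(extra >= 0 && "property allocation count failed");
  return extra;
}

int main() {
  // Hypothetical packing: byte 1 = in-object properties,
  // byte 2 = pre-allocated property fields.
  uint32_t instance_sizes = (4u << 16) | (6u << 8) | 8u;
  return ExtraPropertySlots(instance_sizes, 5, 2, 1) == 3 ? 0 : 1;
}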
+ __ b(eq, &allocated); + __ Assert(pl, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: start of next object + __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ AllocateInNewSpace( + r0, + r5, + r6, + r2, + &undo_allocation, + static_cast(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); + + // Initialize the FixedArray. + // r1: constructor + // r3: number of elements in properties array // r4: JSObject - __ bind(&allocated); - __ push(r4); - - // Push the function and the allocated receiver from the stack. - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(r1, MemOperand(sp, kPointerSize)); - __ push(r1); // Constructor function. - __ push(r4); // Receiver. - - // Reload the number of arguments from the stack. + // r5: FixedArray (not tagged) + __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); + __ mov(r2, r5); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + __ mov(r0, Operand(r3, LSL, kSmiTagSize)); + __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); + + // Initialize the fields to undefined. // r1: constructor function - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); - - // Setup pointer to last argument. - __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); - - // Setup number of arguments for function call below - __ mov(r0, Operand(r3, LSR, kSmiTagSize)); - - // Copy arguments and receiver to the expression stack. - // r0: number of arguments - // r2: address of last argument (caller sp) + // r2: First element of FixedArray (not tagged) + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + { Label loop, entry; + if (count_constructions) { + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + } else if (FLAG_debug_code) { + __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); + __ cmp(r7, r8); + __ Assert(eq, "Undefined value not loaded."); + } + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r2, r6); + __ b(lt, &loop); + } + + // Store the initialized FixedArray into the properties field of + // the JSObject // r1: constructor function - // r3: number of arguments (smi-tagged) - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - Label loop, entry; - __ b(&entry); - __ bind(&loop); - __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1)); - __ push(ip); - __ bind(&entry); - __ sub(r3, r3, Operand(2), SetCC); - __ b(ge, &loop); + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. + __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); - // Call the function. 
- // r0: number of arguments + // Continue with JSObject being successfully allocated // r1: constructor function - if (is_api_function) { - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - Handle code = - masm->isolate()->builtins()->HandleApiCallConstruct(); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, - RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); - } else { - ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } + // r4: JSObject + __ jmp(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // r4: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(r4, r5); + } - // Pop the function from the stack. - // sp[0]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ pop(); + // Allocate the new receiver object using the runtime call. + // r1: constructor function + __ bind(&rt_call); + __ push(r1); // argument for Runtime_NewObject + __ CallRuntime(Runtime::kNewObject, 1); + __ mov(r4, r0); + + // Receiver for constructor call allocated. + // r4: JSObject + __ bind(&allocated); + __ push(r4); + + // Push the function and the allocated receiver from the stack. + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(r1, MemOperand(sp, kPointerSize)); + __ push(r1); // Constructor function. + __ push(r4); // Receiver. + + // Reload the number of arguments from the stack. + // r1: constructor function + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); + + // Setup pointer to last argument. + __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); + + // Setup number of arguments for function call below + __ mov(r0, Operand(r3, LSR, kSmiTagSize)); + + // Copy arguments and receiver to the expression stack. + // r0: number of arguments + // r2: address of last argument (caller sp) + // r1: constructor function + // r3: number of arguments (smi-tagged) + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + Label loop, entry; + __ b(&entry); + __ bind(&loop); + __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1)); + __ push(ip); + __ bind(&entry); + __ sub(r3, r3, Operand(2), SetCC); + __ b(ge, &loop); - // Restore context from the frame. - // r0: result - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, exit; - - // If the result is a smi, it is *not* an object in the ECMA sense. - // r0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ JumpIfSmi(r0, &use_receiver); - - // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. 
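The argument-copy loop works directly on the smi-tagged argument count: shifting the smi index left by kPointerSizeLog2 - 1 turns it into a byte offset in one step, and subtracting 2 walks back one argument at a time. A standalone sketch of that arithmetic for 32-bit pointers (the array and constants are illustrative only):

// Illustrative model only: why the copy loop shifts the smi-tagged count by
// (kPointerSizeLog2 - 1) and decrements it by 2 per argument.
#include <cstdio>

const int kSmiTagSize = 1;       // smi = value << 1 on 32-bit ARM
const int kPointerSizeLog2 = 2;  // 4-byte pointers

int main() {
  int argc = 3;
  int args[3] = {10, 20, 30};  // stand-in for the caller's argument area
  char* base = reinterpret_cast<char*>(args);

  int smi_index = argc << kSmiTagSize;  // smi-tagged count, as kept on the stack
  while ((smi_index -= 2) >= 0) {       // one smi step == one argument
    // LSL #(kPointerSizeLog2 - 1): smi index -> byte offset in a single shift.
    int value = *reinterpret_cast<int*>(
        base + (smi_index << (kPointerSizeLog2 - 1)));
    std::printf("push args[%d] = %d\n", smi_index >> kSmiTagSize, value);
  }
}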
- __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &exit); - - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ ldr(r0, MemOperand(sp)); - - // Remove receiver from the stack, remove caller arguments, and - // return. - __ bind(&exit); - // r0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); - - // Leave construct frame. + // Call the function. + // r0: number of arguments + // r1: constructor function + if (is_api_function) { + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + Handle code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, + RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); + } else { + ParameterCount actual(r0); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); } + // Pop the function from the stack. + // sp[0]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ pop(); + + // Restore context from the frame. + // r0: result + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; + + // If the result is a smi, it is *not* an object in the ECMA sense. + // r0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ JumpIfSmi(r0, &use_receiver); + + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, &exit); + + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ ldr(r0, MemOperand(sp)); + + // Remove receiver from the stack, remove caller arguments, and + // return. + __ bind(&exit); + // r0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); + __ LeaveConstructFrame(); __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1)); __ add(sp, sp, Operand(kPointerSize)); __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2); @@ -1016,64 +997,63 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r4: argv // r5-r7, cp may be clobbered - // Clear the context before we push it when entering the internal frame. + // Clear the context before we push it when entering the JS frame. __ mov(cp, Operand(0, RelocInfo::NONE)); // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Set up the context from the function argument. - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Set up the context from the function argument. + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - // Set up the roots register. 
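The construct-stub epilogue above implements the ECMA-262 section 13.2.2 rule: the constructor's return value is only used if it is an object in the spec sense, otherwise the freshly allocated receiver is the result. A compact model of that decision, with stand-in value types rather than real heap objects:

// Illustrative model only: result selection for a construct call.
#include <cstdio>

struct Value {
  bool is_smi;
  bool is_spec_object;  // instance type >= FIRST_SPEC_OBJECT_TYPE
  const char* name;
};

const Value* ConstructResult(const Value* returned, const Value* receiver) {
  if (returned->is_smi) return receiver;           // not an object in the ECMA sense
  if (!returned->is_spec_object) return receiver;  // e.g. a heap number or string
  return returned;                                 // genuine object: use it
}

int main() {
  Value receiver = {false, true, "new object"};
  Value smi = {true, false, "42"};
  Value obj = {false, true, "{answer: 42}"};
  std::printf("%s\n", ConstructResult(&smi, &receiver)->name);  // new object
  std::printf("%s\n", ConstructResult(&obj, &receiver)->name);  // {answer: 42}
}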
- ExternalReference roots_address = - ExternalReference::roots_address(masm->isolate()); - __ mov(r10, Operand(roots_address)); + // Set up the roots register. + ExternalReference roots_address = + ExternalReference::roots_address(masm->isolate()); + __ mov(r10, Operand(roots_address)); - // Push the function and the receiver onto the stack. - __ push(r1); - __ push(r2); + // Push the function and the receiver onto the stack. + __ push(r1); + __ push(r2); - // Copy arguments to the stack in a loop. - // r1: function - // r3: argc - // r4: argv, i.e. points to first arg - Label loop, entry; - __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2)); - // r2 points past last arg. - __ b(&entry); - __ bind(&loop); - __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter - __ ldr(r0, MemOperand(r0)); // dereference handle - __ push(r0); // push parameter - __ bind(&entry); - __ cmp(r4, r2); - __ b(ne, &loop); - - // Initialize all JavaScript callee-saved registers, since they will be seen - // by the garbage collector as part of handlers. - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); - __ mov(r5, Operand(r4)); - __ mov(r6, Operand(r4)); - __ mov(r7, Operand(r4)); - if (kR9Available == 1) { - __ mov(r9, Operand(r4)); - } + // Copy arguments to the stack in a loop. + // r1: function + // r3: argc + // r4: argv, i.e. points to first arg + Label loop, entry; + __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2)); + // r2 points past last arg. + __ b(&entry); + __ bind(&loop); + __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter + __ ldr(r0, MemOperand(r0)); // dereference handle + __ push(r0); // push parameter + __ bind(&entry); + __ cmp(r4, r2); + __ b(ne, &loop); + + // Initialize all JavaScript callee-saved registers, since they will be seen + // by the garbage collector as part of handlers. + __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + __ mov(r5, Operand(r4)); + __ mov(r6, Operand(r4)); + __ mov(r7, Operand(r4)); + if (kR9Available == 1) { + __ mov(r9, Operand(r4)); + } - // Invoke the code and pass argc as r0. - __ mov(r0, Operand(r3)); - if (is_construct) { - __ Call(masm->isolate()->builtins()->JSConstructCall()); - } else { - ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } - // Exit the JS frame and remove the parameters (except function), and - // return. - // Respect ABI stack constraint. + // Invoke the code and pass argc as r0. + __ mov(r0, Operand(r3)); + if (is_construct) { + __ Call(masm->isolate()->builtins()->JSConstructCall()); + } else { + ParameterCount actual(r0); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); } + + // Exit the JS frame and remove the parameters (except function), and return. + // Respect ABI stack constraint. + __ LeaveInternalFrame(); __ Jump(lr); // r0: result @@ -1092,27 +1072,26 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_LazyCompile(MacroAssembler* masm) { // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Preserve the function. - __ push(r1); - // Push call kind information. - __ push(r5); + // Preserve the function. + __ push(r1); + // Push call kind information. + __ push(r5); - // Push the function on the stack as the argument to the runtime function. - __ push(r1); - __ CallRuntime(Runtime::kLazyCompile, 1); - // Calculate the entry point. 
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Push the function on the stack as the argument to the runtime function. + __ push(r1); + __ CallRuntime(Runtime::kLazyCompile, 1); + // Calculate the entry point. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore call kind information. - __ pop(r5); - // Restore saved function. - __ pop(r1); + // Restore call kind information. + __ pop(r5); + // Restore saved function. + __ pop(r1); - // Tear down internal frame. - } + // Tear down temporary frame. + __ LeaveInternalFrame(); // Do a tail-call of the compiled function. __ Jump(r2); @@ -1121,27 +1100,26 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) { void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Preserve the function. - __ push(r1); - // Push call kind information. - __ push(r5); + // Preserve the function. + __ push(r1); + // Push call kind information. + __ push(r5); - // Push the function on the stack as the argument to the runtime function. - __ push(r1); - __ CallRuntime(Runtime::kLazyRecompile, 1); - // Calculate the entry point. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Push the function on the stack as the argument to the runtime function. + __ push(r1); + __ CallRuntime(Runtime::kLazyRecompile, 1); + // Calculate the entry point. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore call kind information. - __ pop(r5); - // Restore saved function. - __ pop(r1); + // Restore call kind information. + __ pop(r5); + // Restore saved function. + __ pop(r1); - // Tear down internal frame. - } + // Tear down temporary frame. + __ LeaveInternalFrame(); // Do a tail-call of the compiled function. __ Jump(r2); @@ -1150,13 +1128,12 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { - { - FrameScope scope(masm, StackFrame::INTERNAL); - // Pass the function and deoptimization type to the runtime system. - __ mov(r0, Operand(Smi::FromInt(static_cast(type)))); - __ push(r0); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); - } + __ EnterInternalFrame(); + // Pass the function and deoptimization type to the runtime system. + __ mov(r0, Operand(Smi::FromInt(static_cast(type)))); + __ push(r0); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + __ LeaveInternalFrame(); // Get the full codegen state from the stack and untag it -> r6. __ ldr(r6, MemOperand(sp, 0 * kPointerSize)); @@ -1196,10 +1173,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { // the registers without worrying about which of them contain // pointers. This seems a bit fragile. __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kNotifyOSR, 0); - } + __ EnterInternalFrame(); + __ CallRuntime(Runtime::kNotifyOSR, 0); + __ LeaveInternalFrame(); __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); __ Ret(); } @@ -1215,11 +1191,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame and push it as an // argument to the on-stack replacement function. 
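The hunks above and below repeatedly swap a scoped FrameScope for explicit EnterInternalFrame/LeaveInternalFrame pairs. The sketch below contrasts the two styles with hypothetical Assembler and FrameScope types (not V8's real classes); the scoped form emits the tear-down automatically when the C++ scope closes, while the explicit form relies on a matching Leave call on every path out of the block.

// Illustrative contrast only: scoped (RAII) frame versus explicit enter/leave.
#include <cstdio>

struct Assembler {
  void EnterInternalFrame() { std::printf("enter frame\n"); }
  void LeaveInternalFrame() { std::printf("leave frame\n"); }
  void CallRuntime(const char* fn) { std::printf("call %s\n", fn); }
};

class FrameScope {  // hypothetical RAII wrapper
 public:
  explicit FrameScope(Assembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~FrameScope() { masm_->LeaveInternalFrame(); }

 private:
  Assembler* masm_;
};

void ExplicitStyle(Assembler* masm) {
  masm->EnterInternalFrame();
  masm->CallRuntime("Runtime::kLazyCompile");
  masm->LeaveInternalFrame();  // must not be forgotten on any exit path
}

void ScopedStyle(Assembler* masm) {
  FrameScope scope(masm);  // frame torn down automatically at end of scope
  masm->CallRuntime("Runtime::kLazyCompile");
}

int main() {
  Assembler masm;
  ExplicitStyle(&masm);
  ScopedStyle(&masm);
}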
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); - __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); - } + __ EnterInternalFrame(); + __ push(r0); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + __ LeaveInternalFrame(); // If the result was -1 it means that we couldn't optimize the // function. Just return and continue in the unoptimized version. @@ -1301,23 +1276,17 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ b(ge, &shift_arguments); __ bind(&convert_to_object); + __ EnterInternalFrame(); // In order to preserve argument count. + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged. + __ push(r0); - { - // Enter an internal frame in order to preserve argument count. - FrameScope scope(masm, StackFrame::INTERNAL); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged. - __ push(r0); - - __ push(r2); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(r2, r0); - - __ pop(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - - // Exit the internal frame. - } + __ push(r2); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(r2, r0); + __ pop(r0); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ LeaveInternalFrame(); // Restore the function to r1, and the flag to r4. __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); __ mov(r4, Operand(0, RelocInfo::NONE)); @@ -1437,157 +1406,156 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { const int kRecvOffset = 3 * kPointerSize; const int kFunctionOffset = 4 * kPointerSize; - { - FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function - __ push(r0); - __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array - __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); - // Make r2 the space we have left. The stack might already be overflowed - // here which will cause r2 to become negative. - __ sub(r2, sp, r2); - // Check if the arguments will overflow the stack. - __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ b(gt, &okay); // Signed comparison. - - // Out of stack space. - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ push(r1); - __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); - // End of stack check. - - // Push current limit and index. - __ bind(&okay); - __ push(r0); // limit - __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index - __ push(r1); - - // Get the receiver. - __ ldr(r0, MemOperand(fp, kRecvOffset)); - - // Check that the function is a JS function (otherwise it must be a proxy). - Label push_receiver; - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &push_receiver); - - // Change context eagerly to get the right global object if necessary. - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - // Load the shared function info while the function is still in r1. - __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - - // Compute the receiver. - // Do not transform the receiver for strict mode functions. 
- Label call_to_object, use_global_receiver; - __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + - kSmiTagSize))); - __ b(ne, &push_receiver); - - // Do not transform the receiver for strict mode functions. - __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ b(ne, &push_receiver); + __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function + __ push(r0); + __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array + __ push(r0); + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); + // Make r2 the space we have left. The stack might already be overflowed + // here which will cause r2 to become negative. + __ sub(r2, sp, r2); + // Check if the arguments will overflow the stack. + __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ b(gt, &okay); // Signed comparison. + + // Out of stack space. + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ push(r1); + __ push(r0); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + // End of stack check. - // Compute the receiver in non-strict mode. - __ JumpIfSmi(r0, &call_to_object); - __ LoadRoot(r1, Heap::kNullValueRootIndex); - __ cmp(r0, r1); - __ b(eq, &use_global_receiver); - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r1); - __ b(eq, &use_global_receiver); + // Push current limit and index. + __ bind(&okay); + __ push(r0); // limit + __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index + __ push(r1); - // Check if the receiver is already a JavaScript object. - // r0: receiver - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &push_receiver); + // Get the receiver. + __ ldr(r0, MemOperand(fp, kRecvOffset)); - // Convert the receiver to a regular object. - // r0: receiver - __ bind(&call_to_object); - __ push(r0); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ b(&push_receiver); + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &push_receiver); - // Use the current global receiver object as the receiver. - __ bind(&use_global_receiver); - const int kGlobalOffset = - Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; - __ ldr(r0, FieldMemOperand(cp, kGlobalOffset)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); - __ ldr(r0, FieldMemOperand(r0, kGlobalOffset)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); - - // Push the receiver. - // r0: receiver - __ bind(&push_receiver); - __ push(r0); + // Change context eagerly to get the right global object if necessary. + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in r1. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - // Copy all arguments from the array to the stack. - Label entry, loop; - __ ldr(r0, MemOperand(fp, kIndexOffset)); - __ b(&entry); + // Compute the receiver. + // Do not transform the receiver for strict mode functions. 
+ Label call_to_object, use_global_receiver; + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &push_receiver); + + // Do not transform the receiver for strict mode functions. + __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); + __ b(ne, &push_receiver); + + // Compute the receiver in non-strict mode. + __ JumpIfSmi(r0, &call_to_object); + __ LoadRoot(r1, Heap::kNullValueRootIndex); + __ cmp(r0, r1); + __ b(eq, &use_global_receiver); + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r1); + __ b(eq, &use_global_receiver); + + // Check if the receiver is already a JavaScript object. + // r0: receiver + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, &push_receiver); + + // Convert the receiver to a regular object. + // r0: receiver + __ bind(&call_to_object); + __ push(r0); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ b(&push_receiver); + + // Use the current global receiver object as the receiver. + __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + __ ldr(r0, FieldMemOperand(cp, kGlobalOffset)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); + __ ldr(r0, FieldMemOperand(r0, kGlobalOffset)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); + + // Push the receiver. + // r0: receiver + __ bind(&push_receiver); + __ push(r0); - // Load the current argument from the arguments array and push it to the - // stack. - // r0: current argument index - __ bind(&loop); - __ ldr(r1, MemOperand(fp, kArgsOffset)); - __ push(r1); - __ push(r0); + // Copy all arguments from the array to the stack. + Label entry, loop; + __ ldr(r0, MemOperand(fp, kIndexOffset)); + __ b(&entry); - // Call the runtime to access the property in the arguments array. - __ CallRuntime(Runtime::kGetProperty, 2); - __ push(r0); + // Load the current argument from the arguments array and push it to the + // stack. + // r0: current argument index + __ bind(&loop); + __ ldr(r1, MemOperand(fp, kArgsOffset)); + __ push(r1); + __ push(r0); - // Use inline caching to access the arguments. - __ ldr(r0, MemOperand(fp, kIndexOffset)); - __ add(r0, r0, Operand(1 << kSmiTagSize)); - __ str(r0, MemOperand(fp, kIndexOffset)); + // Call the runtime to access the property in the arguments array. + __ CallRuntime(Runtime::kGetProperty, 2); + __ push(r0); - // Test if the copy loop has finished copying all the elements from the - // arguments object. - __ bind(&entry); - __ ldr(r1, MemOperand(fp, kLimitOffset)); - __ cmp(r0, r1); - __ b(ne, &loop); + // Use inline caching to access the arguments. + __ ldr(r0, MemOperand(fp, kIndexOffset)); + __ add(r0, r0, Operand(1 << kSmiTagSize)); + __ str(r0, MemOperand(fp, kIndexOffset)); - // Invoke the function. - Label call_proxy; - ParameterCount actual(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &call_proxy); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Test if the copy loop has finished copying all the elements from the + // arguments object. + __ bind(&entry); + __ ldr(r1, MemOperand(fp, kLimitOffset)); + __ cmp(r0, r1); + __ b(ne, &loop); + + // Invoke the function. 
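The receiver handling above for Function.prototype.apply only applies to non-strict, non-native callees: null and undefined become the global receiver, other primitives are boxed with TO_OBJECT, and real objects are pushed unchanged. A small model of that selection, using stand-in kinds rather than real heap objects:

// Illustrative model only: receiver selection for apply in non-strict mode.
#include <cstdio>
#include <string>

enum class Kind { kNull, kUndefined, kPrimitive, kObject };

std::string SelectApplyReceiver(Kind receiver, bool strict_or_native) {
  if (strict_or_native) return "as given";  // no transformation at all
  switch (receiver) {
    case Kind::kNull:
    case Kind::kUndefined:
      return "global receiver";
    case Kind::kPrimitive:
      return "ToObject(receiver)";
    case Kind::kObject:
      return "as given";
  }
  return "as given";
}

int main() {
  std::printf("%s\n", SelectApplyReceiver(Kind::kNull, false).c_str());
  std::printf("%s\n", SelectApplyReceiver(Kind::kPrimitive, false).c_str());
  std::printf("%s\n", SelectApplyReceiver(Kind::kObject, true).c_str());
}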
+ Label call_proxy; + ParameterCount actual(r0); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &call_proxy); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); - frame_scope.GenerateLeaveFrame(); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Jump(lr); + // Tear down the internal frame and remove function, receiver and args. + __ LeaveInternalFrame(); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Jump(lr); - // Invoke the function proxy. - __ bind(&call_proxy); - __ push(r1); // add function proxy as last argument - __ add(r0, r0, Operand(1)); - __ mov(r2, Operand(0, RelocInfo::NONE)); - __ SetCallKind(r5, CALL_AS_METHOD); - __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); - __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(r1); // add function proxy as last argument + __ add(r0, r0, Operand(1)); + __ mov(r2, Operand(0, RelocInfo::NONE)); + __ SetCallKind(r5, CALL_AS_METHOD); + __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); + __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); - // Tear down the internal frame and remove function, receiver and args. - } + __ LeaveInternalFrame(); __ add(sp, sp, Operand(3 * kPointerSize)); __ Jump(lr); } diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 44923a1843d6..e65f6d9b69c0 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -189,72 +189,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } -void FastNewBlockContextStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [sp]: function. - // [sp + kPointerSize]: serialized scope info - - // Try to allocate the context in new space. - Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace(FixedArray::SizeFor(length), - r0, r1, r2, &gc, TAG_OBJECT); - - // Load the function from the stack. - __ ldr(r3, MemOperand(sp, 0)); - - // Load the serialized scope info from the stack. - __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); - - // Setup the object header. - __ LoadRoot(r2, Heap::kBlockContextMapRootIndex); - __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ mov(r2, Operand(Smi::FromInt(length))); - __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); - - // If this block context is nested in the global context we get a smi - // sentinel instead of a function. The block context should get the - // canonical empty function of the global context as its closure which - // we still have to look up. - Label after_sentinel; - __ JumpIfNotSmi(r3, &after_sentinel); - if (FLAG_debug_code) { - const char* message = "Expected 0 as a Smi sentinel"; - __ cmp(r3, Operand::Zero()); - __ Assert(eq, message); - } - __ ldr(r3, GlobalObjectOperand()); - __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset)); - __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); - __ bind(&after_sentinel); - - // Setup the fixed slots. - __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); - __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); - __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); - - // Copy the global object from the previous context. 
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX)); - __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX)); - - // Initialize the rest of the slots to the hole value. - __ LoadRoot(r1, Heap::kTheHoleValueRootIndex); - for (int i = 0; i < slots_; i++) { - __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS)); - } - - // Remove the on-stack argument and return. - __ mov(cp, r0); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - // Need to collect. Call into runtime system. - __ bind(&gc); - __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); -} - - void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // @@ -904,11 +838,9 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); - } + // Call C routine that may not cause GC or other trouble. + __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), + 0, 2); // Store answer in the overwritable heap number. Double returned in // registers r0 and r1 or in d0. if (masm->use_eabi_hardfloat()) { @@ -925,29 +857,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( } -bool WriteInt32ToHeapNumberStub::IsPregenerated() { - // These variants are compiled ahead of time. See next method. - if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { - return true; - } - if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { - return true; - } - // Other register combinations are generated as and when they are needed, - // so it is unsafe to call them from stubs (we can't generate a stub while - // we are generating a stub). - return false; -} - - -void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { - WriteInt32ToHeapNumberStub stub1(r1, r0, r2); - WriteInt32ToHeapNumberStub stub2(r2, r0, r3); - stub1.GetCode()->set_is_pregenerated(true); - stub2.GetCode()->set_is_pregenerated(true); -} - - // See comment for class. void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { Label max_negative_int; @@ -1288,8 +1197,6 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } - - AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 0, 2); __ pop(pc); // Return. @@ -1307,7 +1214,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, // If either operand is a JS object or an oddball value, then they are // not equal since their pointers are different. // There is no test for undetectability in strict equality. - STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); Label first_non_object; // Get the type of the first operand into r2 and compare it with // FIRST_SPEC_OBJECT_TYPE. @@ -1699,8 +1606,6 @@ void CompareStub::Generate(MacroAssembler* masm) { // The stub expects its argument in the tos_ register and returns its result in // it, too: zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { - // This stub overrides SometimesSetsUpAFrame() to return false. That means - // we cannot call anything that could cause a GC from this stub. // This stub uses VFP3 instructions. 
CpuFeatures::Scope scope(VFP3); @@ -1808,41 +1713,6 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { } -void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { - // We don't allow a GC during a store buffer overflow so there is no need to - // store the registers in any particular way, but we do have to store and - // restore them. - __ stm(db_w, sp, kCallerSaved | lr.bit()); - if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP3); - __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); - for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vstr(reg, MemOperand(sp, i * kDoubleSize)); - } - } - const int argument_count = 1; - const int fp_argument_count = 0; - const Register scratch = r1; - - AllowExternalCallThatCantCauseGC scope(masm); - __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); - __ mov(r0, Operand(ExternalReference::isolate_address())); - __ CallCFunction( - ExternalReference::store_buffer_overflow_function(masm->isolate()), - argument_count); - if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP3); - for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vldr(reg, MemOperand(sp, i * kDoubleSize)); - } - __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); - } - __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). -} - - void UnaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name = NULL; // Make g++ happy. @@ -1996,13 +1866,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r1, Operand(r0)); - __ pop(r0); - } + __ EnterInternalFrame(); + __ push(r0); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r1, Operand(r0)); + __ pop(r0); + __ LeaveInternalFrame(); __ bind(&heapnumber_allocated); __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); @@ -2043,14 +1912,13 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); // Push the heap number, not the untagged int32. - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r2, r0); // Move the new heap number into r2. - // Get the heap number into r0, now that the new heap number is in r2. - __ pop(r0); - } + __ EnterInternalFrame(); + __ push(r0); // Push the heap number, not the untagged int32. + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r2, r0); // Move the new heap number into r2. + // Get the heap number into r0, now that the new heap number is in r2. + __ pop(r0); + __ LeaveInternalFrame(); // Convert the heap number in r0 to an untagged integer in r1. // This can't go slow-case because it's the same number we already @@ -2160,10 +2028,6 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( void BinaryOpStub::Generate(MacroAssembler* masm) { - // Explicitly allow generation of nested stubs. It is safe here because - // generation code does not use any raw pointers. 
- AllowStubCallsScope allow_stub_calls(masm, true); - switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -3269,11 +3133,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); - __ CallRuntime(RuntimeFunction(), 1); - } + __ EnterInternalFrame(); + __ push(r0); + __ CallRuntime(RuntimeFunction(), 1); + __ LeaveInternalFrame(); __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ Ret(); @@ -3286,15 +3149,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // We return the value in d2 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - { - FrameScope scope(masm, StackFrame::INTERNAL); - - // Allocate an aligned object larger than a HeapNumber. - ASSERT(4 * kPointerSize >= HeapNumber::kSize); - __ mov(scratch0, Operand(4 * kPointerSize)); - __ push(scratch0); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - } + __ EnterInternalFrame(); + + // Allocate an aligned object larger than a HeapNumber. + ASSERT(4 * kPointerSize >= HeapNumber::kSize); + __ mov(scratch0, Operand(4 * kPointerSize)); + __ push(scratch0); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + __ LeaveInternalFrame(); __ Ret(); } } @@ -3311,7 +3173,6 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, } else { __ vmov(r0, r1, d2); } - AllowExternalCallThatCantCauseGC scope(masm); switch (type_) { case TranscendentalCache::SIN: __ CallCFunction(ExternalReference::math_sin_double_function(isolate), @@ -3407,14 +3268,11 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); __ PrepareCallCFunction(1, 1, scratch); __ SetCallCDoubleArguments(double_base, exponent); - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::power_double_int_function(masm->isolate()), - 1, 1); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); - } + __ CallCFunction( + ExternalReference::power_double_int_function(masm->isolate()), + 1, 1); + __ pop(lr); + __ GetCFunctionDoubleResult(double_result); __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(r0, heapnumber); @@ -3440,14 +3298,11 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); __ PrepareCallCFunction(0, 2, scratch); __ SetCallCDoubleArguments(double_base, double_exponent); - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), - 0, 2); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); - } + __ CallCFunction( + ExternalReference::power_double_double_function(masm->isolate()), + 0, 2); + __ pop(lr); + __ GetCFunctionDoubleResult(double_result); __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(r0, heapnumber); @@ -3464,37 +3319,6 @@ bool CEntryStub::NeedsImmovableCode() { } -bool CEntryStub::IsPregenerated() { - return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && - result_size_ == 1; -} - - -void CodeStub::GenerateStubsAheadOfTime() { - CEntryStub::GenerateAheadOfTime(); - WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(); - StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); - 
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); -} - - -void CodeStub::GenerateFPStubs() { - CEntryStub save_doubles(1, kSaveFPRegs); - Handle code = save_doubles.GetCode(); - code->set_is_pregenerated(true); - StoreBufferOverflowStub stub(kSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); - code->GetIsolate()->set_fp_stubs_generated(true); -} - - -void CEntryStub::GenerateAheadOfTime() { - CEntryStub stub(1, kDontSaveFPRegs); - Handle code = stub.GetCode(); - code->set_is_pregenerated(true); -} - - void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { __ Throw(r0); } @@ -3606,7 +3430,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ b(eq, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. - __ mov(r3, Operand(isolate->factory()->the_hole_value())); + __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); + __ ldr(r3, MemOperand(ip)); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(ip)); @@ -3644,7 +3469,6 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ sub(r6, r6, Operand(kPointerSize)); // Enter the exit frame that transitions from JavaScript to C++. - FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame(save_doubles_); // Setup argc and the builtin function in callee-saved registers. @@ -3789,7 +3613,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // saved values before returning a failure to C. // Clear any pending exceptions. - __ mov(r5, Operand(isolate->factory()->the_hole_value())); + __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); + __ ldr(r5, MemOperand(ip)); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ str(r5, MemOperand(ip)); @@ -4026,11 +3851,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } else { - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(r0, r1); - __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); - } + __ EnterInternalFrame(); + __ Push(r0, r1); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + __ LeaveInternalFrame(); __ cmp(r0, Operand::Zero()); __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); @@ -4426,6 +4250,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); #else // V8_INTERPRETED_REGEXP + if (!FLAG_regexp_entry_native) { + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + return; + } // Stack frame on entry. // sp[0]: last_match_info (expected JSArray) @@ -4652,7 +4480,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // For arguments 4 and 3 get string length, calculate start of string data and // calculate the shift of the index (0 for ASCII and 1 for two byte). - __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ eor(r3, r3, Operand(1)); // Load the length from the original subject string from the previous stack // frame. Therefore we have to use fp, which points exactly to two pointer @@ -4703,7 +4532,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. 
Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r1, Operand(isolate->factory()->the_hole_value())); + __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate))); + __ ldr(r1, MemOperand(r1, 0)); __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(r2, 0)); @@ -4745,25 +4575,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ str(r2, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastCaptureCountOffset)); // Store last subject and last input. + __ mov(r3, last_match_info_elements); // Moved up to reduce latency. __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastSubjectOffset)); - __ mov(r2, subject); - __ RecordWriteField(last_match_info_elements, - RegExpImpl::kLastSubjectOffset, - r2, - r7, - kLRHasNotBeenSaved, - kDontSaveFPRegs); + __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastInputOffset)); - __ RecordWriteField(last_match_info_elements, - RegExpImpl::kLastInputOffset, - subject, - r7, - kLRHasNotBeenSaved, - kDontSaveFPRegs); + __ mov(r3, last_match_info_elements); + __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = @@ -4891,22 +4712,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } -void CallFunctionStub::FinishCode(Code* code) { - code->set_has_function_cache(false); -} - - -void CallFunctionStub::Clear(Heap* heap, Address address) { - UNREACHABLE(); -} - - -Object* CallFunctionStub::GetCachedValue(Address address) { - UNREACHABLE(); - return NULL; -} - - void CallFunctionStub::Generate(MacroAssembler* masm) { Label slow, non_function; @@ -5084,26 +4889,23 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { __ cmp(result_, Operand(ip)); __ b(ne, &call_runtime_); // Get the first of the two strings and load its instance type. - __ ldr(result_, FieldMemOperand(object_, ConsString::kFirstOffset)); + __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); __ jmp(&assure_seq_string); // SlicedString, unpack and add offset. __ bind(&sliced_string); __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset)); __ add(scratch_, scratch_, result_); - __ ldr(result_, FieldMemOperand(object_, SlicedString::kParentOffset)); + __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset)); // Assure that we are dealing with a sequential string. Go to runtime if not. __ bind(&assure_seq_string); - __ ldr(result_, FieldMemOperand(result_, HeapObject::kMapOffset)); + __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); // Check that parent is not an external string. Go to runtime otherwise. STATIC_ASSERT(kSeqStringTag == 0); __ tst(result_, Operand(kStringRepresentationMask)); __ b(ne, &call_runtime_); - // Actually fetch the parent string if it is confirmed to be sequential. - STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset); - __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset)); // Check for 1-byte or 2-byte string. __ bind(&flat_string); @@ -6623,13 +6425,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { // Call the runtime system in a fresh internal frame. 
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(r1, r0); - __ mov(ip, Operand(Smi::FromInt(op_))); - __ push(ip); - __ CallExternalReference(miss, 3); - } + __ EnterInternalFrame(); + __ Push(r1, r0); + __ mov(ip, Operand(Smi::FromInt(op_))); + __ push(ip); + __ CallExternalReference(miss, 3); + __ LeaveInternalFrame(); // Compute the entry point of the rewritten stub. __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Restore registers. @@ -6812,8 +6613,6 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { - // This stub overrides SometimesSetsUpAFrame() to return false. That means - // we cannot call anything that could cause a GC from this stub. // Registers: // result: StringDictionary to probe // r1: key @@ -6903,267 +6702,6 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } -struct AheadOfTimeWriteBarrierStubList { - Register object, value, address; - RememberedSetAction action; -}; - - -struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { - // Used in RegExpExecStub. - { r6, r4, r7, EMIT_REMEMBERED_SET }, - { r6, r2, r7, EMIT_REMEMBERED_SET }, - // Used in CompileArrayPushCall. - // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. - // Also used in KeyedStoreIC::GenerateGeneric. - { r3, r4, r5, EMIT_REMEMBERED_SET }, - // Used in CompileStoreGlobal. - { r4, r1, r2, OMIT_REMEMBERED_SET }, - // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. - { r1, r2, r3, EMIT_REMEMBERED_SET }, - { r3, r2, r1, EMIT_REMEMBERED_SET }, - // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. - { r2, r1, r3, EMIT_REMEMBERED_SET }, - { r3, r1, r2, EMIT_REMEMBERED_SET }, - // KeyedStoreStubCompiler::GenerateStoreFastElement. - { r4, r2, r3, EMIT_REMEMBERED_SET }, - // Null termination. - { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} -}; - - -bool RecordWriteStub::IsPregenerated() { - for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; - !entry->object.is(no_reg); - entry++) { - if (object_.is(entry->object) && - value_.is(entry->value) && - address_.is(entry->address) && - remembered_set_action_ == entry->action && - save_fp_regs_mode_ == kDontSaveFPRegs) { - return true; - } - } - return false; -} - - -bool StoreBufferOverflowStub::IsPregenerated() { - return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated(); -} - - -void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { - StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode()->set_is_pregenerated(true); -} - - -void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { - for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; - !entry->object.is(no_reg); - entry++) { - RecordWriteStub stub(entry->object, - entry->value, - entry->address, - entry->action, - kDontSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); - } -} - - -// Takes the input in 3 registers: address_ value_ and object_. A pointer to -// the value has just been written into the object, now this stub makes sure -// we keep the GC informed. The word in the object where the value has been -// written is in the address register. 
-void RecordWriteStub::Generate(MacroAssembler* masm) { - Label skip_to_incremental_noncompacting; - Label skip_to_incremental_compacting; - - // The first two instructions are generated with labels so as to get the - // offset fixed up correctly by the bind(Label*) call. We patch it back and - // forth between a compare instructions (a nop in this position) and the - // real branch when we start and stop incremental heap marking. - // See RecordWriteStub::Patch for details. - __ b(&skip_to_incremental_noncompacting); - __ b(&skip_to_incremental_compacting); - - if (remembered_set_action_ == EMIT_REMEMBERED_SET) { - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - } - __ Ret(); - - __ bind(&skip_to_incremental_noncompacting); - GenerateIncremental(masm, INCREMENTAL); - - __ bind(&skip_to_incremental_compacting); - GenerateIncremental(masm, INCREMENTAL_COMPACTION); - - // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. - // Will be checked in IncrementalMarking::ActivateGeneratedStub. - ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); - ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); - PatchBranchIntoNop(masm, 0); - PatchBranchIntoNop(masm, Assembler::kInstrSize); -} - - -void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { - regs_.Save(masm); - - if (remembered_set_action_ == EMIT_REMEMBERED_SET) { - Label dont_need_remembered_set; - - __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); - __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. - regs_.scratch0(), - &dont_need_remembered_set); - - __ CheckPageFlag(regs_.object(), - regs_.scratch0(), - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - ne, - &dont_need_remembered_set); - - // First notify the incremental marker if necessary, then update the - // remembered set. - CheckNeedsToInformIncrementalMarker( - masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); - InformIncrementalMarker(masm, mode); - regs_.Restore(masm); - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - - __ bind(&dont_need_remembered_set); - } - - CheckNeedsToInformIncrementalMarker( - masm, kReturnOnNoNeedToInformIncrementalMarker, mode); - InformIncrementalMarker(masm, mode); - regs_.Restore(masm); - __ Ret(); -} - - -void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { - regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); - int argument_count = 3; - __ PrepareCallCFunction(argument_count, regs_.scratch0()); - Register address = - r0.is(regs_.address()) ? 
regs_.scratch0() : regs_.address(); - ASSERT(!address.is(regs_.object())); - ASSERT(!address.is(r0)); - __ Move(address, regs_.address()); - __ Move(r0, regs_.object()); - if (mode == INCREMENTAL_COMPACTION) { - __ Move(r1, address); - } else { - ASSERT(mode == INCREMENTAL); - __ ldr(r1, MemOperand(address, 0)); - } - __ mov(r2, Operand(ExternalReference::isolate_address())); - - AllowExternalCallThatCantCauseGC scope(masm); - if (mode == INCREMENTAL_COMPACTION) { - __ CallCFunction( - ExternalReference::incremental_evacuation_record_write_function( - masm->isolate()), - argument_count); - } else { - ASSERT(mode == INCREMENTAL); - __ CallCFunction( - ExternalReference::incremental_marking_record_write_function( - masm->isolate()), - argument_count); - } - regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); -} - - -void RecordWriteStub::CheckNeedsToInformIncrementalMarker( - MacroAssembler* masm, - OnNoNeedToInformIncrementalMarker on_no_need, - Mode mode) { - Label on_black; - Label need_incremental; - Label need_incremental_pop_scratch; - - // Let's look at the color of the object: If it is not black we don't have - // to inform the incremental marker. - __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); - - regs_.Restore(masm); - if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - } else { - __ Ret(); - } - - __ bind(&on_black); - - // Get the value from the slot. - __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); - - if (mode == INCREMENTAL_COMPACTION) { - Label ensure_not_white; - - __ CheckPageFlag(regs_.scratch0(), // Contains value. - regs_.scratch1(), // Scratch. - MemoryChunk::kEvacuationCandidateMask, - eq, - &ensure_not_white); - - __ CheckPageFlag(regs_.object(), - regs_.scratch1(), // Scratch. - MemoryChunk::kSkipEvacuationSlotsRecordingMask, - eq, - &need_incremental); - - __ bind(&ensure_not_white); - } - - // We need extra registers for this, so we push the object and the address - // register temporarily. - __ Push(regs_.object(), regs_.address()); - __ EnsureNotWhite(regs_.scratch0(), // The value. - regs_.scratch1(), // Scratch. - regs_.object(), // Scratch. - regs_.address(), // Scratch. - &need_incremental_pop_scratch); - __ Pop(regs_.object(), regs_.address()); - - regs_.Restore(masm); - if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - } else { - __ Ret(); - } - - __ bind(&need_incremental_pop_scratch); - __ Pop(regs_.object(), regs_.address()); - - __ bind(&need_incremental); - - // Fall through when we need to inform the incremental marker. 
-} - - #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 3ba75bab130c..557f7e6d4118 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -58,25 +58,6 @@ class TranscendentalCacheStub: public CodeStub { }; -class StoreBufferOverflowStub: public CodeStub { - public: - explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) { } - - void Generate(MacroAssembler* masm); - - virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); - virtual bool SometimesSetsUpAFrame() { return false; } - - private: - SaveFPRegsMode save_doubles_; - - Major MajorKey() { return StoreBufferOverflow; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } -}; - - class UnaryOpStub: public CodeStub { public: UnaryOpStub(Token::Value op, @@ -342,9 +323,6 @@ class WriteInt32ToHeapNumberStub : public CodeStub { the_heap_number_(the_heap_number), scratch_(scratch) { } - bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); - private: Register the_int_; Register the_heap_number_; @@ -393,225 +371,6 @@ class NumberToStringStub: public CodeStub { }; -class RecordWriteStub: public CodeStub { - public: - RecordWriteStub(Register object, - Register value, - Register address, - RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode) - : object_(object), - value_(value), - address_(address), - remembered_set_action_(remembered_set_action), - save_fp_regs_mode_(fp_mode), - regs_(object, // An input reg. - address, // An input reg. - value) { // One scratch reg. - } - - enum Mode { - STORE_BUFFER_ONLY, - INCREMENTAL, - INCREMENTAL_COMPACTION - }; - - virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); - virtual bool SometimesSetsUpAFrame() { return false; } - - static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { - masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20)); - ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos))); - } - - static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { - masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27); - ASSERT(Assembler::IsBranch(masm->instr_at(pos))); - } - - static Mode GetMode(Code* stub) { - Instr first_instruction = Assembler::instr_at(stub->instruction_start()); - Instr second_instruction = Assembler::instr_at(stub->instruction_start() + - Assembler::kInstrSize); - - if (Assembler::IsBranch(first_instruction)) { - return INCREMENTAL; - } - - ASSERT(Assembler::IsTstImmediate(first_instruction)); - - if (Assembler::IsBranch(second_instruction)) { - return INCREMENTAL_COMPACTION; - } - - ASSERT(Assembler::IsTstImmediate(second_instruction)); - - return STORE_BUFFER_ONLY; - } - - static void Patch(Code* stub, Mode mode) { - MacroAssembler masm(NULL, - stub->instruction_start(), - stub->instruction_size()); - switch (mode) { - case STORE_BUFFER_ONLY: - ASSERT(GetMode(stub) == INCREMENTAL || - GetMode(stub) == INCREMENTAL_COMPACTION); - PatchBranchIntoNop(&masm, 0); - PatchBranchIntoNop(&masm, Assembler::kInstrSize); - break; - case INCREMENTAL: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); - PatchNopIntoBranch(&masm, 0); - break; - case INCREMENTAL_COMPACTION: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); - PatchNopIntoBranch(&masm, Assembler::kInstrSize); - break; - } - ASSERT(GetMode(stub) == mode); - CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize); - } - - private: - // This is a 
helper class for freeing up 3 scratch registers. The input is - // two registers that must be preserved and one scratch register provided by - // the caller. - class RegisterAllocation { - public: - RegisterAllocation(Register object, - Register address, - Register scratch0) - : object_(object), - address_(address), - scratch0_(scratch0) { - ASSERT(!AreAliased(scratch0, object, address, no_reg)); - scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_); - } - - void Save(MacroAssembler* masm) { - ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); - // We don't have to save scratch0_ because it was given to us as - // a scratch register. - masm->push(scratch1_); - } - - void Restore(MacroAssembler* masm) { - masm->pop(scratch1_); - } - - // If we have to call into C then we need to save and restore all caller- - // saved registers that were not already preserved. The scratch registers - // will be restored by other means so we don't bother pushing them here. - void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { - masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); - if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP3); - masm->sub(sp, - sp, - Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); - // Save all VFP registers except d0. - for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); - } - } - } - - inline void RestoreCallerSaveRegisters(MacroAssembler*masm, - SaveFPRegsMode mode) { - if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP3); - // Restore all VFP registers except d0. - for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); - } - masm->add(sp, - sp, - Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); - } - masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); - } - - inline Register object() { return object_; } - inline Register address() { return address_; } - inline Register scratch0() { return scratch0_; } - inline Register scratch1() { return scratch1_; } - - private: - Register object_; - Register address_; - Register scratch0_; - Register scratch1_; - - Register GetRegThatIsNotOneOf(Register r1, - Register r2, - Register r3) { - for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { - Register candidate = Register::FromAllocationIndex(i); - if (candidate.is(r1)) continue; - if (candidate.is(r2)) continue; - if (candidate.is(r3)) continue; - return candidate; - } - UNREACHABLE(); - return no_reg; - } - friend class RecordWriteStub; - }; - - enum OnNoNeedToInformIncrementalMarker { - kReturnOnNoNeedToInformIncrementalMarker, - kUpdateRememberedSetOnNoNeedToInformIncrementalMarker - }; - - void Generate(MacroAssembler* masm); - void GenerateIncremental(MacroAssembler* masm, Mode mode); - void CheckNeedsToInformIncrementalMarker( - MacroAssembler* masm, - OnNoNeedToInformIncrementalMarker on_no_need, - Mode mode); - void InformIncrementalMarker(MacroAssembler* masm, Mode mode); - - Major MajorKey() { return RecordWrite; } - - int MinorKey() { - return ObjectBits::encode(object_.code()) | - ValueBits::encode(value_.code()) | - AddressBits::encode(address_.code()) | - RememberedSetActionBits::encode(remembered_set_action_) | - SaveFPRegsModeBits::encode(save_fp_regs_mode_); - } - - bool MustBeInStubCache() { - // All stubs must be registered 
in the stub cache - // otherwise IncrementalMarker would not be able to find - // and patch it. - return true; - } - - void Activate(Code* code) { - code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); - } - - class ObjectBits: public BitField {}; - class ValueBits: public BitField {}; - class AddressBits: public BitField {}; - class RememberedSetActionBits: public BitField {}; - class SaveFPRegsModeBits: public BitField {}; - - Register object_; - Register value_; - Register address_; - RememberedSetAction remembered_set_action_; - SaveFPRegsMode save_fp_regs_mode_; - Label slow_; - RegisterAllocation regs_; -}; - - // Enter C code from generated RegExp code in a way that allows // the C code to fix the return address in case of a GC. // Currently only needed on ARM. @@ -816,8 +575,6 @@ class StringDictionaryLookupStub: public CodeStub { Register r0, Register r1); - virtual bool SometimesSetsUpAFrame() { return false; } - private: static const int kInlinedProbes = 4; static const int kTotalProbes = 20; @@ -830,7 +587,7 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryLookup; } + Major MajorKey() { return StringDictionaryNegativeLookup; } int MinorKey() { return LookupModeBits::encode(mode_); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 3993ed02bed7..bf748a9b6ac7 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -38,16 +38,12 @@ namespace internal { // Platform-specific RuntimeCallHelper functions. void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { - masm->EnterFrame(StackFrame::INTERNAL); - ASSERT(!masm->has_frame()); - masm->set_has_frame(true); + masm->EnterInternalFrame(); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { - masm->LeaveFrame(StackFrame::INTERNAL); - ASSERT(masm->has_frame()); - masm->set_has_frame(false); + masm->LeaveInternalFrame(); } diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 1c0d508d2d3d..d27982abac11 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -69,6 +69,16 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); + // Constants related to patching of inlined load/store. + static int GetInlinedKeyedLoadInstructionsAfterPatch() { + return FLAG_debug_code ? 32 : 13; + } + static const int kInlinedKeyedStoreInstructionsAfterPatch = 8; + static int GetInlinedNamedStoreInstructionsAfterPatch() { + ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1); + return Isolate::Current()->inlined_write_barrier_size() + 4; + } + private: DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index b866f9cc8d9c..07a22722c8bf 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -132,58 +132,56 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList object_regs, RegList non_object_regs) { - { - FrameScope scope(masm, StackFrame::INTERNAL); - - // Store the registers containing live values on the expression stack to - // make sure that these are correctly updated during GC. Non object values - // are stored as a smi causing it to be untouched by GC. 
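// Illustrative sketch (not part of the patch): the loop below turns raw
// register values into smis before spilling them, so a GC that walks the
// stack never mistakes them for heap pointers. On 32-bit V8 a smi carries its
// payload in the upper bits with a zero tag bit at bit 0; the 0xc0000000 test
// in the stub guards against the shift losing bits.
#include <cassert>
#include <cstdint>

constexpr int kSmiTagSizeSketch = 1;             // mirrors the LSL #1 below

inline uint32_t SmiTagSketch(uint32_t value) {
  assert((value & 0xc0000000u) == 0);            // same precondition as the stub
  return value << kSmiTagSizeSketch;             // tag bit 0 ends up zero
}

inline uint32_t SmiUntagSketch(uint32_t tagged) {
  return tagged >> kSmiTagSizeSketch;            // mirrors the LSR on restore
}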
- ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); - if ((object_regs | non_object_regs) != 0) { - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - if (FLAG_debug_code) { - __ tst(reg, Operand(0xc0000000)); - __ Assert(eq, "Unable to encode value as smi"); - } - __ mov(reg, Operand(reg, LSL, kSmiTagSize)); + __ EnterInternalFrame(); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. + ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + if ((object_regs | non_object_regs) != 0) { + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ tst(reg, Operand(0xc0000000)); + __ Assert(eq, "Unable to encode value as smi"); } + __ mov(reg, Operand(reg, LSL, kSmiTagSize)); } - __ stm(db_w, sp, object_regs | non_object_regs); } + __ stm(db_w, sp, object_regs | non_object_regs); + } #ifdef DEBUG - __ RecordComment("// Calling from debug break to runtime - come in - over"); + __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments - __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); - - CEntryStub ceb(1); - __ CallStub(&ceb); - - // Restore the register values from the expression stack. - if ((object_regs | non_object_regs) != 0) { - __ ldm(ia_w, sp, object_regs | non_object_regs); - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - __ mov(reg, Operand(reg, LSR, kSmiTagSize)); - } - if (FLAG_debug_code && - (((object_regs |non_object_regs) & (1 << r)) == 0)) { - __ mov(reg, Operand(kDebugZapValue)); - } + __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments + __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); + + CEntryStub ceb(1); + __ CallStub(&ceb); + + // Restore the register values from the expression stack. + if ((object_regs | non_object_regs) != 0) { + __ ldm(ia_w, sp, object_regs | non_object_regs); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + __ mov(reg, Operand(reg, LSR, kSmiTagSize)); + } + if (FLAG_debug_code && + (((object_regs |non_object_regs) & (1 << r)) == 0)) { + __ mov(reg, Operand(kDebugZapValue)); } } - - // Leave the internal frame. } + __ LeaveInternalFrame(); + // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index bb03d740d158..00357f76dbe2 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -112,19 +112,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } #endif - Isolate* isolate = code->GetIsolate(); - // Add the deoptimizing code to the list. 
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); - DeoptimizerData* data = isolate->deoptimizer_data(); + DeoptimizerData* data = code->GetIsolate()->deoptimizer_data(); node->set_next(data->deoptimizing_code_list_); data->deoptimizing_code_list_ = node; - // We might be in the middle of incremental marking with compaction. - // Tell collector to treat this code object in a special way and - // ignore all slots that might have been recorded on it. - isolate->heap()->mark_compact_collector()->InvalidateCode(code); - // Set the code for the function to non-optimized version. function->ReplaceCode(function->shared()->code()); @@ -141,8 +134,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, - Address pc_after, +void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { const int kInstrSize = Assembler::kInstrSize; @@ -177,13 +169,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, reinterpret_cast(check_code->entry())); Memory::uint32_at(stack_check_address_pointer) = reinterpret_cast(replacement_code->entry()); - - RelocInfo rinfo(pc_after - 2 * kInstrSize, - RelocInfo::CODE_TARGET, - 0, - unoptimized_code); - unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode( - unoptimized_code, &rinfo, replacement_code); } @@ -208,9 +193,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, reinterpret_cast(replacement_code->entry())); Memory::uint32_at(stack_check_address_pointer) = reinterpret_cast(check_code->entry()); - - check_code->GetHeap()->incremental_marking()-> - RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code); } @@ -650,10 +632,7 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(r5, Operand(ExternalReference::isolate_address())); __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. // Call Deoptimizer::New(). - { - AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); - } + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); // Preserve "deoptimizer" object in register r0 and get the input // frame descriptor pointer to r1 (deoptimizer->input_); @@ -707,11 +686,8 @@ void Deoptimizer::EntryGenerator::Generate() { // r0: deoptimizer object; r1: scratch. __ PrepareCallCFunction(1, r1); // Call Deoptimizer::ComputeOutputFrames(). - { - AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); - } + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate), 1); __ pop(r0); // Restore deoptimizer object (class Deoptimizer). // Replace the current (input) frame with the output frames. diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index c66ceee931d7..26bbd82d000a 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -70,16 +70,6 @@ static const RegList kCalleeSaved = 1 << 10 | // r10 v7 1 << 11; // r11 v8 (fp in JavaScript code) -// When calling into C++ (only for C++ calls that can't cause a GC). -// The call code will take care of lr, fp, etc. -static const RegList kCallerSaved = - 1 << 0 | // r0 - 1 << 1 | // r1 - 1 << 2 | // r2 - 1 << 3 | // r3 - 1 << 9; // r9 - - static const int kNumCalleeSaved = 7 + kR9Available; // Double registers d8 to d15 are callee-saved. 
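// Illustrative sketch (not from the patch): the register lists in
// frames-arm.h, like the kCallerSaved constant removed above, are plain
// bitmasks with one bit per ARM core register. Building and counting such a
// mask looks like this; the constant name is an illustrative stand-in.
#include <cstdint>

typedef uint32_t RegListSketch;

constexpr RegListSketch kIllustrativeCallerSaved =
    (1u << 0) |   // r0
    (1u << 1) |   // r1
    (1u << 2) |   // r2
    (1u << 3) |   // r3
    (1u << 9);    // r9

inline int NumRegsInList(RegListSketch list) {
  int count = 0;
  for (int i = 0; i < 16; ++i) {
    if (list & (1u << i)) ++count;               // one bit per core register
  }
  return count;
}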
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 353ce5b10646..50ed8b1da738 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -39,7 +39,6 @@ #include "stub-cache.h" #include "arm/code-stubs-arm.h" -#include "arm/macro-assembler-arm.h" namespace v8 { namespace internal { @@ -156,11 +155,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ bind(&ok); } - // Open a frame scope to indicate that there is a frame on the stack. The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done below). - FrameScope frame_scope(masm_, StackFrame::MANUAL); - int locals_count = info->scope()->num_stack_slots(); __ Push(lr, fp, cp, r1); @@ -206,12 +200,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // Load parameter from stack. __ ldr(r0, MemOperand(fp, parameter_offset)); // Store it in the context. - MemOperand target = ContextOperand(cp, var->index()); - __ str(r0, target); - - // Update the write barrier. - __ RecordWriteContextSlot( - cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs); + __ mov(r1, Operand(Context::SlotOffset(var->index()))); + __ str(r0, MemOperand(cp, r1)); + // Update the write barrier. This clobbers all involved + // registers, so we have to use two more registers to avoid + // clobbering cp. + __ mov(r2, Operand(cp)); + __ RecordWrite(r2, Operand(r1), r3, r0); } } } @@ -269,7 +264,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // constant. if (scope()->is_function_scope() && scope()->function() != NULL) { int ignored = 0; - EmitDeclaration(scope()->function(), CONST, NULL, &ignored); + EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored); } VisitDeclarations(scope()->declarations()); } @@ -670,15 +665,12 @@ void FullCodeGenerator::SetVar(Variable* var, ASSERT(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ str(src, location); - // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { - __ RecordWriteContextSlot(scratch0, - location.offset(), - src, - scratch1, - kLRHasBeenSaved, - kDontSaveFPRegs); + __ RecordWrite(scratch0, + Operand(Context::SlotOffset(var->index())), + scratch1, + src); } } @@ -711,7 +703,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state, void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, - VariableMode mode, + Variable::Mode mode, FunctionLiteral* function, int* global_count) { // If it was not possible to allocate the variable at compile time, we @@ -729,7 +721,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); VisitForAccumulatorValue(function); __ str(result_register(), StackOperand(variable)); - } else if (mode == CONST || mode == LET) { + } else if (mode == Variable::CONST || mode == Variable::LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ str(ip, StackOperand(variable)); @@ -754,16 +746,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ str(result_register(), ContextOperand(cp, variable->index())); int offset = Context::SlotOffset(variable->index()); // We know that we have written a function, which is not a smi. 
- __ RecordWriteContextSlot(cp, - offset, - result_register(), - r2, - kLRHasBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ mov(r1, Operand(cp)); + __ RecordWrite(r1, Operand(offset), r2, result_register()); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); - } else if (mode == CONST || mode == LET) { + } else if (mode == Variable::CONST || mode == Variable::LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ str(ip, ContextOperand(cp, variable->index())); @@ -776,8 +762,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); __ mov(r2, Operand(variable->name())); // Declaration nodes are always introduced in one of three modes. - ASSERT(mode == VAR || mode == CONST || mode == LET); - PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE; + ASSERT(mode == Variable::VAR || + mode == Variable::CONST || + mode == Variable::LET); + PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE; __ mov(r1, Operand(Smi::FromInt(attr))); // Push initial value, if any. // Note: For variables we must not push an initial value (such as @@ -787,7 +775,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ Push(cp, r2, r1); // Push initial value for function declaration. VisitForStackValue(function); - } else if (mode == CONST || mode == LET) { + } else if (mode == Variable::CONST || mode == Variable::LET) { __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); __ Push(cp, r2, r1, r0); } else { @@ -1217,23 +1205,15 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. - if (var->mode() == DYNAMIC_GLOBAL) { + if (var->mode() == Variable::DYNAMIC_GLOBAL) { EmitLoadGlobalCheckExtensions(var, typeof_state, slow); __ jmp(done); - } else if (var->mode() == DYNAMIC_LOCAL) { + } else if (var->mode() == Variable::DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == CONST || - local->mode() == LET) { + if (local->mode() == Variable::CONST) { __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); - if (local->mode() == CONST) { - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); - } else { // LET - __ b(ne, done); - __ mov(r0, Operand(var->name())); - __ push(r0); - __ CallRuntime(Runtime::kThrowReferenceError, 1); - } + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); } __ jmp(done); } @@ -1266,13 +1246,13 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { Comment cmnt(masm_, var->IsContextSlot() ? "Context variable" : "Stack variable"); - if (var->mode() != LET && var->mode() != CONST) { + if (var->mode() != Variable::LET && var->mode() != Variable::CONST) { context()->Plug(var); } else { // Let and const need a read barrier. GetVar(r0, var); __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); - if (var->mode() == LET) { + if (var->mode() == Variable::LET) { Label done; __ b(ne, &done); __ mov(r0, Operand(var->name())); @@ -1510,23 +1490,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { VisitForAccumulatorValue(subexpr); // Store the subexpression value in the array's elements. - __ ldr(r6, MemOperand(sp)); // Copy of array literal. - __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset)); + __ ldr(r1, MemOperand(sp)); // Copy of array literal. 
+ __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ str(result_register(), FieldMemOperand(r1, offset)); - Label no_map_change; - __ JumpIfSmi(result_register(), &no_map_change); // Update the write barrier for the array store with r0 as the scratch // register. - __ RecordWriteField( - r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); - __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ CheckFastSmiOnlyElements(r3, r2, &no_map_change); - __ push(r6); // Copy of array literal. - __ CallRuntime(Runtime::kNonSmiElementStored, 1); - __ bind(&no_map_change); + __ RecordWrite(r1, Operand(offset), r2, result_register()); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } @@ -1873,7 +1844,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); } - } else if (var->mode() == LET && op != Token::INIT_LET) { + } else if (var->mode() == Variable::LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. if (var->IsLookupSlot()) { __ push(r0); // Value. @@ -1898,12 +1869,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // RecordWrite may destroy all its register arguments. __ mov(r3, result_register()); int offset = Context::SlotOffset(var->index()); - __ RecordWriteContextSlot( - r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r1, Operand(offset), r2, r3); } } - } else if (var->mode() != CONST) { + } else if (var->mode() != Variable::CONST) { // Assignment to var or initializing assignment to let. if (var->IsStackAllocated() || var->IsContextSlot()) { MemOperand location = VarOperand(var, r1); @@ -1917,9 +1887,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ str(r0, location); if (var->IsContextSlot()) { __ mov(r3, r0); - int offset = Context::SlotOffset(var->index()); - __ RecordWriteContextSlot( - r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3); } } else { ASSERT(var->IsLookupSlot()); @@ -2139,8 +2107,10 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, __ push(r1); // Push the strict mode flag. In harmony mode every eval call // is a strict mode eval call. - StrictModeFlag strict_mode = - FLAG_harmony_scoping ? kStrictMode : strict_mode_flag(); + StrictModeFlag strict_mode = strict_mode_flag(); + if (FLAG_harmony_block_scoping) { + strict_mode = kStrictMode; + } __ mov(r1, Operand(Smi::FromInt(strict_mode))); __ push(r1); @@ -2186,7 +2156,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { // context lookup in the runtime system. Label done; Variable* var = proxy->var(); - if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) { + if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) { Label slow; EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow); // Push the function and resolve eval. @@ -2692,24 +2662,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. - // Assume that there are only two callable types, and one of them is at - // either end of the type range for JS object types. Saves extra comparisons. 
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE); // Map is now in r0. __ b(lt, &null); - STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == - FIRST_SPEC_OBJECT_TYPE + 1); - __ b(eq, &function); - - __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE)); - STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == - LAST_SPEC_OBJECT_TYPE - 1); - __ b(eq, &function); - // Assume that there is no larger type. - STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); - - // Check if the constructor in the map is a JS function. + + // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and + // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after + // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. + STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == + LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); + __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE)); + __ b(ge, &function); + + // Check if the constructor in the map is a function. __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset)); __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); __ b(ne, &non_function_constructor); @@ -2887,9 +2853,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList* args) { __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset)); // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. - __ mov(r2, r0); - __ RecordWriteField( - r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3); __ bind(&done); context()->Plug(r0); @@ -3177,31 +3141,16 @@ void FullCodeGenerator::EmitSwapElements(ZoneList* args) { __ str(scratch1, MemOperand(index2, 0)); __ str(scratch2, MemOperand(index1, 0)); - Label no_remembered_set; - __ CheckPageFlag(elements, - scratch1, - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - ne, - &no_remembered_set); + Label new_space; + __ InNewSpace(elements, scratch1, eq, &new_space); // Possible optimization: do a check that both values are Smis // (or them and test against Smi mask.) - // We are swapping two objects in an array and the incremental marker never - // pauses in the middle of scanning a single object. Therefore the - // incremental marker is not disturbed, so we don't need to call the - // RecordWrite stub that notifies the incremental marker. - __ RememberedSetHelper(elements, - index1, - scratch2, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - __ RememberedSetHelper(elements, - index2, - scratch2, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); + __ mov(scratch1, elements); + __ RecordWriteHelper(elements, index1, scratch2); + __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements. - __ bind(&no_remembered_set); + __ bind(&new_space); // We are done. Drop elements from the stack, and return undefined. 
__ Drop(3); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); @@ -3949,14 +3898,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle check) { - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - context()->PrepareTest(&materialize_true, &materialize_false, - &if_true, &if_false, &fall_through); - + Handle check, + Label* if_true, + Label* if_false, + Label* fall_through) { { AccumulatorValueContext context(this); VisitForTypeofValue(expr); } @@ -3997,11 +3942,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else if (check->Equals(isolate()->heap()->function_symbol())) { __ JumpIfSmi(r0, if_false); - STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); - __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE); - __ b(eq, if_true); - __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE)); - Split(eq, if_true, if_false, fall_through); + __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE); + Split(ge, if_true, if_false, fall_through); + } else if (check->Equals(isolate()->heap()->object_symbol())) { __ JumpIfSmi(r0, if_false); if (!FLAG_harmony_typeof) { @@ -4020,7 +3963,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else { if (if_false != fall_through) __ jmp(if_false); } - context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, + Label* if_true, + Label* if_false, + Label* fall_through) { + VisitForAccumulatorValue(expr); + PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); + + __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); + Split(eq, if_true, if_false, fall_through); } @@ -4028,12 +3982,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); SetSourcePosition(expr->position()); - // First we try a fast inlined version of the compare when one of - // the operands is a literal. - if (TryLiteralCompare(expr)) return; - // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. + Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4041,6 +3992,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); + // First we try a fast inlined version of the compare when one of + // the operands is a literal. 
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { + context()->Plug(if_true, if_false); + return; + } + Token::Value op = expr->op(); VisitForStackValue(expr->left()); switch (op) { @@ -4127,9 +4085,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } -void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, - Expression* sub_expr, - NilValue nil) { +void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { + Comment cmnt(masm_, "[ CompareToNull"); Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4137,21 +4094,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - VisitForAccumulatorValue(sub_expr); + VisitForAccumulatorValue(expr->expression()); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - Heap::RootListIndex nil_value = nil == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ LoadRoot(r1, nil_value); + __ LoadRoot(r1, Heap::kNullValueRootIndex); __ cmp(r0, r1); - if (expr->op() == Token::EQ_STRICT) { + if (expr->is_strict()) { Split(eq, if_true, if_false, fall_through); } else { - Heap::RootListIndex other_nil_value = nil == kNullValue ? - Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; __ b(eq, if_true); - __ LoadRoot(r1, other_nil_value); + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); __ cmp(r0, r1); __ b(eq, if_true); __ JumpIfSmi(r0, if_false); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 6e0badca1d76..2e49cae92899 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -208,8 +208,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update the write barrier. Make sure not to clobber the value. __ mov(scratch1, value); - __ RecordWrite( - elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); + __ RecordWrite(elements, scratch2, scratch1); } @@ -505,22 +504,21 @@ static void GenerateCallMiss(MacroAssembler* masm, // Get the receiver of the function from the stack. __ ldr(r3, MemOperand(sp, argc * kPointerSize)); - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Push the receiver and the name of the function. - __ Push(r3, r2); + // Push the receiver and the name of the function. + __ Push(r3, r2); - // Call the entry. - __ mov(r0, Operand(2)); - __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); + // Call the entry. + __ mov(r0, Operand(2)); + __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); - CEntryStub stub(1); - __ CallStub(&stub); + CEntryStub stub(1); + __ CallStub(&stub); - // Move result to r1 and leave the internal frame. - __ mov(r1, Operand(r0)); - } + // Move result to r1 and leave the internal frame. + __ mov(r1, Operand(r0)); + __ LeaveInternalFrame(); // Check if the receiver is a global object of some sort. // This can happen only for regular CallIC but not KeyedCallIC. @@ -652,13 +650,12 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. 
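// Illustrative sketch (assumption, not this patch's code): the hunks in this
// file swap an RAII FrameScope for explicit EnterInternalFrame /
// LeaveInternalFrame calls. The difference is only who is responsible for
// tearing the frame down again; a minimal stand-in for the two idioms:
struct MasmSketch {
  void EnterInternalFrame() { /* push fp, lr, frame marker ... */ }
  void LeaveInternalFrame() { /* pop them again */ }
};

class FrameScopeSketch {
 public:
  explicit FrameScopeSketch(MasmSketch* masm) : masm_(masm) {
    masm_->EnterInternalFrame();                          // set up on entry
  }
  ~FrameScopeSketch() { masm_->LeaveInternalFrame(); }    // tear down on exit
 private:
  MasmSketch* masm_;
};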
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r2); // save the key - __ Push(r1, r2); // pass the receiver and the key - __ CallRuntime(Runtime::kKeyedGetProperty, 2); - __ pop(r2); // restore the key - } + __ EnterInternalFrame(); + __ push(r2); // save the key + __ Push(r1, r2); // pass the receiver and the key + __ CallRuntime(Runtime::kKeyedGetProperty, 2); + __ pop(r2); // restore the key + __ LeaveInternalFrame(); __ mov(r1, r0); __ jmp(&do_call); @@ -911,8 +908,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, ¬in, &slow); __ str(r0, mapped_location); __ add(r6, r3, r5); - __ mov(r9, r0); - __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r3, r6, r9); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in r3. @@ -920,8 +916,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow); __ str(r0, unmapped_location); __ add(r6, r3, r4); - __ mov(r9, r0); - __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r3, r6, r9); __ Ret(); __ bind(&slow); GenerateMiss(masm, false); @@ -1272,17 +1267,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- r2 : receiver // -- lr : return address // ----------------------------------- - Label slow, array, extra, check_if_double_array; - Label fast_object_with_map_check, fast_object_without_map_check; - Label fast_double_with_map_check, fast_double_without_map_check; + Label slow, fast, array, extra; // Register usage. Register value = r0; Register key = r1; Register receiver = r2; Register elements = r3; // Elements array of the receiver. - Register elements_map = r6; - Register receiver_map = r7; // r4 and r5 are used as general scratch registers. // Check that the key is a smi. @@ -1290,26 +1281,35 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // Check that the object isn't a smi. __ JumpIfSmi(receiver, &slow); // Get the map of the object. - __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that the receiver does not require access checks. We need // to do this because this generic stub does not perform map checks. - __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); + __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); __ b(ne, &slow); // Check if the object is a JS array or not. - __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ cmp(r4, Operand(JS_ARRAY_TYPE)); __ b(eq, &array); // Check that the object is some kind of JSObject. - __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); + __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE)); __ b(lt, &slow); + __ cmp(r4, Operand(JS_PROXY_TYPE)); + __ b(eq, &slow); + __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); + __ b(eq, &slow); // Object case: Check key against length in the elements array. __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + // Check that the object is in fast mode and writable. + __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(r4, ip); + __ b(ne, &slow); // Check array bounds. 
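// Illustrative sketch (not from the patch): the generic keyed store being
// patched here only takes its fast path when every guard holds: the key is a
// smi index, the receiver needs no access checks, the backing store is a
// plain FixedArray, and the index is in bounds. Modeled with made-up types:
#include <cstddef>
#include <vector>

struct TaggedSketch { bool is_smi; int smi; const void* object; };

inline bool TryFastKeyedStore(std::vector<TaggedSketch>& elements,
                              const TaggedSketch& key,
                              const TaggedSketch& value) {
  if (!key.is_smi || key.smi < 0) return false;                       // smi key
  if (static_cast<size_t>(key.smi) >= elements.size()) return false;  // bounds
  elements[static_cast<size_t>(key.smi)] = value;                     // store
  // A real fast path would now run the write barrier unless value.is_smi.
  return true;
}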
Both the key and the length of FixedArray are smis. __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); - __ b(lo, &fast_object_with_map_check); + __ b(lo, &fast); // Slow case, handle jump to runtime. __ bind(&slow); @@ -1330,31 +1330,21 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); __ b(hs, &slow); - __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_array_map())); - __ b(ne, &check_if_double_array); // Calculate key + 1 as smi. STATIC_ASSERT(kSmiTag == 0); __ add(r4, key, Operand(Smi::FromInt(1))); __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ b(&fast_object_without_map_check); - - __ bind(&check_if_double_array); - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_double_array_map())); - __ b(ne, &slow); - // Add 1 to key, and go to common element store code for doubles. - STATIC_ASSERT(kSmiTag == 0); - __ add(r4, key, Operand(Smi::FromInt(1))); - __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ jmp(&fast_double_without_map_check); + __ b(&fast); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it // is the length is always a smi. __ bind(&array); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(r4, ip); + __ b(ne, &slow); // Check the key against the length in the array. __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1362,57 +1352,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ b(hs, &extra); // Fall through to fast case. - __ bind(&fast_object_with_map_check); - Register scratch_value = r4; - Register address = r5; - __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_array_map())); - __ b(ne, &fast_double_with_map_check); - __ bind(&fast_object_without_map_check); - // Smi stores don't require further checks. - Label non_smi_value; - __ JumpIfNotSmi(value, &non_smi_value); - // It's irrelevant whether array is smi-only or not when writing a smi. - __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value, MemOperand(address)); - __ Ret(); - - __ bind(&non_smi_value); - // Escape to slow case when writing non-smi into smi-only array. - __ CheckFastObjectElements(receiver_map, scratch_value, &slow); - // Fast elements array, store the value to the elements backing store. - __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value, MemOperand(address)); + __ bind(&fast); + // Fast case, store the value to the elements backing store. + __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value, MemOperand(r5)); + // Skip write barrier if the written value is a smi. + __ tst(value, Operand(kSmiTagMask)); + __ Ret(eq); // Update write barrier for the elements array address. 
- __ mov(scratch_value, value); // Preserve the value which is returned. - __ RecordWrite(elements, - address, - scratch_value, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ Ret(); + __ sub(r4, r5, Operand(elements)); + __ RecordWrite(elements, Operand(r4), r5, r6); - __ bind(&fast_double_with_map_check); - // Check for fast double array case. If this fails, call through to the - // runtime. - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_double_array_map())); - __ b(ne, &slow); - __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(value, - key, - receiver, - elements, - r4, - r5, - r6, - r7, - &slow); __ Ret(); } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 84959397b651..30ccd05beecb 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -212,11 +212,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { } -void LIsNilAndBranch::PrintDataTo(StringStream* stream) { +void LIsNullAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); InputAt(0)->PrintTo(stream); - stream->Add(kind() == kStrictEquality ? " === " : " == "); - stream->Add(nil() == kNullValue ? "null" : "undefined"); + stream->Add(is_strict() ? " === null" : " == null"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } @@ -712,9 +711,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble( LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); - int argument_index_accumulator = 0; - instr->set_environment(CreateEnvironment(hydrogen_env, - &argument_index_accumulator)); + instr->set_environment(CreateEnvironment(hydrogen_env)); return instr; } @@ -997,13 +994,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } -LEnvironment* LChunkBuilder::CreateEnvironment( - HEnvironment* hydrogen_env, - int* argument_index_accumulator) { +LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { if (hydrogen_env == NULL) return NULL; - LEnvironment* outer = - CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); + LEnvironment* outer = CreateEnvironment(hydrogen_env->outer()); int ast_id = hydrogen_env->ast_id(); ASSERT(ast_id != AstNode::kNoNumber); int value_count = hydrogen_env->length(); @@ -1013,6 +1007,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment( argument_count_, value_count, outer); + int argument_index = 0; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1021,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment( if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument((*argument_index_accumulator)++); + op = new LArgument(argument_index++); } else { op = UseAny(value); } @@ -1449,9 +1444,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( } -LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LIsNilAndBranch(UseRegisterAtStart(instr->value())); + return new LIsNullAndBranch(UseRegisterAtStart(instr->value())); } @@ -1739,7 +1734,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LLoadGlobalCell* result = new LLoadGlobalCell; - return instr->RequiresHoleCheck() 
+ return instr->check_hole_value() ? AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } @@ -1753,11 +1748,14 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { - LOperand* temp = TempRegister(); - LOperand* value = UseTempRegister(instr->value()); - LInstruction* result = new LStoreGlobalCell(value, temp); - if (instr->RequiresHoleCheck()) result = AssignEnvironment(result); - return result; + if (instr->check_hole_value()) { + LOperand* temp = TempRegister(); + LOperand* value = UseRegister(instr->value()); + return AssignEnvironment(new LStoreGlobalCell(value, temp)); + } else { + LOperand* value = UseRegisterAtStart(instr->value()); + return new LStoreGlobalCell(value, NULL); + } } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 73c7e459c3d1..8c18760fd109 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -107,7 +107,7 @@ class LCodeGen; V(Integer32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNilAndBranch) \ + V(IsNullAndBranch) \ V(IsObjectAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -627,17 +627,16 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { }; -class LIsNilAndBranch: public LControlInstruction<1, 0> { +class LIsNullAndBranch: public LControlInstruction<1, 0> { public: - explicit LIsNilAndBranch(LOperand* value) { + explicit LIsNullAndBranch(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) + DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch) - EqualityKind kind() const { return hydrogen()->kind(); } - NilValue nil() const { return hydrogen()->nil(); } + bool is_strict() const { return hydrogen()->is_strict(); } virtual void PrintDataTo(StringStream* stream); }; @@ -2160,8 +2159,7 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, int ast_id); void ClearInstructionPendingDeoptimizationEnvironment(); - LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, - int* argument_index_accumulator); + LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); void VisitInstruction(HInstruction* current); diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 70ef8848167c..f5d744914977 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -82,14 +82,6 @@ bool LCodeGen::GenerateCode() { status_ = GENERATING; CpuFeatures::Scope scope1(VFP3); CpuFeatures::Scope scope2(ARMv7); - - CodeStub::GenerateFPStubs(); - - // Open a frame scope to indicate that there is a frame on the stack. The - // NONE indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). - FrameScope frame_scope(masm_, StackFrame::NONE); - return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && @@ -214,11 +206,13 @@ bool LCodeGen::GeneratePrologue() { // Load parameter from stack. __ ldr(r0, MemOperand(fp, parameter_offset)); // Store it in the context. - MemOperand target = ContextOperand(cp, var->index()); - __ str(r0, target); - // Update the write barrier. This clobbers r3 and r0. 
- __ RecordWriteContextSlot( - cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); + __ mov(r1, Operand(Context::SlotOffset(var->index()))); + __ str(r0, MemOperand(cp, r1)); + // Update the write barrier. This clobbers all involved + // registers, so we have to use two more registers to avoid + // clobbering cp. + __ mov(r2, Operand(cp)); + __ RecordWrite(r2, Operand(r1), r3, r0); } } Comment(";;; End allocate local context"); @@ -268,9 +262,6 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); - Comment(";;; Deferred code @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); code->Generate(); __ jmp(code->exit()); } @@ -748,7 +739,7 @@ void LCodeGen::RecordSafepoint( int deoptimization_index) { ASSERT(expected_safepoint_kind_ == kind); - const ZoneList* operands = pointers->GetNormalizedOperands(); + const ZoneList* operands = pointers->operands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), kind, arguments, deoptimization_index); for (int i = 0; i < operands->length(); i++) { @@ -1041,7 +1032,6 @@ void LCodeGen::DoDivI(LDivI* instr) { virtual void Generate() { codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); } - virtual LInstruction* instr() { return instr_; } private: LDivI* instr_; }; @@ -1753,35 +1743,25 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { } -void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { +void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { Register scratch = scratch0(); Register reg = ToRegister(instr->InputAt(0)); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - // If the expression is known to be untagged or a smi, then it's definitely - // not null, and it can't be a an undetectable object. - if (instr->hydrogen()->representation().IsSpecialization() || - instr->hydrogen()->type().IsSmi()) { - EmitGoto(false_block); - return; - } + // TODO(fsc): If the expression is known to be a smi, then it's + // definitely not null. Jump to the false block. int true_block = chunk_->LookupDestination(instr->true_block_id()); - Heap::RootListIndex nil_value = instr->nil() == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ LoadRoot(ip, nil_value); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + __ LoadRoot(ip, Heap::kNullValueRootIndex); __ cmp(reg, ip); - if (instr->kind() == kStrictEquality) { + if (instr->is_strict()) { EmitBranch(true_block, false_block, eq); } else { - Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ? - Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); __ b(eq, true_label); - __ LoadRoot(ip, other_nil_value); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(reg, ip); __ b(eq, true_label); __ JumpIfSmi(reg, false_label); @@ -1938,36 +1918,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true, ASSERT(!input.is(temp)); ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. __ JumpIfSmi(input, is_false); + __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); + __ b(lt, is_false); + // Map is now in temp. + // Functions have class 'Function'. 
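// Illustrative sketch (not from the patch): the class-of test here compares
// the map's instance type against the callable / non-callable boundaries of
// the spec-object range. The removed branch below avoids two compares by
// subtracting the lower bound and comparing against the range width; the
// numeric values below are made up for illustration.
#include <cstdint>

constexpr uint8_t kFirstNonCallableSketch = 0xa0;   // illustrative only
constexpr uint8_t kLastNonCallableSketch  = 0xbf;   // illustrative only

inline bool InNonCallableRange(uint8_t instance_type) {
  // Wraps around for types below the lower bound, so one unsigned compare
  // covers both ends of the range.
  return static_cast<uint8_t>(instance_type - kFirstNonCallableSketch) <=
         (kLastNonCallableSketch - kFirstNonCallableSketch);
}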
+ __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE); if (class_name->IsEqualTo(CStrVector("Function"))) { - // Assuming the following assertions, we can use the same compares to test - // for both being a function type and being in the object type range. - STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); - STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == - FIRST_SPEC_OBJECT_TYPE + 1); - STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == - LAST_SPEC_OBJECT_TYPE - 1); - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); - __ b(lt, is_false); - __ b(eq, is_true); - __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); - __ b(eq, is_true); + __ b(ge, is_true); } else { - // Faster code path to avoid two compares: subtract lower bound from the - // actual type and do a signed compare with the width of the type range. - __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); - __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); - __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); - __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); - __ b(gt, is_false); + __ b(ge, is_false); } - // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. // Check if the constructor in the map is a function. __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); + // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and + // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after + // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. + STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == + LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); + // Objects with a non-function constructor have class 'Object'. __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); if (class_name->IsEqualTo(CStrVector("Object"))) { @@ -2044,8 +2016,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { virtual void Generate() { codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); } - virtual LInstruction* instr() { return instr_; } + Label* map_check() { return &map_check_; } + private: LInstanceOfKnownGlobal* instr_; Label map_check_; @@ -2207,7 +2180,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); __ mov(ip, Operand(Handle(instr->hydrogen()->cell()))); __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); - if (instr->hydrogen()->RequiresHoleCheck()) { + if (instr->hydrogen()->check_hole_value()) { __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ cmp(result, ip); DeoptimizeIf(eq, instr->environment()); @@ -2230,7 +2203,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { Register value = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); - Register scratch2 = ToRegister(instr->TempAt(0)); // Load the cell. __ mov(scratch, Operand(Handle(instr->hydrogen()->cell()))); @@ -2239,7 +2211,8 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { // been deleted from the property dictionary. In that case, we need // to update the property details in the property dictionary to mark // it as no longer deleted. 
- if (instr->hydrogen()->RequiresHoleCheck()) { + if (instr->hydrogen()->check_hole_value()) { + Register scratch2 = ToRegister(instr->TempAt(0)); __ ldr(scratch2, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); @@ -2249,15 +2222,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { // Store the value. __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); - - // Cells are always in the remembered set. - __ RecordWriteField(scratch, - JSGlobalPropertyCell::kValueOffset, - value, - scratch2, - kLRHasBeenSaved, - kSaveFPRegs, - OMIT_REMEMBERED_SET); } @@ -2283,15 +2247,10 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { Register context = ToRegister(instr->context()); Register value = ToRegister(instr->value()); - MemOperand target = ContextOperand(context, instr->slot_index()); - __ str(value, target); + __ str(value, ContextOperand(context, instr->slot_index())); if (instr->needs_write_barrier()) { - __ RecordWriteContextSlot(context, - target.offset(), - value, - scratch0(), - kLRHasBeenSaved, - kSaveFPRegs); + int offset = Context::SlotOffset(instr->slot_index()); + __ RecordWrite(context, Operand(offset), value, scratch0()); } } @@ -2541,9 +2500,13 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); } - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); + if (instr->hydrogen()->RequiresHoleCheck()) { + // TODO(danno): If no hole check is required, there is no need to allocate + // elements into a temporary register, instead scratch can be used. + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); + } __ vldr(result, elements, 0); } @@ -2614,7 +2577,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -2944,7 +2906,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { virtual void Generate() { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } - virtual LInstruction* instr() { return instr_; } private: LUnaryMathOperation* instr_; }; @@ -3241,7 +3202,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ Drop(1); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -3301,8 +3262,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { __ str(value, FieldMemOperand(object, offset)); if (instr->needs_write_barrier()) { // Update the write barrier for the object for in-object properties. - __ RecordWriteField( - object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs); + __ RecordWrite(object, Operand(offset), value, scratch); } } else { __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); @@ -3310,8 +3270,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { // Update the write barrier for the properties array. 
// object is used as a scratch register. - __ RecordWriteField( - scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs); + __ RecordWrite(scratch, Operand(offset), value, object); } } } @@ -3342,13 +3301,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; Register scratch = scratch0(); - // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS - // conversion, so it deopts in that case. - if (instr->hydrogen()->ValueNeedsSmiCheck()) { - __ tst(value, Operand(kSmiTagMask)); - DeoptimizeIf(ne, instr->environment()); - } - // Do the store. if (instr->key()->IsConstantOperand()) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); @@ -3363,8 +3315,8 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { if (instr->hydrogen()->NeedsWriteBarrier()) { // Compute address of modified element and store it into key register. - __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs); + __ add(key, scratch, Operand(FixedArray::kHeaderSize)); + __ RecordWrite(elements, key, value); } } @@ -3465,7 +3417,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3501,7 +3452,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } - virtual LInstruction* instr() { return instr_; } private: LStringCharCodeAt* instr_; }; @@ -3625,7 +3575,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } - virtual LInstruction* instr() { return instr_; } private: LStringCharFromCode* instr_; }; @@ -3697,7 +3646,6 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } - virtual LInstruction* instr() { return instr_; } private: LNumberTagI* instr_; }; @@ -3763,7 +3711,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } - virtual LInstruction* instr() { return instr_; } private: LNumberTagD* instr_; }; @@ -3872,6 +3819,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, } +class DeferredTaggedToI: public LDeferredCode { + public: + DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } + private: + LTaggedToI* instr_; +}; + + void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Register input_reg = ToRegister(instr->InputAt(0)); Register scratch1 = scratch0(); @@ -3954,16 +3911,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI: public LDeferredCode { - public: 
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } - virtual LInstruction* instr() { return instr_; } - private: - LTaggedToI* instr_; - }; - LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); @@ -4396,12 +4343,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = ne; } else if (type_name->Equals(heap()->function_symbol())) { - STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE); - __ b(eq, true_label); - __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE)); - final_branch_condition = eq; + __ CompareObjectType(input, input, scratch, + FIRST_CALLABLE_SPEC_OBJECT_TYPE); + final_branch_condition = ge; } else if (type_name->Equals(heap()->object_symbol())) { __ JumpIfSmi(input, false_label); @@ -4523,7 +4468,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } - virtual LInstruction* instr() { return instr_; } private: LStackCheck* instr_; }; diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 711e4595e7a6..ead848903464 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -376,20 +376,16 @@ class LCodeGen BASE_EMBEDDED { class LDeferredCode: public ZoneObject { public: explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { + : codegen_(codegen), external_exit_(NULL) { codegen->AddDeferredCode(this); } virtual ~LDeferredCode() { } virtual void Generate() = 0; - virtual LInstruction* instr() = 0; void SetExit(Label *exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } - int instruction_index() const { return instruction_index_; } protected: LCodeGen* codegen() const { return codegen_; } @@ -400,7 +396,6 @@ class LDeferredCode: public ZoneObject { Label entry_; Label exit_; Label* external_exit_; - int instruction_index_; }; } } // namespace v8::internal diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 918f9ebe060d..f37f31021854 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -42,8 +42,7 @@ namespace internal { MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) : Assembler(arg_isolate, buffer, size), generating_stub_(false), - allow_stub_calls_(true), - has_frame_(false) { + allow_stub_calls_(true) { if (isolate() != NULL) { code_object_ = Handle(isolate()->heap()->undefined_value(), isolate()); @@ -407,6 +406,32 @@ void MacroAssembler::StoreRoot(Register source, } +void MacroAssembler::RecordWriteHelper(Register object, + Register address, + Register scratch) { + if (emit_debug_code()) { + // Check that the object is not in new space. + Label not_in_new_space; + InNewSpace(object, scratch, ne, ¬_in_new_space); + Abort("new-space object passed to RecordWriteHelper"); + bind(¬_in_new_space); + } + + // Calculate page address. + Bfc(object, 0, kPageSizeBits); + + // Calculate region number. 
+ Ubfx(address, address, Page::kRegionSizeLog2, + kPageSizeBits - Page::kRegionSizeLog2); + + // Mark region dirty. + ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset)); + mov(ip, Operand(1)); + orr(scratch, scratch, Operand(ip, LSL, address)); + str(scratch, MemOperand(object, Page::kDirtyFlagOffset)); +} + + void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cond, @@ -418,52 +443,38 @@ void MacroAssembler::InNewSpace(Register object, } -void MacroAssembler::RecordWriteField( - Register object, - int offset, - Register value, - Register dst, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - // First, check if a write barrier is even needed. The tests below - // catch stores of Smis. - Label done; +// Will clobber 4 registers: object, offset, scratch, ip. The +// register 'object' contains a heap object pointer. The heap object +// tag is shifted away. +void MacroAssembler::RecordWrite(Register object, + Operand offset, + Register scratch0, + Register scratch1) { + // The compiled code assumes that record write doesn't change the + // context register, so we check that none of the clobbered + // registers are cp. + ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp)); - // Skip barrier if writing a smi. - if (smi_check == INLINE_SMI_CHECK) { - JumpIfSmi(value, &done); - } + Label done; - // Although the object register is tagged, the offset is relative to the start - // of the object, so so offset must be a multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize)); + // First, test that the object is not in the new space. We cannot set + // region marks for new space pages. + InNewSpace(object, scratch0, eq, &done); - add(dst, object, Operand(offset - kHeapObjectTag)); - if (emit_debug_code()) { - Label ok; - tst(dst, Operand((1 << kPointerSizeLog2) - 1)); - b(eq, &ok); - stop("Unaligned cell in write barrier"); - bind(&ok); - } + // Add offset into the object. + add(scratch0, object, offset); - RecordWrite(object, - dst, - value, - lr_status, - save_fp, - remembered_set_action, - OMIT_SMI_CHECK); + // Record the actual write. + RecordWriteHelper(object, scratch0, scratch1); bind(&done); - // Clobber clobbered input registers when running with the debug-code flag + // Clobber all input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(value, Operand(BitCast(kZapValue + 4))); - mov(dst, Operand(BitCast(kZapValue + 8))); + mov(object, Operand(BitCast(kZapValue))); + mov(scratch0, Operand(BitCast(kZapValue))); + mov(scratch1, Operand(BitCast(kZapValue))); } } @@ -473,94 +484,29 @@ void MacroAssembler::RecordWriteField( // tag is shifted away. void MacroAssembler::RecordWrite(Register object, Register address, - Register value, - LinkRegisterStatus lr_status, - SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { + Register scratch) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are cp. - ASSERT(!address.is(cp) && !value.is(cp)); + ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp)); Label done; - if (smi_check == INLINE_SMI_CHECK) { - ASSERT_EQ(0, kSmiTag); - tst(value, Operand(kSmiTagMask)); - b(eq, &done); - } - - CheckPageFlag(value, - value, // Used as scratch. 
- MemoryChunk::kPointersToHereAreInterestingMask, - eq, - &done); - CheckPageFlag(object, - value, // Used as scratch. - MemoryChunk::kPointersFromHereAreInterestingMask, - eq, - &done); + // First, test that the object is not in the new space. We cannot set + // region marks for new space pages. + InNewSpace(object, scratch, eq, &done); // Record the actual write. - if (lr_status == kLRHasNotBeenSaved) { - push(lr); - } - RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); - CallStub(&stub); - if (lr_status == kLRHasNotBeenSaved) { - pop(lr); - } + RecordWriteHelper(object, address, scratch); bind(&done); - // Clobber clobbered registers when running with the debug-code flag + // Clobber all input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(address, Operand(BitCast(kZapValue + 12))); - mov(value, Operand(BitCast(kZapValue + 16))); - } -} - - -void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. - Register address, - Register scratch, - SaveFPRegsMode fp_mode, - RememberedSetFinalAction and_then) { - Label done; - if (FLAG_debug_code) { - Label ok; - JumpIfNotInNewSpace(object, scratch, &ok); - stop("Remembered set pointer is in new space"); - bind(&ok); - } - // Load store buffer top. - ExternalReference store_buffer = - ExternalReference::store_buffer_top(isolate()); - mov(ip, Operand(store_buffer)); - ldr(scratch, MemOperand(ip)); - // Store pointer to buffer and increment buffer top. - str(address, MemOperand(scratch, kPointerSize, PostIndex)); - // Write back new top of buffer. - str(scratch, MemOperand(ip)); - // Call stub on end of buffer. - // Check for end of buffer. - tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); - if (and_then == kFallThroughAtEnd) { - b(eq, &done); - } else { - ASSERT(and_then == kReturnAtEnd); - Ret(eq); - } - push(lr); - StoreBufferOverflowStub store_buffer_overflow = - StoreBufferOverflowStub(fp_mode); - CallStub(&store_buffer_overflow); - pop(lr); - bind(&done); - if (and_then == kReturnAtEnd) { - Ret(); + mov(object, Operand(BitCast(kZapValue))); + mov(address, Operand(BitCast(kZapValue))); + mov(scratch, Operand(BitCast(kZapValue))); } } @@ -1015,9 +961,6 @@ void MacroAssembler::InvokeCode(Register code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - Label done; InvokePrologue(expected, actual, Handle::null(), code, &done, flag, @@ -1045,9 +988,6 @@ void MacroAssembler::InvokeCode(Handle code, RelocInfo::Mode rmode, InvokeFlag flag, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - Label done; InvokePrologue(expected, actual, code, no_reg, &done, flag, @@ -1071,9 +1011,6 @@ void MacroAssembler::InvokeFunction(Register fun, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - // Contract with called JS functions requires that function is passed in r1. ASSERT(fun.is(r1)); @@ -1098,9 +1035,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function, const ParameterCount& actual, InvokeFlag flag, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - ASSERT(function->is_compiled()); // Get the function and setup the context. 
@@ -1156,10 +1090,10 @@ void MacroAssembler::IsObjectJSStringType(Register object, #ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { + ASSERT(allow_stub_calls()); mov(r0, Operand(0, RelocInfo::NONE)); mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); CEntryStub ces(1); - ASSERT(AllowThisStubCall(&ces)); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } #endif @@ -1859,127 +1793,13 @@ void MacroAssembler::CompareRoot(Register obj, void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 0); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); b(hi, fail); } -void MacroAssembler::CheckFastObjectElements(Register map, - Register scratch, - Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); - ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); - b(ls, fail); - cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); - b(hi, fail); -} - - -void MacroAssembler::CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); - b(hi, fail); -} - - -void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, - Register key_reg, - Register receiver_reg, - Register elements_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Label* fail) { - Label smi_value, maybe_nan, have_double_value, is_nan, done; - Register mantissa_reg = scratch2; - Register exponent_reg = scratch3; - - // Handle smi values specially. - JumpIfSmi(value_reg, &smi_value); - - // Ensure that the object is a heap number - CheckMap(value_reg, - scratch1, - isolate()->factory()->heap_number_map(), - fail, - DONT_DO_SMI_CHECK); - - // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 - // in the exponent. - mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); - ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); - cmp(exponent_reg, scratch1); - b(ge, &maybe_nan); - - ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - - bind(&have_double_value); - add(scratch1, elements_reg, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - str(exponent_reg, FieldMemOperand(scratch1, offset)); - jmp(&done); - - bind(&maybe_nan); - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. - b(gt, &is_nan); - ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - cmp(mantissa_reg, Operand(0)); - b(eq, &have_double_value); - bind(&is_nan); - // Load canonical NaN for storing into the double array. 
- uint64_t nan_int64 = BitCast( - FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - mov(mantissa_reg, Operand(static_cast(nan_int64))); - mov(exponent_reg, Operand(static_cast(nan_int64 >> 32))); - jmp(&have_double_value); - - bind(&smi_value); - add(scratch1, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - add(scratch1, scratch1, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - // scratch1 is now effective address of the double element - - FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(VFP3)) { - destination = FloatingPointHelper::kVFPRegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } - - Register untagged_value = receiver_reg; - SmiUntag(untagged_value, value_reg); - FloatingPointHelper::ConvertIntToDouble(this, - untagged_value, - destination, - d0, - mantissa_reg, - exponent_reg, - scratch4, - s2); - if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP3); - vstr(d0, scratch1, 0); - } else { - str(mantissa_reg, MemOperand(scratch1, 0)); - str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); - } - bind(&done); -} - - void MacroAssembler::CheckMap(Register obj, Register scratch, Handle map, @@ -2075,13 +1895,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { - ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond); } MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { - ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -2093,12 +1913,13 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); } MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) { + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 
Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -2201,12 +2022,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( } -bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { - if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; - return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); -} - - void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { add(sp, sp, Operand(num_arguments * kPointerSize)); @@ -2602,7 +2417,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); mov(r0, Operand(function->nargs)); mov(r1, Operand(ExternalReference(function, isolate()))); - CEntryStub stub(1, kSaveFPRegs); + CEntryStub stub(1); + stub.SaveDoubles(); CallStub(&stub); } @@ -2675,9 +2491,6 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { - // You can't call a builtin without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - GetBuiltinEntry(r2, id); if (flag == CALL_FUNCTION) { call_wrapper.BeforeCall(CallSize(r2)); @@ -2809,20 +2622,14 @@ void MacroAssembler::Abort(const char* msg) { RecordComment(msg); } #endif + // Disable stub call restrictions to always allow calls to abort. + AllowStubCallsScope allow_scope(this, true); mov(r0, Operand(p0)); push(r0); mov(r0, Operand(Smi::FromInt(p1 - p0))); push(r0); - // Disable stub call restrictions to always allow calls to abort. - if (!has_frame_) { - // We don't actually want to generate a pile of code for this, so just - // claim there is a stack frame, without generating one. - FrameScope scope(this, StackFrame::NONE); - CallRuntime(Runtime::kAbort, 2); - } else { - CallRuntime(Runtime::kAbort, 2); - } + CallRuntime(Runtime::kAbort, 2); // will not return here if (is_const_pool_blocked()) { // If the calling code cares about the exact number of @@ -3123,19 +2930,6 @@ void MacroAssembler::CopyBytes(Register src, } -void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, - Register end_offset, - Register filler) { - Label loop, entry; - b(&entry); - bind(&loop); - str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); - bind(&entry); - cmp(start_offset, end_offset); - b(lt, &loop); -} - - void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. Register source, // Input. 
Register scratch) { @@ -3295,15 +3089,23 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { - mov(ip, Operand(function)); - CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); + CallCFunctionHelper(no_reg, + function, + ip, + num_reg_arguments, + num_double_arguments); } void MacroAssembler::CallCFunction(Register function, - int num_reg_arguments, - int num_double_arguments) { - CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); + Register scratch, + int num_reg_arguments, + int num_double_arguments) { + CallCFunctionHelper(function, + ExternalReference::the_hole_value_location(isolate()), + scratch, + num_reg_arguments, + num_double_arguments); } @@ -3314,15 +3116,17 @@ void MacroAssembler::CallCFunction(ExternalReference function, void MacroAssembler::CallCFunction(Register function, + Register scratch, int num_arguments) { - CallCFunction(function, num_arguments, 0); + CallCFunction(function, scratch, num_arguments, 0); } void MacroAssembler::CallCFunctionHelper(Register function, + ExternalReference function_reference, + Register scratch, int num_reg_arguments, int num_double_arguments) { - ASSERT(has_frame()); // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. @@ -3346,6 +3150,10 @@ void MacroAssembler::CallCFunctionHelper(Register function, // Just call directly. The function called cannot cause a GC, or // allow preemption, so the return address in the link register // stays correct. + if (function.is(no_reg)) { + mov(scratch, Operand(function_reference)); + function = scratch; + } Call(function); int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); @@ -3377,185 +3185,6 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, } -void MacroAssembler::CheckPageFlag( - Register object, - Register scratch, - int mask, - Condition cc, - Label* condition_met) { - and_(scratch, object, Operand(~Page::kPageAlignmentMask)); - ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); - tst(scratch, Operand(mask)); - b(cc, condition_met); -} - - -void MacroAssembler::JumpIfBlack(Register object, - Register scratch0, - Register scratch1, - Label* on_black) { - HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); -} - - -void MacroAssembler::HasColor(Register object, - Register bitmap_scratch, - Register mask_scratch, - Label* has_color, - int first_bit, - int second_bit) { - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); - - GetMarkBits(object, bitmap_scratch, mask_scratch); - - Label other_color, word_boundary; - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - tst(ip, Operand(mask_scratch)); - b(first_bit == 1 ? eq : ne, &other_color); - // Shift left 1 by adding. - add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); - b(eq, &word_boundary); - tst(ip, Operand(mask_scratch)); - b(second_bit == 1 ? ne : eq, has_color); - jmp(&other_color); - - bind(&word_boundary); - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); - tst(ip, Operand(1)); - b(second_bit == 1 ? ne : eq, has_color); - bind(&other_color); -} - - -// Detect some, but not all, common pointer-free objects. 
This is used by the -// incremental write barrier which doesn't care about oddballs (they are always -// marked black immediately so this code is not hit). -void MacroAssembler::JumpIfDataObject(Register value, - Register scratch, - Label* not_data_object) { - Label is_data_object; - ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); - CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); - b(eq, &is_data_object); - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); - // If it's a string and it's not a cons string then it's an object containing - // no GC pointers. - ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); - b(ne, not_data_object); - bind(&is_data_object); -} - - -void MacroAssembler::GetMarkBits(Register addr_reg, - Register bitmap_reg, - Register mask_reg) { - ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); - and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); - Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); - const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; - Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); - add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); - mov(ip, Operand(1)); - mov(mask_reg, Operand(ip, LSL, mask_reg)); -} - - -void MacroAssembler::EnsureNotWhite( - Register value, - Register bitmap_scratch, - Register mask_scratch, - Register load_scratch, - Label* value_is_white_and_not_data) { - ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); - GetMarkBits(value, bitmap_scratch, mask_scratch); - - // If the value is black or grey we don't need to do anything. - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); - - Label done; - - // Since both black and grey have a 1 in the first position and white does - // not have a 1 there we only need to check one bit. - ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - tst(mask_scratch, load_scratch); - b(ne, &done); - - if (FLAG_debug_code) { - // Check for impossible bit pattern. - Label ok; - // LSL may overflow, making the check conservative. - tst(load_scratch, Operand(mask_scratch, LSL, 1)); - b(eq, &ok); - stop("Impossible marking bit pattern"); - bind(&ok); - } - - // Value is white. We check whether it is data that doesn't need scanning. - // Currently only checks for HeapNumber and non-cons strings. - Register map = load_scratch; // Holds map while checking type. - Register length = load_scratch; // Holds length of object after testing type. - Label is_data_object; - - // Check for heap-number - ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); - CompareRoot(map, Heap::kHeapNumberMapRootIndex); - mov(length, Operand(HeapNumber::kSize), LeaveCC, eq); - b(eq, &is_data_object); - - // Check for strings. - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); - // If it's a string and it's not a cons string then it's an object containing - // no GC pointers. 
- Register instance_type = load_scratch; - ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); - tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); - b(ne, value_is_white_and_not_data); - // It's a non-indirect (non-cons and non-slice) string. - // If it's external, the length is just ExternalString::kSize. - // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). - // External strings are the only ones with the kExternalStringTag bit - // set. - ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); - ASSERT_EQ(0, kConsStringTag & kExternalStringTag); - tst(instance_type, Operand(kExternalStringTag)); - mov(length, Operand(ExternalString::kSize), LeaveCC, ne); - b(ne, &is_data_object); - - // Sequential string, either ASCII or UC16. - // For ASCII (char-size of 1) we shift the smi tag away to get the length. - // For UC16 (char-size of 2) we just leave the smi tag in place, thereby - // getting the length multiplied by 2. - ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); - ASSERT(kSmiTag == 0 && kSmiTagSize == 1); - ldr(ip, FieldMemOperand(value, String::kLengthOffset)); - tst(instance_type, Operand(kStringEncodingMask)); - mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); - add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); - and_(length, length, Operand(~kObjectAlignmentMask)); - - bind(&is_data_object); - // Value is a data object, and it is white. Mark it black. Since we know - // that the object is white we can make it black by flipping one bit. - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - orr(ip, ip, Operand(mask_scratch)); - str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - - and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); - add(ip, ip, Operand(length)); - str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); - - bind(&done); -} - - void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { Usat(output_reg, 8, Operand(input_reg)); } @@ -3605,17 +3234,6 @@ void MacroAssembler::LoadInstanceDescriptors(Register map, } -bool AreAliased(Register r1, Register r2, Register r3, Register r4) { - if (r1.is(r2)) return true; - if (r1.is(r3)) return true; - if (r1.is(r4)) return true; - if (r2.is(r3)) return true; - if (r2.is(r4)) return true; - if (r3.is(r4)) return true; - return false; -} - - CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), instructions_(instructions), diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 8ee468a91703..6084fde2d367 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -29,7 +29,6 @@ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ #include "assembler.h" -#include "frames.h" #include "v8globals.h" namespace v8 { @@ -80,14 +79,6 @@ enum ObjectToDoubleFlags { }; -enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; -enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; -enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; - - -bool AreAliased(Register r1, Register r2, Register r3, Register r4); - - // MacroAssembler implements a collection of frequently used macros. 
class MacroAssembler: public Assembler { public: @@ -166,126 +157,40 @@ class MacroAssembler: public Assembler { Heap::RootListIndex index, Condition cond = al); - // --------------------------------------------------------------------------- - // GC Support - - void IncrementalMarkingRecordWriteHelper(Register object, - Register value, - Register address); - - enum RememberedSetFinalAction { - kReturnAtEnd, - kFallThroughAtEnd - }; - - // Record in the remembered set the fact that we have a pointer to new space - // at the address pointed to by the addr register. Only works if addr is not - // in new space. - void RememberedSetHelper(Register object, // Used for debug code. - Register addr, - Register scratch, - SaveFPRegsMode save_fp, - RememberedSetFinalAction and_then); - - void CheckPageFlag(Register object, - Register scratch, - int mask, - Condition cc, - Label* condition_met); - - // Check if object is in new space. Jumps if the object is not in new space. - // The register scratch can be object itself, but scratch will be clobbered. - void JumpIfNotInNewSpace(Register object, - Register scratch, - Label* branch) { - InNewSpace(object, scratch, ne, branch); - } - // Check if object is in new space. Jumps if the object is in new space. - // The register scratch can be object itself, but it will be clobbered. - void JumpIfInNewSpace(Register object, - Register scratch, - Label* branch) { - InNewSpace(object, scratch, eq, branch); - } + // Check if object is in new space. + // scratch can be object itself, but it will be clobbered. + void InNewSpace(Register object, + Register scratch, + Condition cond, // eq for new space, ne otherwise + Label* branch); - // Check if an object has a given incremental marking color. - void HasColor(Register object, - Register scratch0, - Register scratch1, - Label* has_color, - int first_bit, - int second_bit); - void JumpIfBlack(Register object, - Register scratch0, - Register scratch1, - Label* on_black); - - // Checks the color of an object. If the object is already grey or black - // then we just fall through, since it is already live. If it is white and - // we can determine that it doesn't need to be scanned, then we just mark it - // black and fall through. For the rest we jump to the label so the - // incremental marker can fix its assumptions. - void EnsureNotWhite(Register object, - Register scratch1, - Register scratch2, - Register scratch3, - Label* object_is_white_and_not_data); + // For the page containing |object| mark the region covering [address] + // dirty. The object address must be in the first 8K of an allocated page. + void RecordWriteHelper(Register object, + Register address, + Register scratch); - // Detects conservatively whether an object is data-only, ie it does need to - // be scanned by the garbage collector. - void JumpIfDataObject(Register value, - Register scratch, - Label* not_data_object); - - // Notify the garbage collector that we wrote a pointer into an object. - // |object| is the object being stored into, |value| is the object being - // stored. value and scratch registers are clobbered by the operation. - // The offset is the offset from the start of the object, not the offset from - // the tagged HeapObject pointer. For use with FieldOperand(reg, off). 
- void RecordWriteField( - Register object, - int offset, - Register value, - Register scratch, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); - - // As above, but the offset has the tag presubtracted. For use with - // MemOperand(reg, off). - inline void RecordWriteContextSlot( - Register context, - int offset, - Register value, - Register scratch, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK) { - RecordWriteField(context, - offset + kHeapObjectTag, - value, - scratch, - lr_status, - save_fp, - remembered_set_action, - smi_check); - } + // For the page containing |object| mark the region covering + // [object+offset] dirty. The object address must be in the first 8K + // of an allocated page. The 'scratch' registers are used in the + // implementation and all 3 registers are clobbered by the + // operation, as well as the ip register. RecordWrite updates the + // write barrier even when storing smis. + void RecordWrite(Register object, + Operand offset, + Register scratch0, + Register scratch1); - // For a given |object| notify the garbage collector that the slot |address| - // has been written. |value| is the object being stored. The value and - // address registers are clobbered by the operation. - void RecordWrite( - Register object, - Register address, - Register value, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + // For the page containing |object| mark the region covering + // [address] dirty. The object address must be in the first 8K of an + // allocated page. All 3 registers are clobbered by the operation, + // as well as the ip register. RecordWrite updates the write barrier + // even when storing smis. + void RecordWrite(Register object, + Register address, + Register scratch); // Push a handle. void Push(Handle handle); @@ -413,6 +318,16 @@ class MacroAssembler: public Assembler { const double imm, const Condition cond = al); + + // --------------------------------------------------------------------------- + // Activation frames + + void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } + void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } + + void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } + void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } + // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. void EnterExitFrame(bool save_doubles, int stack_space = 0); @@ -654,13 +569,6 @@ class MacroAssembler: public Assembler { Register length, Register scratch); - // Initialize fields with filler values. Fields starting at |start_offset| - // not including end_offset are overwritten with the value in |filler|. At - // the end the loop, |start_offset| takes the value of |end_offset|. - void InitializeFieldsWithFiller(Register start_offset, - Register end_offset, - Register filler); - // --------------------------------------------------------------------------- // Support functions. @@ -700,31 +608,6 @@ class MacroAssembler: public Assembler { Register scratch, Label* fail); - // Check if a map for a JSObject indicates that the object can have both smi - // and HeapObject elements. Jump to the specified label if it does not. 
- void CheckFastObjectElements(Register map, - Register scratch, - Label* fail); - - // Check if a map for a JSObject indicates that the object has fast smi only - // elements. Jump to the specified label if it does not. - void CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail); - - // Check to see if maybe_number can be stored as a double in - // FastDoubleElements. If it can, store it at the index specified by key in - // the FastDoubleElements array elements, otherwise jump to fail. - void StoreNumberToDoubleElements(Register value_reg, - Register key_reg, - Register receiver_reg, - Register elements_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Label* fail); - // Check if the map of an object is equal to a specified map (either // given directly or as an index into the root list) and branch to // label if not. Skip the smi check if not required (object is known @@ -947,11 +830,11 @@ class MacroAssembler: public Assembler { // return address (unless this is somehow accounted for by the called // function). void CallCFunction(ExternalReference function, int num_arguments); - void CallCFunction(Register function, int num_arguments); + void CallCFunction(Register function, Register scratch, int num_arguments); void CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments); - void CallCFunction(Register function, + void CallCFunction(Register function, Register scratch, int num_reg_arguments, int num_double_arguments); @@ -1019,9 +902,6 @@ class MacroAssembler: public Assembler { bool generating_stub() { return generating_stub_; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } - void set_has_frame(bool value) { has_frame_ = value; } - bool has_frame() { return has_frame_; } - inline bool AllowThisStubCall(CodeStub* stub); // EABI variant for double arguments in use. bool use_eabi_hardfloat() { @@ -1168,12 +1048,10 @@ class MacroAssembler: public Assembler { void LoadInstanceDescriptors(Register map, Register descriptors); - // Activation support. - void EnterFrame(StackFrame::Type type); - void LeaveFrame(StackFrame::Type type); - private: void CallCFunctionHelper(Register function, + ExternalReference function_reference, + Register scratch, int num_reg_arguments, int num_double_arguments); @@ -1189,25 +1067,16 @@ class MacroAssembler: public Assembler { const CallWrapper& call_wrapper, CallKind call_kind); + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + void InitializeNewString(Register string, Register length, Heap::RootListIndex map_index, Register scratch1, Register scratch2); - // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. - void InNewSpace(Register object, - Register scratch, - Condition cond, // eq for new space, ne otherwise. - Label* branch); - - // Helper for finding the mark bits for an address. Afterwards, the - // bitmap register points at the word with the mark bits and the mask - // the position of the first bit. Leaves addr_reg unchanged. - inline void GetMarkBits(Register addr_reg, - Register bitmap_reg, - Register mask_reg); - // Compute memory operands for safepoint stack slots. 
static int SafepointRegisterStackIndex(int reg_code); MemOperand SafepointRegisterSlot(Register reg); @@ -1215,7 +1084,6 @@ class MacroAssembler: public Assembler { bool generating_stub_; bool allow_stub_calls_; - bool has_frame_; // This handle will be patched with the code object on installation. Handle code_object_; diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index c8764679383d..cd76edbf15e7 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -371,12 +371,9 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( // Isolate. __ mov(r3, Operand(ExternalReference::isolate_address())); - { - AllowExternalCallThatCantCauseGC scope(masm_); - ExternalReference function = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); - __ CallCFunction(function, argument_count); - } + ExternalReference function = + ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + __ CallCFunction(function, argument_count); // Check if function returned non-zero for success or zero for failure. __ cmp(r0, Operand(0, RelocInfo::NONE)); @@ -614,12 +611,6 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // Entry code: __ bind(&entry_label_); - - // Tell the system that we have a stack frame. Because the type is MANUAL, no - // is generated. - FrameScope scope(masm_, StackFrame::MANUAL); - - // Actually emit code to start a new stack frame. // Push arguments // Save callee-save registers. // Start new stack frame. diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 570420262232..6af535553fb8 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1618,8 +1618,6 @@ void Simulator::HandleRList(Instruction* instr, bool load) { ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address); intptr_t* address = reinterpret_cast(start_address); - // Catch null pointers a little earlier. - ASSERT(start_address > 8191 || start_address < 0); int reg = 0; while (rlist != 0) { if ((rlist & 1) != 0) { diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 4558afe68aa4..f8565924b196 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -431,13 +431,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the now unused name_reg as a scratch register. - __ mov(name_reg, r0); - __ RecordWriteField(receiver_reg, - offset, - name_reg, - scratch, - kLRHasNotBeenSaved, - kDontSaveFPRegs); + __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -450,13 +444,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. - __ mov(name_reg, r0); - __ RecordWriteField(scratch, - offset, - name_reg, - receiver_reg, - kLRHasNotBeenSaved, - kDontSaveFPRegs); + __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg); } // Return the value (register r0). 
@@ -565,10 +553,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) { } -static MaybeObject* GenerateFastApiDirectCall( - MacroAssembler* masm, - const CallOptimization& optimization, - int argc) { +static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, + const CallOptimization& optimization, + int argc) { // ----------- S t a t e ------------- // -- sp[0] : holder (set by CheckPrototypes) // -- sp[4] : callee js function @@ -604,8 +591,6 @@ static MaybeObject* GenerateFastApiDirectCall( ApiFunction fun(api_function_address); const int kApiStackSpace = 4; - - FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); // r0 = v8::Arguments& @@ -631,11 +616,9 @@ static MaybeObject* GenerateFastApiDirectCall( ExternalReference ref = ExternalReference(&fun, ExternalReference::DIRECT_API_CALL, masm->isolate()); - AllowExternalCallThatCantCauseGC scope(masm); return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } - class CallInterceptorCompiler BASE_EMBEDDED { public: CallInterceptorCompiler(StubCompiler* stub_compiler, @@ -811,7 +794,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { miss_label); // Call a runtime function to load the interceptor property. - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); // Save the name_ register across the call. __ push(name_); @@ -828,8 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Restore the name_ register. __ pop(name_); - - // Leave the internal frame. + __ LeaveInternalFrame(); } void LoadWithInterceptor(MacroAssembler* masm, @@ -838,19 +820,18 @@ class CallInterceptorCompiler BASE_EMBEDDED { JSObject* holder_obj, Register scratch, Label* interceptor_succeeded) { - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(holder, name_); - - CompileCallLoadPropertyWithInterceptor(masm, - receiver, - holder, - name_, - holder_obj); - - __ pop(name_); // Restore the name. - __ pop(receiver); // Restore the holder. - } + __ EnterInternalFrame(); + __ Push(holder, name_); + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + __ LeaveInternalFrame(); // If interceptor returns no-result sentinel, call the constant function. __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex); @@ -1247,10 +1228,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, ApiFunction fun(getter_address); const int kApiStackSpace = 1; - - FrameScope frame_scope(masm(), StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); - // Create AccessorInfo instance on the stack above the exit frame with // scratch2 (internal::Object **args_) as the data. __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); @@ -1310,44 +1288,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. - { - FrameScope frame_scope(masm(), StackFrame::INTERNAL); + __ EnterInternalFrame(); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. - __ Push(receiver, holder_reg, name_reg); - } else { - __ Push(holder_reg, name_reg); - } - - // Invoke an interceptor. Note: map checks from receiver to - // interceptor's holder has been compiled before (see a caller - // of this method.) 
- CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - interceptor_holder); - - // Check if interceptor provided a value for property. If it's - // the case, return immediately. - Label interceptor_failed; - __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); - __ cmp(r0, scratch1); - __ b(eq, &interceptor_failed); - frame_scope.GenerateLeaveFrame(); - __ Ret(); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + // CALLBACKS case needs a receiver to be passed into C++ callback. + __ Push(receiver, holder_reg, name_reg); + } else { + __ Push(holder_reg, name_reg); + } - __ bind(&interceptor_failed); - __ pop(name_reg); - __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - __ pop(receiver); - } + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method.) + CompileCallLoadPropertyWithInterceptor(masm(), + receiver, + holder_reg, + name_reg, + interceptor_holder); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); + __ cmp(r0, scratch1); + __ b(eq, &interceptor_failed); + __ LeaveInternalFrame(); + __ Ret(); - // Leave the internal frame. + __ bind(&interceptor_failed); + __ pop(name_reg); + __ pop(holder_reg); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + __ pop(receiver); } + __ LeaveInternalFrame(); + // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into |holder| register. if (interceptor_holder != lookup->holder()) { @@ -1580,7 +1556,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, DONT_DO_SMI_CHECK); if (argc == 1) { // Otherwise fall through to call the builtin. - Label attempt_to_grow_elements; + Label exit, with_write_barrier, attempt_to_grow_elements; // Get the array's length into r0 and calculate new length. __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1595,15 +1571,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ cmp(r0, r4); __ b(gt, &attempt_to_grow_elements); - // Check if value is a smi. - Label with_write_barrier; - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); - __ JumpIfNotSmi(r4, &with_write_barrier); - // Save new length. __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); // Push the element. + __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); // We may need a register containing the address end_elements below, // so write back the value in end_elements. __ add(end_elements, elements, @@ -1613,31 +1585,14 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); // Check for a smi. + __ JumpIfNotSmi(r4, &with_write_barrier); + __ bind(&exit); __ Drop(argc + 1); __ Ret(); __ bind(&with_write_barrier); - - __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ CheckFastSmiOnlyElements(r6, r6, &call_builtin); - - // Save new length. - __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - - // Push the element. - // We may need a register containing the address end_elements below, - // so write back the value in end_elements. 
- __ add(end_elements, elements, - Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); - - __ RecordWrite(elements, - end_elements, - r4, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ InNewSpace(elements, r4, eq, &exit); + __ RecordWriteHelper(elements, end_elements, r4); __ Drop(argc + 1); __ Ret(); @@ -1649,15 +1604,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ b(&call_builtin); } - __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize)); - // Growing elements that are SMI-only requires special handling in case - // the new element is non-Smi. For now, delegate to the builtin. - Label no_fast_elements_check; - __ JumpIfSmi(r2, &no_fast_elements_check); - __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ CheckFastObjectElements(r7, r7, &call_builtin); - __ bind(&no_fast_elements_check); - Isolate* isolate = masm()->isolate(); ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate); @@ -1684,7 +1630,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, // Update new_space_allocation_top. __ str(r6, MemOperand(r7)); // Push the argument. - __ str(r2, MemOperand(end_elements)); + __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize)); + __ str(r6, MemOperand(end_elements)); // Fill the rest with holes. __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); for (int i = 1; i < kAllocationDelta; i++) { @@ -2766,15 +2713,6 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Store the value in the cell. __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset)); - __ mov(r1, r0); - __ RecordWriteField(r4, - JSGlobalPropertyCell::kValueOffset, - r1, - r2, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET); - Counters* counters = masm()->isolate()->counters(); __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3); __ Ret(); @@ -3178,7 +3116,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { } -MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic( +MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic( MapList* receiver_maps, CodeList* handler_ics) { // ----------- S t a t e ------------- @@ -3274,10 +3212,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { } -MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic( +MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( MapList* receiver_maps, - CodeList* handler_stubs, - MapList* transitioned_maps) { + CodeList* handler_ics) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -3290,20 +3227,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic( int receiver_count = receiver_maps->length(); __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); - for (int i = 0; i < receiver_count; ++i) { - Handle map(receiver_maps->at(i)); - Handle code(handler_stubs->at(i)); + for (int current = 0; current < receiver_count; ++current) { + Handle map(receiver_maps->at(current)); + Handle code(handler_ics->at(current)); __ mov(ip, Operand(map)); __ cmp(r3, ip); - if (transitioned_maps->at(i) == NULL) { - __ Jump(code, RelocInfo::CODE_TARGET, eq); - } else { - Label next_map; - __ b(eq, &next_map); - __ mov(r4, Operand(Handle(transitioned_maps->at(i)))); - __ Jump(code, RelocInfo::CODE_TARGET, al); - __ bind(&next_map); - } + __ Jump(code, RelocInfo::CODE_TARGET, eq); 
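Illustrative note (not part of the patch): the CompileArrayPushCall hunks above specialize a one-argument Array.prototype.push on a fast-elements JSArray; everything else falls through to the generic ArrayPush builtin. A minimal JS sketch of the cases the stub distinguishes (variable names are made up for illustration):

    var a = [1, 2, 3];
    a.push(4);     // fast path: smi store, grows the backing store in place when there is room
    a.push({});    // pointer store: the GC must be told about the new reference
                   // (the write-barrier code being swapped above)
    a.push(5, 6);  // more than one argument: handled by the generic builtin instead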
} __ bind(&miss); @@ -3525,7 +3454,6 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3612,7 +3540,6 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3953,7 +3880,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4017,7 +3943,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4157,7 +4082,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4310,10 +4234,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( } -void KeyedStoreStubCompiler::GenerateStoreFastElement( - MacroAssembler* masm, - bool is_js_array, - ElementsKind elements_kind) { +void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, + bool is_js_array) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -4322,7 +4244,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // -- r3 : scratch // -- r4 : scratch (elements) // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic; Register value_reg = r0; Register key_reg = r1; @@ -4355,33 +4277,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ cmp(key_reg, scratch); __ b(hs, &miss_force_generic); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { - __ JumpIfNotSmi(value_reg, &transition_elements_kind); - __ add(scratch, - elements_reg, - Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ add(scratch, - scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value_reg, MemOperand(scratch)); - } else { - ASSERT(elements_kind == FAST_ELEMENTS); - __ add(scratch, - elements_reg, - Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ add(scratch, - scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value_reg, MemOperand(scratch)); - __ mov(receiver_reg, value_reg); - __ RecordWrite(elements_reg, // Object. - scratch, // Address. - receiver_reg, // Value. - kLRHasNotBeenSaved, - kDontSaveFPRegs); - } + __ add(scratch, + elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ str(value_reg, + MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ RecordWrite(scratch, + Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize), + receiver_reg , elements_reg); + // value_reg (r0) is preserved. // Done. 
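Illustrative note (not part of the patch): the hunks above strip FAST_SMI_ONLY_ELEMENTS out of the keyed load/store stubs. Roughly, that elements kind let arrays that have only ever held small integers skip the write barrier on stores, at the cost of a transition when a non-smi is written; a hedged JS sketch of the idea:

    var a = [1, 2, 3];   // under the removed scheme: starts out smi-only
    a[0] = 4;            // still smi-only, the store needs no GC bookkeeping
    a[1] = 1.5;          // would force a transition to a wider backing store
    a[2] = {};           // heap pointer: object elements plus a write barrier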
__ Ret(); @@ -4390,10 +4294,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( Handle ic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ Jump(ic, RelocInfo::CODE_TARGET); - - __ bind(&transition_elements_kind); - Handle ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic_miss, RelocInfo::CODE_TARGET); } @@ -4409,15 +4309,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- r4 : scratch // -- r5 : scratch // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value; Register value_reg = r0; Register key_reg = r1; Register receiver_reg = r2; - Register elements_reg = r3; - Register scratch1 = r4; - Register scratch2 = r5; - Register scratch3 = r6; + Register scratch = r3; + Register elements_reg = r4; + Register mantissa_reg = r5; + Register exponent_reg = r6; Register scratch4 = r7; // This stub is meant to be tail-jumped to, the receiver must already @@ -4429,25 +4329,90 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Check that the key is within bounds. if (is_js_array) { - __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); } else { - __ ldr(scratch1, + __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); } // Compare smis, unsigned compare catches both negative and out-of-bound // indexes. - __ cmp(key_reg, scratch1); + __ cmp(key_reg, scratch); __ b(hs, &miss_force_generic); - __ StoreNumberToDoubleElements(value_reg, - key_reg, - receiver_reg, - elements_reg, - scratch1, - scratch2, - scratch3, - scratch4, - &transition_elements_kind); + // Handle smi values specially. + __ JumpIfSmi(value_reg, &smi_value); + + // Ensure that the object is a heap number + __ CheckMap(value_reg, + scratch, + masm->isolate()->factory()->heap_number_map(), + &miss_force_generic, + DONT_DO_SMI_CHECK); + + // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 + // in the exponent. + __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32)); + __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); + __ cmp(exponent_reg, scratch); + __ b(ge, &maybe_nan); + + __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + + __ bind(&have_double_value); + __ add(scratch, elements_reg, + Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); + __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize)); + uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + __ str(exponent_reg, FieldMemOperand(scratch, offset)); + __ Ret(); + + __ bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. + __ b(gt, &is_nan); + __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + __ cmp(mantissa_reg, Operand(0)); + __ b(eq, &have_double_value); + __ bind(&is_nan); + // Load canonical NaN for storing into the double array. 
+ uint64_t nan_int64 = BitCast( + FixedDoubleArray::canonical_not_the_hole_nan_as_double()); + __ mov(mantissa_reg, Operand(static_cast(nan_int64))); + __ mov(exponent_reg, Operand(static_cast(nan_int64 >> 32))); + __ jmp(&have_double_value); + + __ bind(&smi_value); + __ add(scratch, elements_reg, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + __ add(scratch, scratch, + Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); + // scratch is now effective address of the double element + + FloatingPointHelper::Destination destination; + if (CpuFeatures::IsSupported(VFP3)) { + destination = FloatingPointHelper::kVFPRegisters; + } else { + destination = FloatingPointHelper::kCoreRegisters; + } + + Register untagged_value = receiver_reg; + __ SmiUntag(untagged_value, value_reg); + FloatingPointHelper::ConvertIntToDouble( + masm, + untagged_value, + destination, + d0, + mantissa_reg, + exponent_reg, + scratch4, + s2); + if (destination == FloatingPointHelper::kVFPRegisters) { + CpuFeatures::Scope scope(VFP3); + __ vstr(d0, scratch, 0); + } else { + __ str(mantissa_reg, MemOperand(scratch, 0)); + __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes)); + } __ Ret(); // Handle store cache miss, replacing the ic with the generic stub. @@ -4455,10 +4420,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Handle ic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ Jump(ic, RelocInfo::CODE_TARGET); - - __ bind(&transition_elements_kind); - Handle ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic_miss, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index e1d7c2064e53..4dd23c8bb464 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -201,14 +201,17 @@ function ConvertToString(x) { function ConvertToLocaleString(e) { - if (IS_NULL_OR_UNDEFINED(e)) { + if (e == null) { return ''; } else { - // According to ES5, seciton 15.4.4.3, the toLocaleString conversion - // must throw a TypeError if ToObject(e).toLocaleString isn't - // callable. + // e_obj's toLocaleString might be overwritten, check if it is a function. + // Call ToString if toLocaleString is not a function. + // See issue 877615. 
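Illustrative note (not part of the patch): GenerateStoreFastDoubleElement above writes into an unboxed double backing store, so user NaNs are rewritten to a canonical NaN (a reserved NaN bit pattern encodes the hole) and smis are converted to doubles before being stored. A small JS sketch of the inputs that hit each path:

    var a = [1.1, 2.2, 3.3];   // unboxed double elements
    a[0] = 0 / 0;              // NaN: canonicalized before the store
    a[1] = 7;                  // smi: untagged and converted to a double (VFP when available)
    a[2] = Infinity;           // passes the exponent check like a NaN; the mantissa
                               // test above tells Infinity apart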
var e_obj = ToObject(e); - return %ToString(e_obj.toLocaleString()); + if (IS_SPEC_FUNCTION(e_obj.toLocaleString)) + return ToString(e_obj.toLocaleString()); + else + return ToString(e); } } @@ -378,31 +381,18 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) { function ArrayToString() { - var array; - var func; - if (IS_ARRAY(this)) { - func = this.join; - if (func === ArrayJoin) { - return Join(this, this.length, ',', ConvertToString); - } - array = this; - } else { - array = ToObject(this); - func = array.join; + if (!IS_ARRAY(this)) { + throw new $TypeError('Array.prototype.toString is not generic'); } - if (!IS_SPEC_FUNCTION(func)) { - return %_CallFunction(array, ObjectToString); - } - return %_CallFunction(array, func); + return Join(this, this.length, ',', ConvertToString); } function ArrayToLocaleString() { - var array = ToObject(this); - var arrayLen = array.length; - var len = TO_UINT32(arrayLen); - if (len === 0) return ""; - return Join(array, len, ',', ConvertToLocaleString); + if (!IS_ARRAY(this)) { + throw new $TypeError('Array.prototype.toString is not generic'); + } + return Join(this, this.length, ',', ConvertToLocaleString); } @@ -1003,24 +993,21 @@ function ArrayFilter(f, receiver) { ["Array.prototype.filter"]); } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping and side effects are visible. - var array = ToObject(this); - var length = ToUint32(array.length); - if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - + // Pull out the length so that modifications to the length in the + // loop will not affect the looping. + var length = ToUint32(this.length); var result = []; var result_length = 0; for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - if (%_CallFunction(receiver, current, i, array, f)) { + var current = this[i]; + if (!IS_UNDEFINED(current) || i in this) { + if (%_CallFunction(receiver, current, i, this, f)) { result[result_length++] = current; } } @@ -1035,22 +1022,19 @@ function ArrayForEach(f, receiver) { ["Array.prototype.forEach"]); } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping and side effects are visible. - var array = ToObject(this); - var length = TO_UINT32(array.length); - if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - + // Pull out the length so that modifications to the length in the + // loop will not affect the looping. + var length = TO_UINT32(this.length); for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - %_CallFunction(receiver, current, i, array, f); + var current = this[i]; + if (!IS_UNDEFINED(current) || i in this) { + %_CallFunction(receiver, current, i, this, f); } } } @@ -1064,22 +1048,19 @@ function ArraySome(f, receiver) { ["Array.prototype.some"]); } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping and side effects are visible. 
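Illustrative note (not part of the patch): the ArrayToString / ArrayToLocaleString hunks above trade the ES5 generic behaviour for the older array-only version. Roughly, the observable difference (the array-like object below is made up for illustration):

    var arrayLike = { 0: 'a', 1: 'b', length: 2, join: Array.prototype.join };
    Array.prototype.toString.call(arrayLike);
    // removed version:  "a,b"  (ToObject(this), then this.join, else Object.prototype.toString)
    // restored version: throws TypeError("Array.prototype.toString is not generic")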
- var array = ToObject(this); - var length = TO_UINT32(array.length); - if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - + // Pull out the length so that modifications to the length in the + // loop will not affect the looping. + var length = TO_UINT32(this.length); for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - if (%_CallFunction(receiver, current, i, array, f)) return true; + var current = this[i]; + if (!IS_UNDEFINED(current) || i in this) { + if (%_CallFunction(receiver, current, i, this, f)) return true; } } return false; @@ -1092,22 +1073,19 @@ function ArrayEvery(f, receiver) { ["Array.prototype.every"]); } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping and side effects are visible. - var array = ToObject(this); - var length = TO_UINT32(array.length); - if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - + // Pull out the length so that modifications to the length in the + // loop will not affect the looping. + var length = TO_UINT32(this.length); for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - if (!%_CallFunction(receiver, current, i, array, f)) return false; + var current = this[i]; + if (!IS_UNDEFINED(current) || i in this) { + if (!%_CallFunction(receiver, current, i, this, f)) return false; } } return true; @@ -1119,24 +1097,21 @@ function ArrayMap(f, receiver) { ["Array.prototype.map"]); } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping and side effects are visible. - var array = ToObject(this); - var length = TO_UINT32(array.length); - if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - + // Pull out the length so that modifications to the length in the + // loop will not affect the looping. + var length = TO_UINT32(this.length); var result = new $Array(); var accumulator = new InternalArray(length); for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - accumulator[i] = %_CallFunction(receiver, current, i, array, f); + var current = this[i]; + if (!IS_UNDEFINED(current) || i in this) { + accumulator[i] = %_CallFunction(receiver, current, i, this, f); } } %MoveArrayContents(accumulator, result); @@ -1270,20 +1245,19 @@ function ArrayReduce(callback, current) { ["Array.prototype.reduce"]); } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping and side effects are visible. - var array = ToObject(this); - var length = ToUint32(array.length); - if (!IS_SPEC_FUNCTION(callback)) { throw MakeTypeError('called_non_callable', [callback]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping. 
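Illustrative note (not part of the patch): the filter/forEach/some/every/map hunks above move the ToObject(this) and length read from before the callable check to after it, so the difference is mainly when side effects become visible. A sketch, using a getter only to make the length read observable:

    var read = false;
    var probe = { get length() { read = true; return 0; } };
    try {
      Array.prototype.forEach.call(probe, null);   // null is not callable
    } catch (e) {
      // TypeError either way
    }
    // removed version:  read === true  (length is fetched before the callback check)
    // restored version: read === false (the callback is checked first)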
+ var length = ToUint32(this.length); var i = 0; + find_initial: if (%_ArgumentsLength() < 2) { for (; i < length; i++) { - current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { + current = this[i]; + if (!IS_UNDEFINED(current) || i in this) { i++; break find_initial; } @@ -1293,9 +1267,9 @@ function ArrayReduce(callback, current) { var receiver = %GetDefaultReceiver(callback); for (; i < length; i++) { - var element = array[i]; - if (!IS_UNDEFINED(element) || i in array) { - current = %_CallFunction(receiver, current, element, i, array, callback); + var element = this[i]; + if (!IS_UNDEFINED(element) || i in this) { + current = %_CallFunction(receiver, current, element, i, this, callback); } } return current; @@ -1307,20 +1281,15 @@ function ArrayReduceRight(callback, current) { ["Array.prototype.reduceRight"]); } - // Pull out the length so that side effects are visible before the - // callback function is checked. - var array = ToObject(this); - var length = ToUint32(array.length); - if (!IS_SPEC_FUNCTION(callback)) { throw MakeTypeError('called_non_callable', [callback]); } + var i = ToUint32(this.length) - 1; - var i = length - 1; find_initial: if (%_ArgumentsLength() < 2) { for (; i >= 0; i--) { - current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { + current = this[i]; + if (!IS_UNDEFINED(current) || i in this) { i--; break find_initial; } @@ -1330,9 +1299,9 @@ function ArrayReduceRight(callback, current) { var receiver = %GetDefaultReceiver(callback); for (; i >= 0; i--) { - var element = array[i]; - if (!IS_UNDEFINED(element) || i in array) { - current = %_CallFunction(receiver, current, element, i, array, callback); + var element = this[i]; + if (!IS_UNDEFINED(element) || i in this) { + current = %_CallFunction(receiver, current, element, i, this, callback); } } return current; diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index bda85e69decc..ad5f3508160b 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -38,7 +38,6 @@ #include "deoptimizer.h" #include "execution.h" #include "ic-inl.h" -#include "incremental-marking.h" #include "factory.h" #include "runtime.h" #include "runtime-profiler.h" @@ -48,7 +47,6 @@ #include "ast.h" #include "regexp-macro-assembler.h" #include "platform.h" -#include "store-buffer.h" // Include native regexp-macro-assembler. #ifndef V8_INTERPRETED_REGEXP #if V8_TARGET_ARCH_IA32 @@ -518,7 +516,6 @@ void RelocIterator::next() { RelocIterator::RelocIterator(Code* code, int mode_mask) { - rinfo_.host_ = code; rinfo_.pc_ = code->instruction_start(); rinfo_.data_ = 0; // Relocation info is read backwards. 
@@ -739,38 +736,9 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref) : address_(table_ref.address()) {} -ExternalReference ExternalReference:: - incremental_marking_record_write_function(Isolate* isolate) { - return ExternalReference(Redirect( - isolate, - FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode))); -} - - -ExternalReference ExternalReference:: - incremental_evacuation_record_write_function(Isolate* isolate) { - return ExternalReference(Redirect( - isolate, - FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode))); -} - - -ExternalReference ExternalReference:: - store_buffer_overflow_function(Isolate* isolate) { - return ExternalReference(Redirect( - isolate, - FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow))); -} - - -ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) { - return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache))); -} - - ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) { - return - ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC))); + return ExternalReference(Redirect(isolate, + FUNCTION_ADDR(Runtime::PerformGC))); } @@ -834,6 +802,17 @@ ExternalReference ExternalReference::keyed_lookup_cache_field_offsets( } +ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) { + return ExternalReference(isolate->factory()->the_hole_value().location()); +} + + +ExternalReference ExternalReference::arguments_marker_location( + Isolate* isolate) { + return ExternalReference(isolate->factory()->arguments_marker().location()); +} + + ExternalReference ExternalReference::roots_address(Isolate* isolate) { return ExternalReference(isolate->heap()->roots_address()); } @@ -861,14 +840,9 @@ ExternalReference ExternalReference::new_space_start(Isolate* isolate) { } -ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) { - return ExternalReference(isolate->heap()->store_buffer()->TopAddress()); -} - - ExternalReference ExternalReference::new_space_mask(Isolate* isolate) { - return ExternalReference(reinterpret_cast
<Address>( - isolate->heap()->NewSpaceMask())); + Address mask = reinterpret_cast<Address>
(isolate->heap()->NewSpaceMask()); + return ExternalReference(mask); } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index e5661c9f1264..d58034df0d75 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -143,9 +143,6 @@ class Label BASE_EMBEDDED { }; -enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs }; - - // ----------------------------------------------------------------------------- // Relocation information @@ -219,9 +216,8 @@ class RelocInfo BASE_EMBEDDED { RelocInfo() {} - - RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host) - : pc_(pc), rmode_(rmode), data_(data), host_(host) { + RelocInfo(byte* pc, Mode rmode, intptr_t data) + : pc_(pc), rmode_(rmode), data_(data) { } static inline bool IsConstructCall(Mode mode) { @@ -230,9 +226,6 @@ class RelocInfo BASE_EMBEDDED { static inline bool IsCodeTarget(Mode mode) { return mode <= LAST_CODE_ENUM; } - static inline bool IsEmbeddedObject(Mode mode) { - return mode == EMBEDDED_OBJECT; - } // Is the relocation mode affected by GC? static inline bool IsGCRelocMode(Mode mode) { return mode <= LAST_GCED_ENUM; @@ -265,7 +258,6 @@ class RelocInfo BASE_EMBEDDED { void set_pc(byte* pc) { pc_ = pc; } Mode rmode() const { return rmode_; } intptr_t data() const { return data_; } - Code* host() const { return host_; } // Apply a relocation by delta bytes INLINE(void apply(intptr_t delta)); @@ -361,7 +353,6 @@ class RelocInfo BASE_EMBEDDED { byte* pc_; Mode rmode_; intptr_t data_; - Code* host_; #ifdef V8_TARGET_ARCH_MIPS // Code and Embedded Object pointers in mips are stored split // across two consecutive 32-bit instructions. Heap management @@ -570,13 +561,6 @@ class ExternalReference BASE_EMBEDDED { // pattern. This means that they have to be added to the // ExternalReferenceTable in serialize.cc manually. - static ExternalReference incremental_marking_record_write_function( - Isolate* isolate); - static ExternalReference incremental_evacuation_record_write_function( - Isolate* isolate); - static ExternalReference store_buffer_overflow_function( - Isolate* isolate); - static ExternalReference flush_icache_function(Isolate* isolate); static ExternalReference perform_gc_function(Isolate* isolate); static ExternalReference fill_heap_number_with_random_function( Isolate* isolate); @@ -593,6 +577,12 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference keyed_lookup_cache_keys(Isolate* isolate); static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate); + // Static variable Factory::the_hole_value.location() + static ExternalReference the_hole_value_location(Isolate* isolate); + + // Static variable Factory::arguments_marker.location() + static ExternalReference arguments_marker_location(Isolate* isolate); + // Static variable Heap::roots_address() static ExternalReference roots_address(Isolate* isolate); @@ -616,10 +606,6 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference new_space_start(Isolate* isolate); static ExternalReference new_space_mask(Isolate* isolate); static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate); - static ExternalReference new_space_mark_bits(Isolate* isolate); - - // Write barrier. - static ExternalReference store_buffer_top(Isolate* isolate); // Used for fast allocation in generated code. 
static ExternalReference new_space_allocation_top_address(Isolate* isolate); diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index d493814544a5..418cc432b67b 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -327,77 +327,56 @@ bool BinaryOperation::ResultOverwriteAllowed() { } -static bool IsTypeof(Expression* expr) { - UnaryOperation* maybe_unary = expr->AsUnaryOperation(); - return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF; -} - - -// Check for the pattern: typeof equals . -static bool MatchLiteralCompareTypeof(Expression* left, - Token::Value op, - Expression* right, - Expression** expr, - Handle* check) { - if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) { - *expr = left->AsUnaryOperation()->expression(); - *check = Handle::cast(right->AsLiteral()->handle()); - return true; - } - return false; -} - - bool CompareOperation::IsLiteralCompareTypeof(Expression** expr, Handle* check) { - return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) || - MatchLiteralCompareTypeof(right_, op_, left_, expr, check); -} - - -static bool IsVoidOfLiteral(Expression* expr) { - UnaryOperation* maybe_unary = expr->AsUnaryOperation(); - return maybe_unary != NULL && - maybe_unary->op() == Token::VOID && - maybe_unary->expression()->AsLiteral() != NULL; -} - + if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false; + + UnaryOperation* left_unary = left_->AsUnaryOperation(); + UnaryOperation* right_unary = right_->AsUnaryOperation(); + Literal* left_literal = left_->AsLiteral(); + Literal* right_literal = right_->AsLiteral(); + + // Check for the pattern: typeof == . + if (left_unary != NULL && left_unary->op() == Token::TYPEOF && + right_literal != NULL && right_literal->handle()->IsString()) { + *expr = left_unary->expression(); + *check = Handle::cast(right_literal->handle()); + return true; + } -// Check for the pattern: void equals -static bool MatchLiteralCompareUndefined(Expression* left, - Token::Value op, - Expression* right, - Expression** expr) { - if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) { - *expr = right; + // Check for the pattern: == typeof . + if (right_unary != NULL && right_unary->op() == Token::TYPEOF && + left_literal != NULL && left_literal->handle()->IsString()) { + *expr = right_unary->expression(); + *check = Handle::cast(left_literal->handle()); return true; } + return false; } bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) { - return MatchLiteralCompareUndefined(left_, op_, right_, expr) || - MatchLiteralCompareUndefined(right_, op_, left_, expr); -} + if (op_ != Token::EQ_STRICT) return false; + UnaryOperation* left_unary = left_->AsUnaryOperation(); + UnaryOperation* right_unary = right_->AsUnaryOperation(); -// Check for the pattern: null equals -static bool MatchLiteralCompareNull(Expression* left, - Token::Value op, - Expression* right, - Expression** expr) { - if (left->IsNullLiteral() && Token::IsEqualityOp(op)) { - *expr = right; + // Check for the pattern: === void . + if (right_unary != NULL && right_unary->op() == Token::VOID && + right_unary->expression()->AsLiteral() != NULL) { + *expr = left_; return true; } - return false; -} + // Check for the pattern: void === . 
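Illustrative note (not part of the patch): the ast.cc hunks above (and the CompareToNull node reintroduced further down) recognize comparison patterns at compile time so they can get dedicated code. The source shapes involved, with placeholder names:

    if (typeof x == "function") { }   // typeof <expr> against a string literal,
    if ("function" == typeof x) { }   // in either operand order
    if (x === void 0) { }             // strict compare against a void <literal>
    if (x == null) { }                // null compare (a CompareToNull node in the restored AST)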
+ if (left_unary != NULL && left_unary->op() == Token::VOID && + left_unary->expression()->AsLiteral() != NULL) { + *expr = right_; + return true; + } -bool CompareOperation::IsLiteralCompareNull(Expression** expr) { - return MatchLiteralCompareNull(left_, op_, right_, expr) || - MatchLiteralCompareNull(right_, op_, left_, expr); + return false; } @@ -550,9 +529,7 @@ bool Conditional::IsInlineable() const { bool VariableProxy::IsInlineable() const { - return var()->IsUnallocated() - || var()->IsStackAllocated() - || var()->IsContextSlot(); + return var()->IsUnallocated() || var()->IsStackAllocated(); } @@ -621,6 +598,11 @@ bool CompareOperation::IsInlineable() const { } +bool CompareToNull::IsInlineable() const { + return expression()->IsInlineable(); +} + + bool CountOperation::IsInlineable() const { return expression()->IsInlineable(); } @@ -764,41 +746,37 @@ bool Call::ComputeGlobalTarget(Handle global, void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind) { - is_monomorphic_ = oracle->CallIsMonomorphic(this); Property* property = expression()->AsProperty(); - if (property == NULL) { - // Function call. Specialize for monomorphic calls. - if (is_monomorphic_) target_ = oracle->GetCallTarget(this); - } else { - // Method call. Specialize for the receiver types seen at runtime. - Literal* key = property->key()->AsLiteral(); - ASSERT(key != NULL && key->handle()->IsString()); - Handle name = Handle::cast(key->handle()); - receiver_types_.Clear(); - oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_); + ASSERT(property != NULL); + // Specialize for the receiver types seen at runtime. + Literal* key = property->key()->AsLiteral(); + ASSERT(key != NULL && key->handle()->IsString()); + Handle name = Handle::cast(key->handle()); + receiver_types_.Clear(); + oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_); #ifdef DEBUG - if (FLAG_enable_slow_asserts) { - int length = receiver_types_.length(); - for (int i = 0; i < length; i++) { - Handle map = receiver_types_.at(i); - ASSERT(!map.is_null() && *map != NULL); - } + if (FLAG_enable_slow_asserts) { + int length = receiver_types_.length(); + for (int i = 0; i < length; i++) { + Handle map = receiver_types_.at(i); + ASSERT(!map.is_null() && *map != NULL); } + } #endif - check_type_ = oracle->GetCallCheckType(this); - if (is_monomorphic_) { - Handle map; - if (receiver_types_.length() > 0) { - ASSERT(check_type_ == RECEIVER_MAP_CHECK); - map = receiver_types_.at(0); - } else { - ASSERT(check_type_ != RECEIVER_MAP_CHECK); - holder_ = Handle( - oracle->GetPrototypeForPrimitiveCheck(check_type_)); - map = Handle(holder_->map()); - } - is_monomorphic_ = ComputeTarget(map, name); + is_monomorphic_ = oracle->CallIsMonomorphic(this); + check_type_ = oracle->GetCallCheckType(this); + if (is_monomorphic_) { + Handle map; + if (receiver_types_.length() > 0) { + ASSERT(check_type_ == RECEIVER_MAP_CHECK); + map = receiver_types_.at(0); + } else { + ASSERT(check_type_ != RECEIVER_MAP_CHECK); + holder_ = Handle( + oracle->GetPrototypeForPrimitiveCheck(check_type_)); + map = Handle(holder_->map()); } + is_monomorphic_ = ComputeTarget(map, name); } } diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index 0efc4835c4f1..b56205f9a677 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -90,6 +90,7 @@ namespace internal { V(CountOperation) \ V(BinaryOperation) \ V(CompareOperation) \ + V(CompareToNull) \ V(ThisFunction) #define AST_NODE_LIST(V) \ @@ -288,12 +289,6 @@ class Expression: public AstNode { // 
True iff the expression is a literal represented as a smi. virtual bool IsSmiLiteral() { return false; } - // True iff the expression is a string literal. - virtual bool IsStringLiteral() { return false; } - - // True iff the expression is the null literal. - virtual bool IsNullLiteral() { return false; } - // Type feedback information for assignments and properties. virtual bool IsMonomorphic() { UNREACHABLE(); @@ -398,29 +393,31 @@ class Block: public BreakableStatement { class Declaration: public AstNode { public: Declaration(VariableProxy* proxy, - VariableMode mode, + Variable::Mode mode, FunctionLiteral* fun, Scope* scope) : proxy_(proxy), mode_(mode), fun_(fun), scope_(scope) { - ASSERT(mode == VAR || mode == CONST || mode == LET); + ASSERT(mode == Variable::VAR || + mode == Variable::CONST || + mode == Variable::LET); // At the moment there are no "const functions"'s in JavaScript... - ASSERT(fun == NULL || mode == VAR || mode == LET); + ASSERT(fun == NULL || mode == Variable::VAR || mode == Variable::LET); } DECLARE_NODE_TYPE(Declaration) VariableProxy* proxy() const { return proxy_; } - VariableMode mode() const { return mode_; } + Variable::Mode mode() const { return mode_; } FunctionLiteral* fun() const { return fun_; } // may be NULL virtual bool IsInlineable() const; Scope* scope() const { return scope_; } private: VariableProxy* proxy_; - VariableMode mode_; + Variable::Mode mode_; FunctionLiteral* fun_; // Nested scope from which the declaration originated. @@ -894,8 +891,6 @@ class Literal: public Expression { virtual bool IsTrivial() { return true; } virtual bool IsSmiLiteral() { return handle_->IsSmi(); } - virtual bool IsStringLiteral() { return handle_->IsString(); } - virtual bool IsNullLiteral() { return handle_->IsNull(); } // Check if this literal is identical to the other literal. bool IsIdenticalTo(const Literal* other) const { @@ -1470,7 +1465,6 @@ class CompareOperation: public Expression { // Match special cases. bool IsLiteralCompareTypeof(Expression** expr, Handle* check); bool IsLiteralCompareUndefined(Expression** expr); - bool IsLiteralCompareNull(Expression** expr); private: Token::Value op_; @@ -1483,6 +1477,25 @@ class CompareOperation: public Expression { }; +class CompareToNull: public Expression { + public: + CompareToNull(Isolate* isolate, bool is_strict, Expression* expression) + : Expression(isolate), is_strict_(is_strict), expression_(expression) { } + + DECLARE_NODE_TYPE(CompareToNull) + + virtual bool IsInlineable() const; + + bool is_strict() const { return is_strict_; } + Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; } + Expression* expression() const { return expression_; } + + private: + bool is_strict_; + Expression* expression_; +}; + + class Conditional: public Expression { public: Conditional(Isolate* isolate, diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index dc722cb7493a..f07e625ec019 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -34,7 +34,6 @@ #include "debug.h" #include "execution.h" #include "global-handles.h" -#include "isolate-inl.h" #include "macro-assembler.h" #include "natives.h" #include "objects-visiting.h" @@ -996,26 +995,6 @@ void Genesis::InitializeGlobal(Handle inner_global, initial_map->instance_size() + 5 * kPointerSize); initial_map->set_instance_descriptors(*descriptors); initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map)); - - // RegExp prototype object is itself a RegExp. 
- Handle proto_map = factory->CopyMapDropTransitions(initial_map); - proto_map->set_prototype(global_context()->initial_object_prototype()); - Handle proto = factory->NewJSObjectFromMap(proto_map); - proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, - heap->empty_string()); - proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, - heap->false_value()); - proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, - heap->false_value()); - proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, - heap->false_value()); - proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, - Smi::FromInt(0), - SKIP_WRITE_BARRIER); // It's a Smi. - initial_map->set_prototype(*proto); - factory->SetRegExpIrregexpData(Handle::cast(proto), - JSRegExp::IRREGEXP, factory->empty_string(), - JSRegExp::Flags(0), 0); } { // -- J S O N @@ -1097,11 +1076,6 @@ void Genesis::InitializeGlobal(Handle inner_global, elements->set(0, *array); array = factory->NewFixedArray(0); elements->set(1, *array); - Handle non_strict_arguments_elements_map = - factory->GetElementsTransitionMap(result, - NON_STRICT_ARGUMENTS_ELEMENTS); - result->set_map(*non_strict_arguments_elements_map); - ASSERT(result->HasNonStrictArgumentsElements()); result->set_elements(*elements); global_context()->set_aliased_arguments_boilerplate(*result); } @@ -1353,8 +1327,6 @@ void Genesis::InstallNativeFunctions() { configure_instance_fun); INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun); INSTALL_NATIVE(JSObject, "functionCache", function_cache); - INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor", - to_complete_property_descriptor); } void Genesis::InstallExperimentalNativeFunctions() { @@ -1583,18 +1555,6 @@ bool Genesis::InstallNatives() { isolate()->builtins()->builtin(Builtins::kArrayConstructCode)); array_function->shared()->DontAdaptArguments(); - // InternalArrays should not use Smi-Only array optimizations. There are too - // many places in the C++ runtime code (e.g. RegEx) that assume that - // elements in InternalArrays can be set to non-Smi values without going - // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT - // transition easy to trap. Moreover, they rarely are smi-only. - MaybeObject* maybe_map = - array_function->initial_map()->CopyDropTransitions(); - Map* new_map; - if (!maybe_map->To(&new_map)) return maybe_map; - new_map->set_elements_kind(FAST_ELEMENTS); - array_function->set_initial_map(new_map); - // Make "length" magic on instances. 
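Illustrative note (not part of the patch): the bootstrapper block removed above made RegExp.prototype itself a RegExp object with empty source and default flags. Roughly, the JS-visible difference:

    Object.prototype.toString.call(RegExp.prototype);
    // removed version:  "[object RegExp]"
    // restored version: "[object Object]"
    RegExp.prototype.source;   // ""    vs. undefined
    RegExp.prototype.global;   // false vs. undefined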
Handle array_descriptors = factory()->CopyAppendForeignDescriptor( @@ -1978,15 +1938,14 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) { if (!InstallExtension(extension->dependencies()[i])) return false; } Isolate* isolate = Isolate::Current(); - Handle source_code = - isolate->factory()->NewExternalStringFromAscii(extension->source()); - bool result = CompileScriptCached( - CStrVector(extension->name()), - source_code, - isolate->bootstrapper()->extensions_cache(), - extension, - Handle(isolate->context()), - false); + Vector source = CStrVector(extension->source()); + Handle source_code = isolate->factory()->NewStringFromAscii(source); + bool result = CompileScriptCached(CStrVector(extension->name()), + source_code, + isolate->bootstrapper()->extensions_cache(), + extension, + Handle(isolate->context()), + false); ASSERT(isolate->has_pending_exception() != result); if (!result) { isolate->clear_pending_exception(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index d513200f0b93..e6a0699f0769 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -33,7 +33,6 @@ #include "builtins.h" #include "gdb-jit.h" #include "ic-inl.h" -#include "mark-compact.h" #include "vm-state-inl.h" namespace v8 { @@ -203,7 +202,7 @@ BUILTIN(ArrayCodeGeneric) { } // 'array' now contains the JSArray we should initialize. - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastElements()); // Optimize the case where there is one argument and the argument is a // small smi. @@ -216,8 +215,7 @@ BUILTIN(ArrayCodeGeneric) { { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } - MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj)); - if (maybe_obj->IsFailure()) return maybe_obj; + array->SetContent(FixedArray::cast(obj)); return array; } } @@ -241,11 +239,6 @@ BUILTIN(ArrayCodeGeneric) { if (!maybe_obj->ToObject(&obj)) return maybe_obj; } - // Set length and elements on the array. - MaybeObject* maybe_object = - array->EnsureCanContainElements(FixedArray::cast(obj)); - if (maybe_object->IsFailure()) return maybe_object; - AssertNoAllocation no_gc; FixedArray* elms = FixedArray::cast(obj); WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); @@ -254,6 +247,7 @@ BUILTIN(ArrayCodeGeneric) { elms->set(index, args[index+1], mode); } + // Set length and elements on the array. array->set_elements(FixedArray::cast(obj)); array->set_length(len); @@ -301,7 +295,6 @@ static void CopyElements(Heap* heap, if (mode == UPDATE_WRITE_BARRIER) { heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); } - heap->incremental_marking()->RecordWrites(dst); } @@ -320,7 +313,6 @@ static void MoveElements(Heap* heap, if (mode == UPDATE_WRITE_BARRIER) { heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); } - heap->incremental_marking()->RecordWrites(dst); } @@ -366,14 +358,6 @@ static FixedArray* LeftTrimFixedArray(Heap* heap, former_start[to_trim] = heap->fixed_array_map(); former_start[to_trim + 1] = Smi::FromInt(len - to_trim); - // Maintain marking consistency for HeapObjectIterator and - // IncrementalMarking. 
- int size_delta = to_trim * kPointerSize; - if (heap->marking()->TransferMark(elms->address(), - elms->address() + size_delta)) { - MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta); - } - return FixedArray::cast(HeapObject::FromAddress( elms->address() + to_trim * kPointerSize)); } @@ -400,42 +384,20 @@ static bool ArrayPrototypeHasNoElements(Heap* heap, MUST_USE_RESULT static inline MaybeObject* EnsureJSArrayWithWritableFastElements( - Heap* heap, Object* receiver, Arguments* args, int first_added_arg) { + Heap* heap, Object* receiver) { if (!receiver->IsJSArray()) return NULL; JSArray* array = JSArray::cast(receiver); HeapObject* elms = array->elements(); - Map* map = elms->map(); - if (map == heap->fixed_array_map()) { - if (args == NULL || !array->HasFastSmiOnlyElements()) { - return elms; - } - } else if (map == heap->fixed_cow_array_map()) { - MaybeObject* maybe_writable_result = array->EnsureWritableFastElements(); - if (args == NULL || !array->HasFastSmiOnlyElements() || - maybe_writable_result->IsFailure()) { - return maybe_writable_result; - } - } else { - return NULL; + if (elms->map() == heap->fixed_array_map()) return elms; + if (elms->map() == heap->fixed_cow_array_map()) { + return array->EnsureWritableFastElements(); } - - // Need to ensure that the arguments passed in args can be contained in - // the array. - int args_length = args->length(); - if (first_added_arg >= args_length) return array->elements(); - - MaybeObject* maybe_array = array->EnsureCanContainElements( - args, - first_added_arg, - args_length - first_added_arg); - if (maybe_array->IsFailure()) return maybe_array; - return array->elements(); + return NULL; } static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap, JSArray* receiver) { - if (!FLAG_clever_optimizations) return false; Context* global_context = heap->isolate()->context()->global_context(); JSObject* array_proto = JSObject::cast(global_context->array_function()->prototype()); @@ -451,18 +413,20 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin( HandleScope handleScope(isolate); Handle js_builtin = - GetProperty(Handle(isolate->global_context()->builtins()), - name); - Handle function = Handle::cast(js_builtin); - int argc = args.length() - 1; - ScopedVector > argv(argc); - for (int i = 0; i < argc; ++i) { - argv[i] = args.at(i + 1); - } - bool pending_exception; + GetProperty(Handle( + isolate->global_context()->builtins()), + name); + ASSERT(js_builtin->IsJSFunction()); + Handle function(Handle::cast(js_builtin)); + ScopedVector argv(args.length() - 1); + int n_args = args.length() - 1; + for (int i = 0; i < n_args; i++) { + argv[i] = args.at(i + 1).location(); + } + bool pending_exception = false; Handle result = Execution::Call(function, args.receiver(), - argc, + n_args, argv.start(), &pending_exception); if (pending_exception) return Failure::Exception(); @@ -475,7 +439,7 @@ BUILTIN(ArrayPush) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); + EnsureJSArrayWithWritableFastElements(heap, receiver); if (maybe_elms_obj == NULL) { return CallJsBuiltin(isolate, "ArrayPush", args); } @@ -511,6 +475,7 @@ BUILTIN(ArrayPush) { FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; + array->set_elements(elms); } // Add the provided values. 
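Illustrative note (not part of the patch): EnsureJSArrayWithWritableFastElements above also covers the copy-on-write case — arrays built from the same literal share a copy-on-write backing store until one of them is written. A sketch:

    function make() { return [1, 2, 3]; }  // every call starts from the shared COW store
    var a = make();
    a.push(4);      // the store is copied before the in-place push, so...
    make();         // ...later results are still [1, 2, 3]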
@@ -520,10 +485,6 @@ BUILTIN(ArrayPush) { elms->set(index + len, args[index + 1], mode); } - if (elms != array->elements()) { - array->set_elements(elms); - } - // Set the length. array->set_length(Smi::FromInt(new_length)); return Smi::FromInt(new_length); @@ -535,7 +496,7 @@ BUILTIN(ArrayPop) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + EnsureJSArrayWithWritableFastElements(heap, receiver); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; } @@ -568,7 +529,7 @@ BUILTIN(ArrayShift) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + EnsureJSArrayWithWritableFastElements(heap, receiver); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayShift", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; @@ -578,7 +539,7 @@ BUILTIN(ArrayShift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastElements()); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); @@ -590,7 +551,9 @@ BUILTIN(ArrayShift) { } if (!heap->lo_space()->Contains(elms)) { - array->set_elements(LeftTrimFixedArray(heap, elms, 1)); + // As elms still in the same space they used to be, + // there is no need to update region dirty mark. + array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER); } else { // Shift the elements. AssertNoAllocation no_gc; @@ -610,7 +573,7 @@ BUILTIN(ArrayUnshift) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + EnsureJSArrayWithWritableFastElements(heap, receiver); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayUnshift", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; @@ -620,7 +583,7 @@ BUILTIN(ArrayUnshift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastElements()); int len = Smi::cast(array->length())->value(); int to_add = args.length() - 1; @@ -629,10 +592,6 @@ BUILTIN(ArrayUnshift) { // we should never hit this case. ASSERT(to_add <= (Smi::kMaxValue - len)); - MaybeObject* maybe_object = - array->EnsureCanContainElements(&args, 1, to_add); - if (maybe_object->IsFailure()) return maybe_object; - if (new_length > elms->length()) { // New backing storage is needed. 
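Illustrative note (not part of the patch): the ArrayShift hunk above keeps the left-trim trick — rather than moving every element, the backing store's start is bumped forward when it is not in large-object space. Observable behaviour is unchanged; a sketch:

    var a = [1, 2, 3, 4];
    a.shift();   // 1; internally the elements array is trimmed from the left
    a;           // [2, 3, 4]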
int capacity = new_length + (new_length >> 1) + 16; @@ -641,11 +600,13 @@ BUILTIN(ArrayUnshift) { if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* new_elms = FixedArray::cast(obj); + AssertNoAllocation no_gc; if (len > 0) { CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len); } FillWithHoles(heap, new_elms, new_length, capacity); + elms = new_elms; array->set_elements(elms); } else { @@ -673,7 +634,7 @@ BUILTIN(ArraySlice) { int len = -1; if (receiver->IsJSArray()) { JSArray* array = JSArray::cast(receiver); - if (!array->HasFastTypeElements() || + if (!array->HasFastElements() || !IsJSArrayFastElementMovingAllowed(heap, array)) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -689,7 +650,7 @@ BUILTIN(ArraySlice) { bool is_arguments_object_with_fast_elements = receiver->IsJSObject() && JSObject::cast(receiver)->map() == arguments_map - && JSObject::cast(receiver)->HasFastTypeElements(); + && JSObject::cast(receiver)->HasFastElements(); if (!is_arguments_object_with_fast_elements) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -760,10 +721,6 @@ BUILTIN(ArraySlice) { } FixedArray* result_elms = FixedArray::cast(result); - MaybeObject* maybe_object = - result_array->EnsureCanContainElements(result_elms); - if (maybe_object->IsFailure()) return maybe_object; - AssertNoAllocation no_gc; CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len); @@ -781,7 +738,7 @@ BUILTIN(ArraySplice) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); + EnsureJSArrayWithWritableFastElements(heap, receiver); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArraySplice", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; @@ -791,7 +748,7 @@ BUILTIN(ArraySplice) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastElements()); int len = Smi::cast(array->length())->value(); @@ -868,9 +825,9 @@ BUILTIN(ArraySplice) { } int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; + int new_length = len - actual_delete_count + item_count; - bool elms_changed = false; if (item_count < actual_delete_count) { // Shrink the array. const bool trim_array = !heap->lo_space()->Contains(elms) && @@ -885,8 +842,7 @@ BUILTIN(ArraySplice) { } elms = LeftTrimFixedArray(heap, elms, delta); - - elms_changed = true; + array->set_elements(elms, SKIP_WRITE_BARRIER); } else { AssertNoAllocation no_gc; MoveElements(heap, &no_gc, @@ -926,7 +882,7 @@ BUILTIN(ArraySplice) { FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; - elms_changed = true; + array->set_elements(elms); } else { AssertNoAllocation no_gc; MoveElements(heap, &no_gc, @@ -942,10 +898,6 @@ BUILTIN(ArraySplice) { elms->set(k, args[3 + k - actual_start], mode); } - if (elms_changed) { - array->set_elements(elms); - } - // Set the length. 
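Illustrative note (not part of the patch): the slice/splice fast paths above are only legal while Array.prototype and Object.prototype have no elements of their own (ArrayPrototypeHasNoElements / IsJSArrayFastElementMovingAllowed), because a hole would otherwise have to be looked up on the prototype chain. A sketch:

    var a = [1, , 3];            // index 1 is a hole
    a.slice(0)[1];               // undefined via the raw element copy
    Object.prototype[1] = 'x';   // now the prototype chain has an element
    a.slice(0)[1];               // 'x' -- the generic builtin must be used instead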
array->set_length(Smi::FromInt(new_length)); @@ -968,7 +920,7 @@ BUILTIN(ArrayConcat) { int result_len = 0; for (int i = 0; i < n_arguments; i++) { Object* arg = args[i]; - if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements() + if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements() || JSArray::cast(arg)->GetPrototype() != array_proto) { return CallJsBuiltin(isolate, "ArrayConcat", args); } @@ -1004,17 +956,6 @@ BUILTIN(ArrayConcat) { } FixedArray* result_elms = FixedArray::cast(result); - // Ensure element type transitions happen before copying elements in. - if (result_array->HasFastSmiOnlyElements()) { - for (int i = 0; i < n_arguments; i++) { - JSArray* array = JSArray::cast(args[i]); - if (!array->HasFastSmiOnlyElements()) { - result_array->EnsureCanContainNonSmiElements(); - break; - } - } - } - // Copy data. AssertNoAllocation no_gc; int start_pos = 0; @@ -1666,22 +1607,20 @@ void Builtins::Setup(bool create_heap_objects) { const BuiltinDesc* functions = BuiltinFunctionTable::functions(); // For now we generate builtin adaptor code into a stack-allocated - // buffer, before copying it into individual code objects. Be careful - // with alignment, some platforms don't like unaligned code. - union { int force_alignment; byte buffer[4*KB]; } u; + // buffer, before copying it into individual code objects. + byte buffer[4*KB]; // Traverse the list of builtins and generate an adaptor in a // separate code object for each one. for (int i = 0; i < builtin_count; i++) { if (create_heap_objects) { - MacroAssembler masm(isolate, u.buffer, sizeof u.buffer); + MacroAssembler masm(isolate, buffer, sizeof buffer); // Generate the code/adaptor. typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments); Generator g = FUNCTION_CAST(functions[i].generator); // We pass all arguments to the generator, but it may not use all of // them. This works because the first arguments are on top of the // stack. - ASSERT(!masm.has_frame()); g(&masm, functions[i].name, functions[i].extra_args); // Move the code into the object heap. CodeDesc desc; diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc index 9241d265826b..30a67a661b41 100644 --- a/deps/v8/src/cached-powers.cc +++ b/deps/v8/src/cached-powers.cc @@ -134,12 +134,14 @@ static const CachedPower kCachedPowers[] = { }; static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers); -static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent. +static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent; static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10) -// Difference between the decimal exponents in the table above. 
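Illustrative note (not part of the patch): the ArrayConcat hunk above drops the pre-copy check that widened a smi-only result array when any argument holds non-smi values. The situation it handled, sketched in JS:

    var smis = [1, 2];
    var mixed = [3, {}];
    smis.concat(mixed);   // [1, 2, 3, {}]; the result store must be able to hold
                          // the object element before the raw element copy runs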
-const int PowersOfTenCache::kDecimalExponentDistance = 8; -const int PowersOfTenCache::kMinDecimalExponent = -348; -const int PowersOfTenCache::kMaxDecimalExponent = 340; +const int PowersOfTenCache::kDecimalExponentDistance = + kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent; +const int PowersOfTenCache::kMinDecimalExponent = + kCachedPowers[0].decimal_exponent; +const int PowersOfTenCache::kMaxDecimalExponent = + kCachedPowers[kCachedPowersLength - 1].decimal_exponent; void PowersOfTenCache::GetCachedPowerForBinaryExponentRange( int min_exponent, diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 4bc2603c5348..00da4cba623a 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -52,12 +52,11 @@ void CodeStub::GenerateCode(MacroAssembler* masm) { // Update the static counter each time a new code stub is generated. masm->isolate()->counters()->code_stubs()->Increment(); - // Nested stubs are not allowed for leaves. - AllowStubCallsScope allow_scope(masm, false); + // Nested stubs are not allowed for leafs. + AllowStubCallsScope allow_scope(masm, AllowsStubCalls()); // Generate the code for the stub. masm->set_generating_stub(true); - NoCurrentFrameScope scope(masm); Generate(masm); } @@ -128,10 +127,8 @@ Handle CodeStub::GetCode() { GetKey(), new_object); heap->public_set_code_stubs(*dict); + code = *new_object; - Activate(code); - } else { - CHECK(IsPregenerated() == code->is_pregenerated()); } ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code)); @@ -169,11 +166,7 @@ MaybeObject* CodeStub::TryGetCode() { heap->code_stubs()->AtNumberPut(GetKey(), code); if (maybe_new_object->ToObject(&new_object)) { heap->public_set_code_stubs(NumberDictionary::cast(new_object)); - } else if (MustBeInStubCache()) { - return maybe_new_object; } - - Activate(code); } return code; @@ -195,11 +188,6 @@ const char* CodeStub::MajorName(CodeStub::Major major_key, } -void CodeStub::PrintName(StringStream* stream) { - stream->Add("%s", MajorName(MajorKey(), false)); -} - - int ICCompareStub::MinorKey() { return OpField::encode(op_ - Token::EQ) | StateField::encode(state_); } @@ -257,7 +245,6 @@ void InstanceofStub::PrintName(StringStream* stream) { void KeyedLoadElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: KeyedLoadStubCompiler::GenerateLoadFastElement(masm); break; case FAST_DOUBLE_ELEMENTS: @@ -287,11 +274,7 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) { void KeyedStoreElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: { - KeyedStoreStubCompiler::GenerateStoreFastElement(masm, - is_js_array_, - elements_kind_); - } + KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_); break; case FAST_DOUBLE_ELEMENTS: KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, @@ -319,20 +302,24 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) { void ArgumentsAccessStub::PrintName(StringStream* stream) { - stream->Add("ArgumentsAccessStub_"); + const char* type_name = NULL; // Make g++ happy. 
switch (type_) { - case READ_ELEMENT: stream->Add("ReadElement"); break; - case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break; - case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break; - case NEW_STRICT: stream->Add("NewStrict"); break; + case READ_ELEMENT: type_name = "ReadElement"; break; + case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break; + case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break; + case NEW_STRICT: type_name = "NewStrict"; break; } + stream->Add("ArgumentsAccessStub_%s", type_name); } void CallFunctionStub::PrintName(StringStream* stream) { - stream->Add("CallFunctionStub_Args%d", argc_); - if (ReceiverMightBeImplicit()) stream->Add("_Implicit"); - if (RecordCallTarget()) stream->Add("_Recording"); + const char* flags_name = NULL; // Make g++ happy. + switch (flags_) { + case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break; + case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break; + } + stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name); } diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index acfbd469f0ff..64c89b93d13d 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -45,23 +45,27 @@ namespace internal { V(Compare) \ V(CompareIC) \ V(MathPow) \ - V(RecordWrite) \ - V(StoreBufferOverflow) \ - V(RegExpExec) \ V(TranscendentalCache) \ V(Instanceof) \ + /* All stubs above this line only exist in a few versions, which are */ \ + /* generated ahead of time. Therefore compiling a call to one of */ \ + /* them can't cause a new stub to be compiled, so compiling a call to */ \ + /* them is GC safe. The ones below this line exist in many variants */ \ + /* so code compiling a call to one can cause a GC. This means they */ \ + /* can't be called from other stubs, since stub generation code is */ \ + /* not GC safe. */ \ V(ConvertToDouble) \ V(WriteInt32ToHeapNumber) \ V(StackCheck) \ V(FastNewClosure) \ V(FastNewContext) \ - V(FastNewBlockContext) \ V(FastCloneShallowArray) \ V(RevertToNumber) \ V(ToBoolean) \ V(ToNumber) \ V(CounterOp) \ V(ArgumentsAccess) \ + V(RegExpExec) \ V(RegExpConstructResult) \ V(NumberToString) \ V(CEntry) \ @@ -69,7 +73,7 @@ namespace internal { V(KeyedLoadElement) \ V(KeyedStoreElement) \ V(DebuggerStatement) \ - V(StringDictionaryLookup) + V(StringDictionaryNegativeLookup) // List of code stubs only used on ARM platforms. #ifdef V8_TARGET_ARCH_ARM @@ -138,27 +142,6 @@ class CodeStub BASE_EMBEDDED { virtual ~CodeStub() {} - bool CompilingCallsToThisStubIsGCSafe() { - bool is_pregenerated = IsPregenerated(); - Code* code = NULL; - CHECK(!is_pregenerated || FindCodeInCache(&code)); - return is_pregenerated; - } - - // See comment above, where Instanceof is defined. - virtual bool IsPregenerated() { return false; } - - static void GenerateStubsAheadOfTime(); - static void GenerateFPStubs(); - - // Some stubs put untagged junk on the stack that cannot be scanned by the - // GC. This means that we must be statically sure that no GC can occur while - // they are running. If that is the case they should override this to return - // true, which will cause an assertion if we try to call something that can - // GC or if we try to put a stack frame on top of the junk, which would not - // result in a traversable stack. 
- virtual bool SometimesSetsUpAFrame() { return true; } - protected: static const int kMajorBits = 6; static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits; @@ -181,14 +164,6 @@ class CodeStub BASE_EMBEDDED { // Finish the code object after it has been generated. virtual void FinishCode(Code* code) { } - // Returns true if TryGetCode should fail if it failed - // to register newly generated stub in the stub cache. - virtual bool MustBeInStubCache() { return false; } - - // Activate newly generated stub. Is called after - // registering stub in the stub cache. - virtual void Activate(Code* code) { } - // Returns information for computing the number key. virtual Major MajorKey() = 0; virtual int MinorKey() = 0; @@ -203,7 +178,9 @@ class CodeStub BASE_EMBEDDED { // Returns a name for logging/debugging purposes. SmartArrayPointer GetName(); - virtual void PrintName(StringStream* stream); + virtual void PrintName(StringStream* stream) { + stream->Add("%s", MajorName(MajorKey(), false)); + } // Returns whether the code generated for this stub needs to be allocated as // a fixed (non-moveable) code object. @@ -216,6 +193,9 @@ class CodeStub BASE_EMBEDDED { MajorKeyBits::encode(MajorKey()); } + // See comment above, where Instanceof is defined. + bool AllowsStubCalls() { return MajorKey() <= Instanceof; } + class MajorKeyBits: public BitField {}; class MinorKeyBits: public BitField {}; @@ -324,7 +304,7 @@ class FastNewContextStub : public CodeStub { static const int kMaximumSlots = 64; explicit FastNewContextStub(int slots) : slots_(slots) { - ASSERT(slots_ > 0 && slots_ <= kMaximumSlots); + ASSERT(slots_ > 0 && slots <= kMaximumSlots); } void Generate(MacroAssembler* masm); @@ -337,24 +317,6 @@ class FastNewContextStub : public CodeStub { }; -class FastNewBlockContextStub : public CodeStub { - public: - static const int kMaximumSlots = 64; - - explicit FastNewBlockContextStub(int slots) : slots_(slots) { - ASSERT(slots_ > 0 && slots_ <= kMaximumSlots); - } - - void Generate(MacroAssembler* masm); - - private: - int slots_; - - Major MajorKey() { return FastNewBlockContext; } - int MinorKey() { return slots_; } -}; - - class FastCloneShallowArrayStub : public CodeStub { public: // Maximum length of copied elements array. @@ -569,18 +531,11 @@ class CompareStub: public CodeStub { class CEntryStub : public CodeStub { public: - explicit CEntryStub(int result_size, - SaveFPRegsMode save_doubles = kDontSaveFPRegs) - : result_size_(result_size), save_doubles_(save_doubles) { } + explicit CEntryStub(int result_size) + : result_size_(result_size), save_doubles_(false) { } void Generate(MacroAssembler* masm); - - // The version of this stub that doesn't save doubles is generated ahead of - // time, so it's OK to call it from other stubs that can't cope with GC during - // their code generation. On machines that always have gp registers (x64) we - // can generate both variants ahead of time. - virtual bool IsPregenerated(); - static void GenerateAheadOfTime(); + void SaveDoubles() { save_doubles_ = true; } private: void GenerateCore(MacroAssembler* masm, @@ -595,7 +550,7 @@ class CEntryStub : public CodeStub { // Number of pointers/values returned. 
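The GetKey() body above packs a stub's minor and major keys into one integer with V8's BitField helper, and the per-stub MinorKey() encodings further down (for example CallFunctionStub's FlagBits and ArgcBits) do the same for stub parameters. A simplified stand-in for that helper, with field positions and widths chosen purely for illustration:

#include <cstdio>

// Not V8's BitField; just enough of the same shape to show encode/decode.
template <class T, int shift, int size>
class BitField {
 public:
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned value) {
    const unsigned mask = ((1u << size) - 1) << shift;
    return static_cast<T>((value & mask) >> shift);
  }
};

enum CallFunctionFlags {
  NO_CALL_FUNCTION_FLAGS = 0,
  RECEIVER_MIGHT_BE_IMPLICIT = 1
};

typedef BitField<CallFunctionFlags, 0, 1> FlagBits;  // one flag bit
typedef BitField<int, 1, 30> ArgcBits;               // argument count above it

static int MinorKey(CallFunctionFlags flags, int argc) {
  return static_cast<int>(FlagBits::encode(flags) | ArgcBits::encode(argc));
}

int main() {
  const unsigned key = static_cast<unsigned>(
      MinorKey(RECEIVER_MIGHT_BE_IMPLICIT, 3));
  std::printf("key=%u argc=%d implicit=%d\n", key,
              ArgcBits::decode(key),
              FlagBits::decode(key) == RECEIVER_MIGHT_BE_IMPLICIT);
  return 0;
}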
const int result_size_; - SaveFPRegsMode save_doubles_; + bool save_doubles_; Major MajorKey() { return CEntry; } int MinorKey(); @@ -692,32 +647,10 @@ class CallFunctionStub: public CodeStub { void Generate(MacroAssembler* masm); - virtual void FinishCode(Code* code); - - static void Clear(Heap* heap, Address address); - - static Object* GetCachedValue(Address address); - static int ExtractArgcFromMinorKey(int minor_key) { return ArgcBits::decode(minor_key); } - // The object that indicates an uninitialized cache. - static Handle UninitializedSentinel(Isolate* isolate) { - return isolate->factory()->the_hole_value(); - } - - // A raw version of the uninitialized sentinel that's safe to read during - // garbage collection (e.g., for patching the cache). - static Object* RawUninitializedSentinel(Heap* heap) { - return heap->raw_unchecked_the_hole_value(); - } - - // The object that indicates a megamorphic state. - static Handle MegamorphicSentinel(Isolate* isolate) { - return isolate->factory()->undefined_value(); - } - private: int argc_; CallFunctionFlags flags_; @@ -725,8 +658,8 @@ class CallFunctionStub: public CodeStub { virtual void PrintName(StringStream* stream); // Minor key encoding in 32 bits with Bitfield . - class FlagBits: public BitField {}; - class ArgcBits: public BitField {}; + class FlagBits: public BitField {}; + class ArgcBits: public BitField {}; Major MajorKey() { return CallFunction; } int MinorKey() { @@ -737,10 +670,6 @@ class CallFunctionStub: public CodeStub { bool ReceiverMightBeImplicit() { return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0; } - - bool RecordCallTarget() { - return (flags_ & RECORD_CALL_TARGET) != 0; - } }; @@ -1005,8 +934,6 @@ class ToBooleanStub: public CodeStub { virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; } virtual void PrintName(StringStream* stream); - virtual bool SometimesSetsUpAFrame() { return false; } - private: Major MajorKey() { return ToBoolean; } int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); } diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index ceea7b9feabd..cdc9ba155327 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -218,8 +218,8 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) { int CEntryStub::MinorKey() { - int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0; ASSERT(result_size_ == 1 || result_size_ == 2); + int result = save_doubles_ ? 1 : 0; #ifdef _WIN64 return result | ((result_size_ == 1) ? 0 : 2); #else diff --git a/deps/v8/src/compiler-intrinsics.h b/deps/v8/src/compiler-intrinsics.h deleted file mode 100644 index 3b9c59ea537e..000000000000 --- a/deps/v8/src/compiler-intrinsics.h +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_COMPILER_INTRINSICS_H_ -#define V8_COMPILER_INTRINSICS_H_ - -namespace v8 { -namespace internal { - -class CompilerIntrinsics { - public: - // Returns number of zero bits preceding least significant 1 bit. - // Undefined for zero value. - INLINE(static int CountTrailingZeros(uint32_t value)); - - // Returns number of zero bits following most significant 1 bit. - // Undefined for zero value. - INLINE(static int CountLeadingZeros(uint32_t value)); -}; - -#ifdef __GNUC__ -int CompilerIntrinsics::CountTrailingZeros(uint32_t value) { - return __builtin_ctz(value); -} - -int CompilerIntrinsics::CountLeadingZeros(uint32_t value) { - return __builtin_clz(value); -} - -#elif defined(_MSC_VER) - -#pragma intrinsic(_BitScanForward) -#pragma intrinsic(_BitScanReverse) - -int CompilerIntrinsics::CountTrailingZeros(uint32_t value) { - unsigned long result; //NOLINT - _BitScanForward(&result, static_cast(value)); //NOLINT - return static_cast(result); -} - -int CompilerIntrinsics::CountLeadingZeros(uint32_t value) { - unsigned long result; //NOLINT - _BitScanReverse(&result, static_cast(value)); //NOLINT - return 31 - static_cast(result); -} - -#else -#error Unsupported compiler -#endif - -} } // namespace v8::internal - -#endif // V8_COMPILER_INTRINSICS_H_ diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 4979a7f86638..5e1c4a9789dd 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -36,7 +36,6 @@ #include "full-codegen.h" #include "gdb-jit.h" #include "hydrogen.h" -#include "isolate-inl.h" #include "lithium.h" #include "liveedit.h" #include "parser.h" @@ -276,7 +275,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { } Handle global_context(info->closure()->context()->global_context()); - TypeFeedbackOracle oracle(code, global_context, info->isolate()); + TypeFeedbackOracle oracle(code, global_context); HGraphBuilder builder(info, &oracle); HPhase phase(HPhase::kTotal); HGraph* graph = builder.CreateGraph(); @@ -480,7 +479,8 @@ Handle Compiler::Compile(Handle source, // that would be compiled lazily anyway, so we skip the preparse step // in that case too. 
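The deleted compiler-intrinsics.h above is a thin dispatch over compiler builtins (__builtin_ctz/__builtin_clz on GCC, _BitScanForward/_BitScanReverse on MSVC). A portable, loop-based sketch of the same two operations, slower than the intrinsics and, like them, undefined for a zero input:

#include <cstdio>
#include <stdint.h>

static int CountTrailingZeros(uint32_t value) {
  int count = 0;
  while ((value & 1u) == 0) {  // shift until the lowest set bit reaches bit 0
    value >>= 1;
    ++count;
  }
  return count;
}

static int CountLeadingZeros(uint32_t value) {
  int count = 0;
  while ((value & 0x80000000u) == 0) {  // shift until the highest set bit reaches bit 31
    value <<= 1;
    ++count;
  }
  return count;
}

int main() {
  std::printf("ctz(0x50)=%d clz(0x50)=%d\n",
              CountTrailingZeros(0x50u), CountLeadingZeros(0x50u));
  return 0;
}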
ScriptDataImpl* pre_data = input_pre_data; - bool harmony_scoping = natives != NATIVES_CODE && FLAG_harmony_scoping; + bool harmony_block_scoping = natives != NATIVES_CODE && + FLAG_harmony_block_scoping; if (pre_data == NULL && source_length >= FLAG_min_preparse_length) { if (source->IsExternalTwoByteString()) { @@ -488,12 +488,12 @@ Handle Compiler::Compile(Handle source, Handle::cast(source), 0, source->length()); pre_data = ParserApi::PartialPreParse(&stream, extension, - harmony_scoping); + harmony_block_scoping); } else { GenericStringUC16CharacterStream stream(source, 0, source->length()); pre_data = ParserApi::PartialPreParse(&stream, extension, - harmony_scoping); + harmony_block_scoping); } } @@ -516,9 +516,6 @@ Handle Compiler::Compile(Handle source, info.MarkAsGlobal(); info.SetExtension(extension); info.SetPreParseData(pre_data); - if (natives == NATIVES_CODE) { - info.MarkAsAllowingNativesSyntax(); - } result = MakeFunctionInfo(&info); if (extension == NULL && !result.is_null()) { compilation_cache->PutScript(source, result); diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index 09aa23dec984..69ab27d9c819 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -83,12 +83,6 @@ class CompilationInfo BASE_EMBEDDED { ASSERT(is_lazy()); flags_ |= IsInLoop::encode(true); } - void MarkAsAllowingNativesSyntax() { - flags_ |= IsNativesSyntaxAllowed::encode(true); - } - bool allows_natives_syntax() const { - return IsNativesSyntaxAllowed::decode(flags_); - } void MarkAsNative() { flags_ |= IsNative::encode(true); } @@ -199,8 +193,6 @@ class CompilationInfo BASE_EMBEDDED { class IsInLoop: public BitField {}; // Strict mode - used in eager compilation. class IsStrictMode: public BitField {}; - // Native syntax (%-stuff) allowed? - class IsNativesSyntaxAllowed: public BitField {}; // Is this a function from our natives. class IsNative: public BitField {}; diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc index 0cda43049255..4f93abdff1d0 100644 --- a/deps/v8/src/contexts.cc +++ b/deps/v8/src/contexts.cc @@ -86,14 +86,14 @@ void Context::set_global_proxy(JSObject* object) { Handle Context::Lookup(Handle name, ContextLookupFlags flags, - int* index, + int* index_, PropertyAttributes* attributes, BindingFlags* binding_flags) { Isolate* isolate = GetIsolate(); Handle context(this, isolate); bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0; - *index = -1; + *index_ = -1; *attributes = ABSENT; *binding_flags = MISSING_BINDING; @@ -110,50 +110,70 @@ Handle Context::Lookup(Handle name, PrintF("\n"); } - // 1. Check global objects, subjects of with, and extension objects. - if (context->IsGlobalContext() || - context->IsWithContext() || - (context->IsFunctionContext() && context->has_extension())) { - Handle object(JSObject::cast(context->extension()), isolate); - // Context extension objects needs to behave as if they have no - // prototype. So even if we want to follow prototype chains, we need - // to only do a local lookup for context extension objects. - if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 || - object->IsJSContextExtensionObject()) { - *attributes = object->GetLocalPropertyAttribute(*name); + // Check extension/with/global object. + if (!context->IsBlockContext() && context->has_extension()) { + if (context->IsCatchContext()) { + // Catch contexts have the variable name in the extension slot. 
+ if (name->Equals(String::cast(context->extension()))) { + if (FLAG_trace_contexts) { + PrintF("=> found in catch context\n"); + } + *index_ = Context::THROWN_OBJECT_INDEX; + *attributes = NONE; + *binding_flags = MUTABLE_IS_INITIALIZED; + return context; + } } else { - *attributes = object->GetPropertyAttribute(*name); - } - if (*attributes != ABSENT) { - if (FLAG_trace_contexts) { - PrintF("=> found property in context object %p\n", - reinterpret_cast(*object)); + ASSERT(context->IsGlobalContext() || + context->IsFunctionContext() || + context->IsWithContext()); + // Global, function, and with contexts may have an object in the + // extension slot. + Handle extension(JSObject::cast(context->extension()), + isolate); + // Context extension objects needs to behave as if they have no + // prototype. So even if we want to follow prototype chains, we + // need to only do a local lookup for context extension objects. + if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 || + extension->IsJSContextExtensionObject()) { + *attributes = extension->GetLocalPropertyAttribute(*name); + } else { + *attributes = extension->GetPropertyAttribute(*name); + } + if (*attributes != ABSENT) { + // property found + if (FLAG_trace_contexts) { + PrintF("=> found property in context object %p\n", + reinterpret_cast(*extension)); + } + return extension; } - return object; } } - // 2. Check the context proper if it has slots. + // Check serialized scope information of functions and blocks. Only + // functions can have parameters, and a function name. if (context->IsFunctionContext() || context->IsBlockContext()) { - // Use serialized scope information of functions and blocks to search - // for the context index. + // We may have context-local slots. Check locals in the context. Handle scope_info; if (context->IsFunctionContext()) { scope_info = Handle( context->closure()->shared()->scope_info(), isolate); } else { + ASSERT(context->IsBlockContext()); scope_info = Handle( SerializedScopeInfo::cast(context->extension()), isolate); } - VariableMode mode; - int slot_index = scope_info->ContextSlotIndex(*name, &mode); - ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS); - if (slot_index >= 0) { + + Variable::Mode mode; + int index = scope_info->ContextSlotIndex(*name, &mode); + ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS); + if (index >= 0) { if (FLAG_trace_contexts) { PrintF("=> found local in context slot %d (mode = %d)\n", - slot_index, mode); + index, mode); } - *index = slot_index; + *index_ = index; // Note: Fixed context slots are statically allocated by the compiler. // Statically allocated variables always have a statically known mode, // which is the mode with which they were declared when added to the @@ -161,23 +181,23 @@ Handle Context::Lookup(Handle name, // declared variables that were introduced through declaration nodes) // must not appear here. switch (mode) { - case INTERNAL: // Fall through. - case VAR: + case Variable::INTERNAL: // Fall through. 
+ case Variable::VAR: *attributes = NONE; *binding_flags = MUTABLE_IS_INITIALIZED; break; - case LET: + case Variable::LET: *attributes = NONE; *binding_flags = MUTABLE_CHECK_INITIALIZED; break; - case CONST: + case Variable::CONST: *attributes = READ_ONLY; *binding_flags = IMMUTABLE_CHECK_INITIALIZED; break; - case DYNAMIC: - case DYNAMIC_GLOBAL: - case DYNAMIC_LOCAL: - case TEMPORARY: + case Variable::DYNAMIC: + case Variable::DYNAMIC_GLOBAL: + case Variable::DYNAMIC_LOCAL: + case Variable::TEMPORARY: UNREACHABLE(); break; } @@ -186,34 +206,22 @@ Handle Context::Lookup(Handle name, // Check the slot corresponding to the intermediate context holding // only the function name variable. - if (follow_context_chain && context->IsFunctionContext()) { - int function_index = scope_info->FunctionContextSlotIndex(*name); - if (function_index >= 0) { + if (follow_context_chain) { + int index = scope_info->FunctionContextSlotIndex(*name); + if (index >= 0) { if (FLAG_trace_contexts) { PrintF("=> found intermediate function in context slot %d\n", - function_index); + index); } - *index = function_index; + *index_ = index; *attributes = READ_ONLY; *binding_flags = IMMUTABLE_IS_INITIALIZED; return context; } } - - } else if (context->IsCatchContext()) { - // Catch contexts have the variable name in the extension slot. - if (name->Equals(String::cast(context->extension()))) { - if (FLAG_trace_contexts) { - PrintF("=> found in catch context\n"); - } - *index = Context::THROWN_OBJECT_INDEX; - *attributes = NONE; - *binding_flags = MUTABLE_IS_INITIALIZED; - return context; - } } - // 3. Prepare to continue with the previous (next outermost) context. + // Proceed with the previous context. if (context->IsGlobalContext()) { follow_context_chain = false; } else { @@ -245,7 +253,7 @@ bool Context::GlobalIfNotShadowedByEval(Handle name) { // Check non-parameter locals. Handle scope_info( context->closure()->shared()->scope_info()); - VariableMode mode; + Variable::Mode mode; int index = scope_info->ContextSlotIndex(*name, &mode); ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS); if (index >= 0) return false; diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index b80475f0f7c4..505f86c8ca5b 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -134,8 +134,6 @@ enum BindingFlags { V(MAP_CACHE_INDEX, Object, map_cache) \ V(CONTEXT_DATA_INDEX, Object, data) \ V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \ - V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \ - to_complete_property_descriptor) \ V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \ V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) @@ -254,7 +252,6 @@ class Context: public FixedArray { OUT_OF_MEMORY_INDEX, CONTEXT_DATA_INDEX, ALLOW_CODE_GEN_FROM_STRINGS_INDEX, - TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, DERIVED_HAS_TRAP_INDEX, DERIVED_GET_TRAP_INDEX, DERIVED_SET_TRAP_INDEX, @@ -333,6 +330,12 @@ class Context: public FixedArray { // Mark the global context with out of memory. inline void mark_out_of_memory(); + // The exception holder is the object used as a with object in + // the implementation of a catch block. + bool is_exception_holder(Object* object) { + return IsCatchContext() && extension() == object; + } + // A global context hold a list of all functions which have been optimized. 
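Stripped of V8's context kinds, extension objects, and serialized scope info, the Context::Lookup code rearranged above is one loop that walks outward from the innermost context until a binding for the name is found. A toy model of that chain walk with plain structs, included only for orientation; none of the types below are V8's:

#include <cstdio>
#include <cstring>

struct Scope {
  const char* names[4];   // names bound here, terminated by NULL
  const Scope* previous;  // enclosing scope, NULL for the outermost one
};

// Returns the scope that binds the name and its slot index, or NULL and -1.
static const Scope* Lookup(const Scope* scope, const char* name, int* index) {
  for (; scope != NULL; scope = scope->previous) {
    for (int i = 0; scope->names[i] != NULL; i++) {
      if (std::strcmp(scope->names[i], name) == 0) {
        *index = i;
        return scope;
      }
    }
  }
  *index = -1;
  return NULL;
}

int main() {
  const Scope global   = { { "print", NULL }, NULL };
  const Scope function = { { "x", "y", NULL }, &global };
  const Scope block    = { { "e", NULL }, &function };
  int index;
  const Scope* found = Lookup(&block, "y", &index);
  std::printf("found in function scope: %d, slot index: %d\n",
              found == &function, index);
  return 0;
}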
void AddOptimizedFunction(JSFunction* function); void RemoveOptimizedFunction(JSFunction* function); @@ -352,25 +355,29 @@ class Context: public FixedArray { #undef GLOBAL_CONTEXT_FIELD_ACCESSORS // Lookup the the slot called name, starting with the current context. - // There are three possibilities: + // There are 4 possible outcomes: + // + // 1) index_ >= 0 && result->IsContext(): + // most common case, the result is a Context, and index is the + // context slot index, and the slot exists. + // attributes == READ_ONLY for the function name variable, NONE otherwise. // - // 1) result->IsContext(): - // The binding was found in a context. *index is always the - // non-negative slot index. *attributes is NONE for var and let - // declarations, READ_ONLY for const declarations (never ABSENT). + // 2) index_ >= 0 && result->IsJSObject(): + // the result is the JSObject arguments object, the index is the parameter + // index, i.e., key into the arguments object, and the property exists. + // attributes != ABSENT. // - // 2) result->IsJSObject(): - // The binding was found as a named property in a context extension - // object (i.e., was introduced via eval), as a property on the subject - // of with, or as a property of the global object. *index is -1 and - // *attributes is not ABSENT. + // 3) index_ < 0 && result->IsJSObject(): + // the result is the JSObject extension context or the global object, + // and the name is the property name, and the property exists. + // attributes != ABSENT. // - // 3) result.is_null(): - // There was no binding found, *index is always -1 and *attributes is - // always ABSENT. + // 4) index_ < 0 && result.is_null(): + // there was no context found with the corresponding property. + // attributes == ABSENT. Handle Lookup(Handle name, ContextLookupFlags flags, - int* index, + int* index_, PropertyAttributes* attributes, BindingFlags* binding_flags); diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index 8bc11bf83dcb..41cf0d54c219 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -47,7 +47,7 @@ namespace v8 { namespace internal { static inline double JunkStringValue() { - return BitCast(kQuietNaNMask); + return std::numeric_limits::quiet_NaN(); } diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h index 31aaf6b73770..e51ad6501cb7 100644 --- a/deps/v8/src/conversions.h +++ b/deps/v8/src/conversions.h @@ -28,6 +28,8 @@ #ifndef V8_CONVERSIONS_H_ #define V8_CONVERSIONS_H_ +#include + #include "utils.h" namespace v8 { diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc index 8fbc876dabbd..adefba73227c 100644 --- a/deps/v8/src/d8-debug.cc +++ b/deps/v8/src/d8-debug.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2008 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,7 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
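The conversions-inl.h hunk above switches between two ways of manufacturing the quiet NaN returned for junk strings: taking it from <limits>, or bit-casting a NaN bit pattern. Both are shown below; the bit pattern used here is a generic IEEE-754 quiet NaN, not necessarily the exact kQuietNaNMask value V8 defines:

#include <cstdio>
#include <cstring>
#include <limits>
#include <stdint.h>

static double QuietNaNFromLimits() {
  return std::numeric_limits<double>::quiet_NaN();
}

static double QuietNaNFromBits() {
  // Exponent all ones plus the top mantissa bit set marks a quiet NaN.
  const uint64_t kQuietNaNBits = 0x7FF8000000000000ULL;
  double result;
  std::memcpy(&result, &kQuietNaNBits, sizeof(result));  // portable bit cast
  return result;
}

int main() {
  const double a = QuietNaNFromLimits();
  const double b = QuietNaNFromBits();
  // NaN is the only value that compares unequal to itself.
  std::printf("limits NaN: %d, bit-cast NaN: %d\n", a != a, b != b);
  return 0;
}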
-#ifdef ENABLE_DEBUGGER_SUPPORT #include "d8.h" #include "d8-debug.h" @@ -368,5 +367,3 @@ void KeyboardThread::Run() { } // namespace v8 - -#endif // ENABLE_DEBUGGER_SUPPORT diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index a516576fafda..55f0d4c2ab57 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -146,11 +146,11 @@ bool Shell::ExecuteString(Handle source, Handle name, bool print_result, bool report_exceptions) { -#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) +#ifndef V8_SHARED bool FLAG_debugger = i::FLAG_debugger; #else bool FLAG_debugger = false; -#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT +#endif // V8_SHARED HandleScope handle_scope; TryCatch try_catch; options.script_executed = true; @@ -594,7 +594,6 @@ void Shell::InstallUtilityScript() { Context::Scope utility_scope(utility_context_); #ifdef ENABLE_DEBUGGER_SUPPORT - if (i::FLAG_debugger) printf("JavaScript debugger enabled\n"); // Install the debugger object in the utility scope i::Debug* debug = i::Isolate::Current()->debug(); debug->Load(); @@ -817,7 +816,7 @@ void Shell::OnExit() { static FILE* FOpen(const char* path, const char* mode) { -#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64)) +#if (defined(_WIN32) || defined(_WIN64)) FILE* result; if (fopen_s(&result, path, mode) == 0) { return result; @@ -901,6 +900,9 @@ void Shell::RunShell() { #ifndef V8_SHARED console = LineEditor::Get(); printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name()); + if (i::FLAG_debugger) { + printf("JavaScript debugger enabled\n"); + } console->Open(); while (true) { i::SmartArrayPointer input = console->Prompt(Shell::kPrompt); @@ -1251,22 +1253,14 @@ int Shell::RunMain(int argc, char* argv[]) { Locker lock; HandleScope scope; Persistent context = CreateEvaluationContext(); - if (options.last_run) { - // Keep using the same context in the interactive shell. - evaluation_context_ = context; -#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) - // If the interactive debugger is enabled make sure to activate - // it before running the files passed on the command line. - if (i::FLAG_debugger) { - InstallUtilityScript(); - } -#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT - } { Context::Scope cscope(context); options.isolate_sources[0].Execute(); } - if (!options.last_run) { + if (options.last_run) { + // Keep using the same context in the interactive shell + evaluation_context_ = context; + } else { context.Dispose(); } @@ -1337,11 +1331,9 @@ int Shell::Main(int argc, char* argv[]) { if (( options.interactive_shell || !options.script_executed ) && !options.test_shell ) { -#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) - if (!i::FLAG_debugger) { - InstallUtilityScript(); - } -#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT +#ifndef V8_SHARED + InstallUtilityScript(); +#endif // V8_SHARED RunShell(); } diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 3d79485b5733..a229d39c3edc 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -40,7 +40,6 @@ #include "global-handles.h" #include "ic.h" #include "ic-inl.h" -#include "isolate-inl.h" #include "list.h" #include "messages.h" #include "natives.h" @@ -402,15 +401,15 @@ void BreakLocationIterator::PrepareStepIn() { // Step in can only be prepared if currently positioned on an IC call, // construct call or CallFunction stub call. 
Address target = rinfo()->target_address(); - Handle target_code(Code::GetCodeFromTargetAddress(target)); - if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) { + Handle code(Code::GetCodeFromTargetAddress(target)); + if (code->is_call_stub() || code->is_keyed_call_stub()) { // Step in through IC call is handled by the runtime system. Therefore make // sure that the any current IC is cleared and the runtime system is // called. If the executing code has a debug break at the location change // the call in the original code as it is the code there that will be // executed in place of the debug break call. - Handle stub = ComputeCallDebugPrepareStepIn( - target_code->arguments_count(), target_code->kind()); + Handle stub = ComputeCallDebugPrepareStepIn(code->arguments_count(), + code->kind()); if (IsDebugBreak()) { original_rinfo()->set_target_address(stub->entry()); } else { @@ -420,7 +419,7 @@ void BreakLocationIterator::PrepareStepIn() { #ifdef DEBUG // All the following stuff is needed only for assertion checks so the code // is wrapped in ifdef. - Handle maybe_call_function_stub = target_code; + Handle maybe_call_function_stub = code; if (IsDebugBreak()) { Address original_target = original_rinfo()->target_address(); maybe_call_function_stub = @@ -437,9 +436,8 @@ void BreakLocationIterator::PrepareStepIn() { // Step in through CallFunction stub should also be prepared by caller of // this function (Debug::PrepareStep) which should flood target function // with breakpoints. - ASSERT(RelocInfo::IsConstructCall(rmode()) || - target_code->is_inline_cache_stub() || - is_call_function_stub); + ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub() + || is_call_function_stub); #endif } } @@ -476,11 +474,11 @@ void BreakLocationIterator::SetDebugBreakAtIC() { RelocInfo::Mode mode = rmode(); if (RelocInfo::IsCodeTarget(mode)) { Address target = rinfo()->target_address(); - Handle target_code(Code::GetCodeFromTargetAddress(target)); + Handle code(Code::GetCodeFromTargetAddress(target)); // Patch the code to invoke the builtin debug break function matching the // calling convention used by the call site. - Handle dbgbrk_code(Debug::FindDebugBreak(target_code, mode)); + Handle dbgbrk_code(Debug::FindDebugBreak(code, mode)); rinfo()->set_target_address(dbgbrk_code->entry()); } } @@ -774,7 +772,7 @@ bool Debug::CompileDebuggerScript(int index) { // Execute the shared function in the debugger context. Handle context = isolate->global_context(); - bool caught_exception; + bool caught_exception = false; Handle function = factory->NewFunctionFromSharedFunctionInfo(function_info, context); @@ -1105,13 +1103,14 @@ bool Debug::CheckBreakPoint(Handle break_point_object) { Handle break_id = factory->NewNumberFromInt(Debug::break_id()); // Call HandleBreakPointx. 
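In the CheckBreakPoint hunk that continues just below, the two sides differ in how the small argument list for Execution::TryCall is sized: one keeps a hand-maintained argc constant, the other derives the count from the array itself. A minimal sketch of the array-derived style with plain strings standing in for handles; the function and argument names are placeholders, not V8 API:

#include <cstdio>

#define ARRAY_SIZE_OF(a) (static_cast<int>(sizeof(a) / sizeof((a)[0])))

static void TryCall(const char* fun, int argc, const char* argv[]) {
  std::printf("%s called with %d argument(s)\n", fun, argc);
  for (int i = 0; i < argc; i++) {
    std::printf("  argv[%d] = %s\n", i, argv[i]);
  }
}

int main() {
  const char* argv[] = { "break_id", "break_point_object" };
  // The count is computed from the initializer list, so it cannot drift
  // out of sync the way a separately written constant (const int argc = 2) can.
  TryCall("check_break_point", ARRAY_SIZE_OF(argv), argv);
  return 0;
}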
- bool caught_exception; - Handle argv[] = { break_id, break_point_object }; + bool caught_exception = false; + const int argc = 2; + Object** argv[argc] = { + break_id.location(), + reinterpret_cast(break_point_object.location()) + }; Handle result = Execution::TryCall(check_break_point, - isolate_->js_builtins_object(), - ARRAY_SIZE(argv), - argv, - &caught_exception); + isolate_->js_builtins_object(), argc, argv, &caught_exception); // If exception or non boolean result handle as not triggered if (caught_exception || !result->IsBoolean()) { @@ -1733,10 +1732,6 @@ void Debug::PrepareForBreakPoints() { if (!has_break_points_) { Deoptimizer::DeoptimizeAll(); - // We are going to iterate heap to find all functions without - // debug break slots. - isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask); - AssertNoAllocation no_allocation; Builtins* builtins = isolate_->builtins(); Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile); @@ -2002,10 +1997,9 @@ void Debug::CreateScriptCache() { // Perform two GCs to get rid of all unreferenced scripts. The first GC gets // rid of all the cached script wrappers and the second gets rid of the - // scripts which are no longer referenced. The second also sweeps precisely, - // which saves us doing yet another GC to make the heap iterable. - heap->CollectAllGarbage(Heap::kNoGCFlags); - heap->CollectAllGarbage(Heap::kMakeHeapIterableMask); + // scripts which are no longer referenced. + heap->CollectAllGarbage(false); + heap->CollectAllGarbage(false); ASSERT(script_cache_ == NULL); script_cache_ = new ScriptCache(); @@ -2013,8 +2007,6 @@ void Debug::CreateScriptCache() { // Scan heap for Script objects. int count = 0; HeapIterator iterator; - AssertNoAllocation no_allocation; - for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (obj->IsScript() && Script::cast(obj)->HasValidSource()) { script_cache_->Add(Handle