From 98b22c6236a9894efa5284d6e691c5bcedaff023 Mon Sep 17 00:00:00 2001 From: Xiaohong Gong Date: Sun, 3 Feb 2019 14:00:34 +0800 Subject: [PATCH] Use add(extended) to generate array index offset for AArch64. We want to find a better way to implement array offset calculation. The old pattern for an int array member loading is: and x4, x3, #0xffffffff lsl x4, x4, #2 add x4, x4, #0x10 ldr x0, [x2, x4] And the optimized pattern is: add x1, x2, w3, uxtw #2 ldr x0, [x1, #16] Here is a JMH unit test for int array member loading: private final int func(int[] arr) { int ret = 0; for (int i=0; i < field0; i++) { ret += arr[i]; } return ret; } And the JMH performance results: without this patch with this patch units 1173501.595 948290.420 us/op Here are the additional issues fixed related to this patch: 1. Fix an issue in MatchProcessor, so that we can process platform-specific nodes as matchable nodes. 2. Disable this optimization for substratevm because the derived reference is not supported yet in SVM. mx bootstrap passes. mx gate -o -t 'CTW:hosted' passes. 
Change-Id: Ia34f2bf044ca6863be075cf000b29524ca44c42d --- .../aarch64/test/AArch64ArrayAddressTest.java | 224 ++++++++++++++++++ .../aarch64/AArch64AddressLoweringByUse.java | 86 +++++-- .../core/aarch64/AArch64NodeMatchRules.java | 61 +++++ .../core/aarch64/AArch64PointerAddNode.java | 83 +++++++ .../core/match/processor/MatchProcessor.java | 5 +- .../compiler/core/test/MatchRuleTest.java | 24 +- .../hotspot/HotSpotReferenceMapBuilder.java | 6 +- .../compiler/lir/dfa/RegStackValueSet.java | 7 +- .../lir/phases/EconomyAllocationStage.java | 5 +- .../aarch64/SubstrateAArch64Backend.java | 2 +- 10 files changed, 477 insertions(+), 26 deletions(-) create mode 100644 compiler/src/org.graalvm.compiler.core.aarch64.test/src/org/graalvm/compiler/core/aarch64/test/AArch64ArrayAddressTest.java create mode 100644 compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64PointerAddNode.java diff --git a/compiler/src/org.graalvm.compiler.core.aarch64.test/src/org/graalvm/compiler/core/aarch64/test/AArch64ArrayAddressTest.java b/compiler/src/org.graalvm.compiler.core.aarch64.test/src/org/graalvm/compiler/core/aarch64/test/AArch64ArrayAddressTest.java new file mode 100644 index 000000000000..28b9cddca8ce --- /dev/null +++ b/compiler/src/org.graalvm.compiler.core.aarch64.test/src/org/graalvm/compiler/core/aarch64/test/AArch64ArrayAddressTest.java @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, Arm Limited. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package org.graalvm.compiler.core.aarch64.test; + +import org.graalvm.compiler.lir.LIRInstruction; +import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ExtendedAddShiftOp; +import org.junit.Test; + +import java.util.ArrayDeque; +import java.util.HashSet; +import java.util.Set; +import java.util.function.Predicate; + +public class AArch64ArrayAddressTest extends AArch64MatchRuleTest { + private static final Predicate predicate = op -> (op instanceof ExtendedAddShiftOp); + + public static byte loadByte(byte[] arr, int n) { + return arr[n]; + } + + @Test + public void testLoadByte() { + byte[] arr = {3, 4, 5, 6, 7, 8}; + test("loadByte", arr, 5); + checkLIR("loadByte", predicate, 1, 1); + } + + public static char loadChar(char[] arr, int n) { + return arr[n]; + } + + @Test + public void testLoadChar() { + char[] arr = {'a', 'b', 'c', 'd', 'e', 'f', 'g'}; + test("loadChar", arr, 5); + checkLIR("loadChar", predicate, 1, 1); + } + + public static short loadShort(short[] arr, int n) { + return arr[n]; + } + + @Test + public void testLoadShort() { + short[] arr = {3, 4, 5, 6, 7, 8}; + test("loadShort", arr, 5); + checkLIR("loadShort", predicate, 1, 1); + } + + public static int loadInt(int[] arr, int n) { + return arr[n]; + } + + @Test + public 
void testLoadInt() { + int[] arr = {3, 4, 5, 6, 7, 8}; + test("loadInt", arr, 5); + checkLIR("loadInt", predicate, 1, 1); + } + + public static long loadLong(long[] arr, int n) { + return arr[n]; + } + + @Test + public void testLoadLong() { + long[] arr = {3L, 4L, 5L, 6L, 7L, 8L}; + test("loadLong", arr, 5); + checkLIR("loadLong", predicate, 1, 1); + } + + public static float loadFloat(float[] arr, int n) { + return arr[n]; + } + + @Test + public void testLoadFloat() { + float[] arr = {3.0F, 4.0F, 5.0F, 6.0F, 7.0F, 8.0F}; + test("loadFloat", arr, 5); + checkLIR("loadFloat", predicate, 1, 1); + } + + public static double loadDouble(double[] arr, int n) { + return arr[n]; + } + + @Test + public void testLoadDouble() { + double[] arr = {3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; + test("loadDouble", arr, 5); + checkLIR("loadDouble", predicate, 1, 1); + } + + public static String loadObject(String[] arr, int n) { + return arr[n]; + } + + @Test + public void testLoadObject() { + String[] arr = {"ac", "ad", "ew", "asf", "sdad", "aff"}; + test("loadObject", arr, 5); + checkLIRforAll("loadObject", predicate, 1); + } + + public static int storeInt(int[] arr, int n) { + arr[n] = n * n; + return arr[n]; + } + + @Test + public void testStoreInt() { + int[] arr = {3, 4, 5, 6, 7, 8}; + test("storeInt", arr, 5); + checkLIRforAll("storeInt", predicate, 1); + } + + public static Integer loadAndStoreObject(Integer[] arr, int i) { + if (arr[i] > 0) { + return 0; + } + arr[i] += 3; + return arr[i]; + } + + @Test + public void testLoadAndStoreObject() { + Integer[] arr = new Integer[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + test("loadAndStoreObject", arr, 2); + checkLIRforAll("loadAndStoreObject", predicate, 2); + } + + public static int useArrayInLoop(int[] arr) { + int ret = 0; + for (int i = 0; i < arr.length; i++) { + ret += arr[i]; + } + return ret; + } + + @Test + public void testUseArrayInLoop() { + int[] arr = {1, 2, 3, 4, 5, 6, 7, 8}; + test("useArrayInLoop", arr); + 
checkLIRforAll("useArrayInLoop", predicate, 1); + } + + public static int useArrayDeque(ArrayDeque ad) { + ad.addFirst(4); + return ad.removeFirst(); + } + + @Test + public void testUseArrayDeque() { + ArrayDeque ad = new ArrayDeque<>(); + test("useArrayDeque", ad); + } + + // Array load test when the index is narrowed firstly. + private static class Frame { + int index; + + Frame(int index) { + this.index = index; + } + } + + private static final Frame[] frameCache = new Frame[256]; + + private static Frame newFrame(byte data) { + return frameCache[data & 255]; + } + + public static int getFrameIndex(int n) { + return newFrame((byte) n).index; + } + + @Test + public void testGetFrameIndex() { + for (int i = 0; i < 256; i++) { + frameCache[i] = new Frame(i * i); + } + test("getFrameIndex", 258); + checkLIRforAll("getFrameIndex", predicate, 1); + } + + static Set allBarcodes = new HashSet<>(); + static Set localBarcodes = new HashSet<>(); + + public static long useConstReferenceAsBase(long l) { + localBarcodes.add(l); + allBarcodes.add(l); + return l; + } + + @Test + public void testUseConstReferenceAsBase() { + test("useConstReferenceAsBase", 2L); + int l = localBarcodes.size() + allBarcodes.size(); + test("useConstReferenceAsBase", (long) l); + } +} diff --git a/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64AddressLoweringByUse.java b/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64AddressLoweringByUse.java index c4cfe324fa4e..0b0e731311c9 100644 --- a/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64AddressLoweringByUse.java +++ b/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64AddressLoweringByUse.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. 
* Copyright (c) 2017, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,12 +26,14 @@ package org.graalvm.compiler.core.aarch64; -import org.graalvm.compiler.asm.aarch64.AArch64Address; +import org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode; import org.graalvm.compiler.core.common.LIRKind; import org.graalvm.compiler.core.common.NumUtil; import org.graalvm.compiler.core.common.type.Stamp; import org.graalvm.compiler.nodes.ValueNode; import org.graalvm.compiler.nodes.calc.AddNode; +import org.graalvm.compiler.nodes.calc.LeftShiftNode; +import org.graalvm.compiler.nodes.calc.ZeroExtendNode; import org.graalvm.compiler.nodes.memory.address.AddressNode; import org.graalvm.compiler.nodes.memory.address.OffsetAddressNode; import org.graalvm.compiler.phases.common.AddressLoweringByUsePhase; @@ -41,9 +43,15 @@ public class AArch64AddressLoweringByUse extends AddressLoweringByUsePhase.AddressLoweringByUse { private AArch64LIRKindTool kindtool; + private boolean supportsDerivedReference; public AArch64AddressLoweringByUse(AArch64LIRKindTool kindtool) { + this(kindtool, true); + } + + public AArch64AddressLoweringByUse(AArch64LIRKindTool kindtool, boolean supportsDerivedReference) { this.kindtool = kindtool; + this.supportsDerivedReference = supportsDerivedReference; } @Override @@ -76,8 +84,8 @@ private AddressNode doLower(Stamp stamp, ValueNode base, ValueNode index) { return base.graph().unique(ret); } - protected boolean improve(AArch64Kind kind, AArch64AddressNode ret) { - AArch64Address.AddressingMode mode = ret.getAddressingMode(); + private boolean improve(AArch64Kind kind, AArch64AddressNode ret) { + AddressingMode mode = ret.getAddressingMode(); // if we have already set a displacement or set to base only mode then we are done if (isDisplacementMode(mode) || isBaseOnlyMode(mode)) { return false; @@ -128,8 +136,8 @@ protected boolean improve(AArch64Kind kind, AArch64AddressNode ret) { ValueNode 
child = add.getX(); if (child.isJavaConstant() && child.asJavaConstant().getJavaKind().isNumericInteger()) { long newDisp = disp + child.asJavaConstant().asLong(); - AArch64Address.AddressingMode newMode = immediateMode(kind, newDisp); - if (newMode != AArch64Address.AddressingMode.REGISTER_OFFSET) { + AddressingMode newMode = immediateMode(kind, newDisp); + if (newMode != AddressingMode.REGISTER_OFFSET) { disp = newDisp; mode = newMode; base = add.getY(); @@ -140,8 +148,8 @@ protected boolean improve(AArch64Kind kind, AArch64AddressNode ret) { child = add.getY(); if (child.isJavaConstant() && child.asJavaConstant().getJavaKind().isNumericInteger()) { long newDisp = disp + child.asJavaConstant().asLong(); - AArch64Address.AddressingMode newMode = immediateMode(kind, newDisp); - if (newMode != AArch64Address.AddressingMode.REGISTER_OFFSET) { + AddressingMode newMode = immediateMode(kind, newDisp); + if (newMode != AddressingMode.REGISTER_OFFSET) { disp = newDisp; mode = newMode; base = add.getX(); @@ -159,12 +167,54 @@ protected boolean improve(AArch64Kind kind, AArch64AddressNode ret) { } else { // reset to base register only ret.setIndex(null); - ret.setDisplacement(0, 1, AArch64Address.AddressingMode.BASE_REGISTER_ONLY); + ret.setDisplacement(0, 1, AddressingMode.BASE_REGISTER_ONLY); } return true; } } } + + // We try to convert (OffsetAddress base (Add (LeftShift (Ext i) k) #imm)) + // to (AArch64AddressNode (AArch64PointerAdd (base (LeftShift (Ext i) k)) #imm) + if (supportsDerivedReference && index != null && index instanceof AddNode && index.getStackKind().isNumericInteger()) { + ValueNode x = ((AddNode) index).getX(); + ValueNode y = ((AddNode) index).getY(); + ValueNode objHeadOffset = null; + ValueNode scaledIndex = null; + if (x.isConstant()) { + objHeadOffset = x; + scaledIndex = y; + } else if (y.isConstant()) { + objHeadOffset = y; + scaledIndex = x; + } + + if (scaledIndex == null || objHeadOffset == null) { + return false; + } + + ZeroExtendNode 
wordIndex = null; + if (scaledIndex instanceof LeftShiftNode) { + ValueNode var = ((LeftShiftNode) scaledIndex).getX(); + ValueNode amount = ((LeftShiftNode) scaledIndex).getY(); + if (amount.isConstant() && var instanceof ZeroExtendNode) { + int s = amount.asJavaConstant().asInt(); + if (s >= 0 && s <= 4) { + wordIndex = (ZeroExtendNode) var; + } + } + } else if (scaledIndex instanceof ZeroExtendNode) { + wordIndex = (ZeroExtendNode) scaledIndex; + } + + if (wordIndex != null) { + AArch64PointerAddNode addP = base.graph().unique(new AArch64PointerAddNode(base, scaledIndex)); + ret.setBase(addP); + ret.setIndex(objHeadOffset); + return true; + } + } + // nope cannot improve this any more return false; } @@ -180,7 +230,7 @@ private AArch64Kind getAArch64Kind(Stamp stamp) { return (AArch64Kind) lirKind.getPlatformKind(); } - private static AArch64Address.AddressingMode immediateMode(AArch64Kind kind, long value) { + private static AddressingMode immediateMode(AArch64Kind kind, long value) { if (kind != null) { int size = kind.getSizeInBytes(); // this next test should never really fail @@ -189,32 +239,32 @@ private static AArch64Address.AddressingMode immediateMode(AArch64Kind kind, lon // assert value % size == 0 // we can try for a 12 bit scaled offset if (NumUtil.isUnsignedNbit(12, encodedValue)) { - return AArch64Address.AddressingMode.IMMEDIATE_SCALED; + return AddressingMode.IMMEDIATE_SCALED; } } } // we can try for a 9 bit unscaled offset if (NumUtil.isSignedNbit(9, value)) { - return AArch64Address.AddressingMode.IMMEDIATE_UNSCALED; + return AddressingMode.IMMEDIATE_UNSCALED; } // nope this index needs to be passed via offset register - return AArch64Address.AddressingMode.REGISTER_OFFSET; + return AddressingMode.REGISTER_OFFSET; } - private static int computeScaleFactor(AArch64Kind kind, AArch64Address.AddressingMode mode) { - if (mode == AArch64Address.AddressingMode.IMMEDIATE_SCALED) { + private static int computeScaleFactor(AArch64Kind kind, 
AddressingMode mode) { + if (mode == AddressingMode.IMMEDIATE_SCALED) { return kind.getSizeInBytes(); } return 1; } - boolean isBaseOnlyMode(AArch64Address.AddressingMode addressingMode) { - return addressingMode == AArch64Address.AddressingMode.BASE_REGISTER_ONLY; + boolean isBaseOnlyMode(AddressingMode addressingMode) { + return addressingMode == AddressingMode.BASE_REGISTER_ONLY; } - private static boolean isDisplacementMode(AArch64Address.AddressingMode addressingMode) { + private static boolean isDisplacementMode(AddressingMode addressingMode) { switch (addressingMode) { case IMMEDIATE_POST_INDEXED: case IMMEDIATE_PRE_INDEXED: diff --git a/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64NodeMatchRules.java b/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64NodeMatchRules.java index 4e6d80d611bd..a213f6cdac4f 100644 --- a/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64NodeMatchRules.java +++ b/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64NodeMatchRules.java @@ -32,6 +32,7 @@ import jdk.vm.ci.meta.Value; import org.graalvm.collections.EconomicMap; import org.graalvm.collections.Equivalence; +import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ExtendType; import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler; import org.graalvm.compiler.core.common.LIRKind; import org.graalvm.compiler.core.common.calc.CanonicalCondition; @@ -39,6 +40,8 @@ import org.graalvm.compiler.core.gen.NodeMatchRules; import org.graalvm.compiler.core.match.ComplexMatchResult; import org.graalvm.compiler.core.match.MatchRule; +import org.graalvm.compiler.core.match.MatchableNode; +import org.graalvm.compiler.debug.GraalError; import org.graalvm.compiler.lir.LIRFrameState; import org.graalvm.compiler.lir.LabelRef; import org.graalvm.compiler.lir.Variable; @@ -68,8 +71,10 @@ import 
org.graalvm.compiler.nodes.calc.UnaryNode; import org.graalvm.compiler.nodes.calc.UnsignedRightShiftNode; import org.graalvm.compiler.nodes.calc.XorNode; +import org.graalvm.compiler.nodes.calc.ZeroExtendNode; import org.graalvm.compiler.nodes.memory.Access; +@MatchableNode(nodeClass = AArch64PointerAddNode.class, inputs = {"base", "offset"}) public class AArch64NodeMatchRules extends NodeMatchRules { private static final EconomicMap, AArch64ArithmeticOp> binaryOpMap; private static final EconomicMap, AArch64BitFieldOp.BitFieldOpCode> bitFieldOpMap; @@ -112,6 +117,22 @@ protected AArch64Kind getMemoryKind(Access access) { return (AArch64Kind) gen.getLIRKind(access.asNode().stamp(NodeView.DEFAULT)).getPlatformKind(); } + private static ExtendType getZeroExtendType(int fromBits) { + switch (fromBits) { + case Byte.SIZE: + return ExtendType.UXTB; + case Short.SIZE: + return ExtendType.UXTH; + case Integer.SIZE: + return ExtendType.UXTW; + case Long.SIZE: + return ExtendType.UXTX; + default: + GraalError.shouldNotReachHere("extended from " + fromBits + "bits is not supported!"); + return null; + } + } + private AllocatableValue moveSp(AllocatableValue value) { return getLIRGeneratorTool().moveSp(value); } @@ -165,6 +186,46 @@ private static boolean isNarrowingLongToInt(NarrowNode narrow) { return narrow.getInputBits() == 64 && narrow.getResultBits() == 32; } + @MatchRule("(AArch64PointerAdd=addP base ZeroExtend)") + @MatchRule("(AArch64PointerAdd=addP base (LeftShift ZeroExtend Constant))") + public ComplexMatchResult extendedPointerAddShift(AArch64PointerAddNode addP) { + ValueNode offset = addP.getOffset(); + ZeroExtendNode zeroExtend; + int shiftNum; + if (offset instanceof ZeroExtendNode) { + zeroExtend = (ZeroExtendNode) offset; + shiftNum = 0; + } else { + LeftShiftNode shift = (LeftShiftNode) offset; + zeroExtend = (ZeroExtendNode) shift.getX(); + shiftNum = shift.getY().asJavaConstant().asInt(); + } + + int fromBits = zeroExtend.getInputBits(); + int toBits = 
zeroExtend.getResultBits(); + if (toBits != 64) { + return null; + } + assert fromBits <= toBits; + ExtendType extendType = getZeroExtendType(fromBits); + + if (shiftNum >= 0 && shiftNum <= 4) { + ValueNode base = addP.getBase(); + return builder -> { + AllocatableValue x = gen.asAllocatable(operand(base)); + AllocatableValue y = gen.asAllocatable(operand(zeroExtend.getValue())); + AllocatableValue baseReference = LIRKind.derivedBaseFromValue(x); + LIRKind kind = LIRKind.combineDerived(gen.getLIRKind(addP.stamp(NodeView.DEFAULT)), + baseReference, null); + Variable result = gen.newVariable(kind); + gen.append(new AArch64ArithmeticOp.ExtendedAddShiftOp(result, x, moveSp(y), + extendType, shiftNum)); + return result; + }; + } + return null; + } + @MatchRule("(And (UnsignedRightShift=shift a Constant=b) Constant=c)") @MatchRule("(LeftShift=shift (And a Constant=c) Constant=b)") public ComplexMatchResult unsignedBitField(BinaryNode shift, ValueNode a, ConstantNode b, ConstantNode c) { diff --git a/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64PointerAddNode.java b/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64PointerAddNode.java new file mode 100644 index 000000000000..61fd03d44955 --- /dev/null +++ b/compiler/src/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64PointerAddNode.java @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, Arm Limited. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package org.graalvm.compiler.core.aarch64; + +import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1; +import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1; + +import jdk.vm.ci.meta.AllocatableValue; +import jdk.vm.ci.meta.Value; +import org.graalvm.compiler.core.common.LIRKind; +import org.graalvm.compiler.core.common.type.AbstractPointerStamp; +import org.graalvm.compiler.core.common.type.IntegerStamp; +import org.graalvm.compiler.core.common.type.StampFactory; +import org.graalvm.compiler.graph.NodeClass; +import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp; +import org.graalvm.compiler.lir.gen.ArithmeticLIRGeneratorTool; +import org.graalvm.compiler.lir.gen.LIRGeneratorTool; +import org.graalvm.compiler.nodeinfo.NodeInfo; +import org.graalvm.compiler.nodes.NodeView; +import org.graalvm.compiler.nodes.ValueNode; +import org.graalvm.compiler.nodes.calc.FloatingNode; +import org.graalvm.compiler.nodes.spi.ArithmeticLIRLowerable; +import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool; + +@NodeInfo(nameTemplate = "AArch64PointerAdd", cycles = CYCLES_1, size = SIZE_1) +public class AArch64PointerAddNode extends FloatingNode implements ArithmeticLIRLowerable { + + public static final NodeClass TYPE = 
NodeClass.create(AArch64PointerAddNode.class); + + @Input ValueNode base; + @Input ValueNode offset; + + public AArch64PointerAddNode(ValueNode base, ValueNode offset) { + super(TYPE, StampFactory.pointer()); + this.base = base; + this.offset = offset; + assert base != null && (base.stamp(NodeView.DEFAULT) instanceof AbstractPointerStamp || + IntegerStamp.getBits(base.stamp(NodeView.DEFAULT)) == 64); + assert offset != null && offset.getStackKind().isNumericInteger(); + } + + public ValueNode getBase() { + return base; + } + + public ValueNode getOffset() { + return offset; + } + + @Override + public void generate(NodeLIRBuilderTool builder, ArithmeticLIRGeneratorTool gen) { + LIRGeneratorTool tool = builder.getLIRGeneratorTool(); + Value x = builder.operand(base); + Value y = builder.operand(offset); + AllocatableValue baseValue = tool.asAllocatable(x); + AllocatableValue baseReference = LIRKind.derivedBaseFromValue(baseValue); + LIRKind kind = LIRKind.combineDerived(tool.getLIRKind(stamp(NodeView.DEFAULT)), baseReference, null); + builder.setResult(this, ((AArch64ArithmeticLIRGenerator) gen).emitBinary(kind, AArch64ArithmeticOp.ADD, true, x, y)); + } +} diff --git a/compiler/src/org.graalvm.compiler.core.match.processor/src/org/graalvm/compiler/core/match/processor/MatchProcessor.java b/compiler/src/org.graalvm.compiler.core.match.processor/src/org/graalvm/compiler/core/match/processor/MatchProcessor.java index 6546f672559f..83f760235418 100644 --- a/compiler/src/org.graalvm.compiler.core.match.processor/src/org/graalvm/compiler/core/match/processor/MatchProcessor.java +++ b/compiler/src/org.graalvm.compiler.core.match.processor/src/org/graalvm/compiler/core/match/processor/MatchProcessor.java @@ -519,6 +519,9 @@ private void createFiles(MatchRuleDescriptor info) { out.println("import org.graalvm.compiler.core.gen.NodeMatchRules;"); out.println("import org.graalvm.compiler.graph.Position;"); for (String p : info.requiredPackages) { + if (p.equals(pkg)) { + 
continue; + } out.println("import " + p + ".*;"); } out.println(""); @@ -774,7 +777,7 @@ private void processMatchableNodes(Element element) { if (mirror != null) { matchableNodeAnnotations = getAnnotationValueList(mirror, "value", AnnotationMirror.class); } else { - mirror = getAnnotation(element, getType(MATCHABLE_NODES_CLASS_NAME)); + mirror = getAnnotation(element, getType(MATCHABLE_NODE_CLASS_NAME)); if (mirror != null) { matchableNodeAnnotations = Collections.singletonList(mirror); } else { diff --git a/compiler/src/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/MatchRuleTest.java b/compiler/src/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/MatchRuleTest.java index d85033e20ef3..0cdb27dbb313 100644 --- a/compiler/src/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/MatchRuleTest.java +++ b/compiler/src/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/MatchRuleTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ */ package org.graalvm.compiler.core.test; +import org.graalvm.compiler.core.common.cfg.AbstractBlockBase; import org.graalvm.compiler.lir.LIR; import org.graalvm.compiler.lir.LIRInstruction; import org.graalvm.compiler.lir.gen.LIRGenerationResult; @@ -59,9 +60,13 @@ protected void run(TargetDescription target, LIRGenerationResult lirGenRes, PreA } protected void checkLIR(String methodName, Predicate predicate, int expected) { + checkLIR(methodName, predicate, 0, expected); + } + + protected void checkLIR(String methodName, Predicate predicate, int blockIndex, int expected) { compile(getResolvedJavaMethod(methodName), null); int actualOpNum = 0; - for (LIRInstruction ins : lir.getLIRforBlock(lir.codeEmittingOrder()[0])) { + for (LIRInstruction ins : lir.getLIRforBlock(lir.codeEmittingOrder()[blockIndex])) { if (predicate.test(ins)) { actualOpNum++; } @@ -69,4 +74,19 @@ protected void checkLIR(String methodName, Predicate predicate, Assert.assertEquals(expected, actualOpNum); } + protected void checkLIRforAll(String methodName, Predicate predicate, int expected) { + compile(getResolvedJavaMethod(methodName), null); + int actualOpNum = 0; + for (AbstractBlockBase block : lir.codeEmittingOrder()) { + if (block == null) { + continue; + } + for (LIRInstruction ins : lir.getLIRforBlock(block)) { + if (predicate.test(ins)) { + actualOpNum++; + } + } + } + Assert.assertEquals(expected, actualOpNum); + } } diff --git a/compiler/src/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotReferenceMapBuilder.java b/compiler/src/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotReferenceMapBuilder.java index 2b16a17c377e..a25f6b48141a 100644 --- a/compiler/src/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotReferenceMapBuilder.java +++ 
b/compiler/src/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotReferenceMapBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,7 +114,9 @@ public ReferenceMap finish(LIRFrameState state) { if (kind.isDerivedReference()) { Variable baseVariable = (Variable) kind.getDerivedReferenceBase(); Value baseValue = state.getLiveBasePointers().get(baseVariable.index); - assert baseValue.getPlatformKind().getVectorLength() == 1 && ((LIRKind) baseValue.getValueKind()).isReference(0) && !((LIRKind) baseValue.getValueKind()).isDerivedReference(); + assert baseValue.getPlatformKind().getVectorLength() == 1 && + ((LIRKind) baseValue.getValueKind()).isReference(0) && + !((LIRKind) baseValue.getValueKind()).isDerivedReference(); base = toLocation(baseValue, 0); } diff --git a/compiler/src/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/dfa/RegStackValueSet.java b/compiler/src/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/dfa/RegStackValueSet.java index f4096fa2fefd..b2d78d13fa68 100644 --- a/compiler/src/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/dfa/RegStackValueSet.java +++ b/compiler/src/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/dfa/RegStackValueSet.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -163,4 +163,9 @@ public void visitValue(Value value, OperandMode mode, EnumSet flags } } } + + @Override + public String toString() { + return "registers: " + registers.toString() + "\n" + "stack: " + stack.toString(); + } } diff --git a/compiler/src/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/phases/EconomyAllocationStage.java b/compiler/src/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/phases/EconomyAllocationStage.java index 54915e726516..9125820fcdcb 100644 --- a/compiler/src/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/phases/EconomyAllocationStage.java +++ b/compiler/src/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/phases/EconomyAllocationStage.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,12 +26,15 @@ import org.graalvm.compiler.lir.alloc.lsra.LinearScanPhase; import org.graalvm.compiler.lir.dfa.LocationMarkerPhase; +import org.graalvm.compiler.lir.dfa.MarkBasePointersPhase; import org.graalvm.compiler.lir.phases.AllocationPhase.AllocationContext; import org.graalvm.compiler.lir.stackslotalloc.SimpleStackSlotAllocator; import org.graalvm.compiler.options.OptionValues; public class EconomyAllocationStage extends LIRPhaseSuite { public EconomyAllocationStage(@SuppressWarnings("unused") OptionValues options) { + appendPhase(new MarkBasePointersPhase()); + appendPhase(new LinearScanPhase()); // build frame map diff --git a/substratevm/src/com.oracle.svm.core.graal.aarch64/src/com/oracle/svm/core/graal/aarch64/SubstrateAArch64Backend.java b/substratevm/src/com.oracle.svm.core.graal.aarch64/src/com/oracle/svm/core/graal/aarch64/SubstrateAArch64Backend.java index 405b3c937ca3..6ef79923aad6 100755 --- a/substratevm/src/com.oracle.svm.core.graal.aarch64/src/com/oracle/svm/core/graal/aarch64/SubstrateAArch64Backend.java +++ b/substratevm/src/com.oracle.svm.core.graal.aarch64/src/com/oracle/svm/core/graal/aarch64/SubstrateAArch64Backend.java @@ -921,6 +921,6 @@ public LIRGenerationResult newLIRGenerationResult(CompilationIdentifier compilat @Override public Phase newAddressLoweringPhase(CodeCacheProvider codeCache) { - return new AddressLoweringByUsePhase(new AArch64AddressLoweringByUse(createLirKindTool())); + return new AddressLoweringByUsePhase(new AArch64AddressLoweringByUse(createLirKindTool(), false)); } }