From ccb198dc62d3dad50aa26140a3e899ff5694dca2 Mon Sep 17 00:00:00 2001
From: Sander de Smalen
Date: Wed, 1 May 2024 11:55:31 +0100
Subject: [PATCH 01/48] [AArch64] NFC: Add RUN lines for streaming-compatible code. (#90617)

The intent is to test lowering of vector operations by scalarization, for
functions that are streaming-compatible (and thus cannot use NEON) and also
don't have the +sve attribute.

The generated code is clearly wrong at the moment, but a series of patches
will follow to fix up all cases to use scalar instructions.

A bit of context: This work will form the base to decouple SME from SVE
later on, as it will make sure that no NEON instructions are used in
streaming[-compatible] mode. Later this will be followed by a patch that
changes `useSVEForFixedLengthVectors` to only return `true` if SVE is
available for the given runtime mode, at which point I'll change the
`-mattr=+sme -force-streaming-compatible-sve` to
`-mattr=+sme -force-streaming-sve` in the RUN lines, so that the tests are
considered to be executed in Streaming-SVE mode.
---
 ...streaming-mode-fixed-length-and-combine.ll |   83 +
 ...treaming-mode-fixed-length-bit-counting.ll |  457 +++
 ...sve-streaming-mode-fixed-length-bitcast.ll |   97 +
 ...e-streaming-mode-fixed-length-bitselect.ll |   12 +
 ...treaming-mode-fixed-length-build-vector.ll |   88 +
 .../sve-streaming-mode-fixed-length-concat.ll |  228 ++
 ...e-streaming-mode-fixed-length-ext-loads.ll |  138 +
 ...ing-mode-fixed-length-extract-subvector.ll |  136 +
 ...ng-mode-fixed-length-extract-vector-elt.ll |   53 +
 ...e-streaming-mode-fixed-length-fcopysign.ll |  171 ++
 ...ve-streaming-mode-fixed-length-fp-arith.ll |  989 +++++++
 ...streaming-mode-fixed-length-fp-compares.ll | 2486 +++++++++++++++++
 ...-streaming-mode-fixed-length-fp-convert.ll |   12 +
 ...aming-mode-fixed-length-fp-extend-trunc.ll |  270 ++
 .../sve-streaming-mode-fixed-length-fp-fma.ll |  116 +
 ...e-streaming-mode-fixed-length-fp-minmax.ll |  965 +++++++
 ...eaming-mode-fixed-length-fp-reduce-fa64.ll |   25 +
 ...e-streaming-mode-fixed-length-fp-reduce.ll | 1058 +++++++
 ...streaming-mode-fixed-length-fp-rounding.ll |  547 ++++
 ...e-streaming-mode-fixed-length-fp-select.ll |   99 +
 ...e-streaming-mode-fixed-length-fp-to-int.ll |  925 ++++++
 ...-streaming-mode-fixed-length-fp-vselect.ll |  199 ++
 ...ing-mode-fixed-length-insert-vector-elt.ll |  172 ++
 ...e-streaming-mode-fixed-length-int-arith.ll |  371 +++
 ...treaming-mode-fixed-length-int-compares.ll |  154 +
 ...sve-streaming-mode-fixed-length-int-div.ll | 1145 ++++++++
 ...streaming-mode-fixed-length-int-extends.ll |  763 +++++
 ...eaming-mode-fixed-length-int-immediates.ll |  546 ++++
 ...sve-streaming-mode-fixed-length-int-log.ll |  229 ++
 ...-streaming-mode-fixed-length-int-minmax.ll |  325 +++
 ...ing-mode-fixed-length-int-mla-neon-fa64.ll |    7 +
 ...ve-streaming-mode-fixed-length-int-mulh.ll |  291 ++
 ...-streaming-mode-fixed-length-int-reduce.ll |  415 +++
 ...sve-streaming-mode-fixed-length-int-rem.ll | 1631 +++++++++++
 ...-streaming-mode-fixed-length-int-select.ll |  137 +
 ...-streaming-mode-fixed-length-int-shifts.ll |  313 +++
 ...e-streaming-mode-fixed-length-int-to-fp.ll |  822 ++++++
 ...streaming-mode-fixed-length-int-vselect.ll |  123 +
 ...reaming-mode-fixed-length-limit-duplane.ll |   27 +
 .../sve-streaming-mode-fixed-length-loads.ll  |  127 +
 ...-streaming-mode-fixed-length-log-reduce.ll |  436 +++
 ...streaming-mode-fixed-length-masked-load.ll |  954 +++++++
 ...treaming-mode-fixed-length-masked-store.ll |  774 +++++
 ...eaming-mode-fixed-length-optimize-ptrue.ll |  216 ++
...streaming-mode-fixed-length-permute-rev.ll | 127 + ...g-mode-fixed-length-permute-zip-uzp-trn.ll | 320 +++ .../sve-streaming-mode-fixed-length-ptest.ll | 72 + .../sve-streaming-mode-fixed-length-rev.ll | 159 ++ ...e-streaming-mode-fixed-length-sdiv-pow2.ll | 132 + ...treaming-mode-fixed-length-splat-vector.ll | 182 ++ .../sve-streaming-mode-fixed-length-stores.ll | 136 + ...e-streaming-mode-fixed-length-subvector.ll | 133 + ...treaming-mode-fixed-length-trunc-stores.ll | 38 + .../sve-streaming-mode-fixed-length-trunc.ll | 389 +++ ...eaming-mode-fixed-length-vector-shuffle.ll | 151 + .../sve-streaming-mode-test-register-mov.ll | 21 + 56 files changed, 20992 insertions(+) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll index d81f725eaefca5..fd9259048df543 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -14,6 +15,12 @@ define <4 x i8> @vls_sve_and_4xi8(<4 x i8> %b) nounwind { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_4xi8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0xff000000ff0000 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %c = and <4 x i8> %b, ret <4 x i8> %c } @@ -27,6 +34,12 @@ define <8 x i8> @vls_sve_and_8xi8(<8 x i8> %b) nounwind { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_8xi8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0xff00ff00ff00ff00 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %c = and <8 x i8> %b, ret <8 x i8> %c } @@ -40,6 +53,12 @@ define <16 x i8> @vls_sve_and_16xi8(<16 x i8> %b) nounwind { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_16xi8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v1.2d, #0xff00ff00ff00ff00 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %c = and <16 x i8> %b, ret <16 x i8> %c } @@ -56,6 +75,13 @@ define <32 x i8> @vls_sve_and_32xi8(<32 x i8> %ap) nounwind { ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_32xi8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v2.2d, #0xff00ff00ff00ff00 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: and v1.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %b = and <32 x i8> %ap, ret <32 x i8> %b @@ -73,6 +99,13 @@ define <2 x i16> @vls_sve_and_2xi16(<2 x i16> %b) nounwind { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_2xi16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov v0.s[0], wzr +; NONEON-NOSVE-NEXT: // kill: def $d0 
killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %c = and <2 x i16> %b, ret <2 x i16> %c } @@ -86,6 +119,12 @@ define <4 x i16> @vls_sve_and_4xi16(<4 x i16> %b) nounwind { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_4xi16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0xffff0000ffff0000 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %c = and <4 x i16> %b, ret <4 x i16> %c } @@ -99,6 +138,12 @@ define <8 x i16> @vls_sve_and_8xi16(<8 x i16> %b) nounwind { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_8xi16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v1.2d, #0xffff0000ffff0000 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %c = and <8 x i16> %b, ret <8 x i16> %c } @@ -115,6 +160,13 @@ define <16 x i16> @vls_sve_and_16xi16(<16 x i16> %b) nounwind { ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_16xi16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v2.2d, #0xffff0000ffff0000 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: and v1.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %c = and <16 x i16> %b, ret <16 x i16> %c } @@ -128,6 +180,13 @@ define <2 x i32> @vls_sve_and_2xi32(<2 x i32> %b) nounwind { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_2xi32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov v0.s[0], wzr +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %c = and <2 x i32> %b, ret <2 x i32> %c } @@ -141,6 +200,12 @@ define <4 x i32> @vls_sve_and_4xi32(<4 x i32> %b) nounwind { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_4xi32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v1.2d, #0xffffffff00000000 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %c = and <4 x i32> %b, ret <4 x i32> %c } @@ -157,6 +222,13 @@ define <8 x i32> @vls_sve_and_8xi32(<8 x i32> %b) nounwind { ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_8xi32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v2.2d, #0xffffffff00000000 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: and v1.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %c = and <8 x i32> %b, ret <8 x i32> %c } @@ -170,6 +242,11 @@ define <2 x i64> @vls_sve_and_2xi64(<2 x i64> %b) nounwind { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_2xi64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov v0.d[0], xzr +; NONEON-NOSVE-NEXT: ret %c = and <2 x i64> %b, ret <2 x i64> %c } @@ -185,6 +262,12 @@ define <4 x i64> @vls_sve_and_4xi64(<4 x i64> %b) nounwind { ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: vls_sve_and_4xi64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov v0.d[0], xzr +; 
NONEON-NOSVE-NEXT: mov v1.d[0], xzr +; NONEON-NOSVE-NEXT: ret %c = and <4 x i64> %b, ret <4 x i64> %c } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll index d547f99a0230a6..8f0378252a54ef 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -18,6 +19,16 @@ define <4 x i8> @ctlz_v4i8(<4 x i8> %op) { ; CHECK-NEXT: sub z0.h, z0.h, #8 // =0x8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0xff00ff00ff00ff +; NONEON-NOSVE-NEXT: mov w8, #8 // =0x8 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: dup v1.4h, w8 +; NONEON-NOSVE-NEXT: clz v0.4h, v0.4h +; NONEON-NOSVE-NEXT: sub v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i8> @llvm.ctlz.v4i8(<4 x i8> %op) ret <4 x i8> %res } @@ -30,6 +41,11 @@ define <8 x i8> @ctlz_v8i8(<8 x i8> %op) { ; CHECK-NEXT: clz z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: clz v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %op) ret <8 x i8> %res } @@ -42,6 +58,11 @@ define <16 x i8> @ctlz_v16i8(<16 x i8> %op) { ; CHECK-NEXT: clz z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: clz v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %op) ret <16 x i8> %res } @@ -55,6 +76,14 @@ define void @ctlz_v32i8(ptr %a) { ; CHECK-NEXT: clz z1.b, p0/m, z1.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: clz v0.16b, v0.16b +; NONEON-NOSVE-NEXT: clz v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %op) store <32 x i8> %res, ptr %a @@ -71,6 +100,16 @@ define <2 x i16> @ctlz_v2i16(<2 x i16> %op) { ; CHECK-NEXT: sub z0.s, z0.s, #16 // =0x10 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0x00ffff0000ffff +; NONEON-NOSVE-NEXT: mov w8, #16 // =0x10 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: dup v1.2s, w8 +; NONEON-NOSVE-NEXT: clz v0.2s, v0.2s +; NONEON-NOSVE-NEXT: sub v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> %op) ret <2 x i16> %res } @@ -83,6 +122,11 @@ define <4 x i16> @ctlz_v4i16(<4 x i16> %op) { ; CHECK-NEXT: clz z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: 
clz v0.4h, v0.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %op) ret <4 x i16> %res } @@ -95,6 +139,11 @@ define <8 x i16> @ctlz_v8i16(<8 x i16> %op) { ; CHECK-NEXT: clz z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: clz v0.8h, v0.8h +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %op) ret <8 x i16> %res } @@ -108,6 +157,14 @@ define void @ctlz_v16i16(ptr %a) { ; CHECK-NEXT: clz z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: clz v0.8h, v0.8h +; NONEON-NOSVE-NEXT: clz v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %op) store <16 x i16> %res, ptr %a @@ -122,6 +179,11 @@ define <2 x i32> @ctlz_v2i32(<2 x i32> %op) { ; CHECK-NEXT: clz z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: clz v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %op) ret <2 x i32> %res } @@ -134,6 +196,11 @@ define <4 x i32> @ctlz_v4i32(<4 x i32> %op) { ; CHECK-NEXT: clz z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: clz v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %op) ret <4 x i32> %res } @@ -147,6 +214,14 @@ define void @ctlz_v8i32(ptr %a) { ; CHECK-NEXT: clz z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: clz v0.4s, v0.4s +; NONEON-NOSVE-NEXT: clz v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %op) store <8 x i32> %res, ptr %a @@ -161,6 +236,27 @@ define <1 x i64> @ctlz_v1i64(<1 x i64> %op) { ; CHECK-NEXT: clz z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushr d1, d0, #1 +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ushr d1, d0, #2 +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ushr d1, d0, #4 +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ushr d1, d0, #8 +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ushr d1, d0, #16 +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ushr d1, d0, #32 +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: mvn v0.8b, v0.8b +; NONEON-NOSVE-NEXT: cnt v0.8b, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.4h, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.2s, v0.4h +; NONEON-NOSVE-NEXT: uaddlp v0.1d, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %op) ret <1 x i64> %res } @@ -173,6 +269,27 @@ define <2 x i64> @ctlz_v2i64(<2 x i64> %op) { ; CHECK-NEXT: clz z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v2i64: +; NONEON-NOSVE: // %bb.0: +; 
NONEON-NOSVE-NEXT: ushr v1.2d, v0.2d, #1 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ushr v1.2d, v0.2d, #2 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ushr v1.2d, v0.2d, #4 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ushr v1.2d, v0.2d, #8 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ushr v1.2d, v0.2d, #16 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ushr v1.2d, v0.2d, #32 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: mvn v0.16b, v0.16b +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v0.4s, v0.8h +; NONEON-NOSVE-NEXT: uaddlp v0.2d, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %op) ret <2 x i64> %res } @@ -186,6 +303,46 @@ define void @ctlz_v4i64(ptr %a) { ; CHECK-NEXT: clz z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctlz_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ushr v2.2d, v0.2d, #1 +; NONEON-NOSVE-NEXT: ushr v3.2d, v1.2d, #1 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v3.16b +; NONEON-NOSVE-NEXT: ushr v2.2d, v0.2d, #2 +; NONEON-NOSVE-NEXT: ushr v3.2d, v1.2d, #2 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v3.16b +; NONEON-NOSVE-NEXT: ushr v2.2d, v0.2d, #4 +; NONEON-NOSVE-NEXT: ushr v3.2d, v1.2d, #4 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v3.16b +; NONEON-NOSVE-NEXT: ushr v2.2d, v0.2d, #8 +; NONEON-NOSVE-NEXT: ushr v3.2d, v1.2d, #8 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v3.16b +; NONEON-NOSVE-NEXT: ushr v2.2d, v0.2d, #16 +; NONEON-NOSVE-NEXT: ushr v3.2d, v1.2d, #16 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v3.16b +; NONEON-NOSVE-NEXT: ushr v2.2d, v0.2d, #32 +; NONEON-NOSVE-NEXT: ushr v3.2d, v1.2d, #32 +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v3.16b +; NONEON-NOSVE-NEXT: mvn v0.16b, v0.16b +; NONEON-NOSVE-NEXT: mvn v1.16b, v1.16b +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: cnt v1.16b, v1.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v1.8h, v1.16b +; NONEON-NOSVE-NEXT: uaddlp v0.4s, v0.8h +; NONEON-NOSVE-NEXT: uaddlp v1.4s, v1.8h +; NONEON-NOSVE-NEXT: uaddlp v0.2d, v0.4s +; NONEON-NOSVE-NEXT: uaddlp v1.2d, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %op) store <4 x i64> %res, ptr %a @@ -205,6 +362,14 @@ define <4 x i8> @ctpop_v4i8(<4 x i8> %op) { ; CHECK-NEXT: cnt z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0xff00ff00ff00ff +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: cnt v0.8b, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.4h, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <4 x i8> @llvm.ctpop.v4i8(<4 x i8> %op) ret <4 x i8> %res } @@ -217,6 +382,11 @@ define <8 x i8> @ctpop_v8i8(<8 x i8> %op) { ; CHECK-NEXT: cnt z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v8i8: +; 
NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cnt v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %op) ret <8 x i8> %res } @@ -229,6 +399,11 @@ define <16 x i8> @ctpop_v16i8(<16 x i8> %op) { ; CHECK-NEXT: cnt z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %op) ret <16 x i8> %res } @@ -242,6 +417,14 @@ define void @ctpop_v32i8(ptr %a) { ; CHECK-NEXT: cnt z1.b, p0/m, z1.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: cnt v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %op) store <32 x i8> %res, ptr %a @@ -257,6 +440,15 @@ define <2 x i16> @ctpop_v2i16(<2 x i16> %op) { ; CHECK-NEXT: cnt z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0x00ffff0000ffff +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: cnt v0.8b, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.4h, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.2s, v0.4h +; NONEON-NOSVE-NEXT: ret %res = call <2 x i16> @llvm.ctpop.v2i16(<2 x i16> %op) ret <2 x i16> %res } @@ -269,6 +461,12 @@ define <4 x i16> @ctpop_v4i16(<4 x i16> %op) { ; CHECK-NEXT: cnt z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cnt v0.8b, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.4h, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %op) ret <4 x i16> %res } @@ -281,6 +479,12 @@ define <8 x i16> @ctpop_v8i16(<8 x i16> %op) { ; CHECK-NEXT: cnt z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %op) ret <8 x i16> %res } @@ -294,6 +498,16 @@ define void @ctpop_v16i16(ptr %a) { ; CHECK-NEXT: cnt z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: cnt v1.16b, v1.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v1.8h, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %op) store <16 x i16> %res, ptr %a @@ -308,6 +522,13 @@ define <2 x i32> @ctpop_v2i32(<2 x i32> %op) { ; CHECK-NEXT: cnt z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cnt v0.8b, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.4h, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.2s, v0.4h +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %op) ret <2 x i32> %res } @@ -320,6 +541,13 @@ define <4 x 
i32> @ctpop_v4i32(<4 x i32> %op) { ; CHECK-NEXT: cnt z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v0.4s, v0.8h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %op) ret <4 x i32> %res } @@ -333,6 +561,18 @@ define void @ctpop_v8i32(ptr %a) { ; CHECK-NEXT: cnt z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: cnt v1.16b, v1.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v1.8h, v1.16b +; NONEON-NOSVE-NEXT: uaddlp v0.4s, v0.8h +; NONEON-NOSVE-NEXT: uaddlp v1.4s, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %op) store <8 x i32> %res, ptr %a @@ -347,6 +587,14 @@ define <1 x i64> @ctpop_v1i64(<1 x i64> %op) { ; CHECK-NEXT: cnt z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cnt v0.8b, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.4h, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.2s, v0.4h +; NONEON-NOSVE-NEXT: uaddlp v0.1d, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.ctpop.v1i64(<1 x i64> %op) ret <1 x i64> %res } @@ -359,6 +607,14 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %op) { ; CHECK-NEXT: cnt z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v0.4s, v0.8h +; NONEON-NOSVE-NEXT: uaddlp v0.2d, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %op) ret <2 x i64> %res } @@ -372,6 +628,20 @@ define void @ctpop_v4i64(ptr %a) { ; CHECK-NEXT: cnt z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ctpop_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: cnt v1.16b, v1.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v1.8h, v1.16b +; NONEON-NOSVE-NEXT: uaddlp v0.4s, v0.8h +; NONEON-NOSVE-NEXT: uaddlp v1.4s, v1.8h +; NONEON-NOSVE-NEXT: uaddlp v0.2d, v0.4s +; NONEON-NOSVE-NEXT: uaddlp v1.2d, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %op) store <4 x i64> %res, ptr %a @@ -392,6 +662,21 @@ define <4 x i8> @cttz_v4i8(<4 x i8> %op) { ; CHECK-NEXT: clz z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #256 // =0x100 +; NONEON-NOSVE-NEXT: dup v1.4h, w8 +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: dup v2.4h, w8 +; NONEON-NOSVE-NEXT: mov w8, #16 // =0x10 +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: sub v1.4h, v0.4h, v2.4h +; NONEON-NOSVE-NEXT: bic v0.8b, v1.8b, v0.8b +; NONEON-NOSVE-NEXT: dup v1.4h, w8 +; NONEON-NOSVE-NEXT: clz v0.4h, v0.4h +; 
NONEON-NOSVE-NEXT: sub v0.4h, v1.4h, v0.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i8> @llvm.cttz.v4i8(<4 x i8> %op) ret <4 x i8> %res } @@ -405,6 +690,14 @@ define <8 x i8> @cttz_v8i8(<8 x i8> %op) { ; CHECK-NEXT: clz z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v1.8b, #1 +; NONEON-NOSVE-NEXT: sub v1.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: bic v0.8b, v1.8b, v0.8b +; NONEON-NOSVE-NEXT: cnt v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i8> @llvm.cttz.v8i8(<8 x i8> %op) ret <8 x i8> %res } @@ -418,6 +711,14 @@ define <16 x i8> @cttz_v16i8(<16 x i8> %op) { ; CHECK-NEXT: clz z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v1.16b, #1 +; NONEON-NOSVE-NEXT: sub v1.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: bic v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %op) ret <16 x i8> %res } @@ -433,6 +734,19 @@ define void @cttz_v32i8(ptr %a) { ; CHECK-NEXT: clz z1.b, p0/m, z1.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #1 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: sub v3.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: sub v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: bic v1.16b, v3.16b, v1.16b +; NONEON-NOSVE-NEXT: bic v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: cnt v1.16b, v1.16b +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %op) store <32 x i8> %res, ptr %a @@ -449,6 +763,21 @@ define <2 x i16> @cttz_v2i16(<2 x i16> %op) { ; CHECK-NEXT: clz z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #65536 // =0x10000 +; NONEON-NOSVE-NEXT: dup v1.2s, w8 +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: dup v2.2s, w8 +; NONEON-NOSVE-NEXT: mov w8, #32 // =0x20 +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: sub v1.2s, v0.2s, v2.2s +; NONEON-NOSVE-NEXT: bic v0.8b, v1.8b, v0.8b +; NONEON-NOSVE-NEXT: dup v1.2s, w8 +; NONEON-NOSVE-NEXT: clz v0.2s, v0.2s +; NONEON-NOSVE-NEXT: sub v0.2s, v1.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i16> @llvm.cttz.v2i16(<2 x i16> %op) ret <2 x i16> %res } @@ -462,6 +791,18 @@ define <4 x i16> @cttz_v4i16(<4 x i16> %op) { ; CHECK-NEXT: clz z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: dup v1.4h, w8 +; NONEON-NOSVE-NEXT: mov w8, #16 // =0x10 +; NONEON-NOSVE-NEXT: sub v1.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: bic v0.8b, v1.8b, v0.8b +; NONEON-NOSVE-NEXT: dup v1.4h, w8 +; NONEON-NOSVE-NEXT: clz v0.4h, v0.4h +; NONEON-NOSVE-NEXT: sub v0.4h, v1.4h, v0.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.cttz.v4i16(<4 x i16> %op) ret <4 x i16> %res } @@ -475,6 +816,18 @@ define <8 x i16> @cttz_v8i16(<8 x i16> %op) { ; CHECK-NEXT: clz z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; 
NONEON-NOSVE-LABEL: cttz_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: dup v1.8h, w8 +; NONEON-NOSVE-NEXT: mov w8, #16 // =0x10 +; NONEON-NOSVE-NEXT: sub v1.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: bic v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: dup v1.8h, w8 +; NONEON-NOSVE-NEXT: clz v0.8h, v0.8h +; NONEON-NOSVE-NEXT: sub v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %op) ret <8 x i16> %res } @@ -490,6 +843,24 @@ define void @cttz_v16i16(ptr %a) { ; CHECK-NEXT: clz z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: mov w8, #16 // =0x10 +; NONEON-NOSVE-NEXT: sub v3.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: sub v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: bic v1.16b, v3.16b, v1.16b +; NONEON-NOSVE-NEXT: bic v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: dup v2.8h, w8 +; NONEON-NOSVE-NEXT: clz v1.8h, v1.8h +; NONEON-NOSVE-NEXT: clz v0.8h, v0.8h +; NONEON-NOSVE-NEXT: sub v1.8h, v2.8h, v1.8h +; NONEON-NOSVE-NEXT: sub v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %op) store <16 x i16> %res, ptr %a @@ -505,6 +876,18 @@ define <2 x i32> @cttz_v2i32(<2 x i32> %op) { ; CHECK-NEXT: clz z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: dup v1.2s, w8 +; NONEON-NOSVE-NEXT: mov w8, #32 // =0x20 +; NONEON-NOSVE-NEXT: sub v1.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: bic v0.8b, v1.8b, v0.8b +; NONEON-NOSVE-NEXT: dup v1.2s, w8 +; NONEON-NOSVE-NEXT: clz v0.2s, v0.2s +; NONEON-NOSVE-NEXT: sub v0.2s, v1.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %op) ret <2 x i32> %res } @@ -518,6 +901,18 @@ define <4 x i32> @cttz_v4i32(<4 x i32> %op) { ; CHECK-NEXT: clz z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: dup v1.4s, w8 +; NONEON-NOSVE-NEXT: mov w8, #32 // =0x20 +; NONEON-NOSVE-NEXT: sub v1.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: bic v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: dup v1.4s, w8 +; NONEON-NOSVE-NEXT: clz v0.4s, v0.4s +; NONEON-NOSVE-NEXT: sub v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %op) ret <4 x i32> %res } @@ -533,6 +928,24 @@ define void @cttz_v8i32(ptr %a) { ; CHECK-NEXT: clz z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: mov w8, #32 // =0x20 +; NONEON-NOSVE-NEXT: sub v3.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: sub v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: bic v1.16b, v3.16b, v1.16b +; NONEON-NOSVE-NEXT: bic v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: dup v2.4s, w8 +; NONEON-NOSVE-NEXT: clz v1.4s, v1.4s +; NONEON-NOSVE-NEXT: clz v0.4s, v0.4s +; NONEON-NOSVE-NEXT: sub v1.4s, v2.4s, v1.4s +; NONEON-NOSVE-NEXT: sub v0.4s, v2.4s, v0.4s +; 
NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %op) store <8 x i32> %res, ptr %a @@ -548,6 +961,18 @@ define <1 x i64> @cttz_v1i64(<1 x i64> %op) { ; CHECK-NEXT: clz z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: fmov d1, x8 +; NONEON-NOSVE-NEXT: sub d1, d0, d1 +; NONEON-NOSVE-NEXT: bic v0.8b, v1.8b, v0.8b +; NONEON-NOSVE-NEXT: cnt v0.8b, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.4h, v0.8b +; NONEON-NOSVE-NEXT: uaddlp v0.2s, v0.4h +; NONEON-NOSVE-NEXT: uaddlp v0.1d, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %op) ret <1 x i64> %res } @@ -561,6 +986,18 @@ define <2 x i64> @cttz_v2i64(<2 x i64> %op) { ; CHECK-NEXT: clz z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: dup v1.2d, x8 +; NONEON-NOSVE-NEXT: sub v1.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: bic v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v0.4s, v0.8h +; NONEON-NOSVE-NEXT: uaddlp v0.2d, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %op) ret <2 x i64> %res } @@ -576,6 +1013,26 @@ define void @cttz_v4i64(ptr %a) { ; CHECK-NEXT: clz z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: cttz_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #1 // =0x1 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: sub v3.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: sub v0.2d, v2.2d, v0.2d +; NONEON-NOSVE-NEXT: bic v1.16b, v3.16b, v1.16b +; NONEON-NOSVE-NEXT: bic v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: cnt v1.16b, v1.16b +; NONEON-NOSVE-NEXT: cnt v0.16b, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v1.8h, v1.16b +; NONEON-NOSVE-NEXT: uaddlp v0.8h, v0.16b +; NONEON-NOSVE-NEXT: uaddlp v1.4s, v1.8h +; NONEON-NOSVE-NEXT: uaddlp v0.4s, v0.8h +; NONEON-NOSVE-NEXT: uaddlp v1.2d, v1.4s +; NONEON-NOSVE-NEXT: uaddlp v0.2d, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %op) store <4 x i64> %res, ptr %a diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll index e3cc74f766ee0e..64dc7ae117d3a9 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -11,6 +12,12 @@ define void @bitcast_v4i8(ptr %a, ptr %b) { ; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] ; CHECK-NEXT: st1b { z0.h }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr w8, [x0] +; NONEON-NOSVE-NEXT: str 
w8, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <4 x i8>, ptr %a %cast = bitcast <4 x i8> %load to <4 x i8> store volatile <4 x i8> %cast, ptr %b @@ -23,6 +30,12 @@ define void @bitcast_v8i8(ptr %a, ptr %b) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <8 x i8>, ptr %a %cast = bitcast <8 x i8> %load to <8 x i8> store volatile <8 x i8> %cast, ptr %b @@ -35,6 +48,12 @@ define void @bitcast_v16i8(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <16 x i8>, ptr %a %cast = bitcast <16 x i8> %load to <16 x i8> store volatile <16 x i8> %cast, ptr %b @@ -49,6 +68,14 @@ define void @bitcast_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: str q1, [x1, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: str q1, [x1, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <32 x i8>, ptr %a %cast = bitcast <32 x i8> %load to <32 x i8> store volatile <32 x i8> %cast, ptr %b @@ -72,6 +99,16 @@ define void @bitcast_v2i16(ptr %a, ptr %b) { ; CHECK-NEXT: str w8, [x1] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldrh w8, [x0] +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: add x8, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[2], [x8] +; NONEON-NOSVE-NEXT: uzp1 v0.4h, v0.4h, v0.4h +; NONEON-NOSVE-NEXT: str s0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <2 x i16>, ptr %a %cast = bitcast <2 x i16> %load to <2 x half> store volatile <2 x half> %cast, ptr %b @@ -84,6 +121,12 @@ define void @bitcast_v4i16(ptr %a, ptr %b) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <4 x i16>, ptr %a %cast = bitcast <4 x i16> %load to <4 x half> store volatile <4 x half> %cast, ptr %b @@ -96,6 +139,12 @@ define void @bitcast_v8i16(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <8 x i16>, ptr %a %cast = bitcast <8 x i16> %load to <8 x half> store volatile <8 x half> %cast, ptr %b @@ -110,6 +159,14 @@ define void @bitcast_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: str q1, [x1, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: str q1, [x1, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <16 x i16>, ptr %a %cast = bitcast <16 x i16> %load to <16 x half> store volatile <16 x half> %cast, ptr %b @@ -122,6 +179,12 @@ define void @bitcast_v2i32(ptr %a, ptr %b) { ; CHECK-NEXT: ldr 
d0, [x0] ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <2 x i32>, ptr %a %cast = bitcast <2 x i32> %load to <2 x float> store volatile <2 x float> %cast, ptr %b @@ -134,6 +197,12 @@ define void @bitcast_v4i32(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <4 x i32>, ptr %a %cast = bitcast <4 x i32> %load to <4 x float> store volatile <4 x float> %cast, ptr %b @@ -148,6 +217,14 @@ define void @bitcast_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: str q1, [x1, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: str q1, [x1, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <8 x i32>, ptr %a %cast = bitcast <8 x i32> %load to <8 x float> store volatile <8 x float> %cast, ptr %b @@ -160,6 +237,12 @@ define void @bitcast_v1i64(ptr %a, ptr %b) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <1 x i64>, ptr %a %cast = bitcast <1 x i64> %load to <1 x double> store volatile <1 x double> %cast, ptr %b @@ -172,6 +255,12 @@ define void @bitcast_v2i64(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <2 x i64>, ptr %a %cast = bitcast <2 x i64> %load to <2 x double> store volatile <2 x double> %cast, ptr %b @@ -186,6 +275,14 @@ define void @bitcast_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: str q1, [x1, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitcast_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: str q1, [x1, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %load = load volatile <4 x i64>, ptr %a %cast = bitcast <4 x i64> %load to <4 x double> store volatile <4 x double> %cast, ptr %b diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll index 74a4aab15597d4..5e06cd62118d7a 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64" @@ -30,6 +31,17 @@ define <8 x i32> @fixed_bitselect_v8i32(ptr %pre_cond_ptr, ptr %left_ptr, ptr %r ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; 
CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fixed_bitselect_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x1] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x2] +; NONEON-NOSVE-NEXT: neg v1.4s, v1.4s +; NONEON-NOSVE-NEXT: neg v0.4s, v0.4s +; NONEON-NOSVE-NEXT: bsl v0.16b, v3.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: ret %pre_cond = load <8 x i32>, ptr %pre_cond_ptr %left = load <8 x i32>, ptr %left_ptr %right = load <8 x i32>, ptr %right_ptr diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll index 0c490a662a79fc..7a24430a338525 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -10,6 +11,12 @@ define void @build_vector_7_inc1_v4i1(ptr %a) { ; CHECK-NEXT: mov w8, #5 // =0x5 ; CHECK-NEXT: strb w8, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_7_inc1_v4i1: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: strb w8, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x i1> , ptr %a, align 1 ret void } @@ -23,6 +30,15 @@ define void @build_vector_7_inc1_v32i8(ptr %a) { ; CHECK-NEXT: add z1.b, z1.b, #23 // =0x17 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_7_inc1_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI1_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI1_1 +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI1_0] +; NONEON-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI1_1] +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <32 x i8> , ptr %a, align 1 ret void } @@ -35,6 +51,15 @@ define void @build_vector_0_inc2_v16i16(ptr %a) { ; CHECK-NEXT: add z0.h, z0.h, #16 // =0x10 ; CHECK-NEXT: str q0, [x0, #16] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_0_inc2_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI2_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI2_1 +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI2_0] +; NONEON-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI2_1] +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <16 x i16> , ptr %a, align 2 ret void } @@ -48,6 +73,15 @@ define void @build_vector_0_dec3_v8i32(ptr %a) { ; CHECK-NEXT: add z1.s, z0.s, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_0_dec3_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI3_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI3_1 +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI3_0] +; NONEON-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI3_1] +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <8 x i32> , ptr %a, align 4 ret void } @@ -64,6 +98,15 @@ define void @build_vector_minus2_dec32_v4i64(ptr %a) { ; CHECK-NEXT: add z0.d, z0.d, z2.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_minus2_dec32_v4i64: +; NONEON-NOSVE: // %bb.0: +; 
NONEON-NOSVE-NEXT: adrp x8, .LCPI4_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI4_1 +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI4_0] +; NONEON-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI4_1] +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x i64> , ptr %a, align 8 ret void } @@ -76,6 +119,15 @@ define void @build_vector_no_stride_v4i64(ptr %a) { ; CHECK-NEXT: index z1.d, #0, #4 ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_no_stride_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI5_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI5_1 +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI5_0] +; NONEON-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI5_1] +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x i64> , ptr %a, align 8 ret void } @@ -89,6 +141,15 @@ define void @build_vector_0_inc2_v16f16(ptr %a) { ; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI6_1] ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_0_inc2_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI6_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI6_1 +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI6_0] +; NONEON-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI6_1] +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <16 x half> , ptr %a, align 2 ret void } @@ -103,6 +164,15 @@ define void @build_vector_0_dec3_v8f32(ptr %a) { ; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI7_1] ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_0_dec3_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI7_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI7_1 +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI7_0] +; NONEON-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI7_1] +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <8 x float> , ptr %a, align 4 ret void } @@ -117,6 +187,15 @@ define void @build_vector_minus2_dec32_v4f64(ptr %a) { ; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI8_1] ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_minus2_dec32_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI8_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI8_1 +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI8_0] +; NONEON-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI8_1] +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x double> , ptr %a, align 8 ret void } @@ -131,6 +210,15 @@ define void @build_vector_no_stride_v4f64(ptr %a) { ; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI9_1] ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: build_vector_no_stride_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI9_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI9_1 +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI9_0] +; NONEON-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI9_1] +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x double> , ptr %a, align 8 ret void } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll index 86494c4be50123..ee997228e4532b 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s 
| FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -40,6 +41,11 @@ define <8 x i8> @concat_v8i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: uzp1 v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = shufflevector <4 x i8> %op1, <4 x i8> %op2, <8 x i32> ret <8 x i8> %res } @@ -53,6 +59,13 @@ define <16 x i8> @concat_v16i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %res = shufflevector <8 x i8> %op1, <8 x i8> %op2, <16 x i32> ret <16 x i8> %res @@ -65,6 +78,13 @@ define void @concat_v32i8(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i8>, ptr %a %op2 = load <16 x i8>, ptr %b %res = shufflevector <16 x i8> %op1, <16 x i8> %op2, <32 x i32> , ptr %a %op2 = load <32 x i8>, ptr %b %res = shufflevector <32 x i8> %op1, <32 x i8> %op2, <64 x i32> @concat_v4i16(<2 x i16> %op1, <2 x i16> %op2) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: uzp1 v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = shufflevector <2 x i16> %op1, <2 x i16> %op2, <4 x i32> ret <4 x i16> %res } @@ -135,6 +168,13 @@ define <8 x i16> @concat_v8i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %res = shufflevector <4 x i16> %op1, <4 x i16> %op2, <8 x i32> ret <8 x i16> %res } @@ -146,6 +186,13 @@ define void @concat_v16i16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i16>, ptr %a %op2 = load <8 x i16>, ptr %b %res = shufflevector <8 x i16> %op1, <8 x i16> %op2, <16 x i32> , ptr %a %op2 = load <16 x i16>, ptr %b %res = shufflevector <16 x i16> %op1, <16 x i16> %op2, <32 x i32> @concat_v2i32(<1 x i32> %op1, <1 x i32> %op2) { ; CHECK-NEXT: zip1 z0.s, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: zip1 v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = shufflevector <1 x i32> %op1, <1 x i32> %op2, <2 x i32> ret <2 x i32> %res } @@ -199,6 +259,13 @@ define <4 x i32> @concat_v4i32(<2 x i32> %op1, <2 x 
i32> %op2) { ; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %res = shufflevector <2 x i32> %op1, <2 x i32> %op2, <4 x i32> ret <4 x i32> %res } @@ -210,6 +277,13 @@ define void @concat_v8i32(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i32>, ptr %a %op2 = load <4 x i32>, ptr %b %res = shufflevector <4 x i32> %op1, <4 x i32> %op2, <8 x i32> @@ -225,6 +299,14 @@ define void @concat_v16i32(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: stp q0, q1, [x2, #32] ; CHECK-NEXT: stp q3, q2, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x2, #32] +; NONEON-NOSVE-NEXT: stp q3, q2, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = shufflevector <8 x i32> %op1, <8 x i32> %op2, <16 x i32> @concat_v2i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: splice z0.d, p0, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %res = shufflevector <1 x i64> %op1, <1 x i64> %op2, <2 x i32> ret <2 x i64> %res } @@ -258,6 +347,13 @@ define void @concat_v4i64(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x i64>, ptr %a %op2 = load <2 x i64>, ptr %b %res = shufflevector <2 x i64> %op1, <2 x i64> %op2, <4 x i32> @@ -273,6 +369,14 @@ define void @concat_v8i64(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: stp q0, q1, [x2, #32] ; CHECK-NEXT: stp q3, q2, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x2, #32] +; NONEON-NOSVE-NEXT: stp q3, q2, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = shufflevector <4 x i64> %op1, <4 x i64> %op2, <8 x i32> @@ -300,6 +404,11 @@ define <4 x half> @concat_v4f16(<2 x half> %op1, <2 x half> %op2) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: zip1 v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = shufflevector <2 x half> %op1, <2 x half> %op2, <4 x i32> ret <4 x half> %res } @@ -313,6 +422,13 @@ define <8 x half> @concat_v8f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed 
$z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %res = shufflevector <4 x half> %op1, <4 x half> %op2, <8 x i32> ret <8 x half> %res } @@ -324,6 +440,13 @@ define void @concat_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %op2 = load <8 x half>, ptr %b %res = shufflevector <8 x half> %op1, <8 x half> %op2, <16 x i32> , ptr %a %op2 = load <16 x half>, ptr %b %res = shufflevector <16 x half> %op1, <16 x half> %op2, <32 x i32> @concat_v2f32(<1 x float> %op1, <1 x float> %op2) { ; CHECK-NEXT: zip1 z0.s, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: zip1 v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = shufflevector <1 x float> %op1, <1 x float> %op2, <2 x i32> ret <2 x float> %res } @@ -377,6 +513,13 @@ define <4 x float> @concat_v4f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %res = shufflevector <2 x float> %op1, <2 x float> %op2, <4 x i32> ret <4 x float> %res } @@ -388,6 +531,13 @@ define void @concat_v8f32(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x float>, ptr %a %op2 = load <4 x float>, ptr %b %res = shufflevector <4 x float> %op1, <4 x float> %op2, <8 x i32> @@ -403,6 +553,14 @@ define void @concat_v16f32(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: stp q0, q1, [x2, #32] ; CHECK-NEXT: stp q3, q2, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v16f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x2, #32] +; NONEON-NOSVE-NEXT: stp q3, q2, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = shufflevector <8 x float> %op1, <8 x float> %op2, <16 x i32> @concat_v2f64(<1 x double> %op1, <1 x double> %op2) { ; CHECK-NEXT: splice z0.d, p0, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %res = shufflevector <1 x double> %op1, <1 x double> %op2, <2 x i32> ret <2 x double> %res } @@ -436,6 +601,13 @@ define void @concat_v4f64(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: stp q1, q0, 
[x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x double>, ptr %a %op2 = load <2 x double>, ptr %b %res = shufflevector <2 x double> %op1, <2 x double> %op2, <4 x i32> @@ -451,6 +623,14 @@ define void @concat_v8f64(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: stp q0, q1, [x2, #32] ; CHECK-NEXT: stp q3, q2, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v8f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x2, #32] +; NONEON-NOSVE-NEXT: stp q3, q2, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = shufflevector <4 x double> %op1, <4 x double> %op2, <8 x i32> @@ -468,6 +648,12 @@ define void @concat_v32i8_undef(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v32i8_undef: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i8>, ptr %a %res = shufflevector <16 x i8> %op1, <16 x i8> undef, <32 x i32> , ptr %a %res = shufflevector <8 x i16> %op1, <8 x i16> undef, <16 x i32> @@ -496,6 +688,12 @@ define void @concat_v8i32_undef(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v8i32_undef: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i32>, ptr %a %res = shufflevector <4 x i32> %op1, <4 x i32> undef, <8 x i32> store <8 x i32> %res, ptr %b @@ -508,6 +706,12 @@ define void @concat_v4i64_undef(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v4i64_undef: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x i64>, ptr %a %res = shufflevector <2 x i64> %op1, <2 x i64> undef, <4 x i32> store <4 x i64> %res, ptr %b @@ -524,6 +728,12 @@ define void @concat_v32i8_4op(ptr %a, ptr %b) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v32i8_4op: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i8>, ptr %a %shuffle = shufflevector <8 x i8> %op1, <8 x i8> undef, <16 x i32> @@ -541,6 +751,12 @@ define void @concat_v16i16_4op(ptr %a, ptr %b) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v16i16_4op: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i16>, ptr %a %shuffle = shufflevector <4 x i16> %op1, <4 x i16> undef, <8 x i32> %res = shufflevector <8 x i16> %shuffle, <8 x i16> undef, <16 x i32> , ptr %a %shuffle = shufflevector <2 x i32> %op1, <2 x i32> undef, <4 x i32> %res = shufflevector <4 x i32> %shuffle, <4 x i32> undef, <8 x i32> @@ -568,6 +790,12 @@ define void @concat_v4i64_4op(ptr %a, ptr %b) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: concat_v4i64_4op: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: 
str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <1 x i64>, ptr %a %shuffle = shufflevector <1 x i64> %op1, <1 x i64> undef, <2 x i32> %res = shufflevector <2 x i64> %shuffle, <2 x i64> undef, <4 x i32> diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll index 0aefba2d4c6abe..42aa67fb2ab8b4 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -11,6 +12,12 @@ define <8 x i16> @load_zext_v8i8i16(ptr %ap) { ; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_zext_v8i8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: ret %a = load <8 x i8>, ptr %ap %val = zext <8 x i8> %a to <8 x i16> ret <8 x i16> %val @@ -23,6 +30,12 @@ define <4 x i32> @load_zext_v4i16i32(ptr %ap) { ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_zext_v4i16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ret %a = load <4 x i16>, ptr %ap %val = zext <4 x i16> %a to <4 x i32> ret <4 x i32> %val @@ -35,6 +48,12 @@ define <2 x i64> @load_zext_v2i32i64(ptr %ap) { ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0] ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_zext_v2i32i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ret %a = load <2 x i32>, ptr %ap %val = zext <2 x i32> %a to <2 x i64> ret <2 x i64> %val @@ -54,6 +73,19 @@ define <2 x i256> @load_zext_v2i64i256(ptr %ap) { ; CHECK-NEXT: mov x7, xzr ; CHECK-NEXT: fmov x4, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_zext_v2i64i256: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: mov x1, xzr +; NONEON-NOSVE-NEXT: mov x2, xzr +; NONEON-NOSVE-NEXT: mov x3, xzr +; NONEON-NOSVE-NEXT: mov x5, xzr +; NONEON-NOSVE-NEXT: mov x6, xzr +; NONEON-NOSVE-NEXT: mov x4, v0.d[1] +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: mov x7, xzr +; NONEON-NOSVE-NEXT: ret %a = load <2 x i64>, ptr %ap %val = zext <2 x i64> %a to <2 x i256> ret <2 x i256> %val @@ -75,6 +107,24 @@ define <16 x i32> @load_sext_v16i8i32(ptr %ap) { ; CHECK-NEXT: // kill: def $q2 killed $q2 killed $z2 ; CHECK-NEXT: // kill: def $q3 killed $q3 killed $z3 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_sext_v16i8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: sshll v1.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v2.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: sshll v0.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q2, q1, [sp, #16] +; NONEON-NOSVE-NEXT: sshll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ldr d3, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d4, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v1.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: sshll v3.4s, v4.4h, #0 +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %a = load <16 x i8>, ptr %ap %val = sext <16 x i8> %a to <16 x i32> ret <16 x i32> %val @@ -90,6 +140,17 @@ define <8 x i32> @load_sext_v8i16i32(ptr %ap) { ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_sext_v8i16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %a = load <8 x i16>, ptr %ap %val = sext <8 x i16> %a to <8 x i32> ret <8 x i32> %val @@ -121,6 +182,39 @@ define <4 x i256> @load_sext_v4i32i256(ptr %ap) { ; CHECK-NEXT: stp x12, x12, [x8, #112] ; CHECK-NEXT: stp x11, x12, [x8, #96] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_sext_v4i32i256: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: add x10, x8, #32 +; NONEON-NOSVE-NEXT: add x11, x8, #96 +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: mov x9, v0.d[1] +; NONEON-NOSVE-NEXT: st1 { v0.d }[1], [x10] +; NONEON-NOSVE-NEXT: fmov x10, d0 +; NONEON-NOSVE-NEXT: st1 { v1.d }[1], [x11] +; NONEON-NOSVE-NEXT: mov x11, v1.d[1] +; NONEON-NOSVE-NEXT: asr x10, x10, #63 +; NONEON-NOSVE-NEXT: str d0, [x8] +; NONEON-NOSVE-NEXT: asr x9, x9, #63 +; NONEON-NOSVE-NEXT: str d1, [x8, #64] +; NONEON-NOSVE-NEXT: stp x10, x10, [x8, #16] +; NONEON-NOSVE-NEXT: stp x9, x9, [x8, #48] +; NONEON-NOSVE-NEXT: str x9, [x8, #40] +; NONEON-NOSVE-NEXT: fmov x9, d1 +; NONEON-NOSVE-NEXT: str x10, [x8, #8] +; NONEON-NOSVE-NEXT: asr x10, x11, #63 +; NONEON-NOSVE-NEXT: asr x9, x9, #63 +; NONEON-NOSVE-NEXT: stp x10, x10, [x8, #112] +; NONEON-NOSVE-NEXT: str x10, [x8, #104] +; NONEON-NOSVE-NEXT: stp x9, x9, [x8, #80] +; NONEON-NOSVE-NEXT: str x9, [x8, #72] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %a = load <4 x i32>, ptr %ap %val = sext <4 x i32> %a to <4 x i256> ret <4 x i256> %val @@ -154,6 +248,22 @@ define <2 x i256> @load_sext_v2i64i256(ptr %ap) { ; CHECK-NEXT: fmov x1, d6 ; CHECK-NEXT: fmov x5, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_sext_v2i64i256: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: mov x8, v0.d[1] +; NONEON-NOSVE-NEXT: dup v1.2d, v0.d[1] +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: asr x1, x0, #63 +; NONEON-NOSVE-NEXT: asr x5, x8, #63 +; NONEON-NOSVE-NEXT: mov x2, x1 +; NONEON-NOSVE-NEXT: mov x3, x1 +; NONEON-NOSVE-NEXT: mov v1.d[1], x5 +; NONEON-NOSVE-NEXT: mov x6, x5 +; NONEON-NOSVE-NEXT: mov x7, x5 +; NONEON-NOSVE-NEXT: fmov x4, d1 +; NONEON-NOSVE-NEXT: ret %a = load <2 x i64>, ptr %ap %val = sext <2 x i64> %a to <2 x i256> ret <2 x i256> %val @@ -187,6 
+297,34 @@ define <16 x i64> @load_zext_v16i16i64(ptr %ap) { ; CHECK-NEXT: // kill: def $q6 killed $q6 killed $z6 ; CHECK-NEXT: // kill: def $q7 killed $q7 killed $z7 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_zext_v16i16i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ushll v2.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-96]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 96 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v3.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v4.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: ushll v5.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v0.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: stp q1, q2, [sp, #32] +; NONEON-NOSVE-NEXT: ushll v2.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d6, [sp, #56] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #40] +; NONEON-NOSVE-NEXT: stp q5, q3, [sp, #64] +; NONEON-NOSVE-NEXT: ldr d16, [sp, #88] +; NONEON-NOSVE-NEXT: ldr d17, [sp, #72] +; NONEON-NOSVE-NEXT: ushll v1.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: ushll v3.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: ushll v6.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: ushll v5.2d, v16.2s, #0 +; NONEON-NOSVE-NEXT: ushll v7.2d, v17.2s, #0 +; NONEON-NOSVE-NEXT: add sp, sp, #96 +; NONEON-NOSVE-NEXT: ret %a = load <16 x i16>, ptr %ap %val = zext <16 x i16> %a to <16 x i64> ret <16 x i64> %val diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll index 25ecd7a8d7e32e..d050ddc77640ef 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -27,6 +28,11 @@ define <4 x i1> @extract_subvector_v8i1(<8 x i1> %op) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v8i1: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: zip2 v0.8b, v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %ret = call <4 x i1> @llvm.vector.extract.v4i1.v8i1(<8 x i1> %op, i64 4) ret <4 x i1> %ret } @@ -54,6 +60,11 @@ define <4 x i8> @extract_subvector_v8i8(<8 x i8> %op) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: zip2 v0.8b, v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %ret = call <4 x i8> @llvm.vector.extract.v4i8.v8i8(<8 x i8> %op, i64 4) ret <4 x i8> %ret } @@ -65,6 +76,14 @@ define <8 x i8> @extract_subvector_v16i8(<16 x i8> %op) { ; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %ret = call <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8> %op, i64 8) ret <8 x i8> %ret } @@ -75,6 +94,12 @@ define void @extract_subvector_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %ret = call <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8> %op, i64 16) store <16 x i8> %ret, ptr %b @@ -91,6 +116,15 @@ define <2 x i16> @extract_subvector_v4i16(<4 x i16> %op) { ; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %ret = call <2 x i16> @llvm.vector.extract.v2i16.v4i16(<4 x i16> %op, i64 2) ret <2 x i16> %ret } @@ -102,6 +136,14 @@ define <4 x i16> @extract_subvector_v8i16(<8 x i16> %op) { ; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %ret = call <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16> %op, i64 4) ret <4 x i16> %ret } @@ -112,6 +154,12 @@ define void @extract_subvector_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %ret = call <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16> %op, i64 8) store <8 x i16> %ret, ptr %b @@ -127,6 +175,12 @@ define <1 x i32> @extract_subvector_v2i32(<2 x i32> %op) { ; CHECK-NEXT: mov z0.s, z0.s[1] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.2s, v0.s[1] +; NONEON-NOSVE-NEXT: ret %ret = call <1 x i32> @llvm.vector.extract.v1i32.v2i32(<2 x i32> %op, i64 1) ret <1 x i32> %ret } @@ -138,6 +192,14 @@ define <2 x i32> @extract_subvector_v4i32(<4 x i32> %op) { ; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %ret = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %op, i64 2) ret <2 x i32> %ret } @@ -148,6 +210,12 @@ define void @extract_subvector_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %ret = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4) store <4 x i32> %ret, ptr %b @@ -163,6 +231,14 @@ define <1 x i64> @extract_subvector_v2i64(<2 x i64> %op) { ; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %ret = call <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64> %op, i64 1) ret <1 x i64> %ret } @@ -173,6 +249,12 @@ define void @extract_subvector_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %ret = call <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64> %op, i64 2) store <2 x i64> %ret, ptr %b @@ -190,6 +272,12 @@ define <2 x half> @extract_subvector_v4f16(<4 x half> %op) { ; CHECK-NEXT: tbl z0.h, { z0.h }, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.2s, v0.s[1] +; NONEON-NOSVE-NEXT: ret %ret = call <2 x half> @llvm.vector.extract.v2f16.v4f16(<4 x half> %op, i64 2) ret <2 x half> %ret } @@ -201,6 +289,14 @@ define <4 x half> @extract_subvector_v8f16(<8 x half> %op) { ; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %ret = call <4 x half> @llvm.vector.extract.v4f16.v8f16(<8 x half> %op, i64 4) ret <4 x half> %ret } @@ -211,6 +307,12 @@ define void @extract_subvector_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %ret = call <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half> %op, i64 8) store <8 x half> %ret, ptr %b @@ -226,6 +328,12 @@ define <1 x float> @extract_subvector_v2f32(<2 x float> %op) { ; CHECK-NEXT: mov z0.s, z0.s[1] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.2s, v0.s[1] +; NONEON-NOSVE-NEXT: ret %ret = call <1 x float> @llvm.vector.extract.v1f32.v2f32(<2 x float> %op, i64 1) ret <1 x float> %ret } @@ -237,6 +345,14 @@ define <2 x float> @extract_subvector_v4f32(<4 x float> %op) { ; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %ret = call <2 x float> @llvm.vector.extract.v2f32.v4f32(<4 x float> %op, i64 2) ret <2 x float> %ret } @@ -247,6 +363,12 @@ define void @extract_subvector_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %ret = call <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float> %op, i64 4) store <4 x float> %ret, ptr %b @@ -262,6 +384,14 @@ define <1 x double> @extract_subvector_v2f64(<2 x double> %op) { ; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %ret = call <1 x double> @llvm.vector.extract.v1f64.v2f64(<2 x double> %op, i64 1) ret <1 x double> %ret } @@ -272,6 +402,12 @@ define void @extract_subvector_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q0, [x0, #16] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extract_subvector_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %ret = call <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double> %op, i64 2) store <2 x double> %ret, ptr %b diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll index a752e119b2fb2a..b2cf818e6e3c73 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -15,6 +16,12 @@ define half @extractelement_v2f16(<2 x half> %op1) { ; CHECK-NEXT: mov z0.h, z0.h[1] ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h0, v0.h[1] +; NONEON-NOSVE-NEXT: ret %r = extractelement <2 x half> %op1, i64 1 ret half %r } @@ -26,6 +33,12 @@ define half @extractelement_v4f16(<4 x half> %op1) { ; CHECK-NEXT: mov z0.h, z0.h[3] ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h0, v0.h[3] +; NONEON-NOSVE-NEXT: ret %r = extractelement <4 x half> %op1, i64 3 ret half %r } @@ -37,6 +50,11 @@ define half @extractelement_v8f16(<8 x half> %op1) { ; CHECK-NEXT: mov z0.h, z0.h[7] ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: ret %r = extractelement <8 x half> %op1, i64 7 ret half %r } @@ -48,6 +66,11 @@ define half @extractelement_v16f16(ptr %a) { ; CHECK-NEXT: mov z0.h, z0.h[7] ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr h0, [x0, #30] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %r = extractelement <16 x half> %op1, i64 15 ret half %r @@ -60,6 +83,12 @@ define float @extractelement_v2f32(<2 x float> %op1) { ; CHECK-NEXT: mov z0.s, z0.s[1] ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov s0, v0.s[1] +; NONEON-NOSVE-NEXT: ret %r = extractelement <2 x float> %op1, i64 1 ret 
float %r } @@ -71,6 +100,11 @@ define float @extractelement_v4f32(<4 x float> %op1) { ; CHECK-NEXT: mov z0.s, z0.s[3] ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov s0, v0.s[3] +; NONEON-NOSVE-NEXT: ret %r = extractelement <4 x float> %op1, i64 3 ret float %r } @@ -82,6 +116,11 @@ define float @extractelement_v8f32(ptr %a) { ; CHECK-NEXT: mov z0.s, z0.s[3] ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr s0, [x0, #28] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %r = extractelement <8 x float> %op1, i64 7 ret float %r @@ -91,6 +130,10 @@ define double @extractelement_v1f64(<1 x double> %op1) { ; CHECK-LABEL: extractelement_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ret %r = extractelement <1 x double> %op1, i64 0 ret double %r } @@ -101,6 +144,11 @@ define double @extractelement_v2f64(<2 x double> %op1) { ; CHECK-NEXT: mov z0.d, z0.d[1] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov d0, v0.d[1] +; NONEON-NOSVE-NEXT: ret %r = extractelement <2 x double> %op1, i64 1 ret double %r } @@ -112,6 +160,11 @@ define double @extractelement_v4f64(ptr %a) { ; CHECK-NEXT: mov z0.d, z0.d[1] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extractelement_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0, #24] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %r = extractelement <4 x double> %op1, i64 3 ret double %r diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll index 0d6675def8b52f..bed5dd53c519b8 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll @@ -2,6 +2,7 @@ ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE ; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" @@ -28,6 +29,16 @@ define void @test_copysign_v4f16_v4f16(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z1.d, z1.d, z2.d, z0.d ; SVE2-NEXT: str d1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v4f16_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #32767 // =0x7fff +; NONEON-NOSVE-NEXT: ldr d1, [x0] +; NONEON-NOSVE-NEXT: ldr d2, [x1] +; NONEON-NOSVE-NEXT: dup v0.4h, w8 +; NONEON-NOSVE-NEXT: bsl v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <4 x half>, ptr %ap %b = load <4 x half>, ptr %bp %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) @@ -54,6 +65,16 @@ define void @test_copysign_v8f16_v8f16(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z1.d, z1.d, z2.d, z0.d ; SVE2-NEXT: str q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: 
test_copysign_v8f16_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #32767 // =0x7fff +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x1] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: bsl v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <8 x half>, ptr %ap %b = load <8 x half>, ptr %bp %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b) @@ -84,6 +105,17 @@ define void @test_copysign_v16f16_v16f16(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z3.d, z3.d, z4.d, z0.d ; SVE2-NEXT: stp q2, q3, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v16f16_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #32767 // =0x7fff +; NONEON-NOSVE-NEXT: ldp q1, q4, [x1] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: bit v1.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v3.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <16 x half>, ptr %ap %b = load <16 x half>, ptr %bp %r = call <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b) @@ -112,6 +144,16 @@ define void @test_copysign_v2f32_v2f32(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z1.d, z1.d, z2.d, z0.d ; SVE2-NEXT: str d1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v2f32_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d0, #0xffffffffffffffff +; NONEON-NOSVE-NEXT: ldr d1, [x0] +; NONEON-NOSVE-NEXT: ldr d2, [x1] +; NONEON-NOSVE-NEXT: fneg v0.2s, v0.2s +; NONEON-NOSVE-NEXT: bsl v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <2 x float>, ptr %ap %b = load <2 x float>, ptr %bp %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) @@ -138,6 +180,16 @@ define void @test_copysign_v4f32_v4f32(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z1.d, z1.d, z2.d, z0.d ; SVE2-NEXT: str q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v4f32_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0xffffffffffffffff +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x1] +; NONEON-NOSVE-NEXT: fneg v0.4s, v0.4s +; NONEON-NOSVE-NEXT: bsl v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <4 x float>, ptr %ap %b = load <4 x float>, ptr %bp %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) @@ -168,6 +220,17 @@ define void @test_copysign_v8f32_v8f32(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z3.d, z3.d, z4.d, z0.d ; SVE2-NEXT: stp q2, q3, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v8f32_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0xffffffffffffffff +; NONEON-NOSVE-NEXT: ldp q1, q4, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fneg v0.4s, v0.4s +; NONEON-NOSVE-NEXT: bit v1.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v3.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <8 x float>, ptr %ap %b = load <8 x float>, ptr %bp %r = call <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b) @@ -196,6 +259,16 @@ define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z1.d, z1.d, z2.d, z0.d ; SVE2-NEXT: str q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v2f64_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0xffffffffffffffff +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x1] +; 
NONEON-NOSVE-NEXT: fneg v0.2d, v0.2d +; NONEON-NOSVE-NEXT: bsl v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <2 x double>, ptr %ap %b = load <2 x double>, ptr %bp %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) @@ -226,6 +299,17 @@ define void @test_copysign_v4f64_v4f64(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z3.d, z3.d, z4.d, z0.d ; SVE2-NEXT: stp q2, q3, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v4f64_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0xffffffffffffffff +; NONEON-NOSVE-NEXT: ldp q1, q4, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fneg v0.2d, v0.2d +; NONEON-NOSVE-NEXT: bit v1.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v3.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <4 x double>, ptr %ap %b = load <4 x double>, ptr %bp %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) @@ -260,6 +344,17 @@ define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z2.d, z2.d, z0.d, z1.d ; SVE2-NEXT: str d2, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v2f32_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d0, #0xffffffffffffffff +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: ldr d2, [x0] +; NONEON-NOSVE-NEXT: fcvtn v1.2s, v1.2d +; NONEON-NOSVE-NEXT: fneg v0.2s, v0.2s +; NONEON-NOSVE-NEXT: bsl v0.8b, v2.8b, v1.8b +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <2 x float>, ptr %ap %b = load <2 x double>, ptr %bp %tmp0 = fptrunc <2 x double> %b to <2 x float> @@ -304,6 +399,18 @@ define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z2.d, z2.d, z0.d, z1.d ; SVE2-NEXT: str q2, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v4f32_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q2, [x1] +; NONEON-NOSVE-NEXT: movi v0.2d, #0xffffffffffffffff +; NONEON-NOSVE-NEXT: fcvtn v1.2s, v1.2d +; NONEON-NOSVE-NEXT: fneg v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.4s, v2.2d +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: bsl v0.16b, v2.16b, v1.16b +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <4 x float>, ptr %ap %b = load <4 x double>, ptr %bp %tmp0 = fptrunc <4 x double> %b to <4 x float> @@ -337,6 +444,17 @@ define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z2.d, z2.d, z0.d, z1.d ; SVE2-NEXT: str q2, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v2f64_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0xffffffffffffffff +; NONEON-NOSVE-NEXT: ldr d1, [x1] +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: fneg v0.2d, v0.2d +; NONEON-NOSVE-NEXT: bsl v0.16b, v2.16b, v1.16b +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <2 x double>, ptr %ap %b = load < 2 x float>, ptr %bp %tmp0 = fpext <2 x float> %b to <2 x double> @@ -381,6 +499,23 @@ define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z4.d, z4.d, z1.d, z2.d ; SVE2-NEXT: stp q3, q4, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v4f64_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: movi v0.2d, #0xffffffffffffffff +; NONEON-NOSVE-NEXT: str q1, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: fneg v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtl v4.2d, v4.2s +; NONEON-NOSVE-NEXT: bit v1.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v3.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %a = load <4 x double>, ptr %ap %b = load <4 x float>, ptr %bp %tmp0 = fpext <4 x float> %b to <4 x double> @@ -416,6 +551,17 @@ define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z2.d, z2.d, z0.d, z1.d ; SVE2-NEXT: str d2, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v4f16_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: mov w8, #32767 // =0x7fff +; NONEON-NOSVE-NEXT: ldr d2, [x0] +; NONEON-NOSVE-NEXT: dup v1.4h, w8 +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: bit v0.8b, v2.8b, v1.8b +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <4 x half>, ptr %ap %b = load <4 x float>, ptr %bp %tmp0 = fptrunc <4 x float> %b to <4 x half> @@ -457,6 +603,19 @@ define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z2.d, z2.d, z0.d, z1.d ; SVE2-NEXT: str d2, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v4f16_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: mov w8, #32767 // =0x7fff +; NONEON-NOSVE-NEXT: ldr d2, [x0] +; NONEON-NOSVE-NEXT: fcvtxn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtxn2 v0.4s, v1.2d +; NONEON-NOSVE-NEXT: dup v1.4h, w8 +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: bit v0.8b, v2.8b, v1.8b +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <4 x half>, ptr %ap %b = load <4 x double>, ptr %bp %tmp0 = fptrunc <4 x double> %b to <4 x half> @@ -500,6 +659,18 @@ define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) { ; SVE2-NEXT: bsl z2.d, z2.d, z0.d, z1.d ; SVE2-NEXT: str q2, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_copysign_v8f16_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: mov w8, #32767 // =0x7fff +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: dup v1.8h, w8 +; NONEON-NOSVE-NEXT: bit v0.16b, v2.16b, v1.16b +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %a = load <8 x half>, ptr %ap %b = load <8 x float>, ptr %bp %tmp0 = fptrunc <8 x float> %b to <8 x half> diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll index c2d6ed4e9ccf95..662a8f2b55fdd8 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -17,6 +18,14 @@ define <2 x half> @fadd_v2f16(<2 x half> %op1, <2 x half> %op2) { ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; 
NONEON-NOSVE-LABEL: fadd_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fadd v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fadd <2 x half> %op1, %op2 ret <2 x half> %res } @@ -30,6 +39,14 @@ define <4 x half> @fadd_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fadd v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fadd <4 x half> %op1, %op2 ret <4 x half> %res } @@ -43,6 +60,18 @@ define <8 x half> @fadd_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fadd v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: fadd v1.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = fadd <8 x half> %op1, %op2 ret <8 x half> %res } @@ -58,6 +87,29 @@ define void @fadd_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fcvtl v4.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v6.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl v5.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v7.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v3.4s, v3.8h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v2.8h +; NONEON-NOSVE-NEXT: fadd v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: fadd v5.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: fadd v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fadd v2.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v4.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v5.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v2.4s +; NONEON-NOSVE-NEXT: stp q1, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %res = fadd <16 x half> %op1, %op2 @@ -74,6 +126,11 @@ define <2 x float> @fadd_v2f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fadd v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = fadd <2 x float> %op1, %op2 ret <2 x float> %res } @@ -87,6 +144,11 @@ define <4 x float> @fadd_v4f32(<4 x float> %op1, <4 x float> %op2) { ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fadd v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = fadd <4 x float> %op1, %op2 ret <4 x float> %res } @@ -102,6 +164,15 @@ define void @fadd_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z1.s, p0/m, 
z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fadd v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fadd v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = fadd <8 x float> %op1, %op2 @@ -118,6 +189,11 @@ define <2 x double> @fadd_v2f64(<2 x double> %op1, <2 x double> %op2) { ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fadd v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = fadd <2 x double> %op1, %op2 ret <2 x double> %res } @@ -133,6 +209,15 @@ define void @fadd_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fadd v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fadd v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = fadd <4 x double> %op1, %op2 @@ -153,6 +238,14 @@ define <2 x half> @fdiv_v2f16(<2 x half> %op1, <2 x half> %op2) { ; CHECK-NEXT: fdiv z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fdiv_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fdiv v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fdiv <2 x half> %op1, %op2 ret <2 x half> %res } @@ -166,6 +259,14 @@ define <4 x half> @fdiv_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: fdiv z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fdiv_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fdiv v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fdiv <4 x half> %op1, %op2 ret <4 x half> %res } @@ -179,6 +280,18 @@ define <8 x half> @fdiv_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: fdiv z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fdiv_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fdiv v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: fdiv v1.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = fdiv <8 x half> %op1, %op2 ret <8 x half> %res } @@ -194,6 +307,30 @@ define void @fdiv_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fdiv z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fdiv_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q4, q1, [x1] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: 
fcvtl v2.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v5.4s, v4.8h +; NONEON-NOSVE-NEXT: fcvtl v4.4s, v4.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fdiv v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: ldr q3, [x0] +; NONEON-NOSVE-NEXT: fcvtl2 v6.4s, v3.8h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v3.4h +; NONEON-NOSVE-NEXT: fdiv v3.4s, v3.4s, v4.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fdiv v5.4s, v6.4s, v5.4s +; NONEON-NOSVE-NEXT: fdiv v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v5.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q2, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %res = fdiv <16 x half> %op1, %op2 @@ -210,6 +347,11 @@ define <2 x float> @fdiv_v2f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: fdiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fdiv_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fdiv v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = fdiv <2 x float> %op1, %op2 ret <2 x float> %res } @@ -223,6 +365,11 @@ define <4 x float> @fdiv_v4f32(<4 x float> %op1, <4 x float> %op2) { ; CHECK-NEXT: fdiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fdiv_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fdiv v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = fdiv <4 x float> %op1, %op2 ret <4 x float> %res } @@ -238,6 +385,15 @@ define void @fdiv_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fdiv z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fdiv_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fdiv v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fdiv v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = fdiv <8 x float> %op1, %op2 @@ -254,6 +410,11 @@ define <2 x double> @fdiv_v2f64(<2 x double> %op1, <2 x double> %op2) { ; CHECK-NEXT: fdiv z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fdiv_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fdiv v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = fdiv <2 x double> %op1, %op2 ret <2 x double> %res } @@ -269,6 +430,15 @@ define void @fdiv_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fdiv z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fdiv_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fdiv v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fdiv v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = fdiv <4 x double> %op1, %op2 @@ -290,6 +460,46 @@ define <2 x half> @fma_v2f16(<2 x half> %op1, <2 x half> %op2, <2 x half> %op3) ; CHECK-NEXT: fmad z0.h, p0/m, z1.h, z2.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d2 killed $d2 def $q2 +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; 
NONEON-NOSVE-NEXT: mov h3, v2.h[1] +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: fcvt s16, h0 +; NONEON-NOSVE-NEXT: mov h17, v2.h[2] +; NONEON-NOSVE-NEXT: mov h18, v1.h[2] +; NONEON-NOSVE-NEXT: mov h19, v0.h[2] +; NONEON-NOSVE-NEXT: mov h2, v2.h[3] +; NONEON-NOSVE-NEXT: mov h1, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fmadd s6, s16, s7, s6 +; NONEON-NOSVE-NEXT: mov h16, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s7, h19 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmadd s3, s5, s4, s3 +; NONEON-NOSVE-NEXT: fcvt s4, h17 +; NONEON-NOSVE-NEXT: fcvt s5, h18 +; NONEON-NOSVE-NEXT: fcvt h0, s6 +; NONEON-NOSVE-NEXT: fmadd s4, s7, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h16 +; NONEON-NOSVE-NEXT: mov v0.h[1], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: fmadd s1, s5, s1, s2 +; NONEON-NOSVE-NEXT: mov v0.h[2], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: mov v0.h[3], v1.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> @llvm.fma.v2f16(<2 x half> %op1, <2 x half> %op2, <2 x half> %op3) ret <2 x half> %res } @@ -304,6 +514,46 @@ define <4 x half> @fma_v4f16(<4 x half> %op1, <4 x half> %op2, <4 x half> %op3) ; CHECK-NEXT: fmad z0.h, p0/m, z1.h, z2.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d2 killed $d2 def $q2 +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[1] +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: fcvt s16, h0 +; NONEON-NOSVE-NEXT: mov h17, v2.h[2] +; NONEON-NOSVE-NEXT: mov h18, v1.h[2] +; NONEON-NOSVE-NEXT: mov h19, v0.h[2] +; NONEON-NOSVE-NEXT: mov h2, v2.h[3] +; NONEON-NOSVE-NEXT: mov h1, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fmadd s6, s16, s7, s6 +; NONEON-NOSVE-NEXT: mov h16, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s7, h19 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmadd s3, s5, s4, s3 +; NONEON-NOSVE-NEXT: fcvt s4, h17 +; NONEON-NOSVE-NEXT: fcvt s5, h18 +; NONEON-NOSVE-NEXT: fcvt h0, s6 +; NONEON-NOSVE-NEXT: fmadd s4, s7, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h16 +; NONEON-NOSVE-NEXT: mov v0.h[1], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: fmadd s1, s5, s1, s2 +; NONEON-NOSVE-NEXT: mov v0.h[2], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: mov v0.h[3], v1.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.fma.v4f16(<4 x half> %op1, <4 x half> %op2, <4 x half> %op3) ret <4 x half> %res } @@ -318,6 +568,79 @@ define <8 x half> @fma_v8f16(<8 x half> %op1, <8 x half> %op2, <8 x half> %op3) ; CHECK-NEXT: fmad z0.h, p0/m, z1.h, z2.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h3, 
v2.h[1] +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: fcvt s16, h0 +; NONEON-NOSVE-NEXT: mov h17, v2.h[2] +; NONEON-NOSVE-NEXT: mov h18, v1.h[2] +; NONEON-NOSVE-NEXT: mov h19, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fmadd s6, s16, s7, s6 +; NONEON-NOSVE-NEXT: fcvt s7, h17 +; NONEON-NOSVE-NEXT: fcvt s16, h18 +; NONEON-NOSVE-NEXT: fcvt s17, h19 +; NONEON-NOSVE-NEXT: mov h18, v1.h[3] +; NONEON-NOSVE-NEXT: mov h19, v0.h[3] +; NONEON-NOSVE-NEXT: fmadd s4, s5, s4, s3 +; NONEON-NOSVE-NEXT: mov h5, v2.h[3] +; NONEON-NOSVE-NEXT: fcvt h3, s6 +; NONEON-NOSVE-NEXT: fmadd s6, s17, s16, s7 +; NONEON-NOSVE-NEXT: mov h17, v2.h[4] +; NONEON-NOSVE-NEXT: fcvt s7, h18 +; NONEON-NOSVE-NEXT: fcvt s16, h19 +; NONEON-NOSVE-NEXT: mov h18, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: mov h19, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fcvt s18, h18 +; NONEON-NOSVE-NEXT: mov v3.h[1], v4.h[0] +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: fmadd s5, s16, s7, s5 +; NONEON-NOSVE-NEXT: mov h7, v1.h[5] +; NONEON-NOSVE-NEXT: mov h16, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s19, h19 +; NONEON-NOSVE-NEXT: mov v3.h[2], v6.h[0] +; NONEON-NOSVE-NEXT: mov h6, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fcvt h5, s5 +; NONEON-NOSVE-NEXT: fmadd s17, s19, s18, s17 +; NONEON-NOSVE-NEXT: mov h18, v1.h[6] +; NONEON-NOSVE-NEXT: mov h19, v0.h[6] +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fmadd s4, s16, s7, s4 +; NONEON-NOSVE-NEXT: mov v3.h[3], v5.h[0] +; NONEON-NOSVE-NEXT: fcvt s5, h6 +; NONEON-NOSVE-NEXT: fcvt s6, h18 +; NONEON-NOSVE-NEXT: fcvt s7, h19 +; NONEON-NOSVE-NEXT: fcvt h16, s17 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fmadd s5, s7, s6, s5 +; NONEON-NOSVE-NEXT: mov v3.h[4], v16.h[0] +; NONEON-NOSVE-NEXT: fmadd s0, s0, s1, s2 +; NONEON-NOSVE-NEXT: mov v3.h[5], v4.h[0] +; NONEON-NOSVE-NEXT: fcvt h4, s5 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: mov v3.h[6], v4.h[0] +; NONEON-NOSVE-NEXT: mov v3.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: mov v0.16b, v3.16b +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.fma.v8f16(<8 x half> %op1, <8 x half> %op2, <8 x half> %op3) ret <8 x half> %res } @@ -334,6 +657,150 @@ define void @fma_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: fmla z1.h, p0/m, z3.h, z4.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q3, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q4, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q5, q2, [x2] +; NONEON-NOSVE-NEXT: mov h25, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s19, h0 +; NONEON-NOSVE-NEXT: mov h24, v0.h[2] +; NONEON-NOSVE-NEXT: mov h17, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt s18, h1 +; NONEON-NOSVE-NEXT: mov h22, v1.h[2] +; NONEON-NOSVE-NEXT: mov h16, v2.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: mov h20, v2.h[2] +; NONEON-NOSVE-NEXT: mov h26, v5.h[1] +; NONEON-NOSVE-NEXT: mov h27, v4.h[1] +; NONEON-NOSVE-NEXT: mov h28, v3.h[1] +; NONEON-NOSVE-NEXT: fcvt s25, 
h25 +; NONEON-NOSVE-NEXT: mov h7, v2.h[3] +; NONEON-NOSVE-NEXT: mov h29, v4.h[2] +; NONEON-NOSVE-NEXT: fcvt s23, h17 +; NONEON-NOSVE-NEXT: mov h17, v0.h[3] +; NONEON-NOSVE-NEXT: mov h30, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s21, h16 +; NONEON-NOSVE-NEXT: fmadd s6, s19, s18, s6 +; NONEON-NOSVE-NEXT: fcvt s18, h20 +; NONEON-NOSVE-NEXT: fcvt s19, h22 +; NONEON-NOSVE-NEXT: fcvt s20, h24 +; NONEON-NOSVE-NEXT: mov h16, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s22, h5 +; NONEON-NOSVE-NEXT: fcvt s24, h4 +; NONEON-NOSVE-NEXT: fcvt s26, h26 +; NONEON-NOSVE-NEXT: fcvt s27, h27 +; NONEON-NOSVE-NEXT: fcvt s28, h28 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fmadd s21, s25, s23, s21 +; NONEON-NOSVE-NEXT: fcvt s23, h3 +; NONEON-NOSVE-NEXT: mov h25, v5.h[2] +; NONEON-NOSVE-NEXT: fmadd s18, s20, s19, s18 +; NONEON-NOSVE-NEXT: mov h19, v3.h[2] +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: mov h31, v0.h[4] +; NONEON-NOSVE-NEXT: fmadd s26, s28, s27, s26 +; NONEON-NOSVE-NEXT: mov h27, v4.h[3] +; NONEON-NOSVE-NEXT: mov h28, v3.h[3] +; NONEON-NOSVE-NEXT: fmadd s22, s23, s24, s22 +; NONEON-NOSVE-NEXT: fcvt h20, s21 +; NONEON-NOSVE-NEXT: mov h21, v2.h[4] +; NONEON-NOSVE-NEXT: fcvt s23, h25 +; NONEON-NOSVE-NEXT: fcvt s24, h29 +; NONEON-NOSVE-NEXT: fcvt s19, h19 +; NONEON-NOSVE-NEXT: fmadd s16, s17, s16, s7 +; NONEON-NOSVE-NEXT: mov h25, v5.h[3] +; NONEON-NOSVE-NEXT: fcvt h18, s18 +; NONEON-NOSVE-NEXT: fcvt h26, s26 +; NONEON-NOSVE-NEXT: mov h29, v2.h[5] +; NONEON-NOSVE-NEXT: mov v6.h[1], v20.h[0] +; NONEON-NOSVE-NEXT: fcvt s17, h21 +; NONEON-NOSVE-NEXT: fcvt s20, h30 +; NONEON-NOSVE-NEXT: fmadd s19, s19, s24, s23 +; NONEON-NOSVE-NEXT: fcvt s21, h31 +; NONEON-NOSVE-NEXT: fcvt h7, s22 +; NONEON-NOSVE-NEXT: fcvt s22, h25 +; NONEON-NOSVE-NEXT: fcvt s23, h27 +; NONEON-NOSVE-NEXT: fcvt s24, h28 +; NONEON-NOSVE-NEXT: mov h25, v5.h[4] +; NONEON-NOSVE-NEXT: mov h27, v4.h[4] +; NONEON-NOSVE-NEXT: mov h28, v3.h[4] +; NONEON-NOSVE-NEXT: mov h30, v1.h[5] +; NONEON-NOSVE-NEXT: mov h31, v0.h[5] +; NONEON-NOSVE-NEXT: mov v6.h[2], v18.h[0] +; NONEON-NOSVE-NEXT: fmadd s17, s21, s20, s17 +; NONEON-NOSVE-NEXT: mov v7.h[1], v26.h[0] +; NONEON-NOSVE-NEXT: fcvt h18, s19 +; NONEON-NOSVE-NEXT: fmadd s19, s24, s23, s22 +; NONEON-NOSVE-NEXT: mov h26, v5.h[5] +; NONEON-NOSVE-NEXT: fcvt h16, s16 +; NONEON-NOSVE-NEXT: fcvt s20, h25 +; NONEON-NOSVE-NEXT: fcvt s21, h27 +; NONEON-NOSVE-NEXT: fcvt s22, h28 +; NONEON-NOSVE-NEXT: mov h27, v4.h[5] +; NONEON-NOSVE-NEXT: mov h28, v3.h[5] +; NONEON-NOSVE-NEXT: fcvt s23, h29 +; NONEON-NOSVE-NEXT: fcvt s24, h30 +; NONEON-NOSVE-NEXT: fcvt s25, h31 +; NONEON-NOSVE-NEXT: mov h29, v2.h[6] +; NONEON-NOSVE-NEXT: mov h30, v1.h[6] +; NONEON-NOSVE-NEXT: mov h31, v0.h[6] +; NONEON-NOSVE-NEXT: mov v7.h[2], v18.h[0] +; NONEON-NOSVE-NEXT: fcvt h18, s19 +; NONEON-NOSVE-NEXT: fmadd s19, s22, s21, s20 +; NONEON-NOSVE-NEXT: mov h20, v5.h[6] +; NONEON-NOSVE-NEXT: mov h21, v4.h[6] +; NONEON-NOSVE-NEXT: mov h22, v3.h[6] +; NONEON-NOSVE-NEXT: fcvt s26, h26 +; NONEON-NOSVE-NEXT: fmadd s23, s25, s24, s23 +; NONEON-NOSVE-NEXT: fcvt s27, h27 +; NONEON-NOSVE-NEXT: fcvt s28, h28 +; NONEON-NOSVE-NEXT: mov v6.h[3], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s17 +; NONEON-NOSVE-NEXT: fcvt s17, h29 +; NONEON-NOSVE-NEXT: fcvt s24, h30 +; NONEON-NOSVE-NEXT: fcvt s25, h31 +; NONEON-NOSVE-NEXT: fcvt s20, h20 +; NONEON-NOSVE-NEXT: fcvt s21, h21 +; NONEON-NOSVE-NEXT: fcvt s22, h22 +; NONEON-NOSVE-NEXT: mov v7.h[3], v18.h[0] +; NONEON-NOSVE-NEXT: fmadd s26, 
s28, s27, s26 +; NONEON-NOSVE-NEXT: fcvt h18, s19 +; NONEON-NOSVE-NEXT: mov h5, v5.h[7] +; NONEON-NOSVE-NEXT: mov h4, v4.h[7] +; NONEON-NOSVE-NEXT: mov h3, v3.h[7] +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: fmadd s17, s25, s24, s17 +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fmadd s19, s22, s21, s20 +; NONEON-NOSVE-NEXT: mov v6.h[4], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s23 +; NONEON-NOSVE-NEXT: mov v7.h[4], v18.h[0] +; NONEON-NOSVE-NEXT: fcvt h18, s26 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v6.h[5], v16.h[0] +; NONEON-NOSVE-NEXT: mov v7.h[5], v18.h[0] +; NONEON-NOSVE-NEXT: fmadd s3, s3, s4, s5 +; NONEON-NOSVE-NEXT: fcvt h4, s19 +; NONEON-NOSVE-NEXT: fcvt h5, s17 +; NONEON-NOSVE-NEXT: fmadd s0, s0, s1, s2 +; NONEON-NOSVE-NEXT: mov v7.h[6], v4.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s3 +; NONEON-NOSVE-NEXT: mov v6.h[6], v5.h[0] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: mov v7.h[7], v1.h[0] +; NONEON-NOSVE-NEXT: mov v6.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: stp q7, q6, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %op3 = load <16 x half>, ptr %c @@ -352,6 +819,12 @@ define <2 x float> @fma_v2f32(<2 x float> %op1, <2 x float> %op2, <2 x float> %o ; CHECK-NEXT: fmad z0.s, p0/m, z1.s, z2.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmla v2.2s, v1.2s, v0.2s +; NONEON-NOSVE-NEXT: fmov d0, d2 +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.fma.v2f32(<2 x float> %op1, <2 x float> %op2, <2 x float> %op3) ret <2 x float> %res } @@ -366,6 +839,12 @@ define <4 x float> @fma_v4f32(<4 x float> %op1, <4 x float> %op2, <4 x float> %o ; CHECK-NEXT: fmad z0.s, p0/m, z1.s, z2.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmla v2.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.fma.v4f32(<4 x float> %op1, <4 x float> %op2, <4 x float> %op3) ret <4 x float> %res } @@ -382,6 +861,16 @@ define void @fma_v8f32(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: fmla z1.s, p0/m, z3.s, z4.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q4, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q5, [x2] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fmla v1.4s, v0.4s, v2.4s +; NONEON-NOSVE-NEXT: fmla v5.4s, v4.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q1, q5, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %op3 = load <8 x float>, ptr %c @@ -400,6 +889,12 @@ define <2 x double> @fma_v2f64(<2 x double> %op1, <2 x double> %op2, <2 x double ; CHECK-NEXT: fmad z0.d, p0/m, z1.d, z2.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmla v2.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.fma.v2f64(<2 x double> %op1, <2 x double> %op2, <2 x double> %op3) ret <2 x double> %res } @@ -416,6 +911,16 @@ define void @fma_v4f64(ptr 
%a, ptr %b, ptr %c) { ; CHECK-NEXT: fmla z1.d, p0/m, z3.d, z4.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q4, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q5, [x2] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fmla v1.2d, v0.2d, v2.2d +; NONEON-NOSVE-NEXT: fmla v5.2d, v4.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q1, q5, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %op3 = load <4 x double>, ptr %c @@ -437,6 +942,14 @@ define <2 x half> @fmul_v2f16(<2 x half> %op1, <2 x half> %op2) { ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmul_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fmul v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fmul <2 x half> %op1, %op2 ret <2 x half> %res } @@ -450,6 +963,14 @@ define <4 x half> @fmul_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmul_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fmul v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fmul <4 x half> %op1, %op2 ret <4 x half> %res } @@ -463,6 +984,18 @@ define <8 x half> @fmul_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmul_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fmul v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: fmul v1.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = fmul <8 x half> %op1, %op2 ret <8 x half> %res } @@ -478,6 +1011,29 @@ define void @fmul_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmul_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fcvtl v4.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v6.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl v5.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v7.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v3.4s, v3.8h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v2.8h +; NONEON-NOSVE-NEXT: fmul v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: fmul v5.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: fmul v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fmul v2.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v4.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v5.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v2.4s +; NONEON-NOSVE-NEXT: stp q1, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %res = fmul <16 x half> %op1, %op2 @@ -494,6 +1050,11 @@ define <2 x float> @fmul_v2f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: fmul z0.s, p0/m, z0.s, 
z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmul_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmul v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = fmul <2 x float> %op1, %op2 ret <2 x float> %res } @@ -507,6 +1068,11 @@ define <4 x float> @fmul_v4f32(<4 x float> %op1, <4 x float> %op2) { ; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmul_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmul v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = fmul <4 x float> %op1, %op2 ret <4 x float> %res } @@ -522,6 +1088,15 @@ define void @fmul_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmul_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmul v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fmul v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = fmul <8 x float> %op1, %op2 @@ -538,6 +1113,11 @@ define <2 x double> @fmul_v2f64(<2 x double> %op1, <2 x double> %op2) { ; CHECK-NEXT: fmul z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmul_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmul v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = fmul <2 x double> %op1, %op2 ret <2 x double> %res } @@ -553,6 +1133,15 @@ define void @fmul_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fmul z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmul_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmul v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fmul v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = fmul <4 x double> %op1, %op2 @@ -572,6 +1161,12 @@ define <2 x half> @fneg_v2f16(<2 x half> %op) { ; CHECK-NEXT: fneg z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fneg_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v1.4h, #128, lsl #8 +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = fneg <2 x half> %op ret <2 x half> %res } @@ -584,6 +1179,12 @@ define <4 x half> @fneg_v4f16(<4 x half> %op) { ; CHECK-NEXT: fneg z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fneg_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v1.4h, #128, lsl #8 +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = fneg <4 x half> %op ret <4 x half> %res } @@ -596,6 +1197,12 @@ define <8 x half> @fneg_v8f16(<8 x half> %op) { ; CHECK-NEXT: fneg z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fneg_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v1.8h, #128, lsl #8 +; NONEON-NOSVE-NEXT: eor v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = fneg <8 x half> %op ret <8 x half> %res } @@ -609,6 +1216,15 @@ define void @fneg_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fneg z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, 
[x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fneg_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.8h, #128, lsl #8 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: eor v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: eor v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = fneg <16 x half> %op store <16 x half> %res, ptr %a @@ -623,6 +1239,11 @@ define <2 x float> @fneg_v2f32(<2 x float> %op) { ; CHECK-NEXT: fneg z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fneg_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fneg v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = fneg <2 x float> %op ret <2 x float> %res } @@ -635,6 +1256,11 @@ define <4 x float> @fneg_v4f32(<4 x float> %op) { ; CHECK-NEXT: fneg z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fneg_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fneg v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fneg <4 x float> %op ret <4 x float> %res } @@ -648,6 +1274,14 @@ define void @fneg_v8f32(ptr %a) { ; CHECK-NEXT: fneg z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fneg_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fneg v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fneg v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = fneg <8 x float> %op store <8 x float> %res, ptr %a @@ -662,6 +1296,11 @@ define <2 x double> @fneg_v2f64(<2 x double> %op) { ; CHECK-NEXT: fneg z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fneg_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fneg v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fneg <2 x double> %op ret <2 x double> %res } @@ -675,6 +1314,14 @@ define void @fneg_v4f64(ptr %a) { ; CHECK-NEXT: fneg z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fneg_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fneg v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fneg v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = fneg <4 x double> %op store <4 x double> %res, ptr %a @@ -693,6 +1340,30 @@ define <2 x half> @fsqrt_v2f16(<2 x half> %op) { ; CHECK-NEXT: fsqrt z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsqrt_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: mov h3, v0.h[2] +; NONEON-NOSVE-NEXT: mov h0, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fsqrt s2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fsqrt s1, s1 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fsqrt s3, s3 +; NONEON-NOSVE-NEXT: fsqrt s4, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s2 +; NONEON-NOSVE-NEXT: mov v0.h[1], v1.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s3 +; NONEON-NOSVE-NEXT: mov v0.h[2], v1.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s4 +; NONEON-NOSVE-NEXT: mov v0.h[3], v1.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> 
@llvm.sqrt.v2f16(<2 x half> %op) ret <2 x half> %res } @@ -705,6 +1376,30 @@ define <4 x half> @fsqrt_v4f16(<4 x half> %op) { ; CHECK-NEXT: fsqrt z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsqrt_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: mov h3, v0.h[2] +; NONEON-NOSVE-NEXT: mov h0, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fsqrt s2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fsqrt s1, s1 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fsqrt s3, s3 +; NONEON-NOSVE-NEXT: fsqrt s4, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s2 +; NONEON-NOSVE-NEXT: mov v0.h[1], v1.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s3 +; NONEON-NOSVE-NEXT: mov v0.h[2], v1.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s4 +; NONEON-NOSVE-NEXT: mov v0.h[3], v1.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.sqrt.v4f16(<4 x half> %op) ret <4 x half> %res } @@ -717,6 +1412,48 @@ define <8 x half> @fsqrt_v8f16(<8 x half> %op) { ; CHECK-NEXT: fsqrt z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsqrt_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: mov h3, v0.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[3] +; NONEON-NOSVE-NEXT: mov h5, v0.h[4] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: mov h7, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fsqrt s2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h0 +; NONEON-NOSVE-NEXT: fcvt h0, s2 +; NONEON-NOSVE-NEXT: fsqrt s1, s1 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: mov v0.h[1], v1.h[0] +; NONEON-NOSVE-NEXT: fsqrt s3, s3 +; NONEON-NOSVE-NEXT: fcvt h1, s3 +; NONEON-NOSVE-NEXT: mov v0.h[2], v1.h[0] +; NONEON-NOSVE-NEXT: fsqrt s4, s4 +; NONEON-NOSVE-NEXT: fcvt h1, s4 +; NONEON-NOSVE-NEXT: mov v0.h[3], v1.h[0] +; NONEON-NOSVE-NEXT: fsqrt s5, s5 +; NONEON-NOSVE-NEXT: fcvt h1, s5 +; NONEON-NOSVE-NEXT: mov v0.h[4], v1.h[0] +; NONEON-NOSVE-NEXT: fsqrt s6, s6 +; NONEON-NOSVE-NEXT: fcvt h1, s6 +; NONEON-NOSVE-NEXT: mov v0.h[5], v1.h[0] +; NONEON-NOSVE-NEXT: fsqrt s7, s7 +; NONEON-NOSVE-NEXT: fcvt h1, s7 +; NONEON-NOSVE-NEXT: mov v0.h[6], v1.h[0] +; NONEON-NOSVE-NEXT: fsqrt s2, s16 +; NONEON-NOSVE-NEXT: fcvt h1, s2 +; NONEON-NOSVE-NEXT: mov v0.h[7], v1.h[0] +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.sqrt.v8f16(<8 x half> %op) ret <8 x half> %res } @@ -730,6 +1467,89 @@ define void @fsqrt_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fsqrt z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsqrt_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q16, [x0] +; NONEON-NOSVE-NEXT: mov h0, v1.h[1] +; NONEON-NOSVE-NEXT: mov h17, v16.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s18, h16 +; NONEON-NOSVE-NEXT: mov h19, v16.h[2] +; NONEON-NOSVE-NEXT: mov h4, v1.h[3] +; NONEON-NOSVE-NEXT: mov h20, v16.h[3] +; NONEON-NOSVE-NEXT: mov h5, 
v1.h[4] +; NONEON-NOSVE-NEXT: mov h21, v16.h[4] +; NONEON-NOSVE-NEXT: mov h6, v1.h[5] +; NONEON-NOSVE-NEXT: mov h22, v16.h[5] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fsqrt s2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s19, h19 +; NONEON-NOSVE-NEXT: mov h7, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s20, h20 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s21, h21 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s22, h22 +; NONEON-NOSVE-NEXT: mov h23, v16.h[6] +; NONEON-NOSVE-NEXT: mov h16, v16.h[7] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s23, h23 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fsqrt s0, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: mov v2.h[1], v0.h[0] +; NONEON-NOSVE-NEXT: fsqrt s17, s17 +; NONEON-NOSVE-NEXT: fcvt h17, s17 +; NONEON-NOSVE-NEXT: fsqrt s18, s18 +; NONEON-NOSVE-NEXT: fcvt h18, s18 +; NONEON-NOSVE-NEXT: mov v18.h[1], v17.h[0] +; NONEON-NOSVE-NEXT: fsqrt s3, s3 +; NONEON-NOSVE-NEXT: fcvt h0, s3 +; NONEON-NOSVE-NEXT: mov v2.h[2], v0.h[0] +; NONEON-NOSVE-NEXT: fsqrt s19, s19 +; NONEON-NOSVE-NEXT: fcvt h17, s19 +; NONEON-NOSVE-NEXT: mov v18.h[2], v17.h[0] +; NONEON-NOSVE-NEXT: fsqrt s4, s4 +; NONEON-NOSVE-NEXT: fcvt h0, s4 +; NONEON-NOSVE-NEXT: mov v2.h[3], v0.h[0] +; NONEON-NOSVE-NEXT: fsqrt s20, s20 +; NONEON-NOSVE-NEXT: fcvt h3, s20 +; NONEON-NOSVE-NEXT: mov v18.h[3], v3.h[0] +; NONEON-NOSVE-NEXT: fsqrt s5, s5 +; NONEON-NOSVE-NEXT: fcvt h0, s5 +; NONEON-NOSVE-NEXT: mov v2.h[4], v0.h[0] +; NONEON-NOSVE-NEXT: fsqrt s21, s21 +; NONEON-NOSVE-NEXT: fcvt h3, s21 +; NONEON-NOSVE-NEXT: mov v18.h[4], v3.h[0] +; NONEON-NOSVE-NEXT: fsqrt s6, s6 +; NONEON-NOSVE-NEXT: fcvt h0, s6 +; NONEON-NOSVE-NEXT: mov v2.h[5], v0.h[0] +; NONEON-NOSVE-NEXT: fsqrt s22, s22 +; NONEON-NOSVE-NEXT: fcvt h3, s22 +; NONEON-NOSVE-NEXT: mov v18.h[5], v3.h[0] +; NONEON-NOSVE-NEXT: fsqrt s7, s7 +; NONEON-NOSVE-NEXT: fcvt h0, s7 +; NONEON-NOSVE-NEXT: mov v2.h[6], v0.h[0] +; NONEON-NOSVE-NEXT: fsqrt s23, s23 +; NONEON-NOSVE-NEXT: fcvt h3, s23 +; NONEON-NOSVE-NEXT: mov v18.h[6], v3.h[0] +; NONEON-NOSVE-NEXT: fsqrt s16, s16 +; NONEON-NOSVE-NEXT: fcvt h3, s16 +; NONEON-NOSVE-NEXT: mov v18.h[7], v3.h[0] +; NONEON-NOSVE-NEXT: fsqrt s1, s1 +; NONEON-NOSVE-NEXT: fcvt h0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: stp q18, q2, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call <16 x half> @llvm.sqrt.v16f16(<16 x half> %op) store <16 x half> %res, ptr %a @@ -744,6 +1564,11 @@ define <2 x float> @fsqrt_v2f32(<2 x float> %op) { ; CHECK-NEXT: fsqrt z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsqrt_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fsqrt v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %op) ret <2 x float> %res } @@ -756,6 +1581,11 @@ define <4 x float> @fsqrt_v4f32(<4 x float> %op) { ; CHECK-NEXT: fsqrt z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsqrt_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fsqrt v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %op) ret <4 x float> %res } @@ -769,6 +1599,14 @@ define void @fsqrt_v8f32(ptr %a) { ; 
CHECK-NEXT: fsqrt z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsqrt_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fsqrt v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fsqrt v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call <8 x float> @llvm.sqrt.v8f32(<8 x float> %op) store <8 x float> %res, ptr %a @@ -783,6 +1621,11 @@ define <2 x double> @fsqrt_v2f64(<2 x double> %op) { ; CHECK-NEXT: fsqrt z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsqrt_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fsqrt v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %op) ret <2 x double> %res } @@ -796,6 +1639,14 @@ define void @fsqrt_v4f64(ptr %a) { ; CHECK-NEXT: fsqrt z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsqrt_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fsqrt v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fsqrt v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %op) store <4 x double> %res, ptr %a @@ -815,6 +1666,14 @@ define <2 x half> @fsub_v2f16(<2 x half> %op1, <2 x half> %op2) { ; CHECK-NEXT: fsub z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsub_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fsub v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fsub <2 x half> %op1, %op2 ret <2 x half> %res } @@ -828,6 +1687,14 @@ define <4 x half> @fsub_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: fsub z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsub_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fsub v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fsub <4 x half> %op1, %op2 ret <4 x half> %res } @@ -841,6 +1708,18 @@ define <8 x half> @fsub_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: fsub z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsub_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fsub v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: fsub v1.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = fsub <8 x half> %op1, %op2 ret <8 x half> %res } @@ -856,6 +1735,29 @@ define void @fsub_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fsub z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsub_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fcvtl v4.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v6.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl 
v5.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v7.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v3.4s, v3.8h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v2.8h +; NONEON-NOSVE-NEXT: fsub v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: fsub v5.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: fsub v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fsub v2.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v4.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v5.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v2.4s +; NONEON-NOSVE-NEXT: stp q1, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %res = fsub <16 x half> %op1, %op2 @@ -872,6 +1774,11 @@ define <2 x float> @fsub_v2f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: fsub z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsub_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fsub v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = fsub <2 x float> %op1, %op2 ret <2 x float> %res } @@ -885,6 +1792,11 @@ define <4 x float> @fsub_v4f32(<4 x float> %op1, <4 x float> %op2) { ; CHECK-NEXT: fsub z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsub_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fsub v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = fsub <4 x float> %op1, %op2 ret <4 x float> %res } @@ -900,6 +1812,15 @@ define void @fsub_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fsub z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsub_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fsub v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fsub v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = fsub <8 x float> %op1, %op2 @@ -916,6 +1837,11 @@ define <2 x double> @fsub_v2f64(<2 x double> %op1, <2 x double> %op2) { ; CHECK-NEXT: fsub z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsub_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fsub v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = fsub <2 x double> %op1, %op2 ret <2 x double> %res } @@ -931,6 +1857,15 @@ define void @fsub_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fsub z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fsub_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fsub v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fsub v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = fsub <4 x double> %op1, %op2 @@ -950,6 +1885,11 @@ define <2 x half> @fabs_v2f16(<2 x half> %op) { ; CHECK-NEXT: fabs z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fabs_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: bic v0.4h, #128, lsl #8 +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> @llvm.fabs.v2f16(<2 x half> %op) ret <2 x half> %res } @@ -962,6 +1902,11 @@ define <4 x half> @fabs_v4f16(<4 x half> %op) { ; CHECK-NEXT: fabs z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed 
$d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fabs_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: bic v0.4h, #128, lsl #8 +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.fabs.v4f16(<4 x half> %op) ret <4 x half> %res } @@ -974,6 +1919,11 @@ define <8 x half> @fabs_v8f16(<8 x half> %op) { ; CHECK-NEXT: fabs z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fabs_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: bic v0.8h, #128, lsl #8 +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.fabs.v8f16(<8 x half> %op) ret <8 x half> %res } @@ -987,6 +1937,14 @@ define void @fabs_v16f16(ptr %a) { ; CHECK-NEXT: fabs z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fabs_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: bic v0.8h, #128, lsl #8 +; NONEON-NOSVE-NEXT: bic v1.8h, #128, lsl #8 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call <16 x half> @llvm.fabs.v16f16(<16 x half> %op) store <16 x half> %res, ptr %a @@ -1001,6 +1959,11 @@ define <2 x float> @fabs_v2f32(<2 x float> %op) { ; CHECK-NEXT: fabs z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fabs_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fabs v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.fabs.v2f32(<2 x float> %op) ret <2 x float> %res } @@ -1013,6 +1976,11 @@ define <4 x float> @fabs_v4f32(<4 x float> %op) { ; CHECK-NEXT: fabs z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fabs_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fabs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.fabs.v4f32(<4 x float> %op) ret <4 x float> %res } @@ -1026,6 +1994,14 @@ define void @fabs_v8f32(ptr %a) { ; CHECK-NEXT: fabs z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fabs_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fabs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fabs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call <8 x float> @llvm.fabs.v8f32(<8 x float> %op) store <8 x float> %res, ptr %a @@ -1040,6 +2016,11 @@ define <2 x double> @fabs_v2f64(<2 x double> %op) { ; CHECK-NEXT: fabs z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fabs_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fabs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.fabs.v2f64(<2 x double> %op) ret <2 x double> %res } @@ -1053,6 +2034,14 @@ define void @fabs_v4f64(ptr %a) { ; CHECK-NEXT: fabs z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fabs_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fabs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fabs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call <4 x double> @llvm.fabs.v4f64(<4 x double> %op) store <4 x double> %res, ptr %a diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-compares.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-compares.ll index 
465cc179a3b989..d4810c78cb53dc 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-compares.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-compares.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -19,6 +20,14 @@ define <2 x i16> @fcmp_oeq_v2f16(<2 x half> %op1, <2 x half> %op2) { ; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %cmp = fcmp oeq <2 x half> %op1, %op2 %sext = sext <2 x i1> %cmp to <2 x i16> ret <2 x i16> %sext @@ -34,6 +43,14 @@ define <4 x i16> @fcmp_oeq_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %cmp = fcmp oeq <4 x half> %op1, %op2 %sext = sext <4 x i1> %cmp to <4 x i16> ret <4 x i16> %sext @@ -49,6 +66,65 @@ define <8 x i16> @fcmp_oeq_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: mov h6, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcmp s3, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[2] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h6 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: mov h4, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov h6, v0.h[4] +; NONEON-NOSVE-NEXT: csetm w9, eq +; NONEON-NOSVE-NEXT: fcmp s2, s5 +; NONEON-NOSVE-NEXT: fmov s2, w9 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h5, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v2.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[5] +; NONEON-NOSVE-NEXT: mov h4, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[6] +; NONEON-NOSVE-NEXT: mov h6, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: fcvt s3, h5 +; 
NONEON-NOSVE-NEXT: fcvt s4, h6 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %cmp = fcmp oeq <8 x half> %op1, %op2 %sext = sext <8 x i1> %cmp to <8 x i16> ret <8 x i16> %sext @@ -66,6 +142,123 @@ define void @fcmp_oeq_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, eq +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, eq +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, eq +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, eq +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, eq +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, eq +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, eq +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, eq +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, 
v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp oeq <16 x half> %op1, %op2 @@ -84,6 +277,11 @@ define <2 x i32> @fcmp_oeq_v2f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcmeq v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %cmp = fcmp oeq <2 x float> %op1, %op2 %sext = sext <2 x i1> %cmp to <2 x i32> ret <2 x i32> %sext @@ -99,6 +297,11 @@ define <4 x i32> @fcmp_oeq_v4f32(<4 x float> %op1, <4 x float> %op2) { ; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %cmp = fcmp oeq <4 x float> %op1, %op2 %sext = sext <4 x i1> %cmp to <4 x i32> ret <4 x i32> %sext @@ -116,6 +319,15 @@ define void @fcmp_oeq_v8f32(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fcmeq v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fcmeq v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %cmp = fcmp oeq <8 x float> %op1, %op2 @@ -132,6 +344,11 @@ define <1 x i64> @fcmp_oeq_v1f64(<1 x double> %op1, <1 x double> %op2) { ; CHECK-NEXT: mov z0.d, x8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcmeq d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %cmp = fcmp oeq <1 x double> %op1, %op2 %sext = sext <1 x i1> %cmp to <1 x i64> ret <1 x i64> %sext @@ -147,6 +364,11 @@ define <2 x i64> @fcmp_oeq_v2f64(<2 x double> %op1, <2 x double> %op2) { ; CHECK-NEXT: mov z0.d, p0/z, #-1 // 
=0xffffffffffffffff ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcmeq v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %cmp = fcmp oeq <2 x double> %op1, %op2 %sext = sext <2 x i1> %cmp to <2 x i64> ret <2 x i64> %sext @@ -164,6 +386,15 @@ define void @fcmp_oeq_v4f64(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oeq_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fcmeq v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fcmeq v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %cmp = fcmp oeq <4 x double> %op1, %op2 @@ -192,6 +423,139 @@ define void @fcmp_ueq_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_ueq_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h2 +; NONEON-NOSVE-NEXT: mov h5, v2.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h1 +; NONEON-NOSVE-NEXT: mov h7, v1.h[2] +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s6, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[4] +; NONEON-NOSVE-NEXT: mov h6, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w9, eq +; NONEON-NOSVE-NEXT: csinv w12, w9, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s7, s5 +; NONEON-NOSVE-NEXT: mov h5, v2.h[5] +; NONEON-NOSVE-NEXT: mov h7, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w9, eq +; NONEON-NOSVE-NEXT: csinv w10, w9, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: csetm w9, eq +; NONEON-NOSVE-NEXT: csinv w11, w9, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s6, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s6, h16 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w9, eq +; NONEON-NOSVE-NEXT: csinv w9, w9, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s7, s5 +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: mov h7, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w13, eq +; NONEON-NOSVE-NEXT: csinv w13, w13, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s6, s3 +; NONEON-NOSVE-NEXT: fcvt s3, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h7 +; NONEON-NOSVE-NEXT: mov h6, v0.h[2] +; NONEON-NOSVE-NEXT: mov h7, v1.h[2] +; NONEON-NOSVE-NEXT: csetm w14, eq +; NONEON-NOSVE-NEXT: csinv w14, w14, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s4, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: 
fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w15, eq +; NONEON-NOSVE-NEXT: csinv w15, w15, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s5, s3 +; NONEON-NOSVE-NEXT: mov h3, v0.h[3] +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w16, eq +; NONEON-NOSVE-NEXT: csinv w16, w16, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s4, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h3 +; NONEON-NOSVE-NEXT: fmov s2, w12 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w17, eq +; NONEON-NOSVE-NEXT: csinv w17, w17, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v0.h[4] +; NONEON-NOSVE-NEXT: fmov s3, w17 +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: mov v3.h[1], w16 +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v0.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: mov v2.h[2], w10 +; NONEON-NOSVE-NEXT: mov v3.h[2], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v0.h[6] +; NONEON-NOSVE-NEXT: mov h7, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w11 +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov v3.h[3], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: fcvt s4, h6 +; NONEON-NOSVE-NEXT: fcvt s5, h7 +; NONEON-NOSVE-NEXT: mov v2.h[4], w9 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v3.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov v2.h[5], w13 +; NONEON-NOSVE-NEXT: mov v3.h[5], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, vc +; NONEON-NOSVE-NEXT: fcmp s1, s0 +; NONEON-NOSVE-NEXT: mov v2.h[6], w14 +; NONEON-NOSVE-NEXT: mov v3.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, vc +; NONEON-NOSVE-NEXT: mov v2.h[7], w15 +; NONEON-NOSVE-NEXT: mov v3.h[7], w8 +; NONEON-NOSVE-NEXT: stp q3, q2, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp ueq <16 x half> %op1, %op2 @@ -220,6 +584,139 @@ define void @fcmp_one_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_one_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h2 +; NONEON-NOSVE-NEXT: mov h5, v2.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h1 +; NONEON-NOSVE-NEXT: mov h7, v1.h[2] +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, le +; NONEON-NOSVE-NEXT: fcmp s6, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[4] +; NONEON-NOSVE-NEXT: mov h6, 
v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w9, mi +; NONEON-NOSVE-NEXT: csinv w12, w9, wzr, le +; NONEON-NOSVE-NEXT: fcmp s7, s5 +; NONEON-NOSVE-NEXT: mov h5, v2.h[5] +; NONEON-NOSVE-NEXT: mov h7, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w9, mi +; NONEON-NOSVE-NEXT: csinv w10, w9, wzr, le +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x1] +; NONEON-NOSVE-NEXT: csetm w9, mi +; NONEON-NOSVE-NEXT: csinv w11, w9, wzr, le +; NONEON-NOSVE-NEXT: fcmp s6, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s6, h16 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w9, mi +; NONEON-NOSVE-NEXT: csinv w9, w9, wzr, le +; NONEON-NOSVE-NEXT: fcmp s7, s5 +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: mov h7, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w13, mi +; NONEON-NOSVE-NEXT: csinv w13, w13, wzr, le +; NONEON-NOSVE-NEXT: fcmp s6, s3 +; NONEON-NOSVE-NEXT: fcvt s3, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h7 +; NONEON-NOSVE-NEXT: mov h6, v0.h[2] +; NONEON-NOSVE-NEXT: mov h7, v1.h[2] +; NONEON-NOSVE-NEXT: csetm w14, mi +; NONEON-NOSVE-NEXT: csinv w14, w14, wzr, le +; NONEON-NOSVE-NEXT: fcmp s4, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w15, mi +; NONEON-NOSVE-NEXT: csinv w15, w15, wzr, le +; NONEON-NOSVE-NEXT: fcmp s5, s3 +; NONEON-NOSVE-NEXT: mov h3, v0.h[3] +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w16, mi +; NONEON-NOSVE-NEXT: csinv w16, w16, wzr, le +; NONEON-NOSVE-NEXT: fcmp s4, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h3 +; NONEON-NOSVE-NEXT: fmov s2, w12 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w17, mi +; NONEON-NOSVE-NEXT: csinv w17, w17, wzr, le +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v0.h[4] +; NONEON-NOSVE-NEXT: fmov s3, w17 +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: mov v3.h[1], w16 +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, le +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v0.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: mov v2.h[2], w10 +; NONEON-NOSVE-NEXT: mov v3.h[2], w8 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, le +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v0.h[6] +; NONEON-NOSVE-NEXT: mov h7, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w11 +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov v3.h[3], w8 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, le +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: fcvt s4, h6 +; NONEON-NOSVE-NEXT: fcvt s5, h7 +; NONEON-NOSVE-NEXT: mov v2.h[4], w9 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v3.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, le +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; 
NONEON-NOSVE-NEXT: mov v2.h[5], w13 +; NONEON-NOSVE-NEXT: mov v3.h[5], w8 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, le +; NONEON-NOSVE-NEXT: fcmp s1, s0 +; NONEON-NOSVE-NEXT: mov v2.h[6], w14 +; NONEON-NOSVE-NEXT: mov v3.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: csinv w8, w8, wzr, le +; NONEON-NOSVE-NEXT: mov v2.h[7], w15 +; NONEON-NOSVE-NEXT: mov v3.h[7], w8 +; NONEON-NOSVE-NEXT: stp q3, q2, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp one <16 x half> %op1, %op2 @@ -244,6 +741,123 @@ define void @fcmp_une_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_une_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, ne +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, ne +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, ne +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, ne +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, ne +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, ne +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, ne +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, ne +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, ne +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, 
v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp une <16 x half> %op1, %op2 @@ -268,6 +882,123 @@ define void @fcmp_ogt_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_ogt_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, gt +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, gt +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, gt +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, gt +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, gt +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, 
h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, gt +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, gt +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, gt +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, gt +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp ogt <16 x half> %op1, %op2 @@ -295,6 +1026,123 @@ define void @fcmp_ugt_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: eor z0.d, z2.d, z0.d ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_ugt_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, hi +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, hi +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; 
NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, hi +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, hi +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, hi +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, hi +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, hi +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, hi +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, hi +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, hi +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, hi +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, hi +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, hi +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, hi +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, hi +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, hi +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp ugt <16 x half> %op1, %op2 @@ -319,6 +1167,123 @@ define void @fcmp_olt_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_olt_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; 
NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, mi +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, mi +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, mi +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, mi +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, mi +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, mi +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, mi +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, mi +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, mi +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 
+; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, mi +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp olt <16 x half> %op1, %op2 @@ -346,6 +1311,123 @@ define void @fcmp_ult_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: eor z0.d, z2.d, z0.d ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_ult_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, lt +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, lt +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, lt +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, lt +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, lt +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, lt +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, lt +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, lt +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, lt +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; 
NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp ult <16 x half> %op1, %op2 @@ -370,6 +1452,123 @@ define void @fcmp_oge_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_oge_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, ge +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, ge +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, ge +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, ge +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, ge +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; 
NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, ge +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, ge +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, ge +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, ge +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp oge <16 x half> %op1, %op2 @@ -397,6 +1596,123 @@ define void @fcmp_uge_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: eor z0.d, z2.d, z0.d ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_uge_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, pl +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, pl +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; 
NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, pl +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, pl +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, pl +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, pl +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, pl +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, pl +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, pl +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, pl +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, pl +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, pl +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, pl +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, pl +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, pl +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, pl +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp uge <16 x half> %op1, %op2 @@ -421,6 +1737,123 @@ define void @fcmp_ole_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_ole_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; 
NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, ls +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, ls +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, ls +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, ls +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, ls +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, ls +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, ls +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, ls +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, ls +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, ls +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, ls +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, ls +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, ls +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 
+; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, ls +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, ls +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, ls +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp ole <16 x half> %op1, %op2 @@ -448,6 +1881,123 @@ define void @fcmp_ule_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: eor z0.d, z2.d, z0.d ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_ule_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, le +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, le +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, le +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, le +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, le +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, le +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, le +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, le +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, le +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; 
NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp ule <16 x half> %op1, %op2 @@ -472,6 +2022,123 @@ define void @fcmp_uno_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_uno_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, vs +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, vs +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, vs +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, vs +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, vs +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, vs +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; 
NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, vs +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, vs +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, vs +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, vs +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, vs +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, vs +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, vs +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, vs +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, vs +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, vs +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp uno <16 x half> %op1, %op2 @@ -499,6 +2166,123 @@ define void @fcmp_ord_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: eor z0.d, z2.d, z0.d ; CHECK-NEXT: stp q1, q0, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_ord_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, vc +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, vc +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; 
NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, vc +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, vc +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, vc +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, vc +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, vc +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, vc +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, vc +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, vc +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, vc +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, vc +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, vc +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, vc +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, vc +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, vc +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp ord <16 x half> %op1, %op2 @@ -523,6 +2307,123 @@ define void @fcmp_eq_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_eq_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; 
NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, eq +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, eq +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, eq +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, eq +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, eq +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, eq +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, eq +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, eq +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 
+; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp fast oeq <16 x half> %op1, %op2 @@ -547,6 +2448,123 @@ define void @fcmp_ne_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_ne_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, ne +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, ne +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, ne +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, ne +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, ne +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, ne +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, ne +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, ne +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, ne +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; 
NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp fast one <16 x half> %op1, %op2 @@ -571,6 +2589,123 @@ define void @fcmp_gt_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_gt_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, gt +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, gt +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, gt +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, gt +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, gt +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 
+; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, gt +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, gt +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, gt +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, gt +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, gt +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp fast ogt <16 x half> %op1, %op2 @@ -595,6 +2730,123 @@ define void @fcmp_lt_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_lt_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, lt +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov 
h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, lt +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, lt +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, lt +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, lt +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, lt +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, lt +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, lt +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, lt +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, lt +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp fast olt <16 x half> %op1, %op2 @@ -619,6 +2871,123 @@ define void @fcmp_ge_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_ge_v16f16: +; NONEON-NOSVE: // %bb.0: 
+; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, ge +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, ge +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, ge +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, ge +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, ge +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, ge +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, ge +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, ge +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, ge +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, 
h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, ge +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp fast oge <16 x half> %op1, %op2 @@ -643,6 +3012,123 @@ define void @fcmp_le_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x2] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcmp_le_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x1, #16] +; NONEON-NOSVE-NEXT: mov h0, v2.h[1] +; NONEON-NOSVE-NEXT: mov h3, v1.h[1] +; NONEON-NOSVE-NEXT: mov h4, v2.h[2] +; NONEON-NOSVE-NEXT: mov h5, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h2 +; NONEON-NOSVE-NEXT: fcvt s7, h1 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h0, v2.h[3] +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[4] +; NONEON-NOSVE-NEXT: mov h7, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w12, le +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v2.h[5] +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w11, le +; NONEON-NOSVE-NEXT: fcmp s3, s0 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w9, le +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: csetm w10, le +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[1] +; NONEON-NOSVE-NEXT: mov h5, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: csetm w13, le +; NONEON-NOSVE-NEXT: fcmp s7, s3 +; NONEON-NOSVE-NEXT: fmov s7, w12 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: csetm w14, le +; NONEON-NOSVE-NEXT: fcmp s6, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: mov v7.h[1], w8 +; NONEON-NOSVE-NEXT: csetm w15, le +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h4, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: mov v7.h[2], w11 +; NONEON-NOSVE-NEXT: csetm w16, le +; NONEON-NOSVE-NEXT: fcmp s5, s2 +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: csetm w17, le +; NONEON-NOSVE-NEXT: mov v7.h[3], w9 +; NONEON-NOSVE-NEXT: fmov s2, w17 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; 
NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: mov h4, v0.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[1], w16 +; NONEON-NOSVE-NEXT: mov v7.h[4], w10 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: mov h5, v1.h[5] +; NONEON-NOSVE-NEXT: mov h6, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: mov v7.h[5], w13 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: mov h4, v0.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v7.h[6], w14 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcmp s6, s5 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v7.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: fcmp s4, s3 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: fcmp s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: csetm w8, le +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: stp q2, q7, [x2] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %cmp = fcmp fast ole <16 x half> %op1, %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll index 9bdde14e8d83df..ac0b6c0e0440ce 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -17,6 +18,17 @@ define void @fp_convert_combine_crash(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fp_convert_combine_crash: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov v0.4s, #8.00000000 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmul v1.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fmul v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %f = load <8 x float>, ptr %a %mul.i = fmul <8 x float> %f, diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll index 244a4051017395..16f30adbd14e02 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc 
-force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -17,6 +18,12 @@ define void @fcvt_v2f16_to_v2f32(<2 x half> %a, ptr %b) { ; CHECK-NEXT: fcvt z0.s, p0/m, z0.h ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v2f16_to_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %res = fpext <2 x half> %a to <2 x float> store <2 x float> %res, ptr %b ret void @@ -31,6 +38,12 @@ define void @fcvt_v4f16_to_v4f32(<4 x half> %a, ptr %b) { ; CHECK-NEXT: fcvt z0.s, p0/m, z0.h ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v4f16_to_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %res = fpext <4 x half> %a to <4 x float> store <4 x float> %res, ptr %b ret void @@ -48,6 +61,17 @@ define void @fcvt_v8f16_to_v8f32(<8 x half> %a, ptr %b) { ; CHECK-NEXT: fcvt z0.s, p0/m, z0.h ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v8f16_to_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = fpext <8 x half> %a to <8 x float> store <8 x float> %res, ptr %b ret void @@ -72,6 +96,21 @@ define void @fcvt_v16f16_to_v16f32(<16 x half> %a, ptr %b) { ; CHECK-NEXT: stp q3, q0, [x0] ; CHECK-NEXT: stp q2, q1, [x0, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v16f16_to_v16f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v3.4h +; NONEON-NOSVE-NEXT: stp q0, q3, [x0] +; NONEON-NOSVE-NEXT: stp q1, q2, [x0, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %res = fpext <16 x half> %a to <16 x float> store <16 x float> %res, ptr %b ret void @@ -90,6 +129,13 @@ define void @fcvt_v2f16_v2f32(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z0.s, p0/m, z0.h ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v2f16_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x half>, ptr %a %res = fpext <2 x half> %op1 to <2 x float> store <2 x float> %res, ptr %b @@ -104,6 +150,13 @@ define void @fcvt_v4f16_v4f32(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z0.s, p0/m, z0.h ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v4f16_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x half>, ptr %a %res = fpext <4 x half> %op1 to <4 x float> store <4 x float> %res, ptr %b @@ -121,6 +174,18 @@ define void @fcvt_v8f16_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z1.s, p0/m, z1.h ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v8f16_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %res = fpext <8 x half> %op1 to <8 x float> store <8 x float> %res, ptr %b @@ -145,6 +210,22 @@ define void @fcvt_v16f16_v16f32(ptr %a, ptr %b) { ; CHECK-NEXT: stp q0, q1, [x1, #32] ; CHECK-NEXT: stp q2, q3, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v16f16_v16f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v3.4h +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %res = fpext <16 x half> %op1 to <16 x float> store <16 x float> %res, ptr %b @@ -162,6 +243,13 @@ define void @fcvt_v1f16_v1f64(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt d0, h0 ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v1f16_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: fcvt d0, h0 +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <1 x half>, ptr %a %res = fpext <1 x half> %op1 to <1 x double> store <1 x double> %res, ptr %b @@ -176,6 +264,14 @@ define void @fcvt_v2f16_v2f64(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z0.d, p0/m, z0.h ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v2f16_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x half>, ptr %a %res = fpext <2 x half> %op1 to <2 x double> store <2 x double> %res, ptr %b @@ -193,6 +289,19 @@ define void @fcvt_v4f16_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z1.d, p0/m, z1.h ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v4f16_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x half>, ptr %a %res = fpext <4 x half> %op1 to <4 x double> store <4 x double> %res, ptr %b @@ -217,6 +326,26 @@ define void @fcvt_v8f16_v8f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q0, q1, [x1, #32] ; CHECK-NEXT: stp q2, q3, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v8f16_v8f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: ldr d2, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: fcvtl v2.2d, v2.2s +; NONEON-NOSVE-NEXT: fcvtl v3.2d, v3.2s +; NONEON-NOSVE-NEXT: stp q0, q2, [x1] +; NONEON-NOSVE-NEXT: stp q1, q3, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %res = fpext <8 x half> %op1 to <8 x double> store <8 x double> %res, ptr %b @@ -258,6 +387,38 @@ define void @fcvt_v16f16_v16f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q4, q0, [x1, #32] ; CHECK-NEXT: stp q1, q2, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v16f16_v16f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-96]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 96 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v3.4h +; NONEON-NOSVE-NEXT: stp q2, q0, [sp, #32] +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtl v2.2d, v2.2s +; NONEON-NOSVE-NEXT: stp q3, q1, [sp, #64] +; NONEON-NOSVE-NEXT: ldr d5, [sp, #56] +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: ldr d4, [sp, #88] +; NONEON-NOSVE-NEXT: ldr d6, [sp, #72] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #40] +; NONEON-NOSVE-NEXT: fcvtl v5.2d, v5.2s +; NONEON-NOSVE-NEXT: fcvtl v3.2d, v3.2s +; NONEON-NOSVE-NEXT: fcvtl v4.2d, v4.2s +; NONEON-NOSVE-NEXT: stp q0, q5, [x1] +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v7.2s +; NONEON-NOSVE-NEXT: stp q1, q4, [x1, #64] +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v6.2s +; NONEON-NOSVE-NEXT: stp q2, q0, [x1, #32] +; NONEON-NOSVE-NEXT: stp q3, q1, [x1, #96] +; NONEON-NOSVE-NEXT: add sp, sp, #96 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %res = fpext <16 x half> %op1 to <16 x double> store <16 x double> %res, ptr %b @@ -275,6 +436,13 @@ define void @fcvt_v1f32_v1f64(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt d0, s0 ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v1f32_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <1 x float>, ptr %a %res = fpext <1 x float> %op1 to <1 x double> store <1 x double> %res, ptr %b @@ -289,6 +457,13 @@ define void @fcvt_v2f32_v2f64(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z0.d, p0/m, z0.s ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v2f32_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x float>, ptr %a %res = fpext <2 x float> %op1 to <2 x double> store <2 x double> %res, ptr %b @@ -306,6 +481,18 @@ define void @fcvt_v4f32_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z1.d, p0/m, z1.s ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v4f32_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x float>, ptr %a %res = fpext <4 x float> %op1 to <4 x double> store <4 x double> %res, ptr %b @@ -330,6 +517,22 @@ define void @fcvt_v8f32_v8f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q0, q1, [x1, #32] ; CHECK-NEXT: stp q2, q3, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v8f32_v8f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtl v2.2d, v2.2s +; NONEON-NOSVE-NEXT: fcvtl v3.2d, v3.2s +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %res = fpext <8 x float> %op1 to <8 x double> store <8 x double> %res, ptr %b @@ -348,6 +551,13 @@ define void @fcvt_v2f32_v2f16(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z0.h, p0/m, z0.s ; CHECK-NEXT: st1h { z0.s }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v2f32_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: str s0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x float>, ptr %a %res = fptrunc <2 x float> %op1 to <2 x half> store <2 x half> %res, ptr %b @@ -362,6 +572,13 @@ define void @fcvt_v4f32_v4f16(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z0.h, p0/m, z0.s ; CHECK-NEXT: st1h { z0.s }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v4f32_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x float>, ptr %a %res = fptrunc <4 x float> %op1 to <4 x half> store <4 x half> %res, ptr %b @@ -379,6 +596,14 @@ define void @fcvt_v8f32_v8f16(ptr %a, ptr %b) { ; CHECK-NEXT: st1h { z0.s }, p0, [x1, x8, lsl #1] ; CHECK-NEXT: st1h { z1.s }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v8f32_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %res = fptrunc <8 x float> %op1 to <8 x half> store <8 x half> %res, ptr %b @@ -397,6 +622,13 @@ define void @fcvt_v1f64_v1f16(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z0.h, p0/m, z0.d ; CHECK-NEXT: st1h { z0.d }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v1f64_v1f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: fcvt h0, d0 +; NONEON-NOSVE-NEXT: str h0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <1 x double>, ptr %a %res = fptrunc <1 x double> %op1 to <1 x half> store <1 x half> %res, ptr %b @@ -411,6 +643,14 @@ define void @fcvt_v2f64_v2f16(ptr %a, ptr %b) { ; CHECK-NEXT: fcvt z0.h, p0/m, z0.d ; CHECK-NEXT: st1h { z0.d }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v2f64_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: fcvtxn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: str s0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x double>, ptr %a %res = fptrunc <2 x double> %op1 to <2 x half> store <2 x half> %res, ptr %b @@ -428,6 +668,15 @@ define void @fcvt_v4f64_v4f16(ptr %a, ptr %b) { ; CHECK-NEXT: st1h { z0.d }, p0, [x1, x8, lsl #1] ; CHECK-NEXT: st1h { z1.d }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v4f64_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtxn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtxn2 v0.4s, v1.2d +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %res = fptrunc <4 x 
double> %op1 to <4 x half> store <4 x half> %res, ptr %b @@ -446,6 +695,13 @@ define void @fcvt_v1f64_v1f32(<1 x double> %op1, ptr %b) { ; CHECK-NEXT: fcvt z0.s, p0/m, z0.d ; CHECK-NEXT: st1w { z0.d }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v1f64_v1f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: str s0, [x0] +; NONEON-NOSVE-NEXT: ret %res = fptrunc <1 x double> %op1 to <1 x float> store <1 x float> %res, ptr %b ret void @@ -459,6 +715,12 @@ define void @fcvt_v2f64_v2f32(<2 x double> %op1, ptr %b) { ; CHECK-NEXT: fcvt z0.s, p0/m, z0.d ; CHECK-NEXT: st1w { z0.d }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v2f64_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %res = fptrunc <2 x double> %op1 to <2 x float> store <2 x float> %res, ptr %b ret void @@ -475,6 +737,14 @@ define void @fcvt_v4f64_v4f32(ptr %a, ptr %b) { ; CHECK-NEXT: st1w { z0.d }, p0, [x1, x8, lsl #2] ; CHECK-NEXT: st1w { z1.d }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvt_v4f64_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtn2 v0.4s, v1.2d +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %res = fptrunc <4 x double> %op1 to <4 x float> store <4 x float> %res, ptr %b diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll index cbe71d715a8fb9..44d7116e5f8713 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -17,6 +18,18 @@ define <4 x half> @fma_v4f16(<4 x half> %op1, <4 x half> %op2, <4 x half> %op3) ; CHECK-NEXT: fmad z0.h, p0/m, z1.h, z2.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fmul v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fadd v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %mul = fmul contract <4 x half> %op1, %op2 %res = fadd contract <4 x half> %mul, %op3 ret <4 x half> %res @@ -32,6 +45,26 @@ define <8 x half> @fma_v8f16(<8 x half> %op1, <8 x half> %op2, <8 x half> %op3) ; CHECK-NEXT: fmad z0.h, p0/m, z1.h, z2.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v4.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fmul v3.4s, v4.4s, v3.4s +; NONEON-NOSVE-NEXT: fmul v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v2.4h +; 
NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v2.8h +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fadd v0.4s, v0.4s, v3.4s +; NONEON-NOSVE-NEXT: fadd v1.4s, v1.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %mul = fmul contract <8 x half> %op1, %op2 %res = fadd contract <8 x half> %mul, %op3 ret <8 x half> %res @@ -49,6 +82,46 @@ define void @fma_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: fmla z1.h, p0/m, z3.h, z4.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: fcvtl v5.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v7.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl v4.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v6.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v3.4s, v3.8h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v2.8h +; NONEON-NOSVE-NEXT: fmul v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: fmul v5.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: fmul v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fmul v2.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v4.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v5.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v2.4s +; NONEON-NOSVE-NEXT: ldp q0, q2, [x2] +; NONEON-NOSVE-NEXT: fcvtl v4.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v5.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v6.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl v7.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v2.8h +; NONEON-NOSVE-NEXT: fcvtl2 v3.4s, v3.8h +; NONEON-NOSVE-NEXT: fadd v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: fadd v5.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: fadd v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fadd v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v4.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v5.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v2.4s +; NONEON-NOSVE-NEXT: stp q1, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %op3 = load <16 x half>, ptr %c @@ -68,6 +141,12 @@ define <2 x float> @fma_v2f32(<2 x float> %op1, <2 x float> %op2, <2 x float> %o ; CHECK-NEXT: fmad z0.s, p0/m, z1.s, z2.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmla v2.2s, v1.2s, v0.2s +; NONEON-NOSVE-NEXT: fmov d0, d2 +; NONEON-NOSVE-NEXT: ret %mul = fmul contract <2 x float> %op1, %op2 %res = fadd contract <2 x float> %mul, %op3 ret <2 x float> %res @@ -83,6 +162,12 @@ define <4 x float> @fma_v4f32(<4 x float> %op1, <4 x float> %op2, <4 x float> %o ; CHECK-NEXT: fmad z0.s, p0/m, z1.s, z2.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmla v2.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %mul = fmul contract <4 x float> %op1, %op2 %res = fadd contract <4 x float> %mul, %op3 ret <4 x float> %res @@ -100,6 +185,16 @@ define void @fma_v8f32(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: fmla z1.s, p0/m, z3.s, z4.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v8f32: +; 
NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q4, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q5, [x2] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fmla v1.4s, v0.4s, v2.4s +; NONEON-NOSVE-NEXT: fmla v5.4s, v4.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q1, q5, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %op3 = load <8 x float>, ptr %c @@ -114,6 +209,11 @@ define <1 x double> @fma_v1f64(<1 x double> %op1, <1 x double> %op2, <1 x double ; CHECK: // %bb.0: ; CHECK-NEXT: fmadd d0, d0, d1, d2 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmadd d0, d0, d1, d2 +; NONEON-NOSVE-NEXT: ret %mul = fmul contract <1 x double> %op1, %op2 %res = fadd contract <1 x double> %mul, %op3 ret <1 x double> %res @@ -129,6 +229,12 @@ define <2 x double> @fma_v2f64(<2 x double> %op1, <2 x double> %op2, <2 x double ; CHECK-NEXT: fmad z0.d, p0/m, z1.d, z2.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmla v2.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %mul = fmul contract <2 x double> %op1, %op2 %res = fadd contract <2 x double> %mul, %op3 ret <2 x double> %res @@ -146,6 +252,16 @@ define void @fma_v4f64(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: fmla z1.d, p0/m, z3.d, z4.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fma_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q4, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q5, [x2] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fmla v1.2d, v0.2d, v2.2d +; NONEON-NOSVE-NEXT: fmla v5.2d, v4.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q1, q5, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %op3 = load <4 x double>, ptr %c diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll index 94a74763aa0e9f..bc7659c06ad05f 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -17,6 +18,38 @@ define <4 x half> @fmaxnm_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxnm_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: mov h4, v1.h[2] +; NONEON-NOSVE-NEXT: mov h5, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h1 +; NONEON-NOSVE-NEXT: fcvt s7, h0 +; NONEON-NOSVE-NEXT: mov h1, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s2, s3, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fmaxnm s5, s7, s6 +; 
NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: fmaxnm s3, s4, s3 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt h0, s5 +; NONEON-NOSVE-NEXT: fcvt s4, h6 +; NONEON-NOSVE-NEXT: mov v0.h[1], v2.h[0] +; NONEON-NOSVE-NEXT: fcvt h2, s3 +; NONEON-NOSVE-NEXT: fmaxnm s1, s4, s1 +; NONEON-NOSVE-NEXT: mov v0.h[2], v2.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: mov v0.h[3], v1.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.maxnum.v4f16(<4 x half> %op1, <4 x half> %op2) ret <4 x half> %res } @@ -30,6 +63,64 @@ define <8 x half> @fmaxnm_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxnm_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: mov h6, v1.h[2] +; NONEON-NOSVE-NEXT: mov h7, v0.h[2] +; NONEON-NOSVE-NEXT: mov h16, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fmaxnm s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fmaxnm s3, s3, s2 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s4 +; NONEON-NOSVE-NEXT: fmaxnm s4, s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[4] +; NONEON-NOSVE-NEXT: mov h7, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fmaxnm s5, s5, s16 +; NONEON-NOSVE-NEXT: mov h16, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: mov v2.h[1], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt s3, h6 +; NONEON-NOSVE-NEXT: fcvt s6, h7 +; NONEON-NOSVE-NEXT: mov h7, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h5, s5 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: mov v2.h[2], v4.h[0] +; NONEON-NOSVE-NEXT: mov h4, v1.h[6] +; NONEON-NOSVE-NEXT: fmaxnm s3, s6, s3 +; NONEON-NOSVE-NEXT: mov h6, v0.h[6] +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v2.h[3], v5.h[0] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h6 +; NONEON-NOSVE-NEXT: fmaxnm s6, s16, s7 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v2.h[4], v3.h[0] +; NONEON-NOSVE-NEXT: fmaxnm s4, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h3, s6 +; NONEON-NOSVE-NEXT: fmaxnm s0, s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[5], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: mov v2.h[6], v3.h[0] +; NONEON-NOSVE-NEXT: mov v2.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.maxnum.v8f16(<8 x half> %op1, <8 x half> %op2) ret <8 x half> %res } @@ -45,6 +136,119 @@ define void @fmaxnm_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fmaxnm z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxnm_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: mov h7, v0.h[1] +; NONEON-NOSVE-NEXT: mov h16, v0.h[2] +; NONEON-NOSVE-NEXT: mov h18, v2.h[1] +; NONEON-NOSVE-NEXT: mov h5, v1.h[1] +; NONEON-NOSVE-NEXT: mov h6, v1.h[2] +; NONEON-NOSVE-NEXT: mov h17, v3.h[1] +; 
NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s19, h0 +; NONEON-NOSVE-NEXT: fcvt s20, h3 +; NONEON-NOSVE-NEXT: fcvt s21, h2 +; NONEON-NOSVE-NEXT: mov h22, v3.h[2] +; NONEON-NOSVE-NEXT: mov h23, v2.h[2] +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fcvt s18, h18 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fmaxnm s4, s19, s4 +; NONEON-NOSVE-NEXT: mov h19, v0.h[3] +; NONEON-NOSVE-NEXT: mov h24, v3.h[3] +; NONEON-NOSVE-NEXT: fmaxnm s20, s21, s20 +; NONEON-NOSVE-NEXT: fcvt s21, h22 +; NONEON-NOSVE-NEXT: fcvt s22, h23 +; NONEON-NOSVE-NEXT: mov h23, v2.h[3] +; NONEON-NOSVE-NEXT: mov h25, v2.h[6] +; NONEON-NOSVE-NEXT: fmaxnm s5, s7, s5 +; NONEON-NOSVE-NEXT: mov h7, v1.h[3] +; NONEON-NOSVE-NEXT: fmaxnm s6, s16, s6 +; NONEON-NOSVE-NEXT: fmaxnm s16, s18, s17 +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s18, h19 +; NONEON-NOSVE-NEXT: fcvt s19, h24 +; NONEON-NOSVE-NEXT: mov h24, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h17, s5 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt h5, s20 +; NONEON-NOSVE-NEXT: fmaxnm s20, s22, s21 +; NONEON-NOSVE-NEXT: fcvt h16, s16 +; NONEON-NOSVE-NEXT: fcvt s21, h23 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: mov h22, v0.h[4] +; NONEON-NOSVE-NEXT: mov h23, v2.h[4] +; NONEON-NOSVE-NEXT: mov v4.h[1], v17.h[0] +; NONEON-NOSVE-NEXT: mov h17, v1.h[4] +; NONEON-NOSVE-NEXT: fmaxnm s7, s18, s7 +; NONEON-NOSVE-NEXT: mov h18, v3.h[4] +; NONEON-NOSVE-NEXT: mov v5.h[1], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s20 +; NONEON-NOSVE-NEXT: fmaxnm s19, s21, s19 +; NONEON-NOSVE-NEXT: fcvt s20, h23 +; NONEON-NOSVE-NEXT: mov h21, v1.h[5] +; NONEON-NOSVE-NEXT: mov h23, v2.h[5] +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: mov v4.h[2], v6.h[0] +; NONEON-NOSVE-NEXT: fcvt s6, h17 +; NONEON-NOSVE-NEXT: fcvt s17, h22 +; NONEON-NOSVE-NEXT: fcvt h7, s7 +; NONEON-NOSVE-NEXT: fcvt s18, h18 +; NONEON-NOSVE-NEXT: mov h22, v3.h[5] +; NONEON-NOSVE-NEXT: mov v5.h[2], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s19 +; NONEON-NOSVE-NEXT: mov h19, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmaxnm s6, s17, s6 +; NONEON-NOSVE-NEXT: mov h17, v1.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fmaxnm s18, s20, s18 +; NONEON-NOSVE-NEXT: mov h20, v3.h[6] +; NONEON-NOSVE-NEXT: mov v4.h[3], v7.h[0] +; NONEON-NOSVE-NEXT: fcvt s7, h22 +; NONEON-NOSVE-NEXT: fcvt s22, h23 +; NONEON-NOSVE-NEXT: mov v5.h[3], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt s16, h21 +; NONEON-NOSVE-NEXT: fcvt s21, h24 +; NONEON-NOSVE-NEXT: fcvt s19, h19 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fcvt s23, h25 +; NONEON-NOSVE-NEXT: fcvt h18, s18 +; NONEON-NOSVE-NEXT: fcvt s20, h20 +; NONEON-NOSVE-NEXT: mov h3, v3.h[7] +; NONEON-NOSVE-NEXT: fmaxnm s7, s22, s7 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fmaxnm s16, s21, s16 +; NONEON-NOSVE-NEXT: mov v4.h[4], v6.h[0] +; NONEON-NOSVE-NEXT: fmaxnm s6, s19, s17 +; NONEON-NOSVE-NEXT: mov v5.h[4], v18.h[0] +; NONEON-NOSVE-NEXT: fmaxnm s17, s23, s20 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt h7, s7 +; NONEON-NOSVE-NEXT: fmaxnm s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h16, s16 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fmaxnm s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h3, s17 +; NONEON-NOSVE-NEXT: mov v5.h[5], v7.h[0] +; NONEON-NOSVE-NEXT: fcvt 
h0, s0 +; NONEON-NOSVE-NEXT: mov v4.h[5], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s2 +; NONEON-NOSVE-NEXT: mov v5.h[6], v3.h[0] +; NONEON-NOSVE-NEXT: mov v4.h[6], v6.h[0] +; NONEON-NOSVE-NEXT: mov v5.h[7], v1.h[0] +; NONEON-NOSVE-NEXT: mov v4.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: stp q5, q4, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %res = call <16 x half> @llvm.maxnum.v16f16(<16 x half> %op1, <16 x half> %op2) @@ -61,6 +265,11 @@ define <2 x float> @fmaxnm_v2f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxnm_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxnm v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %op1, <2 x float> %op2) ret <2 x float> %res } @@ -74,6 +283,11 @@ define <4 x float> @fmaxnm_v4f32(<4 x float> %op1, <4 x float> %op2) { ; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxnm_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxnm v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %op1, <4 x float> %op2) ret <4 x float> %res } @@ -89,6 +303,15 @@ define void @fmaxnm_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fmaxnm z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxnm_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmaxnm v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fmaxnm v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %op1, <8 x float> %op2) @@ -101,6 +324,11 @@ define <1 x double> @fmaxnm_v1f64(<1 x double> %op1, <1 x double> %op2) { ; CHECK: // %bb.0: ; CHECK-NEXT: fmaxnm d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxnm_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxnm d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.maxnum.v1f64(<1 x double> %op1, <1 x double> %op2) ret <1 x double> %res } @@ -114,6 +342,11 @@ define <2 x double> @fmaxnm_v2f64(<2 x double> %op1, <2 x double> %op2) { ; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxnm_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxnm v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %op1, <2 x double> %op2) ret <2 x double> %res } @@ -129,6 +362,15 @@ define void @fmaxnm_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fmaxnm z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxnm_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmaxnm v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fmaxnm v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %op1, <4 x double> %op2) @@ -149,6 +391,38 @@ define <4 x half> @fminnm_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: 
fminnm z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminnm_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: mov h4, v1.h[2] +; NONEON-NOSVE-NEXT: mov h5, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h1 +; NONEON-NOSVE-NEXT: fcvt s7, h0 +; NONEON-NOSVE-NEXT: mov h1, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s2, s3, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fminnm s5, s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: fminnm s3, s4, s3 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt h0, s5 +; NONEON-NOSVE-NEXT: fcvt s4, h6 +; NONEON-NOSVE-NEXT: mov v0.h[1], v2.h[0] +; NONEON-NOSVE-NEXT: fcvt h2, s3 +; NONEON-NOSVE-NEXT: fminnm s1, s4, s1 +; NONEON-NOSVE-NEXT: mov v0.h[2], v2.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: mov v0.h[3], v1.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.minnum.v4f16(<4 x half> %op1, <4 x half> %op2) ret <4 x half> %res } @@ -162,6 +436,64 @@ define <8 x half> @fminnm_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminnm_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: mov h6, v1.h[2] +; NONEON-NOSVE-NEXT: mov h7, v0.h[2] +; NONEON-NOSVE-NEXT: mov h16, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fminnm s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fminnm s3, s3, s2 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s4 +; NONEON-NOSVE-NEXT: fminnm s4, s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[4] +; NONEON-NOSVE-NEXT: mov h7, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fminnm s5, s5, s16 +; NONEON-NOSVE-NEXT: mov h16, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: mov v2.h[1], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt s3, h6 +; NONEON-NOSVE-NEXT: fcvt s6, h7 +; NONEON-NOSVE-NEXT: mov h7, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h5, s5 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: mov v2.h[2], v4.h[0] +; NONEON-NOSVE-NEXT: mov h4, v1.h[6] +; NONEON-NOSVE-NEXT: fminnm s3, s6, s3 +; NONEON-NOSVE-NEXT: mov h6, v0.h[6] +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v2.h[3], v5.h[0] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h6 +; NONEON-NOSVE-NEXT: fminnm s6, s16, s7 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v2.h[4], v3.h[0] +; NONEON-NOSVE-NEXT: fminnm s4, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h3, s6 +; NONEON-NOSVE-NEXT: fminnm s0, s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[5], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: fcvt h0, 
s0 +; NONEON-NOSVE-NEXT: mov v2.h[6], v3.h[0] +; NONEON-NOSVE-NEXT: mov v2.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.minnum.v8f16(<8 x half> %op1, <8 x half> %op2) ret <8 x half> %res } @@ -177,6 +509,119 @@ define void @fminnm_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fminnm z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminnm_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: mov h7, v0.h[1] +; NONEON-NOSVE-NEXT: mov h16, v0.h[2] +; NONEON-NOSVE-NEXT: mov h18, v2.h[1] +; NONEON-NOSVE-NEXT: mov h5, v1.h[1] +; NONEON-NOSVE-NEXT: mov h6, v1.h[2] +; NONEON-NOSVE-NEXT: mov h17, v3.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s19, h0 +; NONEON-NOSVE-NEXT: fcvt s20, h3 +; NONEON-NOSVE-NEXT: fcvt s21, h2 +; NONEON-NOSVE-NEXT: mov h22, v3.h[2] +; NONEON-NOSVE-NEXT: mov h23, v2.h[2] +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fcvt s18, h18 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fminnm s4, s19, s4 +; NONEON-NOSVE-NEXT: mov h19, v0.h[3] +; NONEON-NOSVE-NEXT: mov h24, v3.h[3] +; NONEON-NOSVE-NEXT: fminnm s20, s21, s20 +; NONEON-NOSVE-NEXT: fcvt s21, h22 +; NONEON-NOSVE-NEXT: fcvt s22, h23 +; NONEON-NOSVE-NEXT: mov h23, v2.h[3] +; NONEON-NOSVE-NEXT: mov h25, v2.h[6] +; NONEON-NOSVE-NEXT: fminnm s5, s7, s5 +; NONEON-NOSVE-NEXT: mov h7, v1.h[3] +; NONEON-NOSVE-NEXT: fminnm s6, s16, s6 +; NONEON-NOSVE-NEXT: fminnm s16, s18, s17 +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s18, h19 +; NONEON-NOSVE-NEXT: fcvt s19, h24 +; NONEON-NOSVE-NEXT: mov h24, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h17, s5 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt h5, s20 +; NONEON-NOSVE-NEXT: fminnm s20, s22, s21 +; NONEON-NOSVE-NEXT: fcvt h16, s16 +; NONEON-NOSVE-NEXT: fcvt s21, h23 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: mov h22, v0.h[4] +; NONEON-NOSVE-NEXT: mov h23, v2.h[4] +; NONEON-NOSVE-NEXT: mov v4.h[1], v17.h[0] +; NONEON-NOSVE-NEXT: mov h17, v1.h[4] +; NONEON-NOSVE-NEXT: fminnm s7, s18, s7 +; NONEON-NOSVE-NEXT: mov h18, v3.h[4] +; NONEON-NOSVE-NEXT: mov v5.h[1], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s20 +; NONEON-NOSVE-NEXT: fminnm s19, s21, s19 +; NONEON-NOSVE-NEXT: fcvt s20, h23 +; NONEON-NOSVE-NEXT: mov h21, v1.h[5] +; NONEON-NOSVE-NEXT: mov h23, v2.h[5] +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: mov v4.h[2], v6.h[0] +; NONEON-NOSVE-NEXT: fcvt s6, h17 +; NONEON-NOSVE-NEXT: fcvt s17, h22 +; NONEON-NOSVE-NEXT: fcvt h7, s7 +; NONEON-NOSVE-NEXT: fcvt s18, h18 +; NONEON-NOSVE-NEXT: mov h22, v3.h[5] +; NONEON-NOSVE-NEXT: mov v5.h[2], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s19 +; NONEON-NOSVE-NEXT: mov h19, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fminnm s6, s17, s6 +; NONEON-NOSVE-NEXT: mov h17, v1.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fminnm s18, s20, s18 +; NONEON-NOSVE-NEXT: mov h20, v3.h[6] +; NONEON-NOSVE-NEXT: mov v4.h[3], v7.h[0] +; NONEON-NOSVE-NEXT: fcvt s7, h22 +; NONEON-NOSVE-NEXT: fcvt s22, h23 +; NONEON-NOSVE-NEXT: mov v5.h[3], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt s16, h21 +; NONEON-NOSVE-NEXT: fcvt s21, h24 +; NONEON-NOSVE-NEXT: fcvt s19, h19 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fcvt s17, 
h17 +; NONEON-NOSVE-NEXT: fcvt s23, h25 +; NONEON-NOSVE-NEXT: fcvt h18, s18 +; NONEON-NOSVE-NEXT: fcvt s20, h20 +; NONEON-NOSVE-NEXT: mov h3, v3.h[7] +; NONEON-NOSVE-NEXT: fminnm s7, s22, s7 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fminnm s16, s21, s16 +; NONEON-NOSVE-NEXT: mov v4.h[4], v6.h[0] +; NONEON-NOSVE-NEXT: fminnm s6, s19, s17 +; NONEON-NOSVE-NEXT: mov v5.h[4], v18.h[0] +; NONEON-NOSVE-NEXT: fminnm s17, s23, s20 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt h7, s7 +; NONEON-NOSVE-NEXT: fminnm s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h16, s16 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fminnm s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h3, s17 +; NONEON-NOSVE-NEXT: mov v5.h[5], v7.h[0] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: mov v4.h[5], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s2 +; NONEON-NOSVE-NEXT: mov v5.h[6], v3.h[0] +; NONEON-NOSVE-NEXT: mov v4.h[6], v6.h[0] +; NONEON-NOSVE-NEXT: mov v5.h[7], v1.h[0] +; NONEON-NOSVE-NEXT: mov v4.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: stp q5, q4, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %res = call <16 x half> @llvm.minnum.v16f16(<16 x half> %op1, <16 x half> %op2) @@ -193,6 +638,11 @@ define <2 x float> @fminnm_v2f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminnm_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminnm v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.minnum.v2f32(<2 x float> %op1, <2 x float> %op2) ret <2 x float> %res } @@ -206,6 +656,11 @@ define <4 x float> @fminnm_v4f32(<4 x float> %op1, <4 x float> %op2) { ; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminnm_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminnm v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.minnum.v4f32(<4 x float> %op1, <4 x float> %op2) ret <4 x float> %res } @@ -221,6 +676,15 @@ define void @fminnm_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fminnm z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminnm_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fminnm v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fminnm v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = call <8 x float> @llvm.minnum.v8f32(<8 x float> %op1, <8 x float> %op2) @@ -233,6 +697,11 @@ define <1 x double> @fminnm_v1f64(<1 x double> %op1, <1 x double> %op2) { ; CHECK: // %bb.0: ; CHECK-NEXT: fminnm d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminnm_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminnm d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.minnum.v1f64(<1 x double> %op1, <1 x double> %op2) ret <1 x double> %res } @@ -246,6 +715,11 @@ define <2 x double> @fminnm_v2f64(<2 x double> %op1, <2 x double> %op2) { ; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminnm_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminnm v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> 
@llvm.minnum.v2f64(<2 x double> %op1, <2 x double> %op2) ret <2 x double> %res } @@ -261,6 +735,15 @@ define void @fminnm_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fminnm z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminnm_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fminnm v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fminnm v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = call <4 x double> @llvm.minnum.v4f64(<4 x double> %op1, <4 x double> %op2) @@ -281,6 +764,38 @@ define <4 x half> @fmax_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmax_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: mov h4, v1.h[2] +; NONEON-NOSVE-NEXT: mov h5, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h1 +; NONEON-NOSVE-NEXT: fcvt s7, h0 +; NONEON-NOSVE-NEXT: mov h1, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s2, s3, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fmax s5, s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: fmax s3, s4, s3 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt h0, s5 +; NONEON-NOSVE-NEXT: fcvt s4, h6 +; NONEON-NOSVE-NEXT: mov v0.h[1], v2.h[0] +; NONEON-NOSVE-NEXT: fcvt h2, s3 +; NONEON-NOSVE-NEXT: fmax s1, s4, s1 +; NONEON-NOSVE-NEXT: mov v0.h[2], v2.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: mov v0.h[3], v1.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.maximum.v4f16(<4 x half> %op1, <4 x half> %op2) ret <4 x half> %res } @@ -294,6 +809,64 @@ define <8 x half> @fmax_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmax_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: mov h6, v1.h[2] +; NONEON-NOSVE-NEXT: mov h7, v0.h[2] +; NONEON-NOSVE-NEXT: mov h16, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fmax s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fmax s3, s3, s2 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s4 +; NONEON-NOSVE-NEXT: fmax s4, s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[4] +; NONEON-NOSVE-NEXT: mov h7, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fmax s5, s5, s16 +; NONEON-NOSVE-NEXT: mov h16, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: mov v2.h[1], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt s3, h6 +; NONEON-NOSVE-NEXT: fcvt s6, h7 +; NONEON-NOSVE-NEXT: mov h7, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h5, s5 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; 
NONEON-NOSVE-NEXT: mov v2.h[2], v4.h[0] +; NONEON-NOSVE-NEXT: mov h4, v1.h[6] +; NONEON-NOSVE-NEXT: fmax s3, s6, s3 +; NONEON-NOSVE-NEXT: mov h6, v0.h[6] +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v2.h[3], v5.h[0] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h6 +; NONEON-NOSVE-NEXT: fmax s6, s16, s7 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v2.h[4], v3.h[0] +; NONEON-NOSVE-NEXT: fmax s4, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h3, s6 +; NONEON-NOSVE-NEXT: fmax s0, s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[5], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: mov v2.h[6], v3.h[0] +; NONEON-NOSVE-NEXT: mov v2.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.maximum.v8f16(<8 x half> %op1, <8 x half> %op2) ret <8 x half> %res } @@ -309,6 +882,119 @@ define void @fmax_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fmax z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmax_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: mov h7, v0.h[1] +; NONEON-NOSVE-NEXT: mov h16, v0.h[2] +; NONEON-NOSVE-NEXT: mov h18, v2.h[1] +; NONEON-NOSVE-NEXT: mov h5, v1.h[1] +; NONEON-NOSVE-NEXT: mov h6, v1.h[2] +; NONEON-NOSVE-NEXT: mov h17, v3.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s19, h0 +; NONEON-NOSVE-NEXT: fcvt s20, h3 +; NONEON-NOSVE-NEXT: fcvt s21, h2 +; NONEON-NOSVE-NEXT: mov h22, v3.h[2] +; NONEON-NOSVE-NEXT: mov h23, v2.h[2] +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fcvt s18, h18 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fmax s4, s19, s4 +; NONEON-NOSVE-NEXT: mov h19, v0.h[3] +; NONEON-NOSVE-NEXT: mov h24, v3.h[3] +; NONEON-NOSVE-NEXT: fmax s20, s21, s20 +; NONEON-NOSVE-NEXT: fcvt s21, h22 +; NONEON-NOSVE-NEXT: fcvt s22, h23 +; NONEON-NOSVE-NEXT: mov h23, v2.h[3] +; NONEON-NOSVE-NEXT: mov h25, v2.h[6] +; NONEON-NOSVE-NEXT: fmax s5, s7, s5 +; NONEON-NOSVE-NEXT: mov h7, v1.h[3] +; NONEON-NOSVE-NEXT: fmax s6, s16, s6 +; NONEON-NOSVE-NEXT: fmax s16, s18, s17 +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s18, h19 +; NONEON-NOSVE-NEXT: fcvt s19, h24 +; NONEON-NOSVE-NEXT: mov h24, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h17, s5 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt h5, s20 +; NONEON-NOSVE-NEXT: fmax s20, s22, s21 +; NONEON-NOSVE-NEXT: fcvt h16, s16 +; NONEON-NOSVE-NEXT: fcvt s21, h23 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: mov h22, v0.h[4] +; NONEON-NOSVE-NEXT: mov h23, v2.h[4] +; NONEON-NOSVE-NEXT: mov v4.h[1], v17.h[0] +; NONEON-NOSVE-NEXT: mov h17, v1.h[4] +; NONEON-NOSVE-NEXT: fmax s7, s18, s7 +; NONEON-NOSVE-NEXT: mov h18, v3.h[4] +; NONEON-NOSVE-NEXT: mov v5.h[1], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s20 +; NONEON-NOSVE-NEXT: fmax s19, s21, s19 +; NONEON-NOSVE-NEXT: fcvt s20, h23 +; NONEON-NOSVE-NEXT: mov h21, v1.h[5] +; NONEON-NOSVE-NEXT: mov h23, v2.h[5] +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: mov v4.h[2], v6.h[0] +; NONEON-NOSVE-NEXT: fcvt s6, h17 +; NONEON-NOSVE-NEXT: fcvt s17, h22 +; NONEON-NOSVE-NEXT: fcvt h7, s7 +; NONEON-NOSVE-NEXT: fcvt s18, h18 +; 
NONEON-NOSVE-NEXT: mov h22, v3.h[5] +; NONEON-NOSVE-NEXT: mov v5.h[2], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s19 +; NONEON-NOSVE-NEXT: mov h19, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmax s6, s17, s6 +; NONEON-NOSVE-NEXT: mov h17, v1.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fmax s18, s20, s18 +; NONEON-NOSVE-NEXT: mov h20, v3.h[6] +; NONEON-NOSVE-NEXT: mov v4.h[3], v7.h[0] +; NONEON-NOSVE-NEXT: fcvt s7, h22 +; NONEON-NOSVE-NEXT: fcvt s22, h23 +; NONEON-NOSVE-NEXT: mov v5.h[3], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt s16, h21 +; NONEON-NOSVE-NEXT: fcvt s21, h24 +; NONEON-NOSVE-NEXT: fcvt s19, h19 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fcvt s23, h25 +; NONEON-NOSVE-NEXT: fcvt h18, s18 +; NONEON-NOSVE-NEXT: fcvt s20, h20 +; NONEON-NOSVE-NEXT: mov h3, v3.h[7] +; NONEON-NOSVE-NEXT: fmax s7, s22, s7 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fmax s16, s21, s16 +; NONEON-NOSVE-NEXT: mov v4.h[4], v6.h[0] +; NONEON-NOSVE-NEXT: fmax s6, s19, s17 +; NONEON-NOSVE-NEXT: mov v5.h[4], v18.h[0] +; NONEON-NOSVE-NEXT: fmax s17, s23, s20 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt h7, s7 +; NONEON-NOSVE-NEXT: fmax s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h16, s16 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fmax s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h3, s17 +; NONEON-NOSVE-NEXT: mov v5.h[5], v7.h[0] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: mov v4.h[5], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s2 +; NONEON-NOSVE-NEXT: mov v5.h[6], v3.h[0] +; NONEON-NOSVE-NEXT: mov v4.h[6], v6.h[0] +; NONEON-NOSVE-NEXT: mov v5.h[7], v1.h[0] +; NONEON-NOSVE-NEXT: mov v4.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: stp q5, q4, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %res = call <16 x half> @llvm.maximum.v16f16(<16 x half> %op1, <16 x half> %op2) @@ -325,6 +1011,11 @@ define <2 x float> @fmax_v2f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: fmax z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmax_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmax v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.maximum.v2f32(<2 x float> %op1, <2 x float> %op2) ret <2 x float> %res } @@ -338,6 +1029,11 @@ define <4 x float> @fmax_v4f32(<4 x float> %op1, <4 x float> %op2) { ; CHECK-NEXT: fmax z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmax_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmax v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.maximum.v4f32(<4 x float> %op1, <4 x float> %op2) ret <4 x float> %res } @@ -353,6 +1049,15 @@ define void @fmax_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fmax z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmax_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmax v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fmax v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = call <8 x float> @llvm.maximum.v8f32(<8 x float> %op1, <8 x float> %op2) @@ -365,6 +1070,11 @@ define <1 x double> @fmax_v1f64(<1 
x double> %op1, <1 x double> %op2) { ; CHECK: // %bb.0: ; CHECK-NEXT: fmax d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmax_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmax d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.maximum.v1f64(<1 x double> %op1, <1 x double> %op2) ret <1 x double> %res } @@ -378,6 +1088,11 @@ define <2 x double> @fmax_v2f64(<2 x double> %op1, <2 x double> %op2) { ; CHECK-NEXT: fmax z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmax_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmax v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.maximum.v2f64(<2 x double> %op1, <2 x double> %op2) ret <2 x double> %res } @@ -393,6 +1108,15 @@ define void @fmax_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fmax z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmax_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmax v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fmax v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = call <4 x double> @llvm.maximum.v4f64(<4 x double> %op1, <4 x double> %op2) @@ -413,6 +1137,38 @@ define <4 x half> @fmin_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmin_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: mov h4, v1.h[2] +; NONEON-NOSVE-NEXT: mov h5, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h1 +; NONEON-NOSVE-NEXT: fcvt s7, h0 +; NONEON-NOSVE-NEXT: mov h1, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s2, s3, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h4 +; NONEON-NOSVE-NEXT: fcvt s4, h5 +; NONEON-NOSVE-NEXT: fmin s5, s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v0.h[3] +; NONEON-NOSVE-NEXT: fmin s3, s4, s3 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt h0, s5 +; NONEON-NOSVE-NEXT: fcvt s4, h6 +; NONEON-NOSVE-NEXT: mov v0.h[1], v2.h[0] +; NONEON-NOSVE-NEXT: fcvt h2, s3 +; NONEON-NOSVE-NEXT: fmin s1, s4, s1 +; NONEON-NOSVE-NEXT: mov v0.h[2], v2.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: mov v0.h[3], v1.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.minimum.v4f16(<4 x half> %op1, <4 x half> %op2) ret <4 x half> %res } @@ -426,6 +1182,64 @@ define <8 x half> @fmin_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmin_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: mov h6, v1.h[2] +; NONEON-NOSVE-NEXT: mov h7, v0.h[2] +; NONEON-NOSVE-NEXT: mov h16, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fmin s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[3] +; 
NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fmin s3, s3, s2 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s4 +; NONEON-NOSVE-NEXT: fmin s4, s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[4] +; NONEON-NOSVE-NEXT: mov h7, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fmin s5, s5, s16 +; NONEON-NOSVE-NEXT: mov h16, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: mov v2.h[1], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt s3, h6 +; NONEON-NOSVE-NEXT: fcvt s6, h7 +; NONEON-NOSVE-NEXT: mov h7, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h5, s5 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: mov v2.h[2], v4.h[0] +; NONEON-NOSVE-NEXT: mov h4, v1.h[6] +; NONEON-NOSVE-NEXT: fmin s3, s6, s3 +; NONEON-NOSVE-NEXT: mov h6, v0.h[6] +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: mov v2.h[3], v5.h[0] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h6 +; NONEON-NOSVE-NEXT: fmin s6, s16, s7 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov v2.h[4], v3.h[0] +; NONEON-NOSVE-NEXT: fmin s4, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h3, s6 +; NONEON-NOSVE-NEXT: fmin s0, s0, s1 +; NONEON-NOSVE-NEXT: mov v2.h[5], v3.h[0] +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: mov v2.h[6], v3.h[0] +; NONEON-NOSVE-NEXT: mov v2.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.minimum.v8f16(<8 x half> %op1, <8 x half> %op2) ret <8 x half> %res } @@ -441,6 +1255,119 @@ define void @fmin_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fmin z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmin_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: mov h7, v0.h[1] +; NONEON-NOSVE-NEXT: mov h16, v0.h[2] +; NONEON-NOSVE-NEXT: mov h18, v2.h[1] +; NONEON-NOSVE-NEXT: mov h5, v1.h[1] +; NONEON-NOSVE-NEXT: mov h6, v1.h[2] +; NONEON-NOSVE-NEXT: mov h17, v3.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s19, h0 +; NONEON-NOSVE-NEXT: fcvt s20, h3 +; NONEON-NOSVE-NEXT: fcvt s21, h2 +; NONEON-NOSVE-NEXT: mov h22, v3.h[2] +; NONEON-NOSVE-NEXT: mov h23, v2.h[2] +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fcvt s18, h18 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fmin s4, s19, s4 +; NONEON-NOSVE-NEXT: mov h19, v0.h[3] +; NONEON-NOSVE-NEXT: mov h24, v3.h[3] +; NONEON-NOSVE-NEXT: fmin s20, s21, s20 +; NONEON-NOSVE-NEXT: fcvt s21, h22 +; NONEON-NOSVE-NEXT: fcvt s22, h23 +; NONEON-NOSVE-NEXT: mov h23, v2.h[3] +; NONEON-NOSVE-NEXT: mov h25, v2.h[6] +; NONEON-NOSVE-NEXT: fmin s5, s7, s5 +; NONEON-NOSVE-NEXT: mov h7, v1.h[3] +; NONEON-NOSVE-NEXT: fmin s6, s16, s6 +; NONEON-NOSVE-NEXT: fmin s16, s18, s17 +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s18, h19 +; NONEON-NOSVE-NEXT: fcvt s19, h24 +; NONEON-NOSVE-NEXT: mov h24, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h17, s5 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcvt h5, s20 +; NONEON-NOSVE-NEXT: fmin s20, s22, s21 +; NONEON-NOSVE-NEXT: fcvt h16, s16 +; NONEON-NOSVE-NEXT: fcvt s21, h23 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; 
NONEON-NOSVE-NEXT: mov h22, v0.h[4] +; NONEON-NOSVE-NEXT: mov h23, v2.h[4] +; NONEON-NOSVE-NEXT: mov v4.h[1], v17.h[0] +; NONEON-NOSVE-NEXT: mov h17, v1.h[4] +; NONEON-NOSVE-NEXT: fmin s7, s18, s7 +; NONEON-NOSVE-NEXT: mov h18, v3.h[4] +; NONEON-NOSVE-NEXT: mov v5.h[1], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s20 +; NONEON-NOSVE-NEXT: fmin s19, s21, s19 +; NONEON-NOSVE-NEXT: fcvt s20, h23 +; NONEON-NOSVE-NEXT: mov h21, v1.h[5] +; NONEON-NOSVE-NEXT: mov h23, v2.h[5] +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: mov v4.h[2], v6.h[0] +; NONEON-NOSVE-NEXT: fcvt s6, h17 +; NONEON-NOSVE-NEXT: fcvt s17, h22 +; NONEON-NOSVE-NEXT: fcvt h7, s7 +; NONEON-NOSVE-NEXT: fcvt s18, h18 +; NONEON-NOSVE-NEXT: mov h22, v3.h[5] +; NONEON-NOSVE-NEXT: mov v5.h[2], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h16, s19 +; NONEON-NOSVE-NEXT: mov h19, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmin s6, s17, s6 +; NONEON-NOSVE-NEXT: mov h17, v1.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fmin s18, s20, s18 +; NONEON-NOSVE-NEXT: mov h20, v3.h[6] +; NONEON-NOSVE-NEXT: mov v4.h[3], v7.h[0] +; NONEON-NOSVE-NEXT: fcvt s7, h22 +; NONEON-NOSVE-NEXT: fcvt s22, h23 +; NONEON-NOSVE-NEXT: mov v5.h[3], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt s16, h21 +; NONEON-NOSVE-NEXT: fcvt s21, h24 +; NONEON-NOSVE-NEXT: fcvt s19, h19 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fcvt s23, h25 +; NONEON-NOSVE-NEXT: fcvt h18, s18 +; NONEON-NOSVE-NEXT: fcvt s20, h20 +; NONEON-NOSVE-NEXT: mov h3, v3.h[7] +; NONEON-NOSVE-NEXT: fmin s7, s22, s7 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fmin s16, s21, s16 +; NONEON-NOSVE-NEXT: mov v4.h[4], v6.h[0] +; NONEON-NOSVE-NEXT: fmin s6, s19, s17 +; NONEON-NOSVE-NEXT: mov v5.h[4], v18.h[0] +; NONEON-NOSVE-NEXT: fmin s17, s23, s20 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt h7, s7 +; NONEON-NOSVE-NEXT: fmin s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h16, s16 +; NONEON-NOSVE-NEXT: fcvt h6, s6 +; NONEON-NOSVE-NEXT: fmin s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h3, s17 +; NONEON-NOSVE-NEXT: mov v5.h[5], v7.h[0] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: mov v4.h[5], v16.h[0] +; NONEON-NOSVE-NEXT: fcvt h1, s2 +; NONEON-NOSVE-NEXT: mov v5.h[6], v3.h[0] +; NONEON-NOSVE-NEXT: mov v4.h[6], v6.h[0] +; NONEON-NOSVE-NEXT: mov v5.h[7], v1.h[0] +; NONEON-NOSVE-NEXT: mov v4.h[7], v0.h[0] +; NONEON-NOSVE-NEXT: stp q5, q4, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %res = call <16 x half> @llvm.minimum.v16f16(<16 x half> %op1, <16 x half> %op2) @@ -457,6 +1384,11 @@ define <2 x float> @fmin_v2f32(<2 x float> %op1, <2 x float> %op2) { ; CHECK-NEXT: fmin z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmin_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmin v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.minimum.v2f32(<2 x float> %op1, <2 x float> %op2) ret <2 x float> %res } @@ -470,6 +1402,11 @@ define <4 x float> @fmin_v4f32(<4 x float> %op1, <4 x float> %op2) { ; CHECK-NEXT: fmin z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmin_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmin v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.minimum.v4f32(<4 x float> %op1, <4 
x float> %op2) ret <4 x float> %res } @@ -485,6 +1422,15 @@ define void @fmin_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fmin z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmin_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmin v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fmin v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = call <8 x float> @llvm.minimum.v8f32(<8 x float> %op1, <8 x float> %op2) @@ -497,6 +1443,11 @@ define <1 x double> @fmin_v1f64(<1 x double> %op1, <1 x double> %op2) { ; CHECK: // %bb.0: ; CHECK-NEXT: fmin d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmin_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmin d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.minimum.v1f64(<1 x double> %op1, <1 x double> %op2) ret <1 x double> %res } @@ -510,6 +1461,11 @@ define <2 x double> @fmin_v2f64(<2 x double> %op1, <2 x double> %op2) { ; CHECK-NEXT: fmin z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmin_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmin v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.minimum.v2f64(<2 x double> %op1, <2 x double> %op2) ret <2 x double> %res } @@ -525,6 +1481,15 @@ define void @fmin_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fmin z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmin_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmin v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fmin v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = call <4 x double> @llvm.minimum.v4f64(<4 x double> %op1, <4 x double> %op2) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce-fa64.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce-fa64.ll index b56e67d95ba004..fdb81b8e5fe1b6 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce-fa64.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce-fa64.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sme-fa64 -force-streaming-compatible-sve < %s | FileCheck %s -check-prefix=FA64 ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s -check-prefix=NO-FA64 +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -26,6 +27,30 @@ define half @fadda_v4f16(half %start, <4 x half> %a) { ; NO-FA64-NEXT: fadd h0, h0, h2 ; NO-FA64-NEXT: fadd h0, h0, h1 ; NO-FA64-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[2] +; NONEON-NOSVE-NEXT: mov h1, v1.h[3] +; 
NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fadd.v4f16(half %start, <4 x half> %a) ret half %res } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll index df9613a30e40b0..74a5db4b38e013 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -19,6 +20,30 @@ define half @fadda_v4f16(half %start, <4 x half> %a) { ; CHECK-NEXT: fadd h0, h0, h2 ; CHECK-NEXT: fadd h0, h0, h1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[2] +; NONEON-NOSVE-NEXT: mov h1, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fadd.v4f16(half %start, <4 x half> %a) ret half %res } @@ -43,6 +68,49 @@ define half @fadda_v8f16(half %start, <8 x half> %a) { ; CHECK-NEXT: fadd h0, h0, h2 ; CHECK-NEXT: fadd h0, h0, h1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; 
NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fadd.v8f16(half %start, <8 x half> %a) ret half %res } @@ -83,6 +151,90 @@ define half @fadda_v16f16(half %start, ptr %a) { ; CHECK-NEXT: fadd h0, h0, h2 ; CHECK-NEXT: fadd h0, h0, h1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: fcvt s2, h1 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call half @llvm.vector.reduce.fadd.v16f16(half %start, <16 x half> %op) ret half %res @@ -96,6 +248,14 
@@ define float @fadda_v2f32(float %start, <2 x float> %a) { ; CHECK-NEXT: mov z1.s, z1.s[1] ; CHECK-NEXT: fadd s0, s0, s1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: mov s2, v1.s[1] +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fadd.v2f32(float %start, <2 x float> %a) ret float %res } @@ -112,6 +272,17 @@ define float @fadda_v4f32(float %start, <4 x float> %a) { ; CHECK-NEXT: fadd s0, s0, s2 ; CHECK-NEXT: fadd s0, s0, s1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov s2, v1.s[1] +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: mov s3, v1.s[2] +; NONEON-NOSVE-NEXT: mov s1, v1.s[3] +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: fadd s0, s0, s3 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fadd.v4f32(float %start, <4 x float> %a) ret float %res } @@ -136,6 +307,26 @@ define float @fadda_v8f32(float %start, ptr %a) { ; CHECK-NEXT: fadd s0, s0, s2 ; CHECK-NEXT: fadd s0, s0, s1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: mov s2, v1.s[1] +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: mov s3, v1.s[2] +; NONEON-NOSVE-NEXT: mov s1, v1.s[3] +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: fadd s0, s0, s3 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: mov s2, v1.s[1] +; NONEON-NOSVE-NEXT: mov s3, v1.s[2] +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: mov s1, v1.s[3] +; NONEON-NOSVE-NEXT: fadd s0, s0, s2 +; NONEON-NOSVE-NEXT: fadd s0, s0, s3 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call float @llvm.vector.reduce.fadd.v8f32(float %start, <8 x float> %op) ret float %res @@ -146,6 +337,11 @@ define double @fadda_v1f64(double %start, <1 x double> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: fadd d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fadd d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fadd.v1f64(double %start, <1 x double> %a) ret double %res } @@ -158,6 +354,13 @@ define double @fadda_v2f64(double %start, <2 x double> %a) { ; CHECK-NEXT: mov z1.d, z1.d[1] ; CHECK-NEXT: fadd d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov d2, v1.d[1] +; NONEON-NOSVE-NEXT: fadd d0, d0, d1 +; NONEON-NOSVE-NEXT: fadd d0, d0, d2 +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fadd.v2f64(double %start, <2 x double> %a) ret double %res } @@ -174,6 +377,17 @@ define double @fadda_v4f64(double %start, ptr %a) { ; CHECK-NEXT: mov z1.d, z1.d[1] ; CHECK-NEXT: fadd d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadda_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q3, q1, [x0] +; NONEON-NOSVE-NEXT: mov d2, v3.d[1] +; NONEON-NOSVE-NEXT: fadd d0, d0, d3 +; NONEON-NOSVE-NEXT: fadd d0, d0, d2 +; NONEON-NOSVE-NEXT: mov d2, v1.d[1] +; NONEON-NOSVE-NEXT: fadd d0, d0, d1 +; NONEON-NOSVE-NEXT: fadd d0, d0, d2 +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call double @llvm.vector.reduce.fadd.v4f64(double %start, <4 x double> 
%op) ret double %res @@ -191,6 +405,30 @@ define half @faddv_v4f16(half %start, <4 x half> %a) { ; CHECK-NEXT: faddv h1, p0, z1.h ; CHECK-NEXT: fadd h0, h0, h1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: faddv_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt s3, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s2, s3, s2 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: mov h1, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s1, s2, s1 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call fast half @llvm.vector.reduce.fadd.v4f16(half %start, <4 x half> %a) ret half %res } @@ -203,6 +441,49 @@ define half @faddv_v8f16(half %start, <8 x half> %a) { ; CHECK-NEXT: faddv h1, p0, z1.h ; CHECK-NEXT: fadd h0, h0, h1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: faddv_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt s3, h1 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s2, s3, s2 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s2, s2, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s2, s2, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s2, s2, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s2, s2, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[6] +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fadd s1, s2, s1 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call fast half @llvm.vector.reduce.fadd.v8f16(half %start, <8 x half> %a) ret half %res } @@ -216,6 +497,58 @@ define half @faddv_v16f16(half %start, ptr %a) { ; CHECK-NEXT: faddv h1, p0, z1.h ; CHECK-NEXT: fadd h0, h0, h1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: faddv_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl v4.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v2.8h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fadd v3.4s, v4.4s, v3.4s +; NONEON-NOSVE-NEXT: fadd v1.4s, v1.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v1.4s +; NONEON-NOSVE-NEXT: mov h1, v2.h[1] +; NONEON-NOSVE-NEXT: fcvt s3, h2 +; 
NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s1, s3, s1 +; NONEON-NOSVE-NEXT: mov h3, v2.h[2] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s1, s1, s3 +; NONEON-NOSVE-NEXT: mov h3, v2.h[3] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s1, s1, s3 +; NONEON-NOSVE-NEXT: mov h3, v2.h[4] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s1, s1, s3 +; NONEON-NOSVE-NEXT: mov h3, v2.h[5] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s1, s1, s3 +; NONEON-NOSVE-NEXT: mov h3, v2.h[6] +; NONEON-NOSVE-NEXT: mov h2, v2.h[7] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s1, s1, s3 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s1, s1, s2 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call fast half @llvm.vector.reduce.fadd.v16f16(half %start, <16 x half> %op) ret half %res @@ -229,6 +562,12 @@ define float @faddv_v2f32(float %start, <2 x float> %a) { ; CHECK-NEXT: faddv s1, p0, z1.s ; CHECK-NEXT: fadd s0, s0, s1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: faddv_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: faddp s1, v1.2s +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: ret %res = call fast float @llvm.vector.reduce.fadd.v2f32(float %start, <2 x float> %a) ret float %res } @@ -241,6 +580,13 @@ define float @faddv_v4f32(float %start, <4 x float> %a) { ; CHECK-NEXT: faddv s1, p0, z1.s ; CHECK-NEXT: fadd s0, s0, s1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: faddv_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: faddp v1.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: faddp s1, v1.2s +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: ret %res = call fast float @llvm.vector.reduce.fadd.v4f32(float %start, <4 x float> %a) ret float %res } @@ -254,6 +600,15 @@ define float @faddv_v8f32(float %start, ptr %a) { ; CHECK-NEXT: faddv s1, p0, z1.s ; CHECK-NEXT: fadd s0, s0, s1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: faddv_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q1, [x0] +; NONEON-NOSVE-NEXT: fadd v1.4s, v2.4s, v1.4s +; NONEON-NOSVE-NEXT: faddp v1.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: faddp s1, v1.2s +; NONEON-NOSVE-NEXT: fadd s0, s0, s1 +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call fast float @llvm.vector.reduce.fadd.v8f32(float %start, <8 x float> %op) ret float %res @@ -264,6 +619,11 @@ define double @faddv_v1f64(double %start, <1 x double> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: fadd d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: faddv_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fadd d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = call fast double @llvm.vector.reduce.fadd.v1f64(double %start, <1 x double> %a) ret double %res } @@ -276,6 +636,12 @@ define double @faddv_v2f64(double %start, <2 x double> %a) { ; CHECK-NEXT: faddv d1, p0, z1.d ; CHECK-NEXT: fadd d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: faddv_v2f64: +; NONEON-NOSVE: // %bb.0: +; 
NONEON-NOSVE-NEXT: faddp d1, v1.2d +; NONEON-NOSVE-NEXT: fadd d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = call fast double @llvm.vector.reduce.fadd.v2f64(double %start, <2 x double> %a) ret double %res } @@ -289,6 +655,14 @@ define double @faddv_v4f64(double %start, ptr %a) { ; CHECK-NEXT: faddv d1, p0, z1.d ; CHECK-NEXT: fadd d0, d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: faddv_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q1, [x0] +; NONEON-NOSVE-NEXT: fadd v1.2d, v2.2d, v1.2d +; NONEON-NOSVE-NEXT: faddp d1, v1.2d +; NONEON-NOSVE-NEXT: fadd d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call fast double @llvm.vector.reduce.fadd.v4f64(double %start, <4 x double> %op) ret double %res @@ -306,6 +680,26 @@ define half @fmaxv_v4f16(<4 x half> %a) { ; CHECK-NEXT: fmaxnmv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxv_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s1, s2, s1 +; NONEON-NOSVE-NEXT: mov h2, v0.h[2] +; NONEON-NOSVE-NEXT: mov h0, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s1, s1, s2 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fmax.v4f16(<4 x half> %a) ret half %res } @@ -318,6 +712,45 @@ define half @fmaxv_v8f16(<8 x half> %a) { ; CHECK-NEXT: fmaxnmv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxv_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s1, s2, s1 +; NONEON-NOSVE-NEXT: mov h2, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s1, s1, s2 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fmax.v8f16(<8 x half> %a) ret half %res } @@ -331,6 +764,85 @@ define half @fmaxv_v16f16(ptr %a) { ; CHECK-NEXT: fmaxnmv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxv_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] 
+; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fmaxnm s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[2] +; NONEON-NOSVE-NEXT: fmaxnm s2, s3, s2 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmaxnm s3, s5, s3 +; NONEON-NOSVE-NEXT: mov h5, v0.h[3] +; NONEON-NOSVE-NEXT: fmaxnm s2, s4, s2 +; NONEON-NOSVE-NEXT: mov h4, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmaxnm s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[4] +; NONEON-NOSVE-NEXT: fmaxnm s2, s2, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmaxnm s3, s5, s3 +; NONEON-NOSVE-NEXT: mov h5, v0.h[5] +; NONEON-NOSVE-NEXT: fmaxnm s2, s2, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmaxnm s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fmaxnm s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmaxnm s0, s0, s1 +; NONEON-NOSVE-NEXT: fmaxnm s2, s2, s3 +; NONEON-NOSVE-NEXT: fmaxnm s3, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fmaxnm s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h1, s2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmaxnm s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call half @llvm.vector.reduce.fmax.v16f16(<16 x half> %op) ret half %res @@ -344,6 +856,11 @@ define float @fmaxv_v2f32(<2 x float> %a) { ; CHECK-NEXT: fmaxnmv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxv_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxnmp s0, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> %a) ret float %res } @@ -356,6 +873,11 @@ define float @fmaxv_v4f32(<4 x float> %a) { ; CHECK-NEXT: fmaxnmv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxv_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxnmv s0, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a) 
ret float %res } @@ -369,6 +891,13 @@ define float @fmaxv_v8f32(ptr %a) { ; CHECK-NEXT: fmaxnmv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxv_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: fmaxnm v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fmaxnmv s0, v0.4s +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %op) ret float %res @@ -378,6 +907,10 @@ define double @fmaxv_v1f64(<1 x double> %a) { ; CHECK-LABEL: fmaxv_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxv_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fmax.v1f64(<1 x double> %a) ret double %res } @@ -390,6 +923,11 @@ define double @fmaxv_v2f64(<2 x double> %a) { ; CHECK-NEXT: fmaxnmv d0, p0, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxv_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxnmp d0, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fmax.v2f64(<2 x double> %a) ret double %res } @@ -403,6 +941,13 @@ define double @fmaxv_v4f64(ptr %a) { ; CHECK-NEXT: fmaxnmv d0, p0, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaxv_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: fmaxnm v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fmaxnmp d0, v0.2d +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %op) ret double %res @@ -420,6 +965,26 @@ define half @fminv_v4f16(<4 x half> %a) { ; CHECK-NEXT: fminnmv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminv_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s1, s2, s1 +; NONEON-NOSVE-NEXT: mov h2, v0.h[2] +; NONEON-NOSVE-NEXT: mov h0, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s1, s1, s2 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fmin.v4f16(<4 x half> %a) ret half %res } @@ -432,6 +997,45 @@ define half @fminv_v8f16(<8 x half> %a) { ; CHECK-NEXT: fminnmv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminv_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s1, s2, s1 +; NONEON-NOSVE-NEXT: mov h2, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: 
fminnm s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s1, s1, s2 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fminnm s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fmin.v8f16(<8 x half> %a) ret half %res } @@ -445,6 +1049,85 @@ define half @fminv_v16f16(ptr %a) { ; CHECK-NEXT: fminnmv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminv_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fminnm s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[2] +; NONEON-NOSVE-NEXT: fminnm s2, s3, s2 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fminnm s3, s5, s3 +; NONEON-NOSVE-NEXT: mov h5, v0.h[3] +; NONEON-NOSVE-NEXT: fminnm s2, s4, s2 +; NONEON-NOSVE-NEXT: mov h4, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fminnm s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[4] +; NONEON-NOSVE-NEXT: fminnm s2, s2, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fminnm s3, s5, s3 +; NONEON-NOSVE-NEXT: mov h5, v0.h[5] +; NONEON-NOSVE-NEXT: fminnm s2, s2, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fminnm s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fminnm s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fminnm s0, s0, s1 +; NONEON-NOSVE-NEXT: fminnm s2, s2, s3 +; NONEON-NOSVE-NEXT: fminnm s3, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fminnm s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h1, s2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 
+; NONEON-NOSVE-NEXT: fminnm s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call half @llvm.vector.reduce.fmin.v16f16(<16 x half> %op) ret half %res @@ -458,6 +1141,11 @@ define float @fminv_v2f32(<2 x float> %a) { ; CHECK-NEXT: fminnmv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminv_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminnmp s0, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %a) ret float %res } @@ -470,6 +1158,11 @@ define float @fminv_v4f32(<4 x float> %a) { ; CHECK-NEXT: fminnmv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminv_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminnmv s0, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a) ret float %res } @@ -483,6 +1176,13 @@ define float @fminv_v8f32(ptr %a) { ; CHECK-NEXT: fminnmv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminv_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: fminnm v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fminnmv s0, v0.4s +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %op) ret float %res @@ -492,6 +1192,10 @@ define double @fminv_v1f64(<1 x double> %a) { ; CHECK-LABEL: fminv_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminv_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fmin.v1f64(<1 x double> %a) ret double %res } @@ -504,6 +1208,11 @@ define double @fminv_v2f64(<2 x double> %a) { ; CHECK-NEXT: fminnmv d0, p0, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminv_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminnmp d0, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fmin.v2f64(<2 x double> %a) ret double %res } @@ -517,6 +1226,13 @@ define double @fminv_v4f64(ptr %a) { ; CHECK-NEXT: fminnmv d0, p0, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminv_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: fminnm v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fminnmp d0, v0.2d +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %op) ret double %res @@ -534,6 +1250,26 @@ define half @fmaximumv_v4f16(<4 x half> %a) { ; CHECK-NEXT: fmaxv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaximumv_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s1, s2, s1 +; NONEON-NOSVE-NEXT: mov h2, v0.h[2] +; NONEON-NOSVE-NEXT: mov h0, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s1, s1, s2 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call 
half @llvm.vector.reduce.fmaximum.v4f16(<4 x half> %a) ret half %res } @@ -546,6 +1282,45 @@ define half @fmaximumv_v8f16(<8 x half> %a) { ; CHECK-NEXT: fmaxv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaximumv_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s1, s2, s1 +; NONEON-NOSVE-NEXT: mov h2, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s1, s1, s2 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fmaximum.v8f16(<8 x half> %a) ret half %res } @@ -559,6 +1334,85 @@ define half @fmaximumv_v16f16(ptr %a) { ; CHECK-NEXT: fmaxv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaximumv_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fmax s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[2] +; NONEON-NOSVE-NEXT: fmax s2, s3, s2 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmax s3, s5, s3 +; NONEON-NOSVE-NEXT: mov h5, v0.h[3] +; NONEON-NOSVE-NEXT: fmax s2, s4, s2 +; NONEON-NOSVE-NEXT: mov h4, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmax s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[4] +; NONEON-NOSVE-NEXT: fmax s2, s2, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmax s3, s5, s3 +; NONEON-NOSVE-NEXT: mov h5, v0.h[5] +; NONEON-NOSVE-NEXT: fmax s2, s2, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; 
NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmax s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fmax s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmax s0, s0, s1 +; NONEON-NOSVE-NEXT: fmax s2, s2, s3 +; NONEON-NOSVE-NEXT: fmax s3, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fmax s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h1, s2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmax s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call half @llvm.vector.reduce.fmaximum.v16f16(<16 x half> %op) ret half %res @@ -572,6 +1426,11 @@ define float @fmaximumv_v2f32(<2 x float> %a) { ; CHECK-NEXT: fmaxv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaximumv_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxp s0, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> %a) ret float %res } @@ -584,6 +1443,11 @@ define float @fmaximumv_v4f32(<4 x float> %a) { ; CHECK-NEXT: fmaxv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaximumv_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxv s0, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> %a) ret float %res } @@ -597,6 +1461,13 @@ define float @fmaximumv_v8f32(ptr %a) { ; CHECK-NEXT: fmaxv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaximumv_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: fmax v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fmaxv s0, v0.4s +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> %op) ret float %res @@ -606,6 +1477,10 @@ define double @fmaximumv_v1f64(<1 x double> %a) { ; CHECK-LABEL: fmaximumv_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaximumv_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fmaximum.v1f64(<1 x double> %a) ret double %res } @@ -618,6 +1493,11 @@ define double @fmaximumv_v2f64(<2 x double> %a) { ; CHECK-NEXT: fmaxv d0, p0, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaximumv_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmaxp d0, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> %a) ret double %res } @@ -631,6 +1511,13 @@ define double @fmaximumv_v4f64(ptr %a) { ; CHECK-NEXT: fmaxv d0, p0, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fmaximumv_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: fmax v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fmaxp d0, v0.2d +; 
NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> %op) ret double %res @@ -648,6 +1535,26 @@ define half @fminimumv_v4f16(<4 x half> %a) { ; CHECK-NEXT: fminv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminimumv_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s1, s2, s1 +; NONEON-NOSVE-NEXT: mov h2, v0.h[2] +; NONEON-NOSVE-NEXT: mov h0, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s1, s1, s2 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fminimum.v4f16(<4 x half> %a) ret half %res } @@ -660,6 +1567,45 @@ define half @fminimumv_v8f16(<8 x half> %a) { ; CHECK-NEXT: fminv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminimumv_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s1, s2, s1 +; NONEON-NOSVE-NEXT: mov h2, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s1, s1, s2 +; NONEON-NOSVE-NEXT: mov h2, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s1, s1, s2 +; NONEON-NOSVE-NEXT: fcvt h1, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %res = call half @llvm.vector.reduce.fminimum.v8f16(<8 x half> %a) ret half %res } @@ -673,6 +1619,85 @@ define half @fminimumv_v16f16(ptr %a) { ; CHECK-NEXT: fminv h0, p0, z0.h ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminimumv_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s4, h1 +; NONEON-NOSVE-NEXT: fcvt s5, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fmin s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[2] +; NONEON-NOSVE-NEXT: fmin s2, s3, s2 +; NONEON-NOSVE-NEXT: mov h3, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmin 
s3, s5, s3 +; NONEON-NOSVE-NEXT: mov h5, v0.h[3] +; NONEON-NOSVE-NEXT: fmin s2, s4, s2 +; NONEON-NOSVE-NEXT: mov h4, v1.h[3] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmin s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[4] +; NONEON-NOSVE-NEXT: fmin s2, s2, s3 +; NONEON-NOSVE-NEXT: mov h3, v1.h[4] +; NONEON-NOSVE-NEXT: fcvt h4, s4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmin s3, s5, s3 +; NONEON-NOSVE-NEXT: mov h5, v0.h[5] +; NONEON-NOSVE-NEXT: fmin s2, s2, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[5] +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmin s4, s5, s4 +; NONEON-NOSVE-NEXT: mov h5, v0.h[6] +; NONEON-NOSVE-NEXT: mov h0, v0.h[7] +; NONEON-NOSVE-NEXT: fmin s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h3, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[6] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: mov h1, v1.h[7] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fmin s0, s0, s1 +; NONEON-NOSVE-NEXT: fmin s2, s2, s3 +; NONEON-NOSVE-NEXT: fmin s3, s5, s4 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: fcvt h2, s2 +; NONEON-NOSVE-NEXT: fcvt h3, s3 +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fmin s2, s2, s3 +; NONEON-NOSVE-NEXT: fcvt h1, s2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fmin s0, s1, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call half @llvm.vector.reduce.fminimum.v16f16(<16 x half> %op) ret half %res @@ -686,6 +1711,11 @@ define float @fminimumv_v2f32(<2 x float> %a) { ; CHECK-NEXT: fminv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminimumv_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminp s0, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> %a) ret float %res } @@ -698,6 +1728,11 @@ define float @fminimumv_v4f32(<4 x float> %a) { ; CHECK-NEXT: fminv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminimumv_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminv s0, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> %a) ret float %res } @@ -711,6 +1746,13 @@ define float @fminimumv_v8f32(ptr %a) { ; CHECK-NEXT: fminv s0, p0, z0.s ; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminimumv_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: fmin v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fminv s0, v0.4s +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> %op) ret float %res @@ -720,6 +1762,10 @@ define double @fminimumv_v1f64(<1 x double> %a) { ; CHECK-LABEL: 
fminimumv_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminimumv_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fminimum.v1f64(<1 x double> %a) ret double %res } @@ -732,6 +1778,11 @@ define double @fminimumv_v2f64(<2 x double> %a) { ; CHECK-NEXT: fminv d0, p0, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminimumv_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fminp d0, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> %a) ret double %res } @@ -745,6 +1796,13 @@ define double @fminimumv_v4f64(ptr %a) { ; CHECK-NEXT: fminv d0, p0, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fminimumv_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: fmin v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fminp d0, v0.2d +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> %op) ret double %res diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll index 7ddc641f366caa..454683865eb9a9 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -16,6 +17,13 @@ define <2 x half> @frintp_v2f16(<2 x half> %op) { ; CHECK-NEXT: frintp z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintp v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> @llvm.ceil.v2f16(<2 x half> %op) ret <2 x half> %res } @@ -28,6 +36,13 @@ define <4 x half> @frintp_v4f16(<4 x half> %op) { ; CHECK-NEXT: frintp z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintp v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.ceil.v4f16(<4 x half> %op) ret <4 x half> %res } @@ -40,6 +55,16 @@ define <8 x half> @frintp_v8f16(<8 x half> %op) { ; CHECK-NEXT: frintp z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v0.8h +; NONEON-NOSVE-NEXT: frintp v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v1.4s +; NONEON-NOSVE-NEXT: frintp v1.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.ceil.v8f16(<8 x half> %op) ret <8 x half> %res } @@ -53,6 +78,24 @@ define void @frintp_v16f16(ptr %a) { ; CHECK-NEXT: frintp z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, 
[x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: frintp v2.4s, v2.4s +; NONEON-NOSVE-NEXT: frintp v3.4s, v3.4s +; NONEON-NOSVE-NEXT: frintp v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintp v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v1.4s +; NONEON-NOSVE-NEXT: stp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call <16 x half> @llvm.ceil.v16f16(<16 x half> %op) store <16 x half> %res, ptr %a @@ -67,6 +110,11 @@ define <2 x float> @frintp_v2f32(<2 x float> %op) { ; CHECK-NEXT: frintp z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintp v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.ceil.v2f32(<2 x float> %op) ret <2 x float> %res } @@ -79,6 +127,11 @@ define <4 x float> @frintp_v4f32(<4 x float> %op) { ; CHECK-NEXT: frintp z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintp v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.ceil.v4f32(<4 x float> %op) ret <4 x float> %res } @@ -92,6 +145,14 @@ define void @frintp_v8f32(ptr %a) { ; CHECK-NEXT: frintp z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintp v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintp v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call <8 x float> @llvm.ceil.v8f32(<8 x float> %op) store <8 x float> %res, ptr %a @@ -103,6 +164,11 @@ define <1 x double> @frintp_v1f64(<1 x double> %op) { ; CHECK: // %bb.0: ; CHECK-NEXT: frintp d0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintp d0, d0 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.ceil.v1f64(<1 x double> %op) ret <1 x double> %res } @@ -115,6 +181,11 @@ define <2 x double> @frintp_v2f64(<2 x double> %op) { ; CHECK-NEXT: frintp z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintp v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.ceil.v2f64(<2 x double> %op) ret <2 x double> %res } @@ -128,6 +199,14 @@ define void @frintp_v4f64(ptr %a) { ; CHECK-NEXT: frintp z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintp_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintp v0.2d, v0.2d +; NONEON-NOSVE-NEXT: frintp v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call <4 x double> @llvm.ceil.v4f64(<4 x double> %op) store <4 x double> %res, ptr %a @@ -146,6 +225,13 @@ define <2 x half> @frintm_v2f16(<2 x half> %op) { ; CHECK-NEXT: frintm z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed 
$d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintm v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> @llvm.floor.v2f16(<2 x half> %op) ret <2 x half> %res } @@ -158,6 +244,13 @@ define <4 x half> @frintm_v4f16(<4 x half> %op) { ; CHECK-NEXT: frintm z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintm v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.floor.v4f16(<4 x half> %op) ret <4 x half> %res } @@ -170,6 +263,16 @@ define <8 x half> @frintm_v8f16(<8 x half> %op) { ; CHECK-NEXT: frintm z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v0.8h +; NONEON-NOSVE-NEXT: frintm v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v1.4s +; NONEON-NOSVE-NEXT: frintm v1.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.floor.v8f16(<8 x half> %op) ret <8 x half> %res } @@ -183,6 +286,24 @@ define void @frintm_v16f16(ptr %a) { ; CHECK-NEXT: frintm z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: frintm v2.4s, v2.4s +; NONEON-NOSVE-NEXT: frintm v3.4s, v3.4s +; NONEON-NOSVE-NEXT: frintm v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintm v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v1.4s +; NONEON-NOSVE-NEXT: stp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call <16 x half> @llvm.floor.v16f16(<16 x half> %op) store <16 x half> %res, ptr %a @@ -197,6 +318,11 @@ define <2 x float> @frintm_v2f32(<2 x float> %op) { ; CHECK-NEXT: frintm z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintm v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.floor.v2f32(<2 x float> %op) ret <2 x float> %res } @@ -209,6 +335,11 @@ define <4 x float> @frintm_v4f32(<4 x float> %op) { ; CHECK-NEXT: frintm z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintm v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.floor.v4f32(<4 x float> %op) ret <4 x float> %res } @@ -222,6 +353,14 @@ define void @frintm_v8f32(ptr %a) { ; CHECK-NEXT: frintm z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintm v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintm v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load 
<8 x float>, ptr %a %res = call <8 x float> @llvm.floor.v8f32(<8 x float> %op) store <8 x float> %res, ptr %a @@ -233,6 +372,11 @@ define <1 x double> @frintm_v1f64(<1 x double> %op) { ; CHECK: // %bb.0: ; CHECK-NEXT: frintm d0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintm d0, d0 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.floor.v1f64(<1 x double> %op) ret <1 x double> %res } @@ -245,6 +389,11 @@ define <2 x double> @frintm_v2f64(<2 x double> %op) { ; CHECK-NEXT: frintm z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintm v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.floor.v2f64(<2 x double> %op) ret <2 x double> %res } @@ -258,6 +407,14 @@ define void @frintm_v4f64(ptr %a) { ; CHECK-NEXT: frintm z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintm_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintm v0.2d, v0.2d +; NONEON-NOSVE-NEXT: frintm v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call <4 x double> @llvm.floor.v4f64(<4 x double> %op) store <4 x double> %res, ptr %a @@ -276,6 +433,13 @@ define <2 x half> @frinti_v2f16(<2 x half> %op) { ; CHECK-NEXT: frinti z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frinti v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> @llvm.nearbyint.v2f16(<2 x half> %op) ret <2 x half> %res } @@ -288,6 +452,13 @@ define <4 x half> @frinti_v4f16(<4 x half> %op) { ; CHECK-NEXT: frinti z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frinti v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.nearbyint.v4f16(<4 x half> %op) ret <4 x half> %res } @@ -300,6 +471,16 @@ define <8 x half> @frinti_v8f16(<8 x half> %op) { ; CHECK-NEXT: frinti z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v0.8h +; NONEON-NOSVE-NEXT: frinti v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v1.4s +; NONEON-NOSVE-NEXT: frinti v1.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.nearbyint.v8f16(<8 x half> %op) ret <8 x half> %res } @@ -313,6 +494,24 @@ define void @frinti_v16f16(ptr %a) { ; CHECK-NEXT: frinti z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: frinti v2.4s, v2.4s +; NONEON-NOSVE-NEXT: frinti v3.4s, v3.4s +; NONEON-NOSVE-NEXT: frinti v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frinti v1.4s, v1.4s +; 
NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v1.4s +; NONEON-NOSVE-NEXT: stp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call <16 x half> @llvm.nearbyint.v16f16(<16 x half> %op) store <16 x half> %res, ptr %a @@ -327,6 +526,11 @@ define <2 x float> @frinti_v2f32(<2 x float> %op) { ; CHECK-NEXT: frinti z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frinti v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %op) ret <2 x float> %res } @@ -339,6 +543,11 @@ define <4 x float> @frinti_v4f32(<4 x float> %op) { ; CHECK-NEXT: frinti z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frinti v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %op) ret <4 x float> %res } @@ -352,6 +561,14 @@ define void @frinti_v8f32(ptr %a) { ; CHECK-NEXT: frinti z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frinti v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frinti v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %op) store <8 x float> %res, ptr %a @@ -363,6 +580,11 @@ define <1 x double> @frinti_v1f64(<1 x double> %op) { ; CHECK: // %bb.0: ; CHECK-NEXT: frinti d0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frinti d0, d0 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.nearbyint.v1f64(<1 x double> %op) ret <1 x double> %res } @@ -375,6 +597,11 @@ define <2 x double> @frinti_v2f64(<2 x double> %op) { ; CHECK-NEXT: frinti z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frinti v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %op) ret <2 x double> %res } @@ -388,6 +615,14 @@ define void @frinti_v4f64(ptr %a) { ; CHECK-NEXT: frinti z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinti_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frinti v0.2d, v0.2d +; NONEON-NOSVE-NEXT: frinti v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %op) store <4 x double> %res, ptr %a @@ -406,6 +641,13 @@ define <2 x half> @frintx_v2f16(<2 x half> %op) { ; CHECK-NEXT: frintx z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintx v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> @llvm.rint.v2f16(<2 x half> %op) ret <2 x half> %res } @@ -418,6 +660,13 @@ define <4 x half> @frintx_v4f16(<4 x half> %op) { ; CHECK-NEXT: frintx z0.h, 
p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintx v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.rint.v4f16(<4 x half> %op) ret <4 x half> %res } @@ -430,6 +679,16 @@ define <8 x half> @frintx_v8f16(<8 x half> %op) { ; CHECK-NEXT: frintx z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v0.8h +; NONEON-NOSVE-NEXT: frintx v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v1.4s +; NONEON-NOSVE-NEXT: frintx v1.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.rint.v8f16(<8 x half> %op) ret <8 x half> %res } @@ -443,6 +702,24 @@ define void @frintx_v16f16(ptr %a) { ; CHECK-NEXT: frintx z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: frintx v2.4s, v2.4s +; NONEON-NOSVE-NEXT: frintx v3.4s, v3.4s +; NONEON-NOSVE-NEXT: frintx v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintx v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v1.4s +; NONEON-NOSVE-NEXT: stp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call <16 x half> @llvm.rint.v16f16(<16 x half> %op) store <16 x half> %res, ptr %a @@ -457,6 +734,11 @@ define <2 x float> @frintx_v2f32(<2 x float> %op) { ; CHECK-NEXT: frintx z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintx v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.rint.v2f32(<2 x float> %op) ret <2 x float> %res } @@ -469,6 +751,11 @@ define <4 x float> @frintx_v4f32(<4 x float> %op) { ; CHECK-NEXT: frintx z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintx v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.rint.v4f32(<4 x float> %op) ret <4 x float> %res } @@ -482,6 +769,14 @@ define void @frintx_v8f32(ptr %a) { ; CHECK-NEXT: frintx z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintx v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintx v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call <8 x float> @llvm.rint.v8f32(<8 x float> %op) store <8 x float> %res, ptr %a @@ -493,6 +788,11 @@ define <1 x double> @frintx_v1f64(<1 x double> %op) { ; CHECK: // %bb.0: ; CHECK-NEXT: frintx d0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintx d0, d0 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.rint.v1f64(<1 x double> 
%op) ret <1 x double> %res } @@ -505,6 +805,11 @@ define <2 x double> @frintx_v2f64(<2 x double> %op) { ; CHECK-NEXT: frintx z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintx v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.rint.v2f64(<2 x double> %op) ret <2 x double> %res } @@ -518,6 +823,14 @@ define void @frintx_v4f64(ptr %a) { ; CHECK-NEXT: frintx z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintx_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintx v0.2d, v0.2d +; NONEON-NOSVE-NEXT: frintx v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call <4 x double> @llvm.rint.v4f64(<4 x double> %op) store <4 x double> %res, ptr %a @@ -536,6 +849,13 @@ define <2 x half> @frinta_v2f16(<2 x half> %op) { ; CHECK-NEXT: frinta z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frinta v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> @llvm.round.v2f16(<2 x half> %op) ret <2 x half> %res } @@ -548,6 +868,13 @@ define <4 x half> @frinta_v4f16(<4 x half> %op) { ; CHECK-NEXT: frinta z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frinta v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.round.v4f16(<4 x half> %op) ret <4 x half> %res } @@ -560,6 +887,16 @@ define <8 x half> @frinta_v8f16(<8 x half> %op) { ; CHECK-NEXT: frinta z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v0.8h +; NONEON-NOSVE-NEXT: frinta v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v1.4s +; NONEON-NOSVE-NEXT: frinta v1.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.round.v8f16(<8 x half> %op) ret <8 x half> %res } @@ -573,6 +910,24 @@ define void @frinta_v16f16(ptr %a) { ; CHECK-NEXT: frinta z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: frinta v2.4s, v2.4s +; NONEON-NOSVE-NEXT: frinta v3.4s, v3.4s +; NONEON-NOSVE-NEXT: frinta v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frinta v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v1.4s +; NONEON-NOSVE-NEXT: stp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call <16 x half> @llvm.round.v16f16(<16 x half> %op) store <16 x half> %res, ptr %a @@ -587,6 +942,11 @@ define <2 x float> @frinta_v2f32(<2 x float> %op) { ; CHECK-NEXT: frinta z0.s, p0/m, 
z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frinta v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.round.v2f32(<2 x float> %op) ret <2 x float> %res } @@ -599,6 +959,11 @@ define <4 x float> @frinta_v4f32(<4 x float> %op) { ; CHECK-NEXT: frinta z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frinta v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.round.v4f32(<4 x float> %op) ret <4 x float> %res } @@ -612,6 +977,14 @@ define void @frinta_v8f32(ptr %a) { ; CHECK-NEXT: frinta z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frinta v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frinta v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call <8 x float> @llvm.round.v8f32(<8 x float> %op) store <8 x float> %res, ptr %a @@ -623,6 +996,11 @@ define <1 x double> @frinta_v1f64(<1 x double> %op) { ; CHECK: // %bb.0: ; CHECK-NEXT: frinta d0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frinta d0, d0 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.round.v1f64(<1 x double> %op) ret <1 x double> %res } @@ -635,6 +1013,11 @@ define <2 x double> @frinta_v2f64(<2 x double> %op) { ; CHECK-NEXT: frinta z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frinta v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.round.v2f64(<2 x double> %op) ret <2 x double> %res } @@ -648,6 +1031,14 @@ define void @frinta_v4f64(ptr %a) { ; CHECK-NEXT: frinta z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frinta_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frinta v0.2d, v0.2d +; NONEON-NOSVE-NEXT: frinta v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call <4 x double> @llvm.round.v4f64(<4 x double> %op) store <4 x double> %res, ptr %a @@ -666,6 +1057,13 @@ define <2 x half> @frintn_v2f16(<2 x half> %op) { ; CHECK-NEXT: frintn z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintn v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> @llvm.roundeven.v2f16(<2 x half> %op) ret <2 x half> %res } @@ -678,6 +1076,13 @@ define <4 x half> @frintn_v4f16(<4 x half> %op) { ; CHECK-NEXT: frintn z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintn v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %op) ret <4 x half> %res } @@ -690,6 +1095,16 @@ define <8 x half> @frintn_v8f16(<8 x half> %op) { ; CHECK-NEXT: frintn 
z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v0.8h +; NONEON-NOSVE-NEXT: frintn v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v1.4s +; NONEON-NOSVE-NEXT: frintn v1.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %op) ret <8 x half> %res } @@ -703,6 +1118,24 @@ define void @frintn_v16f16(ptr %a) { ; CHECK-NEXT: frintn z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: frintn v2.4s, v2.4s +; NONEON-NOSVE-NEXT: frintn v3.4s, v3.4s +; NONEON-NOSVE-NEXT: frintn v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintn v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v1.4s +; NONEON-NOSVE-NEXT: stp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call <16 x half> @llvm.roundeven.v16f16(<16 x half> %op) store <16 x half> %res, ptr %a @@ -717,6 +1150,11 @@ define <2 x float> @frintn_v2f32(<2 x float> %op) { ; CHECK-NEXT: frintn z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintn v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %op) ret <2 x float> %res } @@ -729,6 +1167,11 @@ define <4 x float> @frintn_v4f32(<4 x float> %op) { ; CHECK-NEXT: frintn z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintn v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %op) ret <4 x float> %res } @@ -742,6 +1185,14 @@ define void @frintn_v8f32(ptr %a) { ; CHECK-NEXT: frintn z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintn v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintn v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %op) store <8 x float> %res, ptr %a @@ -753,6 +1204,11 @@ define <1 x double> @frintn_v1f64(<1 x double> %op) { ; CHECK: // %bb.0: ; CHECK-NEXT: frintn d0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintn d0, d0 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %op) ret <1 x double> %res } @@ -765,6 +1221,11 @@ define <2 x double> @frintn_v2f64(<2 x double> %op) { ; CHECK-NEXT: frintn z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintn v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %op) ret <2 x double> 
%res } @@ -778,6 +1239,14 @@ define void @frintn_v4f64(ptr %a) { ; CHECK-NEXT: frintn z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintn_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintn v0.2d, v0.2d +; NONEON-NOSVE-NEXT: frintn v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %op) store <4 x double> %res, ptr %a @@ -796,6 +1265,13 @@ define <2 x half> @frintz_v2f16(<2 x half> %op) { ; CHECK-NEXT: frintz z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintz v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <2 x half> @llvm.trunc.v2f16(<2 x half> %op) ret <2 x half> %res } @@ -808,6 +1284,13 @@ define <4 x half> @frintz_v4f16(<4 x half> %op) { ; CHECK-NEXT: frintz z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: frintz v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x half> @llvm.trunc.v4f16(<4 x half> %op) ret <4 x half> %res } @@ -820,6 +1303,16 @@ define <8 x half> @frintz_v8f16(<8 x half> %op) { ; CHECK-NEXT: frintz z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v0.8h +; NONEON-NOSVE-NEXT: frintz v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v1.4s +; NONEON-NOSVE-NEXT: frintz v1.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <8 x half> @llvm.trunc.v8f16(<8 x half> %op) ret <8 x half> %res } @@ -833,6 +1326,24 @@ define void @frintz_v16f16(ptr %a) { ; CHECK-NEXT: frintz z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: frintz v2.4s, v2.4s +; NONEON-NOSVE-NEXT: frintz v3.4s, v3.4s +; NONEON-NOSVE-NEXT: frintz v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintz v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v3.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v1.4s +; NONEON-NOSVE-NEXT: stp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x half>, ptr %a %res = call <16 x half> @llvm.trunc.v16f16(<16 x half> %op) store <16 x half> %res, ptr %a @@ -847,6 +1358,11 @@ define <2 x float> @frintz_v2f32(<2 x float> %op) { ; CHECK-NEXT: frintz z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintz v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x float> @llvm.trunc.v2f32(<2 x float> %op) ret <2 x float> %res } @@ -859,6 +1375,11 @@ define <4 x float> @frintz_v4f32(<4 x float> %op) { ; CHECK-NEXT: frintz z0.s, p0/m, z0.s ; 
CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintz v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %op) ret <4 x float> %res } @@ -872,6 +1393,14 @@ define void @frintz_v8f32(ptr %a) { ; CHECK-NEXT: frintz z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintz v0.4s, v0.4s +; NONEON-NOSVE-NEXT: frintz v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x float>, ptr %a %res = call <8 x float> @llvm.trunc.v8f32(<8 x float> %op) store <8 x float> %res, ptr %a @@ -883,6 +1412,11 @@ define <1 x double> @frintz_v1f64(<1 x double> %op) { ; CHECK: // %bb.0: ; CHECK-NEXT: frintz d0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintz d0, d0 +; NONEON-NOSVE-NEXT: ret %res = call <1 x double> @llvm.trunc.v1f64(<1 x double> %op) ret <1 x double> %res } @@ -895,6 +1429,11 @@ define <2 x double> @frintz_v2f64(<2 x double> %op) { ; CHECK-NEXT: frintz z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: frintz v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %op) ret <2 x double> %res } @@ -908,6 +1447,14 @@ define void @frintz_v4f64(ptr %a) { ; CHECK-NEXT: frintz z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: frintz_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: frintz v0.2d, v0.2d +; NONEON-NOSVE-NEXT: frintz v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x double>, ptr %a %res = call <4 x double> @llvm.trunc.v4f64(<4 x double> %op) store <4 x double> %res, ptr %a diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll index 7d36925fdc57f3..0268dd1b5d318f 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -16,6 +17,14 @@ define <2 x half> @select_v2f16(<2 x half> %op1, <2 x half> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.4h, w8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <2 x half> %op1, <2 x half> %op2 ret <2 x half> %sel } @@ -32,6 +41,14 @@ define <4 x half> @select_v4f16(<4 x half> %op1, <4 x half> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; 
CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.4h, w8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <4 x half> %op1, <4 x half> %op2 ret <4 x half> %sel } @@ -48,6 +65,14 @@ define <8 x half> @select_v8f16(<8 x half> %op1, <8 x half> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.8h, w8 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <8 x half> %op1, <8 x half> %op2 ret <8 x half> %sel } @@ -67,6 +92,20 @@ define void @select_v16f16(ptr %a, ptr %b, i1 %mask) { ; CHECK-NEXT: sel z1.h, p0, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w2, #0x1 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x0, #16] +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: ldr q4, [x1, #16] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: bif v1.16b, v3.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load volatile <16 x half>, ptr %a %op2 = load volatile <16 x half>, ptr %b %sel = select i1 %mask, <16 x half> %op1, <16 x half> %op2 @@ -86,6 +125,14 @@ define <2 x float> @select_v2f32(<2 x float> %op1, <2 x float> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.2s, w8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <2 x float> %op1, <2 x float> %op2 ret <2 x float> %sel } @@ -102,6 +149,14 @@ define <4 x float> @select_v4f32(<4 x float> %op1, <4 x float> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.4s, w8 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <4 x float> %op1, <4 x float> %op2 ret <4 x float> %sel } @@ -121,6 +176,20 @@ define void @select_v8f32(ptr %a, ptr %b, i1 %mask) { ; CHECK-NEXT: sel z1.s, p0, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w2, #0x1 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x0, #16] +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: ldr q4, [x1, #16] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: bif v1.16b, v3.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load volatile <8 x float>, ptr %a %op2 = load volatile <8 x float>, ptr %b %sel = select i1 %mask, <8 x float> %op1, <8 x float> %op2 @@ -134,6 +203,14 @@ 
define <1 x double> @select_v1f64(<1 x double> %op1, <1 x double> %op2, i1 %mask ; CHECK-NEXT: tst w0, #0x1 ; CHECK-NEXT: fcsel d0, d0, d1, ne ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm x8, ne +; NONEON-NOSVE-NEXT: fmov d2, x8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <1 x double> %op1, <1 x double> %op2 ret <1 x double> %sel } @@ -151,6 +228,14 @@ define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, i1 %mask ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm x8, ne +; NONEON-NOSVE-NEXT: dup v2.2d, x8 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <2 x double> %op1, <2 x double> %op2 ret <2 x double> %sel } @@ -171,6 +256,20 @@ define void @select_v4f64(ptr %a, ptr %b, i1 %mask) { ; CHECK-NEXT: sel z1.d, p0, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w2, #0x1 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x0, #16] +; NONEON-NOSVE-NEXT: csetm x8, ne +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: ldr q4, [x1, #16] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: bif v1.16b, v3.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load volatile <4 x double>, ptr %a %op2 = load volatile <4 x double>, ptr %b %sel = select i1 %mask, <4 x double> %op1, <4 x double> %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll index bf8a335a850379..1c63a3870d682f 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -15,6 +16,13 @@ define <4 x i16> @fcvtzu_v4f16_v4i16(<4 x half> %op1) { ; CHECK-NEXT: fcvtzu z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v4f16_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fptoui <4 x half> %op1 to <4 x i16> ret <4 x i16> %res } @@ -27,6 +35,21 @@ define void @fcvtzu_v8f16_v8i16(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzu z0.h, p0/m, z0.h ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v8f16_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzu v1.4s, v1.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %res = fptoui <8 x half> %op1 to <8 x i16> store <8 x i16> %res, ptr %b @@ -42,6 +65,27 @@ define void @fcvtzu_v16f16_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzu z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v16f16_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtzu v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzu v2.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtzu v3.4s, v3.4s +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v1.8h, v2.8h +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %res = fptoui <16 x half> %op1 to <16 x i16> store <16 x i16> %res, ptr %b @@ -61,6 +105,13 @@ define <2 x i32> @fcvtzu_v2f16_v2i32(<2 x half> %op1) { ; CHECK-NEXT: fcvtzu z0.s, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v2f16_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = fptoui <2 x half> %op1 to <2 x i32> ret <2 x i32> %res } @@ -74,6 +125,12 @@ define <4 x i32> @fcvtzu_v4f16_v4i32(<4 x half> %op1) { ; CHECK-NEXT: fcvtzu z0.s, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v4f16_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fptoui <4 x half> %op1 to <4 x i32> ret <4 x i32> %res } @@ -90,6 +147,20 @@ define void @fcvtzu_v8f16_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzu z0.s, p0/m, z0.h ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v8f16_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzu v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %res = fptoui <8 x half> %op1 to <8 x i32> store <8 x i32> %res, ptr %b @@ -114,6 +185,26 @@ define void @fcvtzu_v16f16_v16i32(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v16f16_v16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtzu v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzu v2.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtzu v3.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %res = fptoui <16 x half> %op1 to <16 x i32> store <16 x i32> %res, ptr %b @@ -130,6 +221,13 @@ define <1 x i64> @fcvtzu_v1f16_v1i64(<1 x half> %op1) { ; CHECK-NEXT: fcvtzu x8, h0 ; CHECK-NEXT: fmov d0, x8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v1f16_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvtzu x8, s0 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %res = fptoui <1 x half> %op1 to <1 x i64> ret <1 x i64> %res } @@ -145,6 +243,18 @@ define <2 x i64> @fcvtzu_v2f16_v2i64(<2 x half> %op1) { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldr q0, [sp], #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v2f16_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvtzu x8, s0 +; NONEON-NOSVE-NEXT: fcvtzu x9, s1 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; NONEON-NOSVE-NEXT: ret %res = fptoui <2 x half> %op1 to <2 x i64> ret <2 x i64> %res } @@ -167,6 +277,27 @@ define void @fcvtzu_v4f16_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v4f16_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: mov h1, v0.h[2] +; NONEON-NOSVE-NEXT: mov h2, v0.h[3] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvtzu x9, s0 +; NONEON-NOSVE-NEXT: fcvtzu x8, s1 +; NONEON-NOSVE-NEXT: fcvtzu x10, s2 +; NONEON-NOSVE-NEXT: fcvtzu x11, s3 +; NONEON-NOSVE-NEXT: fmov d1, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: mov v1.d[1], x11 +; NONEON-NOSVE-NEXT: mov v0.d[1], x10 +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x half>, ptr %a %res = fptoui <4 x half> %op1 to <4 x i64> store <4 x i64> %res, ptr %b @@ -204,6 +335,47 @@ define void 
@fcvtzu_v8f16_v8i64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q1, q0, [x1, #32] ; CHECK-NEXT: add sp, sp, #64 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v8f16_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: mov h1, v0.h[2] +; NONEON-NOSVE-NEXT: mov h3, v0.h[3] +; NONEON-NOSVE-NEXT: mov h4, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov h5, v2.h[2] +; NONEON-NOSVE-NEXT: mov h6, v2.h[3] +; NONEON-NOSVE-NEXT: mov h7, v2.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvtzu x9, s0 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvtzu x13, s2 +; NONEON-NOSVE-NEXT: fcvtzu x8, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h7 +; NONEON-NOSVE-NEXT: fcvtzu x10, s3 +; NONEON-NOSVE-NEXT: fcvtzu x11, s4 +; NONEON-NOSVE-NEXT: fcvtzu x12, s5 +; NONEON-NOSVE-NEXT: fcvtzu x14, s6 +; NONEON-NOSVE-NEXT: fmov d3, x13 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: fcvtzu x8, s1 +; NONEON-NOSVE-NEXT: fmov d1, x9 +; NONEON-NOSVE-NEXT: fmov d2, x12 +; NONEON-NOSVE-NEXT: mov v0.d[1], x10 +; NONEON-NOSVE-NEXT: mov v1.d[1], x11 +; NONEON-NOSVE-NEXT: mov v3.d[1], x8 +; NONEON-NOSVE-NEXT: mov v2.d[1], x14 +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: stp q3, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %res = fptoui <8 x half> %op1 to <8 x i64> store <8 x i64> %res, ptr %b @@ -264,6 +436,80 @@ define void @fcvtzu_v16f16_v16i64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q5, q2, [x1, #96] ; CHECK-NEXT: add sp, sp, #128 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v16f16_v16i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #-32]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: mov h2, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s3, h1 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #24] +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: mov h7, v0.h[2] +; NONEON-NOSVE-NEXT: mov h16, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s6, h0 +; NONEON-NOSVE-NEXT: mov h0, v0.h[1] +; NONEON-NOSVE-NEXT: mov h1, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt s17, h4 +; NONEON-NOSVE-NEXT: mov h18, v4.h[2] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvtzu x8, s3 +; NONEON-NOSVE-NEXT: fcvt s3, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h7 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: mov h16, v4.h[3] +; NONEON-NOSVE-NEXT: fcvtzu x9, s6 +; NONEON-NOSVE-NEXT: ldr d6, [sp, #8] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: mov h4, v4.h[1] +; NONEON-NOSVE-NEXT: fcvtzu x11, s2 +; NONEON-NOSVE-NEXT: mov h2, v6.h[2] +; NONEON-NOSVE-NEXT: fcvtzu x10, s17 +; NONEON-NOSVE-NEXT: fcvtzu x13, s5 +; NONEON-NOSVE-NEXT: fcvtzu x12, s3 +; NONEON-NOSVE-NEXT: mov h3, v6.h[3] +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: mov h5, v6.h[1] +; NONEON-NOSVE-NEXT: fcvt s17, h18 +; NONEON-NOSVE-NEXT: fcvtzu x14, s7 +; NONEON-NOSVE-NEXT: fmov d7, x8 +; NONEON-NOSVE-NEXT: fcvtzu x8, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fmov d0, x11 +; NONEON-NOSVE-NEXT: fcvtzu x11, s1 +; NONEON-NOSVE-NEXT: fmov d1, x13 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvtzu x13, s16 +; NONEON-NOSVE-NEXT: fmov d16, x9 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvtzu x15, s17 +; NONEON-NOSVE-NEXT: mov v0.d[1], x12 +; NONEON-NOSVE-NEXT: mov v1.d[1], x14 +; NONEON-NOSVE-NEXT: fcvtzu x9, s2 +; NONEON-NOSVE-NEXT: mov v16.d[1], x8 +; NONEON-NOSVE-NEXT: fcvtzu x8, s6 +; NONEON-NOSVE-NEXT: fcvtzu x14, s4 +; NONEON-NOSVE-NEXT: fcvtzu x12, s3 +; NONEON-NOSVE-NEXT: mov v7.d[1], x11 +; NONEON-NOSVE-NEXT: fmov d3, x10 +; NONEON-NOSVE-NEXT: fcvtzu x11, s5 +; NONEON-NOSVE-NEXT: fmov d2, x15 +; NONEON-NOSVE-NEXT: stp q16, q1, [x1, #64] +; NONEON-NOSVE-NEXT: fmov d1, x9 +; NONEON-NOSVE-NEXT: fmov d4, x8 +; NONEON-NOSVE-NEXT: stp q7, q0, [x1] +; NONEON-NOSVE-NEXT: mov v2.d[1], x13 +; NONEON-NOSVE-NEXT: mov v3.d[1], x14 +; NONEON-NOSVE-NEXT: mov v1.d[1], x12 +; NONEON-NOSVE-NEXT: mov v4.d[1], x11 +; NONEON-NOSVE-NEXT: stp q3, q2, [x1, #96] +; NONEON-NOSVE-NEXT: stp q4, q1, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %res = fptoui <16 x half> %op1 to <16 x i64> store <16 x i64> %res, ptr %b @@ -282,6 +528,11 @@ define <2 x i16> @fcvtzu_v2f32_v2i16(<2 x float> %op1) { ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v2f32_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = fptoui <2 x float> %op1 to <2 x i16> ret <2 x i16> %res } @@ -295,6 +546,12 @@ define <4 x i16> @fcvtzu_v4f32_v4i16(<4 x float> %op1) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v4f32_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fptoui <4 x float> %op1 to <4 x i16> ret <4 x i16> %res } @@ -312,6 +569,14 @@ define <8 x i16> @fcvtzu_v8f32_v8i16(ptr %a) { ; 
CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v8f32_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzu v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %res = fptoui <8 x float> %op1 to <8 x i16> ret <8 x i16> %res @@ -336,6 +601,19 @@ define void @fcvtzu_v16f32_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: splice z2.h, p0, z2.h, z3.h ; CHECK-NEXT: stp q2, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v16f32_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fcvtzu v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzu v3.4s, v3.4s +; NONEON-NOSVE-NEXT: fcvtzu v2.4s, v2.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x float>, ptr %a %res = fptoui <16 x float> %op1 to <16 x i16> store <16 x i16> %res, ptr %b @@ -354,6 +632,11 @@ define <2 x i32> @fcvtzu_v2f32_v2i32(<2 x float> %op1) { ; CHECK-NEXT: fcvtzu z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v2f32_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzu v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = fptoui <2 x float> %op1 to <2 x i32> ret <2 x i32> %res } @@ -366,6 +649,11 @@ define <4 x i32> @fcvtzu_v4f32_v4i32(<4 x float> %op1) { ; CHECK-NEXT: fcvtzu z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v4f32_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fptoui <4 x float> %op1 to <4 x i32> ret <4 x i32> %res } @@ -379,6 +667,14 @@ define void @fcvtzu_v8f32_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzu z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v8f32_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzu v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzu v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %res = fptoui <8 x float> %op1 to <8 x i32> store <8 x i32> %res, ptr %b @@ -398,6 +694,13 @@ define <1 x i64> @fcvtzu_v1f32_v1i64(<1 x float> %op1) { ; CHECK-NEXT: fcvtzu z0.d, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v1f32_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = fptoui <1 x float> %op1 to <1 x i64> ret <1 x i64> %res } @@ -411,6 +714,12 @@ define <2 x i64> @fcvtzu_v2f32_v2i64(<2 x float> %op1) { ; CHECK-NEXT: fcvtzu z0.d, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v2f32_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptoui <2 x float> %op1 to <2 x i64> ret <2 x i64> %res } @@ -427,6 +736,20 @@ define void @fcvtzu_v4f32_v4i64(ptr %a, ptr %b) { ; 
CHECK-NEXT: fcvtzu z0.d, p0/m, z0.s ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v4f32_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzu v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x float>, ptr %a %res = fptoui <4 x float> %op1 to <4 x i64> store <4 x i64> %res, ptr %b @@ -451,6 +774,26 @@ define void @fcvtzu_v8f32_v8i64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v8f32_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtl v2.2d, v2.2s +; NONEON-NOSVE-NEXT: fcvtl v3.2d, v3.2s +; NONEON-NOSVE-NEXT: fcvtzu v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzu v2.2d, v2.2d +; NONEON-NOSVE-NEXT: fcvtzu v3.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %res = fptoui <8 x float> %op1 to <8 x i64> store <8 x i64> %res, ptr %b @@ -468,6 +811,12 @@ define <1 x i16> @fcvtzu_v1f64_v1i16(<1 x double> %op1) { ; CHECK-NEXT: mov z0.h, w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v1f64_v1i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs w8, d0 +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: ret %res = fptoui <1 x double> %op1 to <1 x i16> ret <1 x i16> %res } @@ -481,6 +830,12 @@ define <2 x i16> @fcvtzu_v2f64_v2i16(<2 x double> %op1) { ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v2f64_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: xtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptoui <2 x double> %op1 to <2 x i16> ret <2 x i16> %res } @@ -509,6 +864,15 @@ define <4 x i16> @fcvtzu_v4f64_v4i16(ptr %a) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v4f64_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %res = fptoui <4 x double> %op1 to <4 x i16> ret <4 x i16> %res @@ -552,6 +916,23 @@ define <8 x i16> @fcvtzu_v8f64_v8i16(ptr %a) { ; CHECK-NEXT: strh w8, [sp, #2] ; CHECK-NEXT: ldr q0, [sp], #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v8f64_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #32] +; NONEON-NOSVE-NEXT: adrp x8, .LCPI26_0 +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; 
NONEON-NOSVE-NEXT: fcvtzs v2.2d, v2.2d +; NONEON-NOSVE-NEXT: fcvtzs v3.2d, v3.2d +; NONEON-NOSVE-NEXT: xtn v7.2s, v0.2d +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI26_0] +; NONEON-NOSVE-NEXT: xtn v6.2s, v1.2d +; NONEON-NOSVE-NEXT: xtn v5.2s, v2.2d +; NONEON-NOSVE-NEXT: xtn v4.2s, v3.2d +; NONEON-NOSVE-NEXT: tbl v0.16b, { v4.16b, v5.16b, v6.16b, v7.16b }, v0.16b +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x double>, ptr %a %res = fptoui <8 x double> %op1 to <8 x i16> ret <8 x i16> %res @@ -628,6 +1009,35 @@ define void @fcvtzu_v16f64_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v16f64_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #96] +; NONEON-NOSVE-NEXT: adrp x8, .LCPI27_0 +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q4, q5, [x0, #64] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzs v3.2d, v3.2d +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzs v2.2d, v2.2d +; NONEON-NOSVE-NEXT: fcvtzs v5.2d, v5.2d +; NONEON-NOSVE-NEXT: fcvtzs v4.2d, v4.2d +; NONEON-NOSVE-NEXT: fcvtzs v6.2d, v6.2d +; NONEON-NOSVE-NEXT: fcvtzs v7.2d, v7.2d +; NONEON-NOSVE-NEXT: xtn v19.2s, v0.2d +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI27_0] +; NONEON-NOSVE-NEXT: xtn v23.2s, v3.2d +; NONEON-NOSVE-NEXT: xtn v18.2s, v1.2d +; NONEON-NOSVE-NEXT: xtn v22.2s, v2.2d +; NONEON-NOSVE-NEXT: xtn v17.2s, v5.2d +; NONEON-NOSVE-NEXT: xtn v21.2s, v6.2d +; NONEON-NOSVE-NEXT: xtn v16.2s, v4.2d +; NONEON-NOSVE-NEXT: xtn v20.2s, v7.2d +; NONEON-NOSVE-NEXT: tbl v1.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v0.16b +; NONEON-NOSVE-NEXT: tbl v0.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v0.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x double>, ptr %a %res = fptoui <16 x double> %op1 to <16 x i16> store <16 x i16> %res, ptr %b @@ -647,6 +1057,13 @@ define <1 x i32> @fcvtzu_v1f64_v1i32(<1 x double> %op1) { ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v1f64_v1i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: xtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptoui <1 x double> %op1 to <1 x i32> ret <1 x i32> %res } @@ -660,6 +1077,12 @@ define <2 x i32> @fcvtzu_v2f64_v2i32(<2 x double> %op1) { ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v2f64_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: xtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptoui <2 x double> %op1 to <2 x i32> ret <2 x i32> %res } @@ -677,6 +1100,14 @@ define <4 x i32> @fcvtzu_v4f64_v4i32(ptr %a) { ; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v4f64_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzu v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %res = fptoui <4 x double> %op1 to <4 x i32> ret <4 x i32> %res @@ -701,6 +1132,19 @@ define void @fcvtzu_v8f64_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: splice z2.s, p0, 
z2.s, z3.s ; CHECK-NEXT: stp q2, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v8f64_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fcvtzu v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzu v3.2d, v3.2d +; NONEON-NOSVE-NEXT: fcvtzu v2.2d, v2.2d +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: uzp1 v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x double>, ptr %a %res = fptoui <8 x double> %op1 to <8 x i32> store <8 x i32> %res, ptr %b @@ -719,6 +1163,12 @@ define <1 x i64> @fcvtzu_v1f64_v1i64(<1 x double> %op1) { ; CHECK-NEXT: fcvtzu z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v1f64_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzu x8, d0 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %res = fptoui <1 x double> %op1 to <1 x i64> ret <1 x i64> %res } @@ -731,6 +1181,11 @@ define <2 x i64> @fcvtzu_v2f64_v2i64(<2 x double> %op1) { ; CHECK-NEXT: fcvtzu z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v2f64_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptoui <2 x double> %op1 to <2 x i64> ret <2 x i64> %res } @@ -744,6 +1199,14 @@ define void @fcvtzu_v4f64_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzu z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzu_v4f64_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzu v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzu v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %res = fptoui <4 x double> %op1 to <4 x i64> store <4 x i64> %res, ptr %b @@ -762,6 +1225,13 @@ define <4 x i16> @fcvtzs_v4f16_v4i16(<4 x half> %op1) { ; CHECK-NEXT: fcvtzs z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v4f16_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fptosi <4 x half> %op1 to <4 x i16> ret <4 x i16> %res } @@ -774,6 +1244,21 @@ define void @fcvtzs_v8f16_v8i16(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzs z0.h, p0/m, z0.h ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v8f16_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %res = fptosi <8 x half> %op1 to <8 x i16> store <8 x i16> %res, ptr %b @@ -789,6 +1274,27 @@ define void @fcvtzs_v16f16_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzs z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v16f16_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtzs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzs v2.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtzs v3.4s, v3.4s +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v1.8h, v2.8h +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %res = fptosi <16 x half> %op1 to <16 x i16> store <16 x i16> %res, ptr %b @@ -808,6 +1314,13 @@ define <2 x i32> @fcvtzs_v2f16_v2i32(<2 x half> %op1) { ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v2f16_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = fptosi <2 x half> %op1 to <2 x i32> ret <2 x i32> %res } @@ -821,6 +1334,12 @@ define <4 x i32> @fcvtzs_v4f16_v4i32(<4 x half> %op1) { ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v4f16_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fptosi <4 x half> %op1 to <4 x i32> ret <4 x i32> %res } @@ -837,6 +1356,20 @@ define void @fcvtzs_v8f16_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.h ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v8f16_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %res = fptosi <8 x half> %op1 to <8 x i32> store <8 x i32> %res, ptr %b @@ -861,6 +1394,26 @@ define void @fcvtzs_v16f16_v16i32(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v16f16_v16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtzs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzs v2.4s, v2.4s +; NONEON-NOSVE-NEXT: fcvtzs v3.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %res = fptosi <16 x half> %op1 to <16 x i32> store <16 x i32> %res, ptr %b @@ -877,6 +1430,13 @@ define <1 x i64> @fcvtzs_v1f16_v1i64(<1 x half> %op1) { ; CHECK-NEXT: fcvtzs x8, h0 ; CHECK-NEXT: fmov d0, x8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v1f16_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvtzs x8, s0 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %res = fptosi <1 x half> %op1 to <1 x i64> ret <1 x i64> %res } @@ -893,6 +1453,18 @@ define <2 x i64> @fcvtzs_v2f16_v2i64(<2 x half> %op1) { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldr q0, [sp], #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v2f16_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov h1, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvtzs x8, s0 +; NONEON-NOSVE-NEXT: fcvtzs x9, s1 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; NONEON-NOSVE-NEXT: ret %res = fptosi <2 x half> %op1 to <2 x i64> ret <2 x i64> %res } @@ -915,6 +1487,27 @@ define void @fcvtzs_v4f16_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v4f16_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: mov h1, v0.h[2] +; NONEON-NOSVE-NEXT: mov h2, v0.h[3] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvtzs x9, s0 +; NONEON-NOSVE-NEXT: fcvtzs x8, s1 +; NONEON-NOSVE-NEXT: fcvtzs x10, s2 +; NONEON-NOSVE-NEXT: fcvtzs x11, s3 +; NONEON-NOSVE-NEXT: fmov d1, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: mov v1.d[1], x11 +; NONEON-NOSVE-NEXT: mov v0.d[1], x10 +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x half>, ptr %a %res = fptosi <4 x half> %op1 to <4 x i64> store <4 x i64> %res, ptr %b @@ -952,6 +1545,47 @@ define void 
@fcvtzs_v8f16_v8i64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q1, q0, [x1, #32] ; CHECK-NEXT: add sp, sp, #64 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v8f16_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: mov h1, v0.h[2] +; NONEON-NOSVE-NEXT: mov h3, v0.h[3] +; NONEON-NOSVE-NEXT: mov h4, v0.h[1] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: mov h5, v2.h[2] +; NONEON-NOSVE-NEXT: mov h6, v2.h[3] +; NONEON-NOSVE-NEXT: mov h7, v2.h[1] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvtzs x9, s0 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvtzs x13, s2 +; NONEON-NOSVE-NEXT: fcvtzs x8, s1 +; NONEON-NOSVE-NEXT: fcvt s1, h7 +; NONEON-NOSVE-NEXT: fcvtzs x10, s3 +; NONEON-NOSVE-NEXT: fcvtzs x11, s4 +; NONEON-NOSVE-NEXT: fcvtzs x12, s5 +; NONEON-NOSVE-NEXT: fcvtzs x14, s6 +; NONEON-NOSVE-NEXT: fmov d3, x13 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: fcvtzs x8, s1 +; NONEON-NOSVE-NEXT: fmov d1, x9 +; NONEON-NOSVE-NEXT: fmov d2, x12 +; NONEON-NOSVE-NEXT: mov v0.d[1], x10 +; NONEON-NOSVE-NEXT: mov v1.d[1], x11 +; NONEON-NOSVE-NEXT: mov v3.d[1], x8 +; NONEON-NOSVE-NEXT: mov v2.d[1], x14 +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: stp q3, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %res = fptosi <8 x half> %op1 to <8 x i64> store <8 x i64> %res, ptr %b @@ -1012,6 +1646,80 @@ define void @fcvtzs_v16f16_v16i64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q5, q2, [x1, #96] ; CHECK-NEXT: add sp, sp, #128 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v16f16_v16i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #-32]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: mov h2, v1.h[2] +; NONEON-NOSVE-NEXT: fcvt s3, h1 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #24] +; NONEON-NOSVE-NEXT: mov h5, v1.h[3] +; NONEON-NOSVE-NEXT: mov h7, v0.h[2] +; NONEON-NOSVE-NEXT: mov h16, v0.h[3] +; NONEON-NOSVE-NEXT: fcvt s6, h0 +; NONEON-NOSVE-NEXT: mov h0, v0.h[1] +; NONEON-NOSVE-NEXT: mov h1, v1.h[1] +; NONEON-NOSVE-NEXT: fcvt s17, h4 +; NONEON-NOSVE-NEXT: mov h18, v4.h[2] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvtzs x8, s3 +; NONEON-NOSVE-NEXT: fcvt s3, h5 +; NONEON-NOSVE-NEXT: fcvt s5, h7 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: mov h16, v4.h[3] +; NONEON-NOSVE-NEXT: fcvtzs x9, s6 +; NONEON-NOSVE-NEXT: ldr d6, [sp, #8] +; NONEON-NOSVE-NEXT: fcvt s0, h0 +; NONEON-NOSVE-NEXT: fcvt s1, h1 +; NONEON-NOSVE-NEXT: mov h4, v4.h[1] +; NONEON-NOSVE-NEXT: fcvtzs x11, s2 +; NONEON-NOSVE-NEXT: mov h2, v6.h[2] +; NONEON-NOSVE-NEXT: fcvtzs x10, s17 +; NONEON-NOSVE-NEXT: fcvtzs x13, s5 +; NONEON-NOSVE-NEXT: fcvtzs x12, s3 +; NONEON-NOSVE-NEXT: mov h3, v6.h[3] +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: mov h5, v6.h[1] +; NONEON-NOSVE-NEXT: fcvt s17, h18 +; NONEON-NOSVE-NEXT: fcvtzs x14, s7 +; NONEON-NOSVE-NEXT: fmov d7, x8 +; NONEON-NOSVE-NEXT: fcvtzs x8, s0 +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fmov d0, x11 +; NONEON-NOSVE-NEXT: fcvtzs x11, s1 +; NONEON-NOSVE-NEXT: fmov d1, x13 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvtzs x13, s16 +; NONEON-NOSVE-NEXT: fmov d16, x9 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvtzs x15, s17 +; NONEON-NOSVE-NEXT: mov v0.d[1], x12 +; NONEON-NOSVE-NEXT: mov v1.d[1], x14 +; NONEON-NOSVE-NEXT: fcvtzs x9, s2 +; NONEON-NOSVE-NEXT: mov v16.d[1], x8 +; NONEON-NOSVE-NEXT: fcvtzs x8, s6 +; NONEON-NOSVE-NEXT: fcvtzs x14, s4 +; NONEON-NOSVE-NEXT: fcvtzs x12, s3 +; NONEON-NOSVE-NEXT: mov v7.d[1], x11 +; NONEON-NOSVE-NEXT: fmov d3, x10 +; NONEON-NOSVE-NEXT: fcvtzs x11, s5 +; NONEON-NOSVE-NEXT: fmov d2, x15 +; NONEON-NOSVE-NEXT: stp q16, q1, [x1, #64] +; NONEON-NOSVE-NEXT: fmov d1, x9 +; NONEON-NOSVE-NEXT: fmov d4, x8 +; NONEON-NOSVE-NEXT: stp q7, q0, [x1] +; NONEON-NOSVE-NEXT: mov v2.d[1], x13 +; NONEON-NOSVE-NEXT: mov v3.d[1], x14 +; NONEON-NOSVE-NEXT: mov v1.d[1], x12 +; NONEON-NOSVE-NEXT: mov v4.d[1], x11 +; NONEON-NOSVE-NEXT: stp q3, q2, [x1, #96] +; NONEON-NOSVE-NEXT: stp q4, q1, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %res = fptosi <16 x half> %op1 to <16 x i64> store <16 x i64> %res, ptr %b @@ -1030,6 +1738,11 @@ define <2 x i16> @fcvtzs_v2f32_v2i16(<2 x float> %op1) { ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v2f32_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = fptosi <2 x float> %op1 to <2 x i16> ret <2 x i16> %res } @@ -1043,6 +1756,12 @@ define <4 x i16> @fcvtzs_v4f32_v4i16(<4 x float> %op1) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v4f32_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fptosi <4 x float> %op1 to <4 x i16> ret <4 x i16> %res } @@ -1060,6 +1779,14 @@ define <8 x i16> @fcvtzs_v8f32_v8i16(ptr %a) { ; 
CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v8f32_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %res = fptosi <8 x float> %op1 to <8 x i16> ret <8 x i16> %res @@ -1084,6 +1811,19 @@ define void @fcvtzs_v16f32_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: splice z2.h, p0, z2.h, z3.h ; CHECK-NEXT: stp q2, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v16f32_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzs v3.4s, v3.4s +; NONEON-NOSVE-NEXT: fcvtzs v2.4s, v2.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x float>, ptr %a %res = fptosi <16 x float> %op1 to <16 x i16> store <16 x i16> %res, ptr %b @@ -1102,6 +1842,11 @@ define <2 x i32> @fcvtzs_v2f32_v2i32(<2 x float> %op1) { ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v2f32_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = fptosi <2 x float> %op1 to <2 x i32> ret <2 x i32> %res } @@ -1114,6 +1859,11 @@ define <4 x i32> @fcvtzs_v4f32_v4i32(<4 x float> %op1) { ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v4f32_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = fptosi <4 x float> %op1 to <4 x i32> ret <4 x i32> %res } @@ -1127,6 +1877,14 @@ define void @fcvtzs_v8f32_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzs z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v8f32_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtzs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %res = fptosi <8 x float> %op1 to <8 x i32> store <8 x i32> %res, ptr %b @@ -1146,6 +1904,13 @@ define <1 x i64> @fcvtzs_v1f32_v1i64(<1 x float> %op1) { ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v1f32_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = fptosi <1 x float> %op1 to <1 x i64> ret <1 x i64> %res } @@ -1159,6 +1924,12 @@ define <2 x i64> @fcvtzs_v2f32_v2i64(<2 x float> %op1) { ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v2f32_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptosi <2 x float> %op1 to <2 x i64> ret <2 x i64> %res } @@ -1175,6 +1946,20 @@ define void @fcvtzs_v4f32_v4i64(ptr %a, ptr %b) 
{ ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v4f32_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x float>, ptr %a %res = fptosi <4 x float> %op1 to <4 x i64> store <4 x i64> %res, ptr %b @@ -1199,6 +1984,26 @@ define void @fcvtzs_v8f32_v8i64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v8f32_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: fcvtl v1.2d, v1.2s +; NONEON-NOSVE-NEXT: fcvtl v0.2d, v0.2s +; NONEON-NOSVE-NEXT: fcvtl v2.2d, v2.2s +; NONEON-NOSVE-NEXT: fcvtl v3.2d, v3.2s +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzs v2.2d, v2.2d +; NONEON-NOSVE-NEXT: fcvtzs v3.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %res = fptosi <8 x float> %op1 to <8 x i64> store <8 x i64> %res, ptr %b @@ -1218,6 +2023,12 @@ define <1 x i16> @fcvtzs_v1f64_v1i16(<1 x double> %op1) { ; CHECK-NEXT: mov z0.h, w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v1f64_v1i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs w8, d0 +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: ret %res = fptosi <1 x double> %op1 to <1 x i16> ret <1 x i16> %res } @@ -1231,6 +2042,12 @@ define <2 x i16> @fcvtzs_v2f64_v2i16(<2 x double> %op1) { ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v2f64_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: xtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptosi <2 x double> %op1 to <2 x i16> ret <2 x i16> %res } @@ -1259,6 +2076,15 @@ define <4 x i16> @fcvtzs_v4f64_v4i16(ptr %a) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v4f64_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %res = fptosi <4 x double> %op1 to <4 x i16> ret <4 x i16> %res @@ -1302,6 +2128,23 @@ define <8 x i16> @fcvtzs_v8f64_v8i16(ptr %a) { ; CHECK-NEXT: strh w8, [sp, #2] ; CHECK-NEXT: ldr q0, [sp], #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v8f64_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #32] +; NONEON-NOSVE-NEXT: adrp x8, .LCPI61_0 +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, 
v1.2d +; NONEON-NOSVE-NEXT: fcvtzs v2.2d, v2.2d +; NONEON-NOSVE-NEXT: fcvtzs v3.2d, v3.2d +; NONEON-NOSVE-NEXT: xtn v7.2s, v0.2d +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI61_0] +; NONEON-NOSVE-NEXT: xtn v6.2s, v1.2d +; NONEON-NOSVE-NEXT: xtn v5.2s, v2.2d +; NONEON-NOSVE-NEXT: xtn v4.2s, v3.2d +; NONEON-NOSVE-NEXT: tbl v0.16b, { v4.16b, v5.16b, v6.16b, v7.16b }, v0.16b +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x double>, ptr %a %res = fptosi <8 x double> %op1 to <8 x i16> ret <8 x i16> %res @@ -1378,6 +2221,35 @@ define void @fcvtzs_v16f64_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v16f64_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #96] +; NONEON-NOSVE-NEXT: adrp x8, .LCPI62_0 +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q4, q5, [x0, #64] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzs v3.2d, v3.2d +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzs v2.2d, v2.2d +; NONEON-NOSVE-NEXT: fcvtzs v5.2d, v5.2d +; NONEON-NOSVE-NEXT: fcvtzs v4.2d, v4.2d +; NONEON-NOSVE-NEXT: fcvtzs v6.2d, v6.2d +; NONEON-NOSVE-NEXT: fcvtzs v7.2d, v7.2d +; NONEON-NOSVE-NEXT: xtn v19.2s, v0.2d +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI62_0] +; NONEON-NOSVE-NEXT: xtn v23.2s, v3.2d +; NONEON-NOSVE-NEXT: xtn v18.2s, v1.2d +; NONEON-NOSVE-NEXT: xtn v22.2s, v2.2d +; NONEON-NOSVE-NEXT: xtn v17.2s, v5.2d +; NONEON-NOSVE-NEXT: xtn v21.2s, v6.2d +; NONEON-NOSVE-NEXT: xtn v16.2s, v4.2d +; NONEON-NOSVE-NEXT: xtn v20.2s, v7.2d +; NONEON-NOSVE-NEXT: tbl v1.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v0.16b +; NONEON-NOSVE-NEXT: tbl v0.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v0.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x double>, ptr %a %res = fptosi <16 x double> %op1 to <16 x i16> store <16 x i16> %res, ptr %b @@ -1397,6 +2269,13 @@ define <1 x i32> @fcvtzs_v1f64_v1i32(<1 x double> %op1) { ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v1f64_v1i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: xtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptosi <1 x double> %op1 to <1 x i32> ret <1 x i32> %res } @@ -1410,6 +2289,12 @@ define <2 x i32> @fcvtzs_v2f64_v2i32(<2 x double> %op1) { ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v2f64_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: xtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptosi <2 x double> %op1 to <2 x i32> ret <2 x i32> %res } @@ -1427,6 +2312,14 @@ define <4 x i32> @fcvtzs_v4f64_v4i32(ptr %a) { ; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v4f64_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %res = fptosi <4 x double> %op1 to <4 x i32> ret <4 x i32> %res @@ -1451,6 +2344,19 @@ define void @fcvtzs_v8f64_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: splice 
z2.s, p0, z2.s, z3.s ; CHECK-NEXT: stp q2, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v8f64_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzs v3.2d, v3.2d +; NONEON-NOSVE-NEXT: fcvtzs v2.2d, v2.2d +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: uzp1 v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x double>, ptr %a %res = fptosi <8 x double> %op1 to <8 x i32> store <8 x i32> %res, ptr %b @@ -1469,6 +2375,12 @@ define <1 x i64> @fcvtzs_v1f64_v1i64(<1 x double> %op1) { ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v1f64_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs x8, d0 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %res = fptosi <1 x double> %op1 to <1 x i64> ret <1 x i64> %res } @@ -1481,6 +2393,11 @@ define <2 x i64> @fcvtzs_v2f64_v2i64(<2 x double> %op1) { ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v2f64_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = fptosi <2 x double> %op1 to <2 x i64> ret <2 x i64> %res } @@ -1494,6 +2411,14 @@ define void @fcvtzs_v4f64_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fcvtzs_v4f64_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fcvtzs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtzs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %res = fptosi <4 x double> %op1 to <4 x i64> store <4 x i64> %res, ptr %b diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll index 30a4f04a3d2bd6..32fe74bbb65f47 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -27,6 +28,14 @@ define <2 x half> @select_v2f16(<2 x half> %op1, <2 x half> %op2, <2 x i1> %mask ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: uzp1 v2.4h, v2.4h, v0.4h +; NONEON-NOSVE-NEXT: shl v2.4h, v2.4h, #15 +; NONEON-NOSVE-NEXT: cmlt v2.4h, v2.4h, #0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <2 x i1> %mask, <2 x half> %op1, <2 x half> %op2 ret <2 x half> %sel } @@ -45,6 +54,13 @@ define <4 x half> @select_v4f16(<4 x half> %op1, <4 x half> %op2, <4 x i1> %mask ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: 
select_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v2.4h, v2.4h, #15 +; NONEON-NOSVE-NEXT: cmlt v2.4h, v2.4h, #0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <4 x i1> %mask, <4 x half> %op1, <4 x half> %op2 ret <4 x half> %sel } @@ -64,6 +80,14 @@ define <8 x half> @select_v8f16(<8 x half> %op1, <8 x half> %op2, <8 x i1> %mask ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v2.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: shl v2.8h, v2.8h, #15 +; NONEON-NOSVE-NEXT: cmlt v2.8h, v2.8h, #0 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select <8 x i1> %mask, <8 x half> %op1, <8 x half> %op2 ret <8 x half> %sel } @@ -80,6 +104,126 @@ define void @select_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: sel z1.h, p0, z2.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: mov h2, v1.h[1] +; NONEON-NOSVE-NEXT: mov h3, v0.h[1] +; NONEON-NOSVE-NEXT: mov h4, v1.h[2] +; NONEON-NOSVE-NEXT: mov h5, v0.h[2] +; NONEON-NOSVE-NEXT: fcvt s6, h1 +; NONEON-NOSVE-NEXT: fcvt s7, h0 +; NONEON-NOSVE-NEXT: mov h16, v1.h[6] +; NONEON-NOSVE-NEXT: mov h17, v0.h[6] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fcmp s3, s2 +; NONEON-NOSVE-NEXT: mov h2, v1.h[3] +; NONEON-NOSVE-NEXT: mov h3, v0.h[3] +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[4] +; NONEON-NOSVE-NEXT: mov h7, v0.h[4] +; NONEON-NOSVE-NEXT: fcvt s2, h2 +; NONEON-NOSVE-NEXT: fcvt s3, h3 +; NONEON-NOSVE-NEXT: csetm w14, eq +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v1.h[5] +; NONEON-NOSVE-NEXT: mov h5, v0.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w12, eq +; NONEON-NOSVE-NEXT: fcmp s3, s2 +; NONEON-NOSVE-NEXT: ldr q2, [x0, #16] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: ldr q3, [x1, #16] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w11, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v1.h[7] +; NONEON-NOSVE-NEXT: mov h7, v0.h[7] +; NONEON-NOSVE-NEXT: mov h18, v3.h[3] +; NONEON-NOSVE-NEXT: csetm w13, eq +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: mov h4, v3.h[1] +; NONEON-NOSVE-NEXT: mov h5, v2.h[1] +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: csetm w9, eq +; NONEON-NOSVE-NEXT: fcmp s17, s16 +; NONEON-NOSVE-NEXT: mov h16, v3.h[2] +; NONEON-NOSVE-NEXT: fcvt s4, h4 +; NONEON-NOSVE-NEXT: mov h17, v2.h[2] +; NONEON-NOSVE-NEXT: fcvt s5, h5 +; NONEON-NOSVE-NEXT: csetm w10, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: fcvt s6, h3 +; NONEON-NOSVE-NEXT: fcvt s7, h2 +; NONEON-NOSVE-NEXT: csetm w15, eq +; NONEON-NOSVE-NEXT: fcmp s5, s4 +; NONEON-NOSVE-NEXT: fmov s4, w14 +; NONEON-NOSVE-NEXT: csetm w16, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v2.h[3] +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: fcvt s16, h17 +; NONEON-NOSVE-NEXT: mov v4.h[1], w8 +; NONEON-NOSVE-NEXT: fcvt s17, h18 +; NONEON-NOSVE-NEXT: csetm w14, 
eq +; NONEON-NOSVE-NEXT: fmov s5, w14 +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcmp s16, s7 +; NONEON-NOSVE-NEXT: mov h7, v3.h[4] +; NONEON-NOSVE-NEXT: mov h16, v2.h[4] +; NONEON-NOSVE-NEXT: mov v4.h[2], w12 +; NONEON-NOSVE-NEXT: mov v5.h[1], w16 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s6, s17 +; NONEON-NOSVE-NEXT: mov h17, v2.h[5] +; NONEON-NOSVE-NEXT: fcvt s6, h7 +; NONEON-NOSVE-NEXT: fcvt s7, h16 +; NONEON-NOSVE-NEXT: mov h16, v3.h[5] +; NONEON-NOSVE-NEXT: mov v4.h[3], w11 +; NONEON-NOSVE-NEXT: mov v5.h[2], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcvt s17, h17 +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov h6, v3.h[6] +; NONEON-NOSVE-NEXT: mov h7, v2.h[6] +; NONEON-NOSVE-NEXT: fcvt s16, h16 +; NONEON-NOSVE-NEXT: mov v4.h[4], w13 +; NONEON-NOSVE-NEXT: mov v5.h[3], w8 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcvt s6, h6 +; NONEON-NOSVE-NEXT: fcvt s7, h7 +; NONEON-NOSVE-NEXT: fcmp s17, s16 +; NONEON-NOSVE-NEXT: mov h16, v3.h[7] +; NONEON-NOSVE-NEXT: mov h17, v2.h[7] +; NONEON-NOSVE-NEXT: mov v5.h[4], w8 +; NONEON-NOSVE-NEXT: mov v4.h[5], w9 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: fcvt s6, h16 +; NONEON-NOSVE-NEXT: fcvt s7, h17 +; NONEON-NOSVE-NEXT: mov v5.h[5], w8 +; NONEON-NOSVE-NEXT: mov v4.h[6], w10 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: fcmp s7, s6 +; NONEON-NOSVE-NEXT: mov v5.h[6], w8 +; NONEON-NOSVE-NEXT: mov v4.h[7], w15 +; NONEON-NOSVE-NEXT: csetm w8, eq +; NONEON-NOSVE-NEXT: mov v5.h[7], w8 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %mask = fcmp oeq <16 x half> %op1, %op2 @@ -102,6 +246,13 @@ define <2 x float> @select_v2f32(<2 x float> %op1, <2 x float> %op2, <2 x i1> %m ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v2.2s, v2.2s, #31 +; NONEON-NOSVE-NEXT: cmlt v2.2s, v2.2s, #0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <2 x i1> %mask, <2 x float> %op1, <2 x float> %op2 ret <2 x float> %sel } @@ -121,6 +272,14 @@ define <4 x float> @select_v4f32(<4 x float> %op1, <4 x float> %op2, <4 x i1> %m ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: shl v2.4s, v2.4s, #31 +; NONEON-NOSVE-NEXT: cmlt v2.4s, v2.4s, #0 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select <4 x i1> %mask, <4 x float> %op1, <4 x float> %op2 ret <4 x float> %sel } @@ -137,6 +296,18 @@ define void @select_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: sel z1.s, p0, z2.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: fcmeq v4.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcmeq v5.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, 
[x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %mask = fcmp oeq <8 x float> %op1, %op2 @@ -151,6 +322,14 @@ define <1 x double> @select_v1f64(<1 x double> %op1, <1 x double> %op2, <1 x i1> ; CHECK-NEXT: tst w0, #0x1 ; CHECK-NEXT: fcsel d0, d0, d1, ne ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm x8, ne +; NONEON-NOSVE-NEXT: fmov d2, x8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <1 x i1> %mask, <1 x double> %op1, <1 x double> %op2 ret <1 x double> %sel } @@ -170,6 +349,14 @@ define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, <2 x i1> ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: shl v2.2d, v2.2d, #63 +; NONEON-NOSVE-NEXT: cmlt v2.2d, v2.2d, #0 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select <2 x i1> %mask, <2 x double> %op1, <2 x double> %op2 ret <2 x double> %sel } @@ -186,6 +373,18 @@ define void @select_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: sel z1.d, p0, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: fcmeq v4.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: fcmeq v5.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %mask = fcmp oeq <4 x double> %op1, %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll index 4aa965777c742d..c85048ab72e038 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -21,6 +22,14 @@ define <4 x i8> @insertelement_v4i8(<4 x i8> %op1) { ; CHECK-NEXT: mov z0.h, p0/m, w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %r = insertelement <4 x i8> %op1, i8 5, i64 3 ret <4 x i8> %r } @@ -38,6 +47,14 @@ define <8 x i8> @insertelement_v8i8(<8 x i8> %op1) { ; CHECK-NEXT: mov z0.b, p0/m, w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v0.b[7], w8 +; 
NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %r = insertelement <8 x i8> %op1, i8 5, i64 7 ret <8 x i8> %r } @@ -55,6 +72,12 @@ define <16 x i8> @insertelement_v16i8(<16 x i8> %op1) { ; CHECK-NEXT: mov z0.b, p0/m, w8 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v0.b[15], w8 +; NONEON-NOSVE-NEXT: ret %r = insertelement <16 x i8> %op1, i8 5, i64 15 ret <16 x i8> %r } @@ -72,6 +95,12 @@ define <32 x i8> @insertelement_v32i8(<32 x i8> %op1) { ; CHECK-NEXT: mov z1.b, p0/m, w8 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v1.b[15], w8 +; NONEON-NOSVE-NEXT: ret %r = insertelement <32 x i8> %op1, i8 5, i64 31 ret <32 x i8> %r } @@ -90,6 +119,14 @@ define <2 x i16> @insertelement_v2i16(<2 x i16> %op1) { ; CHECK-NEXT: mov z0.s, p0/m, w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v0.s[1], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %r = insertelement <2 x i16> %op1, i16 5, i64 1 ret <2 x i16> %r } @@ -107,6 +144,14 @@ define <4 x i16> @insertelement_v4i16(<4 x i16> %op1) { ; CHECK-NEXT: mov z0.h, p0/m, w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %r = insertelement <4 x i16> %op1, i16 5, i64 3 ret <4 x i16> %r } @@ -124,6 +169,12 @@ define <8 x i16> @insertelement_v8i16(<8 x i16> %op1) { ; CHECK-NEXT: mov z0.h, p0/m, w8 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v0.h[7], w8 +; NONEON-NOSVE-NEXT: ret %r = insertelement <8 x i16> %op1, i16 5, i64 7 ret <8 x i16> %r } @@ -141,6 +192,12 @@ define <16 x i16> @insertelement_v16i16(<16 x i16> %op1) { ; CHECK-NEXT: mov z1.h, p0/m, w8 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v1.h[7], w8 +; NONEON-NOSVE-NEXT: ret %r = insertelement <16 x i16> %op1, i16 5, i64 15 ret <16 x i16> %r } @@ -159,6 +216,14 @@ define <2 x i32> @insertelement_v2i32(<2 x i32> %op1) { ; CHECK-NEXT: mov z0.s, p0/m, w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v0.s[1], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %r = insertelement <2 x i32> %op1, i32 5, i64 1 ret <2 x i32> %r } @@ -176,6 +241,12 @@ define <4 x i32> 
@insertelement_v4i32(<4 x i32> %op1) { ; CHECK-NEXT: mov z0.s, p0/m, w8 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v0.s[3], w8 +; NONEON-NOSVE-NEXT: ret %r = insertelement <4 x i32> %op1, i32 5, i64 3 ret <4 x i32> %r } @@ -193,6 +264,13 @@ define <8 x i32> @insertelement_v8i32(ptr %a) { ; CHECK-NEXT: mov z1.s, p0/m, w8 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v1.s[3], w8 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %r = insertelement <8 x i32> %op1, i32 5, i64 7 ret <8 x i32> %r @@ -205,6 +283,12 @@ define <1 x i64> @insertelement_v1i64(<1 x i64> %op1) { ; CHECK-NEXT: mov z0.d, #5 // =0x5 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %r = insertelement <1 x i64> %op1, i64 5, i64 0 ret <1 x i64> %r } @@ -222,6 +306,12 @@ define <2 x i64> @insertelement_v2i64(<2 x i64> %op1) { ; CHECK-NEXT: mov z0.d, p0/m, x8 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v0.d[1], x8 +; NONEON-NOSVE-NEXT: ret %r = insertelement <2 x i64> %op1, i64 5, i64 1 ret <2 x i64> %r } @@ -239,6 +329,13 @@ define <4 x i64> @insertelement_v4i64(ptr %a) { ; CHECK-NEXT: mov z1.d, p0/m, x8 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: mov w8, #5 // =0x5 +; NONEON-NOSVE-NEXT: mov v1.d[1], x8 +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %r = insertelement <4 x i64> %op1, i64 5, i64 3 ret <4 x i64> %r @@ -257,6 +354,16 @@ define <2 x half> @insertelement_v2f16(<2 x half> %op1) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI14_0 +; NONEON-NOSVE-NEXT: add x8, x8, :lo12:.LCPI14_0 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: ld1r { v1.4h }, [x8] +; NONEON-NOSVE-NEXT: mov v1.h[0], v0.h[0] +; NONEON-NOSVE-NEXT: fmov d0, d1 +; NONEON-NOSVE-NEXT: ret %r = insertelement <2 x half> %op1, half 5.0, i64 1 ret <2 x half> %r } @@ -274,6 +381,15 @@ define <4 x half> @insertelement_v4f16(<4 x half> %op1) { ; CHECK-NEXT: mov z0.h, p0/m, h1 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI15_0 +; NONEON-NOSVE-NEXT: add x8, x8, :lo12:.LCPI15_0 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[3], [x8] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %r = insertelement <4 x half> %op1, half 5.0, i64 3 ret <4 x half> %r } @@ -291,6 +407,13 @@ define <8 x half> @insertelement_v8f16(<8 x half> %op1) { ; CHECK-NEXT: mov z0.h, p0/m, h1 ; CHECK-NEXT: // 
kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI16_0 +; NONEON-NOSVE-NEXT: add x8, x8, :lo12:.LCPI16_0 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[7], [x8] +; NONEON-NOSVE-NEXT: ret %r = insertelement <8 x half> %op1, half 5.0, i64 7 ret <8 x half> %r } @@ -308,6 +431,14 @@ define <16 x half> @insertelement_v16f16(ptr %a) { ; CHECK-NEXT: mov z1.h, p0/m, h2 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: adrp x8, .LCPI17_0 +; NONEON-NOSVE-NEXT: add x8, x8, :lo12:.LCPI17_0 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[7], [x8] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %r = insertelement <16 x half> %op1, half 5.0, i64 15 ret <16 x half> %r @@ -327,6 +458,14 @@ define <2 x float> @insertelement_v2f32(<2 x float> %op1) { ; CHECK-NEXT: mov z0.s, p0/m, s1 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov s1, #5.00000000 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: mov v0.s[1], v1.s[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %r = insertelement <2 x float> %op1, float 5.0, i64 1 ret <2 x float> %r } @@ -344,6 +483,12 @@ define <4 x float> @insertelement_v4f32(<4 x float> %op1) { ; CHECK-NEXT: mov z0.s, p0/m, s1 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov s1, #5.00000000 +; NONEON-NOSVE-NEXT: mov v0.s[3], v1.s[0] +; NONEON-NOSVE-NEXT: ret %r = insertelement <4 x float> %op1, float 5.0, i64 3 ret <4 x float> %r } @@ -361,6 +506,13 @@ define <8 x float> @insertelement_v8f32(ptr %a) { ; CHECK-NEXT: mov z1.s, p0/m, s2 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov s2, #5.00000000 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: mov v1.s[3], v2.s[0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %r = insertelement <8 x float> %op1, float 5.0, i64 7 ret <8 x float> %r @@ -372,6 +524,12 @@ define <1 x double> @insertelement_v1f64(<1 x double> %op1) { ; CHECK: // %bb.0: ; CHECK-NEXT: fmov d0, #5.00000000 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov x8, #4617315517961601024 // =0x4014000000000000 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %r = insertelement <1 x double> %op1, double 5.0, i64 0 ret <1 x double> %r } @@ -389,6 +547,12 @@ define <2 x double> @insertelement_v2f64(<2 x double> %op1) { ; CHECK-NEXT: mov z0.d, p0/m, d1 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov d1, #5.00000000 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %r = insertelement <2 x double> %op1, double 5.0, i64 1 ret <2 x double> %r } @@ -406,6 +570,14 @@ define <4 x double> @insertelement_v4f64(ptr %a) { ; CHECK-NEXT: mov z1.d, p0/m, d2 ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: insertelement_v4f64: +; 
NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov d0, #5.00000000 +; NONEON-NOSVE-NEXT: ldr q1, [x0, #16] +; NONEON-NOSVE-NEXT: mov v1.d[1], v0.d[0] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %r = insertelement <4 x double> %op1, double 5.0, i64 3 ret <4 x double> %r diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll index 8baa87c6d686de..da408a11e784d4 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll @@ -2,6 +2,7 @@ ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE ; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -16,6 +17,11 @@ define <4 x i8> @add_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: add z0.h, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = add <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -28,6 +34,11 @@ define <8 x i8> @add_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: add z0.b, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = add <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -40,6 +51,11 @@ define <16 x i8> @add_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: add z0.b, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = add <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -53,6 +69,15 @@ define void @add_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: add z1.b, z2.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: add v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = add <32 x i8> %op1, %op2 @@ -68,6 +93,11 @@ define <2 x i16> @add_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; CHECK-NEXT: add z0.s, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = add <2 x i16> %op1, %op2 ret <2 x i16> %res } @@ -80,6 +110,11 @@ define <4 x i16> @add_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: add z0.h, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = add <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ 
-92,6 +127,11 @@ define <8 x i16> @add_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: add z0.h, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = add <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -105,6 +145,15 @@ define void @add_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: add z1.h, z2.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: add v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = add <16 x i16> %op1, %op2 @@ -120,6 +169,11 @@ define <2 x i32> @add_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: add z0.s, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = add <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -132,6 +186,11 @@ define <4 x i32> @add_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: add z0.s, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = add <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -145,6 +204,15 @@ define void @add_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: add z1.s, z2.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: add v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: add v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = add <8 x i32> %op1, %op2 @@ -160,6 +228,11 @@ define <1 x i64> @add_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: add z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = add <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -172,6 +245,11 @@ define <2 x i64> @add_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: add z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: add v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = add <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -185,6 +263,15 @@ define void @add_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: add z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: add v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: add v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = add <4 x i64> %op1, %op2 @@ -213,6 +300,11 @@ define <4 x i8> @mul_v4i8(<4 x i8> %op1, 
<4 x i8> %op2) { ; SVE2-NEXT: mul z0.h, z0.h, z1.h ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mul v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = mul <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -234,6 +326,11 @@ define <8 x i8> @mul_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; SVE2-NEXT: mul z0.b, z0.b, z1.b ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mul v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = mul <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -255,6 +352,11 @@ define <16 x i8> @mul_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; SVE2-NEXT: mul z0.b, z0.b, z1.b ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mul v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = mul <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -279,6 +381,15 @@ define void @mul_v32i8(ptr %a, ptr %b) { ; SVE2-NEXT: mul z1.b, z2.b, z3.b ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: mul v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: mul v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = mul <32 x i8> %op1, %op2 @@ -303,6 +414,11 @@ define <2 x i16> @mul_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; SVE2-NEXT: mul z0.s, z0.s, z1.s ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mul v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = mul <2 x i16> %op1, %op2 ret <2 x i16> %res } @@ -324,6 +440,11 @@ define <4 x i16> @mul_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; SVE2-NEXT: mul z0.h, z0.h, z1.h ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mul v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = mul <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -345,6 +466,11 @@ define <8 x i16> @mul_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; SVE2-NEXT: mul z0.h, z0.h, z1.h ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mul v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = mul <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -369,6 +495,15 @@ define void @mul_v16i16(ptr %a, ptr %b) { ; SVE2-NEXT: mul z1.h, z2.h, z3.h ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: mul v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: mul v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = mul <16 x i16> %op1, %op2 @@ -393,6 +528,11 @@ define <2 x i32> @mul_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; SVE2-NEXT: mul z0.s, z0.s, z1.s ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mul v0.2s, v0.2s, v1.2s +; 
NONEON-NOSVE-NEXT: ret %res = mul <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -414,6 +554,11 @@ define <4 x i32> @mul_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; SVE2-NEXT: mul z0.s, z0.s, z1.s ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mul v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = mul <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -438,6 +583,15 @@ define void @mul_v8i32(ptr %a, ptr %b) { ; SVE2-NEXT: mul z1.s, z2.s, z3.s ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: mul v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: mul v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = mul <8 x i32> %op1, %op2 @@ -462,6 +616,16 @@ define <1 x i64> @mul_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; SVE2-NEXT: mul z0.d, z0.d, z1.d ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: mul x8, x9, x8 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %res = mul <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -483,6 +647,18 @@ define <2 x i64> @mul_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; SVE2-NEXT: mul z0.d, z0.d, z1.d ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x10, d1 +; NONEON-NOSVE-NEXT: fmov x11, d0 +; NONEON-NOSVE-NEXT: mov x8, v1.d[1] +; NONEON-NOSVE-NEXT: mov x9, v0.d[1] +; NONEON-NOSVE-NEXT: mul x10, x11, x10 +; NONEON-NOSVE-NEXT: mul x8, x9, x8 +; NONEON-NOSVE-NEXT: fmov d0, x10 +; NONEON-NOSVE-NEXT: mov v0.d[1], x8 +; NONEON-NOSVE-NEXT: ret %res = mul <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -507,6 +683,29 @@ define void @mul_v4i64(ptr %a, ptr %b) { ; SVE2-NEXT: mul z1.d, z2.d, z3.d ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: fmov x12, d2 +; NONEON-NOSVE-NEXT: mov x11, v2.d[1] +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: mov x10, v3.d[1] +; NONEON-NOSVE-NEXT: mov x13, v1.d[1] +; NONEON-NOSVE-NEXT: mov x14, v0.d[1] +; NONEON-NOSVE-NEXT: mul x8, x9, x8 +; NONEON-NOSVE-NEXT: fmov x9, d3 +; NONEON-NOSVE-NEXT: mul x10, x11, x10 +; NONEON-NOSVE-NEXT: mul x9, x12, x9 +; NONEON-NOSVE-NEXT: fmov d1, x8 +; NONEON-NOSVE-NEXT: mul x11, x14, x13 +; NONEON-NOSVE-NEXT: fmov d0, x9 +; NONEON-NOSVE-NEXT: mov v1.d[1], x11 +; NONEON-NOSVE-NEXT: mov v0.d[1], x10 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = mul <4 x i64> %op1, %op2 @@ -526,6 +725,11 @@ define <4 x i8> @sub_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: sub z0.h, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res 
= sub <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -538,6 +742,11 @@ define <8 x i8> @sub_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: sub z0.b, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = sub <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -550,6 +759,11 @@ define <16 x i8> @sub_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: sub z0.b, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = sub <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -563,6 +777,15 @@ define void @sub_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: sub z1.b, z2.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: sub v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: sub v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = sub <32 x i8> %op1, %op2 @@ -578,6 +801,11 @@ define <2 x i16> @sub_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; CHECK-NEXT: sub z0.s, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = sub <2 x i16> %op1, %op2 ret <2 x i16> %res } @@ -590,6 +818,11 @@ define <4 x i16> @sub_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: sub z0.h, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = sub <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -602,6 +835,11 @@ define <8 x i16> @sub_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: sub z0.h, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = sub <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -615,6 +853,15 @@ define void @sub_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: sub z1.h, z2.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: sub v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: sub v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = sub <16 x i16> %op1, %op2 @@ -630,6 +877,11 @@ define <2 x i32> @sub_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: sub z0.s, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = sub <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -642,6 +894,11 @@ define <4 x i32> @sub_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: sub z0.s, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 
killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = sub <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -655,6 +912,15 @@ define void @sub_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: sub z1.s, z2.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: sub v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: sub v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = sub <8 x i32> %op1, %op2 @@ -670,6 +936,11 @@ define <1 x i64> @sub_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: sub z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = sub <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -682,6 +953,11 @@ define <2 x i64> @sub_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: sub z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = sub <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -695,6 +971,15 @@ define void @sub_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: sub z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: sub v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: sub v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = sub <4 x i64> %op1, %op2 @@ -715,6 +1000,13 @@ define <4 x i8> @abs_v4i8(<4 x i8> %op1) { ; CHECK-NEXT: abs z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: sshr v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: abs v0.4h, v0.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i8> @llvm.abs.v4i8(<4 x i8> %op1, i1 false) ret <4 x i8> %res } @@ -727,6 +1019,11 @@ define <8 x i8> @abs_v8i8(<8 x i8> %op1) { ; CHECK-NEXT: abs z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: abs v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i8> @llvm.abs.v8i8(<8 x i8> %op1, i1 false) ret <8 x i8> %res } @@ -739,6 +1036,11 @@ define <16 x i8> @abs_v16i8(<16 x i8> %op1) { ; CHECK-NEXT: abs z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: abs v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %op1, i1 false) ret <16 x i8> %res } @@ -752,6 +1054,14 @@ define void @abs_v32i8(ptr %a) { ; CHECK-NEXT: abs z1.b, p0/m, z1.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: abs v0.16b, 
v0.16b +; NONEON-NOSVE-NEXT: abs v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %res = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %op1, i1 false) store <32 x i8> %res, ptr %a @@ -767,6 +1077,13 @@ define <2 x i16> @abs_v2i16(<2 x i16> %op1) { ; CHECK-NEXT: abs z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: sshr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: abs v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %op1, i1 false) ret <2 x i16> %res } @@ -779,6 +1096,11 @@ define <4 x i16> @abs_v4i16(<4 x i16> %op1) { ; CHECK-NEXT: abs z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: abs v0.4h, v0.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %op1, i1 false) ret <4 x i16> %res } @@ -791,6 +1113,11 @@ define <8 x i16> @abs_v8i16(<8 x i16> %op1) { ; CHECK-NEXT: abs z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: abs v0.8h, v0.8h +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %op1, i1 false) ret <8 x i16> %res } @@ -804,6 +1131,14 @@ define void @abs_v16i16(ptr %a) { ; CHECK-NEXT: abs z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: abs v0.8h, v0.8h +; NONEON-NOSVE-NEXT: abs v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %res = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %op1, i1 false) store <16 x i16> %res, ptr %a @@ -818,6 +1153,11 @@ define <2 x i32> @abs_v2i32(<2 x i32> %op1) { ; CHECK-NEXT: abs z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: abs v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %op1, i1 false) ret <2 x i32> %res } @@ -830,6 +1170,11 @@ define <4 x i32> @abs_v4i32(<4 x i32> %op1) { ; CHECK-NEXT: abs z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: abs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %op1, i1 false) ret <4 x i32> %res } @@ -843,6 +1188,14 @@ define void @abs_v8i32(ptr %a) { ; CHECK-NEXT: abs z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: abs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: abs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %op1, i1 false) store <8 x i32> %res, ptr %a @@ -857,6 +1210,11 @@ define <1 x i64> @abs_v1i64(<1 x i64> %op1) { ; CHECK-NEXT: abs z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: abs 
d0, d0 +; NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.abs.v1i64(<1 x i64> %op1, i1 false) ret <1 x i64> %res } @@ -869,6 +1227,11 @@ define <2 x i64> @abs_v2i64(<2 x i64> %op1) { ; CHECK-NEXT: abs z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: abs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %op1, i1 false) ret <2 x i64> %res } @@ -882,6 +1245,14 @@ define void @abs_v4i64(ptr %a) { ; CHECK-NEXT: abs z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: abs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: abs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %res = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %op1, i1 false) store <4 x i64> %res, ptr %a diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll index 73c1eac99dd303..3148d4f1677cd5 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -18,6 +19,11 @@ define <8 x i8> @icmp_eq_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmeq v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %cmp = icmp eq <8 x i8> %op1, %op2 %sext = sext <8 x i1> %cmp to <8 x i8> ret <8 x i8> %sext @@ -33,6 +39,11 @@ define <16 x i8> @icmp_eq_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmeq v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %cmp = icmp eq <16 x i8> %op1, %op2 %sext = sext <16 x i1> %cmp to <16 x i8> ret <16 x i8> %sext @@ -50,6 +61,15 @@ define void @icmp_eq_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmeq v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: cmeq v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %cmp = icmp eq <32 x i8> %op1, %op2 @@ -68,6 +88,11 @@ define <4 x i16> @icmp_eq_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmeq v0.4h, 
v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %cmp = icmp eq <4 x i16> %op1, %op2 %sext = sext <4 x i1> %cmp to <4 x i16> ret <4 x i16> %sext @@ -83,6 +108,11 @@ define <8 x i16> @icmp_eq_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmeq v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %cmp = icmp eq <8 x i16> %op1, %op2 %sext = sext <8 x i1> %cmp to <8 x i16> ret <8 x i16> %sext @@ -100,6 +130,15 @@ define void @icmp_eq_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmeq v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: cmeq v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %cmp = icmp eq <16 x i16> %op1, %op2 @@ -118,6 +157,11 @@ define <2 x i32> @icmp_eq_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmeq v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %cmp = icmp eq <2 x i32> %op1, %op2 %sext = sext <2 x i1> %cmp to <2 x i32> ret <2 x i32> %sext @@ -133,6 +177,11 @@ define <4 x i32> @icmp_eq_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmeq v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %cmp = icmp eq <4 x i32> %op1, %op2 %sext = sext <4 x i1> %cmp to <4 x i32> ret <4 x i32> %sext @@ -150,6 +199,15 @@ define void @icmp_eq_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmeq v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: cmeq v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %cmp = icmp eq <8 x i32> %op1, %op2 @@ -168,6 +226,11 @@ define <1 x i64> @icmp_eq_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmeq d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %cmp = icmp eq <1 x i64> %op1, %op2 %sext = sext <1 x i1> %cmp to <1 x i64> ret <1 x i64> %sext @@ -183,6 +246,11 @@ define <2 x i64> @icmp_eq_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmeq v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %cmp = icmp eq <2 x i64> %op1, %op2 %sext = sext <2 x i1> %cmp to <2 x i64> ret <2 x i64> %sext @@ -200,6 +268,15 
@@ define void @icmp_eq_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmeq v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: cmeq v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %cmp = icmp eq <4 x i64> %op1, %op2 @@ -224,6 +301,17 @@ define void @icmp_ne_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_ne_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmeq v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: cmeq v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: mvn v0.16b, v0.16b +; NONEON-NOSVE-NEXT: mvn v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %cmp = icmp ne <32 x i8> %op1, %op2 @@ -246,6 +334,14 @@ define void @icmp_sge_v8i16(ptr %a, ptr %b) { ; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_sge_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: cmge v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i16>, ptr %a %op2 = load <8 x i16>, ptr %b %cmp = icmp sge <8 x i16> %op1, %op2 @@ -270,6 +366,15 @@ define void @icmp_sgt_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_sgt_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmgt v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: cmgt v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %cmp = icmp sgt <16 x i16> %op1, %op2 @@ -292,6 +397,14 @@ define void @icmp_sle_v4i32(ptr %a, ptr %b) { ; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_sle_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: cmge v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i32>, ptr %a %op2 = load <4 x i32>, ptr %b %cmp = icmp sle <4 x i32> %op1, %op2 @@ -316,6 +429,15 @@ define void @icmp_slt_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_slt_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmgt v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: cmgt v1.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %cmp = icmp slt <8 x i32> %op1, %op2 @@ -338,6 +460,14 @@ define void @icmp_uge_v2i64(ptr %a, ptr %b) { ; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: str q0, [x0] ; 
CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_uge_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: cmhs v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x i64>, ptr %a %op2 = load <2 x i64>, ptr %b %cmp = icmp uge <2 x i64> %op1, %op2 @@ -360,6 +490,14 @@ define void @icmp_ugt_v2i64(ptr %a, ptr %b) { ; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_ugt_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: cmhi v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x i64>, ptr %a %op2 = load <2 x i64>, ptr %b %cmp = icmp ugt <2 x i64> %op1, %op2 @@ -382,6 +520,14 @@ define void @icmp_ule_v2i64(ptr %a, ptr %b) { ; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_ule_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: cmhs v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x i64>, ptr %a %op2 = load <2 x i64>, ptr %b %cmp = icmp ule <2 x i64> %op1, %op2 @@ -404,6 +550,14 @@ define void @icmp_ult_v2i64(ptr %a, ptr %b) { ; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_ult_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: cmhi v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x i64>, ptr %a %op2 = load <2 x i64>, ptr %b %cmp = icmp ult <2 x i64> %op1, %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll index 5158dda37a8b9d..27a4924ea367cb 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll @@ -2,6 +2,7 @@ ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE ; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -24,6 +25,31 @@ define <4 x i8> @sdiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: shl v1.4h, v1.4h, #8 +; NONEON-NOSVE-NEXT: sshr v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: sshr v1.4h, v1.4h, #8 +; NONEON-NOSVE-NEXT: smov w8, v1.h[1] +; NONEON-NOSVE-NEXT: smov w9, v0.h[1] +; NONEON-NOSVE-NEXT: smov w10, v0.h[0] +; NONEON-NOSVE-NEXT: smov w11, v0.h[2] +; NONEON-NOSVE-NEXT: smov w12, v0.h[3] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: smov w9, v1.h[0] +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.h[2] +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: smov 
w11, v1.h[3] +; NONEON-NOSVE-NEXT: fmov s0, w9 +; NONEON-NOSVE-NEXT: mov v0.h[1], w8 +; NONEON-NOSVE-NEXT: sdiv w8, w12, w11 +; NONEON-NOSVE-NEXT: mov v0.h[2], w10 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = sdiv <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -51,6 +77,45 @@ define <8 x i8> @sdiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: uzp1 z0.b, z1.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: smov w8, v1.b[1] +; NONEON-NOSVE-NEXT: smov w9, v0.b[1] +; NONEON-NOSVE-NEXT: smov w10, v0.b[0] +; NONEON-NOSVE-NEXT: smov w11, v0.b[2] +; NONEON-NOSVE-NEXT: smov w12, v0.b[3] +; NONEON-NOSVE-NEXT: smov w13, v0.b[4] +; NONEON-NOSVE-NEXT: smov w14, v0.b[5] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: smov w9, v1.b[0] +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.b[2] +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: smov w11, v1.b[3] +; NONEON-NOSVE-NEXT: fmov s2, w9 +; NONEON-NOSVE-NEXT: smov w9, v1.b[6] +; NONEON-NOSVE-NEXT: mov v2.b[1], w8 +; NONEON-NOSVE-NEXT: sdiv w11, w12, w11 +; NONEON-NOSVE-NEXT: smov w12, v1.b[4] +; NONEON-NOSVE-NEXT: mov v2.b[2], w10 +; NONEON-NOSVE-NEXT: smov w10, v0.b[6] +; NONEON-NOSVE-NEXT: sdiv w12, w13, w12 +; NONEON-NOSVE-NEXT: smov w13, v1.b[5] +; NONEON-NOSVE-NEXT: mov v2.b[3], w11 +; NONEON-NOSVE-NEXT: smov w11, v0.b[7] +; NONEON-NOSVE-NEXT: sdiv w8, w14, w13 +; NONEON-NOSVE-NEXT: mov v2.b[4], w12 +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.b[7] +; NONEON-NOSVE-NEXT: mov v2.b[5], w8 +; NONEON-NOSVE-NEXT: sdiv w8, w11, w10 +; NONEON-NOSVE-NEXT: mov v2.b[6], w9 +; NONEON-NOSVE-NEXT: mov v2.b[7], w8 +; NONEON-NOSVE-NEXT: fmov d0, d2 +; NONEON-NOSVE-NEXT: ret %res = sdiv <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -98,6 +163,75 @@ define <16 x i8> @sdiv_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smov w8, v1.b[1] +; NONEON-NOSVE-NEXT: smov w9, v0.b[1] +; NONEON-NOSVE-NEXT: smov w10, v0.b[0] +; NONEON-NOSVE-NEXT: smov w11, v0.b[2] +; NONEON-NOSVE-NEXT: smov w12, v0.b[3] +; NONEON-NOSVE-NEXT: smov w13, v0.b[4] +; NONEON-NOSVE-NEXT: smov w14, v0.b[5] +; NONEON-NOSVE-NEXT: smov w15, v0.b[6] +; NONEON-NOSVE-NEXT: smov w16, v0.b[7] +; NONEON-NOSVE-NEXT: smov w17, v0.b[8] +; NONEON-NOSVE-NEXT: smov w18, v0.b[9] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: smov w9, v1.b[0] +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.b[2] +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: smov w11, v1.b[3] +; NONEON-NOSVE-NEXT: fmov s2, w9 +; NONEON-NOSVE-NEXT: smov w9, v1.b[10] +; NONEON-NOSVE-NEXT: mov v2.b[1], w8 +; NONEON-NOSVE-NEXT: sdiv w11, w12, w11 +; NONEON-NOSVE-NEXT: smov w12, v1.b[4] +; NONEON-NOSVE-NEXT: mov v2.b[2], w10 +; NONEON-NOSVE-NEXT: smov w10, v0.b[10] +; NONEON-NOSVE-NEXT: sdiv w12, w13, w12 +; NONEON-NOSVE-NEXT: smov w13, v1.b[5] +; NONEON-NOSVE-NEXT: mov v2.b[3], w11 +; NONEON-NOSVE-NEXT: smov w11, v0.b[11] +; NONEON-NOSVE-NEXT: sdiv w13, w14, w13 +; NONEON-NOSVE-NEXT: smov w14, v1.b[6] +; NONEON-NOSVE-NEXT: mov 
v2.b[4], w12 +; NONEON-NOSVE-NEXT: smov w12, v0.b[12] +; NONEON-NOSVE-NEXT: sdiv w14, w15, w14 +; NONEON-NOSVE-NEXT: smov w15, v1.b[7] +; NONEON-NOSVE-NEXT: mov v2.b[5], w13 +; NONEON-NOSVE-NEXT: smov w13, v0.b[13] +; NONEON-NOSVE-NEXT: sdiv w15, w16, w15 +; NONEON-NOSVE-NEXT: smov w16, v1.b[8] +; NONEON-NOSVE-NEXT: mov v2.b[6], w14 +; NONEON-NOSVE-NEXT: sdiv w16, w17, w16 +; NONEON-NOSVE-NEXT: smov w17, v1.b[9] +; NONEON-NOSVE-NEXT: mov v2.b[7], w15 +; NONEON-NOSVE-NEXT: sdiv w8, w18, w17 +; NONEON-NOSVE-NEXT: mov v2.b[8], w16 +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.b[11] +; NONEON-NOSVE-NEXT: mov v2.b[9], w8 +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: smov w11, v1.b[12] +; NONEON-NOSVE-NEXT: mov v2.b[10], w9 +; NONEON-NOSVE-NEXT: smov w9, v1.b[14] +; NONEON-NOSVE-NEXT: sdiv w11, w12, w11 +; NONEON-NOSVE-NEXT: smov w12, v1.b[13] +; NONEON-NOSVE-NEXT: mov v2.b[11], w10 +; NONEON-NOSVE-NEXT: smov w10, v1.b[15] +; NONEON-NOSVE-NEXT: sdiv w8, w13, w12 +; NONEON-NOSVE-NEXT: smov w12, v0.b[14] +; NONEON-NOSVE-NEXT: mov v2.b[12], w11 +; NONEON-NOSVE-NEXT: smov w11, v0.b[15] +; NONEON-NOSVE-NEXT: sdiv w9, w12, w9 +; NONEON-NOSVE-NEXT: mov v2.b[13], w8 +; NONEON-NOSVE-NEXT: sdiv w8, w11, w10 +; NONEON-NOSVE-NEXT: mov v2.b[14], w9 +; NONEON-NOSVE-NEXT: mov v2.b[15], w8 +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = sdiv <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -178,6 +312,163 @@ define void @sdiv_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: splice z3.b, p0, z3.b, z1.b ; CHECK-NEXT: stp q3, q2, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str x27, [sp, #-80]! // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x26, x25, [sp, #16] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x24, x23, [sp, #32] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 80 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -40 +; NONEON-NOSVE-NEXT: .cfi_offset w24, -48 +; NONEON-NOSVE-NEXT: .cfi_offset w25, -56 +; NONEON-NOSVE-NEXT: .cfi_offset w26, -64 +; NONEON-NOSVE-NEXT: .cfi_offset w27, -80 +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: smov w8, v1.b[1] +; NONEON-NOSVE-NEXT: smov w9, v0.b[1] +; NONEON-NOSVE-NEXT: smov w10, v0.b[0] +; NONEON-NOSVE-NEXT: smov w11, v0.b[2] +; NONEON-NOSVE-NEXT: smov w12, v0.b[3] +; NONEON-NOSVE-NEXT: smov w13, v0.b[4] +; NONEON-NOSVE-NEXT: smov w14, v0.b[5] +; NONEON-NOSVE-NEXT: smov w15, v0.b[6] +; NONEON-NOSVE-NEXT: smov w17, v0.b[8] +; NONEON-NOSVE-NEXT: smov w2, v0.b[10] +; NONEON-NOSVE-NEXT: smov w3, v0.b[11] +; NONEON-NOSVE-NEXT: smov w4, v0.b[12] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: smov w9, v1.b[0] +; NONEON-NOSVE-NEXT: smov w5, v0.b[13] +; NONEON-NOSVE-NEXT: smov w6, v0.b[14] +; NONEON-NOSVE-NEXT: smov w1, v3.b[1] +; NONEON-NOSVE-NEXT: smov w7, v2.b[0] +; NONEON-NOSVE-NEXT: smov w19, v2.b[2] +; NONEON-NOSVE-NEXT: smov w20, v2.b[3] +; NONEON-NOSVE-NEXT: smov w21, v2.b[4] +; NONEON-NOSVE-NEXT: smov w22, v2.b[5] +; NONEON-NOSVE-NEXT: smov w23, v2.b[6] +; NONEON-NOSVE-NEXT: smov w24, v2.b[7] 
+; NONEON-NOSVE-NEXT: smov w25, v2.b[8] +; NONEON-NOSVE-NEXT: smov w26, v2.b[9] +; NONEON-NOSVE-NEXT: smov w27, v2.b[10] +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.b[2] +; NONEON-NOSVE-NEXT: sdiv w11, w11, w10 +; NONEON-NOSVE-NEXT: smov w10, v1.b[3] +; NONEON-NOSVE-NEXT: fmov s5, w9 +; NONEON-NOSVE-NEXT: smov w9, v3.b[11] +; NONEON-NOSVE-NEXT: mov v5.b[1], w8 +; NONEON-NOSVE-NEXT: sdiv w10, w12, w10 +; NONEON-NOSVE-NEXT: smov w12, v1.b[4] +; NONEON-NOSVE-NEXT: mov v5.b[2], w11 +; NONEON-NOSVE-NEXT: smov w11, v2.b[11] +; NONEON-NOSVE-NEXT: sdiv w13, w13, w12 +; NONEON-NOSVE-NEXT: smov w12, v1.b[5] +; NONEON-NOSVE-NEXT: mov v5.b[3], w10 +; NONEON-NOSVE-NEXT: smov w10, v3.b[12] +; NONEON-NOSVE-NEXT: sdiv w12, w14, w12 +; NONEON-NOSVE-NEXT: smov w14, v1.b[6] +; NONEON-NOSVE-NEXT: mov v5.b[4], w13 +; NONEON-NOSVE-NEXT: smov w13, v2.b[14] +; NONEON-NOSVE-NEXT: sdiv w16, w15, w14 +; NONEON-NOSVE-NEXT: smov w14, v1.b[7] +; NONEON-NOSVE-NEXT: smov w15, v0.b[7] +; NONEON-NOSVE-NEXT: mov v5.b[5], w12 +; NONEON-NOSVE-NEXT: smov w12, v2.b[13] +; NONEON-NOSVE-NEXT: sdiv w14, w15, w14 +; NONEON-NOSVE-NEXT: smov w15, v1.b[8] +; NONEON-NOSVE-NEXT: mov v5.b[6], w16 +; NONEON-NOSVE-NEXT: sdiv w18, w17, w15 +; NONEON-NOSVE-NEXT: smov w15, v1.b[9] +; NONEON-NOSVE-NEXT: smov w17, v0.b[9] +; NONEON-NOSVE-NEXT: mov v5.b[7], w14 +; NONEON-NOSVE-NEXT: sdiv w17, w17, w15 +; NONEON-NOSVE-NEXT: smov w15, v1.b[10] +; NONEON-NOSVE-NEXT: mov v5.b[8], w18 +; NONEON-NOSVE-NEXT: sdiv w15, w2, w15 +; NONEON-NOSVE-NEXT: smov w2, v1.b[11] +; NONEON-NOSVE-NEXT: mov v5.b[9], w17 +; NONEON-NOSVE-NEXT: sdiv w2, w3, w2 +; NONEON-NOSVE-NEXT: smov w3, v1.b[12] +; NONEON-NOSVE-NEXT: mov v5.b[10], w15 +; NONEON-NOSVE-NEXT: sdiv w3, w4, w3 +; NONEON-NOSVE-NEXT: smov w4, v1.b[13] +; NONEON-NOSVE-NEXT: mov v5.b[11], w2 +; NONEON-NOSVE-NEXT: sdiv w4, w5, w4 +; NONEON-NOSVE-NEXT: smov w5, v1.b[14] +; NONEON-NOSVE-NEXT: mov v5.b[12], w3 +; NONEON-NOSVE-NEXT: sdiv w5, w6, w5 +; NONEON-NOSVE-NEXT: smov w6, v2.b[1] +; NONEON-NOSVE-NEXT: mov v5.b[13], w4 +; NONEON-NOSVE-NEXT: sdiv w1, w6, w1 +; NONEON-NOSVE-NEXT: smov w6, v3.b[0] +; NONEON-NOSVE-NEXT: mov v5.b[14], w5 +; NONEON-NOSVE-NEXT: sdiv w6, w7, w6 +; NONEON-NOSVE-NEXT: smov w7, v3.b[2] +; NONEON-NOSVE-NEXT: sdiv w7, w19, w7 +; NONEON-NOSVE-NEXT: smov w19, v3.b[3] +; NONEON-NOSVE-NEXT: fmov s4, w6 +; NONEON-NOSVE-NEXT: mov v4.b[1], w1 +; NONEON-NOSVE-NEXT: sdiv w19, w20, w19 +; NONEON-NOSVE-NEXT: smov w20, v3.b[4] +; NONEON-NOSVE-NEXT: mov v4.b[2], w7 +; NONEON-NOSVE-NEXT: sdiv w20, w21, w20 +; NONEON-NOSVE-NEXT: smov w21, v3.b[5] +; NONEON-NOSVE-NEXT: mov v4.b[3], w19 +; NONEON-NOSVE-NEXT: sdiv w21, w22, w21 +; NONEON-NOSVE-NEXT: smov w22, v3.b[6] +; NONEON-NOSVE-NEXT: mov v4.b[4], w20 +; NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w22, w23, w22 +; NONEON-NOSVE-NEXT: smov w23, v3.b[7] +; NONEON-NOSVE-NEXT: mov v4.b[5], w21 +; NONEON-NOSVE-NEXT: sdiv w23, w24, w23 +; NONEON-NOSVE-NEXT: smov w24, v3.b[8] +; NONEON-NOSVE-NEXT: mov v4.b[6], w22 +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w24, w25, w24 +; NONEON-NOSVE-NEXT: smov w25, v3.b[9] +; NONEON-NOSVE-NEXT: mov v4.b[7], w23 +; NONEON-NOSVE-NEXT: sdiv w25, w26, w25 +; NONEON-NOSVE-NEXT: smov w26, v3.b[10] +; NONEON-NOSVE-NEXT: mov v4.b[8], w24 +; NONEON-NOSVE-NEXT: ldp x24, x23, [sp, #32] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w8, w27, w26 +; NONEON-NOSVE-NEXT: mov v4.b[9], w25 +; 
NONEON-NOSVE-NEXT: ldp x26, x25, [sp, #16] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w9, w11, w9 +; NONEON-NOSVE-NEXT: smov w11, v2.b[12] +; NONEON-NOSVE-NEXT: mov v4.b[10], w8 +; NONEON-NOSVE-NEXT: smov w8, v3.b[15] +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: smov w11, v3.b[13] +; NONEON-NOSVE-NEXT: mov v4.b[11], w9 +; NONEON-NOSVE-NEXT: smov w9, v1.b[15] +; NONEON-NOSVE-NEXT: sdiv w11, w12, w11 +; NONEON-NOSVE-NEXT: smov w12, v3.b[14] +; NONEON-NOSVE-NEXT: mov v4.b[12], w10 +; NONEON-NOSVE-NEXT: smov w10, v0.b[15] +; NONEON-NOSVE-NEXT: sdiv w12, w13, w12 +; NONEON-NOSVE-NEXT: smov w13, v2.b[15] +; NONEON-NOSVE-NEXT: mov v4.b[13], w11 +; NONEON-NOSVE-NEXT: sdiv w8, w13, w8 +; NONEON-NOSVE-NEXT: mov v4.b[14], w12 +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: mov v4.b[15], w8 +; NONEON-NOSVE-NEXT: mov v5.b[15], w9 +; NONEON-NOSVE-NEXT: stp q4, q5, [x0] +; NONEON-NOSVE-NEXT: ldr x27, [sp], #80 // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = sdiv <32 x i8> %op1, %op2 @@ -196,6 +487,23 @@ define <2 x i16> @sdiv_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: shl v1.2s, v1.2s, #16 +; NONEON-NOSVE-NEXT: sshr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: sshr v1.2s, v1.2s, #16 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: fmov w9, s0 +; NONEON-NOSVE-NEXT: mov w10, v0.s[1] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: mov w9, v1.s[1] +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: mov v0.s[1], w9 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = sdiv <2 x i16> %op1, %op2 ret <2 x i16> %res } @@ -212,6 +520,29 @@ define <4 x i16> @sdiv_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: smov w8, v1.h[1] +; NONEON-NOSVE-NEXT: smov w9, v0.h[1] +; NONEON-NOSVE-NEXT: smov w10, v0.h[0] +; NONEON-NOSVE-NEXT: smov w11, v0.h[2] +; NONEON-NOSVE-NEXT: smov w12, v0.h[3] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: smov w9, v1.h[0] +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.h[2] +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: smov w11, v1.h[3] +; NONEON-NOSVE-NEXT: fmov s0, w9 +; NONEON-NOSVE-NEXT: mov v0.h[1], w8 +; NONEON-NOSVE-NEXT: sdiv w8, w12, w11 +; NONEON-NOSVE-NEXT: mov v0.h[2], w10 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = sdiv <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -238,6 +569,43 @@ define <8 x i16> @sdiv_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smov w8, v1.h[1] +; NONEON-NOSVE-NEXT: smov w9, v0.h[1] +; NONEON-NOSVE-NEXT: smov w10, v0.h[0] +; NONEON-NOSVE-NEXT: smov w11, v0.h[2] +; NONEON-NOSVE-NEXT: smov 
w12, v0.h[3] +; NONEON-NOSVE-NEXT: smov w13, v0.h[4] +; NONEON-NOSVE-NEXT: smov w14, v0.h[5] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: smov w9, v1.h[0] +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.h[2] +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: smov w11, v1.h[3] +; NONEON-NOSVE-NEXT: fmov s2, w9 +; NONEON-NOSVE-NEXT: smov w9, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[1], w8 +; NONEON-NOSVE-NEXT: sdiv w11, w12, w11 +; NONEON-NOSVE-NEXT: smov w12, v1.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[2], w10 +; NONEON-NOSVE-NEXT: smov w10, v0.h[6] +; NONEON-NOSVE-NEXT: sdiv w12, w13, w12 +; NONEON-NOSVE-NEXT: smov w13, v1.h[5] +; NONEON-NOSVE-NEXT: mov v2.h[3], w11 +; NONEON-NOSVE-NEXT: smov w11, v0.h[7] +; NONEON-NOSVE-NEXT: sdiv w8, w14, w13 +; NONEON-NOSVE-NEXT: mov v2.h[4], w12 +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.h[7] +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: sdiv w8, w11, w10 +; NONEON-NOSVE-NEXT: mov v2.h[6], w9 +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = sdiv <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -278,6 +646,79 @@ define void @sdiv_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: splice z3.h, p0, z3.h, z1.h ; CHECK-NEXT: stp q3, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: smov w8, v1.h[1] +; NONEON-NOSVE-NEXT: smov w9, v0.h[1] +; NONEON-NOSVE-NEXT: smov w10, v0.h[0] +; NONEON-NOSVE-NEXT: smov w11, v0.h[2] +; NONEON-NOSVE-NEXT: smov w12, v0.h[3] +; NONEON-NOSVE-NEXT: smov w13, v0.h[4] +; NONEON-NOSVE-NEXT: smov w14, v0.h[5] +; NONEON-NOSVE-NEXT: smov w15, v0.h[6] +; NONEON-NOSVE-NEXT: smov w16, v2.h[1] +; NONEON-NOSVE-NEXT: smov w17, v2.h[0] +; NONEON-NOSVE-NEXT: smov w18, v2.h[2] +; NONEON-NOSVE-NEXT: smov w1, v2.h[3] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: smov w9, v1.h[0] +; NONEON-NOSVE-NEXT: smov w2, v2.h[4] +; NONEON-NOSVE-NEXT: smov w3, v2.h[5] +; NONEON-NOSVE-NEXT: smov w4, v2.h[6] +; NONEON-NOSVE-NEXT: sdiv w10, w10, w9 +; NONEON-NOSVE-NEXT: smov w9, v1.h[2] +; NONEON-NOSVE-NEXT: sdiv w9, w11, w9 +; NONEON-NOSVE-NEXT: smov w11, v1.h[3] +; NONEON-NOSVE-NEXT: fmov s5, w10 +; NONEON-NOSVE-NEXT: smov w10, v3.h[7] +; NONEON-NOSVE-NEXT: mov v5.h[1], w8 +; NONEON-NOSVE-NEXT: sdiv w11, w12, w11 +; NONEON-NOSVE-NEXT: smov w12, v1.h[4] +; NONEON-NOSVE-NEXT: mov v5.h[2], w9 +; NONEON-NOSVE-NEXT: smov w9, v2.h[7] +; NONEON-NOSVE-NEXT: sdiv w12, w13, w12 +; NONEON-NOSVE-NEXT: smov w13, v1.h[5] +; NONEON-NOSVE-NEXT: mov v5.h[3], w11 +; NONEON-NOSVE-NEXT: smov w11, v0.h[7] +; NONEON-NOSVE-NEXT: sdiv w13, w14, w13 +; NONEON-NOSVE-NEXT: smov w14, v1.h[6] +; NONEON-NOSVE-NEXT: mov v5.h[4], w12 +; NONEON-NOSVE-NEXT: sdiv w14, w15, w14 +; NONEON-NOSVE-NEXT: smov w15, v3.h[1] +; NONEON-NOSVE-NEXT: mov v5.h[5], w13 +; NONEON-NOSVE-NEXT: sdiv w15, w16, w15 +; NONEON-NOSVE-NEXT: smov w16, v3.h[0] +; NONEON-NOSVE-NEXT: mov v5.h[6], w14 +; NONEON-NOSVE-NEXT: sdiv w16, w17, w16 +; NONEON-NOSVE-NEXT: smov w17, v3.h[2] +; NONEON-NOSVE-NEXT: sdiv w17, w18, w17 +; NONEON-NOSVE-NEXT: smov w18, v3.h[3] +; NONEON-NOSVE-NEXT: fmov s4, w16 +; NONEON-NOSVE-NEXT: mov v4.h[1], w15 +; NONEON-NOSVE-NEXT: sdiv w18, w1, w18 +; NONEON-NOSVE-NEXT: smov w1, v3.h[4] +; NONEON-NOSVE-NEXT: mov v4.h[2], w17 +; 
NONEON-NOSVE-NEXT: sdiv w1, w2, w1 +; NONEON-NOSVE-NEXT: smov w2, v3.h[5] +; NONEON-NOSVE-NEXT: mov v4.h[3], w18 +; NONEON-NOSVE-NEXT: sdiv w2, w3, w2 +; NONEON-NOSVE-NEXT: smov w3, v3.h[6] +; NONEON-NOSVE-NEXT: mov v4.h[4], w1 +; NONEON-NOSVE-NEXT: sdiv w8, w4, w3 +; NONEON-NOSVE-NEXT: mov v4.h[5], w2 +; NONEON-NOSVE-NEXT: sdiv w9, w9, w10 +; NONEON-NOSVE-NEXT: smov w10, v1.h[7] +; NONEON-NOSVE-NEXT: mov v4.h[6], w8 +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: mov v4.h[7], w9 +; NONEON-NOSVE-NEXT: mov v5.h[7], w10 +; NONEON-NOSVE-NEXT: stp q4, q5, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = sdiv <16 x i16> %op1, %op2 @@ -294,6 +735,21 @@ define <2 x i32> @sdiv_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: fmov w9, s0 +; NONEON-NOSVE-NEXT: mov w10, v0.s[1] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: mov w9, v1.s[1] +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: mov v0.s[1], w9 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = sdiv <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -307,6 +763,26 @@ define <4 x i32> @sdiv_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, v1.s[1] +; NONEON-NOSVE-NEXT: mov w9, v0.s[1] +; NONEON-NOSVE-NEXT: fmov w10, s0 +; NONEON-NOSVE-NEXT: mov w11, v0.s[2] +; NONEON-NOSVE-NEXT: mov w12, v0.s[3] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: fmov w9, s1 +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: mov w10, v1.s[2] +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: mov w11, v1.s[3] +; NONEON-NOSVE-NEXT: fmov s0, w9 +; NONEON-NOSVE-NEXT: mov v0.s[1], w8 +; NONEON-NOSVE-NEXT: sdiv w8, w12, w11 +; NONEON-NOSVE-NEXT: mov v0.s[2], w10 +; NONEON-NOSVE-NEXT: mov v0.s[3], w8 +; NONEON-NOSVE-NEXT: ret %res = sdiv <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -322,6 +798,45 @@ define void @sdiv_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: sdiv z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: mov w9, v0.s[1] +; NONEON-NOSVE-NEXT: fmov w10, s0 +; NONEON-NOSVE-NEXT: mov w11, v0.s[2] +; NONEON-NOSVE-NEXT: mov w8, v1.s[1] +; NONEON-NOSVE-NEXT: mov w12, v2.s[1] +; NONEON-NOSVE-NEXT: fmov w13, s2 +; NONEON-NOSVE-NEXT: mov w14, v2.s[2] +; NONEON-NOSVE-NEXT: mov w15, v2.s[3] +; NONEON-NOSVE-NEXT: mov w16, v0.s[3] +; NONEON-NOSVE-NEXT: sdiv w8, w9, w8 +; NONEON-NOSVE-NEXT: fmov w9, s1 +; NONEON-NOSVE-NEXT: sdiv w9, w10, w9 +; NONEON-NOSVE-NEXT: mov w10, v1.s[2] +; NONEON-NOSVE-NEXT: sdiv w10, w11, w10 +; NONEON-NOSVE-NEXT: mov w11, v3.s[1] +; NONEON-NOSVE-NEXT: sdiv w11, w12, w11 +; NONEON-NOSVE-NEXT: fmov w12, s3 +; NONEON-NOSVE-NEXT: sdiv w12, w13, w12 +; NONEON-NOSVE-NEXT: mov w13, v3.s[2] +; NONEON-NOSVE-NEXT: sdiv w13, w14, w13 +; NONEON-NOSVE-NEXT: mov 
w14, v3.s[3] +; NONEON-NOSVE-NEXT: fmov s0, w12 +; NONEON-NOSVE-NEXT: mov v0.s[1], w11 +; NONEON-NOSVE-NEXT: sdiv w14, w15, w14 +; NONEON-NOSVE-NEXT: mov w15, v1.s[3] +; NONEON-NOSVE-NEXT: fmov s1, w9 +; NONEON-NOSVE-NEXT: mov v0.s[2], w13 +; NONEON-NOSVE-NEXT: mov v1.s[1], w8 +; NONEON-NOSVE-NEXT: mov v1.s[2], w10 +; NONEON-NOSVE-NEXT: sdiv w8, w16, w15 +; NONEON-NOSVE-NEXT: mov v0.s[3], w14 +; NONEON-NOSVE-NEXT: mov v1.s[3], w8 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = sdiv <8 x i32> %op1, %op2 @@ -338,6 +853,16 @@ define <1 x i64> @sdiv_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: sdiv z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: sdiv x8, x9, x8 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %res = sdiv <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -351,6 +876,18 @@ define <2 x i64> @sdiv_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: sdiv z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: mov x10, v0.d[1] +; NONEON-NOSVE-NEXT: sdiv x8, x9, x8 +; NONEON-NOSVE-NEXT: mov x9, v1.d[1] +; NONEON-NOSVE-NEXT: sdiv x9, x10, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; NONEON-NOSVE-NEXT: ret %res = sdiv <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -366,6 +903,29 @@ define void @sdiv_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: sdiv z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: mov x10, v2.d[1] +; NONEON-NOSVE-NEXT: fmov x11, d2 +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: mov x12, v0.d[1] +; NONEON-NOSVE-NEXT: sdiv x8, x9, x8 +; NONEON-NOSVE-NEXT: mov x9, v3.d[1] +; NONEON-NOSVE-NEXT: sdiv x9, x10, x9 +; NONEON-NOSVE-NEXT: fmov x10, d3 +; NONEON-NOSVE-NEXT: sdiv x10, x11, x10 +; NONEON-NOSVE-NEXT: mov x11, v1.d[1] +; NONEON-NOSVE-NEXT: fmov d1, x8 +; NONEON-NOSVE-NEXT: sdiv x11, x12, x11 +; NONEON-NOSVE-NEXT: fmov d0, x10 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; NONEON-NOSVE-NEXT: mov v1.d[1], x11 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = sdiv <4 x i64> %op1, %op2 @@ -391,6 +951,37 @@ define <4 x i8> @udiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: umov w8, v1.h[1] +; NONEON-NOSVE-NEXT: umov w9, v0.h[1] +; NONEON-NOSVE-NEXT: umov w10, v0.h[0] +; NONEON-NOSVE-NEXT: umov w11, v0.h[2] +; NONEON-NOSVE-NEXT: umov w12, v0.h[3] +; NONEON-NOSVE-NEXT: and w8, w8, #0xff +; NONEON-NOSVE-NEXT: and w9, w9, #0xff +; NONEON-NOSVE-NEXT: and w10, 
w10, #0xff +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: umov w9, v1.h[0] +; NONEON-NOSVE-NEXT: and w11, w11, #0xff +; NONEON-NOSVE-NEXT: and w9, w9, #0xff +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.h[2] +; NONEON-NOSVE-NEXT: and w10, w10, #0xff +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: umov w11, v1.h[3] +; NONEON-NOSVE-NEXT: fmov s0, w9 +; NONEON-NOSVE-NEXT: mov v0.h[1], w8 +; NONEON-NOSVE-NEXT: and w9, w11, #0xff +; NONEON-NOSVE-NEXT: and w11, w12, #0xff +; NONEON-NOSVE-NEXT: udiv w8, w11, w9 +; NONEON-NOSVE-NEXT: mov v0.h[2], w10 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = udiv <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -418,6 +1009,45 @@ define <8 x i8> @udiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: uzp1 z0.b, z1.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: umov w8, v1.b[1] +; NONEON-NOSVE-NEXT: umov w9, v0.b[1] +; NONEON-NOSVE-NEXT: umov w10, v0.b[0] +; NONEON-NOSVE-NEXT: umov w11, v0.b[2] +; NONEON-NOSVE-NEXT: umov w12, v0.b[3] +; NONEON-NOSVE-NEXT: umov w13, v0.b[4] +; NONEON-NOSVE-NEXT: umov w14, v0.b[5] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: umov w9, v1.b[0] +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.b[2] +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: umov w11, v1.b[3] +; NONEON-NOSVE-NEXT: fmov s2, w9 +; NONEON-NOSVE-NEXT: umov w9, v1.b[6] +; NONEON-NOSVE-NEXT: mov v2.b[1], w8 +; NONEON-NOSVE-NEXT: udiv w11, w12, w11 +; NONEON-NOSVE-NEXT: umov w12, v1.b[4] +; NONEON-NOSVE-NEXT: mov v2.b[2], w10 +; NONEON-NOSVE-NEXT: umov w10, v0.b[6] +; NONEON-NOSVE-NEXT: udiv w12, w13, w12 +; NONEON-NOSVE-NEXT: umov w13, v1.b[5] +; NONEON-NOSVE-NEXT: mov v2.b[3], w11 +; NONEON-NOSVE-NEXT: umov w11, v0.b[7] +; NONEON-NOSVE-NEXT: udiv w8, w14, w13 +; NONEON-NOSVE-NEXT: mov v2.b[4], w12 +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.b[7] +; NONEON-NOSVE-NEXT: mov v2.b[5], w8 +; NONEON-NOSVE-NEXT: udiv w8, w11, w10 +; NONEON-NOSVE-NEXT: mov v2.b[6], w9 +; NONEON-NOSVE-NEXT: mov v2.b[7], w8 +; NONEON-NOSVE-NEXT: fmov d0, d2 +; NONEON-NOSVE-NEXT: ret %res = udiv <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -465,6 +1095,75 @@ define <16 x i8> @udiv_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umov w8, v1.b[1] +; NONEON-NOSVE-NEXT: umov w9, v0.b[1] +; NONEON-NOSVE-NEXT: umov w10, v0.b[0] +; NONEON-NOSVE-NEXT: umov w11, v0.b[2] +; NONEON-NOSVE-NEXT: umov w12, v0.b[3] +; NONEON-NOSVE-NEXT: umov w13, v0.b[4] +; NONEON-NOSVE-NEXT: umov w14, v0.b[5] +; NONEON-NOSVE-NEXT: umov w15, v0.b[6] +; NONEON-NOSVE-NEXT: umov w16, v0.b[7] +; NONEON-NOSVE-NEXT: umov w17, v0.b[8] +; NONEON-NOSVE-NEXT: umov w18, v0.b[9] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: umov w9, v1.b[0] +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.b[2] +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: umov w11, v1.b[3] +; NONEON-NOSVE-NEXT: fmov s2, w9 +; NONEON-NOSVE-NEXT: umov w9, v1.b[10] +; NONEON-NOSVE-NEXT: 
mov v2.b[1], w8 +; NONEON-NOSVE-NEXT: udiv w11, w12, w11 +; NONEON-NOSVE-NEXT: umov w12, v1.b[4] +; NONEON-NOSVE-NEXT: mov v2.b[2], w10 +; NONEON-NOSVE-NEXT: umov w10, v0.b[10] +; NONEON-NOSVE-NEXT: udiv w12, w13, w12 +; NONEON-NOSVE-NEXT: umov w13, v1.b[5] +; NONEON-NOSVE-NEXT: mov v2.b[3], w11 +; NONEON-NOSVE-NEXT: umov w11, v0.b[11] +; NONEON-NOSVE-NEXT: udiv w13, w14, w13 +; NONEON-NOSVE-NEXT: umov w14, v1.b[6] +; NONEON-NOSVE-NEXT: mov v2.b[4], w12 +; NONEON-NOSVE-NEXT: umov w12, v0.b[12] +; NONEON-NOSVE-NEXT: udiv w14, w15, w14 +; NONEON-NOSVE-NEXT: umov w15, v1.b[7] +; NONEON-NOSVE-NEXT: mov v2.b[5], w13 +; NONEON-NOSVE-NEXT: umov w13, v0.b[13] +; NONEON-NOSVE-NEXT: udiv w15, w16, w15 +; NONEON-NOSVE-NEXT: umov w16, v1.b[8] +; NONEON-NOSVE-NEXT: mov v2.b[6], w14 +; NONEON-NOSVE-NEXT: udiv w16, w17, w16 +; NONEON-NOSVE-NEXT: umov w17, v1.b[9] +; NONEON-NOSVE-NEXT: mov v2.b[7], w15 +; NONEON-NOSVE-NEXT: udiv w8, w18, w17 +; NONEON-NOSVE-NEXT: mov v2.b[8], w16 +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.b[11] +; NONEON-NOSVE-NEXT: mov v2.b[9], w8 +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: umov w11, v1.b[12] +; NONEON-NOSVE-NEXT: mov v2.b[10], w9 +; NONEON-NOSVE-NEXT: umov w9, v1.b[14] +; NONEON-NOSVE-NEXT: udiv w11, w12, w11 +; NONEON-NOSVE-NEXT: umov w12, v1.b[13] +; NONEON-NOSVE-NEXT: mov v2.b[11], w10 +; NONEON-NOSVE-NEXT: umov w10, v1.b[15] +; NONEON-NOSVE-NEXT: udiv w8, w13, w12 +; NONEON-NOSVE-NEXT: umov w12, v0.b[14] +; NONEON-NOSVE-NEXT: mov v2.b[12], w11 +; NONEON-NOSVE-NEXT: umov w11, v0.b[15] +; NONEON-NOSVE-NEXT: udiv w9, w12, w9 +; NONEON-NOSVE-NEXT: mov v2.b[13], w8 +; NONEON-NOSVE-NEXT: udiv w8, w11, w10 +; NONEON-NOSVE-NEXT: mov v2.b[14], w9 +; NONEON-NOSVE-NEXT: mov v2.b[15], w8 +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = udiv <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -545,6 +1244,163 @@ define void @udiv_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: splice z3.b, p0, z3.b, z1.b ; CHECK-NEXT: stp q3, q2, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str x27, [sp, #-80]! 
// 8-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x26, x25, [sp, #16] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x24, x23, [sp, #32] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 80 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -40 +; NONEON-NOSVE-NEXT: .cfi_offset w24, -48 +; NONEON-NOSVE-NEXT: .cfi_offset w25, -56 +; NONEON-NOSVE-NEXT: .cfi_offset w26, -64 +; NONEON-NOSVE-NEXT: .cfi_offset w27, -80 +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: umov w8, v1.b[1] +; NONEON-NOSVE-NEXT: umov w9, v0.b[1] +; NONEON-NOSVE-NEXT: umov w10, v0.b[0] +; NONEON-NOSVE-NEXT: umov w11, v0.b[2] +; NONEON-NOSVE-NEXT: umov w12, v0.b[3] +; NONEON-NOSVE-NEXT: umov w13, v0.b[4] +; NONEON-NOSVE-NEXT: umov w14, v0.b[5] +; NONEON-NOSVE-NEXT: umov w15, v0.b[6] +; NONEON-NOSVE-NEXT: umov w17, v0.b[8] +; NONEON-NOSVE-NEXT: umov w2, v0.b[10] +; NONEON-NOSVE-NEXT: umov w3, v0.b[11] +; NONEON-NOSVE-NEXT: umov w4, v0.b[12] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: umov w9, v1.b[0] +; NONEON-NOSVE-NEXT: umov w5, v0.b[13] +; NONEON-NOSVE-NEXT: umov w6, v0.b[14] +; NONEON-NOSVE-NEXT: umov w1, v3.b[1] +; NONEON-NOSVE-NEXT: umov w7, v2.b[0] +; NONEON-NOSVE-NEXT: umov w19, v2.b[2] +; NONEON-NOSVE-NEXT: umov w20, v2.b[3] +; NONEON-NOSVE-NEXT: umov w21, v2.b[4] +; NONEON-NOSVE-NEXT: umov w22, v2.b[5] +; NONEON-NOSVE-NEXT: umov w23, v2.b[6] +; NONEON-NOSVE-NEXT: umov w24, v2.b[7] +; NONEON-NOSVE-NEXT: umov w25, v2.b[8] +; NONEON-NOSVE-NEXT: umov w26, v2.b[9] +; NONEON-NOSVE-NEXT: umov w27, v2.b[10] +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.b[2] +; NONEON-NOSVE-NEXT: udiv w11, w11, w10 +; NONEON-NOSVE-NEXT: umov w10, v1.b[3] +; NONEON-NOSVE-NEXT: fmov s5, w9 +; NONEON-NOSVE-NEXT: umov w9, v3.b[11] +; NONEON-NOSVE-NEXT: mov v5.b[1], w8 +; NONEON-NOSVE-NEXT: udiv w10, w12, w10 +; NONEON-NOSVE-NEXT: umov w12, v1.b[4] +; NONEON-NOSVE-NEXT: mov v5.b[2], w11 +; NONEON-NOSVE-NEXT: umov w11, v2.b[11] +; NONEON-NOSVE-NEXT: udiv w13, w13, w12 +; NONEON-NOSVE-NEXT: umov w12, v1.b[5] +; NONEON-NOSVE-NEXT: mov v5.b[3], w10 +; NONEON-NOSVE-NEXT: umov w10, v3.b[12] +; NONEON-NOSVE-NEXT: udiv w12, w14, w12 +; NONEON-NOSVE-NEXT: umov w14, v1.b[6] +; NONEON-NOSVE-NEXT: mov v5.b[4], w13 +; NONEON-NOSVE-NEXT: umov w13, v2.b[14] +; NONEON-NOSVE-NEXT: udiv w16, w15, w14 +; NONEON-NOSVE-NEXT: umov w14, v1.b[7] +; NONEON-NOSVE-NEXT: umov w15, v0.b[7] +; NONEON-NOSVE-NEXT: mov v5.b[5], w12 +; NONEON-NOSVE-NEXT: umov w12, v2.b[13] +; NONEON-NOSVE-NEXT: udiv w14, w15, w14 +; NONEON-NOSVE-NEXT: umov w15, v1.b[8] +; NONEON-NOSVE-NEXT: mov v5.b[6], w16 +; NONEON-NOSVE-NEXT: udiv w18, w17, w15 +; NONEON-NOSVE-NEXT: umov w15, v1.b[9] +; NONEON-NOSVE-NEXT: umov w17, v0.b[9] +; NONEON-NOSVE-NEXT: mov v5.b[7], w14 +; NONEON-NOSVE-NEXT: udiv w17, w17, w15 +; NONEON-NOSVE-NEXT: umov w15, v1.b[10] +; NONEON-NOSVE-NEXT: mov v5.b[8], w18 +; NONEON-NOSVE-NEXT: udiv w15, w2, w15 +; NONEON-NOSVE-NEXT: umov w2, v1.b[11] +; NONEON-NOSVE-NEXT: mov v5.b[9], w17 +; NONEON-NOSVE-NEXT: udiv w2, w3, w2 +; NONEON-NOSVE-NEXT: umov w3, v1.b[12] +; NONEON-NOSVE-NEXT: mov v5.b[10], w15 +; 
NONEON-NOSVE-NEXT: udiv w3, w4, w3 +; NONEON-NOSVE-NEXT: umov w4, v1.b[13] +; NONEON-NOSVE-NEXT: mov v5.b[11], w2 +; NONEON-NOSVE-NEXT: udiv w4, w5, w4 +; NONEON-NOSVE-NEXT: umov w5, v1.b[14] +; NONEON-NOSVE-NEXT: mov v5.b[12], w3 +; NONEON-NOSVE-NEXT: udiv w5, w6, w5 +; NONEON-NOSVE-NEXT: umov w6, v2.b[1] +; NONEON-NOSVE-NEXT: mov v5.b[13], w4 +; NONEON-NOSVE-NEXT: udiv w1, w6, w1 +; NONEON-NOSVE-NEXT: umov w6, v3.b[0] +; NONEON-NOSVE-NEXT: mov v5.b[14], w5 +; NONEON-NOSVE-NEXT: udiv w6, w7, w6 +; NONEON-NOSVE-NEXT: umov w7, v3.b[2] +; NONEON-NOSVE-NEXT: udiv w7, w19, w7 +; NONEON-NOSVE-NEXT: umov w19, v3.b[3] +; NONEON-NOSVE-NEXT: fmov s4, w6 +; NONEON-NOSVE-NEXT: mov v4.b[1], w1 +; NONEON-NOSVE-NEXT: udiv w19, w20, w19 +; NONEON-NOSVE-NEXT: umov w20, v3.b[4] +; NONEON-NOSVE-NEXT: mov v4.b[2], w7 +; NONEON-NOSVE-NEXT: udiv w20, w21, w20 +; NONEON-NOSVE-NEXT: umov w21, v3.b[5] +; NONEON-NOSVE-NEXT: mov v4.b[3], w19 +; NONEON-NOSVE-NEXT: udiv w21, w22, w21 +; NONEON-NOSVE-NEXT: umov w22, v3.b[6] +; NONEON-NOSVE-NEXT: mov v4.b[4], w20 +; NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w22, w23, w22 +; NONEON-NOSVE-NEXT: umov w23, v3.b[7] +; NONEON-NOSVE-NEXT: mov v4.b[5], w21 +; NONEON-NOSVE-NEXT: udiv w23, w24, w23 +; NONEON-NOSVE-NEXT: umov w24, v3.b[8] +; NONEON-NOSVE-NEXT: mov v4.b[6], w22 +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w24, w25, w24 +; NONEON-NOSVE-NEXT: umov w25, v3.b[9] +; NONEON-NOSVE-NEXT: mov v4.b[7], w23 +; NONEON-NOSVE-NEXT: udiv w25, w26, w25 +; NONEON-NOSVE-NEXT: umov w26, v3.b[10] +; NONEON-NOSVE-NEXT: mov v4.b[8], w24 +; NONEON-NOSVE-NEXT: ldp x24, x23, [sp, #32] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w8, w27, w26 +; NONEON-NOSVE-NEXT: mov v4.b[9], w25 +; NONEON-NOSVE-NEXT: ldp x26, x25, [sp, #16] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w9, w11, w9 +; NONEON-NOSVE-NEXT: umov w11, v2.b[12] +; NONEON-NOSVE-NEXT: mov v4.b[10], w8 +; NONEON-NOSVE-NEXT: umov w8, v3.b[15] +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: umov w11, v3.b[13] +; NONEON-NOSVE-NEXT: mov v4.b[11], w9 +; NONEON-NOSVE-NEXT: umov w9, v1.b[15] +; NONEON-NOSVE-NEXT: udiv w11, w12, w11 +; NONEON-NOSVE-NEXT: umov w12, v3.b[14] +; NONEON-NOSVE-NEXT: mov v4.b[12], w10 +; NONEON-NOSVE-NEXT: umov w10, v0.b[15] +; NONEON-NOSVE-NEXT: udiv w12, w13, w12 +; NONEON-NOSVE-NEXT: umov w13, v2.b[15] +; NONEON-NOSVE-NEXT: mov v4.b[13], w11 +; NONEON-NOSVE-NEXT: udiv w8, w13, w8 +; NONEON-NOSVE-NEXT: mov v4.b[14], w12 +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: mov v4.b[15], w8 +; NONEON-NOSVE-NEXT: mov v5.b[15], w9 +; NONEON-NOSVE-NEXT: stp q4, q5, [x0] +; NONEON-NOSVE-NEXT: ldr x27, [sp], #80 // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = udiv <32 x i8> %op1, %op2 @@ -563,6 +1419,22 @@ define <2 x i16> @udiv_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d2, #0x00ffff0000ffff +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v2.8b +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: fmov w9, s0 +; NONEON-NOSVE-NEXT: mov w10, v0.s[1] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: mov w9, v1.s[1] +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; 
NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: mov v0.s[1], w9 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = udiv <2 x i16> %op1, %op2 ret <2 x i16> %res } @@ -579,6 +1451,29 @@ define <4 x i16> @udiv_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: umov w8, v1.h[1] +; NONEON-NOSVE-NEXT: umov w9, v0.h[1] +; NONEON-NOSVE-NEXT: umov w10, v0.h[0] +; NONEON-NOSVE-NEXT: umov w11, v0.h[2] +; NONEON-NOSVE-NEXT: umov w12, v0.h[3] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: umov w9, v1.h[0] +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.h[2] +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: umov w11, v1.h[3] +; NONEON-NOSVE-NEXT: fmov s0, w9 +; NONEON-NOSVE-NEXT: mov v0.h[1], w8 +; NONEON-NOSVE-NEXT: udiv w8, w12, w11 +; NONEON-NOSVE-NEXT: mov v0.h[2], w10 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = udiv <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -605,6 +1500,43 @@ define <8 x i16> @udiv_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umov w8, v1.h[1] +; NONEON-NOSVE-NEXT: umov w9, v0.h[1] +; NONEON-NOSVE-NEXT: umov w10, v0.h[0] +; NONEON-NOSVE-NEXT: umov w11, v0.h[2] +; NONEON-NOSVE-NEXT: umov w12, v0.h[3] +; NONEON-NOSVE-NEXT: umov w13, v0.h[4] +; NONEON-NOSVE-NEXT: umov w14, v0.h[5] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: umov w9, v1.h[0] +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.h[2] +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: umov w11, v1.h[3] +; NONEON-NOSVE-NEXT: fmov s2, w9 +; NONEON-NOSVE-NEXT: umov w9, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[1], w8 +; NONEON-NOSVE-NEXT: udiv w11, w12, w11 +; NONEON-NOSVE-NEXT: umov w12, v1.h[4] +; NONEON-NOSVE-NEXT: mov v2.h[2], w10 +; NONEON-NOSVE-NEXT: umov w10, v0.h[6] +; NONEON-NOSVE-NEXT: udiv w12, w13, w12 +; NONEON-NOSVE-NEXT: umov w13, v1.h[5] +; NONEON-NOSVE-NEXT: mov v2.h[3], w11 +; NONEON-NOSVE-NEXT: umov w11, v0.h[7] +; NONEON-NOSVE-NEXT: udiv w8, w14, w13 +; NONEON-NOSVE-NEXT: mov v2.h[4], w12 +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.h[7] +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: udiv w8, w11, w10 +; NONEON-NOSVE-NEXT: mov v2.h[6], w9 +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = udiv <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -645,6 +1577,79 @@ define void @udiv_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: splice z3.h, p0, z3.h, z1.h ; CHECK-NEXT: stp q3, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: umov w8, v1.h[1] +; NONEON-NOSVE-NEXT: umov w9, v0.h[1] +; NONEON-NOSVE-NEXT: umov w10, v0.h[0] +; NONEON-NOSVE-NEXT: umov w11, v0.h[2] +; NONEON-NOSVE-NEXT: umov w12, 
v0.h[3] +; NONEON-NOSVE-NEXT: umov w13, v0.h[4] +; NONEON-NOSVE-NEXT: umov w14, v0.h[5] +; NONEON-NOSVE-NEXT: umov w15, v0.h[6] +; NONEON-NOSVE-NEXT: umov w16, v2.h[1] +; NONEON-NOSVE-NEXT: umov w17, v2.h[0] +; NONEON-NOSVE-NEXT: umov w18, v2.h[2] +; NONEON-NOSVE-NEXT: umov w1, v2.h[3] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: umov w9, v1.h[0] +; NONEON-NOSVE-NEXT: umov w2, v2.h[4] +; NONEON-NOSVE-NEXT: umov w3, v2.h[5] +; NONEON-NOSVE-NEXT: umov w4, v2.h[6] +; NONEON-NOSVE-NEXT: udiv w10, w10, w9 +; NONEON-NOSVE-NEXT: umov w9, v1.h[2] +; NONEON-NOSVE-NEXT: udiv w9, w11, w9 +; NONEON-NOSVE-NEXT: umov w11, v1.h[3] +; NONEON-NOSVE-NEXT: fmov s5, w10 +; NONEON-NOSVE-NEXT: umov w10, v3.h[7] +; NONEON-NOSVE-NEXT: mov v5.h[1], w8 +; NONEON-NOSVE-NEXT: udiv w11, w12, w11 +; NONEON-NOSVE-NEXT: umov w12, v1.h[4] +; NONEON-NOSVE-NEXT: mov v5.h[2], w9 +; NONEON-NOSVE-NEXT: umov w9, v2.h[7] +; NONEON-NOSVE-NEXT: udiv w12, w13, w12 +; NONEON-NOSVE-NEXT: umov w13, v1.h[5] +; NONEON-NOSVE-NEXT: mov v5.h[3], w11 +; NONEON-NOSVE-NEXT: umov w11, v0.h[7] +; NONEON-NOSVE-NEXT: udiv w13, w14, w13 +; NONEON-NOSVE-NEXT: umov w14, v1.h[6] +; NONEON-NOSVE-NEXT: mov v5.h[4], w12 +; NONEON-NOSVE-NEXT: udiv w14, w15, w14 +; NONEON-NOSVE-NEXT: umov w15, v3.h[1] +; NONEON-NOSVE-NEXT: mov v5.h[5], w13 +; NONEON-NOSVE-NEXT: udiv w15, w16, w15 +; NONEON-NOSVE-NEXT: umov w16, v3.h[0] +; NONEON-NOSVE-NEXT: mov v5.h[6], w14 +; NONEON-NOSVE-NEXT: udiv w16, w17, w16 +; NONEON-NOSVE-NEXT: umov w17, v3.h[2] +; NONEON-NOSVE-NEXT: udiv w17, w18, w17 +; NONEON-NOSVE-NEXT: umov w18, v3.h[3] +; NONEON-NOSVE-NEXT: fmov s4, w16 +; NONEON-NOSVE-NEXT: mov v4.h[1], w15 +; NONEON-NOSVE-NEXT: udiv w18, w1, w18 +; NONEON-NOSVE-NEXT: umov w1, v3.h[4] +; NONEON-NOSVE-NEXT: mov v4.h[2], w17 +; NONEON-NOSVE-NEXT: udiv w1, w2, w1 +; NONEON-NOSVE-NEXT: umov w2, v3.h[5] +; NONEON-NOSVE-NEXT: mov v4.h[3], w18 +; NONEON-NOSVE-NEXT: udiv w2, w3, w2 +; NONEON-NOSVE-NEXT: umov w3, v3.h[6] +; NONEON-NOSVE-NEXT: mov v4.h[4], w1 +; NONEON-NOSVE-NEXT: udiv w8, w4, w3 +; NONEON-NOSVE-NEXT: mov v4.h[5], w2 +; NONEON-NOSVE-NEXT: udiv w9, w9, w10 +; NONEON-NOSVE-NEXT: umov w10, v1.h[7] +; NONEON-NOSVE-NEXT: mov v4.h[6], w8 +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: mov v4.h[7], w9 +; NONEON-NOSVE-NEXT: mov v5.h[7], w10 +; NONEON-NOSVE-NEXT: stp q4, q5, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = udiv <16 x i16> %op1, %op2 @@ -661,6 +1666,21 @@ define <2 x i32> @udiv_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: fmov w9, s0 +; NONEON-NOSVE-NEXT: mov w10, v0.s[1] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: mov w9, v1.s[1] +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: mov v0.s[1], w9 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = udiv <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -674,6 +1694,26 @@ define <4 x i32> @udiv_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v4i32: +; NONEON-NOSVE: 
// %bb.0: +; NONEON-NOSVE-NEXT: mov w8, v1.s[1] +; NONEON-NOSVE-NEXT: mov w9, v0.s[1] +; NONEON-NOSVE-NEXT: fmov w10, s0 +; NONEON-NOSVE-NEXT: mov w11, v0.s[2] +; NONEON-NOSVE-NEXT: mov w12, v0.s[3] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: fmov w9, s1 +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: mov w10, v1.s[2] +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: mov w11, v1.s[3] +; NONEON-NOSVE-NEXT: fmov s0, w9 +; NONEON-NOSVE-NEXT: mov v0.s[1], w8 +; NONEON-NOSVE-NEXT: udiv w8, w12, w11 +; NONEON-NOSVE-NEXT: mov v0.s[2], w10 +; NONEON-NOSVE-NEXT: mov v0.s[3], w8 +; NONEON-NOSVE-NEXT: ret %res = udiv <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -689,6 +1729,45 @@ define void @udiv_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: udiv z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: mov w9, v0.s[1] +; NONEON-NOSVE-NEXT: fmov w10, s0 +; NONEON-NOSVE-NEXT: mov w11, v0.s[2] +; NONEON-NOSVE-NEXT: mov w8, v1.s[1] +; NONEON-NOSVE-NEXT: mov w12, v2.s[1] +; NONEON-NOSVE-NEXT: fmov w13, s2 +; NONEON-NOSVE-NEXT: mov w14, v2.s[2] +; NONEON-NOSVE-NEXT: mov w15, v2.s[3] +; NONEON-NOSVE-NEXT: mov w16, v0.s[3] +; NONEON-NOSVE-NEXT: udiv w8, w9, w8 +; NONEON-NOSVE-NEXT: fmov w9, s1 +; NONEON-NOSVE-NEXT: udiv w9, w10, w9 +; NONEON-NOSVE-NEXT: mov w10, v1.s[2] +; NONEON-NOSVE-NEXT: udiv w10, w11, w10 +; NONEON-NOSVE-NEXT: mov w11, v3.s[1] +; NONEON-NOSVE-NEXT: udiv w11, w12, w11 +; NONEON-NOSVE-NEXT: fmov w12, s3 +; NONEON-NOSVE-NEXT: udiv w12, w13, w12 +; NONEON-NOSVE-NEXT: mov w13, v3.s[2] +; NONEON-NOSVE-NEXT: udiv w13, w14, w13 +; NONEON-NOSVE-NEXT: mov w14, v3.s[3] +; NONEON-NOSVE-NEXT: fmov s0, w12 +; NONEON-NOSVE-NEXT: mov v0.s[1], w11 +; NONEON-NOSVE-NEXT: udiv w14, w15, w14 +; NONEON-NOSVE-NEXT: mov w15, v1.s[3] +; NONEON-NOSVE-NEXT: fmov s1, w9 +; NONEON-NOSVE-NEXT: mov v0.s[2], w13 +; NONEON-NOSVE-NEXT: mov v1.s[1], w8 +; NONEON-NOSVE-NEXT: mov v1.s[2], w10 +; NONEON-NOSVE-NEXT: udiv w8, w16, w15 +; NONEON-NOSVE-NEXT: mov v0.s[3], w14 +; NONEON-NOSVE-NEXT: mov v1.s[3], w8 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = udiv <8 x i32> %op1, %op2 @@ -705,6 +1784,16 @@ define <1 x i64> @udiv_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: udiv z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: udiv x8, x9, x8 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %res = udiv <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -718,6 +1807,18 @@ define <2 x i64> @udiv_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: udiv z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: mov x10, v0.d[1] +; NONEON-NOSVE-NEXT: udiv x8, x9, x8 +; NONEON-NOSVE-NEXT: mov x9, v1.d[1] +; NONEON-NOSVE-NEXT: udiv x9, x10, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; 
NONEON-NOSVE-NEXT: ret %res = udiv <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -733,6 +1834,29 @@ define void @udiv_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: udiv z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: mov x10, v2.d[1] +; NONEON-NOSVE-NEXT: fmov x11, d2 +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: mov x12, v0.d[1] +; NONEON-NOSVE-NEXT: udiv x8, x9, x8 +; NONEON-NOSVE-NEXT: mov x9, v3.d[1] +; NONEON-NOSVE-NEXT: udiv x9, x10, x9 +; NONEON-NOSVE-NEXT: fmov x10, d3 +; NONEON-NOSVE-NEXT: udiv x10, x11, x10 +; NONEON-NOSVE-NEXT: mov x11, v1.d[1] +; NONEON-NOSVE-NEXT: fmov d1, x8 +; NONEON-NOSVE-NEXT: udiv x11, x12, x11 +; NONEON-NOSVE-NEXT: fmov d0, x10 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; NONEON-NOSVE-NEXT: mov v1.d[1], x11 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = udiv <4 x i64> %op1, %op2 @@ -778,6 +1902,27 @@ define void @udiv_constantsplat_v8i32(ptr %a) { ; SVE2-NEXT: lsr z0.s, z0.s, #6 ; SVE2-NEXT: stp q1, q0, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: udiv_constantsplat_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #8969 // =0x2309 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: movk w8, #22765, lsl #16 +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: umull2 v3.2d, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: umull v4.2d, v1.2s, v0.2s +; NONEON-NOSVE-NEXT: umull2 v5.2d, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: umull v0.2d, v2.2s, v0.2s +; NONEON-NOSVE-NEXT: uzp2 v3.4s, v4.4s, v3.4s +; NONEON-NOSVE-NEXT: uzp2 v0.4s, v0.4s, v5.4s +; NONEON-NOSVE-NEXT: sub v1.4s, v1.4s, v3.4s +; NONEON-NOSVE-NEXT: sub v2.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: usra v3.4s, v1.4s, #1 +; NONEON-NOSVE-NEXT: usra v0.4s, v2.4s, #1 +; NONEON-NOSVE-NEXT: ushr v1.4s, v3.4s, #6 +; NONEON-NOSVE-NEXT: ushr v0.4s, v0.4s, #6 +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = udiv <8 x i32> %op1, store <8 x i32> %res, ptr %a diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll index c7a89612d278f2..e320fed2a498de 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll @@ -2,6 +2,7 @@ ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE ; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -26,6 +27,22 @@ define void @sext_v8i1_v8i32(<8 x i1> %a, ptr %out) { ; CHECK-NEXT: asr z0.s, z0.s, #31 ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v8i1_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: shl v0.4s, v0.4s, #31 +; NONEON-NOSVE-NEXT: shl v1.4s, v1.4s, #31 +; NONEON-NOSVE-NEXT: cmlt v0.4s, v0.4s, #0 +; NONEON-NOSVE-NEXT: cmlt v1.4s, v1.4s, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = sext <8 x i1> %a to <8 x i32> store <8 x i32> %b, ptr %out ret void @@ -52,6 +69,22 @@ define void @sext_v4i3_v4i64(<4 x i3> %a, ptr %out) { ; CHECK-NEXT: asr z0.d, z0.d, #61 ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v4i3_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: shl v0.2d, v0.2d, #61 +; NONEON-NOSVE-NEXT: shl v1.2d, v1.2d, #61 +; NONEON-NOSVE-NEXT: sshr v0.2d, v0.2d, #61 +; NONEON-NOSVE-NEXT: sshr v1.2d, v1.2d, #61 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = sext <4 x i3> %a to <4 x i64> store <4 x i64> %b, ptr %out ret void @@ -70,6 +103,17 @@ define void @sext_v16i8_v16i16(<16 x i8> %a, ptr %out) { ; CHECK-NEXT: sunpklo z0.h, z0.b ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v16i8_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: sshll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = sext <16 x i8> %a to <16 x i16> store <16 x i16>%b, ptr %out ret void @@ -91,6 +135,24 @@ define void @sext_v32i8_v32i16(ptr %in, ptr %out) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v32i8_v32i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: sshll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: sshll v2.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: sshll v3.8h, v3.8b, #0 +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %a = load <32 x i8>, ptr %in %b = add <32 x i8> %a, %a %c = sext <32 x i8> %b to <32 x i16> @@ -112,6 +174,18 @@ define void @sext_v8i8_v8i32(<8 x i8> %a, ptr %out) { ; CHECK-NEXT: sunpklo z0.s, z0.h ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v8i8_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sshll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = sext <8 x i8> %a to <8 x i32> store <8 x i32>%b, ptr %out ret void @@ -133,6 +207,25 @@ define void @sext_v16i8_v16i32(<16 x i8> %a, ptr %out) { ; CHECK-NEXT: stp q2, q1, [x0] ; CHECK-NEXT: stp q3, q0, [x0, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v16i8_v16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: sshll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: sshll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x0] +; NONEON-NOSVE-NEXT: stp q1, q3, [x0, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %b = sext <16 x i8> %a to <16 x i32> store <16 x i32> %b, ptr %out ret void @@ -167,6 +260,40 @@ define void @sext_v32i8_v32i32(ptr %in, ptr %out) { ; CHECK-NEXT: stp q6, q0, [x1, #96] ; CHECK-NEXT: stp q7, q1, [x1, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v32i8_v32i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-96]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 96 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: sshll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: sshll v2.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: sshll v3.8h, v3.8b, #0 +; NONEON-NOSVE-NEXT: stp q2, q0, [sp, #32] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: stp q3, q1, [sp, #64] +; NONEON-NOSVE-NEXT: ldr d5, [sp, #56] +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #88] +; NONEON-NOSVE-NEXT: ldr d6, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #72] +; NONEON-NOSVE-NEXT: sshll v5.4s, v5.4h, #0 +; NONEON-NOSVE-NEXT: sshll v4.4s, v4.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q5, [x1] +; NONEON-NOSVE-NEXT: sshll v0.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: sshll v2.4s, v6.4h, #0 +; NONEON-NOSVE-NEXT: stp q1, q4, [x1, #64] +; NONEON-NOSVE-NEXT: sshll v1.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: sshll v3.4s, v7.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #32] +; NONEON-NOSVE-NEXT: stp q1, q3, [x1, #96] +; NONEON-NOSVE-NEXT: add sp, sp, #96 +; NONEON-NOSVE-NEXT: ret %a = load <32 x i8>, ptr %in %b = add <32 x i8> %a, %a %c = sext <32 x i8> %b to <32 x i32> @@ -194,6 +321,22 @@ define void @sext_v4i8_v4i64(<4 x i8> %a, ptr %out) { ; CHECK-NEXT: sxtb z0.d, p0/m, z0.d ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v4i8_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: shl v0.2d, v0.2d, #56 +; NONEON-NOSVE-NEXT: shl v1.2d, v1.2d, #56 +; NONEON-NOSVE-NEXT: sshr v0.2d, v0.2d, #56 +; NONEON-NOSVE-NEXT: sshr v1.2d, v1.2d, #56 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = sext <4 x i8> %a to <4 x i64> store <4 x i64>%b, ptr %out ret void @@ -216,6 +359,26 @@ define void @sext_v8i8_v8i64(<8 x i8> %a, ptr %out) { ; CHECK-NEXT: stp q2, q1, [x0] ; CHECK-NEXT: stp q3, q0, [x0, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v8i8_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sshll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: sshll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x0] +; NONEON-NOSVE-NEXT: stp q1, q3, [x0, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %b = sext <8 x i8> %a to <8 x i64> store <8 x i64>%b, ptr %out ret void @@ -253,6 +416,41 @@ define void @sext_v16i8_v16i64(<16 x i8> %a, ptr %out) { ; CHECK-NEXT: stp q1, q4, [x0, #32] ; CHECK-NEXT: stp q0, q2, [x0, #96] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v16i8_v16i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-112]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 112 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: sshll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #40] +; NONEON-NOSVE-NEXT: sshll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: sshll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q2, q1, [sp, #48] +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: stp q3, q0, [sp, #80] +; NONEON-NOSVE-NEXT: ldr d5, [sp, #72] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #104] +; NONEON-NOSVE-NEXT: ldr d6, [sp, #56] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #88] +; NONEON-NOSVE-NEXT: sshll v5.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: sshll v4.2d, v4.2s, #0 +; NONEON-NOSVE-NEXT: stp q1, q5, [x0, #64] +; NONEON-NOSVE-NEXT: sshll v1.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: sshll v2.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q4, [x0] +; NONEON-NOSVE-NEXT: sshll v0.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: sshll v3.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: stp q1, q2, [x0, #96] +; NONEON-NOSVE-NEXT: stp q0, q3, [x0, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #112 +; NONEON-NOSVE-NEXT: ret %b = sext <16 x i8> %a to <16 x i64> store <16 x i64> %b, ptr %out ret void @@ -321,6 +519,73 @@ define void @sext_v32i8_v32i64(ptr %in, ptr %out) { ; CHECK-NEXT: stp q0, q2, [x1, #224] ; CHECK-NEXT: stp q3, q1, [x1, #96] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v32i8_v32i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub sp, sp, #224 +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 224 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [sp] +; NONEON-NOSVE-NEXT: sshll v5.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: sshll v6.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v3.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v4.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: stp q3, q5, [sp, #32] +; NONEON-NOSVE-NEXT: sshll v5.4s, v5.4h, #0 +; NONEON-NOSVE-NEXT: sshll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #56] +; NONEON-NOSVE-NEXT: ldr d0, [sp, #40] +; NONEON-NOSVE-NEXT: stp q4, q6, [sp, #64] +; NONEON-NOSVE-NEXT: sshll v6.4s, v6.4h, #0 +; NONEON-NOSVE-NEXT: sshll v4.4s, v4.4h, #0 +; NONEON-NOSVE-NEXT: ldr d7, [sp, #88] +; NONEON-NOSVE-NEXT: sshll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #72] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v7.4s, v7.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q2, q5, [sp, #128] +; NONEON-NOSVE-NEXT: sshll v5.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: sshll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ldr d19, [sp, #152] +; NONEON-NOSVE-NEXT: stp q0, q3, [sp, #96] +; NONEON-NOSVE-NEXT: ldr d20, [sp, #136] +; NONEON-NOSVE-NEXT: stp q1, q4, [sp, #160] +; NONEON-NOSVE-NEXT: ldr d17, [sp, #104] +; NONEON-NOSVE-NEXT: ldr d21, [sp, #120] +; NONEON-NOSVE-NEXT: stp q7, q6, [sp, #192] +; NONEON-NOSVE-NEXT: sshll v6.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: sshll v19.2d, v19.2s, #0 +; NONEON-NOSVE-NEXT: ldr d16, [sp, #216] +; NONEON-NOSVE-NEXT: ldr d22, [sp, #200] +; NONEON-NOSVE-NEXT: ldr d23, [sp, #184] +; NONEON-NOSVE-NEXT: ldr d18, [sp, #168] +; NONEON-NOSVE-NEXT: sshll v4.2d, v4.2s, #0 +; NONEON-NOSVE-NEXT: sshll v3.2d, v3.2s, #0 +; 
NONEON-NOSVE-NEXT: sshll v16.2d, v16.2s, #0 +; NONEON-NOSVE-NEXT: stp q5, q19, [x1] +; NONEON-NOSVE-NEXT: sshll v5.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: sshll v7.2d, v22.2s, #0 +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: stp q6, q16, [x1, #128] +; NONEON-NOSVE-NEXT: sshll v6.2d, v23.2s, #0 +; NONEON-NOSVE-NEXT: stp q5, q7, [x1, #160] +; NONEON-NOSVE-NEXT: sshll v5.2d, v20.2s, #0 +; NONEON-NOSVE-NEXT: stp q4, q6, [x1, #192] +; NONEON-NOSVE-NEXT: sshll v4.2d, v21.2s, #0 +; NONEON-NOSVE-NEXT: stp q2, q5, [x1, #32] +; NONEON-NOSVE-NEXT: sshll v2.2d, v17.2s, #0 +; NONEON-NOSVE-NEXT: stp q3, q4, [x1, #64] +; NONEON-NOSVE-NEXT: sshll v3.2d, v18.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #96] +; NONEON-NOSVE-NEXT: stp q1, q3, [x1, #224] +; NONEON-NOSVE-NEXT: add sp, sp, #224 +; NONEON-NOSVE-NEXT: ret %a = load <32 x i8>, ptr %in %b = add <32 x i8> %a, %a %c = sext <32 x i8> %b to <32 x i64> @@ -341,6 +606,17 @@ define void @sext_v8i16_v8i32(<8 x i16> %a, ptr %out) { ; CHECK-NEXT: sunpklo z0.s, z0.h ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v8i16_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = sext <8 x i16> %a to <8 x i32> store <8 x i32>%b, ptr %out ret void @@ -361,6 +637,24 @@ define void @sext_v16i16_v16i32(ptr %in, ptr %out) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v16i16_v16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: sshll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %a = load <16 x i16>, ptr %in %b = add <16 x i16> %a, %a %c = sext <16 x i16> %b to <16 x i32> @@ -382,6 +676,18 @@ define void @sext_v4i16_v4i64(<4 x i16> %a, ptr %out) { ; CHECK-NEXT: sunpklo z0.d, z0.s ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v4i16_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = sext <4 x i16> %a to <4 x i64> store <4 x i64>%b, ptr %out ret void @@ -403,6 +709,25 @@ define void @sext_v8i16_v8i64(<8 x i16> %a, ptr %out) { ; CHECK-NEXT: stp q2, q1, [x0] ; CHECK-NEXT: stp q3, q0, [x0, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v8i16_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: sshll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x0] +; NONEON-NOSVE-NEXT: stp q1, q3, [x0, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %b = sext <8 x i16> %a to <8 x i64> store <8 x i64>%b, ptr %out ret void @@ -437,6 +762,40 @@ define void @sext_v16i16_v16i64(ptr %in, ptr %out) { ; CHECK-NEXT: stp q6, q0, [x1, #96] ; CHECK-NEXT: stp q7, q1, [x1, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v16i16_v16i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-96]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 96 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: sshll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: sshll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q2, q0, [sp, #32] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: stp q3, q1, [sp, #64] +; NONEON-NOSVE-NEXT: ldr d5, [sp, #56] +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #88] +; NONEON-NOSVE-NEXT: ldr d6, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #72] +; NONEON-NOSVE-NEXT: sshll v5.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: sshll v4.2d, v4.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q5, [x1] +; NONEON-NOSVE-NEXT: sshll v0.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: sshll v2.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: stp q1, q4, [x1, #64] +; NONEON-NOSVE-NEXT: sshll v1.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: sshll v3.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #32] +; NONEON-NOSVE-NEXT: stp q1, q3, [x1, #96] +; NONEON-NOSVE-NEXT: add sp, sp, #96 +; NONEON-NOSVE-NEXT: ret %a = load <16 x i16>, ptr %in %b = add <16 x i16> %a, %a %c = sext <16 x i16> %b to <16 x i64> @@ -457,6 +816,17 @@ define void @sext_v4i32_v4i64(<4 x i32> %a, ptr %out) { ; CHECK-NEXT: sunpklo z0.d, z0.s ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v4i32_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = sext <4 x i32> %a to <4 x i64> store <4 x i64>%b, ptr %out ret void @@ -477,6 +847,24 @@ define void @sext_v8i32_v8i64(ptr %in, ptr %out) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sext_v8i32_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: add v1.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: sshll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %a = load <8 x i32>, ptr %in %b = add <8 x i32> %a, %a %c = sext <8 x i32> %b to <8 x i64> @@ -497,6 +885,17 @@ define void @zext_v16i8_v16i16(<16 x i8> %a, ptr %out) { ; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v16i8_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: ushll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = zext <16 x i8> %a to <16 x i16> store <16 x i16>%b, ptr %out ret void @@ -518,6 +917,24 @@ define void @zext_v32i8_v32i16(ptr %in, ptr %out) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v32i8_v32i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: ushll v2.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: ushll v3.8h, v3.8b, #0 +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %a = load <32 x i8>, ptr %in %b = add <32 x i8> %a, %a %c = zext <32 x i8> %b to <32 x i16> @@ -539,6 +956,18 @@ define void @zext_v8i8_v8i32(<8 x i8> %a, ptr %out) { ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v8i8_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = zext <8 x i8> %a to <8 x i32> store <8 x i32>%b, ptr %out ret void @@ -560,6 +989,25 @@ define void @zext_v16i8_v16i32(<16 x i8> %a, ptr %out) { ; CHECK-NEXT: stp q2, q1, [x0] ; CHECK-NEXT: stp q3, q0, [x0, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v16i8_v16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: ushll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ushll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x0] +; NONEON-NOSVE-NEXT: stp q1, q3, [x0, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %b = zext <16 x i8> %a to <16 x i32> store <16 x i32> %b, ptr %out ret void @@ -594,6 +1042,40 @@ define void @zext_v32i8_v32i32(ptr %in, ptr %out) { ; CHECK-NEXT: stp q6, q0, [x1, #96] ; CHECK-NEXT: stp q7, q1, [x1, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v32i8_v32i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-96]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 96 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: ushll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: ushll v2.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: ushll v3.8h, v3.8b, #0 +; NONEON-NOSVE-NEXT: stp q2, q0, [sp, #32] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: stp q3, q1, [sp, #64] +; NONEON-NOSVE-NEXT: ldr d5, [sp, #56] +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #88] +; NONEON-NOSVE-NEXT: ldr d6, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #72] +; NONEON-NOSVE-NEXT: ushll v5.4s, v5.4h, #0 +; NONEON-NOSVE-NEXT: ushll v4.4s, v4.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q5, [x1] +; NONEON-NOSVE-NEXT: ushll v0.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ushll v2.4s, v6.4h, #0 +; NONEON-NOSVE-NEXT: stp q1, q4, [x1, #64] +; NONEON-NOSVE-NEXT: ushll v1.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: ushll v3.4s, v7.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #32] +; NONEON-NOSVE-NEXT: stp q1, q3, [x1, #96] +; NONEON-NOSVE-NEXT: add sp, sp, #96 +; NONEON-NOSVE-NEXT: ret %a = load <32 x i8>, ptr %in %b = add <32 x i8> %a, %a %c = zext <32 x i8> %b to <32 x i32> @@ -619,6 +1101,20 @@ define void @zext_v4i8_v4i64(<4 x i8> %a, ptr %out) { ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v4i8_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0xff00ff00ff00ff +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = zext <4 x i8> %a to <4 x i64> store <4 x i64>%b, ptr %out ret void @@ -641,6 +1137,26 @@ define void @zext_v8i8_v8i64(<8 x i8> %a, ptr %out) { ; CHECK-NEXT: stp q2, q1, [x0] ; CHECK-NEXT: stp q3, q0, [x0, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v8i8_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ushll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x0] +; NONEON-NOSVE-NEXT: stp q1, q3, [x0, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %b = zext <8 x i8> %a to <8 x i64> store <8 x i64>%b, ptr %out ret void @@ -678,6 +1194,41 @@ define void @zext_v16i8_v16i64(<16 x i8> %a, ptr %out) { ; CHECK-NEXT: stp q1, q4, [x0, #32] ; CHECK-NEXT: stp q0, q2, [x0, #96] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v16i8_v16i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-112]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 112 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: ushll v1.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #40] +; NONEON-NOSVE-NEXT: ushll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ushll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q2, q1, [sp, #48] +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: stp q3, q0, [sp, #80] +; NONEON-NOSVE-NEXT: ldr d5, [sp, #72] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #104] +; NONEON-NOSVE-NEXT: ldr d6, [sp, #56] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #88] +; NONEON-NOSVE-NEXT: ushll v5.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: ushll v4.2d, v4.2s, #0 +; NONEON-NOSVE-NEXT: stp q1, q5, [x0, #64] +; NONEON-NOSVE-NEXT: ushll v1.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ushll v2.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q4, [x0] +; NONEON-NOSVE-NEXT: ushll v0.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: ushll v3.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: stp q1, q2, [x0, #96] +; NONEON-NOSVE-NEXT: stp q0, q3, [x0, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #112 +; NONEON-NOSVE-NEXT: ret %b = zext <16 x i8> %a to <16 x i64> store <16 x i64> %b, ptr %out ret void @@ -746,6 +1297,73 @@ define void @zext_v32i8_v32i64(ptr %in, ptr %out) { ; CHECK-NEXT: stp q0, q2, [x1, #224] ; CHECK-NEXT: stp q3, q1, [x1, #96] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v32i8_v32i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub sp, sp, #224 +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 224 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [sp] +; NONEON-NOSVE-NEXT: ushll v5.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: ushll v6.8h, v1.8b, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v3.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v4.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: stp q3, q5, [sp, #32] +; NONEON-NOSVE-NEXT: ushll v5.4s, v5.4h, #0 +; NONEON-NOSVE-NEXT: ushll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #56] +; NONEON-NOSVE-NEXT: ldr d0, [sp, #40] +; NONEON-NOSVE-NEXT: stp q4, q6, [sp, #64] +; NONEON-NOSVE-NEXT: ushll v6.4s, v6.4h, #0 +; NONEON-NOSVE-NEXT: ushll v4.4s, v4.4h, #0 +; NONEON-NOSVE-NEXT: ldr d7, [sp, #88] +; NONEON-NOSVE-NEXT: ushll 
v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #72] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v7.4s, v7.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q2, q5, [sp, #128] +; NONEON-NOSVE-NEXT: ushll v5.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: ushll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ldr d19, [sp, #152] +; NONEON-NOSVE-NEXT: stp q0, q3, [sp, #96] +; NONEON-NOSVE-NEXT: ldr d20, [sp, #136] +; NONEON-NOSVE-NEXT: stp q1, q4, [sp, #160] +; NONEON-NOSVE-NEXT: ldr d17, [sp, #104] +; NONEON-NOSVE-NEXT: ldr d21, [sp, #120] +; NONEON-NOSVE-NEXT: stp q7, q6, [sp, #192] +; NONEON-NOSVE-NEXT: ushll v6.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: ushll v19.2d, v19.2s, #0 +; NONEON-NOSVE-NEXT: ldr d16, [sp, #216] +; NONEON-NOSVE-NEXT: ldr d22, [sp, #200] +; NONEON-NOSVE-NEXT: ldr d23, [sp, #184] +; NONEON-NOSVE-NEXT: ldr d18, [sp, #168] +; NONEON-NOSVE-NEXT: ushll v4.2d, v4.2s, #0 +; NONEON-NOSVE-NEXT: ushll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: ushll v16.2d, v16.2s, #0 +; NONEON-NOSVE-NEXT: stp q5, q19, [x1] +; NONEON-NOSVE-NEXT: ushll v5.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: ushll v7.2d, v22.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: stp q6, q16, [x1, #128] +; NONEON-NOSVE-NEXT: ushll v6.2d, v23.2s, #0 +; NONEON-NOSVE-NEXT: stp q5, q7, [x1, #160] +; NONEON-NOSVE-NEXT: ushll v5.2d, v20.2s, #0 +; NONEON-NOSVE-NEXT: stp q4, q6, [x1, #192] +; NONEON-NOSVE-NEXT: ushll v4.2d, v21.2s, #0 +; NONEON-NOSVE-NEXT: stp q2, q5, [x1, #32] +; NONEON-NOSVE-NEXT: ushll v2.2d, v17.2s, #0 +; NONEON-NOSVE-NEXT: stp q3, q4, [x1, #64] +; NONEON-NOSVE-NEXT: ushll v3.2d, v18.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #96] +; NONEON-NOSVE-NEXT: stp q1, q3, [x1, #224] +; NONEON-NOSVE-NEXT: add sp, sp, #224 +; NONEON-NOSVE-NEXT: ret %a = load <32 x i8>, ptr %in %b = add <32 x i8> %a, %a %c = zext <32 x i8> %b to <32 x i64> @@ -766,6 +1384,17 @@ define void @zext_v8i16_v8i32(<8 x i16> %a, ptr %out) { ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v8i16_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = zext <8 x i16> %a to <8 x i32> store <8 x i32>%b, ptr %out ret void @@ -786,6 +1415,24 @@ define void @zext_v16i16_v16i32(ptr %in, ptr %out) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v16i16_v16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ushll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %a = load <16 x i16>, ptr %in %b = add <16 x i16> %a, %a %c = zext <16 x i16> %b to <16 x i32> @@ -807,6 +1454,18 @@ define void @zext_v4i16_v4i64(<4 x i16> %a, ptr %out) { ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v4i16_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = zext <4 x i16> %a to <4 x i64> store <4 x i64>%b, ptr %out ret void @@ -828,6 +1487,25 @@ define void @zext_v8i16_v8i64(<8 x i16> %a, ptr %out) { ; CHECK-NEXT: stp q2, q1, [x0] ; CHECK-NEXT: stp q3, q0, [x0, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v8i16_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ushll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x0] +; NONEON-NOSVE-NEXT: stp q1, q3, [x0, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %b = zext <8 x i16> %a to <8 x i64> store <8 x i64>%b, ptr %out ret void @@ -862,6 +1540,40 @@ define void @zext_v16i16_v16i64(ptr %in, ptr %out) { ; CHECK-NEXT: stp q6, q0, [x1, #96] ; CHECK-NEXT: stp q7, q1, [x1, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v16i16_v16i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-96]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 96 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ushll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ushll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q2, q0, [sp, #32] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: stp q3, q1, [sp, #64] +; NONEON-NOSVE-NEXT: ldr d5, [sp, #56] +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #88] +; NONEON-NOSVE-NEXT: ldr d6, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #72] +; NONEON-NOSVE-NEXT: ushll v5.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: ushll v4.2d, v4.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q5, [x1] +; NONEON-NOSVE-NEXT: ushll v0.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ushll v2.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: stp q1, q4, [x1, #64] +; NONEON-NOSVE-NEXT: ushll v1.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: ushll v3.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #32] +; NONEON-NOSVE-NEXT: stp q1, q3, [x1, #96] +; NONEON-NOSVE-NEXT: add sp, sp, #96 +; NONEON-NOSVE-NEXT: ret %a = load <16 x i16>, ptr %in %b = add <16 x i16> %a, %a %c = zext <16 x i16> %b to <16 x i64> @@ -882,6 +1594,17 @@ define void @zext_v4i32_v4i64(<4 x i32> %a, ptr %out) { ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v4i32_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %b = zext <4 x i32> %a to <4 x i64> store <4 x i64>%b, ptr %out ret void @@ -902,6 +1625,24 @@ define void @zext_v8i32_v8i64(ptr %in, ptr %out) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zext_v8i32_v8i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: add v0.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: add v1.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ushll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %a = load <8 x i32>, ptr %in %b = add <8 x i32> %a, %a %c = zext <8 x i32> %b to <8 x i64> @@ -928,6 +1669,21 @@ define void @extend_and_mul(i32 %0, <2 x i64> %1, ptr %2) { ; SVE2-NEXT: mul z0.d, z1.d, z0.d ; SVE2-NEXT: str q0, [x1] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: extend_and_mul: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v1.2s, w0 +; NONEON-NOSVE-NEXT: fmov x10, d0 +; NONEON-NOSVE-NEXT: mov x8, v0.d[1] +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: fmov x11, d1 +; NONEON-NOSVE-NEXT: mov x9, v1.d[1] +; NONEON-NOSVE-NEXT: mul x10, x11, x10 +; NONEON-NOSVE-NEXT: mul x8, x9, x8 +; NONEON-NOSVE-NEXT: fmov d0, x10 +; NONEON-NOSVE-NEXT: mov v0.d[1], x8 +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %broadcast.splatinsert2 = insertelement <2 x i32> poison, i32 %0, i64 0 %broadcast.splat3 = shufflevector <2 x i32> %broadcast.splatinsert2, <2 x i32> poison, <2 x i32> zeroinitializer %4 = zext <2 x i32> %broadcast.splat3 to <2 x i64> @@ -943,6 +1699,13 @@ define void @extend_no_mul(i32 %0, <2 x i64> %1, ptr %2) { ; CHECK-NEXT: mov z0.d, x8 ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: extend_no_mul: +; NONEON-NOSVE: // %bb.0: // %entry +; NONEON-NOSVE-NEXT: dup v0.2s, w0 +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret entry: %broadcast.splatinsert2 = insertelement <2 x i32> poison, i32 %0, i64 0 %broadcast.splat3 = shufflevector <2 x i32> %broadcast.splatinsert2, <2 x i32> poison, <2 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll index f028b3eeca2571..d86cfcbfb4f6e5 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -22,6 +23,15 @@ define void @add_v32i8(ptr %a) { ; CHECK-NEXT: add z1.b, z1.b, #7 // =0x7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: add v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i32 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -38,6 +48,16 @@ define void @add_v16i16(ptr %a) { ; CHECK-NEXT: add z1.h, z1.h, #15 // =0xf ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v16i16: 
+; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: add v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -54,6 +74,16 @@ define void @add_v8i32(ptr %a) { ; CHECK-NEXT: add z1.s, z1.s, #31 // =0x1f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: add v1.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: add v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -70,6 +100,16 @@ define void @add_v4i64(ptr %a) { ; CHECK-NEXT: add z1.d, z1.d, #63 // =0x3f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: add v1.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: add v0.2d, v2.2d, v0.2d +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -90,6 +130,15 @@ define void @and_v32i8(ptr %a) { ; CHECK-NEXT: and z1.b, z1.b, #0x7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: and v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: and v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i32 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -106,6 +155,16 @@ define void @and_v16i16(ptr %a) { ; CHECK-NEXT: and z1.h, z1.h, #0xf ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: and v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: and v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -122,6 +181,16 @@ define void @and_v8i32(ptr %a) { ; CHECK-NEXT: and z1.s, z1.s, #0x1f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: and v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: and v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x 
i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -138,6 +207,16 @@ define void @and_v4i64(ptr %a) { ; CHECK-NEXT: and z1.d, z1.d, #0x3f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: and v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: and v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -158,6 +237,14 @@ define void @ashr_v32i8(ptr %a) { ; CHECK-NEXT: asr z1.b, z1.b, #7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0 +; NONEON-NOSVE-NEXT: cmlt v1.16b, v1.16b, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i32 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -174,6 +261,14 @@ define void @ashr_v16i16(ptr %a) { ; CHECK-NEXT: asr z1.h, z1.h, #15 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmlt v0.8h, v0.8h, #0 +; NONEON-NOSVE-NEXT: cmlt v1.8h, v1.8h, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -190,6 +285,14 @@ define void @ashr_v8i32(ptr %a) { ; CHECK-NEXT: asr z1.s, z1.s, #31 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmlt v0.4s, v0.4s, #0 +; NONEON-NOSVE-NEXT: cmlt v1.4s, v1.4s, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -206,6 +309,14 @@ define void @ashr_v4i64(ptr %a) { ; CHECK-NEXT: asr z1.d, z1.d, #63 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmlt v0.2d, v0.2d, #0 +; NONEON-NOSVE-NEXT: cmlt v1.2d, v1.2d, #0 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -229,6 +340,15 @@ define void @icmp_eq_v32i8(ptr %a) { ; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_eq_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmeq v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: cmeq v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = 
shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -249,6 +369,16 @@ define void @icmp_sge_v16i16(ptr %a) { ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_sge_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: cmge v1.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: cmge v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -269,6 +399,16 @@ define void @icmp_sgt_v8i32(ptr %a) { ; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_sgt_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #-8 // =0xfffffff8 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: cmgt v1.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: cmgt v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 -8, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -289,6 +429,16 @@ define void @icmp_ult_v4i64(ptr %a) { ; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: icmp_ult_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: cmhi v1.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: cmhi v0.2d, v0.2d, v2.2d +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -310,6 +460,14 @@ define void @lshr_v32i8(ptr %a) { ; CHECK-NEXT: lsr z1.b, z1.b, #7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ushr v0.16b, v0.16b, #7 +; NONEON-NOSVE-NEXT: ushr v1.16b, v1.16b, #7 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -326,6 +484,14 @@ define void @lshr_v16i16(ptr %a) { ; CHECK-NEXT: lsr z1.h, z1.h, #15 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ushr v0.8h, v0.8h, #15 +; NONEON-NOSVE-NEXT: ushr v1.8h, v1.8h, #15 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -342,6 +508,14 @@ define void @lshr_v8i32(ptr %a) { ; CHECK-NEXT: lsr z1.s, z1.s, #31 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ushr v0.4s, v0.4s, #31 +; NONEON-NOSVE-NEXT: ushr v1.4s, 
v1.4s, #31 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -358,6 +532,14 @@ define void @lshr_v4i64(ptr %a) { ; CHECK-NEXT: lsr z1.d, z1.d, #63 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ushr v0.2d, v0.2d, #63 +; NONEON-NOSVE-NEXT: ushr v1.2d, v1.2d, #63 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -378,6 +560,15 @@ define void @mul_v32i8(ptr %a) { ; CHECK-NEXT: mul z1.b, z1.b, #7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: mul v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: mul v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -394,6 +585,16 @@ define void @mul_v16i16(ptr %a) { ; CHECK-NEXT: mul z1.h, z1.h, #15 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: mul v1.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: mul v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -410,6 +611,16 @@ define void @mul_v8i32(ptr %a) { ; CHECK-NEXT: mul z1.s, z1.s, #31 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: mul v1.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: mul v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -426,6 +637,28 @@ define void @mul_v4i64(ptr %a) { ; CHECK-NEXT: mul z1.d, z1.d, #63 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: mul_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: fmov x10, d0 +; NONEON-NOSVE-NEXT: fmov x11, d1 +; NONEON-NOSVE-NEXT: mov x8, v0.d[1] +; NONEON-NOSVE-NEXT: mov x9, v1.d[1] +; NONEON-NOSVE-NEXT: lsl x12, x10, #6 +; NONEON-NOSVE-NEXT: lsl x13, x11, #6 +; NONEON-NOSVE-NEXT: lsl x14, x8, #6 +; NONEON-NOSVE-NEXT: sub x10, x12, x10 +; NONEON-NOSVE-NEXT: sub x11, x13, x11 +; NONEON-NOSVE-NEXT: lsl x12, x9, #6 +; NONEON-NOSVE-NEXT: fmov d0, x10 +; NONEON-NOSVE-NEXT: fmov d1, x11 +; NONEON-NOSVE-NEXT: sub x8, x14, x8 +; NONEON-NOSVE-NEXT: sub x9, x12, x9 +; NONEON-NOSVE-NEXT: mov v0.d[1], x8 +; NONEON-NOSVE-NEXT: mov v1.d[1], x9 +; NONEON-NOSVE-NEXT: stp q0, 
q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -446,6 +679,15 @@ define void @or_v32i8(ptr %a) { ; CHECK-NEXT: orr z1.b, z1.b, #0x7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: orr v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -462,6 +704,16 @@ define void @or_v16i16(ptr %a) { ; CHECK-NEXT: orr z1.h, z1.h, #0xf ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: orr v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -478,6 +730,16 @@ define void @or_v8i32(ptr %a) { ; CHECK-NEXT: orr z1.s, z1.s, #0x1f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: orr v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -494,6 +756,16 @@ define void @or_v4i64(ptr %a) { ; CHECK-NEXT: orr z1.d, z1.d, #0x3f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: orr v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: orr v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -514,6 +786,14 @@ define void @shl_v32i8(ptr %a) { ; CHECK-NEXT: lsl z1.b, z1.b, #7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: shl v0.16b, v0.16b, #7 +; NONEON-NOSVE-NEXT: shl v1.16b, v1.16b, #7 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -530,6 +810,14 @@ define void @shl_v16i16(ptr %a) { ; CHECK-NEXT: lsl z1.h, z1.h, #15 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: 
shl v0.8h, v0.8h, #15 +; NONEON-NOSVE-NEXT: shl v1.8h, v1.8h, #15 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -546,6 +834,14 @@ define void @shl_v8i32(ptr %a) { ; CHECK-NEXT: lsl z1.s, z1.s, #31 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: shl v0.4s, v0.4s, #31 +; NONEON-NOSVE-NEXT: shl v1.4s, v1.4s, #31 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -562,6 +858,14 @@ define void @shl_v4i64(ptr %a) { ; CHECK-NEXT: lsl z1.d, z1.d, #63 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: shl v0.2d, v0.2d, #63 +; NONEON-NOSVE-NEXT: shl v1.2d, v1.2d, #63 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -582,6 +886,15 @@ define void @smax_v32i8(ptr %a) { ; CHECK-NEXT: smax z1.b, z1.b, #7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smax v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: smax v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -598,6 +911,16 @@ define void @smax_v16i16(ptr %a) { ; CHECK-NEXT: smax z1.h, z1.h, #15 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: smax v1.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: smax v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -614,6 +937,16 @@ define void @smax_v8i32(ptr %a) { ; CHECK-NEXT: smax z1.s, z1.s, #31 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: smax v1.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: smax v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -630,6 +963,18 @@ define void @smax_v4i64(ptr %a) { ; CHECK-NEXT: smax z1.d, z1.d, #63 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f 
+; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: cmgt v3.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: cmgt v4.2d, v2.2d, v0.2d +; NONEON-NOSVE-NEXT: bif v1.16b, v0.16b, v3.16b +; NONEON-NOSVE-NEXT: bit v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -650,6 +995,15 @@ define void @smin_v32i8(ptr %a) { ; CHECK-NEXT: smin z1.b, z1.b, #7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smin v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: smin v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -666,6 +1020,16 @@ define void @smin_v16i16(ptr %a) { ; CHECK-NEXT: smin z1.h, z1.h, #15 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: smin v1.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: smin v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -682,6 +1046,16 @@ define void @smin_v8i32(ptr %a) { ; CHECK-NEXT: smin z1.s, z1.s, #31 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: smin v1.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: smin v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -698,6 +1072,18 @@ define void @smin_v4i64(ptr %a) { ; CHECK-NEXT: smin z1.d, z1.d, #63 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: cmgt v3.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: cmgt v4.2d, v0.2d, v2.2d +; NONEON-NOSVE-NEXT: bif v1.16b, v0.16b, v3.16b +; NONEON-NOSVE-NEXT: bit v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -718,6 +1104,15 @@ define void @sub_v32i8(ptr %a) { ; CHECK-NEXT: sub z1.b, z1.b, #7 // =0x7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: sub v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: sub v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, 
[x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -734,6 +1129,16 @@ define void @sub_v16i16(ptr %a) { ; CHECK-NEXT: sub z1.h, z1.h, #15 // =0xf ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: sub v1.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: sub v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -750,6 +1155,16 @@ define void @sub_v8i32(ptr %a) { ; CHECK-NEXT: sub z1.s, z1.s, #31 // =0x1f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: sub v1.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: sub v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -766,6 +1181,16 @@ define void @sub_v4i64(ptr %a) { ; CHECK-NEXT: sub z1.d, z1.d, #63 // =0x3f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sub_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: sub v1.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: sub v0.2d, v2.2d, v0.2d +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -786,6 +1211,15 @@ define void @umax_v32i8(ptr %a) { ; CHECK-NEXT: umax z1.b, z1.b, #7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umax v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: umax v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -802,6 +1236,16 @@ define void @umax_v16i16(ptr %a) { ; CHECK-NEXT: umax z1.h, z1.h, #15 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: umax v1.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: umax v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -818,6 +1262,16 @@ define void @umax_v8i32(ptr %a) { ; CHECK-NEXT: umax z1.s, z1.s, #31 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; 
NONEON-NOSVE-LABEL: umax_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: umax v1.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: umax v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -834,6 +1288,18 @@ define void @umax_v4i64(ptr %a) { ; CHECK-NEXT: umax z1.d, z1.d, #63 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: cmhi v3.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: cmhi v4.2d, v2.2d, v0.2d +; NONEON-NOSVE-NEXT: bif v1.16b, v0.16b, v3.16b +; NONEON-NOSVE-NEXT: bit v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -854,6 +1320,15 @@ define void @umin_v32i8(ptr %a) { ; CHECK-NEXT: umin z1.b, z1.b, #7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umin v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: umin v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -870,6 +1345,16 @@ define void @umin_v16i16(ptr %a) { ; CHECK-NEXT: umin z1.h, z1.h, #15 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: umin v1.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: umin v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -886,6 +1371,16 @@ define void @umin_v8i32(ptr %a) { ; CHECK-NEXT: umin z1.s, z1.s, #31 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: umin v1.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: umin v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -902,6 +1397,18 @@ define void @umin_v4i64(ptr %a) { ; CHECK-NEXT: umin z1.d, z1.d, #63 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: cmhi v3.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: cmhi v4.2d, v0.2d, 
v2.2d +; NONEON-NOSVE-NEXT: bif v1.16b, v0.16b, v3.16b +; NONEON-NOSVE-NEXT: bit v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer @@ -922,6 +1429,15 @@ define void @xor_v32i8(ptr %a) { ; CHECK-NEXT: eor z1.b, z1.b, #0x7 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #7 +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: eor v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: eor v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %ins = insertelement <32 x i8> undef, i8 7, i64 0 %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer @@ -938,6 +1454,16 @@ define void @xor_v16i16(ptr %a) { ; CHECK-NEXT: eor z1.h, z1.h, #0xf ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #15 // =0xf +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: eor v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: eor v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %ins = insertelement <16 x i16> undef, i16 15, i64 0 %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer @@ -954,6 +1480,16 @@ define void @xor_v8i32(ptr %a) { ; CHECK-NEXT: eor z1.s, z1.s, #0x1f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: eor v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: eor v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %ins = insertelement <8 x i32> undef, i32 31, i64 0 %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer @@ -970,6 +1506,16 @@ define void @xor_v4i64(ptr %a) { ; CHECK-NEXT: eor z1.d, z1.d, #0x3f ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #63 // =0x3f +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: eor v1.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: eor v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %ins = insertelement <4 x i64> undef, i64 63, i64 0 %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll index 4d70c1dd1c9118..f0b39b275614d4 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s 
--check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -16,6 +17,11 @@ define <8 x i8> @and_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = and <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -28,6 +34,11 @@ define <16 x i8> @and_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = and <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -41,6 +52,15 @@ define void @and_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: and z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: and v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: and v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = and <32 x i8> %op1, %op2 @@ -56,6 +76,11 @@ define <4 x i16> @and_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = and <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -68,6 +93,11 @@ define <8 x i16> @and_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = and <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -81,6 +111,15 @@ define void @and_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: and z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: and v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: and v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = and <16 x i16> %op1, %op2 @@ -96,6 +135,11 @@ define <2 x i32> @and_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = and <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -108,6 +152,11 @@ define <4 x i32> @and_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = and <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -121,6 +170,15 @@ define void @and_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: and z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; 
CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: and v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: and v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = and <8 x i32> %op1, %op2 @@ -136,6 +194,11 @@ define <1 x i64> @and_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = and <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -148,6 +211,11 @@ define <2 x i64> @and_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = and <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -161,6 +229,15 @@ define void @and_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: and z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: and_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: and v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: and v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = and <4 x i64> %op1, %op2 @@ -180,6 +257,11 @@ define <8 x i8> @or_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: orr z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = or <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -192,6 +274,11 @@ define <16 x i8> @or_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: orr z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = or <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -205,6 +292,15 @@ define void @or_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: orr z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: orr v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = or <32 x i8> %op1, %op2 @@ -220,6 +316,11 @@ define <4 x i16> @or_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: orr z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = or <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -232,6 +333,11 @@ define <8 x i16> @or_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: orr z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; 
CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = or <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -245,6 +351,15 @@ define void @or_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: orr z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: orr v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = or <16 x i16> %op1, %op2 @@ -260,6 +375,11 @@ define <2 x i32> @or_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: orr z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = or <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -272,6 +392,11 @@ define <4 x i32> @or_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: orr z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = or <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -285,6 +410,15 @@ define void @or_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: orr z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: orr v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = or <8 x i32> %op1, %op2 @@ -300,6 +434,11 @@ define <1 x i64> @or_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: orr z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = or <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -312,6 +451,11 @@ define <2 x i64> @or_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: orr z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = or <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -325,6 +469,15 @@ define void @or_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: orr z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: or_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: orr v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: orr v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = or <4 x i64> %op1, %op2 @@ -344,6 +497,11 @@ define <8 x i8> @xor_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: eor z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: 
xor_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = xor <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -356,6 +514,11 @@ define <16 x i8> @xor_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: eor z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: eor v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = xor <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -369,6 +532,15 @@ define void @xor_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: eor z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: eor v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: eor v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = xor <32 x i8> %op1, %op2 @@ -384,6 +556,11 @@ define <4 x i16> @xor_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: eor z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = xor <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -396,6 +573,11 @@ define <8 x i16> @xor_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: eor z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: eor v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = xor <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -409,6 +591,15 @@ define void @xor_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: eor z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: eor v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: eor v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = xor <16 x i16> %op1, %op2 @@ -424,6 +615,11 @@ define <2 x i32> @xor_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: eor z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = xor <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -436,6 +632,11 @@ define <4 x i32> @xor_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: eor z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: eor v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = xor <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -449,6 +650,15 @@ define void @xor_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: eor z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: eor v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: eor v1.16b, v2.16b, v3.16b +; 
NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = xor <8 x i32> %op1, %op2 @@ -464,6 +674,11 @@ define <1 x i64> @xor_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: eor z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = xor <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -476,6 +691,11 @@ define <2 x i64> @xor_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: eor z0.d, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: eor v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = xor <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -489,6 +709,15 @@ define void @xor_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: eor z1.d, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: xor_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: eor v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: eor v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = xor <4 x i64> %op1, %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll index 50cf9b73d9a79c..51c404ece6cd5e 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -17,6 +18,11 @@ define <8 x i8> @smax_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smax v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i8> @llvm.smax.v8i8(<8 x i8> %op1, <8 x i8> %op2) ret <8 x i8> %res } @@ -30,6 +36,11 @@ define <16 x i8> @smax_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smax v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = call <16 x i8> @llvm.smax.v16i8(<16 x i8> %op1, <16 x i8> %op2) ret <16 x i8> %res } @@ -45,6 +56,15 @@ define void @smax_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: smax z1.b, p0/m, z1.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smax v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: smax v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr 
%b %res = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %op1, <32 x i8> %op2) @@ -61,6 +81,11 @@ define <4 x i16> @smax_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smax v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.smax.v4i16(<4 x i16> %op1, <4 x i16> %op2) ret <4 x i16> %res } @@ -74,6 +99,11 @@ define <8 x i16> @smax_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smax v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %op1, <8 x i16> %op2) ret <8 x i16> %res } @@ -89,6 +119,15 @@ define void @smax_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: smax z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smax v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: smax v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %op1, <16 x i16> %op2) @@ -105,6 +144,11 @@ define <2 x i32> @smax_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smax v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %op1, <2 x i32> %op2) ret <2 x i32> %res } @@ -118,6 +162,11 @@ define <4 x i32> @smax_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smax v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %op1, <4 x i32> %op2) ret <4 x i32> %res } @@ -133,6 +182,15 @@ define void @smax_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: smax z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smax v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: smax v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %op1, <8 x i32> %op2) @@ -150,6 +208,12 @@ define <1 x i64> @smax_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmgt d2, d0, d1 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.smax.v1i64(<1 x i64> %op1, <1 x i64> %op2) ret <1 x i64> %res } @@ -164,6 +228,12 @@ define <2 x i64> @smax_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d ; 
CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmgt v2.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %op1, <2 x i64> %op2) ret <2 x i64> %res } @@ -179,6 +249,18 @@ define void @smax_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: smax z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smax_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmgt v4.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: cmgt v5.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: bit v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %op1, <4 x i64> %op2) @@ -199,6 +281,11 @@ define <8 x i8> @smin_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smin v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i8> @llvm.smin.v8i8(<8 x i8> %op1, <8 x i8> %op2) ret <8 x i8> %res } @@ -212,6 +299,11 @@ define <16 x i8> @smin_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smin v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %op1, <16 x i8> %op2) ret <16 x i8> %res } @@ -227,6 +319,15 @@ define void @smin_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: smin z1.b, p0/m, z1.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smin v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: smin v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %op1, <32 x i8> %op2) @@ -243,6 +344,11 @@ define <4 x i16> @smin_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smin v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.smin.v4i16(<4 x i16> %op1, <4 x i16> %op2) ret <4 x i16> %res } @@ -256,6 +362,11 @@ define <8 x i16> @smin_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smin v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %op1, <8 x i16> %op2) ret <8 x i16> %res } @@ -271,6 +382,15 @@ define void @smin_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: smin z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: 
smin_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smin v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: smin v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %op1, <16 x i16> %op2) @@ -287,6 +407,11 @@ define <2 x i32> @smin_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smin v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %op1, <2 x i32> %op2) ret <2 x i32> %res } @@ -300,6 +425,11 @@ define <4 x i32> @smin_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smin v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %op1, <4 x i32> %op2) ret <4 x i32> %res } @@ -315,6 +445,15 @@ define void @smin_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: smin z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smin v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: smin v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %op1, <8 x i32> %op2) @@ -332,6 +471,12 @@ define <1 x i64> @smin_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmgt d2, d1, d0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.smin.v1i64(<1 x i64> %op1, <1 x i64> %op2) ret <1 x i64> %res } @@ -346,6 +491,12 @@ define <2 x i64> @smin_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmgt v2.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %op1, <2 x i64> %op2) ret <2 x i64> %res } @@ -361,6 +512,18 @@ define void @smin_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: smin z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smin_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmgt v4.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: cmgt v5.2d, v3.2d, v2.2d +; NONEON-NOSVE-NEXT: bit v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %op1, <4 x i64> %op2) @@ -381,6 +544,11 @@ define <8 x 
i8> @umax_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umax v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i8> @llvm.umax.v8i8(<8 x i8> %op1, <8 x i8> %op2) ret <8 x i8> %res } @@ -394,6 +562,11 @@ define <16 x i8> @umax_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umax v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = call <16 x i8> @llvm.umax.v16i8(<16 x i8> %op1, <16 x i8> %op2) ret <16 x i8> %res } @@ -409,6 +582,15 @@ define void @umax_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: umax z1.b, p0/m, z1.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umax v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: umax v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %op1, <32 x i8> %op2) @@ -425,6 +607,11 @@ define <4 x i16> @umax_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umax v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.umax.v4i16(<4 x i16> %op1, <4 x i16> %op2) ret <4 x i16> %res } @@ -438,6 +625,11 @@ define <8 x i16> @umax_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umax v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.umax.v8i16(<8 x i16> %op1, <8 x i16> %op2) ret <8 x i16> %res } @@ -453,6 +645,15 @@ define void @umax_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: umax z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umax v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: umax v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %op1, <16 x i16> %op2) @@ -469,6 +670,11 @@ define <2 x i32> @umax_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umax v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.umax.v2i32(<2 x i32> %op1, <2 x i32> %op2) ret <2 x i32> %res } @@ -482,6 +688,11 @@ define <4 x i32> @umax_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v4i32: +; NONEON-NOSVE: // %bb.0: +; 
NONEON-NOSVE-NEXT: umax v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %op1, <4 x i32> %op2) ret <4 x i32> %res } @@ -497,6 +708,15 @@ define void @umax_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: umax z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umax v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: umax v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %op1, <8 x i32> %op2) @@ -514,6 +734,12 @@ define <1 x i64> @umax_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmhi d2, d0, d1 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.umax.v1i64(<1 x i64> %op1, <1 x i64> %op2) ret <1 x i64> %res } @@ -528,6 +754,12 @@ define <2 x i64> @umax_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmhi v2.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %op1, <2 x i64> %op2) ret <2 x i64> %res } @@ -543,6 +775,18 @@ define void @umax_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: umax z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umax_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmhi v4.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: cmhi v5.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: bit v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %op1, <4 x i64> %op2) @@ -563,6 +807,11 @@ define <8 x i8> @umin_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umin v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i8> @llvm.umin.v8i8(<8 x i8> %op1, <8 x i8> %op2) ret <8 x i8> %res } @@ -576,6 +825,11 @@ define <16 x i8> @umin_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umin v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = call <16 x i8> @llvm.umin.v16i8(<16 x i8> %op1, <16 x i8> %op2) ret <16 x i8> %res } @@ -591,6 +845,15 @@ define void @umin_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: umin z1.b, p0/m, z1.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, 
[x0] +; NONEON-NOSVE-NEXT: umin v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: umin v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %op1, <32 x i8> %op2) @@ -607,6 +870,11 @@ define <4 x i16> @umin_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umin v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.umin.v4i16(<4 x i16> %op1, <4 x i16> %op2) ret <4 x i16> %res } @@ -620,6 +888,11 @@ define <8 x i16> @umin_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umin v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.umin.v8i16(<8 x i16> %op1, <8 x i16> %op2) ret <8 x i16> %res } @@ -635,6 +908,15 @@ define void @umin_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: umin z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umin v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: umin v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %op1, <16 x i16> %op2) @@ -651,6 +933,11 @@ define <2 x i32> @umin_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umin v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.umin.v2i32(<2 x i32> %op1, <2 x i32> %op2) ret <2 x i32> %res } @@ -664,6 +951,11 @@ define <4 x i32> @umin_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umin v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %op1, <4 x i32> %op2) ret <4 x i32> %res } @@ -679,6 +971,15 @@ define void @umin_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: umin z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umin v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: umin v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %op1, <8 x i32> %op2) @@ -696,6 +997,12 @@ define <1 x i64> @umin_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmhi d2, d1, d0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; 
NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.umin.v1i64(<1 x i64> %op1, <1 x i64> %op2) ret <1 x i64> %res } @@ -710,6 +1017,12 @@ define <2 x i64> @umin_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmhi v2.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %op1, <2 x i64> %op2) ret <2 x i64> %res } @@ -725,6 +1038,18 @@ define void @umin_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: umin z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umin_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: cmhi v4.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: cmhi v5.2d, v3.2d, v2.2d +; NONEON-NOSVE-NEXT: bit v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %op1, <4 x i64> %op2) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mla-neon-fa64.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mla-neon-fa64.ll index 149ad6d1e267ee..83714152c173f5 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mla-neon-fa64.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mla-neon-fa64.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sme-fa64 -force-streaming-compatible-sve < %s | FileCheck %s -check-prefix=FA64 ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s -check-prefix=NO-FA64 +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -20,6 +21,12 @@ define <8 x i8> @mla8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) { ; NO-FA64-NEXT: mad z0.b, p0/m, z1.b, z2.b ; NO-FA64-NEXT: // kill: def $d0 killed $d0 killed $z0 ; NO-FA64-NEXT: ret +; +; NONEON-NOSVE-LABEL: mla8xi8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mla v2.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov d0, d2 +; NONEON-NOSVE-NEXT: ret %tmp1 = mul <8 x i8> %A, %B; %tmp2 = add <8 x i8> %C, %tmp1; ret <8 x i8> %tmp2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll index cb7fa53eac5130..6e6d40e2ea040f 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll @@ -2,6 +2,7 @@ ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE ; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2 +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE ; This test only tests the legal types for a given vector width, as mulh nodes ; do not get generated for non-legal types. 
@@ -36,6 +37,16 @@ define <4 x i8> @smulh_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; SVE2-NEXT: lsr z0.h, z0.h, #4 ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: shl v1.4h, v1.4h, #8 +; NONEON-NOSVE-NEXT: sshr v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: sshr v1.4h, v1.4h, #8 +; NONEON-NOSVE-NEXT: mul v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ushr v0.4h, v0.4h, #4 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x i16> undef, i16 4, i64 0 %splat = shufflevector <4 x i16> %insert, <4 x i16> undef, <4 x i32> zeroinitializer %1 = sext <4 x i8> %op1 to <4 x i16> @@ -63,6 +74,12 @@ define <8 x i8> @smulh_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; SVE2-NEXT: smulh z0.b, z0.b, z1.b ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smull v0.8h, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: shrn v0.8b, v0.8h, #8 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <8 x i16> undef, i16 8, i64 0 %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer %1 = sext <8 x i8> %op1 to <8 x i16> @@ -90,6 +107,13 @@ define <16 x i8> @smulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; SVE2-NEXT: smulh z0.b, z0.b, z1.b ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smull2 v2.8h, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: smull v0.8h, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: uzp2 v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %1 = sext <16 x i8> %op1 to <16 x i16> %2 = sext <16 x i8> %op2 to <16 x i16> %mul = mul <16 x i16> %1, %2 @@ -118,6 +142,19 @@ define void @smulh_v32i8(ptr %a, ptr %b) { ; SVE2-NEXT: smulh z1.b, z2.b, z3.b ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smull2 v4.8h, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: smull v0.8h, v1.8b, v0.8b +; NONEON-NOSVE-NEXT: smull2 v1.8h, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: smull v2.8h, v2.8b, v3.8b +; NONEON-NOSVE-NEXT: uzp2 v0.16b, v0.16b, v4.16b +; NONEON-NOSVE-NEXT: uzp2 v1.16b, v2.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %1 = sext <32 x i8> %op1 to <32 x i16> @@ -153,6 +190,16 @@ define <2 x i16> @smulh_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; SVE2-NEXT: lsr z0.s, z0.s, #16 ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: shl v1.2s, v1.2s, #16 +; NONEON-NOSVE-NEXT: sshr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: sshr v1.2s, v1.2s, #16 +; NONEON-NOSVE-NEXT: mul v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ushr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: ret %1 = sext <2 x i16> %op1 to <2 x i32> %2 = sext <2 x i16> %op2 to <2 x i32> %mul = mul <2 x i32> %1, %2 @@ -178,6 +225,12 @@ define <4 x i16> @smulh_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; SVE2-NEXT: smulh z0.h, z0.h, z1.h ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smull v0.4s, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: shrn v0.4h, v0.4s, #16 +; 
NONEON-NOSVE-NEXT: ret %1 = sext <4 x i16> %op1 to <4 x i32> %2 = sext <4 x i16> %op2 to <4 x i32> %mul = mul <4 x i32> %1, %2 @@ -203,6 +256,13 @@ define <8 x i16> @smulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; SVE2-NEXT: smulh z0.h, z0.h, z1.h ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smull2 v2.4s, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: smull v0.4s, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: uzp2 v0.8h, v0.8h, v2.8h +; NONEON-NOSVE-NEXT: ret %1 = sext <8 x i16> %op1 to <8 x i32> %2 = sext <8 x i16> %op2 to <8 x i32> %mul = mul <8 x i32> %1, %2 @@ -231,6 +291,19 @@ define void @smulh_v16i16(ptr %a, ptr %b) { ; SVE2-NEXT: smulh z1.h, z2.h, z3.h ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smull2 v4.4s, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: smull v0.4s, v1.4h, v0.4h +; NONEON-NOSVE-NEXT: smull2 v1.4s, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: smull v2.4s, v2.4h, v3.4h +; NONEON-NOSVE-NEXT: uzp2 v0.8h, v0.8h, v4.8h +; NONEON-NOSVE-NEXT: uzp2 v1.8h, v2.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %1 = sext <16 x i16> %op1 to <16 x i32> @@ -259,6 +332,12 @@ define <2 x i32> @smulh_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; SVE2-NEXT: smulh z0.s, z0.s, z1.s ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smull v0.2d, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: shrn v0.2s, v0.2d, #32 +; NONEON-NOSVE-NEXT: ret %1 = sext <2 x i32> %op1 to <2 x i64> %2 = sext <2 x i32> %op2 to <2 x i64> %mul = mul <2 x i64> %1, %2 @@ -284,6 +363,13 @@ define <4 x i32> @smulh_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; SVE2-NEXT: smulh z0.s, z0.s, z1.s ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smull2 v2.2d, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: smull v0.2d, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: uzp2 v0.4s, v0.4s, v2.4s +; NONEON-NOSVE-NEXT: ret %1 = sext <4 x i32> %op1 to <4 x i64> %2 = sext <4 x i32> %op2 to <4 x i64> %mul = mul <4 x i64> %1, %2 @@ -312,6 +398,19 @@ define void @smulh_v8i32(ptr %a, ptr %b) { ; SVE2-NEXT: smulh z1.s, z2.s, z3.s ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: smull2 v4.2d, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: smull v0.2d, v1.2s, v0.2s +; NONEON-NOSVE-NEXT: smull2 v1.2d, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: smull v2.2d, v2.2s, v3.2s +; NONEON-NOSVE-NEXT: uzp2 v0.4s, v0.4s, v4.4s +; NONEON-NOSVE-NEXT: uzp2 v1.4s, v2.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %1 = sext <8 x i32> %op1 to <8 x i64> @@ -340,6 +439,16 @@ define <1 x i64> @smulh_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; SVE2-NEXT: smulh z0.d, z0.d, z1.d ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov 
x8, d0 +; NONEON-NOSVE-NEXT: fmov x9, d1 +; NONEON-NOSVE-NEXT: smulh x8, x8, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <1 x i128> undef, i128 64, i128 0 %splat = shufflevector <1 x i128> %insert, <1 x i128> undef, <1 x i32> zeroinitializer %1 = sext <1 x i64> %op1 to <1 x i128> @@ -367,6 +476,19 @@ define <2 x i64> @smulh_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; SVE2-NEXT: smulh z0.d, z0.d, z1.d ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov x8, v0.d[1] +; NONEON-NOSVE-NEXT: mov x9, v1.d[1] +; NONEON-NOSVE-NEXT: fmov x10, d0 +; NONEON-NOSVE-NEXT: fmov x11, d1 +; NONEON-NOSVE-NEXT: smulh x10, x10, x11 +; NONEON-NOSVE-NEXT: smulh x8, x8, x9 +; NONEON-NOSVE-NEXT: fmov d0, x10 +; NONEON-NOSVE-NEXT: fmov d1, x8 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %1 = sext <2 x i64> %op1 to <2 x i128> %2 = sext <2 x i64> %op2 to <2 x i128> %mul = mul <2 x i128> %1, %2 @@ -395,6 +517,31 @@ define void @smulh_v4i64(ptr %a, ptr %b) { ; SVE2-NEXT: smulh z1.d, z2.d, z3.d ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: smulh_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: mov x11, v0.d[1] +; NONEON-NOSVE-NEXT: mov x14, v3.d[1] +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: mov x10, v1.d[1] +; NONEON-NOSVE-NEXT: mov x13, v2.d[1] +; NONEON-NOSVE-NEXT: fmov x12, d3 +; NONEON-NOSVE-NEXT: smulh x8, x8, x9 +; NONEON-NOSVE-NEXT: fmov x9, d2 +; NONEON-NOSVE-NEXT: smulh x10, x10, x11 +; NONEON-NOSVE-NEXT: smulh x9, x9, x12 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: smulh x11, x13, x14 +; NONEON-NOSVE-NEXT: fmov d1, x10 +; NONEON-NOSVE-NEXT: fmov d2, x9 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: fmov d3, x11 +; NONEON-NOSVE-NEXT: mov v2.d[1], v3.d[0] +; NONEON-NOSVE-NEXT: stp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %1 = sext <4 x i64> %op1 to <4 x i128> @@ -433,6 +580,15 @@ define <4 x i8> @umulh_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; SVE2-NEXT: lsr z0.h, z0.h, #4 ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d2, #0xff00ff00ff00ff +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v2.8b +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: mul v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ushr v0.4h, v0.4h, #4 +; NONEON-NOSVE-NEXT: ret %1 = zext <4 x i8> %op1 to <4 x i16> %2 = zext <4 x i8> %op2 to <4 x i16> %mul = mul <4 x i16> %1, %2 @@ -458,6 +614,12 @@ define <8 x i8> @umulh_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; SVE2-NEXT: umulh z0.b, z0.b, z1.b ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umull v0.8h, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: shrn v0.8b, v0.8h, #8 +; NONEON-NOSVE-NEXT: ret %1 = zext <8 x i8> %op1 to <8 x i16> %2 = zext <8 x i8> %op2 to <8 x i16> %mul = mul <8 x i16> %1, %2 @@ -483,6 +645,13 @@ define <16 x i8> @umulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; SVE2-NEXT: umulh z0.b, z0.b, z1.b ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umull2 
v2.8h, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: umull v0.8h, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: uzp2 v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %1 = zext <16 x i8> %op1 to <16 x i16> %2 = zext <16 x i8> %op2 to <16 x i16> %mul = mul <16 x i16> %1, %2 @@ -511,6 +680,19 @@ define void @umulh_v32i8(ptr %a, ptr %b) { ; SVE2-NEXT: umulh z1.b, z2.b, z3.b ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umull2 v4.8h, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: umull v0.8h, v1.8b, v0.8b +; NONEON-NOSVE-NEXT: umull2 v1.8h, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: umull v2.8h, v2.8b, v3.8b +; NONEON-NOSVE-NEXT: uzp2 v0.16b, v0.16b, v4.16b +; NONEON-NOSVE-NEXT: uzp2 v1.16b, v2.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %1 = zext <32 x i8> %op1 to <32 x i16> @@ -545,6 +727,15 @@ define <2 x i16> @umulh_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; SVE2-NEXT: lsr z0.s, z0.s, #16 ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d2, #0x00ffff0000ffff +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v2.8b +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: mul v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ushr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: ret %1 = zext <2 x i16> %op1 to <2 x i32> %2 = zext <2 x i16> %op2 to <2 x i32> %mul = mul <2 x i32> %1, %2 @@ -570,6 +761,12 @@ define <4 x i16> @umulh_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; SVE2-NEXT: umulh z0.h, z0.h, z1.h ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umull v0.4s, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: shrn v0.4h, v0.4s, #16 +; NONEON-NOSVE-NEXT: ret %1 = zext <4 x i16> %op1 to <4 x i32> %2 = zext <4 x i16> %op2 to <4 x i32> %mul = mul <4 x i32> %1, %2 @@ -595,6 +792,13 @@ define <8 x i16> @umulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; SVE2-NEXT: umulh z0.h, z0.h, z1.h ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umull2 v2.4s, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: umull v0.4s, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: uzp2 v0.8h, v0.8h, v2.8h +; NONEON-NOSVE-NEXT: ret %1 = zext <8 x i16> %op1 to <8 x i32> %2 = zext <8 x i16> %op2 to <8 x i32> %mul = mul <8 x i32> %1, %2 @@ -623,6 +827,19 @@ define void @umulh_v16i16(ptr %a, ptr %b) { ; SVE2-NEXT: umulh z1.h, z2.h, z3.h ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umull2 v4.4s, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: umull v0.4s, v1.4h, v0.4h +; NONEON-NOSVE-NEXT: umull2 v1.4s, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: umull v2.4s, v2.4h, v3.4h +; NONEON-NOSVE-NEXT: uzp2 v0.8h, v0.8h, v4.8h +; NONEON-NOSVE-NEXT: uzp2 v1.8h, v2.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %1 = zext <16 x i16> %op1 to <16 x i32> @@ -651,6 +868,12 @@ define <2 x i32> @umulh_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; SVE2-NEXT: umulh z0.s, z0.s, z1.s ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; 
SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umull v0.2d, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: shrn v0.2s, v0.2d, #32 +; NONEON-NOSVE-NEXT: ret %1 = zext <2 x i32> %op1 to <2 x i64> %2 = zext <2 x i32> %op2 to <2 x i64> %mul = mul <2 x i64> %1, %2 @@ -676,6 +899,13 @@ define <4 x i32> @umulh_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; SVE2-NEXT: umulh z0.s, z0.s, z1.s ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umull2 v2.2d, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: umull v0.2d, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: uzp2 v0.4s, v0.4s, v2.4s +; NONEON-NOSVE-NEXT: ret %1 = zext <4 x i32> %op1 to <4 x i64> %2 = zext <4 x i32> %op2 to <4 x i64> %mul = mul <4 x i64> %1, %2 @@ -704,6 +934,19 @@ define void @umulh_v8i32(ptr %a, ptr %b) { ; SVE2-NEXT: umulh z1.s, z2.s, z3.s ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: umull2 v4.2d, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: umull v0.2d, v1.2s, v0.2s +; NONEON-NOSVE-NEXT: umull2 v1.2d, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: umull v2.2d, v2.2s, v3.2s +; NONEON-NOSVE-NEXT: uzp2 v0.4s, v0.4s, v4.4s +; NONEON-NOSVE-NEXT: uzp2 v1.4s, v2.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %insert = insertelement <8 x i64> undef, i64 32, i64 0 @@ -734,6 +977,16 @@ define <1 x i64> @umulh_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; SVE2-NEXT: umulh z0.d, z0.d, z1.d ; SVE2-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: fmov x9, d1 +; NONEON-NOSVE-NEXT: umulh x8, x8, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %1 = zext <1 x i64> %op1 to <1 x i128> %2 = zext <1 x i64> %op2 to <1 x i128> %mul = mul <1 x i128> %1, %2 @@ -759,6 +1012,19 @@ define <2 x i64> @umulh_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; SVE2-NEXT: umulh z0.d, z0.d, z1.d ; SVE2-NEXT: // kill: def $q0 killed $q0 killed $z0 ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov x8, v0.d[1] +; NONEON-NOSVE-NEXT: mov x9, v1.d[1] +; NONEON-NOSVE-NEXT: fmov x10, d0 +; NONEON-NOSVE-NEXT: fmov x11, d1 +; NONEON-NOSVE-NEXT: umulh x10, x10, x11 +; NONEON-NOSVE-NEXT: umulh x8, x8, x9 +; NONEON-NOSVE-NEXT: fmov d0, x10 +; NONEON-NOSVE-NEXT: fmov d1, x8 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: ret %1 = zext <2 x i64> %op1 to <2 x i128> %2 = zext <2 x i64> %op2 to <2 x i128> %mul = mul <2 x i128> %1, %2 @@ -787,6 +1053,31 @@ define void @umulh_v4i64(ptr %a, ptr %b) { ; SVE2-NEXT: umulh z1.d, z2.d, z3.d ; SVE2-NEXT: stp q0, q1, [x0] ; SVE2-NEXT: ret +; +; NONEON-NOSVE-LABEL: umulh_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: mov x11, v0.d[1] +; NONEON-NOSVE-NEXT: mov x14, v3.d[1] +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: mov x10, v1.d[1] +; NONEON-NOSVE-NEXT: mov x13, v2.d[1] +; NONEON-NOSVE-NEXT: fmov x12, d3 +; NONEON-NOSVE-NEXT: umulh x8, x8, 
x9 +; NONEON-NOSVE-NEXT: fmov x9, d2 +; NONEON-NOSVE-NEXT: umulh x10, x10, x11 +; NONEON-NOSVE-NEXT: umulh x9, x9, x12 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: umulh x11, x13, x14 +; NONEON-NOSVE-NEXT: fmov d1, x10 +; NONEON-NOSVE-NEXT: fmov d2, x9 +; NONEON-NOSVE-NEXT: mov v0.d[1], v1.d[0] +; NONEON-NOSVE-NEXT: fmov d3, x11 +; NONEON-NOSVE-NEXT: mov v2.d[1], v3.d[0] +; NONEON-NOSVE-NEXT: stp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %1 = zext <4 x i64> %op1 to <4 x i128> diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll index 751f43768a511a..50eaa6c12d71e6 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -17,6 +18,12 @@ define i8 @uaddv_v8i8(<8 x i8> %a) { ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: addv b0, v0.8b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a) ret i8 %res } @@ -30,6 +37,12 @@ define i8 @uaddv_v16i8(<16 x i8> %a) { ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: addv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %a) ret i8 %res } @@ -44,6 +57,14 @@ define i8 @uaddv_v32i8(ptr %a) { ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: add v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: addv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %op) ret i8 %res @@ -58,6 +79,12 @@ define i16 @uaddv_v4i16(<4 x i16> %a) { ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: addv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a) ret i16 %res } @@ -71,6 +98,12 @@ define i16 @uaddv_v8i16(<8 x i16> %a) { ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: addv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %a) ret i16 %res } @@ -85,6 +118,14 @@ define i16 @uaddv_v16i16(ptr %a) { ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v16i16: +; 
NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: add v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: addv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %op) ret i16 %res @@ -99,6 +140,12 @@ define i32 @uaddv_v2i32(<2 x i32> %a) { ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: addp v0.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %a) ret i32 %res } @@ -112,6 +159,12 @@ define i32 @uaddv_v4i32(<4 x i32> %a) { ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: addv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a) ret i32 %res } @@ -126,6 +179,14 @@ define i32 @uaddv_v8i32(ptr %a) { ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: add v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: addv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %op) ret i32 %res @@ -139,6 +200,12 @@ define i64 @uaddv_v2i64(<2 x i64> %a) { ; CHECK-NEXT: uaddv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: addp d0, v0.2d +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: ret %res = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a) ret i64 %res } @@ -152,6 +219,14 @@ define i64 @uaddv_v4i64(ptr %a) { ; CHECK-NEXT: uaddv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uaddv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: add v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: addp d0, v0.2d +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %op) ret i64 %res @@ -169,6 +244,12 @@ define i8 @smaxv_v8i8(<8 x i8> %a) { ; CHECK-NEXT: smaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smaxv b0, v0.8b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %a) ret i8 %res } @@ -181,6 +262,12 @@ define i8 @smaxv_v16i8(<16 x i8> %a) { ; CHECK-NEXT: smaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smaxv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %a) ret i8 %res } @@ -194,6 +281,14 @@ define i8 @smaxv_v32i8(ptr %a) { ; CHECK-NEXT: smaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: smax v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: smaxv b0, v0.16b +; 
NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %op) ret i8 %res @@ -207,6 +302,12 @@ define i16 @smaxv_v4i16(<4 x i16> %a) { ; CHECK-NEXT: smaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smaxv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %a) ret i16 %res } @@ -219,6 +320,12 @@ define i16 @smaxv_v8i16(<8 x i16> %a) { ; CHECK-NEXT: smaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smaxv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %a) ret i16 %res } @@ -232,6 +339,14 @@ define i16 @smaxv_v16i16(ptr %a) { ; CHECK-NEXT: smaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: smax v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: smaxv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %op) ret i16 %res @@ -245,6 +360,12 @@ define i32 @smaxv_v2i32(<2 x i32> %a) { ; CHECK-NEXT: smaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smaxp v0.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %a) ret i32 %res } @@ -257,6 +378,12 @@ define i32 @smaxv_v4i32(<4 x i32> %a) { ; CHECK-NEXT: smaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smaxv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %a) ret i32 %res } @@ -270,6 +397,14 @@ define i32 @smaxv_v8i32(ptr %a) { ; CHECK-NEXT: smaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: smax v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: smaxv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %op) ret i32 %res @@ -284,6 +419,17 @@ define i64 @smaxv_v2i64(<2 x i64> %a) { ; CHECK-NEXT: smaxv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: cmgt d2, d0, d1 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %a) ret i64 %res } @@ -297,6 +443,20 @@ define i64 @smaxv_v4i64(ptr %a) { ; CHECK-NEXT: smaxv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: smaxv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: cmgt v2.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: bit v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: cmgt d2, d0, d1 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %op) ret i64 %res @@ -314,6 +474,12 @@ define i8 @sminv_v8i8(<8 x i8> %a) { ; CHECK-NEXT: sminv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sminv b0, v0.8b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %a) ret i8 %res } @@ -326,6 +492,12 @@ define i8 @sminv_v16i8(<16 x i8> %a) { ; CHECK-NEXT: sminv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sminv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %a) ret i8 %res } @@ -339,6 +511,14 @@ define i8 @sminv_v32i8(ptr %a) { ; CHECK-NEXT: sminv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: smin v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: sminv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %op) ret i8 %res @@ -352,6 +532,12 @@ define i16 @sminv_v4i16(<4 x i16> %a) { ; CHECK-NEXT: sminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sminv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %a) ret i16 %res } @@ -364,6 +550,12 @@ define i16 @sminv_v8i16(<8 x i16> %a) { ; CHECK-NEXT: sminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sminv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %a) ret i16 %res } @@ -377,6 +569,14 @@ define i16 @sminv_v16i16(ptr %a) { ; CHECK-NEXT: sminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: smin v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: sminv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %op) 
ret i16 %res @@ -390,6 +590,12 @@ define i32 @sminv_v2i32(<2 x i32> %a) { ; CHECK-NEXT: sminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sminp v0.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %a) ret i32 %res } @@ -402,6 +608,12 @@ define i32 @sminv_v4i32(<4 x i32> %a) { ; CHECK-NEXT: sminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sminv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %a) ret i32 %res } @@ -415,6 +627,14 @@ define i32 @sminv_v8i32(ptr %a) { ; CHECK-NEXT: sminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: smin v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: sminv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %op) ret i32 %res @@ -429,6 +649,17 @@ define i64 @sminv_v2i64(<2 x i64> %a) { ; CHECK-NEXT: sminv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: cmgt d2, d1, d0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %a) ret i64 %res } @@ -442,6 +673,20 @@ define i64 @sminv_v4i64(ptr %a) { ; CHECK-NEXT: sminv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sminv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmgt v2.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: cmgt d2, d1, d0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %op) ret i64 %res @@ -459,6 +704,12 @@ define i8 @umaxv_v8i8(<8 x i8> %a) { ; CHECK-NEXT: umaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umaxv b0, v0.8b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %a) ret i8 %res } @@ -471,6 +722,12 @@ define i8 @umaxv_v16i8(<16 x i8> %a) { ; CHECK-NEXT: umaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umaxv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %a) ret i8 %res } @@ -484,6 +741,14 @@ define i8 @umaxv_v32i8(ptr %a) { ; CHECK-NEXT: umaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: umax v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: umaxv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %op) ret i8 %res @@ -497,6 +762,12 @@ define i16 @umaxv_v4i16(<4 x i16> %a) { ; CHECK-NEXT: umaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umaxv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %a) ret i16 %res } @@ -509,6 +780,12 @@ define i16 @umaxv_v8i16(<8 x i16> %a) { ; CHECK-NEXT: umaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umaxv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %a) ret i16 %res } @@ -522,6 +799,14 @@ define i16 @umaxv_v16i16(ptr %a) { ; CHECK-NEXT: umaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: umax v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: umaxv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %op) ret i16 %res @@ -535,6 +820,12 @@ define i32 @umaxv_v2i32(<2 x i32> %a) { ; CHECK-NEXT: umaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umaxp v0.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %a) ret i32 %res } @@ -547,6 +838,12 @@ define i32 @umaxv_v4i32(<4 x i32> %a) { ; CHECK-NEXT: umaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umaxv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call 
i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %a) ret i32 %res } @@ -560,6 +857,14 @@ define i32 @umaxv_v8i32(ptr %a) { ; CHECK-NEXT: umaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: umax v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: umaxv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %op) ret i32 %res @@ -574,6 +879,17 @@ define i64 @umaxv_v2i64(<2 x i64> %a) { ; CHECK-NEXT: umaxv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: cmhi d2, d0, d1 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %a) ret i64 %res } @@ -587,6 +903,20 @@ define i64 @umaxv_v4i64(ptr %a) { ; CHECK-NEXT: umaxv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: umaxv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: cmhi v2.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: bit v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: cmhi d2, d0, d1 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %op) ret i64 %res @@ -604,6 +934,12 @@ define i8 @uminv_v8i8(<8 x i8> %a) { ; CHECK-NEXT: uminv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: uminv b0, v0.8b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %a) ret i8 %res } @@ -616,6 +952,12 @@ define i8 @uminv_v16i8(<16 x i8> %a) { ; CHECK-NEXT: uminv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: uminv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %a) ret i8 %res } @@ -629,6 +971,14 @@ define i8 @uminv_v32i8(ptr %a) { ; CHECK-NEXT: uminv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: umin v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: uminv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %op) ret i8 %res @@ -642,6 +992,12 @@ define i16 @uminv_v4i16(<4 x i16> %a) { ; CHECK-NEXT: uminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: uminv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %a) ret i16 %res } @@ -654,6 +1010,12 @@ define i16 @uminv_v8i16(<8 x i16> %a) 
{ ; CHECK-NEXT: uminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: uminv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %a) ret i16 %res } @@ -667,6 +1029,14 @@ define i16 @uminv_v16i16(ptr %a) { ; CHECK-NEXT: uminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: umin v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: uminv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %op) ret i16 %res @@ -680,6 +1050,12 @@ define i32 @uminv_v2i32(<2 x i32> %a) { ; CHECK-NEXT: uminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: uminp v0.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %a) ret i32 %res } @@ -692,6 +1068,12 @@ define i32 @uminv_v4i32(<4 x i32> %a) { ; CHECK-NEXT: uminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: uminv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %a) ret i32 %res } @@ -705,6 +1087,14 @@ define i32 @uminv_v8i32(ptr %a) { ; CHECK-NEXT: uminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: umin v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uminv s0, v0.4s +; NONEON-NOSVE-NEXT: fmov w0, s0 +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %op) ret i32 %res @@ -719,6 +1109,17 @@ define i64 @uminv_v2i64(<2 x i64> %a) { ; CHECK-NEXT: uminv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: cmhi d2, d1, d0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %a) ret i64 %res } @@ -732,6 +1133,20 @@ define i64 @uminv_v4i64(ptr %a) { ; CHECK-NEXT: uminv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uminv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmhi v2.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: cmhi d2, d1, d0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %op) ret i64 %res diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll index d373a9063f8521..97bd76311b61c3 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -24,6 +25,35 @@ define <4 x i8> @srem_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: shl v1.4h, v1.4h, #8 +; NONEON-NOSVE-NEXT: sshr v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: sshr v1.4h, v1.4h, #8 +; NONEON-NOSVE-NEXT: smov w11, v1.h[0] +; NONEON-NOSVE-NEXT: smov w12, v0.h[0] +; NONEON-NOSVE-NEXT: smov w8, v1.h[1] +; NONEON-NOSVE-NEXT: smov w9, v0.h[1] +; NONEON-NOSVE-NEXT: smov w14, v1.h[2] +; NONEON-NOSVE-NEXT: smov w15, v0.h[2] +; NONEON-NOSVE-NEXT: smov w17, v1.h[3] +; NONEON-NOSVE-NEXT: smov w18, v0.h[3] +; NONEON-NOSVE-NEXT: sdiv w13, w12, w11 +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: fmov s0, w11 +; NONEON-NOSVE-NEXT: sdiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: mov v0.h[1], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: mov v0.h[2], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w17, w18 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = srem <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -53,6 +83,53 @@ define <8 x i8> @srem_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: mls z0.b, p0/m, z2.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: smov w11, v1.b[0] +; NONEON-NOSVE-NEXT: smov w12, v0.b[0] +; NONEON-NOSVE-NEXT: smov w8, v1.b[1] +; NONEON-NOSVE-NEXT: smov w9, v0.b[1] +; NONEON-NOSVE-NEXT: smov w14, v1.b[2] +; NONEON-NOSVE-NEXT: smov w15, v0.b[2] +; NONEON-NOSVE-NEXT: smov w17, v1.b[3] +; NONEON-NOSVE-NEXT: smov w18, v0.b[3] +; NONEON-NOSVE-NEXT: smov w1, v1.b[4] +; NONEON-NOSVE-NEXT: smov w2, v0.b[4] +; NONEON-NOSVE-NEXT: smov w4, v1.b[5] +; NONEON-NOSVE-NEXT: smov w5, v0.b[5] +; NONEON-NOSVE-NEXT: sdiv w13, w12, w11 +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: smov w13, v1.b[7] +; NONEON-NOSVE-NEXT: fmov s2, w11 +; NONEON-NOSVE-NEXT: smov w11, v0.b[6] 
+; NONEON-NOSVE-NEXT: sdiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.b[6] +; NONEON-NOSVE-NEXT: mov v2.b[1], w8 +; NONEON-NOSVE-NEXT: sdiv w0, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: smov w14, v0.b[7] +; NONEON-NOSVE-NEXT: mov v2.b[2], w8 +; NONEON-NOSVE-NEXT: sdiv w3, w2, w1 +; NONEON-NOSVE-NEXT: msub w8, w0, w17, w18 +; NONEON-NOSVE-NEXT: mov v2.b[3], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w5, w4 +; NONEON-NOSVE-NEXT: msub w8, w3, w1, w2 +; NONEON-NOSVE-NEXT: mov v2.b[4], w8 +; NONEON-NOSVE-NEXT: sdiv w12, w11, w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w4, w5 +; NONEON-NOSVE-NEXT: mov v2.b[5], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w14, w13 +; NONEON-NOSVE-NEXT: msub w8, w12, w10, w11 +; NONEON-NOSVE-NEXT: mov v2.b[6], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w13, w14 +; NONEON-NOSVE-NEXT: mov v2.b[7], w8 +; NONEON-NOSVE-NEXT: fmov d0, d2 +; NONEON-NOSVE-NEXT: ret %res = srem <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -102,6 +179,112 @@ define <16 x i8> @srem_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: mls z0.b, p0/m, z3.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: stp x28, x27, [sp, #-80]! // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x26, x25, [sp, #16] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x24, x23, [sp, #32] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 80 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -40 +; NONEON-NOSVE-NEXT: .cfi_offset w24, -48 +; NONEON-NOSVE-NEXT: .cfi_offset w25, -56 +; NONEON-NOSVE-NEXT: .cfi_offset w26, -64 +; NONEON-NOSVE-NEXT: .cfi_offset w27, -72 +; NONEON-NOSVE-NEXT: .cfi_offset w28, -80 +; NONEON-NOSVE-NEXT: smov w11, v1.b[0] +; NONEON-NOSVE-NEXT: smov w12, v0.b[0] +; NONEON-NOSVE-NEXT: smov w8, v1.b[1] +; NONEON-NOSVE-NEXT: smov w9, v0.b[1] +; NONEON-NOSVE-NEXT: smov w14, v1.b[2] +; NONEON-NOSVE-NEXT: smov w15, v0.b[2] +; NONEON-NOSVE-NEXT: smov w17, v1.b[3] +; NONEON-NOSVE-NEXT: smov w18, v0.b[3] +; NONEON-NOSVE-NEXT: smov w1, v1.b[4] +; NONEON-NOSVE-NEXT: smov w2, v0.b[4] +; NONEON-NOSVE-NEXT: smov w4, v1.b[5] +; NONEON-NOSVE-NEXT: smov w5, v0.b[5] +; NONEON-NOSVE-NEXT: sdiv w13, w12, w11 +; NONEON-NOSVE-NEXT: smov w7, v1.b[6] +; NONEON-NOSVE-NEXT: smov w19, v0.b[6] +; NONEON-NOSVE-NEXT: smov w21, v1.b[7] +; NONEON-NOSVE-NEXT: smov w22, v0.b[7] +; NONEON-NOSVE-NEXT: smov w24, v1.b[8] +; NONEON-NOSVE-NEXT: smov w25, v0.b[8] +; NONEON-NOSVE-NEXT: smov w27, v1.b[9] +; NONEON-NOSVE-NEXT: smov w28, v0.b[9] +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: smov w13, v1.b[11] +; NONEON-NOSVE-NEXT: fmov s2, w11 +; NONEON-NOSVE-NEXT: smov w11, v0.b[10] +; NONEON-NOSVE-NEXT: sdiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.b[10] +; NONEON-NOSVE-NEXT: mov v2.b[1], w8 +; NONEON-NOSVE-NEXT: sdiv w0, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: smov w14, v0.b[11] +; NONEON-NOSVE-NEXT: smov w16, v1.b[12] +; NONEON-NOSVE-NEXT: mov v2.b[2], w8 +; NONEON-NOSVE-NEXT: sdiv w3, w2, w1 +; NONEON-NOSVE-NEXT: 
msub w8, w0, w17, w18 +; NONEON-NOSVE-NEXT: smov w17, v0.b[12] +; NONEON-NOSVE-NEXT: smov w0, v1.b[13] +; NONEON-NOSVE-NEXT: mov v2.b[3], w8 +; NONEON-NOSVE-NEXT: sdiv w6, w5, w4 +; NONEON-NOSVE-NEXT: msub w8, w3, w1, w2 +; NONEON-NOSVE-NEXT: smov w1, v0.b[13] +; NONEON-NOSVE-NEXT: mov v2.b[4], w8 +; NONEON-NOSVE-NEXT: sdiv w20, w19, w7 +; NONEON-NOSVE-NEXT: msub w8, w6, w4, w5 +; NONEON-NOSVE-NEXT: mov v2.b[5], w8 +; NONEON-NOSVE-NEXT: sdiv w23, w22, w21 +; NONEON-NOSVE-NEXT: msub w8, w20, w7, w19 +; NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v2.b[6], w8 +; NONEON-NOSVE-NEXT: sdiv w26, w25, w24 +; NONEON-NOSVE-NEXT: msub w8, w23, w21, w22 +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v2.b[7], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w28, w27 +; NONEON-NOSVE-NEXT: msub w8, w26, w24, w25 +; NONEON-NOSVE-NEXT: ldp x24, x23, [sp, #32] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x26, x25, [sp, #16] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v2.b[8], w8 +; NONEON-NOSVE-NEXT: sdiv w12, w11, w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w27, w28 +; NONEON-NOSVE-NEXT: mov v2.b[9], w8 +; NONEON-NOSVE-NEXT: sdiv w15, w14, w13 +; NONEON-NOSVE-NEXT: msub w8, w12, w10, w11 +; NONEON-NOSVE-NEXT: smov w10, v1.b[14] +; NONEON-NOSVE-NEXT: smov w11, v0.b[14] +; NONEON-NOSVE-NEXT: mov v2.b[10], w8 +; NONEON-NOSVE-NEXT: sdiv w18, w17, w16 +; NONEON-NOSVE-NEXT: msub w8, w15, w13, w14 +; NONEON-NOSVE-NEXT: smov w13, v1.b[15] +; NONEON-NOSVE-NEXT: smov w14, v0.b[15] +; NONEON-NOSVE-NEXT: mov v2.b[11], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w1, w0 +; NONEON-NOSVE-NEXT: msub w8, w18, w16, w17 +; NONEON-NOSVE-NEXT: mov v2.b[12], w8 +; NONEON-NOSVE-NEXT: sdiv w12, w11, w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w0, w1 +; NONEON-NOSVE-NEXT: mov v2.b[13], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w14, w13 +; NONEON-NOSVE-NEXT: msub w8, w12, w10, w11 +; NONEON-NOSVE-NEXT: mov v2.b[14], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w13, w14 +; NONEON-NOSVE-NEXT: mov v2.b[15], w8 +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ldp x28, x27, [sp], #80 // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ret %res = srem <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -189,6 +372,279 @@ define void @srem_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: mls z2.b, p0/m, z7.b, z4.b ; CHECK-NEXT: stp q2, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub sp, sp, #320 +; NONEON-NOSVE-NEXT: stp x29, x30, [sp, #224] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x28, x27, [sp, #240] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x26, x25, [sp, #256] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x24, x23, [sp, #272] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #288] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #304] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 320 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -40 +; NONEON-NOSVE-NEXT: .cfi_offset w24, -48 +; NONEON-NOSVE-NEXT: .cfi_offset w25, -56 +; NONEON-NOSVE-NEXT: .cfi_offset w26, -64 +; NONEON-NOSVE-NEXT: .cfi_offset w27, -72 +; NONEON-NOSVE-NEXT: .cfi_offset w28, -80 +; NONEON-NOSVE-NEXT: .cfi_offset w30, -88 +; NONEON-NOSVE-NEXT: .cfi_offset w29, -96 +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; 
NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: str x0, [sp, #216] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w8, v1.b[1] +; NONEON-NOSVE-NEXT: smov w9, v0.b[1] +; NONEON-NOSVE-NEXT: smov w4, v3.b[1] +; NONEON-NOSVE-NEXT: smov w1, v2.b[1] +; NONEON-NOSVE-NEXT: smov w7, v3.b[7] +; NONEON-NOSVE-NEXT: smov w5, v2.b[7] +; NONEON-NOSVE-NEXT: smov w6, v3.b[8] +; NONEON-NOSVE-NEXT: smov w3, v2.b[8] +; NONEON-NOSVE-NEXT: smov w22, v3.b[9] +; NONEON-NOSVE-NEXT: smov w20, v2.b[9] +; NONEON-NOSVE-NEXT: smov w13, v3.b[0] +; NONEON-NOSVE-NEXT: smov w17, v3.b[3] +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: str w8, [sp, #100] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w8, v1.b[0] +; NONEON-NOSVE-NEXT: str w9, [sp, #108] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w9, v0.b[0] +; NONEON-NOSVE-NEXT: smov w14, v2.b[3] +; NONEON-NOSVE-NEXT: smov w15, v3.b[4] +; NONEON-NOSVE-NEXT: smov w12, v2.b[4] +; NONEON-NOSVE-NEXT: smov w2, v3.b[5] +; NONEON-NOSVE-NEXT: smov w18, v2.b[5] +; NONEON-NOSVE-NEXT: smov w0, v3.b[6] +; NONEON-NOSVE-NEXT: smov w16, v2.b[6] +; NONEON-NOSVE-NEXT: smov w21, v3.b[10] +; NONEON-NOSVE-NEXT: smov w19, v2.b[10] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #36] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: ldr w30, [sp, #36] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: str w10, [sp, #116] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[2] +; NONEON-NOSVE-NEXT: smov w9, v0.b[2] +; NONEON-NOSVE-NEXT: stp w10, w8, [sp, #44] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[3] +; NONEON-NOSVE-NEXT: stp w9, w10, [sp, #52] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w9, v0.b[3] +; NONEON-NOSVE-NEXT: sdiv w26, w14, w17 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #72] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w11, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[4] +; NONEON-NOSVE-NEXT: smov w9, v0.b[4] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #60] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[5] +; NONEON-NOSVE-NEXT: smov w9, v0.b[5] +; NONEON-NOSVE-NEXT: str w8, [sp, #96] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w9, [sp, #104] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #68] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[6] +; NONEON-NOSVE-NEXT: smov w9, v0.b[6] +; NONEON-NOSVE-NEXT: stp w11, w8, [sp, #80] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #112] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[7] +; NONEON-NOSVE-NEXT: stp w9, w10, [sp, #88] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w9, v0.b[7] +; NONEON-NOSVE-NEXT: sdiv w25, w12, w15 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #132] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[8] +; NONEON-NOSVE-NEXT: smov w9, v0.b[8] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #120] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #140] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[9] +; NONEON-NOSVE-NEXT: smov w9, v0.b[9] +; NONEON-NOSVE-NEXT: str w8, [sp, #148] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w9, [sp, #156] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w11, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[10] +; NONEON-NOSVE-NEXT: 
smov w9, v0.b[10] +; NONEON-NOSVE-NEXT: str w10, [sp, #128] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #204] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[11] +; NONEON-NOSVE-NEXT: smov w9, v0.b[11] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #192] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #212] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[12] +; NONEON-NOSVE-NEXT: smov w9, v0.b[12] +; NONEON-NOSVE-NEXT: str w8, [sp, #172] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w9, [sp, #180] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #200] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[13] +; NONEON-NOSVE-NEXT: smov w9, v0.b[13] +; NONEON-NOSVE-NEXT: stp w11, w8, [sp, #164] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w11, v3.b[2] +; NONEON-NOSVE-NEXT: str w9, [sp, #176] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #188] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.b[14] +; NONEON-NOSVE-NEXT: smov w9, v0.b[14] +; NONEON-NOSVE-NEXT: str w8, [sp, #144] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w9, [sp, #152] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #184] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w9, v2.b[2] +; NONEON-NOSVE-NEXT: sdiv w8, w1, w4 +; NONEON-NOSVE-NEXT: str w10, [sp, #160] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w10, v2.b[0] +; NONEON-NOSVE-NEXT: str w8, [sp, #24] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w8, w5, w7 +; NONEON-NOSVE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w8, w3, w6 +; NONEON-NOSVE-NEXT: str w8, [sp, #20] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w8, w20, w22 +; NONEON-NOSVE-NEXT: sdiv w24, w10, w13 +; NONEON-NOSVE-NEXT: str w8, [sp, #32] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: ldp w29, w8, [sp, #40] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w8, w8, w30, w29 +; NONEON-NOSVE-NEXT: ldp x29, x30, [sp, #224] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: fmov s4, w8 +; NONEON-NOSVE-NEXT: sdiv w23, w9, w11 +; NONEON-NOSVE-NEXT: msub w10, w24, w13, w10 +; NONEON-NOSVE-NEXT: ldr w13, [sp, #24] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldr w24, [sp, #100] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w13, w13, w4, w1 +; NONEON-NOSVE-NEXT: ldr w1, [sp, #116] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldr w4, [sp, #108] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: fmov s5, w10 +; NONEON-NOSVE-NEXT: msub w1, w1, w24, w4 +; NONEON-NOSVE-NEXT: mov v5.b[1], w13 +; NONEON-NOSVE-NEXT: mov v4.b[1], w1 +; NONEON-NOSVE-NEXT: ldr w1, [sp, #120] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w8, w23, w11, w9 +; NONEON-NOSVE-NEXT: ldr w11, [sp, #48] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w28, w18, w2 +; NONEON-NOSVE-NEXT: ldp w10, w9, [sp, #52] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x24, x23, [sp, #272] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[2], w8 +; NONEON-NOSVE-NEXT: msub w8, w26, w17, w14 +; NONEON-NOSVE-NEXT: ldr w14, [sp, #72] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w11, w10 +; NONEON-NOSVE-NEXT: ldr w17, [sp, #96] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: smov w10, v3.b[11] +; NONEON-NOSVE-NEXT: smov w11, v2.b[11] +; NONEON-NOSVE-NEXT: mov v4.b[2], w9 +; NONEON-NOSVE-NEXT: mov v5.b[3], w8 +; 
NONEON-NOSVE-NEXT: msub w8, w25, w15, w12 +; NONEON-NOSVE-NEXT: ldp w13, w9, [sp, #76] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w27, w16, w0 +; NONEON-NOSVE-NEXT: ldr w15, [sp, #104] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x26, x25, [sp, #256] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w14, w13 +; NONEON-NOSVE-NEXT: ldr w14, [sp, #60] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[4], w8 +; NONEON-NOSVE-NEXT: msub w8, w28, w2, w18 +; NONEON-NOSVE-NEXT: ldr w2, [sp, #156] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[3], w9 +; NONEON-NOSVE-NEXT: ldp w12, w9, [sp, #64] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[5], w8 +; NONEON-NOSVE-NEXT: msub w8, w27, w0, w16 +; NONEON-NOSVE-NEXT: ldr w0, [sp, #132] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w4, w19, w21 +; NONEON-NOSVE-NEXT: msub w9, w9, w14, w12 +; NONEON-NOSVE-NEXT: smov w12, v3.b[12] +; NONEON-NOSVE-NEXT: smov w14, v2.b[12] +; NONEON-NOSVE-NEXT: ldp x28, x27, [sp, #240] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[6], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #28] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[4], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #112] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w8, w8, w7, w5 +; NONEON-NOSVE-NEXT: ldr w5, [sp, #204] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w17, w15 +; NONEON-NOSVE-NEXT: ldr w17, [sp, #84] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[7], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #20] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w13, w11, w10 +; NONEON-NOSVE-NEXT: mov v4.b[5], w9 +; NONEON-NOSVE-NEXT: ldp w16, w9, [sp, #88] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w8, w8, w6, w3 +; NONEON-NOSVE-NEXT: ldr w3, [sp, #148] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w17, w16 +; NONEON-NOSVE-NEXT: smov w16, v3.b[13] +; NONEON-NOSVE-NEXT: smov w17, v2.b[13] +; NONEON-NOSVE-NEXT: mov v5.b[8], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #32] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[6], w9 +; NONEON-NOSVE-NEXT: msub w8, w8, w22, w20 +; NONEON-NOSVE-NEXT: sdiv w15, w14, w12 +; NONEON-NOSVE-NEXT: ldp w18, w9, [sp, #136] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[9], w8 +; NONEON-NOSVE-NEXT: msub w8, w4, w21, w19 +; NONEON-NOSVE-NEXT: msub w9, w9, w0, w18 +; NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #304] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #288] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[7], w9 +; NONEON-NOSVE-NEXT: mov v5.b[10], w8 +; NONEON-NOSVE-NEXT: msub w8, w13, w10, w11 +; NONEON-NOSVE-NEXT: ldp w0, w9, [sp, #124] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp w11, w10, [sp, #196] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ldr w13, [sp, #192] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w18, w17, w16 +; NONEON-NOSVE-NEXT: msub w9, w9, w1, w0 +; NONEON-NOSVE-NEXT: mov v5.b[11], w8 +; NONEON-NOSVE-NEXT: smov w0, v3.b[14] +; NONEON-NOSVE-NEXT: msub w10, w10, w13, w11 +; NONEON-NOSVE-NEXT: smov w1, v2.b[14] +; NONEON-NOSVE-NEXT: msub w8, w15, w12, w14 +; NONEON-NOSVE-NEXT: mov v4.b[8], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #164] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp w15, w13, [sp, #168] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w3, w2 +; NONEON-NOSVE-NEXT: mov v5.b[12], w8 +; NONEON-NOSVE-NEXT: ldp w4, w3, [sp, #208] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp w14, w12, [sp, #176] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: mov 
v4.b[9], w9 +; NONEON-NOSVE-NEXT: sdiv w2, w1, w0 +; NONEON-NOSVE-NEXT: smov w9, v3.b[15] +; NONEON-NOSVE-NEXT: msub w3, w3, w5, w4 +; NONEON-NOSVE-NEXT: smov w4, v2.b[15] +; NONEON-NOSVE-NEXT: msub w8, w18, w16, w17 +; NONEON-NOSVE-NEXT: ldr w16, [sp, #144] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[10], w3 +; NONEON-NOSVE-NEXT: mov v5.b[13], w8 +; NONEON-NOSVE-NEXT: mov v4.b[11], w10 +; NONEON-NOSVE-NEXT: ldr w10, [sp, #188] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w11, w4, w9 +; NONEON-NOSVE-NEXT: msub w8, w2, w0, w1 +; NONEON-NOSVE-NEXT: msub w10, w10, w13, w12 +; NONEON-NOSVE-NEXT: smov w12, v1.b[15] +; NONEON-NOSVE-NEXT: smov w13, v0.b[15] +; NONEON-NOSVE-NEXT: mov v5.b[14], w8 +; NONEON-NOSVE-NEXT: mov v4.b[12], w10 +; NONEON-NOSVE-NEXT: ldr w10, [sp, #184] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w10, w10, w15, w14 +; NONEON-NOSVE-NEXT: ldr w15, [sp, #152] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w14, w13, w12 +; NONEON-NOSVE-NEXT: msub w8, w11, w9, w4 +; NONEON-NOSVE-NEXT: mov v4.b[13], w10 +; NONEON-NOSVE-NEXT: ldr w10, [sp, #160] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[15], w8 +; NONEON-NOSVE-NEXT: ldr x8, [sp, #216] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w10, w10, w16, w15 +; NONEON-NOSVE-NEXT: mov v4.b[14], w10 +; NONEON-NOSVE-NEXT: msub w9, w14, w12, w13 +; NONEON-NOSVE-NEXT: mov v4.b[15], w9 +; NONEON-NOSVE-NEXT: stp q5, q4, [x8] +; NONEON-NOSVE-NEXT: add sp, sp, #320 +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = srem <32 x i8> %op1, %op2 @@ -210,6 +666,33 @@ define <4 x i16> @srem_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: smov w11, v1.h[0] +; NONEON-NOSVE-NEXT: smov w12, v0.h[0] +; NONEON-NOSVE-NEXT: smov w8, v1.h[1] +; NONEON-NOSVE-NEXT: smov w9, v0.h[1] +; NONEON-NOSVE-NEXT: smov w14, v1.h[2] +; NONEON-NOSVE-NEXT: smov w15, v0.h[2] +; NONEON-NOSVE-NEXT: smov w17, v1.h[3] +; NONEON-NOSVE-NEXT: smov w18, v0.h[3] +; NONEON-NOSVE-NEXT: sdiv w13, w12, w11 +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: fmov s0, w11 +; NONEON-NOSVE-NEXT: sdiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: mov v0.h[1], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: mov v0.h[2], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w17, w18 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = srem <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -238,6 +721,51 @@ define <8 x i16> @srem_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: mls z0.h, p0/m, z3.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: smov w11, v1.h[0] +; NONEON-NOSVE-NEXT: smov w12, v0.h[0] +; NONEON-NOSVE-NEXT: smov w8, v1.h[1] +; NONEON-NOSVE-NEXT: smov w9, v0.h[1] +; NONEON-NOSVE-NEXT: smov w14, v1.h[2] +; NONEON-NOSVE-NEXT: smov w15, v0.h[2] +; NONEON-NOSVE-NEXT: smov w17, v1.h[3] +; NONEON-NOSVE-NEXT: smov w18, v0.h[3] +; NONEON-NOSVE-NEXT: smov w1, v1.h[4] +; 
NONEON-NOSVE-NEXT: smov w2, v0.h[4] +; NONEON-NOSVE-NEXT: smov w4, v1.h[5] +; NONEON-NOSVE-NEXT: smov w5, v0.h[5] +; NONEON-NOSVE-NEXT: sdiv w13, w12, w11 +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: smov w13, v1.h[7] +; NONEON-NOSVE-NEXT: fmov s2, w11 +; NONEON-NOSVE-NEXT: smov w11, v0.h[6] +; NONEON-NOSVE-NEXT: sdiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: smov w10, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[1], w8 +; NONEON-NOSVE-NEXT: sdiv w0, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: smov w14, v0.h[7] +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: sdiv w3, w2, w1 +; NONEON-NOSVE-NEXT: msub w8, w0, w17, w18 +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w5, w4 +; NONEON-NOSVE-NEXT: msub w8, w3, w1, w2 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: sdiv w12, w11, w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w4, w5 +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w14, w13 +; NONEON-NOSVE-NEXT: msub w8, w12, w10, w11 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w13, w14 +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = srem <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -282,6 +810,139 @@ define void @srem_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: mls z0.h, p0/m, z7.h, z1.h ; CHECK-NEXT: stp q2, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub sp, sp, #144 +; NONEON-NOSVE-NEXT: stp x29, x30, [sp, #48] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x28, x27, [sp, #64] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x26, x25, [sp, #80] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x24, x23, [sp, #96] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #112] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 144 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -40 +; NONEON-NOSVE-NEXT: .cfi_offset w24, -48 +; NONEON-NOSVE-NEXT: .cfi_offset w25, -56 +; NONEON-NOSVE-NEXT: .cfi_offset w26, -64 +; NONEON-NOSVE-NEXT: .cfi_offset w27, -72 +; NONEON-NOSVE-NEXT: .cfi_offset w28, -80 +; NONEON-NOSVE-NEXT: .cfi_offset w30, -88 +; NONEON-NOSVE-NEXT: .cfi_offset w29, -96 +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: smov w8, v1.h[1] +; NONEON-NOSVE-NEXT: smov w9, v0.h[1] +; NONEON-NOSVE-NEXT: smov w20, v1.h[0] +; NONEON-NOSVE-NEXT: smov w21, v0.h[0] +; NONEON-NOSVE-NEXT: smov w19, v0.h[3] +; NONEON-NOSVE-NEXT: smov w5, v1.h[4] +; NONEON-NOSVE-NEXT: smov w2, v0.h[4] +; NONEON-NOSVE-NEXT: smov w1, v3.h[1] +; NONEON-NOSVE-NEXT: smov w23, v2.h[1] +; NONEON-NOSVE-NEXT: smov w25, v3.h[0] +; NONEON-NOSVE-NEXT: smov w26, v2.h[0] +; NONEON-NOSVE-NEXT: smov w6, v1.h[5] +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #36] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w8, v1.h[2] +; NONEON-NOSVE-NEXT: smov w9, v0.h[2] +; NONEON-NOSVE-NEXT: smov w3, v0.h[5] +; NONEON-NOSVE-NEXT: smov w4, v1.h[6] +; NONEON-NOSVE-NEXT: smov w7, v0.h[6] +; NONEON-NOSVE-NEXT: 
smov w28, v3.h[2] +; NONEON-NOSVE-NEXT: smov w29, v2.h[2] +; NONEON-NOSVE-NEXT: smov w15, v3.h[3] +; NONEON-NOSVE-NEXT: smov w13, v2.h[3] +; NONEON-NOSVE-NEXT: smov w12, v3.h[4] +; NONEON-NOSVE-NEXT: smov w14, v3.h[5] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #24] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w11, w21, w20 +; NONEON-NOSVE-NEXT: str w10, [sp, #44] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: smov w8, v1.h[3] +; NONEON-NOSVE-NEXT: stp w8, w11, [sp] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w11, v2.h[4] +; NONEON-NOSVE-NEXT: ldr w22, [sp, #4] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w20, w22, w20, w21 +; NONEON-NOSVE-NEXT: sdiv w9, w19, w8 +; NONEON-NOSVE-NEXT: str w10, [sp, #32] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w10, v3.h[6] +; NONEON-NOSVE-NEXT: fmov s5, w20 +; NONEON-NOSVE-NEXT: smov w20, v3.h[7] +; NONEON-NOSVE-NEXT: sdiv w8, w2, w5 +; NONEON-NOSVE-NEXT: sdiv w24, w23, w1 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #16] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: sdiv w27, w26, w25 +; NONEON-NOSVE-NEXT: msub w1, w24, w1, w23 +; NONEON-NOSVE-NEXT: ldp w24, w23, [sp, #40] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w9, w3, w6 +; NONEON-NOSVE-NEXT: msub w21, w27, w25, w26 +; NONEON-NOSVE-NEXT: ldr w25, [sp, #36] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w23, w23, w25, w24 +; NONEON-NOSVE-NEXT: ldr w25, [sp, #24] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: fmov s4, w21 +; NONEON-NOSVE-NEXT: mov v5.h[1], w23 +; NONEON-NOSVE-NEXT: ldp w23, w21, [sp, #28] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.h[1], w1 +; NONEON-NOSVE-NEXT: sdiv w8, w7, w4 +; NONEON-NOSVE-NEXT: msub w21, w21, w25, w23 +; NONEON-NOSVE-NEXT: smov w23, v2.h[7] +; NONEON-NOSVE-NEXT: ldp x26, x25, [sp, #80] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.h[2], w21 +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #112] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: sdiv w30, w29, w28 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #8] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: smov w9, v2.h[5] +; NONEON-NOSVE-NEXT: smov w8, v2.h[6] +; NONEON-NOSVE-NEXT: sdiv w18, w13, w15 +; NONEON-NOSVE-NEXT: msub w1, w30, w28, w29 +; NONEON-NOSVE-NEXT: ldp x28, x27, [sp, #64] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x29, x30, [sp, #48] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.h[2], w1 +; NONEON-NOSVE-NEXT: sdiv w16, w11, w12 +; NONEON-NOSVE-NEXT: msub w13, w18, w15, w13 +; NONEON-NOSVE-NEXT: ldr w15, [sp, #20] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldr w18, [sp] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w15, w15, w18, w19 +; NONEON-NOSVE-NEXT: mov v4.h[3], w13 +; NONEON-NOSVE-NEXT: smov w13, v1.h[7] +; NONEON-NOSVE-NEXT: mov v5.h[3], w15 +; NONEON-NOSVE-NEXT: smov w15, v0.h[7] +; NONEON-NOSVE-NEXT: sdiv w17, w9, w14 +; NONEON-NOSVE-NEXT: msub w11, w16, w12, w11 +; NONEON-NOSVE-NEXT: ldr w12, [sp, #16] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w12, w12, w5, w2 +; NONEON-NOSVE-NEXT: mov v4.h[4], w11 +; NONEON-NOSVE-NEXT: ldr w11, [sp, #12] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.h[4], w12 +; NONEON-NOSVE-NEXT: msub w11, w11, w6, w3 +; NONEON-NOSVE-NEXT: sdiv w24, w8, w10 +; NONEON-NOSVE-NEXT: msub w9, w17, w14, w9 +; NONEON-NOSVE-NEXT: mov v5.h[5], w11 +; NONEON-NOSVE-NEXT: mov v4.h[5], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #8] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w4, w7 +; NONEON-NOSVE-NEXT: sdiv w18, w23, w20 +; NONEON-NOSVE-NEXT: msub w8, w24, w10, w8 +; 
NONEON-NOSVE-NEXT: mov v5.h[6], w9 +; NONEON-NOSVE-NEXT: mov v4.h[6], w8 +; NONEON-NOSVE-NEXT: sdiv w12, w15, w13 +; NONEON-NOSVE-NEXT: msub w8, w18, w20, w23 +; NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #128] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x24, x23, [sp, #96] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.h[7], w8 +; NONEON-NOSVE-NEXT: msub w9, w12, w13, w15 +; NONEON-NOSVE-NEXT: mov v5.h[7], w9 +; NONEON-NOSVE-NEXT: stp q4, q5, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #144 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = srem <16 x i16> %op1, %op2 @@ -300,6 +961,23 @@ define <2 x i32> @srem_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: mls z0.s, p0/m, z2.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: fmov w9, s0 +; NONEON-NOSVE-NEXT: mov w11, v1.s[1] +; NONEON-NOSVE-NEXT: mov w12, v0.s[1] +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: sdiv w13, w12, w11 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: msub w9, w13, w11, w12 +; NONEON-NOSVE-NEXT: mov v0.s[1], w9 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = srem <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -315,6 +993,30 @@ define <4 x i32> @srem_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: mls z0.s, p0/m, z2.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov w11, s1 +; NONEON-NOSVE-NEXT: fmov w12, s0 +; NONEON-NOSVE-NEXT: mov w8, v1.s[1] +; NONEON-NOSVE-NEXT: mov w9, v0.s[1] +; NONEON-NOSVE-NEXT: mov w14, v1.s[2] +; NONEON-NOSVE-NEXT: mov w15, v0.s[2] +; NONEON-NOSVE-NEXT: mov w17, v1.s[3] +; NONEON-NOSVE-NEXT: mov w18, v0.s[3] +; NONEON-NOSVE-NEXT: sdiv w13, w12, w11 +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: fmov s0, w11 +; NONEON-NOSVE-NEXT: sdiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: mov v0.s[1], w8 +; NONEON-NOSVE-NEXT: sdiv w9, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: mov v0.s[2], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w17, w18 +; NONEON-NOSVE-NEXT: mov v0.s[3], w8 +; NONEON-NOSVE-NEXT: ret %res = srem <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -334,6 +1036,65 @@ define void @srem_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: mls z1.s, p0/m, z5.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str x23, [sp, #-48]! 
// 8-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -48 +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: fmov w12, s0 +; NONEON-NOSVE-NEXT: fmov w3, s2 +; NONEON-NOSVE-NEXT: mov w9, v0.s[1] +; NONEON-NOSVE-NEXT: fmov w11, s1 +; NONEON-NOSVE-NEXT: fmov w2, s3 +; NONEON-NOSVE-NEXT: mov w8, v1.s[1] +; NONEON-NOSVE-NEXT: mov w17, v3.s[1] +; NONEON-NOSVE-NEXT: mov w18, v2.s[1] +; NONEON-NOSVE-NEXT: mov w14, v1.s[2] +; NONEON-NOSVE-NEXT: mov w15, v0.s[2] +; NONEON-NOSVE-NEXT: mov w5, v3.s[2] +; NONEON-NOSVE-NEXT: mov w6, v2.s[2] +; NONEON-NOSVE-NEXT: sdiv w13, w12, w11 +; NONEON-NOSVE-NEXT: mov w19, v3.s[3] +; NONEON-NOSVE-NEXT: mov w20, v2.s[3] +; NONEON-NOSVE-NEXT: mov w22, v1.s[3] +; NONEON-NOSVE-NEXT: mov w23, v0.s[3] +; NONEON-NOSVE-NEXT: sdiv w4, w3, w2 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: fmov s1, w11 +; NONEON-NOSVE-NEXT: sdiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w12, w4, w2, w3 +; NONEON-NOSVE-NEXT: fmov s0, w12 +; NONEON-NOSVE-NEXT: sdiv w1, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: mov v1.s[1], w8 +; NONEON-NOSVE-NEXT: sdiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w13, w1, w17, w18 +; NONEON-NOSVE-NEXT: mov v0.s[1], w13 +; NONEON-NOSVE-NEXT: sdiv w7, w6, w5 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: mov v1.s[2], w8 +; NONEON-NOSVE-NEXT: sdiv w21, w20, w19 +; NONEON-NOSVE-NEXT: msub w10, w7, w5, w6 +; NONEON-NOSVE-NEXT: mov v0.s[2], w10 +; NONEON-NOSVE-NEXT: sdiv w9, w23, w22 +; NONEON-NOSVE-NEXT: msub w10, w21, w19, w20 +; NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v0.s[3], w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w22, w23 +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v1.s[3], w8 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ldr x23, [sp], #48 // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = srem <8 x i32> %op1, %op2 @@ -352,6 +1113,17 @@ define <1 x i64> @srem_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: mls z0.d, p0/m, z2.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: sdiv x10, x9, x8 +; NONEON-NOSVE-NEXT: msub x8, x10, x8, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %res = srem <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -367,6 +1139,20 @@ define <2 x i64> @srem_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: mls z0.d, p0/m, z2.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: mov x11, v1.d[1] +; NONEON-NOSVE-NEXT: mov x12, v0.d[1] +; NONEON-NOSVE-NEXT: sdiv x10, x9, x8 +; NONEON-NOSVE-NEXT: 
sdiv x13, x12, x11 +; NONEON-NOSVE-NEXT: msub x8, x10, x8, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: msub x9, x13, x11, x12 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; NONEON-NOSVE-NEXT: ret %res = srem <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -386,6 +1172,33 @@ define void @srem_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: mls z1.d, p0/m, z5.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: srem_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: fmov x15, d2 +; NONEON-NOSVE-NEXT: mov x12, v2.d[1] +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x14, d3 +; NONEON-NOSVE-NEXT: mov x11, v3.d[1] +; NONEON-NOSVE-NEXT: mov x17, v1.d[1] +; NONEON-NOSVE-NEXT: mov x18, v0.d[1] +; NONEON-NOSVE-NEXT: sdiv x10, x9, x8 +; NONEON-NOSVE-NEXT: sdiv x16, x15, x14 +; NONEON-NOSVE-NEXT: msub x8, x10, x8, x9 +; NONEON-NOSVE-NEXT: fmov d1, x8 +; NONEON-NOSVE-NEXT: sdiv x13, x12, x11 +; NONEON-NOSVE-NEXT: msub x10, x16, x14, x15 +; NONEON-NOSVE-NEXT: fmov d0, x10 +; NONEON-NOSVE-NEXT: sdiv x1, x18, x17 +; NONEON-NOSVE-NEXT: msub x9, x13, x11, x12 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; NONEON-NOSVE-NEXT: msub x11, x1, x17, x18 +; NONEON-NOSVE-NEXT: mov v1.d[1], x11 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = srem <4 x i64> %op1, %op2 @@ -413,6 +1226,41 @@ define <4 x i8> @urem_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: umov w11, v1.h[0] +; NONEON-NOSVE-NEXT: umov w12, v0.h[0] +; NONEON-NOSVE-NEXT: umov w8, v1.h[1] +; NONEON-NOSVE-NEXT: umov w9, v0.h[1] +; NONEON-NOSVE-NEXT: umov w14, v1.h[2] +; NONEON-NOSVE-NEXT: umov w15, v0.h[2] +; NONEON-NOSVE-NEXT: umov w17, v1.h[3] +; NONEON-NOSVE-NEXT: umov w18, v0.h[3] +; NONEON-NOSVE-NEXT: and w11, w11, #0xff +; NONEON-NOSVE-NEXT: and w12, w12, #0xff +; NONEON-NOSVE-NEXT: and w8, w8, #0xff +; NONEON-NOSVE-NEXT: udiv w13, w12, w11 +; NONEON-NOSVE-NEXT: and w9, w9, #0xff +; NONEON-NOSVE-NEXT: and w14, w14, #0xff +; NONEON-NOSVE-NEXT: and w15, w15, #0xff +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: and w12, w17, #0xff +; NONEON-NOSVE-NEXT: and w13, w18, #0xff +; NONEON-NOSVE-NEXT: fmov s0, w11 +; NONEON-NOSVE-NEXT: udiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: mov v0.h[1], w8 +; NONEON-NOSVE-NEXT: udiv w9, w13, w12 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: mov v0.h[2], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w12, w13 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = urem <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -442,6 +1290,53 @@ define <8 x i8> @urem_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: mls z0.b, p0/m, z2.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: umov w11, 
v1.b[0] +; NONEON-NOSVE-NEXT: umov w12, v0.b[0] +; NONEON-NOSVE-NEXT: umov w8, v1.b[1] +; NONEON-NOSVE-NEXT: umov w9, v0.b[1] +; NONEON-NOSVE-NEXT: umov w14, v1.b[2] +; NONEON-NOSVE-NEXT: umov w15, v0.b[2] +; NONEON-NOSVE-NEXT: umov w17, v1.b[3] +; NONEON-NOSVE-NEXT: umov w18, v0.b[3] +; NONEON-NOSVE-NEXT: umov w1, v1.b[4] +; NONEON-NOSVE-NEXT: umov w2, v0.b[4] +; NONEON-NOSVE-NEXT: umov w4, v1.b[5] +; NONEON-NOSVE-NEXT: umov w5, v0.b[5] +; NONEON-NOSVE-NEXT: udiv w13, w12, w11 +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: umov w13, v1.b[7] +; NONEON-NOSVE-NEXT: fmov s2, w11 +; NONEON-NOSVE-NEXT: umov w11, v0.b[6] +; NONEON-NOSVE-NEXT: udiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.b[6] +; NONEON-NOSVE-NEXT: mov v2.b[1], w8 +; NONEON-NOSVE-NEXT: udiv w0, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: umov w14, v0.b[7] +; NONEON-NOSVE-NEXT: mov v2.b[2], w8 +; NONEON-NOSVE-NEXT: udiv w3, w2, w1 +; NONEON-NOSVE-NEXT: msub w8, w0, w17, w18 +; NONEON-NOSVE-NEXT: mov v2.b[3], w8 +; NONEON-NOSVE-NEXT: udiv w9, w5, w4 +; NONEON-NOSVE-NEXT: msub w8, w3, w1, w2 +; NONEON-NOSVE-NEXT: mov v2.b[4], w8 +; NONEON-NOSVE-NEXT: udiv w12, w11, w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w4, w5 +; NONEON-NOSVE-NEXT: mov v2.b[5], w8 +; NONEON-NOSVE-NEXT: udiv w9, w14, w13 +; NONEON-NOSVE-NEXT: msub w8, w12, w10, w11 +; NONEON-NOSVE-NEXT: mov v2.b[6], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w13, w14 +; NONEON-NOSVE-NEXT: mov v2.b[7], w8 +; NONEON-NOSVE-NEXT: fmov d0, d2 +; NONEON-NOSVE-NEXT: ret %res = urem <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -491,6 +1386,112 @@ define <16 x i8> @urem_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: mls z0.b, p0/m, z3.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: stp x28, x27, [sp, #-80]! 
// 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x26, x25, [sp, #16] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x24, x23, [sp, #32] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 80 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -40 +; NONEON-NOSVE-NEXT: .cfi_offset w24, -48 +; NONEON-NOSVE-NEXT: .cfi_offset w25, -56 +; NONEON-NOSVE-NEXT: .cfi_offset w26, -64 +; NONEON-NOSVE-NEXT: .cfi_offset w27, -72 +; NONEON-NOSVE-NEXT: .cfi_offset w28, -80 +; NONEON-NOSVE-NEXT: umov w11, v1.b[0] +; NONEON-NOSVE-NEXT: umov w12, v0.b[0] +; NONEON-NOSVE-NEXT: umov w8, v1.b[1] +; NONEON-NOSVE-NEXT: umov w9, v0.b[1] +; NONEON-NOSVE-NEXT: umov w14, v1.b[2] +; NONEON-NOSVE-NEXT: umov w15, v0.b[2] +; NONEON-NOSVE-NEXT: umov w17, v1.b[3] +; NONEON-NOSVE-NEXT: umov w18, v0.b[3] +; NONEON-NOSVE-NEXT: umov w1, v1.b[4] +; NONEON-NOSVE-NEXT: umov w2, v0.b[4] +; NONEON-NOSVE-NEXT: umov w4, v1.b[5] +; NONEON-NOSVE-NEXT: umov w5, v0.b[5] +; NONEON-NOSVE-NEXT: udiv w13, w12, w11 +; NONEON-NOSVE-NEXT: umov w7, v1.b[6] +; NONEON-NOSVE-NEXT: umov w19, v0.b[6] +; NONEON-NOSVE-NEXT: umov w21, v1.b[7] +; NONEON-NOSVE-NEXT: umov w22, v0.b[7] +; NONEON-NOSVE-NEXT: umov w24, v1.b[8] +; NONEON-NOSVE-NEXT: umov w25, v0.b[8] +; NONEON-NOSVE-NEXT: umov w27, v1.b[9] +; NONEON-NOSVE-NEXT: umov w28, v0.b[9] +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: umov w13, v1.b[11] +; NONEON-NOSVE-NEXT: fmov s2, w11 +; NONEON-NOSVE-NEXT: umov w11, v0.b[10] +; NONEON-NOSVE-NEXT: udiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.b[10] +; NONEON-NOSVE-NEXT: mov v2.b[1], w8 +; NONEON-NOSVE-NEXT: udiv w0, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: umov w14, v0.b[11] +; NONEON-NOSVE-NEXT: umov w16, v1.b[12] +; NONEON-NOSVE-NEXT: mov v2.b[2], w8 +; NONEON-NOSVE-NEXT: udiv w3, w2, w1 +; NONEON-NOSVE-NEXT: msub w8, w0, w17, w18 +; NONEON-NOSVE-NEXT: umov w17, v0.b[12] +; NONEON-NOSVE-NEXT: umov w0, v1.b[13] +; NONEON-NOSVE-NEXT: mov v2.b[3], w8 +; NONEON-NOSVE-NEXT: udiv w6, w5, w4 +; NONEON-NOSVE-NEXT: msub w8, w3, w1, w2 +; NONEON-NOSVE-NEXT: umov w1, v0.b[13] +; NONEON-NOSVE-NEXT: mov v2.b[4], w8 +; NONEON-NOSVE-NEXT: udiv w20, w19, w7 +; NONEON-NOSVE-NEXT: msub w8, w6, w4, w5 +; NONEON-NOSVE-NEXT: mov v2.b[5], w8 +; NONEON-NOSVE-NEXT: udiv w23, w22, w21 +; NONEON-NOSVE-NEXT: msub w8, w20, w7, w19 +; NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v2.b[6], w8 +; NONEON-NOSVE-NEXT: udiv w26, w25, w24 +; NONEON-NOSVE-NEXT: msub w8, w23, w21, w22 +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v2.b[7], w8 +; NONEON-NOSVE-NEXT: udiv w9, w28, w27 +; NONEON-NOSVE-NEXT: msub w8, w26, w24, w25 +; NONEON-NOSVE-NEXT: ldp x24, x23, [sp, #32] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x26, x25, [sp, #16] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v2.b[8], w8 +; NONEON-NOSVE-NEXT: udiv w12, w11, w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w27, w28 +; NONEON-NOSVE-NEXT: mov v2.b[9], w8 +; NONEON-NOSVE-NEXT: udiv w15, w14, w13 +; NONEON-NOSVE-NEXT: msub w8, w12, w10, w11 +; NONEON-NOSVE-NEXT: umov 
w10, v1.b[14] +; NONEON-NOSVE-NEXT: umov w11, v0.b[14] +; NONEON-NOSVE-NEXT: mov v2.b[10], w8 +; NONEON-NOSVE-NEXT: udiv w18, w17, w16 +; NONEON-NOSVE-NEXT: msub w8, w15, w13, w14 +; NONEON-NOSVE-NEXT: umov w13, v1.b[15] +; NONEON-NOSVE-NEXT: umov w14, v0.b[15] +; NONEON-NOSVE-NEXT: mov v2.b[11], w8 +; NONEON-NOSVE-NEXT: udiv w9, w1, w0 +; NONEON-NOSVE-NEXT: msub w8, w18, w16, w17 +; NONEON-NOSVE-NEXT: mov v2.b[12], w8 +; NONEON-NOSVE-NEXT: udiv w12, w11, w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w0, w1 +; NONEON-NOSVE-NEXT: mov v2.b[13], w8 +; NONEON-NOSVE-NEXT: udiv w9, w14, w13 +; NONEON-NOSVE-NEXT: msub w8, w12, w10, w11 +; NONEON-NOSVE-NEXT: mov v2.b[14], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w13, w14 +; NONEON-NOSVE-NEXT: mov v2.b[15], w8 +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ldp x28, x27, [sp], #80 // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ret %res = urem <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -578,6 +1579,279 @@ define void @urem_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: mls z2.b, p0/m, z7.b, z4.b ; CHECK-NEXT: stp q2, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub sp, sp, #320 +; NONEON-NOSVE-NEXT: stp x29, x30, [sp, #224] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x28, x27, [sp, #240] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x26, x25, [sp, #256] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x24, x23, [sp, #272] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #288] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #304] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 320 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -40 +; NONEON-NOSVE-NEXT: .cfi_offset w24, -48 +; NONEON-NOSVE-NEXT: .cfi_offset w25, -56 +; NONEON-NOSVE-NEXT: .cfi_offset w26, -64 +; NONEON-NOSVE-NEXT: .cfi_offset w27, -72 +; NONEON-NOSVE-NEXT: .cfi_offset w28, -80 +; NONEON-NOSVE-NEXT: .cfi_offset w30, -88 +; NONEON-NOSVE-NEXT: .cfi_offset w29, -96 +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: str x0, [sp, #216] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w8, v1.b[1] +; NONEON-NOSVE-NEXT: umov w9, v0.b[1] +; NONEON-NOSVE-NEXT: umov w4, v3.b[1] +; NONEON-NOSVE-NEXT: umov w1, v2.b[1] +; NONEON-NOSVE-NEXT: umov w7, v3.b[7] +; NONEON-NOSVE-NEXT: umov w5, v2.b[7] +; NONEON-NOSVE-NEXT: umov w6, v3.b[8] +; NONEON-NOSVE-NEXT: umov w3, v2.b[8] +; NONEON-NOSVE-NEXT: umov w22, v3.b[9] +; NONEON-NOSVE-NEXT: umov w20, v2.b[9] +; NONEON-NOSVE-NEXT: umov w13, v3.b[0] +; NONEON-NOSVE-NEXT: umov w17, v3.b[3] +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: str w8, [sp, #100] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w8, v1.b[0] +; NONEON-NOSVE-NEXT: str w9, [sp, #108] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w9, v0.b[0] +; NONEON-NOSVE-NEXT: umov w14, v2.b[3] +; NONEON-NOSVE-NEXT: umov w15, v3.b[4] +; NONEON-NOSVE-NEXT: umov w12, v2.b[4] +; NONEON-NOSVE-NEXT: umov w2, v3.b[5] +; NONEON-NOSVE-NEXT: umov w18, v2.b[5] +; NONEON-NOSVE-NEXT: umov w0, v3.b[6] +; NONEON-NOSVE-NEXT: umov w16, v2.b[6] +; NONEON-NOSVE-NEXT: umov w21, v3.b[10] +; NONEON-NOSVE-NEXT: umov w19, v2.b[10] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #36] // 8-byte Folded Spill +; 
NONEON-NOSVE-NEXT: ldr w30, [sp, #36] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: str w10, [sp, #116] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[2] +; NONEON-NOSVE-NEXT: umov w9, v0.b[2] +; NONEON-NOSVE-NEXT: stp w10, w8, [sp, #44] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[3] +; NONEON-NOSVE-NEXT: stp w9, w10, [sp, #52] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w9, v0.b[3] +; NONEON-NOSVE-NEXT: udiv w26, w14, w17 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #72] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w11, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[4] +; NONEON-NOSVE-NEXT: umov w9, v0.b[4] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #60] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[5] +; NONEON-NOSVE-NEXT: umov w9, v0.b[5] +; NONEON-NOSVE-NEXT: str w8, [sp, #96] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w9, [sp, #104] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #68] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[6] +; NONEON-NOSVE-NEXT: umov w9, v0.b[6] +; NONEON-NOSVE-NEXT: stp w11, w8, [sp, #80] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #112] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[7] +; NONEON-NOSVE-NEXT: stp w9, w10, [sp, #88] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w9, v0.b[7] +; NONEON-NOSVE-NEXT: udiv w25, w12, w15 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #132] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[8] +; NONEON-NOSVE-NEXT: umov w9, v0.b[8] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #120] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #140] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[9] +; NONEON-NOSVE-NEXT: umov w9, v0.b[9] +; NONEON-NOSVE-NEXT: str w8, [sp, #148] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w9, [sp, #156] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w11, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[10] +; NONEON-NOSVE-NEXT: umov w9, v0.b[10] +; NONEON-NOSVE-NEXT: str w10, [sp, #128] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #204] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[11] +; NONEON-NOSVE-NEXT: umov w9, v0.b[11] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #192] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #212] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[12] +; NONEON-NOSVE-NEXT: umov w9, v0.b[12] +; NONEON-NOSVE-NEXT: str w8, [sp, #172] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w9, [sp, #180] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #200] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[13] +; NONEON-NOSVE-NEXT: umov w9, v0.b[13] +; NONEON-NOSVE-NEXT: stp w11, w8, [sp, #164] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w11, v3.b[2] +; NONEON-NOSVE-NEXT: str w9, [sp, #176] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #188] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.b[14] +; NONEON-NOSVE-NEXT: umov w9, v0.b[14] +; NONEON-NOSVE-NEXT: str w8, [sp, #144] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: str w9, [sp, #152] // 4-byte Folded 
Spill +; NONEON-NOSVE-NEXT: str w10, [sp, #184] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w9, v2.b[2] +; NONEON-NOSVE-NEXT: udiv w8, w1, w4 +; NONEON-NOSVE-NEXT: str w10, [sp, #160] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w10, v2.b[0] +; NONEON-NOSVE-NEXT: str w8, [sp, #24] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w8, w5, w7 +; NONEON-NOSVE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w8, w3, w6 +; NONEON-NOSVE-NEXT: str w8, [sp, #20] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w8, w20, w22 +; NONEON-NOSVE-NEXT: udiv w24, w10, w13 +; NONEON-NOSVE-NEXT: str w8, [sp, #32] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: ldp w29, w8, [sp, #40] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w8, w8, w30, w29 +; NONEON-NOSVE-NEXT: ldp x29, x30, [sp, #224] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: fmov s4, w8 +; NONEON-NOSVE-NEXT: udiv w23, w9, w11 +; NONEON-NOSVE-NEXT: msub w10, w24, w13, w10 +; NONEON-NOSVE-NEXT: ldr w13, [sp, #24] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldr w24, [sp, #100] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w13, w13, w4, w1 +; NONEON-NOSVE-NEXT: ldr w1, [sp, #116] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldr w4, [sp, #108] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: fmov s5, w10 +; NONEON-NOSVE-NEXT: msub w1, w1, w24, w4 +; NONEON-NOSVE-NEXT: mov v5.b[1], w13 +; NONEON-NOSVE-NEXT: mov v4.b[1], w1 +; NONEON-NOSVE-NEXT: ldr w1, [sp, #120] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w8, w23, w11, w9 +; NONEON-NOSVE-NEXT: ldr w11, [sp, #48] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w28, w18, w2 +; NONEON-NOSVE-NEXT: ldp w10, w9, [sp, #52] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x24, x23, [sp, #272] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[2], w8 +; NONEON-NOSVE-NEXT: msub w8, w26, w17, w14 +; NONEON-NOSVE-NEXT: ldr w14, [sp, #72] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w11, w10 +; NONEON-NOSVE-NEXT: ldr w17, [sp, #96] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: umov w10, v3.b[11] +; NONEON-NOSVE-NEXT: umov w11, v2.b[11] +; NONEON-NOSVE-NEXT: mov v4.b[2], w9 +; NONEON-NOSVE-NEXT: mov v5.b[3], w8 +; NONEON-NOSVE-NEXT: msub w8, w25, w15, w12 +; NONEON-NOSVE-NEXT: ldp w13, w9, [sp, #76] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w27, w16, w0 +; NONEON-NOSVE-NEXT: ldr w15, [sp, #104] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x26, x25, [sp, #256] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w14, w13 +; NONEON-NOSVE-NEXT: ldr w14, [sp, #60] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[4], w8 +; NONEON-NOSVE-NEXT: msub w8, w28, w2, w18 +; NONEON-NOSVE-NEXT: ldr w2, [sp, #156] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[3], w9 +; NONEON-NOSVE-NEXT: ldp w12, w9, [sp, #64] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[5], w8 +; NONEON-NOSVE-NEXT: msub w8, w27, w0, w16 +; NONEON-NOSVE-NEXT: ldr w0, [sp, #132] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w4, w19, w21 +; NONEON-NOSVE-NEXT: msub w9, w9, w14, w12 +; NONEON-NOSVE-NEXT: umov w12, v3.b[12] +; NONEON-NOSVE-NEXT: umov w14, v2.b[12] +; NONEON-NOSVE-NEXT: ldp x28, x27, [sp, #240] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[6], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #28] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[4], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #112] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w8, w8, w7, w5 +; NONEON-NOSVE-NEXT: ldr w5, 
[sp, #204] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w17, w15 +; NONEON-NOSVE-NEXT: ldr w17, [sp, #84] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[7], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #20] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w13, w11, w10 +; NONEON-NOSVE-NEXT: mov v4.b[5], w9 +; NONEON-NOSVE-NEXT: ldp w16, w9, [sp, #88] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w8, w8, w6, w3 +; NONEON-NOSVE-NEXT: ldr w3, [sp, #148] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w17, w16 +; NONEON-NOSVE-NEXT: umov w16, v3.b[13] +; NONEON-NOSVE-NEXT: umov w17, v2.b[13] +; NONEON-NOSVE-NEXT: mov v5.b[8], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #32] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[6], w9 +; NONEON-NOSVE-NEXT: msub w8, w8, w22, w20 +; NONEON-NOSVE-NEXT: udiv w15, w14, w12 +; NONEON-NOSVE-NEXT: ldp w18, w9, [sp, #136] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[9], w8 +; NONEON-NOSVE-NEXT: msub w8, w4, w21, w19 +; NONEON-NOSVE-NEXT: msub w9, w9, w0, w18 +; NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #304] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #288] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[7], w9 +; NONEON-NOSVE-NEXT: mov v5.b[10], w8 +; NONEON-NOSVE-NEXT: msub w8, w13, w10, w11 +; NONEON-NOSVE-NEXT: ldp w0, w9, [sp, #124] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp w11, w10, [sp, #196] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ldr w13, [sp, #192] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w18, w17, w16 +; NONEON-NOSVE-NEXT: msub w9, w9, w1, w0 +; NONEON-NOSVE-NEXT: mov v5.b[11], w8 +; NONEON-NOSVE-NEXT: umov w0, v3.b[14] +; NONEON-NOSVE-NEXT: msub w10, w10, w13, w11 +; NONEON-NOSVE-NEXT: umov w1, v2.b[14] +; NONEON-NOSVE-NEXT: msub w8, w15, w12, w14 +; NONEON-NOSVE-NEXT: mov v4.b[8], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #164] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp w15, w13, [sp, #168] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w3, w2 +; NONEON-NOSVE-NEXT: mov v5.b[12], w8 +; NONEON-NOSVE-NEXT: ldp w4, w3, [sp, #208] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp w14, w12, [sp, #176] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[9], w9 +; NONEON-NOSVE-NEXT: udiv w2, w1, w0 +; NONEON-NOSVE-NEXT: umov w9, v3.b[15] +; NONEON-NOSVE-NEXT: msub w3, w3, w5, w4 +; NONEON-NOSVE-NEXT: umov w4, v2.b[15] +; NONEON-NOSVE-NEXT: msub w8, w18, w16, w17 +; NONEON-NOSVE-NEXT: ldr w16, [sp, #144] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.b[10], w3 +; NONEON-NOSVE-NEXT: mov v5.b[13], w8 +; NONEON-NOSVE-NEXT: mov v4.b[11], w10 +; NONEON-NOSVE-NEXT: ldr w10, [sp, #188] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w11, w4, w9 +; NONEON-NOSVE-NEXT: msub w8, w2, w0, w1 +; NONEON-NOSVE-NEXT: msub w10, w10, w13, w12 +; NONEON-NOSVE-NEXT: umov w12, v1.b[15] +; NONEON-NOSVE-NEXT: umov w13, v0.b[15] +; NONEON-NOSVE-NEXT: mov v5.b[14], w8 +; NONEON-NOSVE-NEXT: mov v4.b[12], w10 +; NONEON-NOSVE-NEXT: ldr w10, [sp, #184] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w10, w10, w15, w14 +; NONEON-NOSVE-NEXT: ldr w15, [sp, #152] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w14, w13, w12 +; NONEON-NOSVE-NEXT: msub w8, w11, w9, w4 +; NONEON-NOSVE-NEXT: mov v4.b[13], w10 +; NONEON-NOSVE-NEXT: ldr w10, [sp, #160] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.b[15], w8 +; NONEON-NOSVE-NEXT: ldr x8, [sp, #216] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w10, w10, w16, w15 +; NONEON-NOSVE-NEXT: mov v4.b[14], w10 +; 
NONEON-NOSVE-NEXT: msub w9, w14, w12, w13 +; NONEON-NOSVE-NEXT: mov v4.b[15], w9 +; NONEON-NOSVE-NEXT: stp q5, q4, [x8] +; NONEON-NOSVE-NEXT: add sp, sp, #320 +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = urem <32 x i8> %op1, %op2 @@ -599,6 +1873,33 @@ define <4 x i16> @urem_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: umov w11, v1.h[0] +; NONEON-NOSVE-NEXT: umov w12, v0.h[0] +; NONEON-NOSVE-NEXT: umov w8, v1.h[1] +; NONEON-NOSVE-NEXT: umov w9, v0.h[1] +; NONEON-NOSVE-NEXT: umov w14, v1.h[2] +; NONEON-NOSVE-NEXT: umov w15, v0.h[2] +; NONEON-NOSVE-NEXT: umov w17, v1.h[3] +; NONEON-NOSVE-NEXT: umov w18, v0.h[3] +; NONEON-NOSVE-NEXT: udiv w13, w12, w11 +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: fmov s0, w11 +; NONEON-NOSVE-NEXT: udiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: mov v0.h[1], w8 +; NONEON-NOSVE-NEXT: udiv w9, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: mov v0.h[2], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w17, w18 +; NONEON-NOSVE-NEXT: mov v0.h[3], w8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = urem <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -627,6 +1928,51 @@ define <8 x i16> @urem_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: mls z0.h, p0/m, z3.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: umov w11, v1.h[0] +; NONEON-NOSVE-NEXT: umov w12, v0.h[0] +; NONEON-NOSVE-NEXT: umov w8, v1.h[1] +; NONEON-NOSVE-NEXT: umov w9, v0.h[1] +; NONEON-NOSVE-NEXT: umov w14, v1.h[2] +; NONEON-NOSVE-NEXT: umov w15, v0.h[2] +; NONEON-NOSVE-NEXT: umov w17, v1.h[3] +; NONEON-NOSVE-NEXT: umov w18, v0.h[3] +; NONEON-NOSVE-NEXT: umov w1, v1.h[4] +; NONEON-NOSVE-NEXT: umov w2, v0.h[4] +; NONEON-NOSVE-NEXT: umov w4, v1.h[5] +; NONEON-NOSVE-NEXT: umov w5, v0.h[5] +; NONEON-NOSVE-NEXT: udiv w13, w12, w11 +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: umov w13, v1.h[7] +; NONEON-NOSVE-NEXT: fmov s2, w11 +; NONEON-NOSVE-NEXT: umov w11, v0.h[6] +; NONEON-NOSVE-NEXT: udiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: umov w10, v1.h[6] +; NONEON-NOSVE-NEXT: mov v2.h[1], w8 +; NONEON-NOSVE-NEXT: udiv w0, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: umov w14, v0.h[7] +; NONEON-NOSVE-NEXT: mov v2.h[2], w8 +; NONEON-NOSVE-NEXT: udiv w3, w2, w1 +; NONEON-NOSVE-NEXT: msub w8, w0, w17, w18 +; NONEON-NOSVE-NEXT: mov v2.h[3], w8 +; NONEON-NOSVE-NEXT: udiv w9, w5, w4 +; NONEON-NOSVE-NEXT: msub w8, w3, w1, w2 +; NONEON-NOSVE-NEXT: mov v2.h[4], w8 +; NONEON-NOSVE-NEXT: udiv w12, w11, w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w4, w5 +; NONEON-NOSVE-NEXT: mov v2.h[5], w8 +; NONEON-NOSVE-NEXT: udiv w9, w14, w13 +; NONEON-NOSVE-NEXT: msub w8, w12, w10, w11 +; NONEON-NOSVE-NEXT: mov v2.h[6], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w13, w14 +; NONEON-NOSVE-NEXT: mov v2.h[7], w8 +; NONEON-NOSVE-NEXT: mov v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %res = urem <8 x 
i16> %op1, %op2 ret <8 x i16> %res } @@ -671,6 +2017,139 @@ define void @urem_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: mls z0.h, p0/m, z7.h, z1.h ; CHECK-NEXT: stp q2, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub sp, sp, #144 +; NONEON-NOSVE-NEXT: stp x29, x30, [sp, #48] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x28, x27, [sp, #64] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x26, x25, [sp, #80] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x24, x23, [sp, #96] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #112] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 144 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -40 +; NONEON-NOSVE-NEXT: .cfi_offset w24, -48 +; NONEON-NOSVE-NEXT: .cfi_offset w25, -56 +; NONEON-NOSVE-NEXT: .cfi_offset w26, -64 +; NONEON-NOSVE-NEXT: .cfi_offset w27, -72 +; NONEON-NOSVE-NEXT: .cfi_offset w28, -80 +; NONEON-NOSVE-NEXT: .cfi_offset w30, -88 +; NONEON-NOSVE-NEXT: .cfi_offset w29, -96 +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q2, [x0] +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: umov w8, v1.h[1] +; NONEON-NOSVE-NEXT: umov w9, v0.h[1] +; NONEON-NOSVE-NEXT: umov w20, v1.h[0] +; NONEON-NOSVE-NEXT: umov w21, v0.h[0] +; NONEON-NOSVE-NEXT: umov w19, v0.h[3] +; NONEON-NOSVE-NEXT: umov w5, v1.h[4] +; NONEON-NOSVE-NEXT: umov w2, v0.h[4] +; NONEON-NOSVE-NEXT: umov w1, v3.h[1] +; NONEON-NOSVE-NEXT: umov w23, v2.h[1] +; NONEON-NOSVE-NEXT: umov w25, v3.h[0] +; NONEON-NOSVE-NEXT: umov w26, v2.h[0] +; NONEON-NOSVE-NEXT: umov w6, v1.h[5] +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #36] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w8, v1.h[2] +; NONEON-NOSVE-NEXT: umov w9, v0.h[2] +; NONEON-NOSVE-NEXT: umov w3, v0.h[5] +; NONEON-NOSVE-NEXT: umov w4, v1.h[6] +; NONEON-NOSVE-NEXT: umov w7, v0.h[6] +; NONEON-NOSVE-NEXT: umov w28, v3.h[2] +; NONEON-NOSVE-NEXT: umov w29, v2.h[2] +; NONEON-NOSVE-NEXT: umov w15, v3.h[3] +; NONEON-NOSVE-NEXT: umov w13, v2.h[3] +; NONEON-NOSVE-NEXT: umov w12, v3.h[4] +; NONEON-NOSVE-NEXT: umov w14, v3.h[5] +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #24] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w11, w21, w20 +; NONEON-NOSVE-NEXT: str w10, [sp, #44] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: umov w8, v1.h[3] +; NONEON-NOSVE-NEXT: stp w8, w11, [sp] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w11, v2.h[4] +; NONEON-NOSVE-NEXT: ldr w22, [sp, #4] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w20, w22, w20, w21 +; NONEON-NOSVE-NEXT: udiv w9, w19, w8 +; NONEON-NOSVE-NEXT: str w10, [sp, #32] // 4-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w10, v3.h[6] +; NONEON-NOSVE-NEXT: fmov s5, w20 +; NONEON-NOSVE-NEXT: umov w20, v3.h[7] +; NONEON-NOSVE-NEXT: udiv w8, w2, w5 +; NONEON-NOSVE-NEXT: udiv w24, w23, w1 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #16] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: udiv w27, w26, w25 +; NONEON-NOSVE-NEXT: msub w1, w24, w1, w23 +; NONEON-NOSVE-NEXT: ldp w24, w23, [sp, #40] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w9, w3, w6 +; NONEON-NOSVE-NEXT: msub w21, w27, w25, w26 +; NONEON-NOSVE-NEXT: ldr w25, [sp, #36] // 
4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w23, w23, w25, w24 +; NONEON-NOSVE-NEXT: ldr w25, [sp, #24] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: fmov s4, w21 +; NONEON-NOSVE-NEXT: mov v5.h[1], w23 +; NONEON-NOSVE-NEXT: ldp w23, w21, [sp, #28] // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.h[1], w1 +; NONEON-NOSVE-NEXT: udiv w8, w7, w4 +; NONEON-NOSVE-NEXT: msub w21, w21, w25, w23 +; NONEON-NOSVE-NEXT: umov w23, v2.h[7] +; NONEON-NOSVE-NEXT: ldp x26, x25, [sp, #80] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.h[2], w21 +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #112] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: udiv w30, w29, w28 +; NONEON-NOSVE-NEXT: stp w8, w9, [sp, #8] // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: umov w9, v2.h[5] +; NONEON-NOSVE-NEXT: umov w8, v2.h[6] +; NONEON-NOSVE-NEXT: udiv w18, w13, w15 +; NONEON-NOSVE-NEXT: msub w1, w30, w28, w29 +; NONEON-NOSVE-NEXT: ldp x28, x27, [sp, #64] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x29, x30, [sp, #48] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.h[2], w1 +; NONEON-NOSVE-NEXT: udiv w16, w11, w12 +; NONEON-NOSVE-NEXT: msub w13, w18, w15, w13 +; NONEON-NOSVE-NEXT: ldr w15, [sp, #20] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: ldr w18, [sp] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w15, w15, w18, w19 +; NONEON-NOSVE-NEXT: mov v4.h[3], w13 +; NONEON-NOSVE-NEXT: umov w13, v1.h[7] +; NONEON-NOSVE-NEXT: mov v5.h[3], w15 +; NONEON-NOSVE-NEXT: umov w15, v0.h[7] +; NONEON-NOSVE-NEXT: udiv w17, w9, w14 +; NONEON-NOSVE-NEXT: msub w11, w16, w12, w11 +; NONEON-NOSVE-NEXT: ldr w12, [sp, #16] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w12, w12, w5, w2 +; NONEON-NOSVE-NEXT: mov v4.h[4], w11 +; NONEON-NOSVE-NEXT: ldr w11, [sp, #12] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v5.h[4], w12 +; NONEON-NOSVE-NEXT: msub w11, w11, w6, w3 +; NONEON-NOSVE-NEXT: udiv w24, w8, w10 +; NONEON-NOSVE-NEXT: msub w9, w17, w14, w9 +; NONEON-NOSVE-NEXT: mov v5.h[5], w11 +; NONEON-NOSVE-NEXT: mov v4.h[5], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #8] // 4-byte Folded Reload +; NONEON-NOSVE-NEXT: msub w9, w9, w4, w7 +; NONEON-NOSVE-NEXT: udiv w18, w23, w20 +; NONEON-NOSVE-NEXT: msub w8, w24, w10, w8 +; NONEON-NOSVE-NEXT: mov v5.h[6], w9 +; NONEON-NOSVE-NEXT: mov v4.h[6], w8 +; NONEON-NOSVE-NEXT: udiv w12, w15, w13 +; NONEON-NOSVE-NEXT: msub w8, w18, w20, w23 +; NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #128] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: ldp x24, x23, [sp, #96] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v4.h[7], w8 +; NONEON-NOSVE-NEXT: msub w9, w12, w13, w15 +; NONEON-NOSVE-NEXT: mov v5.h[7], w9 +; NONEON-NOSVE-NEXT: stp q4, q5, [x0] +; NONEON-NOSVE-NEXT: add sp, sp, #144 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = urem <16 x i16> %op1, %op2 @@ -689,6 +2168,23 @@ define <2 x i32> @urem_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: mls z0.s, p0/m, z2.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: fmov w9, s0 +; NONEON-NOSVE-NEXT: mov w11, v1.s[1] +; NONEON-NOSVE-NEXT: mov w12, v0.s[1] +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: udiv w13, w12, w11 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: msub 
w9, w13, w11, w12 +; NONEON-NOSVE-NEXT: mov v0.s[1], w9 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = urem <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -704,6 +2200,30 @@ define <4 x i32> @urem_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: mls z0.s, p0/m, z2.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov w11, s1 +; NONEON-NOSVE-NEXT: fmov w12, s0 +; NONEON-NOSVE-NEXT: mov w8, v1.s[1] +; NONEON-NOSVE-NEXT: mov w9, v0.s[1] +; NONEON-NOSVE-NEXT: mov w14, v1.s[2] +; NONEON-NOSVE-NEXT: mov w15, v0.s[2] +; NONEON-NOSVE-NEXT: mov w17, v1.s[3] +; NONEON-NOSVE-NEXT: mov w18, v0.s[3] +; NONEON-NOSVE-NEXT: udiv w13, w12, w11 +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: fmov s0, w11 +; NONEON-NOSVE-NEXT: udiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: mov v0.s[1], w8 +; NONEON-NOSVE-NEXT: udiv w9, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: mov v0.s[2], w8 +; NONEON-NOSVE-NEXT: msub w8, w9, w17, w18 +; NONEON-NOSVE-NEXT: mov v0.s[3], w8 +; NONEON-NOSVE-NEXT: ret %res = urem <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -723,6 +2243,65 @@ define void @urem_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: mls z1.s, p0/m, z5.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str x23, [sp, #-48]! // 8-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: .cfi_offset w19, -8 +; NONEON-NOSVE-NEXT: .cfi_offset w20, -16 +; NONEON-NOSVE-NEXT: .cfi_offset w21, -24 +; NONEON-NOSVE-NEXT: .cfi_offset w22, -32 +; NONEON-NOSVE-NEXT: .cfi_offset w23, -48 +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: fmov w12, s0 +; NONEON-NOSVE-NEXT: fmov w3, s2 +; NONEON-NOSVE-NEXT: mov w9, v0.s[1] +; NONEON-NOSVE-NEXT: fmov w11, s1 +; NONEON-NOSVE-NEXT: fmov w2, s3 +; NONEON-NOSVE-NEXT: mov w8, v1.s[1] +; NONEON-NOSVE-NEXT: mov w17, v3.s[1] +; NONEON-NOSVE-NEXT: mov w18, v2.s[1] +; NONEON-NOSVE-NEXT: mov w14, v1.s[2] +; NONEON-NOSVE-NEXT: mov w15, v0.s[2] +; NONEON-NOSVE-NEXT: mov w5, v3.s[2] +; NONEON-NOSVE-NEXT: mov w6, v2.s[2] +; NONEON-NOSVE-NEXT: udiv w13, w12, w11 +; NONEON-NOSVE-NEXT: mov w19, v3.s[3] +; NONEON-NOSVE-NEXT: mov w20, v2.s[3] +; NONEON-NOSVE-NEXT: mov w22, v1.s[3] +; NONEON-NOSVE-NEXT: mov w23, v0.s[3] +; NONEON-NOSVE-NEXT: udiv w4, w3, w2 +; NONEON-NOSVE-NEXT: msub w11, w13, w11, w12 +; NONEON-NOSVE-NEXT: fmov s1, w11 +; NONEON-NOSVE-NEXT: udiv w10, w9, w8 +; NONEON-NOSVE-NEXT: msub w12, w4, w2, w3 +; NONEON-NOSVE-NEXT: fmov s0, w12 +; NONEON-NOSVE-NEXT: udiv w1, w18, w17 +; NONEON-NOSVE-NEXT: msub w8, w10, w8, w9 +; NONEON-NOSVE-NEXT: mov v1.s[1], w8 +; NONEON-NOSVE-NEXT: udiv w16, w15, w14 +; NONEON-NOSVE-NEXT: msub w13, w1, w17, w18 +; NONEON-NOSVE-NEXT: mov v0.s[1], w13 +; NONEON-NOSVE-NEXT: udiv w7, w6, w5 +; NONEON-NOSVE-NEXT: msub w8, w16, w14, w15 +; NONEON-NOSVE-NEXT: mov v1.s[2], w8 +; NONEON-NOSVE-NEXT: udiv w21, w20, w19 +; NONEON-NOSVE-NEXT: msub w10, w7, w5, w6 +; NONEON-NOSVE-NEXT: mov v0.s[2], w10 +; NONEON-NOSVE-NEXT: udiv w9, w23, w22 +; NONEON-NOSVE-NEXT: msub w10, w21, w19, w20 +; 
NONEON-NOSVE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v0.s[3], w10 +; NONEON-NOSVE-NEXT: msub w8, w9, w22, w23 +; NONEON-NOSVE-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload +; NONEON-NOSVE-NEXT: mov v1.s[3], w8 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ldr x23, [sp], #48 // 8-byte Folded Reload +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = urem <8 x i32> %op1, %op2 @@ -741,6 +2320,17 @@ define <1 x i64> @urem_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: mls z0.d, p0/m, z2.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d1 killed $d1 def $q1 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: udiv x10, x9, x8 +; NONEON-NOSVE-NEXT: msub x8, x10, x8, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: ret %res = urem <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -756,6 +2346,20 @@ define <2 x i64> @urem_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: mls z0.d, p0/m, z2.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: mov x11, v1.d[1] +; NONEON-NOSVE-NEXT: mov x12, v0.d[1] +; NONEON-NOSVE-NEXT: udiv x10, x9, x8 +; NONEON-NOSVE-NEXT: udiv x13, x12, x11 +; NONEON-NOSVE-NEXT: msub x8, x10, x8, x9 +; NONEON-NOSVE-NEXT: fmov d0, x8 +; NONEON-NOSVE-NEXT: msub x9, x13, x11, x12 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; NONEON-NOSVE-NEXT: ret %res = urem <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -775,6 +2379,33 @@ define void @urem_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: mls z1.d, p0/m, z5.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: urem_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q1, [x1] +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: fmov x15, d2 +; NONEON-NOSVE-NEXT: mov x12, v2.d[1] +; NONEON-NOSVE-NEXT: fmov x8, d1 +; NONEON-NOSVE-NEXT: fmov x14, d3 +; NONEON-NOSVE-NEXT: mov x11, v3.d[1] +; NONEON-NOSVE-NEXT: mov x17, v1.d[1] +; NONEON-NOSVE-NEXT: mov x18, v0.d[1] +; NONEON-NOSVE-NEXT: udiv x10, x9, x8 +; NONEON-NOSVE-NEXT: udiv x16, x15, x14 +; NONEON-NOSVE-NEXT: msub x8, x10, x8, x9 +; NONEON-NOSVE-NEXT: fmov d1, x8 +; NONEON-NOSVE-NEXT: udiv x13, x12, x11 +; NONEON-NOSVE-NEXT: msub x10, x16, x14, x15 +; NONEON-NOSVE-NEXT: fmov d0, x10 +; NONEON-NOSVE-NEXT: udiv x1, x18, x17 +; NONEON-NOSVE-NEXT: msub x9, x13, x11, x12 +; NONEON-NOSVE-NEXT: mov v0.d[1], x9 +; NONEON-NOSVE-NEXT: msub x11, x1, x17, x18 +; NONEON-NOSVE-NEXT: mov v1.d[1], x11 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = urem <4 x i64> %op1, %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll index 906112f7ac39e4..b3adf4720ece8f 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc 
-mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -16,6 +17,14 @@ define <4 x i8> @select_v4i8(<4 x i8> %op1, <4 x i8> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.4h, w8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <4 x i8> %op1, <4 x i8> %op2 ret <4 x i8> %sel } @@ -31,6 +40,14 @@ define <8 x i8> @select_v8i8(<8 x i8> %op1, <8 x i8> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.8b, w8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <8 x i8> %op1, <8 x i8> %op2 ret <8 x i8> %sel } @@ -46,6 +63,14 @@ define <16 x i8> @select_v16i8(<16 x i8> %op1, <16 x i8> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.16b, w8 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <16 x i8> %op1, <16 x i8> %op2 ret <16 x i8> %sel } @@ -64,6 +89,20 @@ define void @select_v32i8(ptr %a, ptr %b, i1 %mask) { ; CHECK-NEXT: sel z1.b, p0, z1.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w2, #0x1 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x0, #16] +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: ldr q4, [x1, #16] +; NONEON-NOSVE-NEXT: dup v0.16b, w8 +; NONEON-NOSVE-NEXT: bif v1.16b, v3.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load volatile <32 x i8>, ptr %a %op2 = load volatile <32 x i8>, ptr %b %sel = select i1 %mask, <32 x i8> %op1, <32 x i8> %op2 @@ -83,6 +122,14 @@ define <2 x i16> @select_v2i16(<2 x i16> %op1, <2 x i16> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.2s, w8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <2 x i16> %op1, <2 x i16> %op2 ret <2 x i16> %sel } @@ -99,6 +146,14 @@ define <4 x i16> @select_v4i16(<4 x i16> %op1, <4 x i16> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.4h, w8 +; NONEON-NOSVE-NEXT: bif 
v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <4 x i16> %op1, <4 x i16> %op2 ret <4 x i16> %sel } @@ -115,6 +170,14 @@ define <8 x i16> @select_v8i16(<8 x i16> %op1, <8 x i16> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.8h, w8 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <8 x i16> %op1, <8 x i16> %op2 ret <8 x i16> %sel } @@ -134,6 +197,20 @@ define void @select_v16i16(ptr %a, ptr %b, i1 %mask) { ; CHECK-NEXT: sel z1.h, p0, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w2, #0x1 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x0, #16] +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: ldr q4, [x1, #16] +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: bif v1.16b, v3.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load volatile <16 x i16>, ptr %a %op2 = load volatile <16 x i16>, ptr %b %sel = select i1 %mask, <16 x i16> %op1, <16 x i16> %op2 @@ -153,6 +230,14 @@ define <2 x i32> @select_v2i32(<2 x i32> %op1, <2 x i32> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.2s, w8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <2 x i32> %op1, <2 x i32> %op2 ret <2 x i32> %sel } @@ -169,6 +254,14 @@ define <4 x i32> @select_v4i32(<4 x i32> %op1, <4 x i32> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: dup v2.4s, w8 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <4 x i32> %op1, <4 x i32> %op2 ret <4 x i32> %sel } @@ -188,6 +281,20 @@ define void @select_v8i32(ptr %a, ptr %b, i1 %mask) { ; CHECK-NEXT: sel z1.s, p0, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w2, #0x1 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x0, #16] +; NONEON-NOSVE-NEXT: csetm w8, ne +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: ldr q4, [x1, #16] +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: bif v1.16b, v3.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load volatile <8 x i32>, ptr %a %op2 = load volatile <8 x i32>, ptr %b %sel = select i1 %mask, <8 x i32> %op1, <8 x i32> %op2 @@ -208,6 +315,14 @@ define <1 x i64> @select_v1i64(<1 x i64> %op1, <1 x i64> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v1i64: +; NONEON-NOSVE: // %bb.0: +; 
NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm x8, ne +; NONEON-NOSVE-NEXT: fmov d2, x8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <1 x i64> %op1, <1 x i64> %op2 ret <1 x i64> %sel } @@ -225,6 +340,14 @@ define <2 x i64> @select_v2i64(<2 x i64> %op1, <2 x i64> %op2, i1 %mask) { ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm x8, ne +; NONEON-NOSVE-NEXT: dup v2.2d, x8 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select i1 %mask, <2 x i64> %op1, <2 x i64> %op2 ret <2 x i64> %sel } @@ -245,6 +368,20 @@ define void @select_v4i64(ptr %a, ptr %b, i1 %mask) { ; CHECK-NEXT: sel z1.d, p0, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w2, #0x1 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q2, [x0, #16] +; NONEON-NOSVE-NEXT: csetm x8, ne +; NONEON-NOSVE-NEXT: ldr q3, [x1] +; NONEON-NOSVE-NEXT: ldr q4, [x1, #16] +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: bif v1.16b, v3.16b, v0.16b +; NONEON-NOSVE-NEXT: bsl v0.16b, v2.16b, v4.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load volatile <4 x i64>, ptr %a %op2 = load volatile <4 x i64>, ptr %b %sel = select i1 %mask, <4 x i64> %op1, <4 x i64> %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll index 9ed52e321d9ab1..a429cd82a44993 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -19,6 +20,16 @@ define <4 x i8> @ashr_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d2, #0xff00ff00ff00ff +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: sshr v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: neg v1.4h, v1.4h +; NONEON-NOSVE-NEXT: sshl v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = ashr <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -32,6 +43,12 @@ define <8 x i8> @ashr_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.8b, v1.8b +; NONEON-NOSVE-NEXT: sshl v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = ashr <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -45,6 +62,12 @@ define <16 x i8> @ashr_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v16i8: +; 
NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.16b, v1.16b +; NONEON-NOSVE-NEXT: sshl v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = ashr <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -60,6 +83,17 @@ define void @ashr_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: asr z1.b, p0/m, z1.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: neg v0.16b, v0.16b +; NONEON-NOSVE-NEXT: neg v1.16b, v1.16b +; NONEON-NOSVE-NEXT: sshl v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: sshl v1.16b, v3.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = ashr <32 x i8> %op1, %op2 @@ -78,6 +112,16 @@ define <2 x i16> @ashr_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d2, #0x00ffff0000ffff +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: sshr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: neg v1.2s, v1.2s +; NONEON-NOSVE-NEXT: sshl v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = ashr <2 x i16> %op1, %op2 ret <2 x i16> %res } @@ -91,6 +135,12 @@ define <4 x i16> @ashr_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.4h, v1.4h +; NONEON-NOSVE-NEXT: sshl v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = ashr <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -104,6 +154,12 @@ define <8 x i16> @ashr_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.8h, v1.8h +; NONEON-NOSVE-NEXT: sshl v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = ashr <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -119,6 +175,17 @@ define void @ashr_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: asr z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: neg v0.8h, v0.8h +; NONEON-NOSVE-NEXT: neg v1.8h, v1.8h +; NONEON-NOSVE-NEXT: sshl v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: sshl v1.8h, v3.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = ashr <16 x i16> %op1, %op2 @@ -135,6 +202,12 @@ define <2 x i32> @ashr_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.2s, v1.2s +; NONEON-NOSVE-NEXT: sshl v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = ashr <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -148,6 +221,12 @@ define <4 x i32> @ashr_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; 
NONEON-NOSVE-LABEL: ashr_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.4s, v1.4s +; NONEON-NOSVE-NEXT: sshl v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = ashr <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -163,6 +242,17 @@ define void @ashr_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: asr z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: neg v0.4s, v0.4s +; NONEON-NOSVE-NEXT: neg v1.4s, v1.4s +; NONEON-NOSVE-NEXT: sshl v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: sshl v1.4s, v3.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = ashr <8 x i32> %op1, %op2 @@ -179,6 +269,12 @@ define <1 x i64> @ashr_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg d1, d1 +; NONEON-NOSVE-NEXT: sshl d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = ashr <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -192,6 +288,12 @@ define <2 x i64> @ashr_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.2d, v1.2d +; NONEON-NOSVE-NEXT: sshl v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = ashr <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -207,6 +309,17 @@ define void @ashr_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: asr z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ashr_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: neg v0.2d, v0.2d +; NONEON-NOSVE-NEXT: neg v1.2d, v1.2d +; NONEON-NOSVE-NEXT: sshl v0.2d, v2.2d, v0.2d +; NONEON-NOSVE-NEXT: sshl v1.2d, v3.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = ashr <4 x i64> %op1, %op2 @@ -229,6 +342,15 @@ define <4 x i8> @lshr_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d2, #0xff00ff00ff00ff +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v2.8b +; NONEON-NOSVE-NEXT: neg v1.4h, v1.4h +; NONEON-NOSVE-NEXT: ushl v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = lshr <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -242,6 +364,12 @@ define <8 x i8> @lshr_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.8b, v1.8b +; NONEON-NOSVE-NEXT: ushl v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = lshr <8 x i8> %op1, %op2 ret <8 x i8> %res } @@ -255,6 +383,12 @@ define <16 x i8> @lshr_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v16i8: +; NONEON-NOSVE: // 
%bb.0: +; NONEON-NOSVE-NEXT: neg v1.16b, v1.16b +; NONEON-NOSVE-NEXT: ushl v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = lshr <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -270,6 +404,17 @@ define void @lshr_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: lsr z1.b, p0/m, z1.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: neg v0.16b, v0.16b +; NONEON-NOSVE-NEXT: neg v1.16b, v1.16b +; NONEON-NOSVE-NEXT: ushl v0.16b, v2.16b, v0.16b +; NONEON-NOSVE-NEXT: ushl v1.16b, v3.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = lshr <32 x i8> %op1, %op2 @@ -288,6 +433,15 @@ define <2 x i16> @lshr_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d2, #0x00ffff0000ffff +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v2.8b +; NONEON-NOSVE-NEXT: neg v1.2s, v1.2s +; NONEON-NOSVE-NEXT: ushl v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = lshr <2 x i16> %op1, %op2 ret <2 x i16> %res } @@ -301,6 +455,12 @@ define <4 x i16> @lshr_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.4h, v1.4h +; NONEON-NOSVE-NEXT: ushl v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = lshr <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -314,6 +474,12 @@ define <8 x i16> @lshr_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.8h, v1.8h +; NONEON-NOSVE-NEXT: ushl v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = lshr <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -329,6 +495,17 @@ define void @lshr_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: neg v0.8h, v0.8h +; NONEON-NOSVE-NEXT: neg v1.8h, v1.8h +; NONEON-NOSVE-NEXT: ushl v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: ushl v1.8h, v3.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = lshr <16 x i16> %op1, %op2 @@ -345,6 +522,12 @@ define <2 x i32> @lshr_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.2s, v1.2s +; NONEON-NOSVE-NEXT: ushl v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = lshr <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -358,6 +541,12 @@ define <4 x i32> @lshr_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v4i32: +; NONEON-NOSVE: // %bb.0: 
+; NONEON-NOSVE-NEXT: neg v1.4s, v1.4s +; NONEON-NOSVE-NEXT: ushl v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = lshr <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -373,6 +562,17 @@ define void @lshr_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: lsr z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: neg v0.4s, v0.4s +; NONEON-NOSVE-NEXT: neg v1.4s, v1.4s +; NONEON-NOSVE-NEXT: ushl v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: ushl v1.4s, v3.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %res = lshr <8 x i32> %op1, %op2 @@ -389,6 +589,12 @@ define <1 x i64> @lshr_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg d1, d1 +; NONEON-NOSVE-NEXT: ushl d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = lshr <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -402,6 +608,12 @@ define <2 x i64> @lshr_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: neg v1.2d, v1.2d +; NONEON-NOSVE-NEXT: ushl v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = lshr <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -417,6 +629,17 @@ define void @lshr_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: lsr z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: lshr_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: neg v0.2d, v0.2d +; NONEON-NOSVE-NEXT: neg v1.2d, v1.2d +; NONEON-NOSVE-NEXT: ushl v0.2d, v2.2d, v0.2d +; NONEON-NOSVE-NEXT: ushl v1.2d, v3.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = lshr <4 x i64> %op1, %op2 @@ -438,6 +661,13 @@ define <2 x i8> @shl_v2i8(<2 x i8> %op1, <2 x i8> %op2) { ; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v2i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d2, #0x0000ff000000ff +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ushl v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = shl <2 x i8> %op1, %op2 ret <2 x i8> %res } @@ -452,6 +682,13 @@ define <4 x i8> @shl_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d2, #0xff00ff00ff00ff +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ushl v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = shl <4 x i8> %op1, %op2 ret <4 x i8> %res } @@ -465,6 +702,11 @@ define <8 x i8> @shl_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushl v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ret %res = shl <8 x i8> 
%op1, %op2 ret <8 x i8> %res } @@ -478,6 +720,11 @@ define <16 x i8> @shl_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushl v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %res = shl <16 x i8> %op1, %op2 ret <16 x i8> %res } @@ -493,6 +740,15 @@ define void @shl_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: lsl z1.b, p0/m, z1.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: ushl v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: ushl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = shl <32 x i8> %op1, %op2 @@ -509,6 +765,11 @@ define <4 x i16> @shl_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushl v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %res = shl <4 x i16> %op1, %op2 ret <4 x i16> %res } @@ -522,6 +783,11 @@ define <8 x i16> @shl_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushl v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: ret %res = shl <8 x i16> %op1, %op2 ret <8 x i16> %res } @@ -537,6 +803,15 @@ define void @shl_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: ushl v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: ushl v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = shl <16 x i16> %op1, %op2 @@ -553,6 +828,11 @@ define <2 x i32> @shl_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushl v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: ret %res = shl <2 x i32> %op1, %op2 ret <2 x i32> %res } @@ -566,6 +846,11 @@ define <4 x i32> @shl_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushl v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: ret %res = shl <4 x i32> %op1, %op2 ret <4 x i32> %res } @@ -581,6 +866,15 @@ define void @shl_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: lsl z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: ushl v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: ushl v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load 
<8 x i32>, ptr %b %res = shl <8 x i32> %op1, %op2 @@ -597,6 +891,11 @@ define <1 x i64> @shl_v1i64(<1 x i64> %op1, <1 x i64> %op2) { ; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushl d0, d0, d1 +; NONEON-NOSVE-NEXT: ret %res = shl <1 x i64> %op1, %op2 ret <1 x i64> %res } @@ -610,6 +909,11 @@ define <2 x i64> @shl_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushl v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: ret %res = shl <2 x i64> %op1, %op2 ret <2 x i64> %res } @@ -625,6 +929,15 @@ define void @shl_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shl_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: ushl v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: ushl v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %res = shl <4 x i64> %op1, %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll index b285659258f31d..d9ca19baea7d5b 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -15,6 +16,13 @@ define <4 x half> @ucvtf_v4i16_v4f16(<4 x i16> %op1) { ; CHECK-NEXT: ucvtf z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v4i16_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = uitofp <4 x i16> %op1 to <4 x half> ret <4 x half> %res } @@ -27,6 +35,22 @@ define void @ucvtf_v8i16_v8f16(ptr %a, ptr %b) { ; CHECK-NEXT: ucvtf z0.h, p0/m, z0.h ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v8i16_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ushll v1.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: ucvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v1.4s +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: str q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i16>, ptr %a %res = uitofp <8 x i16> %op1 to <8 x half> store <8 x half> %res, ptr %b @@ -42,6 +66,29 @@ define void @ucvtf_v16i16_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ucvtf z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v16i16_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ushll v2.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v0.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ucvtf v2.4s, v2.4s +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ushll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: ucvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: ucvtf v3.4s, v3.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v1.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v3.4s +; NONEON-NOSVE-NEXT: stp q2, q0, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %res = uitofp <16 x i16> %op1 to <16 x half> store <16 x half> %res, ptr %b @@ -61,6 +108,13 @@ define <2 x float> @ucvtf_v2i16_v2f32(<2 x i16> %op1) { ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v2i16_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0x00ffff0000ffff +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ucvtf v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = uitofp <2 x i16> %op1 to <2 x float> ret <2 x float> %res } @@ -74,6 +128,12 @@ define <4 x float> @ucvtf_v4i16_v4f32(<4 x i16> %op1) { ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v4i16_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = uitofp <4 x i16> %op1 to <4 x float> ret <4 x float> %res } @@ -90,6 +150,20 @@ define void @ucvtf_v8i16_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.s ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v8i16_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ucvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i16>, ptr %a %res = uitofp <8 x i16> %op1 to <8 x float> store <8 x float> %res, ptr %b @@ -114,6 +188,26 @@ define void @ucvtf_v16i16_v16f32(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v16i16_v16f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ushll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: ucvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ucvtf v2.4s, v2.4s +; NONEON-NOSVE-NEXT: ucvtf v3.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %res = uitofp <16 x i16> %op1 to <16 x float> store <16 x float> %res, ptr %b @@ -132,6 +226,13 @@ define <1 x double> @ucvtf_v1i16_v1f64(<1 x i16> %op1) { ; CHECK-NEXT: and w8, w8, #0xffff ; CHECK-NEXT: ucvtf d0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v1i16_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: umov w8, v0.h[0] +; NONEON-NOSVE-NEXT: ucvtf d0, w8 +; NONEON-NOSVE-NEXT: ret %res = uitofp <1 x i16> %op1 to <1 x double> ret <1 x double> %res } @@ -146,6 +247,14 @@ define <2 x double> @ucvtf_v2i16_v2f64(<2 x i16> %op1) { ; CHECK-NEXT: ucvtf z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v2i16_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d1, #0x00ffff0000ffff +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = uitofp <2 x i16> %op1 to <2 x double> ret <2 x double> %res } @@ -163,6 +272,21 @@ define void @ucvtf_v4i16_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ucvtf z0.d, p0/m, z0.d ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v4i16_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i16>, ptr %a %res = uitofp <4 x i16> %op1 to <4 x double> store <4 x double> %res, ptr %b @@ -190,6 +314,30 @@ define void @ucvtf_v8i16_v8f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q1, [x1] ; CHECK-NEXT: stp q3, q0, [x1, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v8i16_v8f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ushll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: ucvtf v2.2d, v2.2d +; NONEON-NOSVE-NEXT: ucvtf v3.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q2, [x1] +; NONEON-NOSVE-NEXT: stp q1, q3, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i16>, ptr %a %res = uitofp <8 x i16> %op1 to <8 x double> store <8 x double> %res, ptr %b @@ -238,6 +386,46 @@ define void @ucvtf_v16i16_v16f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q1, q2, [x1, #32] ; CHECK-NEXT: stp q3, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v16i16_v16f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-96]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 96 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: ushll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ushll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: ushll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q2, q0, [sp, #32] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: stp q3, q1, [sp, #64] +; NONEON-NOSVE-NEXT: ldr d5, [sp, #56] +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #88] +; NONEON-NOSVE-NEXT: ldr d6, [sp, #72] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #40] +; NONEON-NOSVE-NEXT: ushll v5.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ushll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: ushll v4.2d, v4.2s, #0 +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: ushll v6.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: ushll v7.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: ucvtf v2.2d, v2.2d +; NONEON-NOSVE-NEXT: ucvtf v5.2d, v5.2d +; NONEON-NOSVE-NEXT: ucvtf v3.2d, v3.2d +; NONEON-NOSVE-NEXT: ucvtf v4.2d, v4.2d +; NONEON-NOSVE-NEXT: stp q0, q5, [x1] +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v7.2d +; NONEON-NOSVE-NEXT: stp q1, q4, [x1, #64] +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v6.2d +; NONEON-NOSVE-NEXT: stp q2, q0, [x1, #32] +; NONEON-NOSVE-NEXT: stp q3, q1, [x1, #96] +; NONEON-NOSVE-NEXT: add sp, sp, #96 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %res = uitofp <16 x i16> %op1 to <16 x double> store <16 x double> %res, ptr %b @@ -257,6 +445,13 @@ define <2 x half> @ucvtf_v2i32_v2f16(<2 x i32> %op1) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v2i32_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = uitofp <2 x i32> %op1 to <2 x half> ret <2 x half> %res } @@ -270,6 +465,12 @@ define <4 x half> @ucvtf_v4i32_v4f16(<4 x i32> %op1) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v4i32_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = uitofp <4 x i32> %op1 to <4 x half> ret <4 x half> %res } @@ -287,6 +488,15 @@ define <8 x half> @ucvtf_v8i32_v8f16(ptr %a) { ; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v8i32_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ucvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = uitofp <8 x i32> %op1 to <8 x half> ret <8 x half> %res @@ -311,6 +521,21 @@ define void @ucvtf_v16i32_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: splice z2.h, p0, z2.h, z3.h ; CHECK-NEXT: stp q2, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v16i32_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x0] +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ucvtf v2.4s, v2.4s +; NONEON-NOSVE-NEXT: ucvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: ucvtf v3.4s, 
v3.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v1.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v3.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i32>, ptr %a %res = uitofp <16 x i32> %op1 to <16 x half> store <16 x half> %res, ptr %b @@ -329,6 +554,11 @@ define <2 x float> @ucvtf_v2i32_v2f32(<2 x i32> %op1) { ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v2i32_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ucvtf v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = uitofp <2 x i32> %op1 to <2 x float> ret <2 x float> %res } @@ -341,6 +571,11 @@ define <4 x float> @ucvtf_v4i32_v4f32(<4 x i32> %op1) { ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v4i32_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = uitofp <4 x i32> %op1 to <4 x float> ret <4 x float> %res } @@ -354,6 +589,14 @@ define void @ucvtf_v8i32_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ucvtf z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v8i32_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ucvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ucvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = uitofp <8 x i32> %op1 to <8 x float> store <8 x float> %res, ptr %b @@ -373,6 +616,12 @@ define <2 x double> @ucvtf_v2i32_v2f64(<2 x i32> %op1) { ; CHECK-NEXT: ucvtf z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v2i32_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = uitofp <2 x i32> %op1 to <2 x double> ret <2 x double> %res } @@ -389,6 +638,20 @@ define void @ucvtf_v4i32_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ucvtf z0.d, p0/m, z0.d ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v4i32_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i32>, ptr %a %res = uitofp <4 x i32> %op1 to <4 x double> store <4 x double> %res, ptr %b @@ -413,6 +676,26 @@ define void @ucvtf_v8i32_v8f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v8i32_v8f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: ushll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ushll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: ushll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ushll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ucvtf v2.2d, v2.2d +; NONEON-NOSVE-NEXT: ucvtf v3.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = uitofp <8 x i32> %op1 to <8 x double> store <8 x double> %res, ptr %b @@ -439,6 +722,18 @@ define <2 x half> @ucvtf_v2i64_v2f16(<2 x i64> %op1) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v2i64_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov x8, v0.d[1] +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: ucvtf s1, x9 +; NONEON-NOSVE-NEXT: ucvtf s0, x8 +; NONEON-NOSVE-NEXT: fcvt h2, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s1 +; NONEON-NOSVE-NEXT: mov v0.h[1], v2.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = uitofp <2 x i64> %op1 to <2 x half> ret <2 x half> %res } @@ -459,6 +754,16 @@ define <4 x half> @ucvtf_v4i64_v4f16(ptr %a) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v4i64_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtn2 v0.4s, v1.2d +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %res = uitofp <4 x i64> %op1 to <4 x half> ret <4 x half> %res @@ -492,6 +797,22 @@ define <8 x half> @ucvtf_v8i64_v8f16(ptr %a) { ; CHECK-NEXT: splice z0.h, p0, z0.h, z2.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v8i64_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0, #32] +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: ucvtf v2.2d, v2.2d +; NONEON-NOSVE-NEXT: ucvtf v3.2d, v3.2d +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtn v2.2s, v2.2d +; NONEON-NOSVE-NEXT: fcvtn2 v0.4s, v1.2d +; NONEON-NOSVE-NEXT: fcvtn2 v2.4s, v3.2d +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v2.4s +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i64>, ptr %a %res = uitofp <8 x i64> %op1 to <8 x half> ret <8 x half> %res @@ -510,6 +831,12 @@ define <2 x float> @ucvtf_v2i64_v2f32(<2 x i64> %op1) { ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v2i64_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: ret %res = uitofp <2 x i64> %op1 to <2 x float> ret <2 x float> %res } @@ -527,6 +854,15 @@ define <4 x float> @ucvtf_v4i64_v4f32(ptr %a) { ; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v4i64_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, 
[x0] +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtn2 v0.4s, v1.2d +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %res = uitofp <4 x i64> %op1 to <4 x float> ret <4 x float> %res @@ -551,6 +887,21 @@ define void @ucvtf_v8i64_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: splice z2.s, p0, z2.s, z3.s ; CHECK-NEXT: stp q2, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v8i64_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x0] +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ucvtf v2.2d, v2.2d +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: ucvtf v3.2d, v3.2d +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtn v1.2s, v1.2d +; NONEON-NOSVE-NEXT: fcvtn2 v0.4s, v2.2d +; NONEON-NOSVE-NEXT: fcvtn2 v1.4s, v3.2d +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i64>, ptr %a %res = uitofp <8 x i64> %op1 to <8 x float> store <8 x float> %res, ptr %b @@ -569,6 +920,11 @@ define <2 x double> @ucvtf_v2i64_v2f64(<2 x i64> %op1) { ; CHECK-NEXT: ucvtf z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v2i64_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = uitofp <2 x i64> %op1 to <2 x double> ret <2 x double> %res } @@ -582,6 +938,14 @@ define void @ucvtf_v4i64_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ucvtf z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_v4i64_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ucvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ucvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %res = uitofp <4 x i64> %op1 to <4 x double> store <4 x double> %res, ptr %b @@ -600,6 +964,13 @@ define <4 x half> @scvtf_v4i16_v4f16(<4 x i16> %op1) { ; CHECK-NEXT: scvtf z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v4i16_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = sitofp <4 x i16> %op1 to <4 x half> ret <4 x half> %res } @@ -612,6 +983,22 @@ define void @scvtf_v8i16_v8f16(ptr %a, ptr %b) { ; CHECK-NEXT: scvtf z0.h, p0/m, z0.h ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v8i16_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: sshll v1.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d0, [sp, #8] +; NONEON-NOSVE-NEXT: scvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v1.4s +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: str q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i16>, ptr %a %res = sitofp <8 x i16> %op1 to <8 x half> store <8 x half> %res, ptr %b @@ -627,6 +1014,29 @@ define void @scvtf_v16i16_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: scvtf z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v16i16_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: sshll v2.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v0.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: scvtf v2.4s, v2.4s +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: sshll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: scvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: scvtf v3.4s, v3.4s +; NONEON-NOSVE-NEXT: fcvtn v2.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v2.8h, v1.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v3.4s +; NONEON-NOSVE-NEXT: stp q2, q0, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %res = sitofp <16 x i16> %op1 to <16 x half> store <16 x half> %res, ptr %b @@ -645,6 +1055,13 @@ define <2 x float> @scvtf_v2i16_v2f32(<2 x i16> %op1) { ; CHECK-NEXT: scvtf z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v2i16_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: sshr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: scvtf v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = sitofp <2 x i16> %op1 to <2 x float> ret <2 x float> %res } @@ -658,6 +1075,12 @@ define <4 x float> @scvtf_v4i16_v4f32(<4 x i16> %op1) { ; CHECK-NEXT: scvtf z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v4i16_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = sitofp <4 x i16> %op1 to <4 x float> ret <4 x float> %res } @@ -674,6 +1097,20 @@ define void @scvtf_v8i16_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: scvtf z0.s, p0/m, z0.s ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v8i16_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: scvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i16>, ptr %a %res = sitofp <8 x i16> %op1 to <8 x float> store <8 x float> %res, ptr %b @@ -698,6 +1135,26 @@ define void @scvtf_v16i16_v16f32(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v16i16_v16f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: sshll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: scvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: scvtf v2.4s, v2.4s +; NONEON-NOSVE-NEXT: scvtf v3.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %res = sitofp <16 x i16> %op1 to <16 x float> store <16 x float> %res, ptr %b @@ -719,6 +1176,14 @@ define <2 x double> @scvtf_v2i16_v2f64(<2 x i16> %op1) { ; CHECK-NEXT: scvtf z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v2i16_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: sshr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = sitofp <2 x i16> %op1 to <2 x double> ret <2 x double> %res } @@ -736,6 +1201,21 @@ define void @scvtf_v4i16_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: scvtf z0.d, p0/m, z0.d ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v4i16_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: scvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i16>, ptr %a %res = sitofp <4 x i16> %op1 to <4 x double> store <4 x double> %res, ptr %b @@ -763,6 +1243,30 @@ define void @scvtf_v8i16_v8f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q1, [x1] ; CHECK-NEXT: stp q3, q0, [x1, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v8i16_v8f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-48]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #16] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #40] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: sshll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: scvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: scvtf v2.2d, v2.2d +; NONEON-NOSVE-NEXT: scvtf v3.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q2, [x1] +; NONEON-NOSVE-NEXT: stp q1, q3, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #48 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i16>, ptr %a %res = sitofp <8 x i16> %op1 to <8 x double> store <8 x double> %res, ptr %b @@ -811,6 +1315,46 @@ define void @scvtf_v16i16_v16f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q1, q2, [x1, #32] ; CHECK-NEXT: stp q3, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v16i16_v16f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-96]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 96 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #8] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: sshll v1.4s, v1.4h, #0 +; NONEON-NOSVE-NEXT: sshll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: sshll v3.4s, v3.4h, #0 +; NONEON-NOSVE-NEXT: stp q2, q0, [sp, #32] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: stp q3, q1, [sp, #64] +; NONEON-NOSVE-NEXT: ldr d5, [sp, #56] +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: ldr d4, [sp, #88] +; NONEON-NOSVE-NEXT: ldr d6, [sp, #72] +; NONEON-NOSVE-NEXT: ldr d7, [sp, #40] +; NONEON-NOSVE-NEXT: sshll v5.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: sshll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: sshll v4.2d, v4.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: sshll v6.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: sshll v7.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v2.2d, v2.2d +; NONEON-NOSVE-NEXT: scvtf v5.2d, v5.2d +; NONEON-NOSVE-NEXT: scvtf v3.2d, v3.2d +; NONEON-NOSVE-NEXT: scvtf v4.2d, v4.2d +; NONEON-NOSVE-NEXT: stp q0, q5, [x1] +; NONEON-NOSVE-NEXT: scvtf v0.2d, v7.2d +; NONEON-NOSVE-NEXT: stp q1, q4, [x1, #64] +; NONEON-NOSVE-NEXT: scvtf v1.2d, v6.2d +; NONEON-NOSVE-NEXT: stp q2, q0, [x1, #32] +; NONEON-NOSVE-NEXT: stp q3, q1, [x1, #96] +; NONEON-NOSVE-NEXT: add sp, sp, #96 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %res = sitofp <16 x i16> %op1 to <16 x double> store <16 x double> %res, ptr %b @@ -830,6 +1374,13 @@ define <2 x half> @scvtf_v2i32_v2f16(<2 x i32> %op1) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v2i32_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = sitofp <2 x i32> %op1 to <2 x half> ret <2 x half> %res } @@ -843,6 +1394,12 @@ define <4 x half> @scvtf_v4i32_v4f16(<4 x i32> %op1) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v4i32_v4f16: +; NONEON-NOSVE: // %bb.0: +; 
NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %res = sitofp <4 x i32> %op1 to <4 x half> ret <4 x half> %res } @@ -860,6 +1417,15 @@ define <8 x half> @scvtf_v8i32_v8f16(ptr %a) { ; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v8i32_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: scvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v0.8h, v1.4s +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = sitofp <8 x i32> %op1 to <8 x half> ret <8 x half> %res @@ -877,6 +1443,11 @@ define <2 x float> @scvtf_v2i32_v2f32(<2 x i32> %op1) { ; CHECK-NEXT: scvtf z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v2i32_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: scvtf v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %res = sitofp <2 x i32> %op1 to <2 x float> ret <2 x float> %res } @@ -889,6 +1460,11 @@ define <4 x float> @scvtf_v4i32_v4f32(<4 x i32> %op1) { ; CHECK-NEXT: scvtf z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v4i32_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %res = sitofp <4 x i32> %op1 to <4 x float> ret <4 x float> %res } @@ -902,6 +1478,14 @@ define void @scvtf_v8i32_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: scvtf z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v8i32_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: scvtf v0.4s, v0.4s +; NONEON-NOSVE-NEXT: scvtf v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = sitofp <8 x i32> %op1 to <8 x float> store <8 x float> %res, ptr %b @@ -921,6 +1505,12 @@ define <2 x double> @scvtf_v2i32_v2f64(<2 x i32> %op1) { ; CHECK-NEXT: scvtf z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v2i32_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = sitofp <2 x i32> %op1 to <2 x double> ret <2 x double> %res } @@ -937,6 +1527,20 @@ define void @scvtf_v4i32_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: scvtf z0.d, p0/m, z0.d ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v4i32_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: scvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i32>, ptr %a %res = sitofp <4 x i32> %op1 to <4 x double> store <4 x double> %res, ptr %b @@ -961,6 +1565,26 @@ define void @scvtf_v8i32_v8f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q0, [x1, #32] ; CHECK-NEXT: stp q3, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v8i32_v8f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [sp, #-32]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32 +; NONEON-NOSVE-NEXT: ldr d2, [sp, #24] +; NONEON-NOSVE-NEXT: ldr d3, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: sshll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: scvtf v2.2d, v2.2d +; NONEON-NOSVE-NEXT: scvtf v3.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add sp, sp, #32 +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = sitofp <8 x i32> %op1 to <8 x double> store <8 x double> %res, ptr %b @@ -1005,6 +1629,40 @@ define void @scvtf_v16i32_v16f64(ptr %a, ptr %b) { ; CHECK-NEXT: stp q2, q1, [x1] ; CHECK-NEXT: stp q4, q0, [x1, #32] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v16i32_v16f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #32] +; NONEON-NOSVE-NEXT: stp q0, q2, [sp, #-64]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 64 +; NONEON-NOSVE-NEXT: stp q1, q3, [sp, #32] +; NONEON-NOSVE-NEXT: ldr d4, [sp, #24] +; NONEON-NOSVE-NEXT: sshll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: ldr d5, [sp, #56] +; NONEON-NOSVE-NEXT: sshll v3.2d, v3.2s, #0 +; NONEON-NOSVE-NEXT: ldr d6, [sp, #40] +; NONEON-NOSVE-NEXT: sshll v4.2d, v4.2s, #0 +; NONEON-NOSVE-NEXT: ldr d7, [sp, #8] +; NONEON-NOSVE-NEXT: sshll v1.2d, v1.2s, #0 +; NONEON-NOSVE-NEXT: sshll v5.2d, v5.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v2.2d, v2.2d +; NONEON-NOSVE-NEXT: sshll v6.2d, v6.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v3.2d, v3.2d +; NONEON-NOSVE-NEXT: sshll v0.2d, v0.2s, #0 +; NONEON-NOSVE-NEXT: sshll v7.2d, v7.2s, #0 +; NONEON-NOSVE-NEXT: scvtf v4.2d, v4.2d +; NONEON-NOSVE-NEXT: scvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: scvtf v5.2d, v5.2d +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: stp q2, q4, [x1, #96] +; NONEON-NOSVE-NEXT: scvtf v2.2d, v6.2d +; NONEON-NOSVE-NEXT: stp q3, q5, [x1, #64] +; NONEON-NOSVE-NEXT: scvtf v3.2d, v7.2d +; NONEON-NOSVE-NEXT: stp q1, q2, [x1, #32] +; NONEON-NOSVE-NEXT: stp q0, q3, [x1] +; NONEON-NOSVE-NEXT: add sp, sp, #64 +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i32>, ptr %a %res = sitofp <16 x i32> %op1 to <16 x double> store <16 x double> %res, ptr %b @@ -1031,6 +1689,18 @@ define <2 x half> @scvtf_v2i64_v2f16(<2 x i64> %op1) { ; CHECK-NEXT: ldr d0, [sp, #8] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v2i64_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov x8, v0.d[1] +; NONEON-NOSVE-NEXT: fmov x9, d0 +; NONEON-NOSVE-NEXT: scvtf s1, x9 +; NONEON-NOSVE-NEXT: scvtf s0, x8 +; NONEON-NOSVE-NEXT: fcvt h2, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s1 +; NONEON-NOSVE-NEXT: mov v0.h[1], v2.h[0] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %res = sitofp <2 x i64> %op1 to <2 x half> ret <2 x half> %res } @@ -1051,6 +1721,16 @@ define <4 x half> @scvtf_v4i64_v4f16(ptr %a) { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v4i64_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: scvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtn2 v0.4s, v1.2d +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %res = sitofp <4 x i64> %op1 to <4 x half> ret <4 x half> %res @@ -1069,6 +1749,12 @@ define <2 x float> @scvtf_v2i64_v2f32(<2 x i64> %op1) { ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v2i64_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: ret %res = sitofp <2 x i64> %op1 to <2 x float> ret <2 x float> %res } @@ -1086,6 +1772,15 @@ define <4 x float> @scvtf_v4i64_v4f32(ptr %a) { ; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v4i64_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: scvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: fcvtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: fcvtn2 v0.4s, v1.2d +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %res = sitofp <4 x i64> %op1 to <4 x float> ret <4 x float> %res @@ -1103,6 
+1798,11 @@ define <2 x double> @scvtf_v2i64_v2f64(<2 x i64> %op1) { ; CHECK-NEXT: scvtf z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v2i64_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: ret %res = sitofp <2 x i64> %op1 to <2 x double> ret <2 x double> %res } @@ -1116,6 +1816,14 @@ define void @scvtf_v4i64_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: scvtf z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_v4i64_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: scvtf v0.2d, v0.2d +; NONEON-NOSVE-NEXT: scvtf v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %res = sitofp <4 x i64> %op1 to <4 x double> store <4 x double> %res, ptr %b @@ -1128,6 +1836,13 @@ define half @scvtf_i16_f16(ptr %0) { ; CHECK-NEXT: ldrsh w8, [x0] ; CHECK-NEXT: scvtf h0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_i16_f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldrsh w8, [x0] +; NONEON-NOSVE-NEXT: scvtf s0, w8 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %2 = load i16, ptr %0, align 64 %3 = sitofp i16 %2 to half ret half %3 @@ -1139,6 +1854,12 @@ define float @scvtf_i16_f32(ptr %0) { ; CHECK-NEXT: ldrsh w8, [x0] ; CHECK-NEXT: scvtf s0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_i16_f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldrsh w8, [x0] +; NONEON-NOSVE-NEXT: scvtf s0, w8 +; NONEON-NOSVE-NEXT: ret %2 = load i16, ptr %0, align 64 %3 = sitofp i16 %2 to float ret float %3 @@ -1150,6 +1871,12 @@ define double @scvtf_i16_f64(ptr %0) { ; CHECK-NEXT: ldrsh w8, [x0] ; CHECK-NEXT: scvtf d0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_i16_f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldrsh w8, [x0] +; NONEON-NOSVE-NEXT: scvtf d0, w8 +; NONEON-NOSVE-NEXT: ret %2 = load i16, ptr %0, align 64 %3 = sitofp i16 %2 to double ret double %3 @@ -1161,6 +1888,13 @@ define half @scvtf_i32_f16(ptr %0) { ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: scvtf h0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_i32_f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr w8, [x0] +; NONEON-NOSVE-NEXT: scvtf s0, w8 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %2 = load i32, ptr %0, align 64 %3 = sitofp i32 %2 to half ret half %3 @@ -1172,6 +1906,12 @@ define float @scvtf_i32_f32(ptr %0) { ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: scvtf s0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_i32_f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr w8, [x0] +; NONEON-NOSVE-NEXT: scvtf s0, w8 +; NONEON-NOSVE-NEXT: ret %2 = load i32, ptr %0, align 64 %3 = sitofp i32 %2 to float ret float %3 @@ -1183,6 +1923,12 @@ define double @scvtf_i32_f64(ptr %0) { ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: scvtf d0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_i32_f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr w8, [x0] +; NONEON-NOSVE-NEXT: scvtf d0, w8 +; NONEON-NOSVE-NEXT: ret %2 = load i32, ptr %0, align 64 %3 = sitofp i32 %2 to double ret double %3 @@ -1194,6 +1940,13 @@ define half @scvtf_i64_f16(ptr %0) { ; CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: scvtf h0, x8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_i64_f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr x8, [x0] +; NONEON-NOSVE-NEXT: scvtf s0, x8 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %2 = load i64, 
ptr %0, align 64 %3 = sitofp i64 %2 to half ret half %3 @@ -1205,6 +1958,12 @@ define float @scvtf_i64_f32(ptr %0) { ; CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: scvtf s0, x8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_i64_f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr x8, [x0] +; NONEON-NOSVE-NEXT: scvtf s0, x8 +; NONEON-NOSVE-NEXT: ret %2 = load i64, ptr %0, align 64 %3 = sitofp i64 %2 to float ret float %3 @@ -1216,6 +1975,12 @@ define double @scvtf_i64_f64(ptr %0) { ; CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: scvtf d0, x8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: scvtf_i64_f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr x8, [x0] +; NONEON-NOSVE-NEXT: scvtf d0, x8 +; NONEON-NOSVE-NEXT: ret %2 = load i64, ptr %0, align 64 %3 = sitofp i64 %2 to double ret double %3 @@ -1227,6 +1992,13 @@ define half @ucvtf_i16_f16(ptr %0) { ; CHECK-NEXT: ldrh w8, [x0] ; CHECK-NEXT: ucvtf h0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_i16_f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: ucvtf s0, s0 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %2 = load i16, ptr %0, align 64 %3 = uitofp i16 %2 to half ret half %3 @@ -1238,6 +2010,12 @@ define float @ucvtf_i16_f32(ptr %0) { ; CHECK-NEXT: ldr h0, [x0] ; CHECK-NEXT: ucvtf s0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_i16_f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: ucvtf s0, s0 +; NONEON-NOSVE-NEXT: ret %2 = load i16, ptr %0, align 64 %3 = uitofp i16 %2 to float ret float %3 @@ -1249,6 +2027,12 @@ define double @ucvtf_i16_f64(ptr %0) { ; CHECK-NEXT: ldr h0, [x0] ; CHECK-NEXT: ucvtf d0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_i16_f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: ucvtf d0, d0 +; NONEON-NOSVE-NEXT: ret %2 = load i16, ptr %0, align 64 %3 = uitofp i16 %2 to double ret double %3 @@ -1260,6 +2044,13 @@ define half @ucvtf_i32_f16(ptr %0) { ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: ucvtf h0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_i32_f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr w8, [x0] +; NONEON-NOSVE-NEXT: ucvtf s0, w8 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %2 = load i32, ptr %0, align 64 %3 = uitofp i32 %2 to half ret half %3 @@ -1271,6 +2062,12 @@ define float @ucvtf_i32_f32(ptr %0) { ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: ucvtf s0, w8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_i32_f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr w8, [x0] +; NONEON-NOSVE-NEXT: ucvtf s0, w8 +; NONEON-NOSVE-NEXT: ret %2 = load i32, ptr %0, align 64 %3 = uitofp i32 %2 to float ret float %3 @@ -1282,6 +2079,12 @@ define double @ucvtf_i32_f64(ptr %0) { ; CHECK-NEXT: ldr s0, [x0] ; CHECK-NEXT: ucvtf d0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_i32_f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: ucvtf d0, d0 +; NONEON-NOSVE-NEXT: ret %2 = load i32, ptr %0, align 64 %3 = uitofp i32 %2 to double ret double %3 @@ -1293,6 +2096,13 @@ define half @ucvtf_i64_f16(ptr %0) { ; CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: ucvtf h0, x8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_i64_f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr x8, [x0] +; NONEON-NOSVE-NEXT: ucvtf s0, x8 +; NONEON-NOSVE-NEXT: fcvt h0, s0 +; NONEON-NOSVE-NEXT: ret %2 = load i64, ptr %0, align 64 %3 = uitofp i64 %2 to half ret half %3 @@ -1304,6 +2114,12 @@ define float @ucvtf_i64_f32(ptr %0) { ; 
CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: ucvtf s0, x8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_i64_f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr x8, [x0] +; NONEON-NOSVE-NEXT: ucvtf s0, x8 +; NONEON-NOSVE-NEXT: ret %2 = load i64, ptr %0, align 64 %3 = uitofp i64 %2 to float ret float %3 @@ -1315,6 +2131,12 @@ define double @ucvtf_i64_f64(ptr %0) { ; CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: ucvtf d0, x8 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ucvtf_i64_f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr x8, [x0] +; NONEON-NOSVE-NEXT: ucvtf d0, x8 +; NONEON-NOSVE-NEXT: ret %2 = load i64, ptr %0, align 64 %3 = uitofp i64 %2 to double ret double %3 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll index 81bbaa92d4b471..42daa4fedc949b 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -18,6 +19,13 @@ define <4 x i8> @select_v4i8(<4 x i8> %op1, <4 x i8> %op2, <4 x i1> %mask) { ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v2.4h, v2.4h, #15 +; NONEON-NOSVE-NEXT: cmlt v2.4h, v2.4h, #0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <4 x i1> %mask, <4 x i8> %op1, <4 x i8> %op2 ret <4 x i8> %sel } @@ -36,6 +44,13 @@ define <8 x i8> @select_v8i8(<8 x i8> %op1, <8 x i8> %op2, <8 x i1> %mask) { ; CHECK-NEXT: sel z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v2.8b, v2.8b, #7 +; NONEON-NOSVE-NEXT: cmlt v2.8b, v2.8b, #0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <8 x i1> %mask, <8 x i8> %op1, <8 x i8> %op2 ret <8 x i8> %sel } @@ -54,6 +69,13 @@ define <16 x i8> @select_v16i8(<16 x i8> %op1, <16 x i8> %op2, <16 x i1> %mask) ; CHECK-NEXT: sel z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v2.16b, v2.16b, #7 +; NONEON-NOSVE-NEXT: cmlt v2.16b, v2.16b, #0 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select <16 x i1> %mask, <16 x i8> %op1, <16 x i8> %op2 ret <16 x i8> %sel } @@ -70,6 +92,18 @@ define void @select_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: sel z1.b, p0, z2.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: cmeq v4.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: cmeq v5.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; 
NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %mask = icmp eq <32 x i8> %op1, %op2 @@ -92,6 +126,13 @@ define <2 x i16> @select_v2i16(<2 x i16> %op1, <2 x i16> %op2, <2 x i1> %mask) { ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v2.2s, v2.2s, #31 +; NONEON-NOSVE-NEXT: cmlt v2.2s, v2.2s, #0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <2 x i1> %mask, <2 x i16> %op1, <2 x i16> %op2 ret <2 x i16> %sel } @@ -110,6 +151,13 @@ define <4 x i16> @select_v4i16(<4 x i16> %op1, <4 x i16> %op2, <4 x i1> %mask) { ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v2.4h, v2.4h, #15 +; NONEON-NOSVE-NEXT: cmlt v2.4h, v2.4h, #0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <4 x i1> %mask, <4 x i16> %op1, <4 x i16> %op2 ret <4 x i16> %sel } @@ -129,6 +177,14 @@ define <8 x i16> @select_v8i16(<8 x i16> %op1, <8 x i16> %op2, <8 x i1> %mask) { ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v2.8h, v2.8b, #0 +; NONEON-NOSVE-NEXT: shl v2.8h, v2.8h, #15 +; NONEON-NOSVE-NEXT: cmlt v2.8h, v2.8h, #0 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select <8 x i1> %mask, <8 x i16> %op1, <8 x i16> %op2 ret <8 x i16> %sel } @@ -145,6 +201,18 @@ define void @select_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: sel z1.h, p0, z2.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: cmeq v4.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: cmeq v5.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %mask = icmp eq <16 x i16> %op1, %op2 @@ -167,6 +235,13 @@ define <2 x i32> @select_v2i32(<2 x i32> %op1, <2 x i32> %op2, <2 x i1> %mask) { ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v2.2s, v2.2s, #31 +; NONEON-NOSVE-NEXT: cmlt v2.2s, v2.2s, #0 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <2 x i1> %mask, <2 x i32> %op1, <2 x i32> %op2 ret <2 x i32> %sel } @@ -186,6 +261,14 @@ define <4 x i32> @select_v4i32(<4 x i32> %op1, <4 x i32> %op2, <4 x i1> %mask) { ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v2.4s, v2.4h, #0 +; NONEON-NOSVE-NEXT: shl v2.4s, v2.4s, #31 +; NONEON-NOSVE-NEXT: cmlt v2.4s, v2.4s, #0 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select <4 x i1> %mask, <4 x i32> %op1, <4 x i32> %op2 ret <4 x i32> %sel } @@ -202,6 +285,18 @@ define void 
@select_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: sel z1.s, p0, z2.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: cmeq v4.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: cmeq v5.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %mask = icmp eq <8 x i32> %op1, %op2 @@ -223,6 +318,14 @@ define <1 x i64> @select_v1i64(<1 x i64> %op1, <1 x i64> %op2, <1 x i1> %mask) { ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: tst w0, #0x1 +; NONEON-NOSVE-NEXT: csetm x8, ne +; NONEON-NOSVE-NEXT: fmov d2, x8 +; NONEON-NOSVE-NEXT: bif v0.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: ret %sel = select <1 x i1> %mask, <1 x i64> %op1, <1 x i64> %op2 ret <1 x i64> %sel } @@ -242,6 +345,14 @@ define <2 x i64> @select_v2i64(<2 x i64> %op1, <2 x i64> %op2, <2 x i1> %mask) { ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ushll v2.2d, v2.2s, #0 +; NONEON-NOSVE-NEXT: shl v2.2d, v2.2d, #63 +; NONEON-NOSVE-NEXT: cmlt v2.2d, v2.2d, #0 +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %sel = select <2 x i1> %mask, <2 x i64> %op1, <2 x i64> %op2 ret <2 x i64> %sel } @@ -258,6 +369,18 @@ define void @select_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: sel z1.d, p0, z2.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: select_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: cmeq v4.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: cmeq v5.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: bif v0.16b, v1.16b, v4.16b +; NONEON-NOSVE-NEXT: mov v1.16b, v5.16b +; NONEON-NOSVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %mask = icmp eq <4 x i64> %op1, %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll index 88503086146901..01a7a5cafd26b6 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -18,6 +19,19 @@ define <4 x i32> @test(ptr %arg1, ptr %arg2) { ; CHECK-NEXT: stp q2, q5, [x0, #32] ; CHECK-NEXT: stp q1, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test: +; NONEON-NOSVE: // %bb.0: // %entry +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q3, q4, [x0] +; NONEON-NOSVE-NEXT: add v2.4s, v0.4s, 
v0.4s +; NONEON-NOSVE-NEXT: add v5.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: dup v0.4s, v1.s[2] +; NONEON-NOSVE-NEXT: add v1.4s, v3.4s, v3.4s +; NONEON-NOSVE-NEXT: add v3.4s, v4.4s, v4.4s +; NONEON-NOSVE-NEXT: stp q2, q5, [x0, #32] +; NONEON-NOSVE-NEXT: stp q1, q3, [x0] +; NONEON-NOSVE-NEXT: ret entry: %0 = load <16 x i32>, ptr %arg1, align 256 %1 = load <16 x i32>, ptr %arg2, align 256 @@ -42,6 +56,19 @@ define <2 x i32> @test2(ptr %arg1, ptr %arg2) { ; CHECK-NEXT: stp q3, q4, [x0] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test2: +; NONEON-NOSVE: // %bb.0: // %entry +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q3, q4, [x0] +; NONEON-NOSVE-NEXT: add v2.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: dup v0.2s, v1.s[2] +; NONEON-NOSVE-NEXT: add v1.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: add v3.4s, v3.4s, v3.4s +; NONEON-NOSVE-NEXT: add v4.4s, v4.4s, v4.4s +; NONEON-NOSVE-NEXT: stp q2, q1, [x0, #32] +; NONEON-NOSVE-NEXT: stp q3, q4, [x0] +; NONEON-NOSVE-NEXT: ret entry: %0 = load <16 x i32>, ptr %arg1, align 256 %1 = load <16 x i32>, ptr %arg2, align 256 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll index 8ca8e698091359..c57f3af0d4b60f 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -11,6 +12,13 @@ define <4 x i8> @load_v4i8(ptr %a) { ; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: ushll v0.8h, v0.8b, #0 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %load = load <4 x i8>, ptr %a ret <4 x i8> %load } @@ -20,6 +28,11 @@ define <8 x i8> @load_v8i8(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <8 x i8>, ptr %a ret <8 x i8> %load } @@ -29,6 +42,11 @@ define <16 x i8> @load_v16i8(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <16 x i8>, ptr %a ret <16 x i8> %load } @@ -38,6 +56,11 @@ define <32 x i8> @load_v32i8(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <32 x i8>, ptr %a ret <32 x i8> %load } @@ -49,6 +72,15 @@ define <2 x i16> @load_v2i16(ptr %a) { ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldrh w8, [x0] +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: add x8, x0, #2 +; 
NONEON-NOSVE-NEXT: ld1 { v0.h }[2], [x8] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %load = load <2 x i16>, ptr %a ret <2 x i16> %load } @@ -58,6 +90,11 @@ define <2 x half> @load_v2f16(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr s0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <2 x half>, ptr %a ret <2 x half> %load } @@ -67,6 +104,11 @@ define <4 x i16> @load_v4i16(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <4 x i16>, ptr %a ret <4 x i16> %load } @@ -76,6 +118,11 @@ define <4 x half> @load_v4f16(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <4 x half>, ptr %a ret <4 x half> %load } @@ -85,6 +132,11 @@ define <8 x i16> @load_v8i16(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <8 x i16>, ptr %a ret <8 x i16> %load } @@ -94,6 +146,11 @@ define <8 x half> @load_v8f16(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <8 x half>, ptr %a ret <8 x half> %load } @@ -103,6 +160,11 @@ define <16 x i16> @load_v16i16(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <16 x i16>, ptr %a ret <16 x i16> %load } @@ -112,6 +174,11 @@ define <16 x half> @load_v16f16(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <16 x half>, ptr %a ret <16 x half> %load } @@ -121,6 +188,11 @@ define <2 x i32> @load_v2i32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <2 x i32>, ptr %a ret <2 x i32> %load } @@ -130,6 +202,11 @@ define <2 x float> @load_v2f32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <2 x float>, ptr %a ret <2 x float> %load } @@ -139,6 +216,11 @@ define <4 x i32> @load_v4i32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <4 x i32>, ptr %a ret <4 x i32> %load } @@ -148,6 +230,11 @@ define <4 x float> @load_v4f32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <4 x float>, ptr %a ret <4 x float> 
%load } @@ -157,6 +244,11 @@ define <8 x i32> @load_v8i32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <8 x i32>, ptr %a ret <8 x i32> %load } @@ -166,6 +258,11 @@ define <8 x float> @load_v8f32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <8 x float>, ptr %a ret <8 x float> %load } @@ -175,6 +272,11 @@ define <1 x i64> @load_v1i64(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <1 x i64>, ptr %a ret <1 x i64> %load } @@ -184,6 +286,11 @@ define <1 x double> @load_v1f64(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <1 x double>, ptr %a ret <1 x double> %load } @@ -193,6 +300,11 @@ define <2 x i64> @load_v2i64(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <2 x i64>, ptr %a ret <2 x i64> %load } @@ -202,6 +314,11 @@ define <2 x double> @load_v2f64(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <2 x double>, ptr %a ret <2 x double> %load } @@ -211,6 +328,11 @@ define <4 x i64> @load_v4i64(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <4 x i64>, ptr %a ret <4 x i64> %load } @@ -220,6 +342,11 @@ define <4 x double> @load_v4f64(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: load_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %load = load <4 x double>, ptr %a ret <4 x double> %load } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll index c4aeb4465c5373..65c45587e1203e 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -17,6 +18,14 @@ define i8 @andv_v4i8(<4 x i8> %a) { ; CHECK-NEXT: andv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; 
NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %a) ret i8 %res } @@ -29,6 +38,15 @@ define i8 @andv_v8i8(<8 x i8> %a) { ; CHECK-NEXT: andv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #16 +; NONEON-NOSVE-NEXT: lsr x9, x8, #8 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %a) ret i8 %res } @@ -41,6 +59,20 @@ define i8 @andv_v16i8(<16 x i8> %a) { ; CHECK-NEXT: andv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #16 +; NONEON-NOSVE-NEXT: lsr x9, x8, #8 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %a) ret i8 %res } @@ -54,6 +86,22 @@ define i8 @andv_v32i8(ptr %a) { ; CHECK-NEXT: andv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: and v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #16 +; NONEON-NOSVE-NEXT: lsr x9, x8, #8 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %op) ret i8 %res @@ -67,6 +115,13 @@ define i16 @andv_v2i16(<2 x i16> %a) { ; CHECK-NEXT: andv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %a) ret i16 %res } @@ -79,6 +134,14 @@ define i16 @andv_v4i16(<4 x i16> %a) { ; CHECK-NEXT: andv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %a) ret i16 %res } @@ -91,6 +154,19 @@ define i16 @andv_v8i16(<8 x i16> %a) { ; CHECK-NEXT: andv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %a) ret i16 %res } @@ -104,6 +180,21 @@ define i16 @andv_v16i16(ptr %a) { ; CHECK-NEXT: andv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: and v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: and x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %op) ret i16 %res @@ -117,6 +208,13 @@ define i32 @andv_v2i32(<2 x i32> %a) { ; CHECK-NEXT: andv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %a) ret i32 %res } @@ -129,6 +227,18 @@ define i32 @andv_v4i32(<4 x i32> %a) { ; CHECK-NEXT: andv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a) ret i32 %res } @@ -142,6 +252,20 @@ define i32 @andv_v8i32(ptr %a) { ; CHECK-NEXT: andv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: and v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: and w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %op) ret i32 %res @@ -155,6 +279,16 @@ define i64 @andv_v2i64(<2 x i64> %a) { ; CHECK-NEXT: andv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %a) ret i64 %res } @@ -168,6 +302,18 @@ define i64 @andv_v4i64(ptr %a) { ; CHECK-NEXT: andv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: andv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: and v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %op) ret i64 %res @@ -185,6 +331,14 @@ define i8 @eorv_v4i8(<4 x i8> %a) { ; CHECK-NEXT: eorv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %a) ret i8 %res } @@ -197,6 +351,15 @@ define i8 @eorv_v8i8(<8 x i8> %a) { ; CHECK-NEXT: eorv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #16 +; NONEON-NOSVE-NEXT: lsr x9, x8, #8 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %a) ret i8 %res } @@ -209,6 +372,20 @@ define i8 @eorv_v16i8(<16 x i8> %a) { ; CHECK-NEXT: eorv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #16 +; NONEON-NOSVE-NEXT: lsr x9, x8, #8 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %a) ret i8 %res } @@ -222,6 +399,22 @@ define i8 @eorv_v32i8(ptr %a) { ; CHECK-NEXT: eorv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: eor v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #16 +; NONEON-NOSVE-NEXT: lsr x9, x8, #8 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %op) ret i8 %res @@ -235,6 +428,13 @@ define i16 @eorv_v2i16(<2 x i16> %a) { ; CHECK-NEXT: eorv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.xor.v2i16(<2 x i16> %a) ret i16 %res } @@ -247,6 +447,14 @@ define i16 @eorv_v4i16(<4 x i16> %a) { ; CHECK-NEXT: eorv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %a) ret i16 %res } @@ -259,6 +467,19 @@ define i16 @eorv_v8i16(<8 x i16> %a) { ; CHECK-NEXT: eorv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %a) ret i16 %res } @@ -272,6 +493,21 @@ define i16 @eorv_v16i16(ptr %a) { ; CHECK-NEXT: eorv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: eor v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: eor x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %op) ret i16 %res @@ -285,6 +521,13 @@ define i32 @eorv_v2i32(<2 x i32> %a) { ; CHECK-NEXT: eorv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %a) ret i32 %res } @@ -297,6 +540,18 @@ define i32 @eorv_v4i32(<4 x i32> %a) { ; CHECK-NEXT: eorv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a) ret i32 %res } @@ -310,6 +565,20 @@ define i32 @eorv_v8i32(ptr %a) { ; CHECK-NEXT: eorv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: eor v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: eor w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %op) ret i32 %res @@ -323,6 +592,16 @@ define i64 @eorv_v2i64(<2 x i64> %a) { ; CHECK-NEXT: eorv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %a) ret i64 %res } @@ -336,6 +615,18 @@ define i64 @eorv_v4i64(ptr %a) { ; CHECK-NEXT: eorv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: eorv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: eor v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: eor v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %op) ret i64 %res @@ -353,6 +644,14 @@ define i8 @orv_v4i8(<4 x i8> %a) { ; CHECK-NEXT: orv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %a) ret i8 %res } @@ -365,6 +664,15 @@ define i8 @orv_v8i8(<8 x i8> %a) { ; CHECK-NEXT: orv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #16 +; NONEON-NOSVE-NEXT: lsr x9, x8, #8 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %a) ret i8 %res } @@ -377,6 +685,20 @@ define i8 @orv_v16i8(<16 x i8> %a) { ; CHECK-NEXT: orv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #16 +; NONEON-NOSVE-NEXT: lsr x9, x8, #8 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %a) ret i8 %res } @@ -390,6 +712,22 @@ define i8 @orv_v32i8(ptr %a) { ; CHECK-NEXT: orv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: orr v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #16 +; NONEON-NOSVE-NEXT: lsr x9, x8, #8 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %op) ret i8 %res @@ -403,6 +741,13 @@ define i16 @orv_v2i16(<2 x i16> %a) { ; CHECK-NEXT: orv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.or.v2i16(<2 x i16> %a) ret i16 %res } @@ -415,6 +760,14 @@ define i16 @orv_v4i16(<4 x i16> %a) { ; CHECK-NEXT: orv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %a) ret i16 %res } @@ -427,6 +780,19 @@ define i16 @orv_v8i16(<8 x i16> %a) { ; CHECK-NEXT: orv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %a) ret i16 %res } @@ -440,6 +806,21 @@ define i16 @orv_v16i16(ptr %a) { ; CHECK-NEXT: orv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: orr v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: orr x8, x8, x8, lsr #32 +; NONEON-NOSVE-NEXT: lsr x9, x8, #16 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %op) ret i16 %res @@ -453,6 +834,13 @@ define i32 @orv_v2i32(<2 x i32> %a) { ; CHECK-NEXT: orv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %a) ret i32 %res } @@ -465,6 +853,18 @@ define i32 @orv_v4i32(<4 x i32> %a) { ; CHECK-NEXT: orv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %a) ret i32 %res } @@ -478,6 +878,20 @@ define i32 @orv_v8i32(ptr %a) { ; CHECK-NEXT: orv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: orr v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x8, d0 +; NONEON-NOSVE-NEXT: lsr x9, x8, #32 +; NONEON-NOSVE-NEXT: orr w0, w8, w9 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %op) ret i32 %res @@ -491,6 +905,16 @@ define i64 @orv_v2i64(<2 x i64> %a) { ; CHECK-NEXT: orv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %res = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %a) ret i64 %res } @@ -504,6 +928,18 @@ define i64 @orv_v4i64(ptr %a) { ; CHECK-NEXT: orv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: orv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: orr v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: str q0, [sp, #-16]! 
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: ldr d1, [sp, #8] +; NONEON-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: fmov x0, d0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %op) ret i64 %res diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll index ca58099244cf5c..886f97ed988d81 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -19,6 +20,44 @@ define <4 x i8> @masked_load_v4i8(ptr %src, <4 x i1> %mask) { ; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #15 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI0_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI0_0] +; NONEON-NOSVE-NEXT: cmlt v0.4h, v0.4h, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbz w8, #0, .LBB0_2 +; NONEON-NOSVE-NEXT: // %bb.1: // %cond.load +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[0], [x0] +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB0_3 +; NONEON-NOSVE-NEXT: b .LBB0_4 +; NONEON-NOSVE-NEXT: .LBB0_2: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB0_4 +; NONEON-NOSVE-NEXT: .LBB0_3: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #1 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[2], [x9] +; NONEON-NOSVE-NEXT: .LBB0_4: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB0_7 +; NONEON-NOSVE-NEXT: // %bb.5: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB0_8 +; NONEON-NOSVE-NEXT: .LBB0_6: // %else8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB0_7: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[4], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB0_6 +; NONEON-NOSVE-NEXT: .LBB0_8: // %cond.load7 +; NONEON-NOSVE-NEXT: add x8, x0, #3 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[6], [x8] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %load = call <4 x i8> @llvm.masked.load.v4i8(ptr %src, i32 8, <4 x i1> %mask, <4 x i8> zeroinitializer) ret <4 x i8> %load } @@ -34,6 +73,67 @@ define <8 x i8> @masked_load_v8i8(ptr %src, <8 x i1> %mask) { ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.8b, v0.8b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI1_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI1_0] +; NONEON-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv b0, v0.8b +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbz w8, #0, .LBB1_2 +; NONEON-NOSVE-NEXT: // %bb.1: // %cond.load +; NONEON-NOSVE-NEXT: ldr b0, [x0] 
+; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB1_3 +; NONEON-NOSVE-NEXT: b .LBB1_4 +; NONEON-NOSVE-NEXT: .LBB1_2: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB1_4 +; NONEON-NOSVE-NEXT: .LBB1_3: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #1 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[1], [x9] +; NONEON-NOSVE-NEXT: .LBB1_4: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB1_11 +; NONEON-NOSVE-NEXT: // %bb.5: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB1_12 +; NONEON-NOSVE-NEXT: .LBB1_6: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB1_13 +; NONEON-NOSVE-NEXT: .LBB1_7: // %else11 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB1_14 +; NONEON-NOSVE-NEXT: .LBB1_8: // %else14 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB1_15 +; NONEON-NOSVE-NEXT: .LBB1_9: // %else17 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB1_16 +; NONEON-NOSVE-NEXT: .LBB1_10: // %else20 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB1_11: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB1_6 +; NONEON-NOSVE-NEXT: .LBB1_12: // %cond.load7 +; NONEON-NOSVE-NEXT: add x9, x0, #3 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[3], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB1_7 +; NONEON-NOSVE-NEXT: .LBB1_13: // %cond.load10 +; NONEON-NOSVE-NEXT: add x9, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[4], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB1_8 +; NONEON-NOSVE-NEXT: .LBB1_14: // %cond.load13 +; NONEON-NOSVE-NEXT: add x9, x0, #5 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[5], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB1_9 +; NONEON-NOSVE-NEXT: .LBB1_15: // %cond.load16 +; NONEON-NOSVE-NEXT: add x9, x0, #6 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[6], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB1_10 +; NONEON-NOSVE-NEXT: .LBB1_16: // %cond.load19 +; NONEON-NOSVE-NEXT: add x8, x0, #7 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[7], [x8] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %src, i32 8, <8 x i1> %mask, <8 x i8> zeroinitializer) ret <8 x i8> %load } @@ -49,6 +149,115 @@ define <16 x i8> @masked_load_v16i8(ptr %src, <16 x i1> %mask) { ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.16b, v0.16b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI2_0 +; NONEON-NOSVE-NEXT: ldr q1, [x8, :lo12:.LCPI2_0] +; NONEON-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; NONEON-NOSVE-NEXT: zip1 v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: addv h1, v0.8h +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB2_17 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB2_18 +; NONEON-NOSVE-NEXT: .LBB2_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB2_19 +; NONEON-NOSVE-NEXT: .LBB2_3: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB2_20 +; NONEON-NOSVE-NEXT: .LBB2_4: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB2_21 +; NONEON-NOSVE-NEXT: .LBB2_5: // %else11 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB2_22 +; NONEON-NOSVE-NEXT: .LBB2_6: // %else14 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB2_23 +; NONEON-NOSVE-NEXT: .LBB2_7: // %else17 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB2_24 +; 
NONEON-NOSVE-NEXT: .LBB2_8: // %else20 +; NONEON-NOSVE-NEXT: tbnz w8, #8, .LBB2_25 +; NONEON-NOSVE-NEXT: .LBB2_9: // %else23 +; NONEON-NOSVE-NEXT: tbnz w8, #9, .LBB2_26 +; NONEON-NOSVE-NEXT: .LBB2_10: // %else26 +; NONEON-NOSVE-NEXT: tbnz w8, #10, .LBB2_27 +; NONEON-NOSVE-NEXT: .LBB2_11: // %else29 +; NONEON-NOSVE-NEXT: tbnz w8, #11, .LBB2_28 +; NONEON-NOSVE-NEXT: .LBB2_12: // %else32 +; NONEON-NOSVE-NEXT: tbnz w8, #12, .LBB2_29 +; NONEON-NOSVE-NEXT: .LBB2_13: // %else35 +; NONEON-NOSVE-NEXT: tbnz w8, #13, .LBB2_30 +; NONEON-NOSVE-NEXT: .LBB2_14: // %else38 +; NONEON-NOSVE-NEXT: tbnz w8, #14, .LBB2_31 +; NONEON-NOSVE-NEXT: .LBB2_15: // %else41 +; NONEON-NOSVE-NEXT: tbnz w8, #15, .LBB2_32 +; NONEON-NOSVE-NEXT: .LBB2_16: // %else44 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB2_17: // %cond.load +; NONEON-NOSVE-NEXT: ldr b0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB2_2 +; NONEON-NOSVE-NEXT: .LBB2_18: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #1 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB2_3 +; NONEON-NOSVE-NEXT: .LBB2_19: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB2_4 +; NONEON-NOSVE-NEXT: .LBB2_20: // %cond.load7 +; NONEON-NOSVE-NEXT: add x9, x0, #3 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[3], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB2_5 +; NONEON-NOSVE-NEXT: .LBB2_21: // %cond.load10 +; NONEON-NOSVE-NEXT: add x9, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[4], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB2_6 +; NONEON-NOSVE-NEXT: .LBB2_22: // %cond.load13 +; NONEON-NOSVE-NEXT: add x9, x0, #5 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[5], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB2_7 +; NONEON-NOSVE-NEXT: .LBB2_23: // %cond.load16 +; NONEON-NOSVE-NEXT: add x9, x0, #6 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[6], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB2_8 +; NONEON-NOSVE-NEXT: .LBB2_24: // %cond.load19 +; NONEON-NOSVE-NEXT: add x9, x0, #7 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[7], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #8, .LBB2_9 +; NONEON-NOSVE-NEXT: .LBB2_25: // %cond.load22 +; NONEON-NOSVE-NEXT: add x9, x0, #8 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[8], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #9, .LBB2_10 +; NONEON-NOSVE-NEXT: .LBB2_26: // %cond.load25 +; NONEON-NOSVE-NEXT: add x9, x0, #9 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[9], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #10, .LBB2_11 +; NONEON-NOSVE-NEXT: .LBB2_27: // %cond.load28 +; NONEON-NOSVE-NEXT: add x9, x0, #10 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[10], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #11, .LBB2_12 +; NONEON-NOSVE-NEXT: .LBB2_28: // %cond.load31 +; NONEON-NOSVE-NEXT: add x9, x0, #11 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[11], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #12, .LBB2_13 +; NONEON-NOSVE-NEXT: .LBB2_29: // %cond.load34 +; NONEON-NOSVE-NEXT: add x9, x0, #12 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[12], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #13, .LBB2_14 +; NONEON-NOSVE-NEXT: .LBB2_30: // %cond.load37 +; NONEON-NOSVE-NEXT: add x9, x0, #13 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[13], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #14, .LBB2_15 +; NONEON-NOSVE-NEXT: .LBB2_31: // %cond.load40 +; NONEON-NOSVE-NEXT: add x9, x0, #14 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[14], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #15, .LBB2_16 +; NONEON-NOSVE-NEXT: .LBB2_32: // %cond.load43 +; NONEON-NOSVE-NEXT: add x8, x0, #15 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[15], [x8] +; NONEON-NOSVE-NEXT: ret %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %src, i32 8, <16 x i1> %mask, <16 x i8> 
zeroinitializer) ret <16 x i8> %load } @@ -130,6 +339,277 @@ define <32 x i8> @masked_load_v32i8(ptr %src, <32 x i1> %mask) { ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr w8, [sp, #72] +; NONEON-NOSVE-NEXT: fmov s1, w1 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #80] +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #88] +; NONEON-NOSVE-NEXT: mov v1.b[1], w2 +; NONEON-NOSVE-NEXT: mov v0.b[1], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp] +; NONEON-NOSVE-NEXT: mov v1.b[2], w3 +; NONEON-NOSVE-NEXT: mov v0.b[2], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #96] +; NONEON-NOSVE-NEXT: mov v1.b[3], w4 +; NONEON-NOSVE-NEXT: mov v0.b[3], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #104] +; NONEON-NOSVE-NEXT: mov v1.b[4], w5 +; NONEON-NOSVE-NEXT: mov v0.b[4], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #112] +; NONEON-NOSVE-NEXT: mov v1.b[5], w6 +; NONEON-NOSVE-NEXT: mov v0.b[5], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #120] +; NONEON-NOSVE-NEXT: mov v1.b[6], w7 +; NONEON-NOSVE-NEXT: mov v0.b[6], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #128] +; NONEON-NOSVE-NEXT: mov v1.b[7], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #8] +; NONEON-NOSVE-NEXT: mov v0.b[7], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #136] +; NONEON-NOSVE-NEXT: mov v1.b[8], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #16] +; NONEON-NOSVE-NEXT: mov v0.b[8], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #144] +; NONEON-NOSVE-NEXT: mov v1.b[9], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #24] +; NONEON-NOSVE-NEXT: mov v0.b[9], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #152] +; NONEON-NOSVE-NEXT: mov v1.b[10], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #32] +; NONEON-NOSVE-NEXT: mov v0.b[10], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #160] +; NONEON-NOSVE-NEXT: mov v1.b[11], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #40] +; NONEON-NOSVE-NEXT: mov v0.b[11], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #168] +; NONEON-NOSVE-NEXT: mov v1.b[12], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #48] +; NONEON-NOSVE-NEXT: mov v0.b[12], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #176] +; NONEON-NOSVE-NEXT: mov v1.b[13], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #56] +; NONEON-NOSVE-NEXT: mov v0.b[13], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #184] +; NONEON-NOSVE-NEXT: mov v1.b[14], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #64] +; NONEON-NOSVE-NEXT: mov v0.b[14], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #192] +; NONEON-NOSVE-NEXT: mov v1.b[15], w9 +; NONEON-NOSVE-NEXT: mov v0.b[15], w8 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI3_0 +; NONEON-NOSVE-NEXT: ldr q2, [x8, :lo12:.LCPI3_0] +; NONEON-NOSVE-NEXT: shl v1.16b, v1.16b, #7 +; NONEON-NOSVE-NEXT: shl v0.16b, v0.16b, #7 +; NONEON-NOSVE-NEXT: cmlt v1.16b, v1.16b, #0 +; NONEON-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0 +; NONEON-NOSVE-NEXT: and v1.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ext v3.16b, v1.16b, v1.16b, #8 +; NONEON-NOSVE-NEXT: ext v2.16b, v0.16b, v0.16b, #8 +; NONEON-NOSVE-NEXT: zip1 v1.16b, v1.16b, v3.16b +; NONEON-NOSVE-NEXT: zip1 v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: addv h1, v1.8h +; NONEON-NOSVE-NEXT: addv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: movi v1.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w9, s0 +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: bfi w8, w9, #16, #16 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB3_33 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB3_34 +; NONEON-NOSVE-NEXT: .LBB3_2: // 
%else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB3_35 +; NONEON-NOSVE-NEXT: .LBB3_3: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB3_36 +; NONEON-NOSVE-NEXT: .LBB3_4: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB3_37 +; NONEON-NOSVE-NEXT: .LBB3_5: // %else11 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB3_38 +; NONEON-NOSVE-NEXT: .LBB3_6: // %else14 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB3_39 +; NONEON-NOSVE-NEXT: .LBB3_7: // %else17 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB3_40 +; NONEON-NOSVE-NEXT: .LBB3_8: // %else20 +; NONEON-NOSVE-NEXT: tbnz w8, #8, .LBB3_41 +; NONEON-NOSVE-NEXT: .LBB3_9: // %else23 +; NONEON-NOSVE-NEXT: tbnz w8, #9, .LBB3_42 +; NONEON-NOSVE-NEXT: .LBB3_10: // %else26 +; NONEON-NOSVE-NEXT: tbnz w8, #10, .LBB3_43 +; NONEON-NOSVE-NEXT: .LBB3_11: // %else29 +; NONEON-NOSVE-NEXT: tbnz w8, #11, .LBB3_44 +; NONEON-NOSVE-NEXT: .LBB3_12: // %else32 +; NONEON-NOSVE-NEXT: tbnz w8, #12, .LBB3_45 +; NONEON-NOSVE-NEXT: .LBB3_13: // %else35 +; NONEON-NOSVE-NEXT: tbnz w8, #13, .LBB3_46 +; NONEON-NOSVE-NEXT: .LBB3_14: // %else38 +; NONEON-NOSVE-NEXT: tbnz w8, #14, .LBB3_47 +; NONEON-NOSVE-NEXT: .LBB3_15: // %else41 +; NONEON-NOSVE-NEXT: tbnz w8, #15, .LBB3_48 +; NONEON-NOSVE-NEXT: .LBB3_16: // %else44 +; NONEON-NOSVE-NEXT: tbnz w8, #16, .LBB3_49 +; NONEON-NOSVE-NEXT: .LBB3_17: // %else47 +; NONEON-NOSVE-NEXT: tbnz w8, #17, .LBB3_50 +; NONEON-NOSVE-NEXT: .LBB3_18: // %else50 +; NONEON-NOSVE-NEXT: tbnz w8, #18, .LBB3_51 +; NONEON-NOSVE-NEXT: .LBB3_19: // %else53 +; NONEON-NOSVE-NEXT: tbnz w8, #19, .LBB3_52 +; NONEON-NOSVE-NEXT: .LBB3_20: // %else56 +; NONEON-NOSVE-NEXT: tbnz w8, #20, .LBB3_53 +; NONEON-NOSVE-NEXT: .LBB3_21: // %else59 +; NONEON-NOSVE-NEXT: tbnz w8, #21, .LBB3_54 +; NONEON-NOSVE-NEXT: .LBB3_22: // %else62 +; NONEON-NOSVE-NEXT: tbnz w8, #22, .LBB3_55 +; NONEON-NOSVE-NEXT: .LBB3_23: // %else65 +; NONEON-NOSVE-NEXT: tbnz w8, #23, .LBB3_56 +; NONEON-NOSVE-NEXT: .LBB3_24: // %else68 +; NONEON-NOSVE-NEXT: tbnz w8, #24, .LBB3_57 +; NONEON-NOSVE-NEXT: .LBB3_25: // %else71 +; NONEON-NOSVE-NEXT: tbnz w8, #25, .LBB3_58 +; NONEON-NOSVE-NEXT: .LBB3_26: // %else74 +; NONEON-NOSVE-NEXT: tbnz w8, #26, .LBB3_59 +; NONEON-NOSVE-NEXT: .LBB3_27: // %else77 +; NONEON-NOSVE-NEXT: tbnz w8, #27, .LBB3_60 +; NONEON-NOSVE-NEXT: .LBB3_28: // %else80 +; NONEON-NOSVE-NEXT: tbnz w8, #28, .LBB3_61 +; NONEON-NOSVE-NEXT: .LBB3_29: // %else83 +; NONEON-NOSVE-NEXT: tbnz w8, #29, .LBB3_62 +; NONEON-NOSVE-NEXT: .LBB3_30: // %else86 +; NONEON-NOSVE-NEXT: tbnz w8, #30, .LBB3_63 +; NONEON-NOSVE-NEXT: .LBB3_31: // %else89 +; NONEON-NOSVE-NEXT: tbnz w8, #31, .LBB3_64 +; NONEON-NOSVE-NEXT: .LBB3_32: // %else92 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB3_33: // %cond.load +; NONEON-NOSVE-NEXT: ldr b0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB3_2 +; NONEON-NOSVE-NEXT: .LBB3_34: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #1 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB3_3 +; NONEON-NOSVE-NEXT: .LBB3_35: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB3_4 +; NONEON-NOSVE-NEXT: .LBB3_36: // %cond.load7 +; NONEON-NOSVE-NEXT: add x9, x0, #3 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[3], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB3_5 +; NONEON-NOSVE-NEXT: .LBB3_37: // %cond.load10 +; NONEON-NOSVE-NEXT: add x9, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[4], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB3_6 +; NONEON-NOSVE-NEXT: .LBB3_38: // %cond.load13 +; NONEON-NOSVE-NEXT: add x9, x0, #5 +; 
NONEON-NOSVE-NEXT: ld1 { v0.b }[5], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB3_7 +; NONEON-NOSVE-NEXT: .LBB3_39: // %cond.load16 +; NONEON-NOSVE-NEXT: add x9, x0, #6 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[6], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB3_8 +; NONEON-NOSVE-NEXT: .LBB3_40: // %cond.load19 +; NONEON-NOSVE-NEXT: add x9, x0, #7 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[7], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #8, .LBB3_9 +; NONEON-NOSVE-NEXT: .LBB3_41: // %cond.load22 +; NONEON-NOSVE-NEXT: add x9, x0, #8 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[8], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #9, .LBB3_10 +; NONEON-NOSVE-NEXT: .LBB3_42: // %cond.load25 +; NONEON-NOSVE-NEXT: add x9, x0, #9 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[9], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #10, .LBB3_11 +; NONEON-NOSVE-NEXT: .LBB3_43: // %cond.load28 +; NONEON-NOSVE-NEXT: add x9, x0, #10 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[10], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #11, .LBB3_12 +; NONEON-NOSVE-NEXT: .LBB3_44: // %cond.load31 +; NONEON-NOSVE-NEXT: add x9, x0, #11 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[11], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #12, .LBB3_13 +; NONEON-NOSVE-NEXT: .LBB3_45: // %cond.load34 +; NONEON-NOSVE-NEXT: add x9, x0, #12 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[12], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #13, .LBB3_14 +; NONEON-NOSVE-NEXT: .LBB3_46: // %cond.load37 +; NONEON-NOSVE-NEXT: add x9, x0, #13 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[13], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #14, .LBB3_15 +; NONEON-NOSVE-NEXT: .LBB3_47: // %cond.load40 +; NONEON-NOSVE-NEXT: add x9, x0, #14 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[14], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #15, .LBB3_16 +; NONEON-NOSVE-NEXT: .LBB3_48: // %cond.load43 +; NONEON-NOSVE-NEXT: add x9, x0, #15 +; NONEON-NOSVE-NEXT: ld1 { v0.b }[15], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #16, .LBB3_17 +; NONEON-NOSVE-NEXT: .LBB3_49: // %cond.load46 +; NONEON-NOSVE-NEXT: add x9, x0, #16 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[0], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #17, .LBB3_18 +; NONEON-NOSVE-NEXT: .LBB3_50: // %cond.load49 +; NONEON-NOSVE-NEXT: add x9, x0, #17 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #18, .LBB3_19 +; NONEON-NOSVE-NEXT: .LBB3_51: // %cond.load52 +; NONEON-NOSVE-NEXT: add x9, x0, #18 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #19, .LBB3_20 +; NONEON-NOSVE-NEXT: .LBB3_52: // %cond.load55 +; NONEON-NOSVE-NEXT: add x9, x0, #19 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[3], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #20, .LBB3_21 +; NONEON-NOSVE-NEXT: .LBB3_53: // %cond.load58 +; NONEON-NOSVE-NEXT: add x9, x0, #20 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[4], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #21, .LBB3_22 +; NONEON-NOSVE-NEXT: .LBB3_54: // %cond.load61 +; NONEON-NOSVE-NEXT: add x9, x0, #21 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[5], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #22, .LBB3_23 +; NONEON-NOSVE-NEXT: .LBB3_55: // %cond.load64 +; NONEON-NOSVE-NEXT: add x9, x0, #22 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[6], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #23, .LBB3_24 +; NONEON-NOSVE-NEXT: .LBB3_56: // %cond.load67 +; NONEON-NOSVE-NEXT: add x9, x0, #23 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[7], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #24, .LBB3_25 +; NONEON-NOSVE-NEXT: .LBB3_57: // %cond.load70 +; NONEON-NOSVE-NEXT: add x9, x0, #24 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[8], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #25, .LBB3_26 +; NONEON-NOSVE-NEXT: .LBB3_58: // %cond.load73 +; NONEON-NOSVE-NEXT: add x9, x0, #25 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[9], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #26, 
.LBB3_27 +; NONEON-NOSVE-NEXT: .LBB3_59: // %cond.load76 +; NONEON-NOSVE-NEXT: add x9, x0, #26 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[10], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #27, .LBB3_28 +; NONEON-NOSVE-NEXT: .LBB3_60: // %cond.load79 +; NONEON-NOSVE-NEXT: add x9, x0, #27 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[11], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #28, .LBB3_29 +; NONEON-NOSVE-NEXT: .LBB3_61: // %cond.load82 +; NONEON-NOSVE-NEXT: add x9, x0, #28 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[12], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #29, .LBB3_30 +; NONEON-NOSVE-NEXT: .LBB3_62: // %cond.load85 +; NONEON-NOSVE-NEXT: add x9, x0, #29 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[13], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #30, .LBB3_31 +; NONEON-NOSVE-NEXT: .LBB3_63: // %cond.load88 +; NONEON-NOSVE-NEXT: add x9, x0, #30 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[14], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #31, .LBB3_32 +; NONEON-NOSVE-NEXT: .LBB3_64: // %cond.load91 +; NONEON-NOSVE-NEXT: add x8, x0, #31 +; NONEON-NOSVE-NEXT: ld1 { v1.b }[15], [x8] +; NONEON-NOSVE-NEXT: ret %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %src, i32 8, <32 x i1> %mask, <32 x i8> zeroinitializer) ret <32 x i8> %load } @@ -155,6 +635,31 @@ define <2 x half> @masked_load_v2f16(ptr %src, <2 x i1> %mask) { ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #31 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI4_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI4_0] +; NONEON-NOSVE-NEXT: cmlt v0.2s, v0.2s, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addp v1.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: movi d0, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB4_3 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB4_4 +; NONEON-NOSVE-NEXT: .LBB4_2: // %else2 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB4_3: // %cond.load +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB4_2 +; NONEON-NOSVE-NEXT: .LBB4_4: // %cond.load1 +; NONEON-NOSVE-NEXT: add x8, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[1], [x8] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %load = call <2 x half> @llvm.masked.load.v2f16(ptr %src, i32 8, <2 x i1> %mask, <2 x half> zeroinitializer) ret <2 x half> %load } @@ -170,6 +675,43 @@ define <4 x half> @masked_load_v4f16(ptr %src, <4 x i1> %mask) { ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #15 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI5_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI5_0] +; NONEON-NOSVE-NEXT: cmlt v0.4h, v0.4h, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv h1, v0.4h +; NONEON-NOSVE-NEXT: movi d0, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB5_5 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB5_6 +; NONEON-NOSVE-NEXT: .LBB5_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB5_7 +; NONEON-NOSVE-NEXT: .LBB5_3: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB5_8 +; NONEON-NOSVE-NEXT: .LBB5_4: // %else8 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret +; 
NONEON-NOSVE-NEXT: .LBB5_5: // %cond.load +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB5_2 +; NONEON-NOSVE-NEXT: .LBB5_6: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB5_3 +; NONEON-NOSVE-NEXT: .LBB5_7: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB5_4 +; NONEON-NOSVE-NEXT: .LBB5_8: // %cond.load7 +; NONEON-NOSVE-NEXT: add x8, x0, #6 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[3], [x8] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %load = call <4 x half> @llvm.masked.load.v4f16(ptr %src, i32 8, <4 x i1> %mask, <4 x half> zeroinitializer) ret <4 x half> %load } @@ -186,6 +728,65 @@ define <8 x half> @masked_load_v8f16(ptr %src, <8 x i1> %mask) { ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.8b, v0.8b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI6_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI6_0] +; NONEON-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv b1, v0.8b +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB6_9 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB6_10 +; NONEON-NOSVE-NEXT: .LBB6_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB6_11 +; NONEON-NOSVE-NEXT: .LBB6_3: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB6_12 +; NONEON-NOSVE-NEXT: .LBB6_4: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB6_13 +; NONEON-NOSVE-NEXT: .LBB6_5: // %else11 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB6_14 +; NONEON-NOSVE-NEXT: .LBB6_6: // %else14 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB6_15 +; NONEON-NOSVE-NEXT: .LBB6_7: // %else17 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB6_16 +; NONEON-NOSVE-NEXT: .LBB6_8: // %else20 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB6_9: // %cond.load +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB6_2 +; NONEON-NOSVE-NEXT: .LBB6_10: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB6_3 +; NONEON-NOSVE-NEXT: .LBB6_11: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB6_4 +; NONEON-NOSVE-NEXT: .LBB6_12: // %cond.load7 +; NONEON-NOSVE-NEXT: add x9, x0, #6 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[3], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB6_5 +; NONEON-NOSVE-NEXT: .LBB6_13: // %cond.load10 +; NONEON-NOSVE-NEXT: add x9, x0, #8 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[4], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB6_6 +; NONEON-NOSVE-NEXT: .LBB6_14: // %cond.load13 +; NONEON-NOSVE-NEXT: add x9, x0, #10 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[5], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB6_7 +; NONEON-NOSVE-NEXT: .LBB6_15: // %cond.load16 +; NONEON-NOSVE-NEXT: add x9, x0, #12 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[6], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB6_8 +; NONEON-NOSVE-NEXT: .LBB6_16: // %cond.load19 +; NONEON-NOSVE-NEXT: add x8, x0, #14 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[7], [x8] +; NONEON-NOSVE-NEXT: ret %load = call <8 x half> @llvm.masked.load.v8f16(ptr %src, i32 8, <8 x i1> %mask, <8 x half> zeroinitializer) ret <8 x half> 
%load } @@ -210,6 +811,116 @@ define <16 x half> @masked_load_v16f16(ptr %src, <16 x i1> %mask) { ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0, x8, lsl #1] ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.16b, v0.16b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI7_0 +; NONEON-NOSVE-NEXT: ldr q1, [x8, :lo12:.LCPI7_0] +; NONEON-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; NONEON-NOSVE-NEXT: zip1 v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: movi v1.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: addv h2, v0.8h +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s2 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB7_17 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB7_18 +; NONEON-NOSVE-NEXT: .LBB7_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB7_19 +; NONEON-NOSVE-NEXT: .LBB7_3: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB7_20 +; NONEON-NOSVE-NEXT: .LBB7_4: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB7_21 +; NONEON-NOSVE-NEXT: .LBB7_5: // %else11 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB7_22 +; NONEON-NOSVE-NEXT: .LBB7_6: // %else14 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB7_23 +; NONEON-NOSVE-NEXT: .LBB7_7: // %else17 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB7_24 +; NONEON-NOSVE-NEXT: .LBB7_8: // %else20 +; NONEON-NOSVE-NEXT: tbnz w8, #8, .LBB7_25 +; NONEON-NOSVE-NEXT: .LBB7_9: // %else23 +; NONEON-NOSVE-NEXT: tbnz w8, #9, .LBB7_26 +; NONEON-NOSVE-NEXT: .LBB7_10: // %else26 +; NONEON-NOSVE-NEXT: tbnz w8, #10, .LBB7_27 +; NONEON-NOSVE-NEXT: .LBB7_11: // %else29 +; NONEON-NOSVE-NEXT: tbnz w8, #11, .LBB7_28 +; NONEON-NOSVE-NEXT: .LBB7_12: // %else32 +; NONEON-NOSVE-NEXT: tbnz w8, #12, .LBB7_29 +; NONEON-NOSVE-NEXT: .LBB7_13: // %else35 +; NONEON-NOSVE-NEXT: tbnz w8, #13, .LBB7_30 +; NONEON-NOSVE-NEXT: .LBB7_14: // %else38 +; NONEON-NOSVE-NEXT: tbnz w8, #14, .LBB7_31 +; NONEON-NOSVE-NEXT: .LBB7_15: // %else41 +; NONEON-NOSVE-NEXT: tbnz w8, #15, .LBB7_32 +; NONEON-NOSVE-NEXT: .LBB7_16: // %else44 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB7_17: // %cond.load +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB7_2 +; NONEON-NOSVE-NEXT: .LBB7_18: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB7_3 +; NONEON-NOSVE-NEXT: .LBB7_19: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB7_4 +; NONEON-NOSVE-NEXT: .LBB7_20: // %cond.load7 +; NONEON-NOSVE-NEXT: add x9, x0, #6 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[3], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB7_5 +; NONEON-NOSVE-NEXT: .LBB7_21: // %cond.load10 +; NONEON-NOSVE-NEXT: add x9, x0, #8 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[4], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB7_6 +; NONEON-NOSVE-NEXT: .LBB7_22: // %cond.load13 +; NONEON-NOSVE-NEXT: add x9, x0, #10 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[5], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB7_7 +; NONEON-NOSVE-NEXT: .LBB7_23: // %cond.load16 +; NONEON-NOSVE-NEXT: add x9, x0, #12 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[6], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB7_8 +; NONEON-NOSVE-NEXT: .LBB7_24: // %cond.load19 +; NONEON-NOSVE-NEXT: add x9, x0, #14 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[7], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #8, .LBB7_9 +; 
NONEON-NOSVE-NEXT: .LBB7_25: // %cond.load22 +; NONEON-NOSVE-NEXT: add x9, x0, #16 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[0], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #9, .LBB7_10 +; NONEON-NOSVE-NEXT: .LBB7_26: // %cond.load25 +; NONEON-NOSVE-NEXT: add x9, x0, #18 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #10, .LBB7_11 +; NONEON-NOSVE-NEXT: .LBB7_27: // %cond.load28 +; NONEON-NOSVE-NEXT: add x9, x0, #20 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #11, .LBB7_12 +; NONEON-NOSVE-NEXT: .LBB7_28: // %cond.load31 +; NONEON-NOSVE-NEXT: add x9, x0, #22 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[3], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #12, .LBB7_13 +; NONEON-NOSVE-NEXT: .LBB7_29: // %cond.load34 +; NONEON-NOSVE-NEXT: add x9, x0, #24 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[4], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #13, .LBB7_14 +; NONEON-NOSVE-NEXT: .LBB7_30: // %cond.load37 +; NONEON-NOSVE-NEXT: add x9, x0, #26 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[5], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #14, .LBB7_15 +; NONEON-NOSVE-NEXT: .LBB7_31: // %cond.load40 +; NONEON-NOSVE-NEXT: add x9, x0, #28 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[6], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #15, .LBB7_16 +; NONEON-NOSVE-NEXT: .LBB7_32: // %cond.load43 +; NONEON-NOSVE-NEXT: add x8, x0, #30 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[7], [x8] +; NONEON-NOSVE-NEXT: ret %load = call <16 x half> @llvm.masked.load.v16f16(ptr %src, i32 8, <16 x i1> %mask, <16 x half> zeroinitializer) ret <16 x half> %load } @@ -225,6 +936,31 @@ define <2 x float> @masked_load_v2f32(ptr %src, <2 x i1> %mask) { ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #31 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI8_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI8_0] +; NONEON-NOSVE-NEXT: cmlt v0.2s, v0.2s, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addp v1.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: movi d0, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB8_3 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB8_4 +; NONEON-NOSVE-NEXT: .LBB8_2: // %else2 +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB8_3: // %cond.load +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB8_2 +; NONEON-NOSVE-NEXT: .LBB8_4: // %cond.load1 +; NONEON-NOSVE-NEXT: add x8, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.s }[1], [x8] +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 killed $q0 +; NONEON-NOSVE-NEXT: ret %load = call <2 x float> @llvm.masked.load.v2f32(ptr %src, i32 8, <2 x i1> %mask, <2 x float> zeroinitializer) ret <2 x float> %load } @@ -241,6 +977,41 @@ define <4 x float> @masked_load_v4f32(ptr %src, <4 x i1> %mask) { ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #15 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI9_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI9_0] +; NONEON-NOSVE-NEXT: cmlt v0.4h, v0.4h, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv h1, v0.4h +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB9_5 +; NONEON-NOSVE-NEXT: // %bb.1: // 
%else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB9_6 +; NONEON-NOSVE-NEXT: .LBB9_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB9_7 +; NONEON-NOSVE-NEXT: .LBB9_3: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB9_8 +; NONEON-NOSVE-NEXT: .LBB9_4: // %else8 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB9_5: // %cond.load +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB9_2 +; NONEON-NOSVE-NEXT: .LBB9_6: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.s }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB9_3 +; NONEON-NOSVE-NEXT: .LBB9_7: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #8 +; NONEON-NOSVE-NEXT: ld1 { v0.s }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB9_4 +; NONEON-NOSVE-NEXT: .LBB9_8: // %cond.load7 +; NONEON-NOSVE-NEXT: add x8, x0, #12 +; NONEON-NOSVE-NEXT: ld1 { v0.s }[3], [x8] +; NONEON-NOSVE-NEXT: ret %load = call <4 x float> @llvm.masked.load.v4f32(ptr %src, i32 8, <4 x i1> %mask, <4 x float> zeroinitializer) ret <4 x float> %load } @@ -290,6 +1061,66 @@ define <8 x float> @masked_load_v8f32(ptr %src, <8 x i1> %mask) { ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.8b, v0.8b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI10_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI10_0] +; NONEON-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: movi v1.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: addv b2, v0.8b +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s2 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB10_9 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB10_10 +; NONEON-NOSVE-NEXT: .LBB10_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB10_11 +; NONEON-NOSVE-NEXT: .LBB10_3: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB10_12 +; NONEON-NOSVE-NEXT: .LBB10_4: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB10_13 +; NONEON-NOSVE-NEXT: .LBB10_5: // %else11 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB10_14 +; NONEON-NOSVE-NEXT: .LBB10_6: // %else14 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB10_15 +; NONEON-NOSVE-NEXT: .LBB10_7: // %else17 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB10_16 +; NONEON-NOSVE-NEXT: .LBB10_8: // %else20 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB10_9: // %cond.load +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB10_2 +; NONEON-NOSVE-NEXT: .LBB10_10: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.s }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB10_3 +; NONEON-NOSVE-NEXT: .LBB10_11: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #8 +; NONEON-NOSVE-NEXT: ld1 { v0.s }[2], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB10_4 +; NONEON-NOSVE-NEXT: .LBB10_12: // %cond.load7 +; NONEON-NOSVE-NEXT: add x9, x0, #12 +; NONEON-NOSVE-NEXT: ld1 { v0.s }[3], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB10_5 +; NONEON-NOSVE-NEXT: .LBB10_13: // %cond.load10 +; NONEON-NOSVE-NEXT: add x9, x0, #16 +; NONEON-NOSVE-NEXT: ld1 { v1.s }[0], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB10_6 +; NONEON-NOSVE-NEXT: .LBB10_14: // %cond.load13 +; NONEON-NOSVE-NEXT: add x9, x0, #20 +; NONEON-NOSVE-NEXT: ld1 { v1.s }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB10_7 +; NONEON-NOSVE-NEXT: .LBB10_15: // %cond.load16 +; NONEON-NOSVE-NEXT: add x9, x0, #24 +; NONEON-NOSVE-NEXT: ld1 { v1.s }[2], [x9] +; 
NONEON-NOSVE-NEXT: tbz w8, #7, .LBB10_8 +; NONEON-NOSVE-NEXT: .LBB10_16: // %cond.load19 +; NONEON-NOSVE-NEXT: add x8, x0, #28 +; NONEON-NOSVE-NEXT: ld1 { v1.s }[3], [x8] +; NONEON-NOSVE-NEXT: ret %load = call <8 x float> @llvm.masked.load.v8f32(ptr %src, i32 8, <8 x i1> %mask, <8 x float> zeroinitializer) ret <8 x float> %load } @@ -306,6 +1137,29 @@ define <2 x double> @masked_load_v2f64(ptr %src, <2 x i1> %mask) { ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #31 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI11_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI11_0] +; NONEON-NOSVE-NEXT: cmlt v0.2s, v0.2s, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addp v1.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB11_3 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB11_4 +; NONEON-NOSVE-NEXT: .LBB11_2: // %else2 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB11_3: // %cond.load +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB11_2 +; NONEON-NOSVE-NEXT: .LBB11_4: // %cond.load1 +; NONEON-NOSVE-NEXT: add x8, x0, #8 +; NONEON-NOSVE-NEXT: ld1 { v0.d }[1], [x8] +; NONEON-NOSVE-NEXT: ret %load = call <2 x double> @llvm.masked.load.v2f64(ptr %src, i32 8, <2 x i1> %mask, <2 x double> zeroinitializer) ret <2 x double> %load } @@ -331,6 +1185,42 @@ define <4 x double> @masked_load_v4f64(ptr %src, <4 x i1> %mask) { ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, x8, lsl #3] ; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #15 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI12_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI12_0] +; NONEON-NOSVE-NEXT: cmlt v0.4h, v0.4h, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: movi v1.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: addv h2, v0.4h +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: fmov w8, s2 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB12_5 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB12_6 +; NONEON-NOSVE-NEXT: .LBB12_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB12_7 +; NONEON-NOSVE-NEXT: .LBB12_3: // %else5 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB12_8 +; NONEON-NOSVE-NEXT: .LBB12_4: // %else8 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB12_5: // %cond.load +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB12_2 +; NONEON-NOSVE-NEXT: .LBB12_6: // %cond.load1 +; NONEON-NOSVE-NEXT: add x9, x0, #8 +; NONEON-NOSVE-NEXT: ld1 { v0.d }[1], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB12_3 +; NONEON-NOSVE-NEXT: .LBB12_7: // %cond.load4 +; NONEON-NOSVE-NEXT: add x9, x0, #16 +; NONEON-NOSVE-NEXT: ld1 { v1.d }[0], [x9] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB12_4 +; NONEON-NOSVE-NEXT: .LBB12_8: // %cond.load7 +; NONEON-NOSVE-NEXT: add x8, x0, #24 +; NONEON-NOSVE-NEXT: ld1 { v1.d }[1], [x8] +; NONEON-NOSVE-NEXT: ret %load = call <4 x double> @llvm.masked.load.v4f64(ptr %src, i32 8, <4 x i1> %mask, <4 x double> zeroinitializer) ret <4 x double> %load } @@ -356,6 +1246,38 @@ define <3 x i32> @masked_load_zext_v3i32(ptr %load_ptr, <3 x i1> %pm) { ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: 
add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_zext_v3i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub sp, sp, #16 +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: and w8, w1, #0x1 +; NONEON-NOSVE-NEXT: bfi w8, w2, #1, #1 +; NONEON-NOSVE-NEXT: bfi w8, w3, #2, #1 +; NONEON-NOSVE-NEXT: tbz w8, #0, .LBB13_2 +; NONEON-NOSVE-NEXT: // %bb.1: // %cond.load +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB13_3 +; NONEON-NOSVE-NEXT: b .LBB13_4 +; NONEON-NOSVE-NEXT: .LBB13_2: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB13_4 +; NONEON-NOSVE-NEXT: .LBB13_3: // %cond.load1 +; NONEON-NOSVE-NEXT: mov v1.16b, v0.16b +; NONEON-NOSVE-NEXT: add x9, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[1], [x9] +; NONEON-NOSVE-NEXT: mov v1.h[2], v0.h[2] +; NONEON-NOSVE-NEXT: fmov d0, d1 +; NONEON-NOSVE-NEXT: .LBB13_4: // %else2 +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB13_6 +; NONEON-NOSVE-NEXT: // %bb.5: // %cond.load4 +; NONEON-NOSVE-NEXT: mov v0.h[1], v0.h[1] +; NONEON-NOSVE-NEXT: add x8, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[2], [x8] +; NONEON-NOSVE-NEXT: .LBB13_6: // %else5 +; NONEON-NOSVE-NEXT: ushll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %load_value = tail call <3 x i16> @llvm.masked.load.v3i16.p0(ptr %load_ptr, i32 4, <3 x i1> %pm, <3 x i16> zeroinitializer) %extend = zext <3 x i16> %load_value to <3 x i32> ret <3 x i32> %extend; @@ -382,6 +1304,38 @@ define <3 x i32> @masked_load_sext_v3i32(ptr %load_ptr, <3 x i1> %pm) { ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_load_sext_v3i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: sub sp, sp, #16 +; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16 +; NONEON-NOSVE-NEXT: and w8, w1, #0x1 +; NONEON-NOSVE-NEXT: bfi w8, w2, #1, #1 +; NONEON-NOSVE-NEXT: bfi w8, w3, #2, #1 +; NONEON-NOSVE-NEXT: tbz w8, #0, .LBB14_2 +; NONEON-NOSVE-NEXT: // %bb.1: // %cond.load +; NONEON-NOSVE-NEXT: ldr h0, [x0] +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB14_3 +; NONEON-NOSVE-NEXT: b .LBB14_4 +; NONEON-NOSVE-NEXT: .LBB14_2: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB14_4 +; NONEON-NOSVE-NEXT: .LBB14_3: // %cond.load1 +; NONEON-NOSVE-NEXT: mov v1.16b, v0.16b +; NONEON-NOSVE-NEXT: add x9, x0, #2 +; NONEON-NOSVE-NEXT: ld1 { v1.h }[1], [x9] +; NONEON-NOSVE-NEXT: mov v1.h[2], v0.h[2] +; NONEON-NOSVE-NEXT: fmov d0, d1 +; NONEON-NOSVE-NEXT: .LBB14_4: // %else2 +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB14_6 +; NONEON-NOSVE-NEXT: // %bb.5: // %cond.load4 +; NONEON-NOSVE-NEXT: mov v0.h[1], v0.h[1] +; NONEON-NOSVE-NEXT: add x8, x0, #4 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[2], [x8] +; NONEON-NOSVE-NEXT: .LBB14_6: // %else5 +; NONEON-NOSVE-NEXT: sshll v0.4s, v0.4h, #0 +; NONEON-NOSVE-NEXT: add sp, sp, #16 +; NONEON-NOSVE-NEXT: ret %load_value = tail call <3 x i16> @llvm.masked.load.v3i16.p0(ptr %load_ptr, i32 4, <3 x i1> %pm, <3 x i16> zeroinitializer) %extend = sext <3 x i16> %load_value to <3 x i32> ret <3 x i32> %extend; diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll index f2b3f9b12ea718..b175dcf3e9a0d4 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have 
been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -19,6 +20,37 @@ define void @masked_store_v4i8(ptr %dst, <4 x i1> %mask) { ; CHECK-NEXT: mov z0.h, #0 // =0x0 ; CHECK-NEXT: st1b { z0.h }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #15 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI0_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI0_0] +; NONEON-NOSVE-NEXT: cmlt v0.4h, v0.4h, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB0_5 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB0_6 +; NONEON-NOSVE-NEXT: .LBB0_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB0_7 +; NONEON-NOSVE-NEXT: .LBB0_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB0_8 +; NONEON-NOSVE-NEXT: .LBB0_4: // %else6 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB0_5: // %cond.store +; NONEON-NOSVE-NEXT: strb wzr, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB0_2 +; NONEON-NOSVE-NEXT: .LBB0_6: // %cond.store1 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #1] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB0_3 +; NONEON-NOSVE-NEXT: .LBB0_7: // %cond.store3 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #2] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB0_4 +; NONEON-NOSVE-NEXT: .LBB0_8: // %cond.store5 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #3] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v4i8(<4 x i8> zeroinitializer, ptr %dst, i32 8, <4 x i1> %mask) ret void } @@ -34,6 +66,57 @@ define void @masked_store_v8i8(ptr %dst, <8 x i1> %mask) { ; CHECK-NEXT: mov z0.b, #0 // =0x0 ; CHECK-NEXT: st1b { z0.b }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.8b, v0.8b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI1_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI1_0] +; NONEON-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv b0, v0.8b +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB1_9 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB1_10 +; NONEON-NOSVE-NEXT: .LBB1_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB1_11 +; NONEON-NOSVE-NEXT: .LBB1_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB1_12 +; NONEON-NOSVE-NEXT: .LBB1_4: // %else6 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB1_13 +; NONEON-NOSVE-NEXT: .LBB1_5: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB1_14 +; NONEON-NOSVE-NEXT: .LBB1_6: // %else10 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB1_15 +; NONEON-NOSVE-NEXT: .LBB1_7: // %else12 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB1_16 +; NONEON-NOSVE-NEXT: .LBB1_8: // %else14 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB1_9: // %cond.store +; NONEON-NOSVE-NEXT: strb wzr, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB1_2 +; NONEON-NOSVE-NEXT: .LBB1_10: // %cond.store1 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #1] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB1_3 +; NONEON-NOSVE-NEXT: .LBB1_11: // %cond.store3 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #2] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB1_4 +; NONEON-NOSVE-NEXT: .LBB1_12: // %cond.store5 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #3] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB1_5 +; NONEON-NOSVE-NEXT: 
.LBB1_13: // %cond.store7 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #4] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB1_6 +; NONEON-NOSVE-NEXT: .LBB1_14: // %cond.store9 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #5] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB1_7 +; NONEON-NOSVE-NEXT: .LBB1_15: // %cond.store11 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #6] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB1_8 +; NONEON-NOSVE-NEXT: .LBB1_16: // %cond.store13 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #7] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v8i8(<8 x i8> zeroinitializer, ptr %dst, i32 8, <8 x i1> %mask) ret void } @@ -49,6 +132,99 @@ define void @masked_store_v16i8(ptr %dst, <16 x i1> %mask) { ; CHECK-NEXT: mov z0.b, #0 // =0x0 ; CHECK-NEXT: st1b { z0.b }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.16b, v0.16b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI2_0 +; NONEON-NOSVE-NEXT: ldr q1, [x8, :lo12:.LCPI2_0] +; NONEON-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; NONEON-NOSVE-NEXT: zip1 v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: addv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB2_17 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB2_18 +; NONEON-NOSVE-NEXT: .LBB2_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB2_19 +; NONEON-NOSVE-NEXT: .LBB2_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB2_20 +; NONEON-NOSVE-NEXT: .LBB2_4: // %else6 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB2_21 +; NONEON-NOSVE-NEXT: .LBB2_5: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB2_22 +; NONEON-NOSVE-NEXT: .LBB2_6: // %else10 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB2_23 +; NONEON-NOSVE-NEXT: .LBB2_7: // %else12 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB2_24 +; NONEON-NOSVE-NEXT: .LBB2_8: // %else14 +; NONEON-NOSVE-NEXT: tbnz w8, #8, .LBB2_25 +; NONEON-NOSVE-NEXT: .LBB2_9: // %else16 +; NONEON-NOSVE-NEXT: tbnz w8, #9, .LBB2_26 +; NONEON-NOSVE-NEXT: .LBB2_10: // %else18 +; NONEON-NOSVE-NEXT: tbnz w8, #10, .LBB2_27 +; NONEON-NOSVE-NEXT: .LBB2_11: // %else20 +; NONEON-NOSVE-NEXT: tbnz w8, #11, .LBB2_28 +; NONEON-NOSVE-NEXT: .LBB2_12: // %else22 +; NONEON-NOSVE-NEXT: tbnz w8, #12, .LBB2_29 +; NONEON-NOSVE-NEXT: .LBB2_13: // %else24 +; NONEON-NOSVE-NEXT: tbnz w8, #13, .LBB2_30 +; NONEON-NOSVE-NEXT: .LBB2_14: // %else26 +; NONEON-NOSVE-NEXT: tbnz w8, #14, .LBB2_31 +; NONEON-NOSVE-NEXT: .LBB2_15: // %else28 +; NONEON-NOSVE-NEXT: tbnz w8, #15, .LBB2_32 +; NONEON-NOSVE-NEXT: .LBB2_16: // %else30 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB2_17: // %cond.store +; NONEON-NOSVE-NEXT: strb wzr, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB2_2 +; NONEON-NOSVE-NEXT: .LBB2_18: // %cond.store1 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #1] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB2_3 +; NONEON-NOSVE-NEXT: .LBB2_19: // %cond.store3 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #2] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB2_4 +; NONEON-NOSVE-NEXT: .LBB2_20: // %cond.store5 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #3] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB2_5 +; NONEON-NOSVE-NEXT: .LBB2_21: // %cond.store7 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #4] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB2_6 +; NONEON-NOSVE-NEXT: .LBB2_22: // %cond.store9 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #5] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB2_7 +; NONEON-NOSVE-NEXT: .LBB2_23: // %cond.store11 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #6] +; NONEON-NOSVE-NEXT: tbz w8, 
#7, .LBB2_8 +; NONEON-NOSVE-NEXT: .LBB2_24: // %cond.store13 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #7] +; NONEON-NOSVE-NEXT: tbz w8, #8, .LBB2_9 +; NONEON-NOSVE-NEXT: .LBB2_25: // %cond.store15 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #8] +; NONEON-NOSVE-NEXT: tbz w8, #9, .LBB2_10 +; NONEON-NOSVE-NEXT: .LBB2_26: // %cond.store17 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #9] +; NONEON-NOSVE-NEXT: tbz w8, #10, .LBB2_11 +; NONEON-NOSVE-NEXT: .LBB2_27: // %cond.store19 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #10] +; NONEON-NOSVE-NEXT: tbz w8, #11, .LBB2_12 +; NONEON-NOSVE-NEXT: .LBB2_28: // %cond.store21 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #11] +; NONEON-NOSVE-NEXT: tbz w8, #12, .LBB2_13 +; NONEON-NOSVE-NEXT: .LBB2_29: // %cond.store23 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #12] +; NONEON-NOSVE-NEXT: tbz w8, #13, .LBB2_14 +; NONEON-NOSVE-NEXT: .LBB2_30: // %cond.store25 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #13] +; NONEON-NOSVE-NEXT: tbz w8, #14, .LBB2_15 +; NONEON-NOSVE-NEXT: .LBB2_31: // %cond.store27 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #14] +; NONEON-NOSVE-NEXT: tbz w8, #15, .LBB2_16 +; NONEON-NOSVE-NEXT: .LBB2_32: // %cond.store29 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #15] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v16i8(<16 x i8> zeroinitializer, ptr %dst, i32 8, <16 x i1> %mask) ret void } @@ -129,6 +305,244 @@ define void @masked_store_v32i8(ptr %dst, <32 x i1> %mask) { ; CHECK-NEXT: st1b { z0.b }, p0, [x0] ; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr w8, [sp, #72] +; NONEON-NOSVE-NEXT: fmov s1, w1 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #80] +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #88] +; NONEON-NOSVE-NEXT: mov v1.b[1], w2 +; NONEON-NOSVE-NEXT: mov v0.b[1], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp] +; NONEON-NOSVE-NEXT: mov v1.b[2], w3 +; NONEON-NOSVE-NEXT: mov v0.b[2], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #96] +; NONEON-NOSVE-NEXT: mov v1.b[3], w4 +; NONEON-NOSVE-NEXT: mov v0.b[3], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #104] +; NONEON-NOSVE-NEXT: mov v1.b[4], w5 +; NONEON-NOSVE-NEXT: mov v0.b[4], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #112] +; NONEON-NOSVE-NEXT: mov v1.b[5], w6 +; NONEON-NOSVE-NEXT: mov v0.b[5], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #120] +; NONEON-NOSVE-NEXT: mov v1.b[6], w7 +; NONEON-NOSVE-NEXT: mov v0.b[6], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #128] +; NONEON-NOSVE-NEXT: mov v1.b[7], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #8] +; NONEON-NOSVE-NEXT: mov v0.b[7], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #136] +; NONEON-NOSVE-NEXT: mov v1.b[8], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #16] +; NONEON-NOSVE-NEXT: mov v0.b[8], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #144] +; NONEON-NOSVE-NEXT: mov v1.b[9], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #24] +; NONEON-NOSVE-NEXT: mov v0.b[9], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #152] +; NONEON-NOSVE-NEXT: mov v1.b[10], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #32] +; NONEON-NOSVE-NEXT: mov v0.b[10], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #160] +; NONEON-NOSVE-NEXT: mov v1.b[11], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #40] +; NONEON-NOSVE-NEXT: mov v0.b[11], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #168] +; NONEON-NOSVE-NEXT: mov v1.b[12], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #48] +; NONEON-NOSVE-NEXT: mov v0.b[12], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #176] +; NONEON-NOSVE-NEXT: mov v1.b[13], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #56] +; NONEON-NOSVE-NEXT: mov v0.b[13], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #184] +; 
NONEON-NOSVE-NEXT: mov v1.b[14], w9 +; NONEON-NOSVE-NEXT: ldr w9, [sp, #64] +; NONEON-NOSVE-NEXT: mov v0.b[14], w8 +; NONEON-NOSVE-NEXT: ldr w8, [sp, #192] +; NONEON-NOSVE-NEXT: mov v1.b[15], w9 +; NONEON-NOSVE-NEXT: mov v0.b[15], w8 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI3_0 +; NONEON-NOSVE-NEXT: ldr q2, [x8, :lo12:.LCPI3_0] +; NONEON-NOSVE-NEXT: shl v1.16b, v1.16b, #7 +; NONEON-NOSVE-NEXT: shl v0.16b, v0.16b, #7 +; NONEON-NOSVE-NEXT: cmlt v1.16b, v1.16b, #0 +; NONEON-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0 +; NONEON-NOSVE-NEXT: and v1.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ext v3.16b, v1.16b, v1.16b, #8 +; NONEON-NOSVE-NEXT: ext v2.16b, v0.16b, v0.16b, #8 +; NONEON-NOSVE-NEXT: zip1 v1.16b, v1.16b, v3.16b +; NONEON-NOSVE-NEXT: zip1 v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: addv h1, v1.8h +; NONEON-NOSVE-NEXT: addv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w8, s1 +; NONEON-NOSVE-NEXT: fmov w9, s0 +; NONEON-NOSVE-NEXT: bfi w8, w9, #16, #16 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB3_33 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB3_34 +; NONEON-NOSVE-NEXT: .LBB3_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB3_35 +; NONEON-NOSVE-NEXT: .LBB3_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB3_36 +; NONEON-NOSVE-NEXT: .LBB3_4: // %else6 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB3_37 +; NONEON-NOSVE-NEXT: .LBB3_5: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB3_38 +; NONEON-NOSVE-NEXT: .LBB3_6: // %else10 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB3_39 +; NONEON-NOSVE-NEXT: .LBB3_7: // %else12 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB3_40 +; NONEON-NOSVE-NEXT: .LBB3_8: // %else14 +; NONEON-NOSVE-NEXT: tbnz w8, #8, .LBB3_41 +; NONEON-NOSVE-NEXT: .LBB3_9: // %else16 +; NONEON-NOSVE-NEXT: tbnz w8, #9, .LBB3_42 +; NONEON-NOSVE-NEXT: .LBB3_10: // %else18 +; NONEON-NOSVE-NEXT: tbnz w8, #10, .LBB3_43 +; NONEON-NOSVE-NEXT: .LBB3_11: // %else20 +; NONEON-NOSVE-NEXT: tbnz w8, #11, .LBB3_44 +; NONEON-NOSVE-NEXT: .LBB3_12: // %else22 +; NONEON-NOSVE-NEXT: tbnz w8, #12, .LBB3_45 +; NONEON-NOSVE-NEXT: .LBB3_13: // %else24 +; NONEON-NOSVE-NEXT: tbnz w8, #13, .LBB3_46 +; NONEON-NOSVE-NEXT: .LBB3_14: // %else26 +; NONEON-NOSVE-NEXT: tbnz w8, #14, .LBB3_47 +; NONEON-NOSVE-NEXT: .LBB3_15: // %else28 +; NONEON-NOSVE-NEXT: tbnz w8, #15, .LBB3_48 +; NONEON-NOSVE-NEXT: .LBB3_16: // %else30 +; NONEON-NOSVE-NEXT: tbnz w8, #16, .LBB3_49 +; NONEON-NOSVE-NEXT: .LBB3_17: // %else32 +; NONEON-NOSVE-NEXT: tbnz w8, #17, .LBB3_50 +; NONEON-NOSVE-NEXT: .LBB3_18: // %else34 +; NONEON-NOSVE-NEXT: tbnz w8, #18, .LBB3_51 +; NONEON-NOSVE-NEXT: .LBB3_19: // %else36 +; NONEON-NOSVE-NEXT: tbnz w8, #19, .LBB3_52 +; NONEON-NOSVE-NEXT: .LBB3_20: // %else38 +; NONEON-NOSVE-NEXT: tbnz w8, #20, .LBB3_53 +; NONEON-NOSVE-NEXT: .LBB3_21: // %else40 +; NONEON-NOSVE-NEXT: tbnz w8, #21, .LBB3_54 +; NONEON-NOSVE-NEXT: .LBB3_22: // %else42 +; NONEON-NOSVE-NEXT: tbnz w8, #22, .LBB3_55 +; NONEON-NOSVE-NEXT: .LBB3_23: // %else44 +; NONEON-NOSVE-NEXT: tbnz w8, #23, .LBB3_56 +; NONEON-NOSVE-NEXT: .LBB3_24: // %else46 +; NONEON-NOSVE-NEXT: tbnz w8, #24, .LBB3_57 +; NONEON-NOSVE-NEXT: .LBB3_25: // %else48 +; NONEON-NOSVE-NEXT: tbnz w8, #25, .LBB3_58 +; NONEON-NOSVE-NEXT: .LBB3_26: // %else50 +; NONEON-NOSVE-NEXT: tbnz w8, #26, .LBB3_59 +; NONEON-NOSVE-NEXT: .LBB3_27: // %else52 +; NONEON-NOSVE-NEXT: tbnz w8, #27, .LBB3_60 +; NONEON-NOSVE-NEXT: .LBB3_28: // %else54 +; NONEON-NOSVE-NEXT: tbnz w8, #28, .LBB3_61 +; NONEON-NOSVE-NEXT: .LBB3_29: // %else56 +; NONEON-NOSVE-NEXT: tbnz w8, 
#29, .LBB3_62 +; NONEON-NOSVE-NEXT: .LBB3_30: // %else58 +; NONEON-NOSVE-NEXT: tbnz w8, #30, .LBB3_63 +; NONEON-NOSVE-NEXT: .LBB3_31: // %else60 +; NONEON-NOSVE-NEXT: tbnz w8, #31, .LBB3_64 +; NONEON-NOSVE-NEXT: .LBB3_32: // %else62 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB3_33: // %cond.store +; NONEON-NOSVE-NEXT: strb wzr, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB3_2 +; NONEON-NOSVE-NEXT: .LBB3_34: // %cond.store1 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #1] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB3_3 +; NONEON-NOSVE-NEXT: .LBB3_35: // %cond.store3 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #2] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB3_4 +; NONEON-NOSVE-NEXT: .LBB3_36: // %cond.store5 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #3] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB3_5 +; NONEON-NOSVE-NEXT: .LBB3_37: // %cond.store7 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #4] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB3_6 +; NONEON-NOSVE-NEXT: .LBB3_38: // %cond.store9 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #5] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB3_7 +; NONEON-NOSVE-NEXT: .LBB3_39: // %cond.store11 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #6] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB3_8 +; NONEON-NOSVE-NEXT: .LBB3_40: // %cond.store13 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #7] +; NONEON-NOSVE-NEXT: tbz w8, #8, .LBB3_9 +; NONEON-NOSVE-NEXT: .LBB3_41: // %cond.store15 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #8] +; NONEON-NOSVE-NEXT: tbz w8, #9, .LBB3_10 +; NONEON-NOSVE-NEXT: .LBB3_42: // %cond.store17 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #9] +; NONEON-NOSVE-NEXT: tbz w8, #10, .LBB3_11 +; NONEON-NOSVE-NEXT: .LBB3_43: // %cond.store19 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #10] +; NONEON-NOSVE-NEXT: tbz w8, #11, .LBB3_12 +; NONEON-NOSVE-NEXT: .LBB3_44: // %cond.store21 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #11] +; NONEON-NOSVE-NEXT: tbz w8, #12, .LBB3_13 +; NONEON-NOSVE-NEXT: .LBB3_45: // %cond.store23 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #12] +; NONEON-NOSVE-NEXT: tbz w8, #13, .LBB3_14 +; NONEON-NOSVE-NEXT: .LBB3_46: // %cond.store25 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #13] +; NONEON-NOSVE-NEXT: tbz w8, #14, .LBB3_15 +; NONEON-NOSVE-NEXT: .LBB3_47: // %cond.store27 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #14] +; NONEON-NOSVE-NEXT: tbz w8, #15, .LBB3_16 +; NONEON-NOSVE-NEXT: .LBB3_48: // %cond.store29 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #15] +; NONEON-NOSVE-NEXT: tbz w8, #16, .LBB3_17 +; NONEON-NOSVE-NEXT: .LBB3_49: // %cond.store31 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #16] +; NONEON-NOSVE-NEXT: tbz w8, #17, .LBB3_18 +; NONEON-NOSVE-NEXT: .LBB3_50: // %cond.store33 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #17] +; NONEON-NOSVE-NEXT: tbz w8, #18, .LBB3_19 +; NONEON-NOSVE-NEXT: .LBB3_51: // %cond.store35 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #18] +; NONEON-NOSVE-NEXT: tbz w8, #19, .LBB3_20 +; NONEON-NOSVE-NEXT: .LBB3_52: // %cond.store37 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #19] +; NONEON-NOSVE-NEXT: tbz w8, #20, .LBB3_21 +; NONEON-NOSVE-NEXT: .LBB3_53: // %cond.store39 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #20] +; NONEON-NOSVE-NEXT: tbz w8, #21, .LBB3_22 +; NONEON-NOSVE-NEXT: .LBB3_54: // %cond.store41 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #21] +; NONEON-NOSVE-NEXT: tbz w8, #22, .LBB3_23 +; NONEON-NOSVE-NEXT: .LBB3_55: // %cond.store43 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #22] +; NONEON-NOSVE-NEXT: tbz w8, #23, .LBB3_24 +; NONEON-NOSVE-NEXT: .LBB3_56: // %cond.store45 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #23] +; NONEON-NOSVE-NEXT: tbz w8, #24, .LBB3_25 +; NONEON-NOSVE-NEXT: .LBB3_57: // %cond.store47 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #24] +; 
NONEON-NOSVE-NEXT: tbz w8, #25, .LBB3_26 +; NONEON-NOSVE-NEXT: .LBB3_58: // %cond.store49 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #25] +; NONEON-NOSVE-NEXT: tbz w8, #26, .LBB3_27 +; NONEON-NOSVE-NEXT: .LBB3_59: // %cond.store51 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #26] +; NONEON-NOSVE-NEXT: tbz w8, #27, .LBB3_28 +; NONEON-NOSVE-NEXT: .LBB3_60: // %cond.store53 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #27] +; NONEON-NOSVE-NEXT: tbz w8, #28, .LBB3_29 +; NONEON-NOSVE-NEXT: .LBB3_61: // %cond.store55 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #28] +; NONEON-NOSVE-NEXT: tbz w8, #29, .LBB3_30 +; NONEON-NOSVE-NEXT: .LBB3_62: // %cond.store57 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #29] +; NONEON-NOSVE-NEXT: tbz w8, #30, .LBB3_31 +; NONEON-NOSVE-NEXT: .LBB3_63: // %cond.store59 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #30] +; NONEON-NOSVE-NEXT: tbz w8, #31, .LBB3_32 +; NONEON-NOSVE-NEXT: .LBB3_64: // %cond.store61 +; NONEON-NOSVE-NEXT: strb wzr, [x0, #31] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v32i8(<32 x i8> zeroinitializer, ptr %dst, i32 8, <32 x i1> %mask) ret void } @@ -154,6 +568,29 @@ define void @masked_store_v2f16(ptr %dst, <2 x i1> %mask) { ; CHECK-NEXT: st1h { z0.h }, p0, [x0] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #31 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI4_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI4_0] +; NONEON-NOSVE-NEXT: cmlt v0.2s, v0.2s, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addp v0.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB4_3 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB4_4 +; NONEON-NOSVE-NEXT: .LBB4_2: // %else2 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB4_3: // %cond.store +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB4_2 +; NONEON-NOSVE-NEXT: .LBB4_4: // %cond.store1 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #2] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v2f16(<2 x half> zeroinitializer, ptr %dst, i32 8, <2 x i1> %mask) ret void } @@ -169,6 +606,41 @@ define void @masked_store_v4f16(ptr %dst, <4 x i1> %mask) { ; CHECK-NEXT: mov z0.h, #0 // =0x0 ; CHECK-NEXT: st1h { z0.h }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #15 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI5_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI5_0] +; NONEON-NOSVE-NEXT: cmlt v0.4h, v0.4h, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB5_5 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB5_6 +; NONEON-NOSVE-NEXT: .LBB5_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB5_7 +; NONEON-NOSVE-NEXT: .LBB5_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB5_8 +; NONEON-NOSVE-NEXT: .LBB5_4: // %else6 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB5_5: // %cond.store +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB5_2 +; NONEON-NOSVE-NEXT: .LBB5_6: // %cond.store1 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #2] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB5_3 +; NONEON-NOSVE-NEXT: .LBB5_7: // %cond.store3 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: 
str h0, [x0, #4] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB5_4 +; NONEON-NOSVE-NEXT: .LBB5_8: // %cond.store5 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #6] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v4f16(<4 x half> zeroinitializer, ptr %dst, i32 8, <4 x i1> %mask) ret void } @@ -185,6 +657,65 @@ define void @masked_store_v8f16(ptr %dst, <8 x i1> %mask) { ; CHECK-NEXT: mov z0.h, #0 // =0x0 ; CHECK-NEXT: st1h { z0.h }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.8b, v0.8b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI6_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI6_0] +; NONEON-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv b0, v0.8b +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB6_9 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB6_10 +; NONEON-NOSVE-NEXT: .LBB6_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB6_11 +; NONEON-NOSVE-NEXT: .LBB6_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB6_12 +; NONEON-NOSVE-NEXT: .LBB6_4: // %else6 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB6_13 +; NONEON-NOSVE-NEXT: .LBB6_5: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB6_14 +; NONEON-NOSVE-NEXT: .LBB6_6: // %else10 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB6_15 +; NONEON-NOSVE-NEXT: .LBB6_7: // %else12 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB6_16 +; NONEON-NOSVE-NEXT: .LBB6_8: // %else14 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB6_9: // %cond.store +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB6_2 +; NONEON-NOSVE-NEXT: .LBB6_10: // %cond.store1 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #2] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB6_3 +; NONEON-NOSVE-NEXT: .LBB6_11: // %cond.store3 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #4] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB6_4 +; NONEON-NOSVE-NEXT: .LBB6_12: // %cond.store5 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #6] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB6_5 +; NONEON-NOSVE-NEXT: .LBB6_13: // %cond.store7 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #8] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB6_6 +; NONEON-NOSVE-NEXT: .LBB6_14: // %cond.store9 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #10] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB6_7 +; NONEON-NOSVE-NEXT: .LBB6_15: // %cond.store11 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #12] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB6_8 +; NONEON-NOSVE-NEXT: .LBB6_16: // %cond.store13 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #14] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v8f16(<8 x half> zeroinitializer, ptr %dst, i32 8, <8 x i1> %mask) ret void } @@ -209,6 +740,115 @@ define void @masked_store_v16f16(ptr %dst, <16 x i1> %mask) { ; CHECK-NEXT: st1h { z1.h }, p1, [x0, x8, lsl #1] ; CHECK-NEXT: st1h { z1.h }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.16b, v0.16b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI7_0 +; NONEON-NOSVE-NEXT: ldr q1, [x8, :lo12:.LCPI7_0] +; NONEON-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0 +; NONEON-NOSVE-NEXT: and v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; NONEON-NOSVE-NEXT: zip1 v0.16b, v0.16b, 
v1.16b +; NONEON-NOSVE-NEXT: addv h0, v0.8h +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB7_17 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB7_18 +; NONEON-NOSVE-NEXT: .LBB7_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB7_19 +; NONEON-NOSVE-NEXT: .LBB7_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB7_20 +; NONEON-NOSVE-NEXT: .LBB7_4: // %else6 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB7_21 +; NONEON-NOSVE-NEXT: .LBB7_5: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB7_22 +; NONEON-NOSVE-NEXT: .LBB7_6: // %else10 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB7_23 +; NONEON-NOSVE-NEXT: .LBB7_7: // %else12 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB7_24 +; NONEON-NOSVE-NEXT: .LBB7_8: // %else14 +; NONEON-NOSVE-NEXT: tbnz w8, #8, .LBB7_25 +; NONEON-NOSVE-NEXT: .LBB7_9: // %else16 +; NONEON-NOSVE-NEXT: tbnz w8, #9, .LBB7_26 +; NONEON-NOSVE-NEXT: .LBB7_10: // %else18 +; NONEON-NOSVE-NEXT: tbnz w8, #10, .LBB7_27 +; NONEON-NOSVE-NEXT: .LBB7_11: // %else20 +; NONEON-NOSVE-NEXT: tbnz w8, #11, .LBB7_28 +; NONEON-NOSVE-NEXT: .LBB7_12: // %else22 +; NONEON-NOSVE-NEXT: tbnz w8, #12, .LBB7_29 +; NONEON-NOSVE-NEXT: .LBB7_13: // %else24 +; NONEON-NOSVE-NEXT: tbnz w8, #13, .LBB7_30 +; NONEON-NOSVE-NEXT: .LBB7_14: // %else26 +; NONEON-NOSVE-NEXT: tbnz w8, #14, .LBB7_31 +; NONEON-NOSVE-NEXT: .LBB7_15: // %else28 +; NONEON-NOSVE-NEXT: tbnz w8, #15, .LBB7_32 +; NONEON-NOSVE-NEXT: .LBB7_16: // %else30 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB7_17: // %cond.store +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB7_2 +; NONEON-NOSVE-NEXT: .LBB7_18: // %cond.store1 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #2] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB7_3 +; NONEON-NOSVE-NEXT: .LBB7_19: // %cond.store3 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #4] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB7_4 +; NONEON-NOSVE-NEXT: .LBB7_20: // %cond.store5 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #6] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB7_5 +; NONEON-NOSVE-NEXT: .LBB7_21: // %cond.store7 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #8] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB7_6 +; NONEON-NOSVE-NEXT: .LBB7_22: // %cond.store9 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #10] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB7_7 +; NONEON-NOSVE-NEXT: .LBB7_23: // %cond.store11 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #12] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB7_8 +; NONEON-NOSVE-NEXT: .LBB7_24: // %cond.store13 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #14] +; NONEON-NOSVE-NEXT: tbz w8, #8, .LBB7_9 +; NONEON-NOSVE-NEXT: .LBB7_25: // %cond.store15 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #16] +; NONEON-NOSVE-NEXT: tbz w8, #9, .LBB7_10 +; NONEON-NOSVE-NEXT: .LBB7_26: // %cond.store17 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #18] +; NONEON-NOSVE-NEXT: tbz w8, #10, .LBB7_11 +; NONEON-NOSVE-NEXT: .LBB7_27: // %cond.store19 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #20] +; NONEON-NOSVE-NEXT: tbz w8, #11, .LBB7_12 +; NONEON-NOSVE-NEXT: .LBB7_28: // %cond.store21 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #22] +; NONEON-NOSVE-NEXT: tbz w8, #12, .LBB7_13 +; NONEON-NOSVE-NEXT: .LBB7_29: // %cond.store23 +; NONEON-NOSVE-NEXT: fmov s0, wzr 
+; NONEON-NOSVE-NEXT: str h0, [x0, #24] +; NONEON-NOSVE-NEXT: tbz w8, #13, .LBB7_14 +; NONEON-NOSVE-NEXT: .LBB7_30: // %cond.store25 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #26] +; NONEON-NOSVE-NEXT: tbz w8, #14, .LBB7_15 +; NONEON-NOSVE-NEXT: .LBB7_31: // %cond.store27 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #28] +; NONEON-NOSVE-NEXT: tbz w8, #15, .LBB7_16 +; NONEON-NOSVE-NEXT: .LBB7_32: // %cond.store29 +; NONEON-NOSVE-NEXT: fmov s0, wzr +; NONEON-NOSVE-NEXT: str h0, [x0, #30] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v16f16(<16 x half> zeroinitializer, ptr %dst, i32 8, <16 x i1> %mask) ret void } @@ -225,6 +865,37 @@ define void @masked_store_v4f32(ptr %dst, <4 x i1> %mask) { ; CHECK-NEXT: mov z0.s, #0 // =0x0 ; CHECK-NEXT: st1w { z0.s }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #15 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI8_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI8_0] +; NONEON-NOSVE-NEXT: cmlt v0.4h, v0.4h, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB8_5 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB8_6 +; NONEON-NOSVE-NEXT: .LBB8_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB8_7 +; NONEON-NOSVE-NEXT: .LBB8_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB8_8 +; NONEON-NOSVE-NEXT: .LBB8_4: // %else6 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB8_5: // %cond.store +; NONEON-NOSVE-NEXT: str wzr, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB8_2 +; NONEON-NOSVE-NEXT: .LBB8_6: // %cond.store1 +; NONEON-NOSVE-NEXT: str wzr, [x0, #4] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB8_3 +; NONEON-NOSVE-NEXT: .LBB8_7: // %cond.store3 +; NONEON-NOSVE-NEXT: str wzr, [x0, #8] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB8_4 +; NONEON-NOSVE-NEXT: .LBB8_8: // %cond.store5 +; NONEON-NOSVE-NEXT: str wzr, [x0, #12] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v4f32(<4 x float> zeroinitializer, ptr %dst, i32 8, <4 x i1> %mask) ret void } @@ -275,6 +946,57 @@ define void @masked_store_v8f32(ptr %dst, <8 x i1> %mask) { ; CHECK-NEXT: st1w { z1.s }, p0, [x0] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.8b, v0.8b, #7 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI9_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI9_0] +; NONEON-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv b0, v0.8b +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB9_9 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB9_10 +; NONEON-NOSVE-NEXT: .LBB9_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB9_11 +; NONEON-NOSVE-NEXT: .LBB9_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB9_12 +; NONEON-NOSVE-NEXT: .LBB9_4: // %else6 +; NONEON-NOSVE-NEXT: tbnz w8, #4, .LBB9_13 +; NONEON-NOSVE-NEXT: .LBB9_5: // %else8 +; NONEON-NOSVE-NEXT: tbnz w8, #5, .LBB9_14 +; NONEON-NOSVE-NEXT: .LBB9_6: // %else10 +; NONEON-NOSVE-NEXT: tbnz w8, #6, .LBB9_15 +; NONEON-NOSVE-NEXT: .LBB9_7: // %else12 +; NONEON-NOSVE-NEXT: tbnz w8, #7, .LBB9_16 +; NONEON-NOSVE-NEXT: .LBB9_8: // %else14 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB9_9: // %cond.store +; NONEON-NOSVE-NEXT: str wzr, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB9_2 
+; NONEON-NOSVE-NEXT: .LBB9_10: // %cond.store1 +; NONEON-NOSVE-NEXT: str wzr, [x0, #4] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB9_3 +; NONEON-NOSVE-NEXT: .LBB9_11: // %cond.store3 +; NONEON-NOSVE-NEXT: str wzr, [x0, #8] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB9_4 +; NONEON-NOSVE-NEXT: .LBB9_12: // %cond.store5 +; NONEON-NOSVE-NEXT: str wzr, [x0, #12] +; NONEON-NOSVE-NEXT: tbz w8, #4, .LBB9_5 +; NONEON-NOSVE-NEXT: .LBB9_13: // %cond.store7 +; NONEON-NOSVE-NEXT: str wzr, [x0, #16] +; NONEON-NOSVE-NEXT: tbz w8, #5, .LBB9_6 +; NONEON-NOSVE-NEXT: .LBB9_14: // %cond.store9 +; NONEON-NOSVE-NEXT: str wzr, [x0, #20] +; NONEON-NOSVE-NEXT: tbz w8, #6, .LBB9_7 +; NONEON-NOSVE-NEXT: .LBB9_15: // %cond.store11 +; NONEON-NOSVE-NEXT: str wzr, [x0, #24] +; NONEON-NOSVE-NEXT: tbz w8, #7, .LBB9_8 +; NONEON-NOSVE-NEXT: .LBB9_16: // %cond.store13 +; NONEON-NOSVE-NEXT: str wzr, [x0, #28] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v8f32(<8 x float> zeroinitializer, ptr %dst, i32 8, <8 x i1> %mask) ret void } @@ -291,6 +1013,27 @@ define void @masked_store_v2f64(ptr %dst, <2 x i1> %mask) { ; CHECK-NEXT: mov z0.d, #0 // =0x0 ; CHECK-NEXT: st1d { z0.d }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #31 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI10_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI10_0] +; NONEON-NOSVE-NEXT: cmlt v0.2s, v0.2s, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addp v0.2s, v0.2s, v0.2s +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB10_3 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB10_4 +; NONEON-NOSVE-NEXT: .LBB10_2: // %else2 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB10_3: // %cond.store +; NONEON-NOSVE-NEXT: str xzr, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB10_2 +; NONEON-NOSVE-NEXT: .LBB10_4: // %cond.store1 +; NONEON-NOSVE-NEXT: str xzr, [x0, #8] +; NONEON-NOSVE-NEXT: ret call void @llvm.masked.store.v2f64(<2 x double> zeroinitializer, ptr %dst, i32 8, <2 x i1> %mask) ret void } @@ -315,6 +1058,37 @@ define void @masked_store_v4f64(ptr %dst, <4 x i1> %mask) { ; CHECK-NEXT: st1d { z0.d }, p1, [x0, x8, lsl #3] ; CHECK-NEXT: st1d { z0.d }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: masked_store_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #15 +; NONEON-NOSVE-NEXT: adrp x8, .LCPI11_0 +; NONEON-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI11_0] +; NONEON-NOSVE-NEXT: cmlt v0.4h, v0.4h, #0 +; NONEON-NOSVE-NEXT: and v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: addv h0, v0.4h +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: tbnz w8, #0, .LBB11_5 +; NONEON-NOSVE-NEXT: // %bb.1: // %else +; NONEON-NOSVE-NEXT: tbnz w8, #1, .LBB11_6 +; NONEON-NOSVE-NEXT: .LBB11_2: // %else2 +; NONEON-NOSVE-NEXT: tbnz w8, #2, .LBB11_7 +; NONEON-NOSVE-NEXT: .LBB11_3: // %else4 +; NONEON-NOSVE-NEXT: tbnz w8, #3, .LBB11_8 +; NONEON-NOSVE-NEXT: .LBB11_4: // %else6 +; NONEON-NOSVE-NEXT: ret +; NONEON-NOSVE-NEXT: .LBB11_5: // %cond.store +; NONEON-NOSVE-NEXT: str xzr, [x0] +; NONEON-NOSVE-NEXT: tbz w8, #1, .LBB11_2 +; NONEON-NOSVE-NEXT: .LBB11_6: // %cond.store1 +; NONEON-NOSVE-NEXT: str xzr, [x0, #8] +; NONEON-NOSVE-NEXT: tbz w8, #2, .LBB11_3 +; NONEON-NOSVE-NEXT: .LBB11_7: // %cond.store3 +; NONEON-NOSVE-NEXT: str xzr, [x0, #16] +; NONEON-NOSVE-NEXT: tbz w8, #3, .LBB11_4 +; NONEON-NOSVE-NEXT: .LBB11_8: // %cond.store5 +; NONEON-NOSVE-NEXT: str xzr, [x0, #24] +; NONEON-NOSVE-NEXT: ret call void 
@llvm.masked.store.v4f64(<4 x double> zeroinitializer, ptr %dst, i32 8, <4 x i1> %mask) ret void } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll index b5adea59424298..d7eaf766e7df7c 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -14,6 +15,15 @@ define void @add_v4i8(ptr %a, ptr %b) { ; CHECK-NEXT: add z0.h, z0.h, z1.h ; CHECK-NEXT: st1b { z0.h }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: ldr s1, [x1] +; NONEON-NOSVE-NEXT: uaddl v0.8h, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: uzp1 v0.8b, v0.8b, v0.8b +; NONEON-NOSVE-NEXT: str s0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i8>, ptr %a %op2 = load <4 x i8>, ptr %b %res = add <4 x i8> %op1, %op2 @@ -29,6 +39,14 @@ define void @add_v8i8(ptr %a, ptr %b) { ; CHECK-NEXT: add z0.b, z0.b, z1.b ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ldr d1, [x1] +; NONEON-NOSVE-NEXT: add v0.8b, v0.8b, v1.8b +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i8>, ptr %a %op2 = load <8 x i8>, ptr %b %res = add <8 x i8> %op1, %op2 @@ -44,6 +62,14 @@ define void @add_v16i8(ptr %a, ptr %b) { ; CHECK-NEXT: add z0.b, z0.b, z1.b ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i8>, ptr %a %op2 = load <16 x i8>, ptr %b %res = add <16 x i8> %op1, %op2 @@ -60,6 +86,15 @@ define void @add_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: add z1.b, z2.b, z3.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: add v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %res = add <32 x i8> %op1, %op2 @@ -76,6 +111,23 @@ define void @add_v2i16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: add z0.s, z0.s, z1.s ; CHECK-NEXT: st1h { z0.s }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldrh w8, [x0] +; NONEON-NOSVE-NEXT: ldrh w9, [x1] +; NONEON-NOSVE-NEXT: fmov s0, w8 +; NONEON-NOSVE-NEXT: fmov s1, w9 +; NONEON-NOSVE-NEXT: add x8, x0, #2 +; NONEON-NOSVE-NEXT: add x9, x1, #2 +; NONEON-NOSVE-NEXT: ld1 { v0.h }[2], [x8] +; NONEON-NOSVE-NEXT: ld1 { v1.h }[2], [x9] +; NONEON-NOSVE-NEXT: add v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: mov w8, v0.s[1] +; NONEON-NOSVE-NEXT: fmov w9, s0 +; NONEON-NOSVE-NEXT: strh w9, [x0] +; 
NONEON-NOSVE-NEXT: strh w8, [x0, #2] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x i16>, ptr %a %op2 = load <2 x i16>, ptr %b %res = add <2 x i16> %op1, %op2 @@ -91,6 +143,14 @@ define void @add_v4i16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: add z0.h, z0.h, z1.h ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ldr d1, [x1] +; NONEON-NOSVE-NEXT: add v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i16>, ptr %a %op2 = load <4 x i16>, ptr %b %res = add <4 x i16> %op1, %op2 @@ -106,6 +166,14 @@ define void @add_v8i16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: add z0.h, z0.h, z1.h ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i16>, ptr %a %op2 = load <8 x i16>, ptr %b %res = add <8 x i16> %op1, %op2 @@ -122,6 +190,15 @@ define void @add_v16i16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: add z1.h, z2.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: add_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: add v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %res = add <16 x i16> %op1, %op2 @@ -137,6 +214,13 @@ define void @abs_v2i32(ptr %a) { ; CHECK-NEXT: abs z0.s, p0/m, z0.s ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: abs v0.2s, v0.2s +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x i32>, ptr %a %res = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %op1, i1 false) store <2 x i32> %res, ptr %a @@ -151,6 +235,13 @@ define void @abs_v4i32(ptr %a) { ; CHECK-NEXT: abs z0.s, p0/m, z0.s ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: abs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i32>, ptr %a %res = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %op1, i1 false) store <4 x i32> %res, ptr %a @@ -166,6 +257,14 @@ define void @abs_v8i32(ptr %a) { ; CHECK-NEXT: abs z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: abs v0.4s, v0.4s +; NONEON-NOSVE-NEXT: abs v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %op1, i1 false) store <8 x i32> %res, ptr %a @@ -180,6 +279,13 @@ define void @abs_v2i64(ptr %a) { ; CHECK-NEXT: abs z0.d, p0/m, z0.d ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: abs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x i64>, ptr %a %res = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %op1, i1 false) store <2 x i64> %res, ptr %a @@ -195,6 
+301,14 @@ define void @abs_v4i64(ptr %a) { ; CHECK-NEXT: abs z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: abs_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: abs v0.2d, v0.2d +; NONEON-NOSVE-NEXT: abs v1.2d, v1.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %res = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %op1, i1 false) store <4 x i64> %res, ptr %a @@ -211,6 +325,17 @@ define void @fadd_v2f16(ptr %a, ptr %b) { ; CHECK-NEXT: fmov w8, s0 ; CHECK-NEXT: str w8, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr s0, [x0] +; NONEON-NOSVE-NEXT: ldr s1, [x1] +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fadd v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: str s0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x half>, ptr %a %op2 = load <2 x half>, ptr %b %res = fadd <2 x half> %op1, %op2 @@ -227,6 +352,17 @@ define void @fadd_v4f16(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ldr d1, [x1] +; NONEON-NOSVE-NEXT: fcvtl v1.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v0.4s, v0.4h +; NONEON-NOSVE-NEXT: fadd v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x half>, ptr %a %op2 = load <4 x half>, ptr %b %res = fadd <4 x half> %op1, %op2 @@ -243,6 +379,21 @@ define void @fadd_v8f16(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fcvtl v2.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v3.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fadd v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: fadd v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v2.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: str q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x half>, ptr %a %op2 = load <8 x half>, ptr %b %res = fadd <8 x half> %op1, %op2 @@ -261,6 +412,29 @@ define void @fadd_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fcvtl v4.4s, v0.4h +; NONEON-NOSVE-NEXT: fcvtl v6.4s, v3.4h +; NONEON-NOSVE-NEXT: fcvtl2 v0.4s, v0.8h +; NONEON-NOSVE-NEXT: fcvtl v5.4s, v1.4h +; NONEON-NOSVE-NEXT: fcvtl v7.4s, v2.4h +; NONEON-NOSVE-NEXT: fcvtl2 v1.4s, v1.8h +; NONEON-NOSVE-NEXT: fcvtl2 v3.4s, v3.8h +; NONEON-NOSVE-NEXT: fcvtl2 v2.4s, v2.8h +; NONEON-NOSVE-NEXT: fadd v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: fadd v5.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: fadd v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fadd v2.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: fcvtn v1.4h, v4.4s +; NONEON-NOSVE-NEXT: fcvtn v3.4h, v5.4s +; NONEON-NOSVE-NEXT: fcvtn2 v1.8h, v0.4s +; NONEON-NOSVE-NEXT: fcvtn2 v3.8h, v2.4s +; NONEON-NOSVE-NEXT: stp q1, q3, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load 
<16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %res = fadd <16 x half> %op1, %op2 @@ -277,6 +451,14 @@ define void @fadd_v2f32(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ldr d1, [x1] +; NONEON-NOSVE-NEXT: fadd v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x float>, ptr %a %op2 = load <2 x float>, ptr %b %res = fadd <2 x float> %op1, %op2 @@ -293,6 +475,14 @@ define void @fadd_v4f32(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fadd v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x float>, ptr %a %op2 = load <4 x float>, ptr %b %res = fadd <4 x float> %op1, %op2 @@ -311,6 +501,15 @@ define void @fadd_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fadd v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: fadd v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %res = fadd <8 x float> %op1, %op2 @@ -327,6 +526,14 @@ define void @fadd_v2f64(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: fadd v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <2 x double>, ptr %a %op2 = load <2 x double>, ptr %b %res = fadd <2 x double> %op1, %op2 @@ -345,6 +552,15 @@ define void @fadd_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fadd_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q3, [x1] +; NONEON-NOSVE-NEXT: ldp q1, q2, [x0] +; NONEON-NOSVE-NEXT: fadd v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: fadd v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %res = fadd <4 x double> %op1, %op2 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll index 00413302798ca1..f595a4219cac9f 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -15,6 +16,14 @@ define void @test_revbv16i16(ptr %a) { ; CHECK-NEXT: revb z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; 
CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revbv16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev16 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rev16 v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <32 x i8>, ptr %a %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> store <32 x i8> %tmp2, ptr %a @@ -31,6 +40,14 @@ define void @test_revbv8i32(ptr %a) { ; CHECK-NEXT: revb z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revbv8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev32 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rev32 v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <32 x i8>, ptr %a %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> store <32 x i8> %tmp2, ptr %a @@ -47,6 +64,14 @@ define void @test_revbv4i64(ptr %a) { ; CHECK-NEXT: revb z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revbv4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev64 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rev64 v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <32 x i8>, ptr %a %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> store <32 x i8> %tmp2, ptr %a @@ -63,6 +88,14 @@ define void @test_revhv8i32(ptr %a) { ; CHECK-NEXT: revh z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revhv8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev32 v0.8h, v0.8h +; NONEON-NOSVE-NEXT: rev32 v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <16 x i16>, ptr %a %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> store <16 x i16> %tmp2, ptr %a @@ -79,6 +112,14 @@ define void @test_revhv8f32(ptr %a) { ; CHECK-NEXT: revh z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revhv8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev32 v0.8h, v0.8h +; NONEON-NOSVE-NEXT: rev32 v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <16 x half>, ptr %a %tmp2 = shufflevector <16 x half> %tmp1, <16 x half> undef, <16 x i32> store <16 x half> %tmp2, ptr %a @@ -95,6 +136,14 @@ define void @test_revhv4i64(ptr %a) { ; CHECK-NEXT: revh z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revhv4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev64 v0.8h, v0.8h +; NONEON-NOSVE-NEXT: rev64 v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <16 x i16>, ptr %a %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> store <16 x i16> %tmp2, ptr %a @@ -111,6 +160,14 @@ define void @test_revwv4i64(ptr %a) { ; CHECK-NEXT: revw z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revwv4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev64 v0.4s, v0.4s +; NONEON-NOSVE-NEXT: rev64 v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x i32>, ptr %a %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> 
store <8 x i32> %tmp2, ptr %a @@ -127,6 +184,14 @@ define void @test_revwv4f64(ptr %a) { ; CHECK-NEXT: revw z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revwv4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev64 v0.4s, v0.4s +; NONEON-NOSVE-NEXT: rev64 v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x float>, ptr %a %tmp2 = shufflevector <8 x float> %tmp1, <8 x float> undef, <8 x i32> store <8 x float> %tmp2, ptr %a @@ -141,6 +206,12 @@ define <16 x i8> @test_revv16i8(ptr %a) { ; CHECK-NEXT: revb z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revv16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: rev64 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %tmp1 = load <16 x i8>, ptr %a %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> ret <16 x i8> %tmp2 @@ -156,6 +227,14 @@ define void @test_revwv8i32v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: revw z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revwv8i32v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x1] +; NONEON-NOSVE-NEXT: rev64 v0.4s, v0.4s +; NONEON-NOSVE-NEXT: rev64 v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x i32>, ptr %a %tmp2 = load <8 x i32>, ptr %b %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> @@ -176,6 +255,18 @@ define void @test_revhv32i16(ptr %a) { ; CHECK-NEXT: stp q0, q1, [x0, #32] ; CHECK-NEXT: stp q2, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revhv32i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: rev64 v0.8h, v0.8h +; NONEON-NOSVE-NEXT: rev64 v1.8h, v1.8h +; NONEON-NOSVE-NEXT: rev64 v2.8h, v2.8h +; NONEON-NOSVE-NEXT: rev64 v3.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: stp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <32 x i16>, ptr %a %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <32 x i32> store <32 x i16> %tmp2, ptr %a @@ -191,6 +282,14 @@ define void @test_rev_elts_fail(ptr %a) { ; CHECK-NEXT: tbl z0.d, { z2.d }, z0.d ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_rev_elts_fail: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v1.16b, #8 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x i64>, ptr %a %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> undef, <4 x i32> store <4 x i64> %tmp2, ptr %a @@ -208,6 +307,15 @@ define void @test_revdv4i64_sve2p1(ptr %a) #1 { ; CHECK-NEXT: revd z1.q, p0/m, z1.q ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revdv4i64_sve2p1: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ptrue p0.d, vl2 +; NONEON-NOSVE-NEXT: revd z0.q, p0/m, z0.q +; NONEON-NOSVE-NEXT: revd z1.q, p0/m, z1.q +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x i64>, ptr %a %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> undef, <4 x i32> store <4 x i64> %tmp2, ptr %a @@ -223,6 +331,15 @@ define void @test_revdv4f64_sve2p1(ptr %a) #1 { ; CHECK-NEXT: revd z1.q, p0/m, z1.q ; CHECK-NEXT: stp q0, q1, 
[x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revdv4f64_sve2p1: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ptrue p0.d +; NONEON-NOSVE-NEXT: revd z0.q, p0/m, z0.q +; NONEON-NOSVE-NEXT: revd z1.q, p0/m, z1.q +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x double>, ptr %a %tmp2 = shufflevector <4 x double> %tmp1, <4 x double> undef, <4 x i32> store <4 x double> %tmp2, ptr %a @@ -238,6 +355,16 @@ define void @test_revv8i32(ptr %a) { ; CHECK-NEXT: tbl z0.s, { z2.s }, z0.s ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_revv8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: rev64 v0.4s, v0.4s +; NONEON-NOSVE-NEXT: rev64 v1.4s, v1.4s +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v1.16b, #8 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x i32>, ptr %a %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> store <8 x i32> %tmp2, ptr %a diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll index cb73030306b023..df786933da88cb 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -68,6 +69,18 @@ define void @zip1_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: str q1, [x0, #16] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip1_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: zip2 v2.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: zip1 v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: str q2, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load volatile <32 x i8>, ptr %a %tmp2 = load volatile <32 x i8>, ptr %b %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> @@ -196,6 +209,28 @@ define void @zip_v32i16(ptr %a, ptr %b) { ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: add sp, sp, #64 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip_v32i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q4, q0, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q5, q1, [x0] +; NONEON-NOSVE-NEXT: ldp q6, q2, [x1, #32] +; NONEON-NOSVE-NEXT: ldp q7, q3, [x1] +; NONEON-NOSVE-NEXT: zip1 v17.8h, v0.8h, v2.8h +; NONEON-NOSVE-NEXT: zip2 v0.8h, v0.8h, v2.8h +; NONEON-NOSVE-NEXT: zip1 v16.8h, v1.8h, v3.8h +; NONEON-NOSVE-NEXT: zip2 v1.8h, v1.8h, v3.8h +; NONEON-NOSVE-NEXT: zip1 v2.8h, v5.8h, v7.8h +; NONEON-NOSVE-NEXT: zip1 v3.8h, v4.8h, v6.8h +; NONEON-NOSVE-NEXT: zip2 v5.8h, v5.8h, v7.8h +; NONEON-NOSVE-NEXT: zip2 v4.8h, v4.8h, v6.8h +; NONEON-NOSVE-NEXT: add v6.8h, v16.8h, v17.8h +; NONEON-NOSVE-NEXT: add v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: add v2.8h, v5.8h, v4.8h +; NONEON-NOSVE-NEXT: stp q6, q0, [x0, 
#32] +; NONEON-NOSVE-NEXT: stp q1, q2, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <32 x i16>, ptr %a %tmp2 = load <32 x i16>, ptr %b %tmp3 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> @@ -244,6 +279,18 @@ define void @zip1_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: str q1, [x0, #16] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip1_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: zip2 v2.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: zip1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: str q2, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load volatile <16 x i16>, ptr %a %tmp2 = load volatile <16 x i16>, ptr %b %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> @@ -276,6 +323,18 @@ define void @zip1_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: str q1, [x0, #16] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip1_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: zip2 v2.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: zip1 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: str q2, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load volatile <8 x i32>, ptr %a %tmp2 = load volatile <8 x i32>, ptr %b %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> @@ -298,6 +357,19 @@ define void @zip_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: stp q2, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x1] +; NONEON-NOSVE-NEXT: zip1 v4.2d, v1.2d, v3.2d +; NONEON-NOSVE-NEXT: zip1 v5.2d, v0.2d, v2.2d +; NONEON-NOSVE-NEXT: zip2 v1.2d, v1.2d, v3.2d +; NONEON-NOSVE-NEXT: zip2 v0.2d, v0.2d, v2.2d +; NONEON-NOSVE-NEXT: fadd v2.2d, v4.2d, v5.2d +; NONEON-NOSVE-NEXT: fadd v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: stp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x double>, ptr %a %tmp2 = load <4 x double>, ptr %b %tmp3 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> @@ -330,6 +402,16 @@ define void @zip_v4i32(ptr %a, ptr %b) { ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: zip1 v2.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: zip2 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: add v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x i32>, ptr %a %tmp2 = load <4 x i32>, ptr %b %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> @@ -351,6 +433,16 @@ define void @zip1_v8i32_undef(ptr %a) { ; CHECK-NEXT: str q1, [x0, #16] ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip1_v8i32_undef: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: zip2 v1.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: zip1 v0.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: str q1, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load volatile <8 x i32>, ptr %a %tmp2 = shufflevector <8 x i32> %tmp1, <8 x 
i32> undef, <8 x i32> store volatile <8 x i32> %tmp2, ptr %a @@ -370,6 +462,19 @@ define void @trn_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: add z1.b, z1.b, z2.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trn_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: trn1 v4.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: trn2 v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: trn1 v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: trn2 v2.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: add v0.16b, v4.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <32 x i8>, ptr %a %tmp2 = load <32 x i8>, ptr %b %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> @@ -392,6 +497,19 @@ define void @trn_v8i16(ptr %a, ptr %b) { ; CHECK-NEXT: add z0.h, z1.h, z0.h ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trn_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: adrp x8, .LCPI8_0 +; NONEON-NOSVE-NEXT: adrp x9, .LCPI8_1 +; NONEON-NOSVE-NEXT: ldr q1, [x0] +; NONEON-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI8_0] +; NONEON-NOSVE-NEXT: ldr q2, [x9, :lo12:.LCPI8_1] +; NONEON-NOSVE-NEXT: tbl v0.16b, { v1.16b }, v0.16b +; NONEON-NOSVE-NEXT: tbl v1.16b, { v1.16b }, v2.16b +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x i16>, ptr %a %tmp2 = load <8 x i16>, ptr %b %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> @@ -414,6 +532,19 @@ define void @trn_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: add z1.h, z1.h, z2.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trn_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: trn1 v4.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: trn2 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: trn1 v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: trn2 v2.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: add v0.8h, v4.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v2.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <16 x i16>, ptr %a %tmp2 = load <16 x i16>, ptr %b %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> @@ -436,6 +567,19 @@ define void @trn_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: add z1.s, z1.s, z2.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trn_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: zip1 v4.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: trn2 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: trn1 v1.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: trn2 v2.4s, v2.4s, v3.4s +; NONEON-NOSVE-NEXT: add v0.4s, v4.4s, v0.4s +; NONEON-NOSVE-NEXT: add v1.4s, v1.4s, v2.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x i32>, ptr %a %tmp2 = load <8 x i32>, ptr %b %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> @@ -459,6 +603,19 @@ define void @trn_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z2.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trn_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q1, q3, [x1] +; NONEON-NOSVE-NEXT: zip1 v4.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: zip2 v0.2d, v0.2d, v1.2d +; NONEON-NOSVE-NEXT: zip1 
v1.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: zip2 v2.2d, v2.2d, v3.2d +; NONEON-NOSVE-NEXT: fadd v0.2d, v4.2d, v0.2d +; NONEON-NOSVE-NEXT: fadd v1.2d, v1.2d, v2.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x double>, ptr %a %tmp2 = load <4 x double>, ptr %b %tmp3 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> @@ -479,6 +636,16 @@ define void @trn_v4f32(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z2.s ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trn_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: trn1 v2.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: trn2 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: fadd v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x float>, ptr %a %tmp2 = load <4 x float>, ptr %b %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> @@ -500,6 +667,18 @@ define void @trn_v8i32_undef(ptr %a) { ; CHECK-NEXT: add z1.s, z3.s, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trn_v8i32_undef: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: trn1 v2.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: trn2 v0.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: trn1 v3.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: trn2 v1.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: add v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: add v1.4s, v3.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x i32>, ptr %a %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> @@ -571,6 +750,18 @@ define void @zip2_v32i8(ptr %a, ptr %b) #0{ ; CHECK-NEXT: str q1, [x0, #16] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip2_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: zip2 v2.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: zip1 v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: str q2, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load volatile <32 x i8>, ptr %a %tmp2 = load volatile <32 x i8>, ptr %b %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> @@ -617,6 +808,18 @@ define void @zip2_v16i16(ptr %a, ptr %b) #0{ ; CHECK-NEXT: str q1, [x0, #16] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip2_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: zip2 v2.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: zip1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: str q2, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load volatile <16 x i16>, ptr %a %tmp2 = load volatile <16 x i16>, ptr %b %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> @@ -649,6 +852,18 @@ define void @zip2_v8i32(ptr %a, ptr %b) #0{ ; CHECK-NEXT: str q1, [x0, #16] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip2_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: zip2 
v2.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: zip1 v0.4s, v0.4s, v1.4s +; NONEON-NOSVE-NEXT: str q2, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load volatile <8 x i32>, ptr %a %tmp2 = load volatile <8 x i32>, ptr %b %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> @@ -668,6 +883,16 @@ define void @zip2_v8i32_undef(ptr %a) #0{ ; CHECK-NEXT: str q1, [x0, #16] ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip2_v8i32_undef: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: zip2 v1.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: zip1 v0.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: str q1, [x0, #16] +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load volatile <8 x i32>, ptr %a %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> store volatile <8 x i32> %tmp2, ptr %a @@ -869,6 +1094,19 @@ define void @uzp_v32i8(ptr %a, ptr %b) #0{ ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: add sp, sp, #64 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uzp_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x1] +; NONEON-NOSVE-NEXT: uzp1 v4.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: uzp2 v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: uzp1 v1.16b, v3.16b, v2.16b +; NONEON-NOSVE-NEXT: uzp2 v2.16b, v3.16b, v2.16b +; NONEON-NOSVE-NEXT: add v0.16b, v4.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v2.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <32 x i8>, ptr %a %tmp2 = load <32 x i8>, ptr %b %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> @@ -891,6 +1129,17 @@ define void @uzp_v4i16(ptr %a, ptr %b) #0{ ; CHECK-NEXT: add z0.h, z1.h, z0.h ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uzp_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: ext v1.8b, v0.8b, v0.8b, #6 +; NONEON-NOSVE-NEXT: ext v2.8b, v0.8b, v0.8b, #2 +; NONEON-NOSVE-NEXT: trn1 v1.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: zip1 v0.4h, v2.4h, v0.4h +; NONEON-NOSVE-NEXT: add v0.4h, v1.4h, v0.4h +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x i16>, ptr %a %tmp2 = load <4 x i16>, ptr %b %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> @@ -1008,6 +1257,19 @@ define void @uzp_v16i16(ptr %a, ptr %b) #0{ ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: add sp, sp, #64 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uzp_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x1] +; NONEON-NOSVE-NEXT: uzp1 v4.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: uzp2 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v2.8h +; NONEON-NOSVE-NEXT: uzp2 v2.8h, v3.8h, v2.8h +; NONEON-NOSVE-NEXT: add v0.8h, v4.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v2.8h +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <16 x i16>, ptr %a %tmp2 = load <16 x i16>, ptr %b %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> @@ -1047,6 +1309,19 @@ define void @uzp_v8f32(ptr %a, ptr %b) #0{ ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uzp_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x1] +; NONEON-NOSVE-NEXT: uzp1 v4.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp2 v0.4s, v1.4s, v0.4s +; 
NONEON-NOSVE-NEXT: uzp1 v1.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: uzp2 v2.4s, v3.4s, v0.4s +; NONEON-NOSVE-NEXT: fadd v0.4s, v4.4s, v0.4s +; NONEON-NOSVE-NEXT: fadd v1.4s, v1.4s, v2.4s +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x float>, ptr %a %tmp2 = load <8 x float>, ptr %b %tmp3 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> @@ -1069,6 +1344,19 @@ define void @uzp_v4i64(ptr %a, ptr %b) #0{ ; CHECK-NEXT: add z1.d, z1.d, z2.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uzp_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x1] +; NONEON-NOSVE-NEXT: zip1 v4.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: zip2 v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: zip1 v1.2d, v3.2d, v2.2d +; NONEON-NOSVE-NEXT: zip2 v2.2d, v3.2d, v2.2d +; NONEON-NOSVE-NEXT: add v0.2d, v4.2d, v0.2d +; NONEON-NOSVE-NEXT: add v1.2d, v1.2d, v2.2d +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x i64>, ptr %a %tmp2 = load <4 x i64>, ptr %b %tmp3 = shufflevector <4 x i64> %tmp1, <4 x i64> %tmp2, <4 x i32> @@ -1136,6 +1424,16 @@ define void @uzp_v8i16(ptr %a, ptr %b) #0{ ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uzp_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: uzp2 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: add v0.8h, v2.8h, v0.8h +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x i16>, ptr %a %tmp2 = load <8 x i16>, ptr %b %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> @@ -1174,6 +1472,15 @@ define void @uzp_v8i32_undef(ptr %a) #0{ ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: uzp_v8i32_undef: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: uzp1 v2.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp2 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: add v0.4s, v2.4s, v0.4s +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <8 x i32>, ptr %a %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> @@ -1197,6 +1504,19 @@ define void @zip_vscale2_4(ptr %a, ptr %b) { ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: stp q2, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: zip_vscale2_4: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x1] +; NONEON-NOSVE-NEXT: zip1 v4.2d, v1.2d, v3.2d +; NONEON-NOSVE-NEXT: zip1 v5.2d, v0.2d, v2.2d +; NONEON-NOSVE-NEXT: zip2 v1.2d, v1.2d, v3.2d +; NONEON-NOSVE-NEXT: zip2 v0.2d, v0.2d, v2.2d +; NONEON-NOSVE-NEXT: fadd v2.2d, v4.2d, v5.2d +; NONEON-NOSVE-NEXT: fadd v0.2d, v1.2d, v0.2d +; NONEON-NOSVE-NEXT: stp q2, q0, [x0] +; NONEON-NOSVE-NEXT: ret %tmp1 = load <4 x double>, ptr %a %tmp2 = load <4 x double>, ptr %b %tmp3 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll index ab7c42b3e9e37d..6b3c85f59357e7 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -35,6 +36,23 @@ define i1 @ptest_v16i1(ptr %a, ptr %b) { ; CHECK-NEXT: fmov w8, s0 ; CHECK-NEXT: and w0, w8, #0x1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ptest_v16i1: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: fcmeq v0.4s, v0.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v1.4s, v1.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v3.4s, v3.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v2.4s, v2.4s, #0.0 +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: mvn v0.16b, v0.16b +; NONEON-NOSVE-NEXT: umaxv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: and w0, w8, #0x1 +; NONEON-NOSVE-NEXT: ret %v0 = bitcast ptr %a to ptr %v1 = load <16 x float>, ptr %v0, align 4 %v2 = fcmp une <16 x float> %v1, zeroinitializer @@ -92,6 +110,33 @@ define i1 @ptest_or_v16i1(ptr %a, ptr %b) { ; CHECK-NEXT: fmov w8, s0 ; CHECK-NEXT: and w0, w8, #0x1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ptest_or_v16i1: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x1, #32] +; NONEON-NOSVE-NEXT: fcmeq v1.4s, v1.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v0.4s, v0.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v3.4s, v3.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v2.4s, v2.4s, #0.0 +; NONEON-NOSVE-NEXT: ldp q6, q7, [x1] +; NONEON-NOSVE-NEXT: fcmeq v4.4s, v4.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v5.4s, v5.4s, #0.0 +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: fcmeq v7.4s, v7.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v6.4s, v6.4s, #0.0 +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v5.8h, v4.8h +; NONEON-NOSVE-NEXT: uzp1 v3.8h, v6.8h, v7.8h +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: uzp1 v1.16b, v3.16b, v2.16b +; NONEON-NOSVE-NEXT: mvn v0.16b, v0.16b +; NONEON-NOSVE-NEXT: orn v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: umaxv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: and w0, w8, #0x1 +; NONEON-NOSVE-NEXT: ret %v0 = bitcast ptr %a to ptr %v1 = load <16 x float>, ptr %v0, align 4 %v2 = fcmp une <16 x float> %v1, zeroinitializer @@ -159,6 +204,33 @@ define i1 @ptest_and_v16i1(ptr %a, ptr %b) { ; CHECK-NEXT: fmov w8, s0 ; CHECK-NEXT: and w0, w8, #0x1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: ptest_and_v16i1: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q2, q3, [x0] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x1, #32] +; NONEON-NOSVE-NEXT: fcmeq v1.4s, v1.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v0.4s, v0.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v3.4s, v3.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v2.4s, v2.4s, #0.0 +; NONEON-NOSVE-NEXT: ldp q6, q7, [x1] +; NONEON-NOSVE-NEXT: fcmeq v4.4s, v4.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v5.4s, v5.4s, #0.0 +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; NONEON-NOSVE-NEXT: fcmeq v7.4s, v7.4s, #0.0 +; NONEON-NOSVE-NEXT: fcmeq v6.4s, v6.4s, #0.0 +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v5.8h, v4.8h +; NONEON-NOSVE-NEXT: uzp1 
v3.8h, v6.8h, v7.8h +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: uzp1 v1.16b, v3.16b, v2.16b +; NONEON-NOSVE-NEXT: mvn v0.16b, v0.16b +; NONEON-NOSVE-NEXT: bic v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: uminv b0, v0.16b +; NONEON-NOSVE-NEXT: fmov w8, s0 +; NONEON-NOSVE-NEXT: and w0, w8, #0x1 +; NONEON-NOSVE-NEXT: ret %v0 = bitcast ptr %a to ptr %v1 = load <16 x float>, ptr %v0, align 4 %v2 = fcmp une <16 x float> %v1, zeroinitializer diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll index bfa931044bc531..0a7352bf49442d 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -18,6 +19,13 @@ define <4 x i8> @bitreverse_v4i8(<4 x i8> %op) { ; CHECK-NEXT: lsr z0.h, z0.h, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev16 v0.8b, v0.8b +; NONEON-NOSVE-NEXT: rbit v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ushr v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: ret %res = call <4 x i8> @llvm.bitreverse.v4i8(<4 x i8> %op) ret <4 x i8> %res } @@ -30,6 +38,11 @@ define <8 x i8> @bitreverse_v8i8(<8 x i8> %op) { ; CHECK-NEXT: rbit z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rbit v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> %op) ret <8 x i8> %res } @@ -42,6 +55,11 @@ define <16 x i8> @bitreverse_v16i8(<16 x i8> %op) { ; CHECK-NEXT: rbit z0.b, p0/m, z0.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rbit v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %op) ret <16 x i8> %res } @@ -55,6 +73,14 @@ define void @bitreverse_v32i8(ptr %a) { ; CHECK-NEXT: rbit z1.b, p0/m, z1.b ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rbit v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rbit v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <32 x i8>, ptr %a %res = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %op) store <32 x i8> %res, ptr %a @@ -70,6 +96,13 @@ define <2 x i16> @bitreverse_v2i16(<2 x i16> %op) { ; CHECK-NEXT: lsr z0.s, z0.s, #16 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev32 v0.8b, v0.8b +; NONEON-NOSVE-NEXT: rbit v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ushr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: ret %res = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %op) ret <2 x i16> %res } @@ -82,6 +115,12 @@ define <4 x i16> @bitreverse_v4i16(<4 x i16> %op) { ; CHECK-NEXT: rbit z0.h, p0/m, z0.h ; 
CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev16 v0.8b, v0.8b +; NONEON-NOSVE-NEXT: rbit v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.bitreverse.v4i16(<4 x i16> %op) ret <4 x i16> %res } @@ -94,6 +133,12 @@ define <8 x i16> @bitreverse_v8i16(<8 x i16> %op) { ; CHECK-NEXT: rbit z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev16 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rbit v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %op) ret <8 x i16> %res } @@ -107,6 +152,16 @@ define void @bitreverse_v16i16(ptr %a) { ; CHECK-NEXT: rbit z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev16 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rev16 v1.16b, v1.16b +; NONEON-NOSVE-NEXT: rbit v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rbit v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %op) store <16 x i16> %res, ptr %a @@ -121,6 +176,12 @@ define <2 x i32> @bitreverse_v2i32(<2 x i32> %op) { ; CHECK-NEXT: rbit z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev32 v0.8b, v0.8b +; NONEON-NOSVE-NEXT: rbit v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %op) ret <2 x i32> %res } @@ -133,6 +194,12 @@ define <4 x i32> @bitreverse_v4i32(<4 x i32> %op) { ; CHECK-NEXT: rbit z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev32 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rbit v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %op) ret <4 x i32> %res } @@ -146,6 +213,16 @@ define void @bitreverse_v8i32(ptr %a) { ; CHECK-NEXT: rbit z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev32 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rev32 v1.16b, v1.16b +; NONEON-NOSVE-NEXT: rbit v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rbit v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %op) store <8 x i32> %res, ptr %a @@ -160,6 +237,12 @@ define <1 x i64> @bitreverse_v1i64(<1 x i64> %op) { ; CHECK-NEXT: rbit z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev64 v0.8b, v0.8b +; NONEON-NOSVE-NEXT: rbit v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.bitreverse.v1i64(<1 x i64> %op) ret <1 x i64> %res } @@ -172,6 +255,12 @@ define <2 x i64> @bitreverse_v2i64(<2 x i64> %op) { ; CHECK-NEXT: rbit z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v2i64: +; 
NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev64 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rbit v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %op) ret <2 x i64> %res } @@ -185,6 +274,16 @@ define void @bitreverse_v4i64(ptr %a) { ; CHECK-NEXT: rbit z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bitreverse_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev64 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rev64 v1.16b, v1.16b +; NONEON-NOSVE-NEXT: rbit v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rbit v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %op) store <4 x i64> %res, ptr %a @@ -204,6 +303,12 @@ define <2 x i16> @bswap_v2i16(<2 x i16> %op) { ; CHECK-NEXT: lsr z0.s, z0.s, #16 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev32 v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ushr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: ret %res = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %op) ret <2 x i16> %res } @@ -216,6 +321,11 @@ define <4 x i16> @bswap_v4i16(<4 x i16> %op) { ; CHECK-NEXT: revb z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev16 v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %op) ret <4 x i16> %res } @@ -228,6 +338,11 @@ define <8 x i16> @bswap_v8i16(<8 x i16> %op) { ; CHECK-NEXT: revb z0.h, p0/m, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev16 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %op) ret <8 x i16> %res } @@ -241,6 +356,14 @@ define void @bswap_v16i16(ptr %a) { ; CHECK-NEXT: revb z1.h, p0/m, z1.h ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev16 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rev16 v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <16 x i16>, ptr %a %res = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %op) store <16 x i16> %res, ptr %a @@ -255,6 +378,11 @@ define <2 x i32> @bswap_v2i32(<2 x i32> %op) { ; CHECK-NEXT: revb z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev32 v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %op) ret <2 x i32> %res } @@ -267,6 +395,11 @@ define <4 x i32> @bswap_v4i32(<4 x i32> %op) { ; CHECK-NEXT: revb z0.s, p0/m, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev32 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %op) ret <4 x i32> %res } @@ -280,6 +413,14 @@ define void @bswap_v8i32(ptr %a) { ; CHECK-NEXT: revb z1.s, p0/m, z1.s ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] 
+; NONEON-NOSVE-NEXT: rev32 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rev32 v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <8 x i32>, ptr %a %res = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %op) store <8 x i32> %res, ptr %a @@ -294,6 +435,11 @@ define <1 x i64> @bswap_v1i64(<1 x i64> %op) { ; CHECK-NEXT: revb z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev64 v0.8b, v0.8b +; NONEON-NOSVE-NEXT: ret %res = call <1 x i64> @llvm.bswap.v1i64(<1 x i64> %op) ret <1 x i64> %res } @@ -306,6 +452,11 @@ define <2 x i64> @bswap_v2i64(<2 x i64> %op) { ; CHECK-NEXT: revb z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev64 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: ret %res = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %op) ret <2 x i64> %res } @@ -319,6 +470,14 @@ define void @bswap_v4i64(ptr %a) { ; CHECK-NEXT: revb z1.d, p0/m, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: bswap_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: rev64 v0.16b, v0.16b +; NONEON-NOSVE-NEXT: rev64 v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op = load <4 x i64>, ptr %a %res = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %op) store <4 x i64> %res, ptr %a diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll index 9dd42e7831e0d0..d86c7d36a1041e 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -14,6 +15,19 @@ define <4 x i8> @sdiv_v4i8(<4 x i8> %op1) { ; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #5 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v1.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: movi d2, #0xff00ff00ff00ff +; NONEON-NOSVE-NEXT: sshr v1.4h, v1.4h, #8 +; NONEON-NOSVE-NEXT: sshr v1.4h, v1.4h, #7 +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: usra v0.4h, v1.4h, #3 +; NONEON-NOSVE-NEXT: shl v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: sshr v0.4h, v0.4h, #8 +; NONEON-NOSVE-NEXT: sshr v0.4h, v0.4h, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <4 x i8> %op1, shufflevector (<4 x i8> insertelement (<4 x i8> poison, i8 32, i32 0), <4 x i8> poison, <4 x i32> zeroinitializer) ret <4 x i8> %res } @@ -26,6 +40,13 @@ define <8 x i8> @sdiv_v8i8(<8 x i8> %op1) { ; CHECK-NEXT: asrd z0.b, p0/m, z0.b, #5 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmlt v1.8b, v0.8b, #0 +; NONEON-NOSVE-NEXT: usra v0.8b, v1.8b, #3 +; NONEON-NOSVE-NEXT: sshr v0.8b, v0.8b, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <8 x i8> %op1, shufflevector (<8 x i8> 
insertelement (<8 x i8> poison, i8 32, i32 0), <8 x i8> poison, <8 x i32> zeroinitializer) ret <8 x i8> %res } @@ -38,6 +59,13 @@ define <16 x i8> @sdiv_v16i8(<16 x i8> %op1) { ; CHECK-NEXT: asrd z0.b, p0/m, z0.b, #5 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmlt v1.16b, v0.16b, #0 +; NONEON-NOSVE-NEXT: usra v0.16b, v1.16b, #3 +; NONEON-NOSVE-NEXT: sshr v0.16b, v0.16b, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <16 x i8> %op1, shufflevector (<16 x i8> insertelement (<16 x i8> poison, i8 32, i32 0), <16 x i8> poison, <16 x i32> zeroinitializer) ret <16 x i8> %res } @@ -51,6 +79,18 @@ define void @sdiv_v32i8(ptr %a) { ; CHECK-NEXT: asrd z1.b, p0/m, z1.b, #5 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmlt v2.16b, v0.16b, #0 +; NONEON-NOSVE-NEXT: cmlt v3.16b, v1.16b, #0 +; NONEON-NOSVE-NEXT: usra v0.16b, v2.16b, #3 +; NONEON-NOSVE-NEXT: usra v1.16b, v3.16b, #3 +; NONEON-NOSVE-NEXT: sshr v0.16b, v0.16b, #5 +; NONEON-NOSVE-NEXT: sshr v1.16b, v1.16b, #5 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %res = sdiv <32 x i8> %op1, shufflevector (<32 x i8> insertelement (<32 x i8> poison, i8 32, i32 0), <32 x i8> poison, <32 x i32> zeroinitializer) store <32 x i8> %res, ptr %a @@ -66,6 +106,20 @@ define <2 x i16> @sdiv_v2i16(<2 x i16> %op1) { ; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #5 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: shl v1.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: mov w8, #31 // =0x1f +; NONEON-NOSVE-NEXT: dup v2.2s, w8 +; NONEON-NOSVE-NEXT: sshr v1.2s, v1.2s, #16 +; NONEON-NOSVE-NEXT: ushr v1.2s, v1.2s, #26 +; NONEON-NOSVE-NEXT: and v1.8b, v1.8b, v2.8b +; NONEON-NOSVE-NEXT: add v0.2s, v0.2s, v1.2s +; NONEON-NOSVE-NEXT: shl v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: sshr v0.2s, v0.2s, #16 +; NONEON-NOSVE-NEXT: sshr v0.2s, v0.2s, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <2 x i16> %op1, shufflevector (<2 x i16> insertelement (<2 x i16> poison, i16 32, i32 0), <2 x i16> poison, <2 x i32> zeroinitializer) ret <2 x i16> %res } @@ -78,6 +132,13 @@ define <4 x i16> @sdiv_v4i16(<4 x i16> %op1) { ; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #5 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmlt v1.4h, v0.4h, #0 +; NONEON-NOSVE-NEXT: usra v0.4h, v1.4h, #11 +; NONEON-NOSVE-NEXT: sshr v0.4h, v0.4h, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <4 x i16> %op1, shufflevector (<4 x i16> insertelement (<4 x i16> poison, i16 32, i32 0), <4 x i16> poison, <4 x i32> zeroinitializer) ret <4 x i16> %res } @@ -90,6 +151,13 @@ define <8 x i16> @sdiv_v8i16(<8 x i16> %op1) { ; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #5 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmlt v1.8h, v0.8h, #0 +; NONEON-NOSVE-NEXT: usra v0.8h, v1.8h, #11 +; NONEON-NOSVE-NEXT: sshr v0.8h, v0.8h, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <8 x i16> %op1, shufflevector (<8 x i16> insertelement (<8 x i16> poison, i16 32, i32 0), <8 x i16> poison, <8 x i32> zeroinitializer) ret <8 x i16> %res } @@ -103,6 +171,18 @@ define void @sdiv_v16i16(ptr %a) { ; 
CHECK-NEXT: asrd z1.h, p0/m, z1.h, #5 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmlt v2.8h, v0.8h, #0 +; NONEON-NOSVE-NEXT: cmlt v3.8h, v1.8h, #0 +; NONEON-NOSVE-NEXT: usra v0.8h, v2.8h, #11 +; NONEON-NOSVE-NEXT: usra v1.8h, v3.8h, #11 +; NONEON-NOSVE-NEXT: sshr v0.8h, v0.8h, #5 +; NONEON-NOSVE-NEXT: sshr v1.8h, v1.8h, #5 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %res = sdiv <16 x i16> %op1, shufflevector (<16 x i16> insertelement (<16 x i16> poison, i16 32, i32 0), <16 x i16> poison, <16 x i32> zeroinitializer) store <16 x i16> %res, ptr %a @@ -117,6 +197,13 @@ define <2 x i32> @sdiv_v2i32(<2 x i32> %op1) { ; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #5 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmlt v1.2s, v0.2s, #0 +; NONEON-NOSVE-NEXT: usra v0.2s, v1.2s, #27 +; NONEON-NOSVE-NEXT: sshr v0.2s, v0.2s, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <2 x i32> %op1, shufflevector (<2 x i32> insertelement (<2 x i32> poison, i32 32, i32 0), <2 x i32> poison, <2 x i32> zeroinitializer) ret <2 x i32> %res } @@ -129,6 +216,13 @@ define <4 x i32> @sdiv_v4i32(<4 x i32> %op1) { ; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #5 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmlt v1.4s, v0.4s, #0 +; NONEON-NOSVE-NEXT: usra v0.4s, v1.4s, #27 +; NONEON-NOSVE-NEXT: sshr v0.4s, v0.4s, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <4 x i32> %op1, shufflevector (<4 x i32> insertelement (<4 x i32> poison, i32 32, i32 0), <4 x i32> poison, <4 x i32> zeroinitializer) ret <4 x i32> %res } @@ -142,6 +236,18 @@ define void @sdiv_v8i32(ptr %a) { ; CHECK-NEXT: asrd z1.s, p0/m, z1.s, #5 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmlt v2.4s, v0.4s, #0 +; NONEON-NOSVE-NEXT: cmlt v3.4s, v1.4s, #0 +; NONEON-NOSVE-NEXT: usra v0.4s, v2.4s, #27 +; NONEON-NOSVE-NEXT: usra v1.4s, v3.4s, #27 +; NONEON-NOSVE-NEXT: sshr v0.4s, v0.4s, #5 +; NONEON-NOSVE-NEXT: sshr v1.4s, v1.4s, #5 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %res = sdiv <8 x i32> %op1, shufflevector (<8 x i32> insertelement (<8 x i32> poison, i32 32, i32 0), <8 x i32> poison, <8 x i32> zeroinitializer) store <8 x i32> %res, ptr %a @@ -156,6 +262,13 @@ define <1 x i64> @sdiv_v1i64(<1 x i64> %op1) { ; CHECK-NEXT: asrd z0.d, p0/m, z0.d, #5 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmlt d1, d0, #0 +; NONEON-NOSVE-NEXT: usra d0, d1, #59 +; NONEON-NOSVE-NEXT: sshr d0, d0, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <1 x i64> %op1, shufflevector (<1 x i64> insertelement (<1 x i64> poison, i64 32, i32 0), <1 x i64> poison, <1 x i32> zeroinitializer) ret <1 x i64> %res } @@ -169,6 +282,13 @@ define <2 x i64> @sdiv_v2i64(<2 x i64> %op1) { ; CHECK-NEXT: asrd z0.d, p0/m, z0.d, #5 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: cmlt v1.2d, v0.2d, #0 +; NONEON-NOSVE-NEXT: usra v0.2d, v1.2d, #59 +; 
NONEON-NOSVE-NEXT: sshr v0.2d, v0.2d, #5 +; NONEON-NOSVE-NEXT: ret %res = sdiv <2 x i64> %op1, shufflevector (<2 x i64> insertelement (<2 x i64> poison, i64 32, i32 0), <2 x i64> poison, <2 x i32> zeroinitializer) ret <2 x i64> %res } @@ -182,6 +302,18 @@ define void @sdiv_v4i64(ptr %a) { ; CHECK-NEXT: asrd z1.d, p0/m, z1.d, #5 ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: sdiv_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: cmlt v2.2d, v0.2d, #0 +; NONEON-NOSVE-NEXT: cmlt v3.2d, v1.2d, #0 +; NONEON-NOSVE-NEXT: usra v0.2d, v2.2d, #59 +; NONEON-NOSVE-NEXT: usra v1.2d, v3.2d, #59 +; NONEON-NOSVE-NEXT: sshr v0.2d, v0.2d, #5 +; NONEON-NOSVE-NEXT: sshr v1.2d, v1.2d, #5 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %res = sdiv <4 x i64> %op1, shufflevector (<4 x i64> insertelement (<4 x i64> poison, i64 32, i32 0), <4 x i64> poison, <4 x i32> zeroinitializer) store <4 x i64> %res, ptr %a diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll index 323d5278592f3e..6489e8d94d313d 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE @@ -15,6 +16,11 @@ define <4 x i8> @splat_v4i8(i8 %a) { ; CHECK-NEXT: mov z0.h, w0 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.4h, w0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x i8> undef, i8 %a, i64 0 %splat = shufflevector <4 x i8> %insert, <4 x i8> undef, <4 x i32> zeroinitializer ret <4 x i8> %splat @@ -26,6 +32,11 @@ define <8 x i8> @splat_v8i8(i8 %a) { ; CHECK-NEXT: mov z0.b, w0 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.8b, w0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <8 x i8> undef, i8 %a, i64 0 %splat = shufflevector <8 x i8> %insert, <8 x i8> undef, <8 x i32> zeroinitializer ret <8 x i8> %splat @@ -37,6 +48,11 @@ define <16 x i8> @splat_v16i8(i8 %a) { ; CHECK-NEXT: mov z0.b, w0 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.16b, w0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <16 x i8> undef, i8 %a, i64 0 %splat = shufflevector <16 x i8> %insert, <16 x i8> undef, <16 x i32> zeroinitializer ret <16 x i8> %splat @@ -48,6 +64,12 @@ define void @splat_v32i8(i8 %a, ptr %b) { ; CHECK-NEXT: mov z0.b, w0 ; CHECK-NEXT: stp q0, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.16b, w0 +; NONEON-NOSVE-NEXT: stp q0, q0, [x1] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <32 x i8> undef, i8 %a, i64 0 %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer store <32 x i8> %splat, ptr %b @@ -60,6 +82,11 @@ define <2 x i16> @splat_v2i16(i16 %a) { ; CHECK-NEXT: mov z0.s, w0 ; CHECK-NEXT: // kill: 
def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.2s, w0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <2 x i16> undef, i16 %a, i64 0 %splat = shufflevector <2 x i16> %insert, <2 x i16> undef, <2 x i32> zeroinitializer ret <2 x i16> %splat @@ -71,6 +98,11 @@ define <4 x i16> @splat_v4i16(i16 %a) { ; CHECK-NEXT: mov z0.h, w0 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.4h, w0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x i16> undef, i16 %a, i64 0 %splat = shufflevector <4 x i16> %insert, <4 x i16> undef, <4 x i32> zeroinitializer ret <4 x i16> %splat @@ -82,6 +114,11 @@ define <8 x i16> @splat_v8i16(i16 %a) { ; CHECK-NEXT: mov z0.h, w0 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.8h, w0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <8 x i16> undef, i16 %a, i64 0 %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer ret <8 x i16> %splat @@ -93,6 +130,12 @@ define void @splat_v16i16(i16 %a, ptr %b) { ; CHECK-NEXT: mov z0.h, w0 ; CHECK-NEXT: stp q0, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.8h, w0 +; NONEON-NOSVE-NEXT: stp q0, q0, [x1] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <16 x i16> undef, i16 %a, i64 0 %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer store <16 x i16> %splat, ptr %b @@ -105,6 +148,11 @@ define <2 x i32> @splat_v2i32(i32 %a) { ; CHECK-NEXT: mov z0.s, w0 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.2s, w0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <2 x i32> undef, i32 %a, i64 0 %splat = shufflevector <2 x i32> %insert, <2 x i32> undef, <2 x i32> zeroinitializer ret <2 x i32> %splat @@ -116,6 +164,11 @@ define <4 x i32> @splat_v4i32(i32 %a) { ; CHECK-NEXT: mov z0.s, w0 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.4s, w0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x i32> undef, i32 %a, i64 0 %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer ret <4 x i32> %splat @@ -127,6 +180,12 @@ define void @splat_v8i32(i32 %a, ptr %b) { ; CHECK-NEXT: mov z0.s, w0 ; CHECK-NEXT: stp q0, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.4s, w0 +; NONEON-NOSVE-NEXT: stp q0, q0, [x1] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <8 x i32> undef, i32 %a, i64 0 %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer store <8 x i32> %splat, ptr %b @@ -139,6 +198,11 @@ define <1 x i64> @splat_v1i64(i64 %a) { ; CHECK-NEXT: mov z0.d, x0 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov d0, x0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <1 x i64> undef, i64 %a, i64 0 %splat = shufflevector <1 x i64> %insert, <1 x i64> undef, <1 x i32> zeroinitializer ret <1 x i64> %splat @@ -150,6 +214,11 @@ 
define <2 x i64> @splat_v2i64(i64 %a) { ; CHECK-NEXT: mov z0.d, x0 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.2d, x0 +; NONEON-NOSVE-NEXT: ret %insert = insertelement <2 x i64> undef, i64 %a, i64 0 %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer ret <2 x i64> %splat @@ -161,6 +230,12 @@ define void @splat_v4i64(i64 %a, ptr %b) { ; CHECK-NEXT: mov z0.d, x0 ; CHECK-NEXT: stp q0, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: dup v0.2d, x0 +; NONEON-NOSVE-NEXT: stp q0, q0, [x1] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x i64> undef, i64 %a, i64 0 %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer store <4 x i64> %splat, ptr %b @@ -178,6 +253,12 @@ define <2 x half> @splat_v2f16(half %a) { ; CHECK-NEXT: mov z0.h, h0 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $h0 killed $h0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.4h, v0.h[0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <2 x half> undef, half %a, i64 0 %splat = shufflevector <2 x half> %insert, <2 x half> undef, <2 x i32> zeroinitializer ret <2 x half> %splat @@ -190,6 +271,12 @@ define <4 x half> @splat_v4f16(half %a) { ; CHECK-NEXT: mov z0.h, h0 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $h0 killed $h0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.4h, v0.h[0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x half> undef, half %a, i64 0 %splat = shufflevector <4 x half> %insert, <4 x half> undef, <4 x i32> zeroinitializer ret <4 x half> %splat @@ -202,6 +289,12 @@ define <8 x half> @splat_v8f16(half %a) { ; CHECK-NEXT: mov z0.h, h0 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $h0 killed $h0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.8h, v0.h[0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <8 x half> undef, half %a, i64 0 %splat = shufflevector <8 x half> %insert, <8 x half> undef, <8 x i32> zeroinitializer ret <8 x half> %splat @@ -214,6 +307,13 @@ define void @splat_v16f16(half %a, ptr %b) { ; CHECK-NEXT: mov z0.h, h0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $h0 killed $h0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.8h, v0.h[0] +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <16 x half> undef, half %a, i64 0 %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer store <16 x half> %splat, ptr %b @@ -227,6 +327,12 @@ define <2 x float> @splat_v2f32(float %a, <2 x float> %op2) { ; CHECK-NEXT: mov z0.s, s0 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $s0 killed $s0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.2s, v0.s[0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <2 x float> undef, float %a, i64 0 %splat = shufflevector <2 x float> %insert, <2 x float> undef, <2 x i32> zeroinitializer ret <2 x float> 
%splat @@ -239,6 +345,12 @@ define <4 x float> @splat_v4f32(float %a, <4 x float> %op2) { ; CHECK-NEXT: mov z0.s, s0 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $s0 killed $s0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.4s, v0.s[0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x float> undef, float %a, i64 0 %splat = shufflevector <4 x float> %insert, <4 x float> undef, <4 x i32> zeroinitializer ret <4 x float> %splat @@ -251,6 +363,13 @@ define void @splat_v8f32(float %a, ptr %b) { ; CHECK-NEXT: mov z0.s, s0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $s0 killed $s0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.4s, v0.s[0] +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <8 x float> undef, float %a, i64 0 %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer store <8 x float> %splat, ptr %b @@ -261,6 +380,10 @@ define <1 x double> @splat_v1f64(double %a, <1 x double> %op2) { ; CHECK-LABEL: splat_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ret %insert = insertelement <1 x double> undef, double %a, i64 0 %splat = shufflevector <1 x double> %insert, <1 x double> undef, <1 x i32> zeroinitializer ret <1 x double> %splat @@ -273,6 +396,12 @@ define <2 x double> @splat_v2f64(double %a, <2 x double> %op2) { ; CHECK-NEXT: mov z0.d, d0 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.2d, v0.d[0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <2 x double> undef, double %a, i64 0 %splat = shufflevector <2 x double> %insert, <2 x double> undef, <2 x i32> zeroinitializer ret <2 x double> %splat @@ -285,6 +414,13 @@ define void @splat_v4f64(double %a, ptr %b) { ; CHECK-NEXT: mov z0.d, d0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: // kill: def $d0 killed $d0 def $q0 +; NONEON-NOSVE-NEXT: dup v0.2d, v0.d[0] +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x double> undef, double %a, i64 0 %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer store <4 x double> %splat, ptr %b @@ -301,6 +437,12 @@ define void @splat_imm_v32i8(ptr %a) { ; CHECK-NEXT: mov z0.b, #1 // =0x1 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_imm_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.16b, #1 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <32 x i8> undef, i8 1, i64 0 %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer store <32 x i8> %splat, ptr %a @@ -313,6 +455,13 @@ define void @splat_imm_v16i16(ptr %a) { ; CHECK-NEXT: mov z0.h, #2 // =0x2 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_imm_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #2 // =0x2 +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <16 x i16> undef, i16 2, i64 0 %splat = shufflevector 
<16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer store <16 x i16> %splat, ptr %a @@ -325,6 +474,13 @@ define void @splat_imm_v8i32(ptr %a) { ; CHECK-NEXT: mov z0.s, #3 // =0x3 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_imm_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #3 // =0x3 +; NONEON-NOSVE-NEXT: dup v0.4s, w8 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <8 x i32> undef, i32 3, i64 0 %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer store <8 x i32> %splat, ptr %a @@ -337,6 +493,13 @@ define void @splat_imm_v4i64(ptr %a) { ; CHECK-NEXT: mov z0.d, #4 // =0x4 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_imm_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #4 // =0x4 +; NONEON-NOSVE-NEXT: dup v0.2d, x8 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x i64> undef, i64 4, i64 0 %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer store <4 x i64> %splat, ptr %a @@ -353,6 +516,13 @@ define void @splat_imm_v16f16(ptr %a) { ; CHECK-NEXT: fmov z0.h, #5.00000000 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_imm_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov w8, #17664 // =0x4500 +; NONEON-NOSVE-NEXT: dup v0.8h, w8 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <16 x half> undef, half 5.0, i64 0 %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer store <16 x half> %splat, ptr %a @@ -365,6 +535,12 @@ define void @splat_imm_v8f32(ptr %a) { ; CHECK-NEXT: fmov z0.s, #6.00000000 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_imm_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov v0.4s, #6.00000000 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <8 x float> undef, float 6.0, i64 0 %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer store <8 x float> %splat, ptr %a @@ -377,6 +553,12 @@ define void @splat_imm_v4f64(ptr %a) { ; CHECK-NEXT: fmov z0.d, #7.00000000 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: splat_imm_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov v0.2d, #7.00000000 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret %insert = insertelement <4 x double> undef, double 7.0, i64 0 %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer store <4 x double> %splat, ptr %a diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll index 06709ca3685c8e..41449aa90ba0a7 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -12,6 +13,11 @@ define void @store_v4i8(ptr %a) { ; CHECK-NEXT: ptrue p0.h, vl4 ; CHECK-NEXT: st1b { z0.h 
}, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str wzr, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x i8> zeroinitializer, ptr %a ret void } @@ -22,6 +28,12 @@ define void @store_v8i8(ptr %a) { ; CHECK-NEXT: mov z0.b, #0 // =0x0 ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret store <8 x i8> zeroinitializer, ptr %a ret void } @@ -32,6 +44,12 @@ define void @store_v16i8(ptr %a) { ; CHECK-NEXT: mov z0.b, #0 // =0x0 ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret store <16 x i8> zeroinitializer, ptr %a ret void } @@ -42,6 +60,12 @@ define void @store_v32i8(ptr %a) { ; CHECK-NEXT: mov z0.b, #0 // =0x0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <32 x i8> zeroinitializer, ptr %a ret void } @@ -53,6 +77,11 @@ define void @store_v2i16(ptr %a) { ; CHECK-NEXT: ptrue p0.s, vl2 ; CHECK-NEXT: st1h { z0.s }, p0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str wzr, [x0] +; NONEON-NOSVE-NEXT: ret store <2 x i16> zeroinitializer, ptr %a ret void } @@ -64,6 +93,11 @@ define void @store_v2f16(ptr %a) { ; CHECK-NEXT: fmov w8, s0 ; CHECK-NEXT: str w8, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v2f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str wzr, [x0] +; NONEON-NOSVE-NEXT: ret store <2 x half> zeroinitializer, ptr %a ret void } @@ -74,6 +108,12 @@ define void @store_v4i16(ptr %a) { ; CHECK-NEXT: mov z0.h, #0 // =0x0 ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x i16> zeroinitializer, ptr %a ret void } @@ -84,6 +124,12 @@ define void @store_v4f16(ptr %a) { ; CHECK-NEXT: mov z0.h, #0 // =0x0 ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d0, #0000000000000000 +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x half> zeroinitializer, ptr %a ret void } @@ -94,6 +140,12 @@ define void @store_v8i16(ptr %a) { ; CHECK-NEXT: mov z0.h, #0 // =0x0 ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret store <8 x i16> zeroinitializer, ptr %a ret void } @@ -104,6 +156,12 @@ define void @store_v8f16(ptr %a) { ; CHECK-NEXT: mov z0.h, #0 // =0x0 ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: str q0, [x0] +; NONEON-NOSVE-NEXT: ret store <8 x half> zeroinitializer, ptr %a ret void } @@ -114,6 +172,12 @@ define void @store_v16i16(ptr %a) { ; CHECK-NEXT: mov z0.h, #0 // =0x0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: 
store_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <16 x i16> zeroinitializer, ptr %a ret void } @@ -124,6 +188,12 @@ define void @store_v16f16(ptr %a) { ; CHECK-NEXT: mov z0.h, #0 // =0x0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <16 x half> zeroinitializer, ptr %a ret void } @@ -133,6 +203,11 @@ define void @store_v2i32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str xzr, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str xzr, [x0] +; NONEON-NOSVE-NEXT: ret store <2 x i32> zeroinitializer, ptr %a ret void } @@ -142,6 +217,11 @@ define void @store_v2f32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str xzr, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: str xzr, [x0] +; NONEON-NOSVE-NEXT: ret store <2 x float> zeroinitializer, ptr %a ret void } @@ -151,6 +231,11 @@ define void @store_v4i32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: stp xzr, xzr, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: stp xzr, xzr, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x i32> zeroinitializer, ptr %a ret void } @@ -160,6 +245,11 @@ define void @store_v4f32(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: stp xzr, xzr, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: stp xzr, xzr, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x float> zeroinitializer, ptr %a ret void } @@ -170,6 +260,12 @@ define void @store_v8i32(ptr %a) { ; CHECK-NEXT: mov z0.s, #0 // =0x0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <8 x i32> zeroinitializer, ptr %a ret void } @@ -180,6 +276,12 @@ define void @store_v8f32(ptr %a) { ; CHECK-NEXT: mov z0.s, #0 // =0x0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <8 x float> zeroinitializer, ptr %a ret void } @@ -190,6 +292,12 @@ define void @store_v1i64(ptr %a) { ; CHECK-NEXT: mov z0.d, #0 // =0x0 ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v1i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret store <1 x i64> zeroinitializer, ptr %a ret void } @@ -200,6 +308,12 @@ define void @store_v1f64(ptr %a) { ; CHECK-NEXT: fmov d0, xzr ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v1f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi d0, #0000000000000000 +; NONEON-NOSVE-NEXT: str d0, [x0] +; NONEON-NOSVE-NEXT: ret store <1 x double> zeroinitializer, ptr %a ret void } @@ -209,6 +323,11 @@ define void @store_v2i64(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: stp xzr, xzr, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: stp xzr, xzr, [x0] +; NONEON-NOSVE-NEXT: ret store <2 
x i64> zeroinitializer, ptr %a ret void } @@ -218,6 +337,11 @@ define void @store_v2f64(ptr %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: stp xzr, xzr, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: stp xzr, xzr, [x0] +; NONEON-NOSVE-NEXT: ret store <2 x double> zeroinitializer, ptr %a ret void } @@ -228,6 +352,12 @@ define void @store_v4i64(ptr %a) { ; CHECK-NEXT: mov z0.d, #0 // =0x0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x i64> zeroinitializer, ptr %a ret void } @@ -238,6 +368,12 @@ define void @store_v4f64(ptr %a) { ; CHECK-NEXT: mov z0.d, #0 // =0x0 ; CHECK-NEXT: stp q0, q0, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: stp q0, q0, [x0] +; NONEON-NOSVE-NEXT: ret store <4 x double> zeroinitializer, ptr %a ret void } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll index 838db0ce8185cf..d1873f43681504 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE ; Test we can code generater patterns of the form: @@ -23,6 +24,12 @@ define void @subvector_v4i8(ptr %in, ptr %out) { ; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] ; CHECK-NEXT: st1b { z0.h }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v4i8: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr w8, [x0] +; NONEON-NOSVE-NEXT: str w8, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <4 x i8>, ptr %in br label %bb1 @@ -37,6 +44,12 @@ define void @subvector_v8i8(ptr %in, ptr %out) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v8i8: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <8 x i8>, ptr %in br label %bb1 @@ -51,6 +64,12 @@ define void @subvector_v16i8(ptr %in, ptr %out) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v16i8: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <16 x i8>, ptr %in br label %bb1 @@ -65,6 +84,12 @@ define void @subvector_v32i8(ptr %in, ptr %out) { ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v32i8: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <32 x i8>, ptr %in br label %bb1 @@ -81,6 +106,12 @@ define void @subvector_v2i16(ptr %in, ptr %out) { ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] ; CHECK-NEXT: st1h { z0.s }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v2i16: +; NONEON-NOSVE: // %bb.0: // 
%bb1 +; NONEON-NOSVE-NEXT: ldr w8, [x0] +; NONEON-NOSVE-NEXT: str w8, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <2 x i16>, ptr %in br label %bb1 @@ -95,6 +126,12 @@ define void @subvector_v4i16(ptr %in, ptr %out) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v4i16: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <4 x i16>, ptr %in br label %bb1 @@ -109,6 +146,12 @@ define void @subvector_v8i16(ptr %in, ptr %out) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v8i16: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <8 x i16>, ptr %in br label %bb1 @@ -123,6 +166,12 @@ define void @subvector_v16i16(ptr %in, ptr %out) { ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v16i16: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <16 x i16>, ptr %in br label %bb1 @@ -138,6 +187,12 @@ define void @subvector_v2i32(ptr %in, ptr %out) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v2i32: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <2 x i32>, ptr %in br label %bb1 @@ -152,6 +207,12 @@ define void @subvector_v4i32(ptr %in, ptr %out) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v4i32: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <4 x i32>, ptr %in br label %bb1 @@ -166,6 +227,12 @@ define void @subvector_v8i32(ptr %in, ptr %out) { ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v8i32: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <8 x i32>, ptr %in br label %bb1 @@ -181,6 +248,12 @@ define void @subvector_v2i64(ptr %in, ptr %out) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v2i64: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <2 x i64>, ptr %in br label %bb1 @@ -195,6 +268,12 @@ define void @subvector_v4i64(ptr %in, ptr %out) { ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v4i64: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <4 x i64>, ptr %in br label %bb1 @@ -210,6 +289,12 @@ define void @subvector_v2f16(ptr %in, ptr %out) { ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: str w8, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v2f16: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr w8, [x0] +; NONEON-NOSVE-NEXT: str w8, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <2 x half>, ptr %in br label %bb1 @@ -224,6 +309,12 @@ define void @subvector_v4f16(ptr %in, ptr %out) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str d0, 
[x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v4f16: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <4 x half>, ptr %in br label %bb1 @@ -238,6 +329,12 @@ define void @subvector_v8f16(ptr %in, ptr %out) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v8f16: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <8 x half>, ptr %in br label %bb1 @@ -252,6 +349,12 @@ define void @subvector_v16f16(ptr %in, ptr %out) { ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v16f16: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <16 x half>, ptr %in br label %bb1 @@ -267,6 +370,12 @@ define void @subvector_v2f32(ptr %in, ptr %out) { ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: str d0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v2f32: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr d0, [x0] +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <2 x float>, ptr %in br label %bb1 @@ -281,6 +390,12 @@ define void @subvector_v4f32(ptr %in, ptr %out) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v4f32: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <4 x float>, ptr %in br label %bb1 @@ -295,6 +410,12 @@ define void @subvector_v8f32(ptr %in, ptr %out) { ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v8f32: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <8 x float>,ptr %in br label %bb1 @@ -310,6 +431,12 @@ define void @subvector_v2f64(ptr %in, ptr %out) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v2f64: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: str q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <2 x double>, ptr %in br label %bb1 @@ -324,6 +451,12 @@ define void @subvector_v4f64(ptr %in, ptr %out) { ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: subvector_v4f64: +; NONEON-NOSVE: // %bb.0: // %bb1 +; NONEON-NOSVE-NEXT: ldp q0, q1, [x0] +; NONEON-NOSVE-NEXT: stp q0, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <4 x double>, ptr %in br label %bb1 diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll index 7e3a175c40d29c..f0a4368da3ee17 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = 
"aarch64-unknown-linux-gnu" @@ -12,6 +13,13 @@ define void @store_trunc_v8i16i8(ptr %ap, ptr %dest) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: st1b { z0.h }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_trunc_v8i16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: xtn v0.8b, v0.8h +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <8 x i16>, ptr %ap %val = trunc <8 x i16> %a to <8 x i8> store <8 x i8> %val, ptr %dest @@ -25,6 +33,14 @@ define void @store_trunc_v4i32i8(ptr %ap, ptr %dest) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: st1b { z0.s }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_trunc_v4i32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8b, v0.8b, v0.8b +; NONEON-NOSVE-NEXT: str s0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <4 x i32>, ptr %ap %val = trunc <4 x i32> %a to <4 x i8> store <4 x i8> %val, ptr %dest @@ -38,6 +54,13 @@ define void @store_trunc_v4i32i16(ptr %ap, ptr %dest) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: st1h { z0.s }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_trunc_v4i32i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <4 x i32>, ptr %ap %val = trunc <4 x i32> %a to <4 x i16> store <4 x i16> %val, ptr %dest @@ -51,6 +74,13 @@ define void @store_trunc_v2i64i8(ptr %ap, ptr %dest) { ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: st1w { z0.d }, p0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_trunc_v2i64i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0] +; NONEON-NOSVE-NEXT: xtn v0.2s, v0.2d +; NONEON-NOSVE-NEXT: str d0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <2 x i64>, ptr %ap %val = trunc <2 x i64> %a to <2 x i32> store <2 x i32> %val, ptr %dest @@ -66,6 +96,14 @@ define void @store_trunc_v2i256i64(ptr %ap, ptr %dest) { ; CHECK-NEXT: splice z1.d, p0, z1.d, z0.d ; CHECK-NEXT: str q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: store_trunc_v2i256i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr d0, [x0, #32] +; NONEON-NOSVE-NEXT: ldr d1, [x0] +; NONEON-NOSVE-NEXT: mov v1.d[1], v0.d[0] +; NONEON-NOSVE-NEXT: str q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <2 x i256>, ptr %ap %val = trunc <2 x i256> %a to <2 x i64> store <2 x i64> %val, ptr %dest diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll index 70219dd30f7699..4895ffb6858e47 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -19,6 +20,12 @@ define <16 x i8> @trunc_v16i16_v16i8(ptr %in) nounwind { ; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v16i16_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: ret 
%a = load <16 x i16>, ptr %in %b = trunc <16 x i16> %a to <16 x i8> ret <16 x i8> %b @@ -41,6 +48,17 @@ define void @trunc_v32i16_v32i8(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: add z1.b, z2.b, z2.b ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v32i16_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: uzp1 v1.16b, v3.16b, v2.16b +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <32 x i16>, ptr %in %b = trunc <32 x i16> %a to <32 x i8> %c = add <32 x i8> %b, %b @@ -76,6 +94,24 @@ define void @trunc_v64i16_v64i8(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: stp q0, q1, [x1, #32] ; CHECK-NEXT: stp q2, q3, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v64i16_v64i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #64] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #96] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: ldp q6, q1, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v2.16b, v3.16b, v2.16b +; NONEON-NOSVE-NEXT: uzp1 v3.16b, v5.16b, v4.16b +; NONEON-NOSVE-NEXT: uzp1 v1.16b, v6.16b, v1.16b +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v2.16b, v2.16b, v2.16b +; NONEON-NOSVE-NEXT: add v3.16b, v3.16b, v3.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #32] +; NONEON-NOSVE-NEXT: stp q3, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <64 x i16>, ptr %in %b = trunc <64 x i16> %a to <64 x i8> %c = add <64 x i8> %b, %b @@ -133,6 +169,38 @@ define void @trunc_v128i16_v128i8(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: stp q2, q3, [x1, #32] ; CHECK-NEXT: stp q4, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v128i16_v128i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #192] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0, #224] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0, #128] +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: ldp q16, q1, [x0, #160] +; NONEON-NOSVE-NEXT: uzp1 v4.16b, v5.16b, v4.16b +; NONEON-NOSVE-NEXT: ldp q17, q5, [x0, #64] +; NONEON-NOSVE-NEXT: uzp1 v6.16b, v7.16b, v6.16b +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q18, q7, [x0, #96] +; NONEON-NOSVE-NEXT: uzp1 v1.16b, v16.16b, v1.16b +; NONEON-NOSVE-NEXT: uzp1 v5.16b, v17.16b, v5.16b +; NONEON-NOSVE-NEXT: ldp q17, q16, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v2.16b, v3.16b, v2.16b +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v4.16b, v4.16b, v4.16b +; NONEON-NOSVE-NEXT: uzp1 v7.16b, v18.16b, v7.16b +; NONEON-NOSVE-NEXT: add v3.16b, v6.16b, v6.16b +; NONEON-NOSVE-NEXT: uzp1 v6.16b, v17.16b, v16.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q4, [x1, #96] +; NONEON-NOSVE-NEXT: add v0.16b, v5.16b, v5.16b +; NONEON-NOSVE-NEXT: add v2.16b, v2.16b, v2.16b +; NONEON-NOSVE-NEXT: add v4.16b, v7.16b, v7.16b +; NONEON-NOSVE-NEXT: stp q3, q1, [x1, #64] +; NONEON-NOSVE-NEXT: add v1.16b, v6.16b, v6.16b +; NONEON-NOSVE-NEXT: stp q0, q4, [x1, #32] +; NONEON-NOSVE-NEXT: stp q2, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <128 x i16>, ptr %in %b = trunc <128 x i16> %a to <128 x i8> %c = add <128 x i8> %b, %b @@ -155,6 +223,13 @@ define <8 x i8> @trunc_v8i32_v8i8(ptr %in) nounwind { ; CHECK-NEXT: 
uzp1 z0.b, z0.b, z0.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v8i32_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: xtn v0.8b, v0.8h +; NONEON-NOSVE-NEXT: ret %a = load <8 x i32>, ptr %in %b = trunc <8 x i32> %a to <8 x i8> ret <8 x i8> %b @@ -178,6 +253,15 @@ define <16 x i8> @trunc_v16i32_v16i8(ptr %in) nounwind { ; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v16i32_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v3.8h, v2.8h +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: ret %a = load <16 x i32>, ptr %in %b = trunc <16 x i32> %a to <16 x i8> ret <16 x i8> %b @@ -215,6 +299,23 @@ define void @trunc_v32i32_v32i8(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: add z1.b, z3.b, z3.b ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v32i32_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #64] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #96] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v3.8h, v2.8h +; NONEON-NOSVE-NEXT: uzp1 v3.8h, v5.8h, v4.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v7.8h, v6.8h +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: uzp1 v1.16b, v3.16b, v1.16b +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <32 x i32>, ptr %in %b = trunc <32 x i32> %a to <32 x i8> %c = add <32 x i8> %b, %b @@ -279,6 +380,36 @@ define void @trunc_v64i32_v64i8(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: stp q1, q2, [x1, #32] ; CHECK-NEXT: stp q3, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v64i32_v64i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #128] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #160] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0, #192] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0, #224] +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v3.8h, v2.8h +; NONEON-NOSVE-NEXT: ldp q3, q1, [x0] +; NONEON-NOSVE-NEXT: uzp1 v4.8h, v5.8h, v4.8h +; NONEON-NOSVE-NEXT: ldp q17, q5, [x0, #64] +; NONEON-NOSVE-NEXT: uzp1 v6.8h, v7.8h, v6.8h +; NONEON-NOSVE-NEXT: ldp q16, q7, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q19, q18, [x0, #96] +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v1.8h +; NONEON-NOSVE-NEXT: uzp1 v5.8h, v17.8h, v5.8h +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v0.16b, v2.16b +; NONEON-NOSVE-NEXT: uzp1 v7.8h, v16.8h, v7.8h +; NONEON-NOSVE-NEXT: uzp1 v3.8h, v19.8h, v18.8h +; NONEON-NOSVE-NEXT: uzp1 v2.16b, v4.16b, v6.16b +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: uzp1 v1.16b, v1.16b, v7.16b +; NONEON-NOSVE-NEXT: uzp1 v3.16b, v5.16b, v3.16b +; NONEON-NOSVE-NEXT: add v2.16b, v2.16b, v2.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #32] +; NONEON-NOSVE-NEXT: add v3.16b, v3.16b, v3.16b +; NONEON-NOSVE-NEXT: stp q1, q3, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <64 x i32>, ptr %in %b = trunc <64 x i32> %a to <64 x i8> %c = add <64 x i8> %b, %b @@ -300,6 +431,12 @@ 
define <8 x i16> @trunc_v8i32_v8i16(ptr %in) nounwind { ; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v8i32_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: ret %a = load <8 x i32>, ptr %in %b = trunc <8 x i32> %a to <8 x i16> ret <8 x i16> %b @@ -322,6 +459,17 @@ define void @trunc_v16i32_v16i16(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: add z1.h, z2.h, z2.h ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v16i32_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v2.8h +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <16 x i32>, ptr %in %b = trunc <16 x i32> %a to <16 x i16> %c = add <16 x i16> %b, %b @@ -357,6 +505,24 @@ define void @trunc_v32i32_v32i16(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: stp q0, q1, [x1, #32] ; CHECK-NEXT: stp q2, q3, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v32i32_v32i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #64] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #96] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: ldp q6, q1, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v3.8h, v2.8h +; NONEON-NOSVE-NEXT: uzp1 v3.8h, v5.8h, v4.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v6.8h, v1.8h +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v0.8h +; NONEON-NOSVE-NEXT: add v2.8h, v2.8h, v2.8h +; NONEON-NOSVE-NEXT: add v3.8h, v3.8h, v3.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #32] +; NONEON-NOSVE-NEXT: stp q3, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <32 x i32>, ptr %in %b = trunc <32 x i32> %a to <32 x i16> %c = add <32 x i16> %b, %b @@ -414,6 +580,38 @@ define void @trunc_v64i32_v64i16(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: stp q2, q3, [x1, #32] ; CHECK-NEXT: stp q4, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v64i32_v64i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #192] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0, #224] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0, #128] +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: ldp q16, q1, [x0, #160] +; NONEON-NOSVE-NEXT: uzp1 v4.8h, v5.8h, v4.8h +; NONEON-NOSVE-NEXT: ldp q17, q5, [x0, #64] +; NONEON-NOSVE-NEXT: uzp1 v6.8h, v7.8h, v6.8h +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q18, q7, [x0, #96] +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v16.8h, v1.8h +; NONEON-NOSVE-NEXT: uzp1 v5.8h, v17.8h, v5.8h +; NONEON-NOSVE-NEXT: ldp q17, q16, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v3.8h, v2.8h +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v0.8h +; NONEON-NOSVE-NEXT: add v4.8h, v4.8h, v4.8h +; NONEON-NOSVE-NEXT: uzp1 v7.8h, v18.8h, v7.8h +; NONEON-NOSVE-NEXT: add v3.8h, v6.8h, v6.8h +; NONEON-NOSVE-NEXT: uzp1 v6.8h, v17.8h, v16.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q4, [x1, #96] +; NONEON-NOSVE-NEXT: add v0.8h, v5.8h, v5.8h +; NONEON-NOSVE-NEXT: add v2.8h, v2.8h, v2.8h +; NONEON-NOSVE-NEXT: add v4.8h, v7.8h, v7.8h +; NONEON-NOSVE-NEXT: stp q3, q1, [x1, #64] +; NONEON-NOSVE-NEXT: add v1.8h, v6.8h, v6.8h +; 
NONEON-NOSVE-NEXT: stp q0, q4, [x1, #32] +; NONEON-NOSVE-NEXT: stp q2, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <64 x i32>, ptr %in %b = trunc <64 x i32> %a to <64 x i16> %c = add <64 x i16> %b, %b @@ -437,6 +635,13 @@ define <4 x i8> @trunc_v4i64_v4i8(ptr %in) nounwind { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v4i64_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %a = load <4 x i64>, ptr %in %b = trunc <4 x i64> %a to <4 x i8> ret <4 x i8> %b @@ -461,6 +666,16 @@ define <8 x i8> @trunc_v8i64_v8i8(ptr %in) nounwind { ; CHECK-NEXT: uzp1 z0.b, z1.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v8i64_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; NONEON-NOSVE-NEXT: xtn v0.8b, v0.8h +; NONEON-NOSVE-NEXT: ret %a = load <8 x i64>, ptr %in %b = trunc <8 x i64> %a to <8 x i8> ret <8 x i8> %b @@ -499,6 +714,21 @@ define <16 x i8> @trunc_v16i64_v16i8(ptr %in) nounwind { ; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v16i64_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #96] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0, #64] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: uzp1 v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: uzp1 v3.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v4.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v2.8h +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret %a = load <16 x i64>, ptr %in %b = trunc <16 x i64> %a to <16 x i8> ret <16 x i8> %b @@ -565,6 +795,35 @@ define void @trunc_v32i64_v32i8(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: add z0.b, z0.b, z0.b ; CHECK-NEXT: stp q0, q1, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v32i64_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #224] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0, #192] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0, #96] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v1.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0, #128] +; NONEON-NOSVE-NEXT: ldp q17, q16, [x0, #160] +; NONEON-NOSVE-NEXT: uzp1 v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: ldp q19, q18, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q21, q20, [x0, #64] +; NONEON-NOSVE-NEXT: uzp1 v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: uzp1 v16.4s, v17.4s, v16.4s +; NONEON-NOSVE-NEXT: uzp1 v5.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; NONEON-NOSVE-NEXT: uzp1 v7.4s, v19.4s, v18.4s +; NONEON-NOSVE-NEXT: uzp1 v6.4s, v21.4s, v20.4s +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v4.8h, v16.8h +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v2.8h, v7.8h +; NONEON-NOSVE-NEXT: uzp1 v3.8h, v6.8h, v5.8h +; NONEON-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b +; NONEON-NOSVE-NEXT: uzp1 v1.16b, v2.16b, v3.16b +; NONEON-NOSVE-NEXT: add v0.16b, v0.16b, v0.16b +; NONEON-NOSVE-NEXT: add v1.16b, v1.16b, v1.16b 
+; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <32 x i64>, ptr %in %b = trunc <32 x i64> %a to <32 x i8> %c = add <32 x i8> %b, %b @@ -587,6 +846,13 @@ define <4 x i16> @trunc_v4i64_v4i16(ptr %in) nounwind { ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v4i64_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: xtn v0.4h, v0.4s +; NONEON-NOSVE-NEXT: ret %a = load <4 x i64>, ptr %in %b = trunc <4 x i64> %a to <4 x i16> ret <4 x i16> %b @@ -610,6 +876,15 @@ define <8 x i16> @trunc_v8i64_v8i16(ptr %in) nounwind { ; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v8i64_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; NONEON-NOSVE-NEXT: ret %a = load <8 x i64>, ptr %in %b = trunc <8 x i64> %a to <8 x i16> ret <8 x i16> %b @@ -647,6 +922,23 @@ define void @trunc_v16i64_v16i16(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: add z1.h, z3.h, z3.h ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v16i64_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #64] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #96] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: uzp1 v3.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: uzp1 v1.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v1.8h +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v0.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <16 x i64>, ptr %in %b = trunc <16 x i64> %a to <16 x i16> %c = add <16 x i16> %b, %b @@ -711,6 +1003,36 @@ define void @trunc_v32i64_v32i16(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: stp q1, q2, [x1, #32] ; CHECK-NEXT: stp q3, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v32i64_v32i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #128] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #160] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0, #192] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0, #224] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: ldp q3, q1, [x0] +; NONEON-NOSVE-NEXT: uzp1 v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: ldp q17, q5, [x0, #64] +; NONEON-NOSVE-NEXT: uzp1 v6.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: ldp q16, q7, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q19, q18, [x0, #96] +; NONEON-NOSVE-NEXT: uzp1 v1.4s, v3.4s, v1.4s +; NONEON-NOSVE-NEXT: uzp1 v5.4s, v17.4s, v5.4s +; NONEON-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; NONEON-NOSVE-NEXT: uzp1 v7.4s, v16.4s, v7.4s +; NONEON-NOSVE-NEXT: uzp1 v3.4s, v19.4s, v18.4s +; NONEON-NOSVE-NEXT: uzp1 v2.8h, v4.8h, v6.8h +; NONEON-NOSVE-NEXT: add v0.8h, v0.8h, v0.8h +; NONEON-NOSVE-NEXT: uzp1 v1.8h, v1.8h, v7.8h +; NONEON-NOSVE-NEXT: uzp1 v3.8h, v5.8h, v3.8h +; NONEON-NOSVE-NEXT: add v2.8h, v2.8h, v2.8h +; NONEON-NOSVE-NEXT: add v1.8h, v1.8h, v1.8h +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #32] +; 
NONEON-NOSVE-NEXT: add v3.8h, v3.8h, v3.8h +; NONEON-NOSVE-NEXT: stp q1, q3, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <32 x i64>, ptr %in %b = trunc <32 x i64> %a to <32 x i16> %c = add <32 x i16> %b, %b @@ -732,6 +1054,12 @@ define <4 x i32> @trunc_v4i64_v4i32(ptr %in) nounwind { ; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v4i64_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: ret %a = load <4 x i64>, ptr %in %b = trunc <4 x i64> %a to <4 x i32> ret <4 x i32> %b @@ -754,6 +1082,17 @@ define void @trunc_v8i64_v8i32(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: add z1.s, z2.s, z2.s ; CHECK-NEXT: stp q1, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v8i64_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #32] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: uzp1 v1.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: add v0.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: add v1.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q1, q0, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <8 x i64>, ptr %in %b = trunc <8 x i64> %a to <8 x i32> %c = add <8 x i32> %b, %b @@ -789,6 +1128,24 @@ define void @trunc_v16i64_v16i32(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: stp q0, q1, [x1, #32] ; CHECK-NEXT: stp q2, q3, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v16i64_v16i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #64] +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0, #96] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: ldp q6, q1, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: uzp1 v3.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: uzp1 v1.4s, v6.4s, v1.4s +; NONEON-NOSVE-NEXT: add v0.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: add v2.4s, v2.4s, v2.4s +; NONEON-NOSVE-NEXT: add v3.4s, v3.4s, v3.4s +; NONEON-NOSVE-NEXT: add v1.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q2, [x1, #32] +; NONEON-NOSVE-NEXT: stp q3, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <16 x i64>, ptr %in %b = trunc <16 x i64> %a to <16 x i32> %c = add <16 x i32> %b, %b @@ -846,6 +1203,38 @@ define void @trunc_v32i64_v32i32(ptr %in, ptr %out) nounwind { ; CHECK-NEXT: stp q2, q3, [x1, #32] ; CHECK-NEXT: stp q4, q0, [x1] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: trunc_v32i64_v32i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q0, [x0, #192] +; NONEON-NOSVE-NEXT: ldp q5, q4, [x0, #224] +; NONEON-NOSVE-NEXT: ldp q7, q6, [x0, #128] +; NONEON-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s +; NONEON-NOSVE-NEXT: ldp q16, q1, [x0, #160] +; NONEON-NOSVE-NEXT: uzp1 v4.4s, v5.4s, v4.4s +; NONEON-NOSVE-NEXT: ldp q17, q5, [x0, #64] +; NONEON-NOSVE-NEXT: uzp1 v6.4s, v7.4s, v6.4s +; NONEON-NOSVE-NEXT: ldp q3, q2, [x0] +; NONEON-NOSVE-NEXT: ldp q18, q7, [x0, #96] +; NONEON-NOSVE-NEXT: uzp1 v1.4s, v16.4s, v1.4s +; NONEON-NOSVE-NEXT: uzp1 v5.4s, v17.4s, v5.4s +; NONEON-NOSVE-NEXT: ldp q17, q16, [x0, #32] +; NONEON-NOSVE-NEXT: uzp1 v2.4s, v3.4s, v2.4s +; NONEON-NOSVE-NEXT: add v0.4s, v0.4s, v0.4s +; NONEON-NOSVE-NEXT: add v4.4s, v4.4s, v4.4s +; NONEON-NOSVE-NEXT: uzp1 v7.4s, v18.4s, v7.4s +; NONEON-NOSVE-NEXT: add v3.4s, v6.4s, v6.4s +; NONEON-NOSVE-NEXT: uzp1 v6.4s, v17.4s, v16.4s +; NONEON-NOSVE-NEXT: add v1.4s, v1.4s, v1.4s +; NONEON-NOSVE-NEXT: stp q0, q4, [x1, #96] +; NONEON-NOSVE-NEXT: add 
v0.4s, v5.4s, v5.4s +; NONEON-NOSVE-NEXT: add v2.4s, v2.4s, v2.4s +; NONEON-NOSVE-NEXT: add v4.4s, v7.4s, v7.4s +; NONEON-NOSVE-NEXT: stp q3, q1, [x1, #64] +; NONEON-NOSVE-NEXT: add v1.4s, v6.4s, v6.4s +; NONEON-NOSVE-NEXT: stp q0, q4, [x1, #32] +; NONEON-NOSVE-NEXT: stp q2, q1, [x1] +; NONEON-NOSVE-NEXT: ret %a = load <32 x i64>, ptr %in %b = trunc <32 x i64> %a to <32 x i32> %c = add <32 x i32> %b, %b diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll index 17573148040728..dd308dfadd80c8 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -14,6 +15,12 @@ define <4 x i8> @shuffle_ext_byone_v4i8(<4 x i8> %op1, <4 x i8> %op2) { ; CHECK-NEXT: tbl z0.h, { z0.h }, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v4i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v1.8b, v0.8b, v0.8b, #6 +; NONEON-NOSVE-NEXT: trn1 v0.4h, v0.4h, v1.4h +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <4 x i8> %op1, <4 x i8> %op2, <4 x i32> ret <4 x i8> %ret } @@ -28,6 +35,11 @@ define <8 x i8> @shuffle_ext_byone_v8i8(<8 x i8> %op1, <8 x i8> %op2) { ; CHECK-NEXT: insr z1.b, w8 ; CHECK-NEXT: fmov d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v8i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.8b, v0.8b, v1.8b, #7 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <8 x i8> %op1, <8 x i8> %op2, <8 x i32> ret <8 x i8> %ret } @@ -42,6 +54,11 @@ define <16 x i8> @shuffle_ext_byone_v16i8(<16 x i8> %op1, <16 x i8> %op2) { ; CHECK-NEXT: insr z1.b, w8 ; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v16i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #15 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <16 x i8> %op1, <16 x i8> %op2, <16 x i32> ret <16 x i8> %ret @@ -60,6 +77,15 @@ define void @shuffle_ext_byone_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: insr z3.b, w8 ; CHECK-NEXT: stp q1, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v32i8: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q2, [x1] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #15 +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v2.16b, #15 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b %ret = shufflevector <32 x i8> %op1, <32 x i8> %op2, <32 x i32> @shuffle_ext_byone_v2i16(<2 x i16> %op1, <2 x i16> %op2) { ; CHECK-NEXT: revw z0.d, p0/m, z0.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v2i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: rev64 v0.2s, v0.2s +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <2 x i16> %op1, <2 x i16> %op2, <2 x i32> ret <2 x i16> %ret } @@ -92,6 +123,11 @@ define <4 x i16> @shuffle_ext_byone_v4i16(<4 x i16> %op1, <4 x i16> %op2) { ; 
CHECK-NEXT: insr z1.h, w8 ; CHECK-NEXT: fmov d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v4i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.8b, v0.8b, v1.8b, #6 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <4 x i16> %op1, <4 x i16> %op2, <4 x i32> ret <4 x i16> %ret } @@ -106,6 +142,11 @@ define <8 x i16> @shuffle_ext_byone_v8i16(<8 x i16> %op1, <8 x i16> %op2) { ; CHECK-NEXT: insr z1.h, w8 ; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v8i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #14 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <8 x i16> %op1, <8 x i16> %op2, <8 x i32> ret <8 x i16> %ret } @@ -123,6 +164,15 @@ define void @shuffle_ext_byone_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: insr z3.h, w8 ; CHECK-NEXT: stp q1, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v16i16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q2, [x1] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #14 +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v2.16b, #14 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b %ret = shufflevector <16 x i16> %op1, <16 x i16> %op2, <16 x i32> @shuffle_ext_byone_v2i32(<2 x i32> %op1, <2 x i32> %op2) { ; CHECK-NEXT: insr z1.s, w8 ; CHECK-NEXT: fmov d0, d1 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v2i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.8b, v0.8b, v1.8b, #4 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <2 x i32> %op1, <2 x i32> %op2, <2 x i32> ret <2 x i32> %ret } @@ -155,6 +210,11 @@ define <4 x i32> @shuffle_ext_byone_v4i32(<4 x i32> %op1, <4 x i32> %op2) { ; CHECK-NEXT: insr z1.s, w8 ; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v4i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #12 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <4 x i32> %op1, <4 x i32> %op2, <4 x i32> ret <4 x i32> %ret } @@ -172,6 +232,15 @@ define void @shuffle_ext_byone_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: insr z3.s, w8 ; CHECK-NEXT: stp q1, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v8i32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q2, [x1] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #12 +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v2.16b, #12 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b %ret = shufflevector <8 x i32> %op1, <8 x i32> %op2, <8 x i32> @@ -189,6 +258,11 @@ define <2 x i64> @shuffle_ext_byone_v2i64(<2 x i64> %op1, <2 x i64> %op2) { ; CHECK-NEXT: insr z1.d, x8 ; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v2i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #8 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <2 x i64> %op1, <2 x i64> %op2, <2 x i32> ret <2 x i64> %ret } @@ -206,6 +280,15 @@ define void @shuffle_ext_byone_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: insr z3.d, x8 ; CHECK-NEXT: stp q1, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v4i64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q2, [x1] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #8 +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v2.16b, #8 
+; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b %ret = shufflevector <4 x i64> %op1, <4 x i64> %op2, <4 x i32> @@ -223,6 +306,11 @@ define <4 x half> @shuffle_ext_byone_v4f16(<4 x half> %op1, <4 x half> %op2) { ; CHECK-NEXT: insr z0.h, h2 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v4f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.8b, v0.8b, v1.8b, #6 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <4 x half> %op1, <4 x half> %op2, <4 x i32> ret <4 x half> %ret } @@ -236,6 +324,11 @@ define <8 x half> @shuffle_ext_byone_v8f16(<8 x half> %op1, <8 x half> %op2) { ; CHECK-NEXT: insr z0.h, h2 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v8f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #14 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <8 x half> %op1, <8 x half> %op2, <8 x i32> ret <8 x half> %ret } @@ -251,6 +344,15 @@ define void @shuffle_ext_byone_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: insr z3.h, h2 ; CHECK-NEXT: stp q1, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v16f16: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q2, [x1] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #14 +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v2.16b, #14 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b %ret = shufflevector <16 x half> %op1, <16 x half> %op2, <16 x i32> @shuffle_ext_byone_v2f32(<2 x float> %op1, <2 x float> %op2) ; CHECK-NEXT: insr z0.s, s2 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v2f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.8b, v0.8b, v1.8b, #4 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <2 x float> %op1, <2 x float> %op2, <2 x i32> ret <2 x float> %ret } @@ -281,6 +388,11 @@ define <4 x float> @shuffle_ext_byone_v4f32(<4 x float> %op1, <4 x float> %op2) ; CHECK-NEXT: insr z0.s, s2 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v4f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #12 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <4 x float> %op1, <4 x float> %op2, <4 x i32> ret <4 x float> %ret } @@ -296,6 +408,15 @@ define void @shuffle_ext_byone_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: insr z3.s, s2 ; CHECK-NEXT: stp q1, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v8f32: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q2, [x1] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #12 +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v2.16b, #12 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b %ret = shufflevector <8 x float> %op1, <8 x float> %op2, <8 x i32> @@ -312,6 +433,11 @@ define <2 x double> @shuffle_ext_byone_v2f64(<2 x double> %op1, <2 x double> %op ; CHECK-NEXT: insr z0.d, d2 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v2f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #8 +; NONEON-NOSVE-NEXT: ret %ret = shufflevector <2 x double> %op1, 
<2 x double> %op2, <2 x i32> ret <2 x double> %ret } @@ -327,6 +453,15 @@ define void @shuffle_ext_byone_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: insr z3.d, d2 ; CHECK-NEXT: stp q1, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_v4f64: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q1, q2, [x1] +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v1.16b, #8 +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v2.16b, #8 +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> @@ -345,6 +480,15 @@ define void @shuffle_ext_byone_reverse(ptr %a, ptr %b) { ; CHECK-NEXT: insr z3.d, d2 ; CHECK-NEXT: stp q1, q3, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_byone_reverse: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldp q0, q2, [x0] +; NONEON-NOSVE-NEXT: ldr q1, [x1, #16] +; NONEON-NOSVE-NEXT: ext v1.16b, v1.16b, v0.16b, #8 +; NONEON-NOSVE-NEXT: ext v0.16b, v0.16b, v2.16b, #8 +; NONEON-NOSVE-NEXT: stp q1, q0, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> @@ -359,6 +503,13 @@ define void @shuffle_ext_invalid(ptr %a, ptr %b) { ; CHECK-NEXT: ldr q1, [x1] ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: shuffle_ext_invalid: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: ldr q0, [x0, #16] +; NONEON-NOSVE-NEXT: ldr q1, [x1] +; NONEON-NOSVE-NEXT: stp q0, q1, [x0] +; NONEON-NOSVE-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-test-register-mov.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-test-register-mov.ll index 337a2134de5b8a..42f3f03a5ea058 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-test-register-mov.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-test-register-mov.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s ; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE target triple = "aarch64-unknown-linux-gnu" @@ -11,6 +12,11 @@ define fp128 @test_streaming_compatible_register_mov(fp128 %q0, fp128 %q1) { ; CHECK: // %bb.0: ; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: test_streaming_compatible_register_mov: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: mov v0.16b, v1.16b +; NONEON-NOSVE-NEXT: ret ret fp128 %q1 } @@ -20,6 +26,11 @@ define double @fp_zero_constant() { ; CHECK: // %bb.0: ; CHECK-NEXT: fmov d0, xzr ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fp_zero_constant: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: fmov d0, xzr +; NONEON-NOSVE-NEXT: ret ret double 0.0 } @@ -29,6 +40,11 @@ define <2 x i64> @fixed_vec_zero_constant() { ; CHECK-NEXT: mov z0.d, #0 // =0x0 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fixed_vec_zero_constant: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: ret ret <2 x i64> zeroinitializer } @@ -38,5 +54,10 @@ define <2 x double> @fixed_vec_fp_zero_constant() { ; CHECK-NEXT: mov 
z0.d, #0 // =0x0 ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret +; +; NONEON-NOSVE-LABEL: fixed_vec_fp_zero_constant: +; NONEON-NOSVE: // %bb.0: +; NONEON-NOSVE-NEXT: movi v0.2d, #0000000000000000 +; NONEON-NOSVE-NEXT: ret ret <2 x double> } From 803e03fbb7cd97461f349fb6e235592681fc1e6c Mon Sep 17 00:00:00 2001 From: Youngsuk Kim Date: Wed, 1 May 2024 05:57:15 -0500 Subject: [PATCH 02/48] [llvm] Revive constructor of 'ResourceSegments' 582c6a82b4bc2ac5cbff803960eeb022bff10168 removed a constructor of 'ResourceSegments' that is needed in LLVM unit tests. * Revert 582c6a82b4bc2ac5cbff803960eeb022bff10168 * Update the constructor to take a const reference of `std::list` as pointed out in #89193. --- llvm/include/llvm/CodeGen/MachineScheduler.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/llvm/include/llvm/CodeGen/MachineScheduler.h b/llvm/include/llvm/CodeGen/MachineScheduler.h index 90d719e111d677..b15abf040058e8 100644 --- a/llvm/include/llvm/CodeGen/MachineScheduler.h +++ b/llvm/include/llvm/CodeGen/MachineScheduler.h @@ -807,6 +807,10 @@ class ResourceSegments { // constructor for empty set explicit ResourceSegments(){}; bool empty() const { return _Intervals.empty(); } + explicit ResourceSegments(const std::list &Intervals) + : _Intervals(Intervals) { + sortAndMerge(); + } friend bool operator==(const ResourceSegments &c1, const ResourceSegments &c2) { From 67e726a2f73964740e319d554c354a4227f29375 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Wed, 1 May 2024 07:32:33 -0400 Subject: [PATCH 03/48] [SLP]Transform stores + reverse to strided stores with stride -1, if profitable. Adds transformation of consecutive vector store + reverse to strided stores with stride -1, if it is profitable Reviewers: RKSimon, preames Reviewed By: RKSimon Pull Request: https://github.com/llvm/llvm-project/pull/90464 --- .../Transforms/Vectorize/SLPVectorizer.cpp | 74 +++++++++++++++++-- .../RISCV/strided-stores-vectorized.ll | 31 ++------ 2 files changed, 71 insertions(+), 34 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 681081de13e011..59aa2fa0554f31 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -7934,6 +7934,33 @@ void BoUpSLP::transformNodes() { } break; } + case Instruction::Store: { + Type *ScalarTy = + cast(E.getMainOp())->getValueOperand()->getType(); + auto *VecTy = FixedVectorType::get(ScalarTy, E.Scalars.size()); + Align CommonAlignment = computeCommonAlignment(E.Scalars); + // Check if profitable to represent consecutive load + reverse as strided + // load with stride -1. + if (isReverseOrder(E.ReorderIndices) && + TTI->isLegalStridedLoadStore(VecTy, CommonAlignment)) { + SmallVector Mask; + inversePermutation(E.ReorderIndices, Mask); + auto *BaseSI = cast(E.Scalars.back()); + InstructionCost OriginalVecCost = + TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(), + BaseSI->getPointerAddressSpace(), CostKind, + TTI::OperandValueInfo()) + + ::getShuffleCost(*TTI, TTI::SK_Reverse, VecTy, Mask, CostKind); + InstructionCost StridedCost = TTI->getStridedMemoryOpCost( + Instruction::Store, VecTy, BaseSI->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment, CostKind, BaseSI); + if (StridedCost < OriginalVecCost) + // Strided load is more profitable than consecutive load + reverse - + // transform the node to strided load. 
+ E.State = TreeEntry::StridedVectorize; + } + break; + } default: break; } @@ -9466,11 +9493,22 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef VectorizedVals, cast(IsReorder ? VL[E->ReorderIndices.front()] : VL0); auto GetVectorCost = [=](InstructionCost CommonCost) { // We know that we can merge the stores. Calculate the cost. - TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0)); - return TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(), - BaseSI->getPointerAddressSpace(), CostKind, - OpInfo) + - CommonCost; + InstructionCost VecStCost; + if (E->State == TreeEntry::StridedVectorize) { + Align CommonAlignment = + computeCommonAlignment(UniqueValues.getArrayRef()); + VecStCost = TTI->getStridedMemoryOpCost( + Instruction::Store, VecTy, BaseSI->getPointerOperand(), + /*VariableMask=*/false, CommonAlignment, CostKind); + } else { + assert(E->State == TreeEntry::Vectorize && + "Expected either strided or consecutive stores."); + TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0)); + VecStCost = TTI->getMemoryOpCost( + Instruction::Store, VecTy, BaseSI->getAlign(), + BaseSI->getPointerAddressSpace(), CostKind, OpInfo); + } + return VecStCost + CommonCost; }; SmallVector PointerOps(VL.size()); for (auto [I, V] : enumerate(VL)) { @@ -12398,7 +12436,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) { bool IsReverseOrder = isReverseOrder(E->ReorderIndices); auto FinalShuffle = [&](Value *V, const TreeEntry *E, VectorType *VecTy) { ShuffleInstructionBuilder ShuffleBuilder(ScalarTy, Builder, *this); - if (E->getOpcode() == Instruction::Store) { + if (E->getOpcode() == Instruction::Store && + E->State == TreeEntry::Vectorize) { ArrayRef Mask = ArrayRef(reinterpret_cast(E->ReorderIndices.begin()), E->ReorderIndices.size()); @@ -12986,8 +13025,27 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) { VecValue = FinalShuffle(VecValue, E, VecTy); Value *Ptr = SI->getPointerOperand(); - StoreInst *ST = - Builder.CreateAlignedStore(VecValue, Ptr, SI->getAlign()); + Instruction *ST; + if (E->State == TreeEntry::Vectorize) { + ST = Builder.CreateAlignedStore(VecValue, Ptr, SI->getAlign()); + } else { + assert(E->State == TreeEntry::StridedVectorize && + "Expected either strided or conseutive stores."); + Align CommonAlignment = computeCommonAlignment(E->Scalars); + Type *StrideTy = DL->getIndexType(SI->getPointerOperandType()); + auto *Inst = Builder.CreateIntrinsic( + Intrinsic::experimental_vp_strided_store, + {VecTy, Ptr->getType(), StrideTy}, + {VecValue, Ptr, + ConstantInt::get( + StrideTy, -static_cast(DL->getTypeAllocSize(ScalarTy))), + Builder.getAllOnesMask(VecTy->getElementCount()), + Builder.getInt32(E->Scalars.size())}); + Inst->addParamAttr( + /*ArgNo=*/1, + Attribute::getWithAlignment(Inst->getContext(), CommonAlignment)); + ST = Inst; + } Value *V = propagateMetadata(ST, E->Scalars); diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-stores-vectorized.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-stores-vectorized.ll index 0dfa45da9d87f4..56e8829b0ec68b 100644 --- a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-stores-vectorized.ll +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-stores-vectorized.ll @@ -4,33 +4,12 @@ define void @store_reverse(ptr %p3) { ; CHECK-LABEL: @store_reverse( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[P3:%.*]], align 8 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 8 -; CHECK-NEXT: [[TMP1:%.*]] = load 
i64, ptr [[ARRAYIDX1]], align 8 -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[TMP0]], [[TMP1]] -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 7 -; CHECK-NEXT: store i64 [[SHL]], ptr [[ARRAYIDX2]], align 8 -; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 1 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8 -; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 9 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[ARRAYIDX4]], align 8 -; CHECK-NEXT: [[SHL5:%.*]] = shl i64 [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 6 -; CHECK-NEXT: store i64 [[SHL5]], ptr [[ARRAYIDX6]], align 8 -; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 2 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr [[ARRAYIDX7]], align 8 -; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 10 -; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[ARRAYIDX8]], align 8 -; CHECK-NEXT: [[SHL9:%.*]] = shl i64 [[TMP4]], [[TMP5]] -; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 5 -; CHECK-NEXT: store i64 [[SHL9]], ptr [[ARRAYIDX10]], align 8 -; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 3 -; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[ARRAYIDX11]], align 8 -; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 11 -; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[ARRAYIDX12]], align 8 -; CHECK-NEXT: [[SHL13:%.*]] = shl i64 [[TMP6]], [[TMP7]] +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[P3:%.*]], i64 8 ; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i64, ptr [[P3]], i64 4 -; CHECK-NEXT: store i64 [[SHL13]], ptr [[ARRAYIDX14]], align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr [[P3]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr [[ARRAYIDX1]], align 8 +; CHECK-NEXT: [[TMP2:%.*]] = shl <4 x i64> [[TMP0]], [[TMP1]] +; CHECK-NEXT: call void @llvm.experimental.vp.strided.store.v4i64.p0.i64(<4 x i64> [[TMP2]], ptr align 8 [[ARRAYIDX14]], i64 -8, <4 x i1> , i32 4) ; CHECK-NEXT: ret void ; entry: From 576261ac8f803e5142fd8634805e48d0063de4e1 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Wed, 1 May 2024 07:34:06 -0400 Subject: [PATCH 04/48] [SLP]Improve reordering for consts, splats and ops from same nodes + improved analysis. Improved detection of const/splat candidates, their matching and analysis of instructions from same nodes. 
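
For illustration only (a hypothetical function, not one of the tests touched by
this patch), the improved reordering targets cases like:

  define <2 x i32> @reorder_consts(i32 %a0, i32 %a1) {
    ; Both adds are commutative, but the constant sits on a different
    ; side in each lane.
    %add0 = add i32 %a0, 7
    %add1 = add i32 5, %a1
    ; Build-vector sequence that seeds the SLP tree.
    %v0 = insertelement <2 x i32> poison, i32 %add0, i32 0
    %v1 = insertelement <2 x i32> %v0, i32 %add1, i32 1
    ret <2 x i32> %v1
  }

Here the operands of one lane can be swapped so that %a0/%a1 form one vector
operand and <i32 7, i32 5> becomes a plain constant vector operand, rather than
gathering a mix of scalars and constants.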
Metric: size..text Program size..text results results0 diff results results0 diff test-suite :: MultiSource/Benchmarks/DOE-ProxyApps-C++/miniFE/miniFE.test 92952.00 93096.00 0.2% test-suite :: External/SPEC/CINT2006/464.h264ref/464.h264ref.test 779832.00 780136.00 0.0% test-suite :: MultiSource/Applications/JM/lencod/lencod.test 839923.00 840179.00 0.0% test-suite :: MultiSource/Applications/JM/ldecod/ldecod.test 392708.00 392740.00 0.0% test-suite :: External/SPEC/CFP2017rate/511.povray_r/511.povray_r.test 1171131.00 1171147.00 0.0% test-suite :: External/SPEC/CFP2017rate/538.imagick_r/538.imagick_r.test 1391089.00 1391073.00 -0.0% test-suite :: External/SPEC/CFP2017speed/638.imagick_s/638.imagick_s.test 1391089.00 1391073.00 -0.0% test-suite :: External/SPEC/CFP2017rate/526.blender_r/526.blender_r.test 12352780.00 12352636.00 -0.0% MultiSource/Benchmarks/DOE-ProxyApps-C++/miniFE/miniFE - small reordering External/SPEC/CINT2006/464.h264ref/464.h264ref - small better code after reordering MultiSource/Applications/JM/lencod/lencod - smaller code with less shuffles MultiSource/Applications/JM/ldecod/ldecod - same External/SPEC/CFP2017rate/511.povray_r/511.povray_r - 2 extra loads vectorized, smaller code External/SPEC/CFP2017rate/538.imagick_r/538.imagick_r - better code, size increased because of more constant vectors. External/SPEC/CFP2017speed/638.imagick_s/638.imagick_s - same External/SPEC/CFP2017rate/526.blender_r/526.blender_r - small change in the vectorized code, some code a bit better, some a bit worse. Reviewers: RKSimon Reviewed By: RKSimon Pull Request: https://github.com/llvm/llvm-project/pull/87091 --- .../Transforms/Vectorize/SLPVectorizer.cpp | 93 ++++++-- .../SLPVectorizer/RISCV/complex-loads.ll | 212 +++++++++--------- .../Transforms/SLPVectorizer/X86/addsub.ll | 12 +- .../SLPVectorizer/X86/entries-different-vf.ll | 10 +- .../X86/extract-many-users-buildvector.ll | 8 +- .../X86/extract-scalar-from-undef.ll | 4 +- .../extractelement-single-use-many-nodes.ll | 13 +- .../SLPVectorizer/X86/horizontal-minmax.ll | 2 +- .../SLPVectorizer/X86/operandorder.ll | 16 +- .../SLPVectorizer/X86/postponed_gathers.ll | 2 +- .../X86/replaced-external-in-reduction.ll | 4 +- .../vec_list_bias_external_insert_shuffled.ll | 32 ++- .../slp-umax-rdx-matcher-crash.ll | 2 +- 13 files changed, 229 insertions(+), 181 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 59aa2fa0554f31..c33d90d531bf53 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -1395,12 +1395,19 @@ class BoUpSLP { return LookAheadHeuristics::ScoreSplat; } + auto CheckSameEntryOrFail = [&]() { + if (const TreeEntry *TE1 = R.getTreeEntry(V1); + TE1 && TE1 == R.getTreeEntry(V2)) + return LookAheadHeuristics::ScoreSplatLoads; + return LookAheadHeuristics::ScoreFail; + }; + auto *LI1 = dyn_cast(V1); auto *LI2 = dyn_cast(V2); if (LI1 && LI2) { if (LI1->getParent() != LI2->getParent() || !LI1->isSimple() || !LI2->isSimple()) - return LookAheadHeuristics::ScoreFail; + return CheckSameEntryOrFail(); std::optional Dist = getPointersDiff( LI1->getType(), LI1->getPointerOperand(), LI2->getType(), @@ -1412,7 +1419,7 @@ class BoUpSLP { FixedVectorType::get(LI1->getType(), NumLanes), LI1->getAlign())) return LookAheadHeuristics::ScoreMaskedGatherCandidate; - return LookAheadHeuristics::ScoreFail; + return CheckSameEntryOrFail(); } // The distance is too large - still may be profitable to use masked // 
loads/gathers. @@ -1469,14 +1476,14 @@ class BoUpSLP { } return LookAheadHeuristics::ScoreAltOpcodes; } - return LookAheadHeuristics::ScoreFail; + return CheckSameEntryOrFail(); } auto *I1 = dyn_cast(V1); auto *I2 = dyn_cast(V2); if (I1 && I2) { if (I1->getParent() != I2->getParent()) - return LookAheadHeuristics::ScoreFail; + return CheckSameEntryOrFail(); SmallVector Ops(MainAltOps.begin(), MainAltOps.end()); Ops.push_back(I1); Ops.push_back(I2); @@ -1497,7 +1504,7 @@ class BoUpSLP { if (isa(V2)) return LookAheadHeuristics::ScoreUndef; - return LookAheadHeuristics::ScoreFail; + return CheckSameEntryOrFail(); } /// Go through the operands of \p LHS and \p RHS recursively until @@ -1660,6 +1667,7 @@ class BoUpSLP { const DataLayout &DL; ScalarEvolution &SE; const BoUpSLP &R; + const Loop *L = nullptr; /// \returns the operand data at \p OpIdx and \p Lane. OperandData &getData(unsigned OpIdx, unsigned Lane) { @@ -1828,8 +1836,9 @@ class BoUpSLP { // Track if the operand must be marked as used. If the operand is set to // Score 1 explicitly (because of non power-of-2 unique scalars, we may // want to reestimate the operands again on the following iterations). - bool IsUsed = - RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant; + bool IsUsed = RMode == ReorderingMode::Splat || + RMode == ReorderingMode::Constant || + RMode == ReorderingMode::Load; // Iterate through all unused operands and look for the best. for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { // Get the operand at Idx and Lane. @@ -1850,23 +1859,44 @@ class BoUpSLP { // Look for an operand that matches the current mode. switch (RMode) { case ReorderingMode::Load: - case ReorderingMode::Constant: case ReorderingMode::Opcode: { bool LeftToRight = Lane > LastLane; Value *OpLeft = (LeftToRight) ? OpLastLane : Op; Value *OpRight = (LeftToRight) ? Op : OpLastLane; int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane, OpIdx, Idx, IsUsed); - if (Score > static_cast(BestOp.Score)) { + if (Score > static_cast(BestOp.Score) || + (Score > 0 && Score == static_cast(BestOp.Score) && + Idx == OpIdx)) { BestOp.Idx = Idx; BestOp.Score = Score; BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score; } break; } + case ReorderingMode::Constant: + if (isa(Op) || + (!BestOp.Score && L && L->isLoopInvariant(Op))) { + BestOp.Idx = Idx; + if (isa(Op)) { + BestOp.Score = LookAheadHeuristics::ScoreConstants; + BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = + LookAheadHeuristics::ScoreConstants; + } + if (isa(Op) || !isa(Op)) + IsUsed = false; + } + break; case ReorderingMode::Splat: - if (Op == OpLastLane) + if (Op == OpLastLane || (!BestOp.Score && isa(Op))) { + IsUsed = Op == OpLastLane; + if (Op == OpLastLane) { + BestOp.Score = LookAheadHeuristics::ScoreSplat; + BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = + LookAheadHeuristics::ScoreSplat; + } BestOp.Idx = Idx; + } break; case ReorderingMode::Failed: llvm_unreachable("Not expected Failed reordering mode."); @@ -2059,10 +2089,12 @@ class BoUpSLP { void clear() { OpsVec.clear(); } /// \Returns true if there are enough operands identical to \p Op to fill - /// the whole vector. + /// the whole vector (it is mixed with constants or loop invariant values). /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow. 
bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) { bool OpAPO = getData(OpIdx, Lane).APO; + bool IsInvariant = L && L->isLoopInvariant(Op); + unsigned Cnt = 0; for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) { if (Ln == Lane) continue; @@ -2072,22 +2104,51 @@ class BoUpSLP { OperandData &Data = getData(OpI, Ln); if (Data.APO != OpAPO || Data.IsUsed) continue; - if (Data.V == Op) { + Value *OpILane = getValue(OpI, Lane); + bool IsConstantOp = isa(OpILane); + // Consider the broadcast candidate if: + // 1. Same value is found in one of the operands. + if (Data.V == Op || + // 2. The operand in the given lane is not constant but there is a + // constant operand in another lane (which can be moved to the + // given lane). In this case we can represent it as a simple + // permutation of constant and broadcast. + (!IsConstantOp && + ((Lns > 2 && isa(Data.V)) || + // 2.1. If we have only 2 lanes, need to check that value in the + // next lane does not build same opcode sequence. + (Lns == 2 && + !getSameOpcode({Op, getValue((OpI + 1) % OpE, Ln)}, TLI) + .getOpcode() && + isa(Data.V)))) || + // 3. The operand in the current lane is loop invariant (can be + // hoisted out) and another operand is also a loop invariant + // (though not a constant). In this case the whole vector can be + // hoisted out. + // FIXME: need to teach the cost model about this case for better + // estimation. + (IsInvariant && !isa(Data.V) && + !getSameOpcode({Op, Data.V}, TLI).getOpcode() && + L->isLoopInvariant(Data.V))) { FoundCandidate = true; - Data.IsUsed = true; + Data.IsUsed = Data.V == Op; + if (Data.V == Op) + ++Cnt; break; } } if (!FoundCandidate) return false; } - return true; + return getNumLanes() == 2 || Cnt > 1; } public: /// Initialize with all the operands of the instruction vector \p RootVL. VLOperands(ArrayRef RootVL, const BoUpSLP &R) - : TLI(*R.TLI), DL(*R.DL), SE(*R.SE), R(R) { + : TLI(*R.TLI), DL(*R.DL), SE(*R.SE), R(R), + L(R.LI->getLoopFor( + (cast(RootVL.front())->getParent()))) { // Append all the operands of RootVL. appendOperandsOfVL(RootVL); } @@ -2219,8 +2280,6 @@ class BoUpSLP { // getBestOperand(). swap(OpIdx, *BestIdx, Lane); } else { - // We failed to find a best operand, set mode to 'Failed'. - ReorderingModes[OpIdx] = ReorderingMode::Failed; // Enable the second pass. 
StrategyFailed = true; } diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll index d87bdfe2689916..aa9a070a794509 100644 --- a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll @@ -37,10 +37,10 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4 ; CHECK-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4 ; CHECK-NEXT: [[TMP15:%.*]] = load <2 x i8>, ptr [[ADD_PTR_1]], align 1 -; CHECK-NEXT: [[TMP101:%.*]] = zext <2 x i8> [[TMP15]] to <2 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = zext <2 x i8> [[TMP15]] to <2 x i32> ; CHECK-NEXT: [[TMP17:%.*]] = load <2 x i8>, ptr [[ADD_PTR64_1]], align 1 ; CHECK-NEXT: [[TMP18:%.*]] = zext <2 x i8> [[TMP17]] to <2 x i32> -; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP101]], [[TMP18]] +; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP16]], [[TMP18]] ; CHECK-NEXT: [[TMP20:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_2]], align 1 ; CHECK-NEXT: [[TMP21:%.*]] = zext <2 x i8> [[TMP20]] to <2 x i32> ; CHECK-NEXT: [[TMP22:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_2]], align 1 @@ -64,15 +64,15 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[TMP36:%.*]] = sub <2 x i32> [[TMP33]], [[TMP35]] ; CHECK-NEXT: [[TMP37:%.*]] = shl <2 x i32> [[TMP36]], ; CHECK-NEXT: [[TMP38:%.*]] = add <2 x i32> [[TMP37]], [[TMP31]] -; CHECK-NEXT: [[TMP39:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0 -; CHECK-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1 -; CHECK-NEXT: [[ADD44_2:%.*]] = add i32 [[TMP40]], [[TMP39]] -; CHECK-NEXT: [[SUB45_2:%.*]] = sub i32 [[TMP39]], [[TMP40]] -; CHECK-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP38]], i32 0 -; CHECK-NEXT: [[TMP42:%.*]] = extractelement <2 x i32> [[TMP38]], i32 1 -; CHECK-NEXT: [[CONV:%.*]] = add i32 [[TMP42]], [[TMP41]] -; CHECK-NEXT: [[SUB47_2:%.*]] = sub i32 [[TMP41]], [[TMP42]] -; CHECK-NEXT: [[ADD48_2:%.*]] = add i32 [[CONV]], [[ADD44_2]] +; CHECK-NEXT: [[ADD44_2:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0 +; CHECK-NEXT: [[CONV:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1 +; CHECK-NEXT: [[ADD44_3:%.*]] = add i32 [[CONV]], [[ADD44_2]] +; CHECK-NEXT: [[SUB51_2:%.*]] = sub i32 [[ADD44_2]], [[CONV]] +; CHECK-NEXT: [[SUB45_2:%.*]] = extractelement <2 x i32> [[TMP38]], i32 0 +; CHECK-NEXT: [[SUB47_2:%.*]] = extractelement <2 x i32> [[TMP38]], i32 1 +; CHECK-NEXT: [[ADD46_2:%.*]] = add i32 [[SUB47_2]], [[SUB45_2]] +; CHECK-NEXT: [[SUB59_2:%.*]] = sub i32 [[SUB45_2]], [[SUB47_2]] +; CHECK-NEXT: [[ADD48_2:%.*]] = add i32 [[ADD46_2]], [[ADD44_3]] ; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr null, align 1 ; CHECK-NEXT: [[ARRAYIDX20_3:%.*]] = getelementptr i8, ptr null, i64 2 ; CHECK-NEXT: [[ARRAYIDX22_3:%.*]] = getelementptr i8, ptr null, i64 2 @@ -104,10 +104,10 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[TMP69:%.*]] = sub <2 x i32> [[TMP66]], [[TMP68]] ; CHECK-NEXT: [[TMP70:%.*]] = shl <2 x i32> [[TMP69]], ; CHECK-NEXT: [[TMP71:%.*]] = add <2 x i32> [[TMP70]], [[TMP63]] -; CHECK-NEXT: [[TMP16:%.*]] = add <2 x i32> [[TMP71]], [[TMP58]] -; CHECK-NEXT: [[TMP73:%.*]] = sub <2 x i32> [[TMP58]], [[TMP71]] -; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0 -; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i32> [[TMP16]], 
i32 1 +; CHECK-NEXT: [[TMP72:%.*]] = add <2 x i32> [[TMP71]], [[TMP58]] +; CHECK-NEXT: [[TMP190:%.*]] = sub <2 x i32> [[TMP58]], [[TMP71]] +; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i32> [[TMP72]], i32 0 +; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i32> [[TMP72]], i32 1 ; CHECK-NEXT: [[ADD48_3:%.*]] = add i32 [[TMP74]], [[TMP75]] ; CHECK-NEXT: [[ADD94:%.*]] = add i32 [[ADD48_3]], [[ADD48_2]] ; CHECK-NEXT: [[SUB102:%.*]] = sub i32 [[ADD48_2]], [[ADD48_3]] @@ -115,19 +115,19 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[SHR_I49_2:%.*]] = lshr i32 [[TMP79]], 15 ; CHECK-NEXT: [[AND_I50_2:%.*]] = and i32 [[SHR_I49_2]], 65537 ; CHECK-NEXT: [[MUL_I51_2:%.*]] = mul i32 [[AND_I50_2]], 65535 -; CHECK-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[CONV]], 15 +; CHECK-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[ADD46_2]], 15 ; CHECK-NEXT: [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537 ; CHECK-NEXT: [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535 -; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i32> [[TMP101]], i32 0 -; CHECK-NEXT: [[SHR_I49_1:%.*]] = lshr i32 [[TMP107]], 15 -; CHECK-NEXT: [[AND_I50_1:%.*]] = and i32 [[SHR_I49_1]], 65537 -; CHECK-NEXT: [[MUL_I51_1:%.*]] = mul i32 [[AND_I50_1]], 65535 +; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0 +; CHECK-NEXT: [[SHR_I49_5:%.*]] = lshr i32 [[TMP107]], 15 +; CHECK-NEXT: [[AND_I50_5:%.*]] = and i32 [[SHR_I49_5]], 65537 +; CHECK-NEXT: [[MUL_I51_5:%.*]] = mul i32 [[AND_I50_5]], 65535 ; CHECK-NEXT: [[SHR_I49_4:%.*]] = lshr i32 [[CONV_1]], 15 ; CHECK-NEXT: [[AND_I50_4:%.*]] = and i32 [[SHR_I49_4]], 65537 ; CHECK-NEXT: [[MUL_I51_4:%.*]] = mul i32 [[AND_I50_4]], 65535 -; CHECK-NEXT: [[SHR_I49_5:%.*]] = lshr i32 [[CONV1]], 15 -; CHECK-NEXT: [[AND_I50_5:%.*]] = and i32 [[SHR_I49_5]], 65537 -; CHECK-NEXT: [[MUL_I51_5:%.*]] = mul i32 [[AND_I50_5]], 65535 +; CHECK-NEXT: [[SHR_I49_6:%.*]] = lshr i32 [[CONV1]], 15 +; CHECK-NEXT: [[AND_I50_6:%.*]] = and i32 [[SHR_I49_6]], 65537 +; CHECK-NEXT: [[MUL_I51_6:%.*]] = mul i32 [[AND_I50_6]], 65535 ; CHECK-NEXT: [[TMP78:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1 ; CHECK-NEXT: [[TMP102:%.*]] = zext <2 x i8> [[TMP78]] to <2 x i32> ; CHECK-NEXT: [[TMP80:%.*]] = insertelement <2 x ptr> [[TMP5]], ptr [[ARRAYIDX22]], i32 1 @@ -151,21 +151,21 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[TMP98:%.*]] = sub <2 x i32> [[TMP97]], [[TMP90]] ; CHECK-NEXT: [[TMP104:%.*]] = add <2 x i32> [[TMP96]], [[TMP98]] ; CHECK-NEXT: [[TMP100:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV1]], i32 0 -; CHECK-NEXT: [[TMP103:%.*]] = sub <2 x i32> [[TMP100]], [[TMP82]] -; CHECK-NEXT: [[TMP200:%.*]] = add <2 x i32> [[TMP88]], [[TMP103]] +; CHECK-NEXT: [[TMP101:%.*]] = sub <2 x i32> [[TMP100]], [[TMP82]] +; CHECK-NEXT: [[TMP200:%.*]] = add <2 x i32> [[TMP88]], [[TMP101]] ; CHECK-NEXT: [[TMP128:%.*]] = shufflevector <2 x i32> [[TMP104]], <2 x i32> [[TMP200]], <2 x i32> -; CHECK-NEXT: [[TMP165:%.*]] = add <2 x i32> [[TMP104]], [[TMP200]] +; CHECK-NEXT: [[TMP106:%.*]] = add <2 x i32> [[TMP104]], [[TMP200]] ; CHECK-NEXT: [[TMP105:%.*]] = sub <2 x i32> [[TMP200]], [[TMP104]] -; CHECK-NEXT: [[TMP238:%.*]] = extractelement <2 x i32> [[TMP165]], i32 0 -; CHECK-NEXT: [[TMP143:%.*]] = extractelement <2 x i32> [[TMP165]], i32 1 -; CHECK-NEXT: [[ADD48:%.*]] = add i32 [[TMP143]], [[TMP238]] -; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i32> [[TMP105]], i32 1 -; CHECK-NEXT: [[SHR_I59:%.*]] = lshr i32 [[TMP143]], 15 
-; CHECK-NEXT: [[AND_I60:%.*]] = and i32 [[SHR_I59]], 65537 -; CHECK-NEXT: [[MUL_I61:%.*]] = mul i32 [[AND_I60]], 65535 +; CHECK-NEXT: [[TMP238:%.*]] = extractelement <2 x i32> [[TMP106]], i32 0 +; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i32> [[TMP106]], i32 1 +; CHECK-NEXT: [[ADD48:%.*]] = add i32 [[TMP108]], [[TMP238]] +; CHECK-NEXT: [[TMP142:%.*]] = extractelement <2 x i32> [[TMP105]], i32 1 ; CHECK-NEXT: [[SHR_I59_1:%.*]] = lshr i32 [[TMP108]], 15 ; CHECK-NEXT: [[AND_I60_1:%.*]] = and i32 [[SHR_I59_1]], 65537 ; CHECK-NEXT: [[MUL_I61_1:%.*]] = mul i32 [[AND_I60_1]], 65535 +; CHECK-NEXT: [[SHR_I59_4:%.*]] = lshr i32 [[TMP142]], 15 +; CHECK-NEXT: [[AND_I60_4:%.*]] = and i32 [[SHR_I59_4]], 65537 +; CHECK-NEXT: [[MUL_I61_4:%.*]] = mul i32 [[AND_I60_4]], 65535 ; CHECK-NEXT: [[TMP109:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1 ; CHECK-NEXT: [[TMP110:%.*]] = zext <2 x i8> [[TMP109]] to <2 x i32> ; CHECK-NEXT: [[TMP111:%.*]] = insertelement <2 x i8> poison, i8 [[TMP12]], i32 0 @@ -185,7 +185,7 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[TMP125:%.*]] = shl <2 x i32> [[TMP124]], ; CHECK-NEXT: [[TMP126:%.*]] = getelementptr i8, <2 x ptr> [[TMP120]], <2 x i64> ; CHECK-NEXT: [[TMP127:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP126]], i32 1, <2 x i1> , <2 x i8> poison) -; CHECK-NEXT: [[TMP153:%.*]] = zext <2 x i8> [[TMP127]] to <2 x i32> +; CHECK-NEXT: [[TMP144:%.*]] = zext <2 x i8> [[TMP127]] to <2 x i32> ; CHECK-NEXT: [[TMP129:%.*]] = getelementptr i8, <2 x ptr> [[TMP115]], <2 x i64> ; CHECK-NEXT: [[TMP130:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP129]], i32 1, <2 x i1> , <2 x i8> poison) ; CHECK-NEXT: [[TMP131:%.*]] = zext <2 x i8> [[TMP130]] to <2 x i32> @@ -195,15 +195,15 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[TMP135:%.*]] = sub <2 x i32> [[TMP131]], [[TMP134]] ; CHECK-NEXT: [[TMP136:%.*]] = shl <2 x i32> [[TMP135]], ; CHECK-NEXT: [[TMP137:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV33_1]], i32 1 -; CHECK-NEXT: [[TMP138:%.*]] = sub <2 x i32> [[TMP137]], [[TMP153]] +; CHECK-NEXT: [[TMP138:%.*]] = sub <2 x i32> [[TMP137]], [[TMP144]] ; CHECK-NEXT: [[TMP139:%.*]] = add <2 x i32> [[TMP136]], [[TMP138]] ; CHECK-NEXT: [[TMP140:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV_1]], i32 0 ; CHECK-NEXT: [[TMP141:%.*]] = sub <2 x i32> [[TMP140]], [[TMP113]] -; CHECK-NEXT: [[TMP142:%.*]] = add <2 x i32> [[TMP125]], [[TMP141]] -; CHECK-NEXT: [[TMP257:%.*]] = add <2 x i32> [[TMP139]], [[TMP142]] -; CHECK-NEXT: [[TMP144:%.*]] = sub <2 x i32> [[TMP142]], [[TMP139]] -; CHECK-NEXT: [[TMP145:%.*]] = extractelement <2 x i32> [[TMP257]], i32 0 -; CHECK-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP257]], i32 1 +; CHECK-NEXT: [[TMP155:%.*]] = add <2 x i32> [[TMP125]], [[TMP141]] +; CHECK-NEXT: [[TMP143:%.*]] = add <2 x i32> [[TMP139]], [[TMP155]] +; CHECK-NEXT: [[TMP189:%.*]] = sub <2 x i32> [[TMP155]], [[TMP139]] +; CHECK-NEXT: [[TMP145:%.*]] = extractelement <2 x i32> [[TMP143]], i32 0 +; CHECK-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP143]], i32 1 ; CHECK-NEXT: [[ADD48_1:%.*]] = add i32 [[TMP146]], [[TMP145]] ; CHECK-NEXT: [[SHR_I54:%.*]] = lshr i32 [[TMP146]], 15 ; CHECK-NEXT: [[AND_I55:%.*]] = and i32 [[SHR_I54]], 65537 @@ -220,37 +220,37 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[MUL_I51_2]], [[ADD103]] ; CHECK-NEXT: 
[[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP79]] ; CHECK-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I51_3]], [[ADD105]] -; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[CONV]] +; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[ADD46_2]] ; CHECK-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56]], [[SUB104]] ; CHECK-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP146]] -; CHECK-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I61]], [[SUB106]] -; CHECK-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP143]] +; CHECK-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I61_1]], [[SUB106]] +; CHECK-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP108]] ; CHECK-NEXT: [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]] ; CHECK-NEXT: [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]] ; CHECK-NEXT: [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]] ; CHECK-NEXT: [[TMP150:%.*]] = shufflevector <2 x i32> [[TMP105]], <2 x i32> poison, <2 x i32> -; CHECK-NEXT: [[TMP151:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB47_2]], i32 1 -; CHECK-NEXT: [[TMP152:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB45_2]], i32 1 -; CHECK-NEXT: [[TMP163:%.*]] = add <2 x i32> [[TMP151]], [[TMP152]] -; CHECK-NEXT: [[TMP154:%.*]] = shufflevector <2 x i32> [[TMP144]], <2 x i32> [[TMP73]], <2 x i32> -; CHECK-NEXT: [[TMP155:%.*]] = shufflevector <2 x i32> [[TMP144]], <2 x i32> [[TMP73]], <2 x i32> -; CHECK-NEXT: [[TMP156:%.*]] = add <2 x i32> [[TMP154]], [[TMP155]] -; CHECK-NEXT: [[TMP157:%.*]] = extractelement <2 x i32> [[TMP163]], i32 1 +; CHECK-NEXT: [[TMP151:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB59_2]], i32 1 +; CHECK-NEXT: [[TMP152:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB51_2]], i32 1 +; CHECK-NEXT: [[TMP153:%.*]] = add <2 x i32> [[TMP151]], [[TMP152]] +; CHECK-NEXT: [[TMP154:%.*]] = shufflevector <2 x i32> [[TMP189]], <2 x i32> [[TMP190]], <2 x i32> +; CHECK-NEXT: [[TMP184:%.*]] = shufflevector <2 x i32> [[TMP189]], <2 x i32> [[TMP190]], <2 x i32> +; CHECK-NEXT: [[TMP156:%.*]] = add <2 x i32> [[TMP154]], [[TMP184]] +; CHECK-NEXT: [[TMP157:%.*]] = extractelement <2 x i32> [[TMP153]], i32 1 ; CHECK-NEXT: [[TMP158:%.*]] = extractelement <2 x i32> [[TMP156]], i32 1 -; CHECK-NEXT: [[TMP159:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP163]], <2 x i32> +; CHECK-NEXT: [[TMP159:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> ; CHECK-NEXT: [[ADD78_2:%.*]] = add i32 [[TMP158]], [[TMP157]] -; CHECK-NEXT: [[TMP160:%.*]] = extractelement <2 x i32> [[TMP163]], i32 0 +; CHECK-NEXT: [[TMP160:%.*]] = extractelement <2 x i32> [[TMP153]], i32 0 ; CHECK-NEXT: [[TMP161:%.*]] = extractelement <2 x i32> [[TMP156]], i32 0 -; CHECK-NEXT: [[TMP162:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP163]], <2 x i32> +; CHECK-NEXT: [[TMP162:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> ; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[TMP161]], [[TMP160]] -; CHECK-NEXT: [[TMP164:%.*]] = sub <2 x i32> [[TMP163]], [[TMP156]] -; CHECK-NEXT: [[TMP173:%.*]] = extractelement <2 x i32> [[TMP164]], i32 0 -; CHECK-NEXT: [[TMP174:%.*]] = extractelement <2 x i32> [[TMP164]], i32 1 -; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[TMP174]], [[TMP173]] -; CHECK-NEXT: [[SUB106_1:%.*]] = sub i32 [[TMP173]], [[TMP174]] -; CHECK-NEXT: [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_1]], [[ADD105_1]] +; CHECK-NEXT: [[TMP163:%.*]] = sub <2 x i32> [[TMP153]], [[TMP156]] +; CHECK-NEXT: [[TMP164:%.*]] = extractelement <2 x i32> [[TMP163]], i32 0 +; CHECK-NEXT: [[TMP165:%.*]] = extractelement <2 x i32> 
[[TMP163]], i32 1 +; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[TMP165]], [[TMP164]] +; CHECK-NEXT: [[SUB106_1:%.*]] = sub i32 [[TMP164]], [[TMP165]] +; CHECK-NEXT: [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_5]], [[ADD105_1]] ; CHECK-NEXT: [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP107]] -; CHECK-NEXT: [[TMP166:%.*]] = shufflevector <2 x i32> [[TMP101]], <2 x i32> [[TMP144]], <2 x i32> +; CHECK-NEXT: [[TMP166:%.*]] = shufflevector <2 x i32> [[TMP16]], <2 x i32> [[TMP189]], <2 x i32> ; CHECK-NEXT: [[TMP167:%.*]] = lshr <2 x i32> [[TMP166]], ; CHECK-NEXT: [[TMP168:%.*]] = and <2 x i32> [[TMP167]], ; CHECK-NEXT: [[TMP169:%.*]] = mul <2 x i32> [[TMP168]], @@ -263,44 +263,44 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[TMP283:%.*]] = shufflevector <2 x i32> [[TMP282]], <2 x i32> [[TMP211]], <2 x i32> ; CHECK-NEXT: [[TMP177:%.*]] = add <2 x i32> [[TMP169]], [[TMP283]] ; CHECK-NEXT: [[TMP178:%.*]] = xor <2 x i32> [[TMP177]], [[TMP166]] -; CHECK-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_1]], [[SUB106_1]] -; CHECK-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[TMP108]] +; CHECK-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_4]], [[SUB106_1]] +; CHECK-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[TMP142]] ; CHECK-NEXT: [[ADD108_1:%.*]] = add i32 [[XOR_I53_1]], [[ADD113]] ; CHECK-NEXT: [[TMP179:%.*]] = extractelement <2 x i32> [[TMP178]], i32 0 ; CHECK-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP179]] ; CHECK-NEXT: [[TMP180:%.*]] = extractelement <2 x i32> [[TMP178]], i32 1 ; CHECK-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP180]] ; CHECK-NEXT: [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]] -; CHECK-NEXT: [[TMP181:%.*]] = shufflevector <2 x i32> [[TMP165]], <2 x i32> poison, <2 x i32> -; CHECK-NEXT: [[TMP182:%.*]] = insertelement <2 x i32> [[TMP181]], i32 [[ADD44_2]], i32 0 -; CHECK-NEXT: [[TMP183:%.*]] = insertelement <2 x i32> [[TMP165]], i32 [[CONV]], i32 0 -; CHECK-NEXT: [[TMP184:%.*]] = sub <2 x i32> [[TMP182]], [[TMP183]] -; CHECK-NEXT: [[TMP185:%.*]] = shufflevector <2 x i32> [[TMP16]], <2 x i32> [[TMP257]], <2 x i32> -; CHECK-NEXT: [[TMP186:%.*]] = shufflevector <2 x i32> [[TMP16]], <2 x i32> [[TMP257]], <2 x i32> +; CHECK-NEXT: [[TMP181:%.*]] = shufflevector <2 x i32> [[TMP106]], <2 x i32> poison, <2 x i32> +; CHECK-NEXT: [[TMP182:%.*]] = insertelement <2 x i32> [[TMP181]], i32 [[ADD44_3]], i32 0 +; CHECK-NEXT: [[TMP183:%.*]] = insertelement <2 x i32> [[TMP106]], i32 [[ADD46_2]], i32 0 +; CHECK-NEXT: [[TMP195:%.*]] = sub <2 x i32> [[TMP182]], [[TMP183]] +; CHECK-NEXT: [[TMP185:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> +; CHECK-NEXT: [[TMP186:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> ; CHECK-NEXT: [[TMP187:%.*]] = sub <2 x i32> [[TMP185]], [[TMP186]] -; CHECK-NEXT: [[TMP188:%.*]] = extractelement <2 x i32> [[TMP184]], i32 0 -; CHECK-NEXT: [[TMP189:%.*]] = extractelement <2 x i32> [[TMP187]], i32 0 -; CHECK-NEXT: [[TMP190:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP184]], <2 x i32> -; CHECK-NEXT: [[ADD94_4:%.*]] = add i32 [[TMP189]], [[TMP188]] -; CHECK-NEXT: [[TMP191:%.*]] = extractelement <2 x i32> [[TMP184]], i32 1 +; CHECK-NEXT: [[TMP188:%.*]] = extractelement <2 x i32> [[TMP195]], i32 0 +; CHECK-NEXT: [[TMP196:%.*]] = extractelement <2 x i32> [[TMP187]], i32 0 +; CHECK-NEXT: [[TMP199:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP195]], <2 x i32> +; CHECK-NEXT: [[ADD94_4:%.*]] = add i32 [[TMP196]], [[TMP188]] +; 
CHECK-NEXT: [[TMP191:%.*]] = extractelement <2 x i32> [[TMP195]], i32 1 ; CHECK-NEXT: [[TMP192:%.*]] = extractelement <2 x i32> [[TMP187]], i32 1 -; CHECK-NEXT: [[TMP193:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP184]], <2 x i32> +; CHECK-NEXT: [[TMP193:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP195]], <2 x i32> ; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[TMP192]], [[TMP191]] -; CHECK-NEXT: [[TMP194:%.*]] = sub <2 x i32> [[TMP184]], [[TMP187]] +; CHECK-NEXT: [[TMP194:%.*]] = sub <2 x i32> [[TMP195]], [[TMP187]] ; CHECK-NEXT: [[TMP244:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0 ; CHECK-NEXT: [[TMP245:%.*]] = shufflevector <2 x i32> [[TMP244]], <2 x i32> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP197:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_4]], i32 0 ; CHECK-NEXT: [[TMP198:%.*]] = shufflevector <2 x i32> [[TMP197]], <2 x i32> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP246:%.*]] = add <2 x i32> [[TMP245]], [[TMP198]] -; CHECK-NEXT: [[TMP247:%.*]] = sub <2 x i32> [[TMP245]], [[TMP198]] -; CHECK-NEXT: [[TMP248:%.*]] = shufflevector <2 x i32> [[TMP246]], <2 x i32> [[TMP247]], <2 x i32> +; CHECK-NEXT: [[TMP216:%.*]] = add <2 x i32> [[TMP245]], [[TMP198]] +; CHECK-NEXT: [[TMP210:%.*]] = sub <2 x i32> [[TMP245]], [[TMP198]] +; CHECK-NEXT: [[TMP221:%.*]] = shufflevector <2 x i32> [[TMP216]], <2 x i32> [[TMP210]], <2 x i32> ; CHECK-NEXT: [[TMP215:%.*]] = extractelement <2 x i32> [[TMP194]], i32 0 ; CHECK-NEXT: [[TMP203:%.*]] = extractelement <2 x i32> [[TMP194]], i32 1 ; CHECK-NEXT: [[ADD105_2:%.*]] = add i32 [[TMP215]], [[TMP203]] ; CHECK-NEXT: [[SUB106_2:%.*]] = sub i32 [[TMP203]], [[TMP215]] ; CHECK-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_4]], [[ADD105_2]] ; CHECK-NEXT: [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]] -; CHECK-NEXT: [[TMP266:%.*]] = add <2 x i32> [[TMP149]], [[TMP248]] +; CHECK-NEXT: [[TMP266:%.*]] = add <2 x i32> [[TMP149]], [[TMP221]] ; CHECK-NEXT: [[TMP267:%.*]] = xor <2 x i32> [[TMP266]], [[TMP110]] ; CHECK-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[TMP238]], 15 ; CHECK-NEXT: [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537 @@ -313,48 +313,48 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt ; CHECK-NEXT: [[TMP207:%.*]] = extractelement <2 x i32> [[TMP267]], i32 1 ; CHECK-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP207]] ; CHECK-NEXT: [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]] -; CHECK-NEXT: [[TMP221:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB45_2]], i32 0 -; CHECK-NEXT: [[TMP222:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB47_2]], i32 0 -; CHECK-NEXT: [[TMP210:%.*]] = sub <2 x i32> [[TMP221]], [[TMP222]] -; CHECK-NEXT: [[TMP225:%.*]] = shufflevector <2 x i32> [[TMP73]], <2 x i32> [[TMP144]], <2 x i32> -; CHECK-NEXT: [[TMP212:%.*]] = shufflevector <2 x i32> [[TMP73]], <2 x i32> [[TMP144]], <2 x i32> -; CHECK-NEXT: [[TMP226:%.*]] = sub <2 x i32> [[TMP225]], [[TMP212]] -; CHECK-NEXT: [[TMP214:%.*]] = extractelement <2 x i32> [[TMP210]], i32 0 -; CHECK-NEXT: [[TMP227:%.*]] = extractelement <2 x i32> [[TMP226]], i32 0 -; CHECK-NEXT: [[TMP216:%.*]] = shufflevector <2 x i32> [[TMP226]], <2 x i32> [[TMP210]], <2 x i32> -; CHECK-NEXT: [[ADD94_3:%.*]] = add i32 [[TMP227]], [[TMP214]] -; CHECK-NEXT: [[TMP217:%.*]] = extractelement <2 x i32> [[TMP210]], i32 1 -; CHECK-NEXT: [[TMP218:%.*]] = extractelement <2 x i32> [[TMP226]], i32 1 -; CHECK-NEXT: [[TMP219:%.*]] = shufflevector <2 x i32> [[TMP226]], <2 x i32> [[TMP210]], <2 x i32> -; 
CHECK-NEXT: [[SUB59:%.*]] = add i32 [[TMP218]], [[TMP217]] -; CHECK-NEXT: [[TMP220:%.*]] = sub <2 x i32> [[TMP210]], [[TMP226]] -; CHECK-NEXT: [[TMP274:%.*]] = insertelement <2 x i32> poison, i32 [[SUB59]], i32 0 -; CHECK-NEXT: [[TMP275:%.*]] = shufflevector <2 x i32> [[TMP274]], <2 x i32> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP222:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB51_2]], i32 0 +; CHECK-NEXT: [[TMP225:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB59_2]], i32 0 +; CHECK-NEXT: [[TMP226:%.*]] = sub <2 x i32> [[TMP222]], [[TMP225]] +; CHECK-NEXT: [[TMP227:%.*]] = shufflevector <2 x i32> [[TMP190]], <2 x i32> [[TMP189]], <2 x i32> +; CHECK-NEXT: [[TMP212:%.*]] = shufflevector <2 x i32> [[TMP190]], <2 x i32> [[TMP189]], <2 x i32> +; CHECK-NEXT: [[TMP213:%.*]] = sub <2 x i32> [[TMP227]], [[TMP212]] +; CHECK-NEXT: [[TMP214:%.*]] = extractelement <2 x i32> [[TMP226]], i32 0 +; CHECK-NEXT: [[TMP237:%.*]] = extractelement <2 x i32> [[TMP213]], i32 0 +; CHECK-NEXT: [[TMP239:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP226]], <2 x i32> +; CHECK-NEXT: [[ADD94_5:%.*]] = add i32 [[TMP237]], [[TMP214]] +; CHECK-NEXT: [[TMP217:%.*]] = extractelement <2 x i32> [[TMP226]], i32 1 +; CHECK-NEXT: [[TMP218:%.*]] = extractelement <2 x i32> [[TMP213]], i32 1 +; CHECK-NEXT: [[TMP219:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP226]], <2 x i32> +; CHECK-NEXT: [[ADD94_3:%.*]] = add i32 [[TMP218]], [[TMP217]] +; CHECK-NEXT: [[TMP240:%.*]] = sub <2 x i32> [[TMP226]], [[TMP213]] ; CHECK-NEXT: [[TMP223:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_3]], i32 0 ; CHECK-NEXT: [[TMP224:%.*]] = shufflevector <2 x i32> [[TMP223]], <2 x i32> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP276:%.*]] = add <2 x i32> [[TMP275]], [[TMP224]] -; CHECK-NEXT: [[TMP277:%.*]] = sub <2 x i32> [[TMP275]], [[TMP224]] -; CHECK-NEXT: [[TMP278:%.*]] = shufflevector <2 x i32> [[TMP276]], <2 x i32> [[TMP277]], <2 x i32> -; CHECK-NEXT: [[TMP228:%.*]] = extractelement <2 x i32> [[TMP220]], i32 0 -; CHECK-NEXT: [[TMP229:%.*]] = extractelement <2 x i32> [[TMP220]], i32 1 +; CHECK-NEXT: [[TMP241:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_5]], i32 0 +; CHECK-NEXT: [[TMP242:%.*]] = shufflevector <2 x i32> [[TMP241]], <2 x i32> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP261:%.*]] = add <2 x i32> [[TMP224]], [[TMP242]] +; CHECK-NEXT: [[TMP262:%.*]] = sub <2 x i32> [[TMP224]], [[TMP242]] +; CHECK-NEXT: [[TMP220:%.*]] = shufflevector <2 x i32> [[TMP261]], <2 x i32> [[TMP262]], <2 x i32> +; CHECK-NEXT: [[TMP228:%.*]] = extractelement <2 x i32> [[TMP240]], i32 0 +; CHECK-NEXT: [[TMP229:%.*]] = extractelement <2 x i32> [[TMP240]], i32 1 ; CHECK-NEXT: [[ADD105_3:%.*]] = add i32 [[TMP228]], [[TMP229]] ; CHECK-NEXT: [[SUB106_3:%.*]] = sub i32 [[TMP229]], [[TMP228]] -; CHECK-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_5]], [[ADD105_3]] +; CHECK-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_6]], [[ADD105_3]] ; CHECK-NEXT: [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV1]] ; CHECK-NEXT: [[TMP230:%.*]] = lshr <2 x i32> [[TMP102]], ; CHECK-NEXT: [[TMP231:%.*]] = and <2 x i32> [[TMP230]], ; CHECK-NEXT: [[TMP232:%.*]] = mul <2 x i32> [[TMP231]], -; CHECK-NEXT: [[TMP286:%.*]] = add <2 x i32> [[TMP232]], [[TMP278]] -; CHECK-NEXT: [[TMP287:%.*]] = xor <2 x i32> [[TMP286]], [[TMP102]] +; CHECK-NEXT: [[TMP233:%.*]] = add <2 x i32> [[TMP232]], [[TMP220]] +; CHECK-NEXT: [[TMP234:%.*]] = xor <2 x i32> [[TMP233]], [[TMP102]] ; CHECK-NEXT: [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 
15 ; CHECK-NEXT: [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537 ; CHECK-NEXT: [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535 ; CHECK-NEXT: [[ADD_I62_3:%.*]] = add i32 [[MUL_I61_3]], [[SUB106_3]] ; CHECK-NEXT: [[XOR_I63_3:%.*]] = xor i32 [[ADD_I62_3]], [[CONV33]] ; CHECK-NEXT: [[ADD108_3:%.*]] = add i32 [[XOR_I53_3]], [[ADD113_2]] -; CHECK-NEXT: [[TMP235:%.*]] = extractelement <2 x i32> [[TMP287]], i32 0 +; CHECK-NEXT: [[TMP235:%.*]] = extractelement <2 x i32> [[TMP234]], i32 0 ; CHECK-NEXT: [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP235]] -; CHECK-NEXT: [[TMP236:%.*]] = extractelement <2 x i32> [[TMP287]], i32 1 +; CHECK-NEXT: [[TMP236:%.*]] = extractelement <2 x i32> [[TMP234]], i32 1 ; CHECK-NEXT: [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP236]] ; CHECK-NEXT: [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[XOR_I63_3]] ; CHECK-NEXT: ret i32 [[ADD113_3]] diff --git a/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll b/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll index 94534274cab2ff..5f8941e9f88934 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll @@ -332,18 +332,14 @@ define void @reorder_alt_rightsubTree(ptr nocapture %c, ptr noalias nocapture re define void @vec_shuff_reorder() #0 { ; CHECK-LABEL: @vec_shuff_reorder( -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @fb, align 4 -; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr @fa, align 4 -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds ([4 x float], ptr @fa, i32 0, i64 1), align 4 -; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr getelementptr inbounds ([4 x float], ptr @fb, i32 0, i64 1), align 4 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr @fa, align 4 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x float>, ptr @fb, align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x float>, ptr getelementptr inbounds ([4 x float], ptr @fb, i32 0, i64 2), align 4 ; CHECK-NEXT: [[TMP6:%.*]] = load <2 x float>, ptr getelementptr inbounds ([4 x float], ptr @fa, i32 0, i64 2), align 4 -; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> poison, float [[TMP1]], i32 0 -; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[TMP7]], float [[TMP3]], i32 1 +; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <4 x i32> ; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP5]], <2 x float> poison, <4 x i32> ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> [[TMP9]], <4 x i32> -; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x float> poison, float [[TMP2]], i32 0 -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x float> [[TMP11]], float [[TMP4]], i32 1 +; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <4 x i32> ; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> poison, <4 x i32> ; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <4 x float> [[TMP12]], <4 x float> [[TMP13]], <4 x i32> ; CHECK-NEXT: [[TMP15:%.*]] = fadd <4 x float> [[TMP10]], [[TMP14]] diff --git a/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll b/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll index 610cc5bdeb3107..536526a5cfe06b 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/entries-different-vf.ll @@ -6,11 +6,11 @@ define i1 @test() { ; CHECK-SAME: () #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = shl i64 0, 0 -; CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i64> , i64 
[[TMP0]], i32 0 -; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i64> [[TMP1]], i64 0, i32 1 -; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i64> [[TMP2]], i64 0, i32 3 -; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> [[TMP3]], <8 x i64> poison, <4 x i32> -; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> , <8 x i64> [[TMP3]], <8 x i32> +; CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i64> , i64 0, i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> poison, <8 x i32> +; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i64> , i64 [[TMP0]], i32 0 +; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> [[TMP11]], i64 0, i32 1 +; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i64> [[TMP4]], <4 x i64> poison, <8 x i32> ; CHECK-NEXT: [[TMP6:%.*]] = or <8 x i64> [[TMP3]], [[TMP5]] ; CHECK-NEXT: [[TMP7:%.*]] = sub <8 x i64> [[TMP3]], [[TMP5]] ; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x i64> [[TMP6]], <8 x i64> [[TMP7]], <8 x i32> diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extract-many-users-buildvector.ll b/llvm/test/Transforms/SLPVectorizer/X86/extract-many-users-buildvector.ll index cac0491d0b6431..7ae6793fba4cd1 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/extract-many-users-buildvector.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/extract-many-users-buildvector.ll @@ -8,12 +8,10 @@ define i1 @test(float %0, double %1) { ; CHECK-NEXT: [[TMP4:%.*]] = fpext <4 x float> [[TMP3]] to <4 x double> ; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x double> , double [[TMP1]], i32 0 ; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x double> zeroinitializer, [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[TMP6]], i32 1 -; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> poison, <4 x i32> -; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x double> [[TMP8]], <4 x double> , <4 x i32> +; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> poison, <4 x i32> +; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> , <4 x i32> ; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x double> [[TMP9]], double [[TMP1]], i32 0 -; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x double> [[TMP4]], <4 x double> poison, <4 x i32> -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x double> [[TMP11]], double [[TMP7]], i32 3 +; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <4 x double> [[TMP4]], <4 x double> , <4 x i32> ; CHECK-NEXT: [[TMP13:%.*]] = fmul <4 x double> [[TMP10]], [[TMP12]] ; CHECK-NEXT: [[TMP14:%.*]] = fmul <4 x double> zeroinitializer, [[TMP4]] ; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <4 x double> [[TMP13]], <4 x double> poison, <8 x i32> diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extract-scalar-from-undef.ll b/llvm/test/Transforms/SLPVectorizer/X86/extract-scalar-from-undef.ll index dd7ba71ed67368..f1580599ba1278 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/extract-scalar-from-undef.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/extract-scalar-from-undef.ll @@ -12,8 +12,8 @@ define i64 @foo(i32 %tmp7) { ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> undef, <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i32> [[TMP4]], i32 [[TMP24]], i32 6 ; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <8 x i32> [[TMP3]], [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = add nsw <8 x i32> [[TMP3]], [[TMP5]] -; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x i32> [[TMP6]], <8 x i32> [[TMP7]], <8 x i32> +; CHECK-NEXT: [[TMP77:%.*]] = add nsw <8 x i32> [[TMP3]], [[TMP5]] +; 
CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x i32> [[TMP6]], <8 x i32> [[TMP77]], <8 x i32> ; CHECK-NEXT: [[TMP9:%.*]] = add <8 x i32> zeroinitializer, [[TMP8]] ; CHECK-NEXT: [[TMP10:%.*]] = xor <8 x i32> [[TMP9]], zeroinitializer ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP10]]) diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extractelement-single-use-many-nodes.ll b/llvm/test/Transforms/SLPVectorizer/X86/extractelement-single-use-many-nodes.ll index f665dac3282b79..24b95c4e6ff2f8 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/extractelement-single-use-many-nodes.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/extractelement-single-use-many-nodes.ll @@ -7,17 +7,14 @@ define void @foo(double %i) { ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x double> , double [[I]], i32 2 ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> zeroinitializer, [[TMP0]] -; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x double> [[TMP1]], i32 1 ; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x double> poison, double [[I]], i32 0 ; CHECK-NEXT: [[TMP4:%.*]] = fsub <2 x double> zeroinitializer, [[TMP3]] ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 1 -; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> poison, <8 x i32> -; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <8 x double> [[TMP6]], <8 x double> , <8 x i32> -; CHECK-NEXT: [[TMP8:%.*]] = insertelement <8 x double> [[TMP7]], double [[TMP2]], i32 3 -; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> poison, <8 x i32> -; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x double> [[TMP9]], <8 x double> , <8 x i32> -; CHECK-NEXT: [[TMP11:%.*]] = insertelement <8 x double> [[TMP10]], double [[TMP5]], i32 6 -; CHECK-NEXT: [[TMP12:%.*]] = fmul <8 x double> [[TMP8]], [[TMP11]] +; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> poison, <8 x i32> +; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x double> [[TMP9]], <8 x double> , <8 x i32> +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x double> [[TMP6]], double [[TMP5]], i32 2 +; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x double> [[TMP7]], <8 x double> poison, <8 x i32> +; CHECK-NEXT: [[TMP12:%.*]] = fmul <8 x double> , [[TMP8]] ; CHECK-NEXT: [[TMP13:%.*]] = fadd <8 x double> zeroinitializer, [[TMP12]] ; CHECK-NEXT: [[TMP14:%.*]] = fadd <8 x double> [[TMP13]], zeroinitializer ; CHECK-NEXT: [[TMP15:%.*]] = fcmp ult <8 x double> [[TMP14]], zeroinitializer diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll index 4cc3c1241b56df..de06daac7a75d8 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll @@ -1295,7 +1295,7 @@ define i8 @umin_intrinsic_rdx_v16i8(ptr %p0) { define void @PR49730() { ; CHECK-LABEL: @PR49730( -; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> , <4 x i32> ) +; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> , <4 x i32> ) ; CHECK-NEXT: [[TMP2:%.*]] = sub nsw <4 x i32> undef, [[TMP1]] ; CHECK-NEXT: [[T12:%.*]] = sub nsw i32 undef, undef ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[TMP2]]) diff --git a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll index 593aad82ad5d87..8562e53b153872 100644 --- 
a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll @@ -185,9 +185,13 @@ define void @shuffle_nodes_match1(ptr noalias %from, ptr noalias %to, double %v1 ; CHECK-NEXT: br label [[LP:%.*]] ; CHECK: lp: ; CHECK-NEXT: [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[FROM:%.*]], align 4 -; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> +; CHECK-NEXT: [[FROM_1:%.*]] = getelementptr i8, ptr [[FROM:%.*]], i32 8 +; CHECK-NEXT: [[V0_1:%.*]] = load double, ptr [[FROM]], align 4 +; CHECK-NEXT: [[V0_2:%.*]] = load double, ptr [[FROM_1]], align 4 +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> poison, double [[V0_2]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> [[TMP0]], double [[P]], i64 1 +; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> poison, double [[V0_1]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], [[TMP1]] ; CHECK-NEXT: store <2 x double> [[TMP3]], ptr [[TO:%.*]], align 4 ; CHECK-NEXT: br i1 [[C:%.*]], label [[LP]], label [[EXT:%.*]] @@ -233,13 +237,9 @@ define void @vecload_vs_broadcast4(ptr noalias %from, ptr noalias %to, double %v ; CHECK-NEXT: br label [[LP:%.*]] ; CHECK: lp: ; CHECK-NEXT: [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[FROM_1:%.*]] = getelementptr i8, ptr [[FROM:%.*]], i32 8 -; CHECK-NEXT: [[V0_1:%.*]] = load double, ptr [[FROM]], align 4 -; CHECK-NEXT: [[V0_2:%.*]] = load double, ptr [[FROM_1]], align 4 -; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> poison, double [[V0_2]], i64 0 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[FROM:%.*]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[P]], i64 1 -; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> poison, double [[V0_1]], i64 0 -; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]] ; CHECK-NEXT: store <2 x double> [[TMP4]], ptr [[TO:%.*]], align 4 ; CHECK-NEXT: br i1 [[C:%.*]], label [[LP]], label [[EXT:%.*]] diff --git a/llvm/test/Transforms/SLPVectorizer/X86/postponed_gathers.ll b/llvm/test/Transforms/SLPVectorizer/X86/postponed_gathers.ll index 681d131c50727d..488ca0b23cd9c5 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/postponed_gathers.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/postponed_gathers.ll @@ -10,7 +10,7 @@ define void @foo() { ; CHECK-NEXT: br label [[BCI_252:%.*]] ; CHECK: bci_252: ; CHECK-NEXT: [[TMP3:%.*]] = phi <2 x i32> [ zeroinitializer, [[BCI_0:%.*]] ], [ [[TMP16:%.*]], [[BCI_252_1:%.*]] ] -; CHECK-NEXT: [[TMP4:%.*]] = mul <2 x i32> zeroinitializer, [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = mul <2 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = or <2 x i32> [[TMP3]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = or <2 x i32> [[TMP2]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = or <2 x i32> [[TMP6]], zeroinitializer diff --git a/llvm/test/Transforms/SLPVectorizer/X86/replaced-external-in-reduction.ll b/llvm/test/Transforms/SLPVectorizer/X86/replaced-external-in-reduction.ll index 
19a3a7d53df008..9df7aa1c727c87 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/replaced-external-in-reduction.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/replaced-external-in-reduction.ll @@ -5,8 +5,8 @@ define void @test(i32 %0, ptr %p) { ; CHECK-LABEL: define void @test( ; CHECK-SAME: i32 [[TMP0:%.*]], ptr [[P:%.*]]) { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> , i32 [[TMP0]], i32 3 -; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], +; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> , i32 [[TMP0]], i32 3 +; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3 ; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[PH:%.*]] ; CHECK: ph: diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vec_list_bias_external_insert_shuffled.ll b/llvm/test/Transforms/SLPVectorizer/X86/vec_list_bias_external_insert_shuffled.ll index 69ecf1852aedd7..8f1d7a11e15090 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/vec_list_bias_external_insert_shuffled.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/vec_list_bias_external_insert_shuffled.ll @@ -7,11 +7,9 @@ define void @test(ptr nocapture %t2) { ; CHECK-NEXT: [[T4:%.*]] = getelementptr inbounds i32, ptr [[T2]], i64 7 ; CHECK-NEXT: [[T5:%.*]] = load i32, ptr [[T4]], align 4 ; CHECK-NEXT: [[T8:%.*]] = getelementptr inbounds i32, ptr [[T2]], i64 1 -; CHECK-NEXT: [[T9:%.*]] = load i32, ptr [[T8]], align 4 ; CHECK-NEXT: [[T10:%.*]] = getelementptr inbounds i32, ptr [[T2]], i64 6 ; CHECK-NEXT: [[T11:%.*]] = load i32, ptr [[T10]], align 4 -; CHECK-NEXT: [[T14:%.*]] = getelementptr inbounds i32, ptr [[T2]], i64 2 -; CHECK-NEXT: [[T15:%.*]] = load i32, ptr [[T14]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[T8]], align 4 ; CHECK-NEXT: [[T16:%.*]] = getelementptr inbounds i32, ptr [[T2]], i64 5 ; CHECK-NEXT: [[T17:%.*]] = load i32, ptr [[T16]], align 4 ; CHECK-NEXT: [[T20:%.*]] = getelementptr inbounds i32, ptr [[T2]], i64 3 @@ -21,10 +19,11 @@ define void @test(ptr nocapture %t2) { ; CHECK-NEXT: [[T24:%.*]] = add nsw i32 [[T23]], [[T21]] ; CHECK-NEXT: [[T25:%.*]] = sub nsw i32 [[T21]], [[T23]] ; CHECK-NEXT: [[T27:%.*]] = sub nsw i32 [[T3]], [[T24]] +; CHECK-NEXT: [[T9:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0 +; CHECK-NEXT: [[T15:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1 ; CHECK-NEXT: [[T29:%.*]] = sub nsw i32 [[T9]], [[T15]] ; CHECK-NEXT: [[T30:%.*]] = add nsw i32 [[T27]], [[T29]] ; CHECK-NEXT: [[T31:%.*]] = mul nsw i32 [[T30]], 4433 -; CHECK-NEXT: [[T32:%.*]] = mul nsw i32 [[T27]], 6270 ; CHECK-NEXT: [[T34:%.*]] = mul nsw i32 [[T29]], -15137 ; CHECK-NEXT: [[T37:%.*]] = add nsw i32 [[T25]], [[T11]] ; CHECK-NEXT: [[T38:%.*]] = add nsw i32 [[T17]], [[T5]] @@ -34,20 +33,19 @@ define void @test(ptr nocapture %t2) { ; CHECK-NEXT: [[T42:%.*]] = mul nsw i32 [[T17]], 16819 ; CHECK-NEXT: [[T47:%.*]] = mul nsw i32 [[T37]], -16069 ; CHECK-NEXT: [[T48:%.*]] = mul nsw i32 [[T38]], -3196 -; CHECK-NEXT: [[T49:%.*]] = add nsw i32 [[T40]], [[T47]] -; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> poison, i32 [[T15]], i32 0 -; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i32> [[TMP1]], i32 [[T40]], i32 1 -; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i32> poison, i32 [[T9]], i32 0 -; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[T48]], i32 1 -; CHECK-NEXT: [[TMP5:%.*]] = add nsw <2 x i32> [[TMP2]], [[TMP4]] -; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x i32> [[TMP5]], <2 x i32> poison, <8 x i32> -; CHECK-NEXT: 
[[T67:%.*]] = insertelement <8 x i32> [[TMP6]], i32 [[T32]], i32 2 -; CHECK-NEXT: [[T68:%.*]] = insertelement <8 x i32> [[T67]], i32 [[T49]], i32 3 -; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP5]], <2 x i32> poison, <8 x i32> -; CHECK-NEXT: [[T701:%.*]] = shufflevector <8 x i32> [[T68]], <8 x i32> [[TMP7]], <8 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <4 x i32> +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[T27]], i32 2 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[T47]], i32 3 +; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> , <4 x i32> +; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i32> [[TMP7]], i32 [[T40]], i32 3 +; CHECK-NEXT: [[TMP9:%.*]] = add nsw <4 x i32> [[TMP6]], [[TMP8]] +; CHECK-NEXT: [[TMP10:%.*]] = mul nsw <4 x i32> [[TMP6]], [[TMP8]] +; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i32> [[TMP9]], <4 x i32> [[TMP10]], <4 x i32> +; CHECK-NEXT: [[T50:%.*]] = add nsw i32 [[T40]], [[T48]] +; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <4 x i32> [[TMP11]], <4 x i32> poison, <8 x i32> +; CHECK-NEXT: [[T701:%.*]] = insertelement <8 x i32> [[TMP12]], i32 [[T50]], i32 5 ; CHECK-NEXT: [[T71:%.*]] = insertelement <8 x i32> [[T701]], i32 [[T34]], i32 6 -; CHECK-NEXT: [[T72:%.*]] = insertelement <8 x i32> [[T71]], i32 [[T49]], i32 7 -; CHECK-NEXT: [[T76:%.*]] = shl <8 x i32> [[T72]], +; CHECK-NEXT: [[T76:%.*]] = shl <8 x i32> [[T71]], ; CHECK-NEXT: store <8 x i32> [[T76]], ptr [[T2]], align 4 ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/SLPVectorizer/slp-umax-rdx-matcher-crash.ll b/llvm/test/Transforms/SLPVectorizer/slp-umax-rdx-matcher-crash.ll index 8b131ccd01c017..16a9bf53b54a02 100644 --- a/llvm/test/Transforms/SLPVectorizer/slp-umax-rdx-matcher-crash.ll +++ b/llvm/test/Transforms/SLPVectorizer/slp-umax-rdx-matcher-crash.ll @@ -43,7 +43,7 @@ declare i32 @llvm.umin.i32(i32, i32) define void @test2() { ; CHECK-LABEL: @test2( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> , <4 x i32> ) +; CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> , <4 x i32> ) ; CHECK-NEXT: [[TMP1:%.*]] = sub nsw <4 x i32> undef, [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[TMP1]]) ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP2]], i32 77) From 442990b93773a6f1fb9e675c3919734fe2846dac Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 1 May 2024 07:44:15 -0400 Subject: [PATCH 05/48] [gn] port 8cde1cfc60e3 (LLVM_APPEND_VC_REV for lit) --- llvm/utils/gn/secondary/llvm/test/BUILD.gn | 2 ++ 1 file changed, 2 insertions(+) diff --git a/llvm/utils/gn/secondary/llvm/test/BUILD.gn b/llvm/utils/gn/secondary/llvm/test/BUILD.gn index 3257f4b5ff2363..826dcf4e6ee9b1 100644 --- a/llvm/utils/gn/secondary/llvm/test/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/test/BUILD.gn @@ -59,9 +59,11 @@ write_lit_config("lit_site_cfg") { # LLVM_HOST_TRIPLE.) 
"HOST_LDFLAGS=", + "LLVM_APPEND_VC_REV=0", "LLVM_ENABLE_FFI=0", "LLVM_ENABLE_HTTPLIB=0", "LLVM_ENABLE_ZSTD=0", + "LLVM_FORCE_VC_REVISION=", "LLVM_HAVE_OPT_VIEWER_MODULES=0", "LLVM_HOST_TRIPLE=$llvm_current_triple", "LLVM_INCLUDE_DXIL_TESTS=0", From df241b19c952b904eec755d9f090737aed437986 Mon Sep 17 00:00:00 2001 From: Sean Perry <39927768+perry-ca@users.noreply.github.com> Date: Wed, 1 May 2024 07:48:57 -0400 Subject: [PATCH 06/48] [z/OS] add support for z/OS system headers to clang std header wrappers (#89995) Update the wrappers for the C std headers so that they always forward to the z/OS system headers. --- clang/lib/Headers/CMakeLists.txt | 19 +++++++++++++++++-- clang/lib/Headers/builtins.h | 3 +++ clang/lib/Headers/float.h | 5 +++++ clang/lib/Headers/inttypes.h | 4 ++++ clang/lib/Headers/iso646.h | 4 ++++ clang/lib/Headers/limits.h | 5 +++++ clang/lib/Headers/stdalign.h | 5 +++++ clang/lib/Headers/stdarg.h | 12 ++++++++++++ clang/lib/Headers/stdbool.h | 5 +++++ clang/lib/Headers/stddef.h | 17 +++++++++++++++++ clang/lib/Headers/stdint.h | 5 +++++ clang/lib/Headers/stdnoreturn.h | 6 ++++++ clang/lib/Headers/varargs.h | 6 +++++- clang/lib/Headers/zos_wrappers/builtins.h | 18 ++++++++++++++++++ 14 files changed, 111 insertions(+), 3 deletions(-) create mode 100644 clang/lib/Headers/zos_wrappers/builtins.h diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt index e6ae4e19e81db9..3416811e39de27 100644 --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -335,6 +335,10 @@ set(llvm_libc_wrapper_files llvm_libc_wrappers/time.h ) +set(zos_wrapper_files + zos_wrappers/builtins.h +) + include(GetClangResourceDir) get_clang_resource_dir(output_dir PREFIX ${LLVM_LIBRARY_OUTPUT_INTDIR}/.. SUBDIR include) set(out_files) @@ -370,7 +374,7 @@ endfunction(clang_generate_header) # Copy header files from the source directory to the build directory foreach( f ${files} ${cuda_wrapper_files} ${cuda_wrapper_bits_files} - ${ppc_wrapper_files} ${openmp_wrapper_files} ${hlsl_files} + ${ppc_wrapper_files} ${openmp_wrapper_files} ${zos_wrapper_files} ${hlsl_files} ${llvm_libc_wrapper_files}) copy_header_to_output_dir(${CMAKE_CURRENT_SOURCE_DIR} ${f}) endforeach( f ) @@ -487,7 +491,7 @@ add_header_target("mips-resource-headers" "${mips_msa_files}") add_header_target("ppc-resource-headers" "${ppc_files};${ppc_wrapper_files}") add_header_target("ppc-htm-resource-headers" "${ppc_htm_files}") add_header_target("riscv-resource-headers" "${riscv_files};${riscv_generated_files}") -add_header_target("systemz-resource-headers" "${systemz_files}") +add_header_target("systemz-resource-headers" "${systemz_files};${zos_wrapper_files}") add_header_target("ve-resource-headers" "${ve_files}") add_header_target("webassembly-resource-headers" "${webassembly_files}") add_header_target("x86-resource-headers" "${x86_files}") @@ -538,6 +542,11 @@ install( DESTINATION ${header_install_dir}/openmp_wrappers COMPONENT clang-resource-headers) +install( + FILES ${zos_wrapper_files} + DESTINATION ${header_install_dir}/zos_wrappers + COMPONENT clang-resource-headers) + ############################################################# # Install rules for separate header lists install( @@ -642,6 +651,12 @@ install( EXCLUDE_FROM_ALL COMPONENT systemz-resource-headers) +install( + FILES ${zos_wrapper_files} + DESTINATION ${header_install_dir}/zos_wrappers + EXCLUDE_FROM_ALL + COMPONENT systemz-resource-headers) + install( FILES ${ve_files} DESTINATION ${header_install_dir} diff --git 
a/clang/lib/Headers/builtins.h b/clang/lib/Headers/builtins.h index 65095861ca9b1c..1e534e632c8ead 100644 --- a/clang/lib/Headers/builtins.h +++ b/clang/lib/Headers/builtins.h @@ -13,4 +13,7 @@ #ifndef __BUILTINS_H #define __BUILTINS_H +#if defined(__MVS__) && __has_include_next() +#include_next +#endif /* __MVS__ */ #endif /* __BUILTINS_H */ diff --git a/clang/lib/Headers/float.h b/clang/lib/Headers/float.h index 0e73bca0a2d6e4..642c8f06cc9386 100644 --- a/clang/lib/Headers/float.h +++ b/clang/lib/Headers/float.h @@ -10,6 +10,10 @@ #ifndef __CLANG_FLOAT_H #define __CLANG_FLOAT_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else + /* If we're on MinGW, fall back to the system's float.h, which might have * additional definitions provided for Windows. * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx @@ -165,4 +169,5 @@ # define FLT16_TRUE_MIN __FLT16_TRUE_MIN__ #endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */ +#endif /* __MVS__ */ #endif /* __CLANG_FLOAT_H */ diff --git a/clang/lib/Headers/inttypes.h b/clang/lib/Headers/inttypes.h index 1c894c4aca4975..5150d22f8b2e4e 100644 --- a/clang/lib/Headers/inttypes.h +++ b/clang/lib/Headers/inttypes.h @@ -13,6 +13,9 @@ #if !defined(_AIX) || !defined(_STD_TYPES_T) #define __CLANG_INTTYPES_H #endif +#if defined(__MVS__) && __has_include_next() +#include_next +#else #if defined(_MSC_VER) && _MSC_VER < 1800 #error MSVC does not have inttypes.h prior to Visual Studio 2013 @@ -94,4 +97,5 @@ #define SCNxFAST32 "x" #endif +#endif /* __MVS__ */ #endif /* __CLANG_INTTYPES_H */ diff --git a/clang/lib/Headers/iso646.h b/clang/lib/Headers/iso646.h index e0a20c6f1891b2..b53fcd9b4e5359 100644 --- a/clang/lib/Headers/iso646.h +++ b/clang/lib/Headers/iso646.h @@ -9,6 +9,9 @@ #ifndef __ISO646_H #define __ISO646_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else #ifndef __cplusplus #define and && @@ -24,4 +27,5 @@ #define xor_eq ^= #endif +#endif /* __MVS__ */ #endif /* __ISO646_H */ diff --git a/clang/lib/Headers/limits.h b/clang/lib/Headers/limits.h index 15e6bbe0abcf7d..56dffe568486cc 100644 --- a/clang/lib/Headers/limits.h +++ b/clang/lib/Headers/limits.h @@ -9,6 +9,10 @@ #ifndef __CLANG_LIMITS_H #define __CLANG_LIMITS_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else + /* The system's limits.h may, in turn, try to #include_next GCC's limits.h. Avert this #include_next madness. 
*/ #if defined __GNUC__ && !defined _GCC_LIMITS_H_ @@ -122,4 +126,5 @@ #define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL) #endif +#endif /* __MVS__ */ #endif /* __CLANG_LIMITS_H */ diff --git a/clang/lib/Headers/stdalign.h b/clang/lib/Headers/stdalign.h index 158508e65d2b34..56cdfa52d4bafa 100644 --- a/clang/lib/Headers/stdalign.h +++ b/clang/lib/Headers/stdalign.h @@ -10,6 +10,10 @@ #ifndef __STDALIGN_H #define __STDALIGN_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else + #if defined(__cplusplus) || \ (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202311L) #ifndef __cplusplus @@ -21,4 +25,5 @@ #define __alignof_is_defined 1 #endif /* __STDC_VERSION__ */ +#endif /* __MVS__ */ #endif /* __STDALIGN_H */ diff --git a/clang/lib/Headers/stdarg.h b/clang/lib/Headers/stdarg.h index 94b066566f084e..6e7bd604b2df41 100644 --- a/clang/lib/Headers/stdarg.h +++ b/clang/lib/Headers/stdarg.h @@ -33,6 +33,16 @@ defined(__need_va_arg) || defined(__need___va_copy) || \ defined(__need_va_copy) +#if defined(__MVS__) && __has_include_next() +#define __STDARG_H +#undef __need___va_list +#undef __need_va_list +#undef __need_va_arg +#undef __need___va_copy +#undef __need_va_copy +#include_next + +#else #if !defined(__need___va_list) && !defined(__need_va_list) && \ !defined(__need_va_arg) && !defined(__need___va_copy) && \ !defined(__need_va_copy) @@ -76,4 +86,6 @@ #undef __need_va_copy #endif /* defined(__need_va_copy) */ +#endif /* __MVS__ */ + #endif diff --git a/clang/lib/Headers/stdbool.h b/clang/lib/Headers/stdbool.h index 9406aab0ca72c7..dfaad2b65a9b53 100644 --- a/clang/lib/Headers/stdbool.h +++ b/clang/lib/Headers/stdbool.h @@ -12,6 +12,10 @@ #define __bool_true_false_are_defined 1 +#if defined(__MVS__) && __has_include_next() +#include_next +#else + #if defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L /* FIXME: We should be issuing a deprecation warning here, but cannot yet due * to system headers which include this header file unconditionally. @@ -31,4 +35,5 @@ #endif #endif +#endif /* __MVS__ */ #endif /* __STDBOOL_H */ diff --git a/clang/lib/Headers/stddef.h b/clang/lib/Headers/stddef.h index e0ad7b8d17aff9..9ccc0a68fbff33 100644 --- a/clang/lib/Headers/stddef.h +++ b/clang/lib/Headers/stddef.h @@ -36,6 +36,22 @@ defined(__need_unreachable) || defined(__need_max_align_t) || \ defined(__need_offsetof) || defined(__need_wint_t) +#if defined(__MVS__) && __has_include_next() +#define __STDDEF_H +#undef __need_ptrdiff_t +#undef __need_size_t +#undef __need_rsize_t +#undef __need_wchar_t +#undef __need_NULL +#undef __need_nullptr_t +#undef __need_unreachable +#undef __need_max_align_t +#undef __need_offsetof +#undef __need_wint_t +#include_next + +#else + #if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \ !defined(__need_rsize_t) && !defined(__need_wchar_t) && \ !defined(__need_NULL) && !defined(__need_nullptr_t) && \ @@ -120,4 +136,5 @@ __WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */ #undef __need_wint_t #endif /* __need_wint_t */ +#endif /* __MVS__ */ #endif diff --git a/clang/lib/Headers/stdint.h b/clang/lib/Headers/stdint.h index b6699b6ca3d4bb..01feab7b1ee2c2 100644 --- a/clang/lib/Headers/stdint.h +++ b/clang/lib/Headers/stdint.h @@ -14,6 +14,10 @@ #define __CLANG_STDINT_H #endif +#if defined(__MVS__) && __has_include_next() +#include_next +#else + /* If we're hosted, fall back to the system's stdint.h, which might have * additional definitions. 
*/ @@ -947,4 +951,5 @@ typedef __UINTMAX_TYPE__ uintmax_t; #endif #endif /* __STDC_HOSTED__ */ +#endif /* __MVS__ */ #endif /* __CLANG_STDINT_H */ diff --git a/clang/lib/Headers/stdnoreturn.h b/clang/lib/Headers/stdnoreturn.h index c90bf77e840e16..6a9b209c7218bd 100644 --- a/clang/lib/Headers/stdnoreturn.h +++ b/clang/lib/Headers/stdnoreturn.h @@ -10,9 +10,15 @@ #ifndef __STDNORETURN_H #define __STDNORETURN_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else + #define noreturn _Noreturn #define __noreturn_is_defined 1 +#endif /* __MVS__ */ + #if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) && \ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS) /* The noreturn macro is deprecated in C23. We do not mark it as such because diff --git a/clang/lib/Headers/varargs.h b/clang/lib/Headers/varargs.h index d241b7de3cb2a8..d33ddc5ae7f8a5 100644 --- a/clang/lib/Headers/varargs.h +++ b/clang/lib/Headers/varargs.h @@ -8,5 +8,9 @@ */ #ifndef __VARARGS_H #define __VARARGS_H - #error "Please use instead of " +#if defined(__MVS__) && __has_include_next() +#include_next +#else +#error "Please use instead of " +#endif /* __MVS__ */ #endif diff --git a/clang/lib/Headers/zos_wrappers/builtins.h b/clang/lib/Headers/zos_wrappers/builtins.h new file mode 100644 index 00000000000000..1f0d0e27ecb3a4 --- /dev/null +++ b/clang/lib/Headers/zos_wrappers/builtins.h @@ -0,0 +1,18 @@ +/*===---- builtins.h - z/Architecture Builtin Functions --------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ZOS_WRAPPERS_BUILTINS_H +#define __ZOS_WRAPPERS_BUILTINS_H +#if defined(__MVS__) +#include_next +#if defined(__VEC__) +#include +#endif +#endif /* defined(__MVS__) */ +#endif /* __ZOS_WRAPPERS_BUILTINS_H */ From 088aa81a545421933254f19cd3c8914a0373b493 Mon Sep 17 00:00:00 2001 From: Matt Devereau Date: Tue, 30 Apr 2024 13:06:57 +0000 Subject: [PATCH 07/48] Constant Fold logf128 calls This is a second attempt to land #84501 which failed on several targets. This patch adds the HAS_IEE754_FLOAT128 define which makes the check for typedef'ing float128 more precise by checking whether __uint128_t is available and checking if the host does not use __ibm128 which is prevalent on power pc targets and replaces IEEE754 float128s. --- llvm/CMakeLists.txt | 2 ++ llvm/cmake/config-ix.cmake | 11 +++++++++ llvm/include/llvm/ADT/APFloat.h | 13 ++++++++++ llvm/include/llvm/ADT/APInt.h | 8 ++++++ llvm/include/llvm/Config/llvm-config.h.cmake | 3 +++ llvm/include/llvm/Support/float128.h | 26 ++++++++++++++++++++ llvm/lib/Analysis/CMakeLists.txt | 6 +++++ llvm/lib/Analysis/ConstantFolding.cpp | 11 +++++++++ llvm/lib/Support/APFloat.cpp | 24 ++++++++++++++++++ llvm/test/CMakeLists.txt | 1 + llvm/test/lit.cfg.py | 3 +++ llvm/test/lit.site.cfg.py.in | 1 + llvm/unittests/Analysis/CMakeLists.txt | 6 +++++ 13 files changed, 115 insertions(+) create mode 100644 llvm/include/llvm/Support/float128.h diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt index 43181af3bc1953..9163729e946eb8 100644 --- a/llvm/CMakeLists.txt +++ b/llvm/CMakeLists.txt @@ -562,6 +562,8 @@ set(LLVM_USE_STATIC_ZSTD FALSE CACHE BOOL "Use static version of zstd. Can be TR set(LLVM_ENABLE_CURL "OFF" CACHE STRING "Use libcurl for the HTTP client if available. 
Can be ON, OFF, or FORCE_ON") +set(LLVM_HAS_LOGF128 "OFF" CACHE STRING "Use logf128 to constant fold fp128 logarithm calls. Can be ON, OFF, or FORCE_ON") + set(LLVM_ENABLE_HTTPLIB "OFF" CACHE STRING "Use cpp-httplib HTTP server library if available. Can be ON, OFF, or FORCE_ON") set(LLVM_Z3_INSTALL_DIR "" CACHE STRING "Install directory of the Z3 solver.") diff --git a/llvm/cmake/config-ix.cmake b/llvm/cmake/config-ix.cmake index bf1b110245bb2f..ce07cab7a56c6b 100644 --- a/llvm/cmake/config-ix.cmake +++ b/llvm/cmake/config-ix.cmake @@ -257,6 +257,17 @@ else() set(LLVM_ENABLE_TERMINFO 0) endif() +if(LLVM_HAS_LOGF128) + include(CheckCXXSymbolExists) + check_cxx_symbol_exists(logf128 math.h HAS_LOGF128) + + if(LLVM_HAS_LOGF128 STREQUAL FORCE_ON AND NOT HAS_LOGF128) + message(FATAL_ERROR "Failed to configure logf128") + endif() + + set(LLVM_HAS_LOGF128 "${HAS_LOGF128}") +endif() + # function checks check_symbol_exists(arc4random "stdlib.h" HAVE_DECL_ARC4RANDOM) find_package(Backtrace) diff --git a/llvm/include/llvm/ADT/APFloat.h b/llvm/include/llvm/ADT/APFloat.h index deb74cb2fdeb1e..44a301ecc99280 100644 --- a/llvm/include/llvm/ADT/APFloat.h +++ b/llvm/include/llvm/ADT/APFloat.h @@ -19,6 +19,7 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FloatingPointMode.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/float128.h" #include #define APFLOAT_DISPATCH_ON_SEMANTICS(METHOD_CALL) \ @@ -354,6 +355,9 @@ class IEEEFloat final : public APFloatBase { Expected convertFromString(StringRef, roundingMode); APInt bitcastToAPInt() const; double convertToDouble() const; +#ifdef HAS_IEE754_FLOAT128 + float128 convertToQuad() const; +#endif float convertToFloat() const; /// @} @@ -1218,6 +1222,15 @@ class APFloat : public APFloatBase { /// shorter semantics, like IEEEsingle and others. double convertToDouble() const; + /// Converts this APFloat to host float value. + /// + /// \pre The APFloat must be built using semantics, that can be represented by + /// the host float type without loss of precision. It can be IEEEquad and + /// shorter semantics, like IEEEdouble and others. +#ifdef HAS_IEE754_FLOAT128 + float128 convertToQuad() const; +#endif + /// Converts this APFloat to host float value. /// /// \pre The APFloat must be built using semantics, that can be represented by diff --git a/llvm/include/llvm/ADT/APInt.h b/llvm/include/llvm/ADT/APInt.h index 8d3c029b2e7e91..4e543d6e7618b5 100644 --- a/llvm/include/llvm/ADT/APInt.h +++ b/llvm/include/llvm/ADT/APInt.h @@ -17,6 +17,7 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/float128.h" #include #include #include @@ -1670,6 +1671,13 @@ class [[nodiscard]] APInt { /// any bit width. Exactly 64 bits will be translated. 
double bitsToDouble() const { return llvm::bit_cast(getWord(0)); } +#ifdef HAS_IEE754_FLOAT128 + float128 bitsToQuad() const { + __uint128_t ul = ((__uint128_t)U.pVal[1] << 64) + U.pVal[0]; + return llvm::bit_cast(ul); + } +#endif + /// Converts APInt bits to a float /// /// The conversion does not do a translation from integer to float, it just diff --git a/llvm/include/llvm/Config/llvm-config.h.cmake b/llvm/include/llvm/Config/llvm-config.h.cmake index 6605ea60df99e1..629977cc11d683 100644 --- a/llvm/include/llvm/Config/llvm-config.h.cmake +++ b/llvm/include/llvm/Config/llvm-config.h.cmake @@ -198,4 +198,7 @@ /* Define if plugins enabled */ #cmakedefine LLVM_ENABLE_PLUGINS +/* Define if logf128 is available */ +#cmakedefine LLVM_HAS_LOGF128 + #endif diff --git a/llvm/include/llvm/Support/float128.h b/llvm/include/llvm/Support/float128.h new file mode 100644 index 00000000000000..e15a98dc5a6779 --- /dev/null +++ b/llvm/include/llvm/Support/float128.h @@ -0,0 +1,26 @@ +//===-- llvm/Support/float128.h - Compiler abstraction support --*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_FLOAT128 +#define LLVM_FLOAT128 + +namespace llvm { + +#if defined(__clang__) && defined(__FLOAT128__) && \ + defined(__SIZEOF_INT128__) && !defined(__LONG_DOUBLE_IBM128__) +#define HAS_IEE754_FLOAT128 +typedef __float128 float128; +#elif defined(__FLOAT128__) && defined(__SIZEOF_INT128__) && \ + !defined(__LONG_DOUBLE_IBM128__) && \ + (defined(__GNUC__) || defined(__GNUG__)) +#define HAS_IEE754_FLOAT128 +typedef _Float128 float128; +#endif + +} // namespace llvm +#endif // LLVM_FLOAT128 diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt index 474b8d20fde16f..74476cb5440c61 100644 --- a/llvm/lib/Analysis/CMakeLists.txt +++ b/llvm/lib/Analysis/CMakeLists.txt @@ -159,3 +159,9 @@ add_llvm_component_library(LLVMAnalysis Support TargetParser ) + +include(CheckCXXSymbolExists) +check_cxx_symbol_exists(logf128 math.h HAS_LOGF128) +if(HAS_LOGF128) + target_compile_definitions(LLVMAnalysis PRIVATE HAS_LOGF128) +endif() diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp index 749374a3aa48af..bb4b334b96ee6e 100644 --- a/llvm/lib/Analysis/ConstantFolding.cpp +++ b/llvm/lib/Analysis/ConstantFolding.cpp @@ -2089,6 +2089,17 @@ static Constant *ConstantFoldScalarCall1(StringRef Name, if (IntrinsicID == Intrinsic::canonicalize) return constantFoldCanonicalize(Ty, Call, U); +#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128) + if (Ty->isFP128Ty()) { + switch (IntrinsicID) { + default: + return nullptr; + case Intrinsic::log: + return ConstantFP::get(Ty, logf128(Op->getValueAPF().convertToQuad())); + } + } +#endif + if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) return nullptr; diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp index 0a4f5ac01553f1..52bf3b91c55af9 100644 --- a/llvm/lib/Support/APFloat.cpp +++ b/llvm/lib/Support/APFloat.cpp @@ -3670,6 +3670,15 @@ double IEEEFloat::convertToDouble() const { return api.bitsToDouble(); } +#ifdef HAS_IEE754_FLOAT128 +float128 IEEEFloat::convertToQuad() const { + assert(semantics == (const llvm::fltSemantics *)&semIEEEquad && + "Float semantics are not IEEEquads"); + APInt api = bitcastToAPInt(); + return 
api.bitsToQuad(); +} +#endif + /// Integer bit is explicit in this format. Intel hardware (387 and later) /// does not support these bit patterns: /// exponent = all 1's, integer bit 0, significand 0 ("pseudoinfinity") @@ -5265,6 +5274,21 @@ double APFloat::convertToDouble() const { return Temp.getIEEE().convertToDouble(); } +#ifdef HAS_IEE754_FLOAT128 +float128 APFloat::convertToQuad() const { + if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEquad) + return getIEEE().convertToQuad(); + assert(getSemantics().isRepresentableBy(semIEEEquad) && + "Float semantics is not representable by IEEEquad"); + APFloat Temp = *this; + bool LosesInfo; + opStatus St = Temp.convert(semIEEEquad, rmNearestTiesToEven, &LosesInfo); + assert(!(St & opInexact) && !LosesInfo && "Unexpected imprecision"); + (void)St; + return Temp.getIEEE().convertToQuad(); +} +#endif + float APFloat::convertToFloat() const { if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEsingle) return getIEEE().convertToFloat(); diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt index eb401351141641..b55fea144f2fa5 100644 --- a/llvm/test/CMakeLists.txt +++ b/llvm/test/CMakeLists.txt @@ -26,6 +26,7 @@ llvm_canonicalize_cmake_booleans( LLVM_TOOL_LLVM_DRIVER_BUILD LLVM_INCLUDE_SPIRV_TOOLS_TESTS LLVM_APPEND_VC_REV + LLVM_HAS_LOGF128 ) configure_lit_site_cfg( diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py index affd87b98c1410..fe1262893212fb 100644 --- a/llvm/test/lit.cfg.py +++ b/llvm/test/lit.cfg.py @@ -617,3 +617,6 @@ def have_ld64_plugin_support(): # "OBJECT_MODE" to 'any' by default on AIX OS. if "system-aix" in config.available_features: config.environment["OBJECT_MODE"] = "any" + +if config.has_logf128: + config.available_features.add("has_logf128") diff --git a/llvm/test/lit.site.cfg.py.in b/llvm/test/lit.site.cfg.py.in index 60a68b0edaf933..0968f6214772d0 100644 --- a/llvm/test/lit.site.cfg.py.in +++ b/llvm/test/lit.site.cfg.py.in @@ -63,6 +63,7 @@ config.have_llvm_driver = @LLVM_TOOL_LLVM_DRIVER_BUILD@ config.spirv_tools_tests = @LLVM_INCLUDE_SPIRV_TOOLS_TESTS@ config.have_vc_rev = @LLVM_APPEND_VC_REV@ config.force_vc_rev = "@LLVM_FORCE_VC_REVISION@" +config.has_logf128 = @LLVM_HAS_LOGF128@ import lit.llvm lit.llvm.initialize(lit_config, config) diff --git a/llvm/unittests/Analysis/CMakeLists.txt b/llvm/unittests/Analysis/CMakeLists.txt index b1aeaa6e71fd4c..9b3778f8a3f98f 100644 --- a/llvm/unittests/Analysis/CMakeLists.txt +++ b/llvm/unittests/Analysis/CMakeLists.txt @@ -80,5 +80,11 @@ if(NOT WIN32) export_executable_symbols_for_plugins(AnalysisTests) endif() +include(CheckCXXSymbolExists) +check_cxx_symbol_exists(logf128 math.h HAS_LOGF128) +if(HAS_LOGF128) + target_compile_definitions(AnalysisTests PRIVATE HAS_LOGF128) +endif() + add_subdirectory(InlineAdvisorPlugin) add_subdirectory(InlineOrderPlugin) From 57d0d3b4d638d170035f55d79f0202f1042de345 Mon Sep 17 00:00:00 2001 From: Kiran Chandramohan Date: Wed, 1 May 2024 12:58:50 +0100 Subject: [PATCH 08/48] [Flang][OpenMP] Handle more character allocatable cases in privatization (#90449) Fixes #84732, #81947, #81946 Note: This is a fix till we enable delayed privatization. 
--- flang/lib/Lower/Bridge.cpp | 41 +++---- .../OpenMP/cfg-conversion-omp.private.f90 | 2 +- ...ayed-privatization-allocatable-private.f90 | 2 +- .../OpenMP/parallel-private-clause-fixes.f90 | 104 ++++++++++++++++++ 4 files changed, 128 insertions(+), 21 deletions(-) diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index b50efac5411469..fb01789d3f8ae6 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -675,25 +675,28 @@ class FirConverter : public Fortran::lower::AbstractConverter { auto if_builder = builder->genIfThenElse(loc, isAllocated); if_builder.genThen([&]() { std::string name = mangleName(sym) + ".alloc"; - if (auto seqTy = mlir::dyn_cast(symType)) { - fir::ExtendedValue read = fir::factory::genMutableBoxRead( - *builder, loc, box, /*mayBePolymorphic=*/false); - if (auto read_arr_box = read.getBoxOf()) { - fir::factory::genInlinedAllocation( - *builder, loc, *new_box, read_arr_box->getLBounds(), - read_arr_box->getExtents(), - /*lenParams=*/std::nullopt, name, - /*mustBeHeap=*/true); - } else if (auto read_char_arr_box = - read.getBoxOf()) { - fir::factory::genInlinedAllocation( - *builder, loc, *new_box, read_char_arr_box->getLBounds(), - read_char_arr_box->getExtents(), - read_char_arr_box->getLen(), name, - /*mustBeHeap=*/true); - } else { - TODO(loc, "Unhandled allocatable box type"); - } + fir::ExtendedValue read = fir::factory::genMutableBoxRead( + *builder, loc, box, /*mayBePolymorphic=*/false); + if (auto read_arr_box = read.getBoxOf()) { + fir::factory::genInlinedAllocation( + *builder, loc, *new_box, read_arr_box->getLBounds(), + read_arr_box->getExtents(), + /*lenParams=*/std::nullopt, name, + /*mustBeHeap=*/true); + } else if (auto read_char_arr_box = + read.getBoxOf()) { + fir::factory::genInlinedAllocation( + *builder, loc, *new_box, read_char_arr_box->getLBounds(), + read_char_arr_box->getExtents(), read_char_arr_box->getLen(), + name, + /*mustBeHeap=*/true); + } else if (auto read_char_box = + read.getBoxOf()) { + fir::factory::genInlinedAllocation(*builder, loc, *new_box, + /*lbounds=*/std::nullopt, + /*extents=*/std::nullopt, + read_char_box->getLen(), name, + /*mustBeHeap=*/true); } else { fir::factory::genInlinedAllocation( *builder, loc, *new_box, box.getMutableProperties().lbounds, diff --git a/flang/test/Lower/OpenMP/cfg-conversion-omp.private.f90 b/flang/test/Lower/OpenMP/cfg-conversion-omp.private.f90 index 7f1087a7ebe372..44036492f55957 100644 --- a/flang/test/Lower/OpenMP/cfg-conversion-omp.private.f90 +++ b/flang/test/Lower/OpenMP/cfg-conversion-omp.private.f90 @@ -34,7 +34,7 @@ subroutine delayed_privatization_allocatable ! CFGConv-NEXT: %[[ALLOC_COND:.*]] = arith.cmpi ne, %[[PRIV_ARG_ADDR]], %[[C0]] : i64 ! CFGConv-NEXT: cf.cond_br %[[ALLOC_COND]], ^[[ALLOC_MEM_BB:.*]], ^[[ZERO_MEM_BB:.*]] ! CFGConv-NEXT: ^[[ALLOC_MEM_BB]]: -! CFGConv-NEXT: fir.allocmem +! CFGConv: fir.allocmem ! CFGConv: cf.br ^[[DECL_BB:.*]] ! CFGConv: ^[[ZERO_MEM_BB]]: ! CFGConv-NEXT: fir.zero_bits diff --git a/flang/test/Lower/OpenMP/delayed-privatization-allocatable-private.f90 b/flang/test/Lower/OpenMP/delayed-privatization-allocatable-private.f90 index 31a1e1962de2eb..f1fae2540aa4df 100644 --- a/flang/test/Lower/OpenMP/delayed-privatization-allocatable-private.f90 +++ b/flang/test/Lower/OpenMP/delayed-privatization-allocatable-private.f90 @@ -28,7 +28,7 @@ subroutine delayed_privatization_allocatable ! CHECK-NEXT: %[[ALLOC_COND:.*]] = arith.cmpi ne, %[[PRIV_ARG_ADDR]], %[[C0]] : i64 ! CHECK-NEXT: fir.if %[[ALLOC_COND]] { -! 
CHECK-NEXT: %[[PRIV_ALLOCMEM:.*]] = fir.allocmem i32 {fir.must_be_heap = true, uniq_name = "_QFdelayed_privatization_allocatableEvar1.alloc"} +! CHECK: %[[PRIV_ALLOCMEM:.*]] = fir.allocmem i32 {fir.must_be_heap = true, uniq_name = "_QFdelayed_privatization_allocatableEvar1.alloc"} ! CHECK-NEXT: %[[PRIV_ALLOCMEM_BOX:.*]] = fir.embox %[[PRIV_ALLOCMEM]] : (!fir.heap) -> !fir.box> ! CHECK-NEXT: fir.store %[[PRIV_ALLOCMEM_BOX]] to %[[PRIV_ALLOC]] : !fir.ref>> ! CHECK-NEXT: } else { diff --git a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 index 93809fde98a269..f8343338112c91 100644 --- a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 +++ b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 @@ -93,3 +93,107 @@ subroutine multiple_private_fix2() !$omp end parallel x = 1 end subroutine + + +! CHECK-LABEL: func.func @_QPsub01( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>>> {fir.bindc_name = "aaa"}) { +! CHECK: %[[VAL_1:.*]] = fir.load %[[VAL_0]] : !fir.ref>>> +! CHECK: %[[VAL_2:.*]] = fir.box_elesize %[[VAL_1]] : (!fir.box>>) -> index +! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_0]] typeparams %[[VAL_2]] {fortran_attrs = #{{.*}}, uniq_name = "_QFsub01Eaaa"} : (!fir.ref>>>, index) -> (!fir.ref>>>, !fir.ref>>>) +! CHECK: omp.parallel { +! CHECK: %[[VAL_4:.*]] = fir.alloca !fir.box>> {bindc_name = "aaa", pinned, uniq_name = "_QFsub01Eaaa"} +! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref>>> +! CHECK: %[[VAL_6:.*]] = fir.box_addr %[[VAL_5]] : (!fir.box>>) -> !fir.heap> +! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_6]] : (!fir.heap>) -> i64 +! CHECK: %[[VAL_8:.*]] = arith.constant 0 : i64 +! CHECK: %[[VAL_9:.*]] = arith.cmpi ne, %[[VAL_7]], %[[VAL_8]] : i64 +! CHECK: fir.if %[[VAL_9]] { +! CHECK: %[[VAL_10:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_11:.*]] = arith.cmpi sgt, %[[VAL_2]], %[[VAL_10]] : index +! CHECK: %[[VAL_12:.*]] = arith.select %[[VAL_11]], %[[VAL_2]], %[[VAL_10]] : index +! CHECK: %[[VAL_13:.*]] = fir.allocmem !fir.char<1,?>(%[[VAL_12]] : index) {fir.must_be_heap = true, uniq_name = "_QFsub01Eaaa.alloc"} +! CHECK: %[[VAL_14:.*]] = fir.embox %[[VAL_13]] typeparams %[[VAL_12]] : (!fir.heap>, index) -> !fir.box>> +! CHECK: fir.store %[[VAL_14]] to %[[VAL_4]] : !fir.ref>>> +! CHECK: } else { +! CHECK: %[[VAL_15:.*]] = fir.zero_bits !fir.heap> +! CHECK: %[[VAL_16:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_17:.*]] = fir.embox %[[VAL_15]] typeparams %[[VAL_16]] : (!fir.heap>, index) -> !fir.box>> +! CHECK: fir.store %[[VAL_17]] to %[[VAL_4]] : !fir.ref>>> +! CHECK: } +! CHECK: %[[VAL_18:.*]]:2 = hlfir.declare %[[VAL_4]] {fortran_attrs = #{{.*}}, uniq_name = "_QFsub01Eaaa"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) +! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_18]]#0 : !fir.ref>>> +! CHECK: %[[VAL_20:.*]] = fir.box_addr %[[VAL_19]] : (!fir.box>>) -> !fir.heap> +! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_20]] : (!fir.heap>) -> i64 +! CHECK: %[[VAL_22:.*]] = arith.constant 0 : i64 +! CHECK: %[[VAL_23:.*]] = arith.cmpi ne, %[[VAL_21]], %[[VAL_22]] : i64 +! CHECK: fir.if %[[VAL_23]] { +! CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_18]]#0 : !fir.ref>>> +! CHECK: %[[VAL_25:.*]] = fir.box_addr %[[VAL_24]] : (!fir.box>>) -> !fir.heap> +! CHECK: fir.freemem %[[VAL_25]] : !fir.heap> +! CHECK: %[[VAL_26:.*]] = fir.zero_bits !fir.heap> +! CHECK: %[[VAL_27:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_28:.*]] = fir.embox %[[VAL_26]] typeparams %[[VAL_27]] : (!fir.heap>, index) -> !fir.box>> +! 
CHECK: fir.store %[[VAL_28]] to %[[VAL_18]]#0 : !fir.ref>>> +! CHECK: } +! CHECK: omp.terminator +! CHECK: } +! CHECK: return +! CHECK: } + +subroutine sub01(aaa) + character(*),allocatable :: aaa + !$omp parallel private(aaa) + !$omp end parallel +end subroutine + +! CHECK-LABEL: func.func @_QPsub02( +! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>>> {fir.bindc_name = "bbb"}) { +! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {fortran_attrs = #{{.*}}, uniq_name = "_QFsub02Ebbb"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) +! CHECK: omp.parallel { +! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.box>> {bindc_name = "bbb", pinned, uniq_name = "_QFsub02Ebbb"} +! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_1]]#0 : !fir.ref>>> +! CHECK: %[[VAL_4:.*]] = fir.box_addr %[[VAL_3]] : (!fir.box>>) -> !fir.heap> +! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (!fir.heap>) -> i64 +! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i64 +! CHECK: %[[VAL_7:.*]] = arith.cmpi ne, %[[VAL_5]], %[[VAL_6]] : i64 +! CHECK: fir.if %[[VAL_7]] { +! CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_1]]#0 : !fir.ref>>> +! CHECK: %[[VAL_9:.*]] = fir.box_elesize %[[VAL_8]] : (!fir.box>>) -> index +! CHECK: %[[VAL_10:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_11:.*]] = arith.cmpi sgt, %[[VAL_9]], %[[VAL_10]] : index +! CHECK: %[[VAL_12:.*]] = arith.select %[[VAL_11]], %[[VAL_9]], %[[VAL_10]] : index +! CHECK: %[[VAL_13:.*]] = fir.allocmem !fir.char<1,?>(%[[VAL_12]] : index) {fir.must_be_heap = true, uniq_name = "_QFsub02Ebbb.alloc"} +! CHECK: %[[VAL_14:.*]] = fir.embox %[[VAL_13]] typeparams %[[VAL_12]] : (!fir.heap>, index) -> !fir.box>> +! CHECK: fir.store %[[VAL_14]] to %[[VAL_2]] : !fir.ref>>> +! CHECK: } else { +! CHECK: %[[VAL_15:.*]] = fir.zero_bits !fir.heap> +! CHECK: %[[VAL_16:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_17:.*]] = fir.embox %[[VAL_15]] typeparams %[[VAL_16]] : (!fir.heap>, index) -> !fir.box>> +! CHECK: fir.store %[[VAL_17]] to %[[VAL_2]] : !fir.ref>>> +! CHECK: } +! CHECK: %[[VAL_18:.*]]:2 = hlfir.declare %[[VAL_2]] {fortran_attrs = #{{.*}}, uniq_name = "_QFsub02Ebbb"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) +! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_18]]#0 : !fir.ref>>> +! CHECK: %[[VAL_20:.*]] = fir.box_addr %[[VAL_19]] : (!fir.box>>) -> !fir.heap> +! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_20]] : (!fir.heap>) -> i64 +! CHECK: %[[VAL_22:.*]] = arith.constant 0 : i64 +! CHECK: %[[VAL_23:.*]] = arith.cmpi ne, %[[VAL_21]], %[[VAL_22]] : i64 +! CHECK: fir.if %[[VAL_23]] { +! CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_18]]#0 : !fir.ref>>> +! CHECK: %[[VAL_25:.*]] = fir.box_addr %[[VAL_24]] : (!fir.box>>) -> !fir.heap> +! CHECK: fir.freemem %[[VAL_25]] : !fir.heap> +! CHECK: %[[VAL_26:.*]] = fir.zero_bits !fir.heap> +! CHECK: %[[VAL_27:.*]] = arith.constant 0 : index +! CHECK: %[[VAL_28:.*]] = fir.embox %[[VAL_26]] typeparams %[[VAL_27]] : (!fir.heap>, index) -> !fir.box>> +! CHECK: fir.store %[[VAL_28]] to %[[VAL_18]]#0 : !fir.ref>>> +! CHECK: } +! CHECK: omp.terminator +! CHECK: } +! CHECK: return +! 
CHECK: } + +subroutine sub02(bbb) + character(:),allocatable :: bbb + !$omp parallel private(bbb) + !$omp end parallel +end subroutine sub02 + From 68b863b7fa68a196bcc02d12c028dea7dcd9b97b Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 1 May 2024 08:08:45 -0400 Subject: [PATCH 09/48] [gn] port 088aa81a5454 (LLVM_HAS_LOGF128) If we want to turn this on on some platforms, we'll also want to define HAS_LOGF128 for AnalysisTest, see llvm/unittests/Analysis/CMakeLists.txt --- llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn | 1 + llvm/utils/gn/secondary/llvm/test/BUILD.gn | 1 + 2 files changed, 2 insertions(+) diff --git a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn index 80a91507fcc690..2da26d102e7723 100644 --- a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn @@ -325,6 +325,7 @@ write_cmake_config("llvm-config") { "LLVM_ENABLE_ZSTD=", "LLVM_FORCE_USE_OLD_TOOLCHAIN=", "LLVM_HAS_ATOMICS=1", + "LLVM_HAS_LOGF128=", "LLVM_HAVE_TFLITE=", "LLVM_HOST_TRIPLE=$llvm_current_triple", "LLVM_NATIVE_ARCH=$native_target", diff --git a/llvm/utils/gn/secondary/llvm/test/BUILD.gn b/llvm/utils/gn/secondary/llvm/test/BUILD.gn index 826dcf4e6ee9b1..60d6d7b8c3ce7a 100644 --- a/llvm/utils/gn/secondary/llvm/test/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/test/BUILD.gn @@ -64,6 +64,7 @@ write_lit_config("lit_site_cfg") { "LLVM_ENABLE_HTTPLIB=0", "LLVM_ENABLE_ZSTD=0", "LLVM_FORCE_VC_REVISION=", + "LLVM_HAS_LOGF128=0", "LLVM_HAVE_OPT_VIEWER_MODULES=0", "LLVM_HOST_TRIPLE=$llvm_current_triple", "LLVM_INCLUDE_DXIL_TESTS=0", From 034912d583617a7029c8efaade4422eaa4593f8c Mon Sep 17 00:00:00 2001 From: Fanbo Meng Date: Wed, 1 May 2024 08:13:10 -0400 Subject: [PATCH 10/48] [SystemZ][z/OS] Build in ASCII 64 bit mode on z/OS (#90630) Setting the correct build flags on z/OS to build LLVM as 64-bit ASCII application. --- llvm/CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt index 9163729e946eb8..b7b158996c2323 100644 --- a/llvm/CMakeLists.txt +++ b/llvm/CMakeLists.txt @@ -1158,6 +1158,11 @@ if (CMAKE_SYSTEM_NAME MATCHES "OS390") add_compile_definitions(_OPEN_SYS_FILE_EXT) # Needed for EBCDIC I/O. add_compile_definitions(_EXT) # Needed for file data. add_compile_definitions(_UNIX03_THREADS) # Multithreading support. + # Need to build LLVM as ASCII application. + # This can't be a global setting because other projects may + # need to be built in EBCDIC mode. + append("-fzos-le-char-mode=ascii" CMAKE_CXX_FLAGS CMAKE_C_FLAGS) + append("-m64" CMAKE_CXX_FLAGS CMAKE_C_FLAGS) endif() # Build with _FILE_OFFSET_BITS=64 on Solaris to match g++ >= 9. From efce8a05aa4ef0353e73e63d270a22773e090e75 Mon Sep 17 00:00:00 2001 From: Matt Devereau Date: Wed, 1 May 2024 12:18:06 +0000 Subject: [PATCH 11/48] Revert "Constant Fold logf128 calls" This reverts commit 088aa81a545421933254f19cd3c8914a0373b493. 
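For context, the reverted change (088aa81a5454) taught ConstantFolding to evaluate llvm.log.f128 on a constant operand at compile time: the fp128 constant's bits are reinterpreted as the host float128 type and handed to the host libm's logf128, guarded by the HAS_IEE754_FLOAT128 and HAS_LOGF128 checks. A rough standalone C++ sketch of that idea follows; it is not the exact LLVM code, and it assumes a host compiler with __float128 and __uint128_t plus a libm that provides logf128, which is exactly the configuration those checks probe for.

  #include <cstdint>
  #include <cstring>

  // Host libm entry point probed by check_cxx_symbol_exists(logf128 math.h ...).
  extern "C" __float128 logf128(__float128);

  // Mirrors APInt::bitsToQuad from the reverted patch: reinterpret the raw
  // 128 bits of an IEEE binary128 constant as the host __float128.
  static __float128 bitsToQuad(uint64_t Lo, uint64_t Hi) {
    unsigned __int128 Bits = ((unsigned __int128)Hi << 64) | Lo;
    __float128 Val;
    std::memcpy(&Val, &Bits, sizeof(Val));
    return Val;
  }

  // Folding llvm.log.f128(C) then amounts to logf128(bitsToQuad(C.lo, C.hi)).
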
--- llvm/CMakeLists.txt | 2 -- llvm/cmake/config-ix.cmake | 11 --------- llvm/include/llvm/ADT/APFloat.h | 13 ---------- llvm/include/llvm/ADT/APInt.h | 8 ------ llvm/include/llvm/Config/llvm-config.h.cmake | 3 --- llvm/include/llvm/Support/float128.h | 26 -------------------- llvm/lib/Analysis/CMakeLists.txt | 6 ----- llvm/lib/Analysis/ConstantFolding.cpp | 11 --------- llvm/lib/Support/APFloat.cpp | 24 ------------------ llvm/test/CMakeLists.txt | 1 - llvm/test/lit.cfg.py | 3 --- llvm/test/lit.site.cfg.py.in | 1 - llvm/unittests/Analysis/CMakeLists.txt | 6 ----- 13 files changed, 115 deletions(-) delete mode 100644 llvm/include/llvm/Support/float128.h diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt index b7b158996c2323..c06e661573ed42 100644 --- a/llvm/CMakeLists.txt +++ b/llvm/CMakeLists.txt @@ -562,8 +562,6 @@ set(LLVM_USE_STATIC_ZSTD FALSE CACHE BOOL "Use static version of zstd. Can be TR set(LLVM_ENABLE_CURL "OFF" CACHE STRING "Use libcurl for the HTTP client if available. Can be ON, OFF, or FORCE_ON") -set(LLVM_HAS_LOGF128 "OFF" CACHE STRING "Use logf128 to constant fold fp128 logarithm calls. Can be ON, OFF, or FORCE_ON") - set(LLVM_ENABLE_HTTPLIB "OFF" CACHE STRING "Use cpp-httplib HTTP server library if available. Can be ON, OFF, or FORCE_ON") set(LLVM_Z3_INSTALL_DIR "" CACHE STRING "Install directory of the Z3 solver.") diff --git a/llvm/cmake/config-ix.cmake b/llvm/cmake/config-ix.cmake index ce07cab7a56c6b..bf1b110245bb2f 100644 --- a/llvm/cmake/config-ix.cmake +++ b/llvm/cmake/config-ix.cmake @@ -257,17 +257,6 @@ else() set(LLVM_ENABLE_TERMINFO 0) endif() -if(LLVM_HAS_LOGF128) - include(CheckCXXSymbolExists) - check_cxx_symbol_exists(logf128 math.h HAS_LOGF128) - - if(LLVM_HAS_LOGF128 STREQUAL FORCE_ON AND NOT HAS_LOGF128) - message(FATAL_ERROR "Failed to configure logf128") - endif() - - set(LLVM_HAS_LOGF128 "${HAS_LOGF128}") -endif() - # function checks check_symbol_exists(arc4random "stdlib.h" HAVE_DECL_ARC4RANDOM) find_package(Backtrace) diff --git a/llvm/include/llvm/ADT/APFloat.h b/llvm/include/llvm/ADT/APFloat.h index 44a301ecc99280..deb74cb2fdeb1e 100644 --- a/llvm/include/llvm/ADT/APFloat.h +++ b/llvm/include/llvm/ADT/APFloat.h @@ -19,7 +19,6 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FloatingPointMode.h" #include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/float128.h" #include #define APFLOAT_DISPATCH_ON_SEMANTICS(METHOD_CALL) \ @@ -355,9 +354,6 @@ class IEEEFloat final : public APFloatBase { Expected convertFromString(StringRef, roundingMode); APInt bitcastToAPInt() const; double convertToDouble() const; -#ifdef HAS_IEE754_FLOAT128 - float128 convertToQuad() const; -#endif float convertToFloat() const; /// @} @@ -1222,15 +1218,6 @@ class APFloat : public APFloatBase { /// shorter semantics, like IEEEsingle and others. double convertToDouble() const; - /// Converts this APFloat to host float value. - /// - /// \pre The APFloat must be built using semantics, that can be represented by - /// the host float type without loss of precision. It can be IEEEquad and - /// shorter semantics, like IEEEdouble and others. -#ifdef HAS_IEE754_FLOAT128 - float128 convertToQuad() const; -#endif - /// Converts this APFloat to host float value. 
/// /// \pre The APFloat must be built using semantics, that can be represented by diff --git a/llvm/include/llvm/ADT/APInt.h b/llvm/include/llvm/ADT/APInt.h index 4e543d6e7618b5..8d3c029b2e7e91 100644 --- a/llvm/include/llvm/ADT/APInt.h +++ b/llvm/include/llvm/ADT/APInt.h @@ -17,7 +17,6 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/MathExtras.h" -#include "llvm/Support/float128.h" #include #include #include @@ -1671,13 +1670,6 @@ class [[nodiscard]] APInt { /// any bit width. Exactly 64 bits will be translated. double bitsToDouble() const { return llvm::bit_cast(getWord(0)); } -#ifdef HAS_IEE754_FLOAT128 - float128 bitsToQuad() const { - __uint128_t ul = ((__uint128_t)U.pVal[1] << 64) + U.pVal[0]; - return llvm::bit_cast(ul); - } -#endif - /// Converts APInt bits to a float /// /// The conversion does not do a translation from integer to float, it just diff --git a/llvm/include/llvm/Config/llvm-config.h.cmake b/llvm/include/llvm/Config/llvm-config.h.cmake index 629977cc11d683..6605ea60df99e1 100644 --- a/llvm/include/llvm/Config/llvm-config.h.cmake +++ b/llvm/include/llvm/Config/llvm-config.h.cmake @@ -198,7 +198,4 @@ /* Define if plugins enabled */ #cmakedefine LLVM_ENABLE_PLUGINS -/* Define if logf128 is available */ -#cmakedefine LLVM_HAS_LOGF128 - #endif diff --git a/llvm/include/llvm/Support/float128.h b/llvm/include/llvm/Support/float128.h deleted file mode 100644 index e15a98dc5a6779..00000000000000 --- a/llvm/include/llvm/Support/float128.h +++ /dev/null @@ -1,26 +0,0 @@ -//===-- llvm/Support/float128.h - Compiler abstraction support --*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_FLOAT128 -#define LLVM_FLOAT128 - -namespace llvm { - -#if defined(__clang__) && defined(__FLOAT128__) && \ - defined(__SIZEOF_INT128__) && !defined(__LONG_DOUBLE_IBM128__) -#define HAS_IEE754_FLOAT128 -typedef __float128 float128; -#elif defined(__FLOAT128__) && defined(__SIZEOF_INT128__) && \ - !defined(__LONG_DOUBLE_IBM128__) && \ - (defined(__GNUC__) || defined(__GNUG__)) -#define HAS_IEE754_FLOAT128 -typedef _Float128 float128; -#endif - -} // namespace llvm -#endif // LLVM_FLOAT128 diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt index 74476cb5440c61..474b8d20fde16f 100644 --- a/llvm/lib/Analysis/CMakeLists.txt +++ b/llvm/lib/Analysis/CMakeLists.txt @@ -159,9 +159,3 @@ add_llvm_component_library(LLVMAnalysis Support TargetParser ) - -include(CheckCXXSymbolExists) -check_cxx_symbol_exists(logf128 math.h HAS_LOGF128) -if(HAS_LOGF128) - target_compile_definitions(LLVMAnalysis PRIVATE HAS_LOGF128) -endif() diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp index bb4b334b96ee6e..749374a3aa48af 100644 --- a/llvm/lib/Analysis/ConstantFolding.cpp +++ b/llvm/lib/Analysis/ConstantFolding.cpp @@ -2089,17 +2089,6 @@ static Constant *ConstantFoldScalarCall1(StringRef Name, if (IntrinsicID == Intrinsic::canonicalize) return constantFoldCanonicalize(Ty, Call, U); -#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128) - if (Ty->isFP128Ty()) { - switch (IntrinsicID) { - default: - return nullptr; - case Intrinsic::log: - return ConstantFP::get(Ty, logf128(Op->getValueAPF().convertToQuad())); - } - } -#endif - if (!Ty->isHalfTy() && !Ty->isFloatTy() && 
!Ty->isDoubleTy()) return nullptr; diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp index 52bf3b91c55af9..0a4f5ac01553f1 100644 --- a/llvm/lib/Support/APFloat.cpp +++ b/llvm/lib/Support/APFloat.cpp @@ -3670,15 +3670,6 @@ double IEEEFloat::convertToDouble() const { return api.bitsToDouble(); } -#ifdef HAS_IEE754_FLOAT128 -float128 IEEEFloat::convertToQuad() const { - assert(semantics == (const llvm::fltSemantics *)&semIEEEquad && - "Float semantics are not IEEEquads"); - APInt api = bitcastToAPInt(); - return api.bitsToQuad(); -} -#endif - /// Integer bit is explicit in this format. Intel hardware (387 and later) /// does not support these bit patterns: /// exponent = all 1's, integer bit 0, significand 0 ("pseudoinfinity") @@ -5274,21 +5265,6 @@ double APFloat::convertToDouble() const { return Temp.getIEEE().convertToDouble(); } -#ifdef HAS_IEE754_FLOAT128 -float128 APFloat::convertToQuad() const { - if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEquad) - return getIEEE().convertToQuad(); - assert(getSemantics().isRepresentableBy(semIEEEquad) && - "Float semantics is not representable by IEEEquad"); - APFloat Temp = *this; - bool LosesInfo; - opStatus St = Temp.convert(semIEEEquad, rmNearestTiesToEven, &LosesInfo); - assert(!(St & opInexact) && !LosesInfo && "Unexpected imprecision"); - (void)St; - return Temp.getIEEE().convertToQuad(); -} -#endif - float APFloat::convertToFloat() const { if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEsingle) return getIEEE().convertToFloat(); diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt index b55fea144f2fa5..eb401351141641 100644 --- a/llvm/test/CMakeLists.txt +++ b/llvm/test/CMakeLists.txt @@ -26,7 +26,6 @@ llvm_canonicalize_cmake_booleans( LLVM_TOOL_LLVM_DRIVER_BUILD LLVM_INCLUDE_SPIRV_TOOLS_TESTS LLVM_APPEND_VC_REV - LLVM_HAS_LOGF128 ) configure_lit_site_cfg( diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py index fe1262893212fb..affd87b98c1410 100644 --- a/llvm/test/lit.cfg.py +++ b/llvm/test/lit.cfg.py @@ -617,6 +617,3 @@ def have_ld64_plugin_support(): # "OBJECT_MODE" to 'any' by default on AIX OS. 
if "system-aix" in config.available_features: config.environment["OBJECT_MODE"] = "any" - -if config.has_logf128: - config.available_features.add("has_logf128") diff --git a/llvm/test/lit.site.cfg.py.in b/llvm/test/lit.site.cfg.py.in index 0968f6214772d0..60a68b0edaf933 100644 --- a/llvm/test/lit.site.cfg.py.in +++ b/llvm/test/lit.site.cfg.py.in @@ -63,7 +63,6 @@ config.have_llvm_driver = @LLVM_TOOL_LLVM_DRIVER_BUILD@ config.spirv_tools_tests = @LLVM_INCLUDE_SPIRV_TOOLS_TESTS@ config.have_vc_rev = @LLVM_APPEND_VC_REV@ config.force_vc_rev = "@LLVM_FORCE_VC_REVISION@" -config.has_logf128 = @LLVM_HAS_LOGF128@ import lit.llvm lit.llvm.initialize(lit_config, config) diff --git a/llvm/unittests/Analysis/CMakeLists.txt b/llvm/unittests/Analysis/CMakeLists.txt index 9b3778f8a3f98f..b1aeaa6e71fd4c 100644 --- a/llvm/unittests/Analysis/CMakeLists.txt +++ b/llvm/unittests/Analysis/CMakeLists.txt @@ -80,11 +80,5 @@ if(NOT WIN32) export_executable_symbols_for_plugins(AnalysisTests) endif() -include(CheckCXXSymbolExists) -check_cxx_symbol_exists(logf128 math.h HAS_LOGF128) -if(HAS_LOGF128) - target_compile_definitions(AnalysisTests PRIVATE HAS_LOGF128) -endif() - add_subdirectory(InlineAdvisorPlugin) add_subdirectory(InlineOrderPlugin) From 9ebf2f8a67cce570d0752556fed23ff2803aef33 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 1 May 2024 08:50:35 -0400 Subject: [PATCH 12/48] Revert "[gn] port 088aa81a5454 (LLVM_HAS_LOGF128)" This reverts commit 68b863b7fa68a196bcc02d12c028dea7dcd9b97b. 088aa81a5454 was reverted in efce8a05aa4e. --- llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn | 1 - llvm/utils/gn/secondary/llvm/test/BUILD.gn | 1 - 2 files changed, 2 deletions(-) diff --git a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn index 2da26d102e7723..80a91507fcc690 100644 --- a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn @@ -325,7 +325,6 @@ write_cmake_config("llvm-config") { "LLVM_ENABLE_ZSTD=", "LLVM_FORCE_USE_OLD_TOOLCHAIN=", "LLVM_HAS_ATOMICS=1", - "LLVM_HAS_LOGF128=", "LLVM_HAVE_TFLITE=", "LLVM_HOST_TRIPLE=$llvm_current_triple", "LLVM_NATIVE_ARCH=$native_target", diff --git a/llvm/utils/gn/secondary/llvm/test/BUILD.gn b/llvm/utils/gn/secondary/llvm/test/BUILD.gn index 60d6d7b8c3ce7a..826dcf4e6ee9b1 100644 --- a/llvm/utils/gn/secondary/llvm/test/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/test/BUILD.gn @@ -64,7 +64,6 @@ write_lit_config("lit_site_cfg") { "LLVM_ENABLE_HTTPLIB=0", "LLVM_ENABLE_ZSTD=0", "LLVM_FORCE_VC_REVISION=", - "LLVM_HAS_LOGF128=0", "LLVM_HAVE_OPT_VIEWER_MODULES=0", "LLVM_HOST_TRIPLE=$llvm_current_triple", "LLVM_INCLUDE_DXIL_TESTS=0", From 0647b2a3caed3215588477dfc7aaa6bd3b4f8076 Mon Sep 17 00:00:00 2001 From: LLVM GN Syncbot Date: Wed, 1 May 2024 12:51:28 +0000 Subject: [PATCH 13/48] [gn build] Port df241b19c952 --- llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn | 1 + 1 file changed, 1 insertion(+) diff --git a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn index 25fcdc4d001512..971ceb3185ff83 100644 --- a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn @@ -312,6 +312,7 @@ copy("Headers") { "xsavesintrin.h", "xtestintrin.h", "yvals_core.h", + "zos_wrappers/builtins.h", ] outputs = [ "$clang_resource_dir/include/{{source_target_relative}}" ] } From e312f0723cebcfbc899b7b69538dfe86f426d0b4 Mon Sep 17 00:00:00 
2001 From: Joseph Huber Date: Wed, 1 May 2024 09:05:18 -0500 Subject: [PATCH 14/48] [Offload] Fix CMake detection when it is not found (#90729) Summary: This variable could be unset if not found or when building standalone. We should check for that and set it to true or false. Fixes: https://github.com/llvm/llvm-project/issues/90708 --- offload/CMakeLists.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/offload/CMakeLists.txt b/offload/CMakeLists.txt index a416ac29873f4f..42e0f5740f116d 100644 --- a/offload/CMakeLists.txt +++ b/offload/CMakeLists.txt @@ -302,7 +302,12 @@ endif() pythonize_bool(LIBOMPTARGET_OMPT_SUPPORT) -set(LIBOMPTARGET_GPU_LIBC_SUPPORT ${LLVM_LIBC_GPU_BUILD} CACHE BOOL +if(${LLVM_LIBC_GPU_BUILD}) + set(LIBOMPTARGET_HAS_LIBC TRUE) +else() + set(LIBOMPTARGET_HAS_LIBC FALSE) +endif() +set(LIBOMPTARGET_GPU_LIBC_SUPPORT ${LIBOMPTARGET_HAS_LIBC} CACHE BOOL "Libomptarget support for the GPU libc") pythonize_bool(LIBOMPTARGET_GPU_LIBC_SUPPORT) From 167b50669c03ad888bf2f00c61424bedac43a050 Mon Sep 17 00:00:00 2001 From: David Spickett Date: Wed, 1 May 2024 15:10:01 +0100 Subject: [PATCH 15/48] [libcxx][ci] In picolib build, ask clang for the normalised triple (#90722) This is needed for a workaround to make sure the link later succeeds. I don't know the reason for that but it is definitely needed. https://github.com/llvm/llvm-project/pull/89234 will/wants to correct the triple normalisation for -none- and this means that clang prior to 19, and clang 19 and above will have different answers and therefore different library paths. I don't want to bootstrap a clang just for libcxx CI, or require that anyone building for Arm do the same, so ask the compiler what the triple should be. This will be compatible with 17 and 19 when we do update to that version. I'm assuming $CC is what anyone locally would set to override the compiler, and `cc` is the binary name in our CI containers. It's not perfect but it should cover most use cases. --- libcxx/utils/ci/run-buildbot | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/libcxx/utils/ci/run-buildbot b/libcxx/utils/ci/run-buildbot index 60307a7d4f350a..e40c2b635ef908 100755 --- a/libcxx/utils/ci/run-buildbot +++ b/libcxx/utils/ci/run-buildbot @@ -217,7 +217,13 @@ function test-armv7m-picolibc() { "${@}" ${NINJA} -vC "${BUILD_DIR}/compiler-rt" install - mv "${BUILD_DIR}/install/lib/armv7m-none-unknown-eabi"/* "${BUILD_DIR}/install/lib" + + # Prior to clang 19, armv7m-none-eabi normalised to armv7m-none-unknown-eabi. + # clang 19 changed this to armv7m-unknown-none-eabi. So for as long as 18.x + # is supported, we have to ask clang what the triple will be. + NORMALISED_TARGET_TRIPLE=$(${CC-cc} --target=armv7m-none-eabi -print-target-triple) + # Without this step linking fails later in the build. + mv "${BUILD_DIR}/install/lib/${NORMALISED_TARGET_TRIPLE}"/* "${BUILD_DIR}/install/lib" check-runtimes } From cfca977427230e1731c8552810785ebdd09ca605 Mon Sep 17 00:00:00 2001 From: Tomas Matheson Date: Tue, 30 Apr 2024 13:29:09 +0100 Subject: [PATCH 16/48] [AArch64][TargetParser] autogen ArchExtKind enum (#90314) Re-land 61b2a0e3336aaa0132bbed06dc185aca4ff5d2db. Some Windows builds were failing because AArch64TargetParserDef.inc is a generated header which is included transitively into some clang components, but this information is not available to the build system and therefore there is a missing edge in the dependency graph. 
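Background for the header change below: the enum is now populated by X-macro-expanding the generated AArch64TargetParserDef.inc through the ARM_EXTENSION(NAME, ENUM) macro. A minimal self-contained C++ illustration of the pattern is shown here; the two entries are stand-ins for the generated list, not the real contents of the .inc file.

  #define ARM_EXTENSION(NAME, ENUM) ENUM,
  enum ArchExtKind : unsigned {
    AEK_NONE = 1,
    // In the real header these lines come from
    //   #include "llvm/TargetParser/AArch64TargetParserDef.inc"
    ARM_EXTENSION("crc", AEK_CRC)
    ARM_EXTENSION("sve", AEK_SVE)
    AEK_NUM_EXTENSIONS,
  };
  #undef ARM_EXTENSION

Adding or removing an Extension record in the .td file then adds or removes the corresponding enumerator automatically, which is the point of generating ArchExtKind from the tablegen data.
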
This patch incorporates the fixes described in ac1ffd3caca12c254e0b8c847aa8ce8e51b6cfbf/D142403. Thanks to ExtensionSet::toLLVMFeatureList, all values of ArchExtKind should correspond to a particular -target-feature. The valid values of -target-feature are in turn defined by SubtargetFeature defs. Therefore we can generate ArchExtKind from the tablegen data. This is done by adding an Extension class which derives from SubtargetFeature. Because the Has* FieldNames do not always correspond to the AEK_ names ("extensions", as defined in TargetParser), and AEK_ names do not always correspond to -march strings, some additional enum entries have been added to remap the names. I have renamed these to make the naming consistent, but split them into a separate PR to keep the diff reasonable (#90320) --- llvm/cmake/modules/LLVMConfig.cmake.in | 6 + .../llvm/TargetParser/AArch64TargetParser.h | 103 ++------ llvm/lib/Target/AArch64/AArch64Features.td | 241 +++++++++--------- llvm/lib/Target/ARM/ARMFeatures.td | 12 + llvm/utils/TableGen/ARMTargetDefEmitter.cpp | 11 + 5 files changed, 177 insertions(+), 196 deletions(-) diff --git a/llvm/cmake/modules/LLVMConfig.cmake.in b/llvm/cmake/modules/LLVMConfig.cmake.in index 770a9caea322e6..397bd5815b64e9 100644 --- a/llvm/cmake/modules/LLVMConfig.cmake.in +++ b/llvm/cmake/modules/LLVMConfig.cmake.in @@ -162,6 +162,12 @@ endif() if(NOT TARGET acc_gen) add_custom_target(acc_gen) endif() +if(NOT TARGET ARMTargetParserTableGen) + add_custom_target(ARMTargetParserTableGen) +endif() +if(NOT TARGET AArch64TargetParserTableGen) + add_custom_target(AArch64TargetParserTableGen) +endif() if(NOT TARGET RISCVTargetParserTableGen) add_custom_target(RISCVTargetParserTableGen) endif() diff --git a/llvm/include/llvm/TargetParser/AArch64TargetParser.h b/llvm/include/llvm/TargetParser/AArch64TargetParser.h index f372cee7633f61..04fbaf07adfbcb 100644 --- a/llvm/include/llvm/TargetParser/AArch64TargetParser.h +++ b/llvm/include/llvm/TargetParser/AArch64TargetParser.h @@ -103,90 +103,31 @@ enum CPUFeatures { static_assert(FEAT_MAX < 62, "Number of features in CPUFeatures are limited to 62 entries"); -// Arch extension modifiers for CPUs. These are labelled with their Arm ARM -// feature name (though the canonical reference for those is AArch64.td) -// clang-format off +// Each ArchExtKind correponds directly to a possible -target-feature. 
enum ArchExtKind : unsigned { - AEK_NONE = 1, - AEK_CRC = 2, // FEAT_CRC32 - AEK_CRYPTO = 3, - AEK_FP = 4, // FEAT_FP - AEK_SIMD = 5, // FEAT_AdvSIMD - AEK_FP16 = 6, // FEAT_FP16 - AEK_PROFILE = 7, // FEAT_SPE - AEK_RAS = 8, // FEAT_RAS, FEAT_RASv1p1 - AEK_LSE = 9, // FEAT_LSE - AEK_SVE = 10, // FEAT_SVE - AEK_DOTPROD = 11, // FEAT_DotProd - AEK_RCPC = 12, // FEAT_LRCPC - AEK_RDM = 13, // FEAT_RDM - AEK_SM4 = 14, // FEAT_SM4, FEAT_SM3 - AEK_SHA3 = 15, // FEAT_SHA3, FEAT_SHA512 - AEK_SHA2 = 16, // FEAT_SHA1, FEAT_SHA256 - AEK_AES = 17, // FEAT_AES, FEAT_PMULL - AEK_FP16FML = 18, // FEAT_FHM - AEK_RAND = 19, // FEAT_RNG - AEK_MTE = 20, // FEAT_MTE, FEAT_MTE2 - AEK_SSBS = 21, // FEAT_SSBS, FEAT_SSBS2 - AEK_SB = 22, // FEAT_SB - AEK_PREDRES = 23, // FEAT_SPECRES - AEK_SVE2 = 24, // FEAT_SVE2 - AEK_SVE2AES = 25, // FEAT_SVE_AES, FEAT_SVE_PMULL128 - AEK_SVE2SM4 = 26, // FEAT_SVE_SM4 - AEK_SVE2SHA3 = 27, // FEAT_SVE_SHA3 - AEK_SVE2BITPERM = 28, // FEAT_SVE_BitPerm - AEK_TME = 29, // FEAT_TME - AEK_BF16 = 30, // FEAT_BF16 - AEK_I8MM = 31, // FEAT_I8MM - AEK_F32MM = 32, // FEAT_F32MM - AEK_F64MM = 33, // FEAT_F64MM - AEK_LS64 = 34, // FEAT_LS64, FEAT_LS64_V, FEAT_LS64_ACCDATA - AEK_BRBE = 35, // FEAT_BRBE - AEK_PAUTH = 36, // FEAT_PAuth - AEK_FLAGM = 37, // FEAT_FlagM - AEK_SME = 38, // FEAT_SME - AEK_SMEF64F64 = 39, // FEAT_SME_F64F64 - AEK_SMEI16I64 = 40, // FEAT_SME_I16I64 - AEK_HBC = 41, // FEAT_HBC - AEK_MOPS = 42, // FEAT_MOPS - AEK_PERFMON = 43, // FEAT_PMUv3 - AEK_SME2 = 44, // FEAT_SME2 - AEK_SVE2p1 = 45, // FEAT_SVE2p1 - AEK_SME2p1 = 46, // FEAT_SME2p1 - AEK_B16B16 = 47, // FEAT_B16B16 - AEK_SMEF16F16 = 48, // FEAT_SMEF16F16 - AEK_CSSC = 49, // FEAT_CSSC - AEK_RCPC3 = 50, // FEAT_LRCPC3 - AEK_THE = 51, // FEAT_THE - AEK_D128 = 52, // FEAT_D128 - AEK_LSE128 = 53, // FEAT_LSE128 - AEK_SPECRES2 = 54, // FEAT_SPECRES2 - AEK_RASv2 = 55, // FEAT_RASv2 - AEK_ITE = 56, // FEAT_ITE - AEK_GCS = 57, // FEAT_GCS - AEK_FPMR = 58, // FEAT_FPMR - AEK_FP8 = 59, // FEAT_FP8 - AEK_FAMINMAX = 60, // FEAT_FAMINMAX - AEK_FP8FMA = 61, // FEAT_FP8FMA - AEK_SSVE_FP8FMA = 62, // FEAT_SSVE_FP8FMA - AEK_FP8DOT2 = 63, // FEAT_FP8DOT2 - AEK_SSVE_FP8DOT2 = 64, // FEAT_SSVE_FP8DOT2 - AEK_FP8DOT4 = 65, // FEAT_FP8DOT4 - AEK_SSVE_FP8DOT4 = 66, // FEAT_SSVE_FP8DOT4 - AEK_LUT = 67, // FEAT_LUT - AEK_SME_LUTv2 = 68, // FEAT_SME_LUTv2 - AEK_SMEF8F16 = 69, // FEAT_SME_F8F16 - AEK_SMEF8F32 = 70, // FEAT_SME_F8F32 - AEK_SMEFA64 = 71, // FEAT_SME_FA64 - AEK_CPA = 72, // FEAT_CPA - AEK_PAUTHLR = 73, // FEAT_PAuth_LR - AEK_TLBIW = 74, // FEAT_TLBIW - AEK_JSCVT = 75, // FEAT_JSCVT - AEK_FCMA = 76, // FEAT_FCMA - AEK_NUM_EXTENSIONS + AEK_NONE = 1, +#define ARM_EXTENSION(NAME, ENUM) ENUM, +#include "llvm/TargetParser/AArch64TargetParserDef.inc" + AEK_NUM_EXTENSIONS, + + // FIXME temporary fixes for inconsistent naming. + AEK_F32MM = AEK_MATMULFP32, + AEK_F64MM = AEK_MATMULFP64, + AEK_FCMA = AEK_COMPLXNUM, + AEK_FP = AEK_FPARMV8, + AEK_FP16 = AEK_FULLFP16, + AEK_I8MM = AEK_MATMULINT8, + AEK_JSCVT = AEK_JS, + AEK_PROFILE = AEK_SPE, + AEK_RASv2 = AEK_RASV2, + AEK_RAND = AEK_RANDGEN, + AEK_SIMD = AEK_NEON, + AEK_SME2p1 = AEK_SME2P1, + AEK_SVE2p1 = AEK_SVE2P1, + AEK_SME_LUTv2 = AEK_SME_LUTV2, + }; using ExtensionBitset = Bitset; -// clang-format on // Represents an extension that can be enabled with -march=+. 
// Typically these correspond to Arm Architecture extensions, unlike diff --git a/llvm/lib/Target/AArch64/AArch64Features.td b/llvm/lib/Target/AArch64/AArch64Features.td index 3a3751a85afd1e..b6c8e5f1608916 100644 --- a/llvm/lib/Target/AArch64/AArch64Features.td +++ b/llvm/lib/Target/AArch64/AArch64Features.td @@ -9,32 +9,43 @@ // //===----------------------------------------------------------------------===// +// A SubtargetFeature that can be toggled from the command line, and therefore +// has an AEK_* entry in ArmExtKind. +class Extension< + string TargetFeatureName, // String used for -target-feature. + string Spelling, // The XYZ in HasXYZ and AEK_XYZ. + string Desc, // Description. + list Implies = [] // List of dependent features. +> : SubtargetFeature +{ + string ArchExtKindSpelling = "AEK_" # Spelling; // ArchExtKind enum name. +} + // Each SubtargetFeature which corresponds to an Arm Architecture feature should // be annotated with the respective FEAT_ feature name from the Architecture // Reference Manual. If a SubtargetFeature enables instructions from multiple // Arm Architecture Features, it should list all the relevant features. Not all // FEAT_ features have a corresponding SubtargetFeature. -def FeatureFPARMv8 : SubtargetFeature<"fp-armv8", "HasFPARMv8", "true", - "Enable ARMv8 FP (FEAT_FP)">; +def FeatureFPARMv8 : Extension<"fp-armv8", "FPARMv8", "Enable ARMv8 (FEAT_FP)">; -def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true", +def FeatureNEON : Extension<"neon", "NEON", "Enable Advanced SIMD instructions (FEAT_AdvSIMD)", [FeatureFPARMv8]>; -def FeatureSM4 : SubtargetFeature< - "sm4", "HasSM4", "true", +def FeatureSM4 : Extension< + "sm4", "SM4", "Enable SM3 and SM4 support (FEAT_SM4, FEAT_SM3)", [FeatureNEON]>; -def FeatureSHA2 : SubtargetFeature< - "sha2", "HasSHA2", "true", +def FeatureSHA2 : Extension< + "sha2", "SHA2", "Enable SHA1 and SHA256 support (FEAT_SHA1, FEAT_SHA256)", [FeatureNEON]>; -def FeatureSHA3 : SubtargetFeature< - "sha3", "HasSHA3", "true", +def FeatureSHA3 : Extension< + "sha3", "SHA3", "Enable SHA512 and SHA3 support (FEAT_SHA3, FEAT_SHA512)", [FeatureNEON, FeatureSHA2]>; -def FeatureAES : SubtargetFeature< - "aes", "HasAES", "true", +def FeatureAES : Extension< + "aes", "AES", "Enable AES support (FEAT_AES, FEAT_PMULL)", [FeatureNEON]>; // Crypto has been split up and any combination is now valid (see the @@ -45,20 +56,20 @@ def FeatureAES : SubtargetFeature< // meaning anymore. We kept the Crypto definition here for backward // compatibility, and now imply features SHA2 and AES, which was the // "traditional" meaning of Crypto. 
-def FeatureCrypto : SubtargetFeature<"crypto", "HasCrypto", "true", +def FeatureCrypto : Extension<"crypto", "Crypto", "Enable cryptographic instructions", [FeatureNEON, FeatureSHA2, FeatureAES]>; -def FeatureCRC : SubtargetFeature<"crc", "HasCRC", "true", +def FeatureCRC : Extension<"crc", "CRC", "Enable ARMv8 CRC-32 checksum instructions (FEAT_CRC32)">; -def FeatureRAS : SubtargetFeature<"ras", "HasRAS", "true", +def FeatureRAS : Extension<"ras", "RAS", "Enable ARMv8 Reliability, Availability and Serviceability Extensions (FEAT_RAS, FEAT_RASv1p1)">; -def FeatureRASv2 : SubtargetFeature<"rasv2", "HasRASv2", "true", +def FeatureRASv2 : Extension<"rasv2", "RASv2", "Enable ARMv8.9-A Reliability, Availability and Serviceability Extensions (FEAT_RASv2)", [FeatureRAS]>; -def FeatureLSE : SubtargetFeature<"lse", "HasLSE", "true", +def FeatureLSE : Extension<"lse", "LSE", "Enable ARMv8.1 Large System Extension (LSE) atomic instructions (FEAT_LSE)">; def FeatureLSE2 : SubtargetFeature<"lse2", "HasLSE2", "true", @@ -70,7 +81,7 @@ def FeatureOutlineAtomics : SubtargetFeature<"outline-atomics", "OutlineAtomics" def FeatureFMV : SubtargetFeature<"fmv", "HasFMV", "true", "Enable Function Multi Versioning support.">; -def FeatureRDM : SubtargetFeature<"rdm", "HasRDM", "true", +def FeatureRDM : Extension<"rdm", "RDM", "Enable ARMv8.1 Rounding Double Multiply Add/Subtract instructions (FEAT_RDM)", [FeatureNEON]>; @@ -91,16 +102,16 @@ def FeatureVH : SubtargetFeature<"vh", "HasVH", "true", // This SubtargetFeature is special. It controls only whether codegen will turn // `llvm.readcyclecounter()` into an access to a PMUv3 System Register. The // `FEAT_PMUv3*` system registers are always available for assembly/disassembly. -def FeaturePerfMon : SubtargetFeature<"perfmon", "HasPerfMon", "true", +def FeaturePerfMon : Extension<"perfmon", "PerfMon", "Enable Code Generation for ARMv8 PMUv3 Performance Monitors extension (FEAT_PMUv3)">; -def FeatureFullFP16 : SubtargetFeature<"fullfp16", "HasFullFP16", "true", +def FeatureFullFP16 : Extension<"fullfp16", "FullFP16", "Full FP16 (FEAT_FP16)", [FeatureFPARMv8]>; -def FeatureFP16FML : SubtargetFeature<"fp16fml", "HasFP16FML", "true", +def FeatureFP16FML : Extension<"fp16fml", "FP16FML", "Enable FP16 FML instructions (FEAT_FHM)", [FeatureFullFP16]>; -def FeatureSPE : SubtargetFeature<"spe", "HasSPE", "true", +def FeatureSPE : Extension<"spe", "SPE", "Enable Statistical Profiling extension (FEAT_SPE)">; def FeaturePAN_RWV : SubtargetFeature< @@ -115,13 +126,13 @@ def FeaturePsUAO : SubtargetFeature< "uaops", "HasPsUAO", "true", def FeatureCCPP : SubtargetFeature<"ccpp", "HasCCPP", "true", "Enable v8.2 data Cache Clean to Point of Persistence (FEAT_DPB)" >; -def FeatureSVE : SubtargetFeature<"sve", "HasSVE", "true", +def FeatureSVE : Extension<"sve", "SVE", "Enable Scalable Vector Extension (SVE) instructions (FEAT_SVE)", [FeatureFullFP16]>; -def FeatureFPMR : SubtargetFeature<"fpmr", "HasFPMR", "true", +def FeatureFPMR : Extension<"fpmr", "FPMR", "Enable FPMR Register (FEAT_FPMR)">; -def FeatureFP8 : SubtargetFeature<"fp8", "HasFP8", "true", +def FeatureFP8 : Extension<"fp8", "FP8", "Enable FP8 instructions (FEAT_FP8)">; // This flag is currently still labeled as Experimental, but when fully @@ -145,33 +156,33 @@ def FeatureExperimentalZeroingPseudos def FeatureUseScalarIncVL : SubtargetFeature<"use-scalar-inc-vl", "UseScalarIncVL", "true", "Prefer inc/dec over add+cnt">; -def FeatureBF16 : SubtargetFeature<"bf16", "HasBF16", - "true", "Enable BFloat16 Extension 
(FEAT_BF16)" >; +def FeatureBF16 : Extension<"bf16", "BF16", + "Enable BFloat16 Extension (FEAT_BF16)" >; def FeatureNoSVEFPLD1R : SubtargetFeature<"no-sve-fp-ld1r", "NoSVEFPLD1R", "true", "Avoid using LD1RX instructions for FP">; -def FeatureSVE2 : SubtargetFeature<"sve2", "HasSVE2", "true", +def FeatureSVE2 : Extension<"sve2", "SVE2", "Enable Scalable Vector Extension 2 (SVE2) instructions (FEAT_SVE2)", [FeatureSVE, FeatureUseScalarIncVL]>; -def FeatureSVE2AES : SubtargetFeature<"sve2-aes", "HasSVE2AES", "true", +def FeatureSVE2AES : Extension<"sve2-aes", "SVE2AES", "Enable AES SVE2 instructions (FEAT_SVE_AES, FEAT_SVE_PMULL128)", [FeatureSVE2, FeatureAES]>; -def FeatureSVE2SM4 : SubtargetFeature<"sve2-sm4", "HasSVE2SM4", "true", +def FeatureSVE2SM4 : Extension<"sve2-sm4", "SVE2SM4", "Enable SM4 SVE2 instructions (FEAT_SVE_SM4)", [FeatureSVE2, FeatureSM4]>; -def FeatureSVE2SHA3 : SubtargetFeature<"sve2-sha3", "HasSVE2SHA3", "true", +def FeatureSVE2SHA3 : Extension<"sve2-sha3", "SVE2SHA3", "Enable SHA3 SVE2 instructions (FEAT_SVE_SHA3)", [FeatureSVE2, FeatureSHA3]>; -def FeatureSVE2BitPerm : SubtargetFeature<"sve2-bitperm", "HasSVE2BitPerm", "true", +def FeatureSVE2BitPerm : Extension<"sve2-bitperm", "SVE2BitPerm", "Enable bit permutation SVE2 instructions (FEAT_SVE_BitPerm)", [FeatureSVE2]>; -def FeatureSVE2p1: SubtargetFeature<"sve2p1", "HasSVE2p1", "true", +def FeatureSVE2p1: Extension<"sve2p1", "SVE2p1", "Enable Scalable Vector Extension 2.1 instructions", [FeatureSVE2]>; -def FeatureB16B16 : SubtargetFeature<"b16b16", "HasB16B16", "true", +def FeatureB16B16 : Extension<"b16b16", "B16B16", "Enable SVE2.1 or SME2.1 non-widening BFloat16 to BFloat16 instructions (FEAT_B16B16)", [FeatureBF16]>; def FeatureZCRegMove : SubtargetFeature<"zcm", "HasZeroCycleRegMove", "true", @@ -303,23 +314,23 @@ def FeatureForce32BitJumpTables : SubtargetFeature<"force-32bit-jump-tables", "Force32BitJumpTables", "true", "Force jump table entries to be 32-bits wide except at MinSize">; -def FeatureRCPC : SubtargetFeature<"rcpc", "HasRCPC", "true", - "Enable support for RCPC extension (FEAT_LRCPC)">; +def FeatureRCPC : Extension<"rcpc", "RCPC", + "Enable support for RCPC extension (FEAT_LRCPC)">; def FeatureUseRSqrt : SubtargetFeature< "use-reciprocal-square-root", "UseRSqrt", "true", "Use the reciprocal square root approximation">; -def FeatureDotProd : SubtargetFeature< - "dotprod", "HasDotProd", "true", +def FeatureDotProd : Extension< + "dotprod", "DotProd", "Enable dot product support (FEAT_DotProd)", [FeatureNEON]>; -def FeaturePAuth : SubtargetFeature< - "pauth", "HasPAuth", "true", +def FeaturePAuth : Extension< + "pauth", "PAuth", "Enable v8.3-A Pointer Authentication extension (FEAT_PAuth)">; -def FeatureJS : SubtargetFeature< - "jsconv", "HasJS", "true", +def FeatureJS : Extension< + "jsconv", "JS", "Enable v8.3-A JavaScript FP conversion instructions (FEAT_JSCVT)", [FeatureFPARMv8]>; @@ -327,8 +338,8 @@ def FeatureCCIDX : SubtargetFeature< "ccidx", "HasCCIDX", "true", "Enable v8.3-A Extend of the CCSIDR number of sets (FEAT_CCIDX)">; -def FeatureComplxNum : SubtargetFeature< - "complxnum", "HasComplxNum", "true", +def FeatureComplxNum : Extension< + "complxnum", "ComplxNum", "Enable v8.3-A Floating-point complex number support (FEAT_FCMA)", [FeatureNEON]>; @@ -365,8 +376,8 @@ def FeatureTLB_RMI : SubtargetFeature< "tlb-rmi", "HasTLB_RMI", "true", "Enable v8.4-A TLB Range and Maintenance Instructions (FEAT_TLBIOS, FEAT_TLBIRANGE)">; -def FeatureFlagM : SubtargetFeature< - "flagm", "HasFlagM", 
"true", +def FeatureFlagM : Extension< + "flagm", "FlagM", "Enable v8.4-A Flag Manipulation Instructions (FEAT_FlagM)">; // 8.4 RCPC enchancements: LDAPR & STLR instructions with Immediate Offset @@ -414,50 +425,50 @@ def FeatureFRInt3264 : SubtargetFeature<"fptoint", "HasFRInt3264", "true", def FeatureSpecRestrict : SubtargetFeature<"specrestrict", "HasSpecRestrict", "true", "Enable architectural speculation restriction (FEAT_CSV2_2)">; -def FeatureSB : SubtargetFeature<"sb", "HasSB", - "true", "Enable v8.5 Speculation Barrier (FEAT_SB)" >; +def FeatureSB : Extension<"sb", "SB", + "Enable v8.5 Speculation Barrier (FEAT_SB)" >; -def FeatureSSBS : SubtargetFeature<"ssbs", "HasSSBS", - "true", "Enable Speculative Store Bypass Safe bit (FEAT_SSBS, FEAT_SSBS2)" >; +def FeatureSSBS : Extension<"ssbs", "SSBS", + "Enable Speculative Store Bypass Safe bit (FEAT_SSBS, FEAT_SSBS2)" >; -def FeaturePredRes : SubtargetFeature<"predres", "HasPredRes", "true", +def FeaturePredRes : Extension<"predres", "PredRes", "Enable v8.5a execution and data prediction invalidation instructions (FEAT_SPECRES)" >; -def FeatureCacheDeepPersist : SubtargetFeature<"ccdp", "HasCCDP", - "true", "Enable v8.5 Cache Clean to Point of Deep Persistence (FEAT_DPB2)" >; +def FeatureCacheDeepPersist : Extension<"ccdp", "CCDP", + "Enable v8.5 Cache Clean to Point of Deep Persistence (FEAT_DPB2)" >; -def FeatureBranchTargetId : SubtargetFeature<"bti", "HasBTI", - "true", "Enable Branch Target Identification (FEAT_BTI)" >; +def FeatureBranchTargetId : Extension<"bti", "BTI", + "Enable Branch Target Identification (FEAT_BTI)" >; -def FeatureRandGen : SubtargetFeature<"rand", "HasRandGen", - "true", "Enable Random Number generation instructions (FEAT_RNG)" >; +def FeatureRandGen : Extension<"rand", "RandGen", + "Enable Random Number generation instructions (FEAT_RNG)" >; -def FeatureMTE : SubtargetFeature<"mte", "HasMTE", - "true", "Enable Memory Tagging Extension (FEAT_MTE, FEAT_MTE2)" >; +def FeatureMTE : Extension<"mte", "MTE", + "Enable Memory Tagging Extension (FEAT_MTE, FEAT_MTE2)" >; -def FeatureTRBE : SubtargetFeature<"trbe", "HasTRBE", - "true", "Enable Trace Buffer Extension (FEAT_TRBE)">; +def FeatureTRBE : Extension<"trbe", "TRBE", + "Enable Trace Buffer Extension (FEAT_TRBE)">; -def FeatureETE : SubtargetFeature<"ete", "HasETE", - "true", "Enable Embedded Trace Extension (FEAT_ETE)", +def FeatureETE : Extension<"ete", "ETE", + "Enable Embedded Trace Extension (FEAT_ETE)", [FeatureTRBE]>; -def FeatureTME : SubtargetFeature<"tme", "HasTME", - "true", "Enable Transactional Memory Extension (FEAT_TME)" >; +def FeatureTME : Extension<"tme", "TME", + "Enable Transactional Memory Extension (FEAT_TME)" >; def FeatureTaggedGlobals : SubtargetFeature<"tagged-globals", "AllowTaggedGlobals", "true", "Use an instruction sequence for taking the address of a global " "that allows a memory tag in the upper address bits">; -def FeatureMatMulInt8 : SubtargetFeature<"i8mm", "HasMatMulInt8", - "true", "Enable Matrix Multiply Int8 Extension (FEAT_I8MM)">; +def FeatureMatMulInt8 : Extension<"i8mm", "MatMulInt8", + "Enable Matrix Multiply Int8 Extension (FEAT_I8MM)">; -def FeatureMatMulFP32 : SubtargetFeature<"f32mm", "HasMatMulFP32", - "true", "Enable Matrix Multiply FP32 Extension (FEAT_F32MM)", [FeatureSVE]>; +def FeatureMatMulFP32 : Extension<"f32mm", "MatMulFP32", + "Enable Matrix Multiply FP32 Extension (FEAT_F32MM)", [FeatureSVE]>; -def FeatureMatMulFP64 : SubtargetFeature<"f64mm", "HasMatMulFP64", - "true", "Enable Matrix Multiply FP64 
Extension (FEAT_F64MM)", [FeatureSVE]>; +def FeatureMatMulFP64 : Extension<"f64mm", "MatMulFP64", + "Enable Matrix Multiply FP64 Extension (FEAT_F64MM)", [FeatureSVE]>; def FeatureXS : SubtargetFeature<"xs", "HasXS", "true", "Enable Armv8.7-A limited-TLB-maintenance instruction (FEAT_XS)">; @@ -468,20 +479,20 @@ def FeatureWFxT : SubtargetFeature<"wfxt", "HasWFxT", def FeatureHCX : SubtargetFeature< "hcx", "HasHCX", "true", "Enable Armv8.7-A HCRX_EL2 system register (FEAT_HCX)">; -def FeatureLS64 : SubtargetFeature<"ls64", "HasLS64", - "true", "Enable Armv8.7-A LD64B/ST64B Accelerator Extension (FEAT_LS64, FEAT_LS64_V, FEAT_LS64_ACCDATA)">; +def FeatureLS64 : Extension<"ls64", "LS64", + "Enable Armv8.7-A LD64B/ST64B Accelerator Extension (FEAT_LS64, FEAT_LS64_V, FEAT_LS64_ACCDATA)">; -def FeatureHBC : SubtargetFeature<"hbc", "HasHBC", - "true", "Enable Armv8.8-A Hinted Conditional Branches Extension (FEAT_HBC)">; +def FeatureHBC : Extension<"hbc", "HBC", + "Enable Armv8.8-A Hinted Conditional Branches Extension (FEAT_HBC)">; -def FeatureMOPS : SubtargetFeature<"mops", "HasMOPS", - "true", "Enable Armv8.8-A memcpy and memset acceleration instructions (FEAT_MOPS)">; +def FeatureMOPS : Extension<"mops", "MOPS", + "Enable Armv8.8-A memcpy and memset acceleration instructions (FEAT_MOPS)">; def FeatureNMI : SubtargetFeature<"nmi", "HasNMI", "true", "Enable Armv8.8-A Non-maskable Interrupts (FEAT_NMI, FEAT_GICv3_NMI)">; -def FeatureBRBE : SubtargetFeature<"brbe", "HasBRBE", - "true", "Enable Branch Record Buffer Extension (FEAT_BRBE)">; +def FeatureBRBE : Extension<"brbe", "BRBE", + "Enable Branch Record Buffer Extension (FEAT_BRBE)">; def FeatureSPE_EEF : SubtargetFeature<"spe-eef", "HasSPE_EEF", "true", "Enable extra register in the Statistical Profiling Extension (FEAT_SPEv1p2)">; @@ -496,57 +507,57 @@ def FeatureEnhancedCounterVirtualization : def FeatureRME : SubtargetFeature<"rme", "HasRME", "true", "Enable Realm Management Extension (FEAT_RME)">; -def FeatureSME : SubtargetFeature<"sme", "HasSME", "true", +def FeatureSME : Extension<"sme", "SME", "Enable Scalable Matrix Extension (SME) (FEAT_SME)", [FeatureBF16, FeatureUseScalarIncVL]>; -def FeatureSMEF64F64 : SubtargetFeature<"sme-f64f64", "HasSMEF64F64", "true", +def FeatureSMEF64F64 : Extension<"sme-f64f64", "SMEF64F64", "Enable Scalable Matrix Extension (SME) F64F64 instructions (FEAT_SME_F64F64)", [FeatureSME]>; -def FeatureSMEI16I64 : SubtargetFeature<"sme-i16i64", "HasSMEI16I64", "true", +def FeatureSMEI16I64 : Extension<"sme-i16i64", "SMEI16I64", "Enable Scalable Matrix Extension (SME) I16I64 instructions (FEAT_SME_I16I64)", [FeatureSME]>; -def FeatureSMEFA64 : SubtargetFeature<"sme-fa64", "HasSMEFA64", "true", +def FeatureSMEFA64 : Extension<"sme-fa64", "SMEFA64", "Enable the full A64 instruction set in streaming SVE mode (FEAT_SME_FA64)", [FeatureSME, FeatureSVE2]>; -def FeatureSME2 : SubtargetFeature<"sme2", "HasSME2", "true", +def FeatureSME2 : Extension<"sme2", "SME2", "Enable Scalable Matrix Extension 2 (SME2) instructions", [FeatureSME]>; -def FeatureSMEF16F16 : SubtargetFeature<"sme-f16f16", "HasSMEF16F16", "true", +def FeatureSMEF16F16 : Extension<"sme-f16f16", "SMEF16F16", "Enable SME non-widening Float16 instructions (FEAT_SME_F16F16)", [FeatureSME2]>; -def FeatureSME2p1 : SubtargetFeature<"sme2p1", "HasSME2p1", "true", +def FeatureSME2p1 : Extension<"sme2p1", "SME2p1", "Enable Scalable Matrix Extension 2.1 (FEAT_SME2p1) instructions", [FeatureSME2]>; -def FeatureFAMINMAX: SubtargetFeature<"faminmax", "HasFAMINMAX", 
"true", +def FeatureFAMINMAX: Extension<"faminmax", "FAMINMAX", "Enable FAMIN and FAMAX instructions (FEAT_FAMINMAX)">; -def FeatureFP8FMA : SubtargetFeature<"fp8fma", "HasFP8FMA", "true", +def FeatureFP8FMA : Extension<"fp8fma", "FP8FMA", "Enable fp8 multiply-add instructions (FEAT_FP8FMA)">; -def FeatureSSVE_FP8FMA : SubtargetFeature<"ssve-fp8fma", "HasSSVE_FP8FMA", "true", +def FeatureSSVE_FP8FMA : Extension<"ssve-fp8fma", "SSVE_FP8FMA", "Enable SVE2 fp8 multiply-add instructions (FEAT_SSVE_FP8FMA)", [FeatureSME2]>; -def FeatureFP8DOT2: SubtargetFeature<"fp8dot2", "HasFP8DOT2", "true", +def FeatureFP8DOT2: Extension<"fp8dot2", "FP8DOT2", "Enable fp8 2-way dot instructions (FEAT_FP8DOT2)">; -def FeatureSSVE_FP8DOT2 : SubtargetFeature<"ssve-fp8dot2", "HasSSVE_FP8DOT2", "true", +def FeatureSSVE_FP8DOT2 : Extension<"ssve-fp8dot2", "SSVE_FP8DOT2", "Enable SVE2 fp8 2-way dot product instructions (FEAT_SSVE_FP8DOT2)", [FeatureSME2]>; -def FeatureFP8DOT4: SubtargetFeature<"fp8dot4", "HasFP8DOT4", "true", +def FeatureFP8DOT4: Extension<"fp8dot4", "FP8DOT4", "Enable fp8 4-way dot instructions (FEAT_FP8DOT4)">; -def FeatureSSVE_FP8DOT4 : SubtargetFeature<"ssve-fp8dot4", "HasSSVE_FP8DOT4", "true", +def FeatureSSVE_FP8DOT4 : Extension<"ssve-fp8dot4", "SSVE_FP8DOT4", "Enable SVE2 fp8 4-way dot product instructions (FEAT_SSVE_FP8DOT4)", [FeatureSME2]>; -def FeatureLUT: SubtargetFeature<"lut", "HasLUT", "true", +def FeatureLUT: Extension<"lut", "LUT", "Enable Lookup Table instructions (FEAT_LUT)">; -def FeatureSME_LUTv2 : SubtargetFeature<"sme-lutv2", "HasSME_LUTv2", "true", +def FeatureSME_LUTv2 : Extension<"sme-lutv2", "SME_LUTv2", "Enable Scalable Matrix Extension (SME) LUTv2 instructions (FEAT_SME_LUTv2)">; -def FeatureSMEF8F16 : SubtargetFeature<"sme-f8f16", "HasSMEF8F16", "true", +def FeatureSMEF8F16 : Extension<"sme-f8f16", "SMEF8F16", "Enable Scalable Matrix Extension (SME) F8F16 instructions(FEAT_SME_F8F16)", [FeatureSME2, FeatureFP8]>; -def FeatureSMEF8F32 : SubtargetFeature<"sme-f8f32", "HasSMEF8F32", "true", +def FeatureSMEF8F32 : Extension<"sme-f8f32", "SMEF8F32", "Enable Scalable Matrix Extension (SME) F8F32 instructions (FEAT_SME_F8F32)", [FeatureSME2, FeatureFP8]>; def FeatureAppleA7SysReg : SubtargetFeature<"apple-a7-sysreg", "HasAppleA7SysReg", "true", @@ -558,7 +569,7 @@ def FeatureEL2VMSA : SubtargetFeature<"el2vmsa", "HasEL2VMSA", "true", def FeatureEL3 : SubtargetFeature<"el3", "HasEL3", "true", "Enable Exception Level 3">; -def FeatureCSSC : SubtargetFeature<"cssc", "HasCSSC", "true", +def FeatureCSSC : Extension<"cssc", "CSSC", "Enable Common Short Sequence Compression (CSSC) instructions (FEAT_CSSC)">; def FeatureFixCortexA53_835769 : SubtargetFeature<"fix-cortex-a53-835769", @@ -572,8 +583,8 @@ def FeatureNoBTIAtReturnTwice : SubtargetFeature<"no-bti-at-return-twice", def FeatureCHK : SubtargetFeature<"chk", "HasCHK", "true", "Enable Armv8.0-A Check Feature Status Extension (FEAT_CHK)">; -def FeatureGCS : SubtargetFeature<"gcs", "HasGCS", - "true", "Enable Armv9.4-A Guarded Call Stack Extension", [FeatureCHK]>; +def FeatureGCS : Extension<"gcs", "GCS", + "Enable Armv9.4-A Guarded Call Stack Extension", [FeatureCHK]>; def FeatureCLRBHB : SubtargetFeature<"clrbhb", "HasCLRBHB", "true", "Enable Clear BHB instruction (FEAT_CLRBHB)">; @@ -581,32 +592,32 @@ def FeatureCLRBHB : SubtargetFeature<"clrbhb", "HasCLRBHB", def FeaturePRFM_SLC : SubtargetFeature<"prfm-slc-target", "HasPRFM_SLC", "true", "Enable SLC target for PRFM instruction">; -def FeatureSPECRES2 : 
SubtargetFeature<"specres2", "HasSPECRES2", - "true", "Enable Speculation Restriction Instruction (FEAT_SPECRES2)", +def FeatureSPECRES2 : Extension<"specres2", "SPECRES2", + "Enable Speculation Restriction Instruction (FEAT_SPECRES2)", [FeaturePredRes]>; def FeatureMEC : SubtargetFeature<"mec", "HasMEC", "true", "Enable Memory Encryption Contexts Extension", [FeatureRME]>; -def FeatureITE : SubtargetFeature<"ite", "HasITE", - "true", "Enable Armv9.4-A Instrumentation Extension FEAT_ITE", [FeatureETE, +def FeatureITE : Extension<"ite", "ITE", + "Enable Armv9.4-A Instrumentation Extension FEAT_ITE", [FeatureETE, FeatureTRBE]>; -def FeatureRCPC3 : SubtargetFeature<"rcpc3", "HasRCPC3", - "true", "Enable Armv8.9-A RCPC instructions for A64 and Advanced SIMD and floating-point instruction set (FEAT_LRCPC3)", +def FeatureRCPC3 : Extension<"rcpc3", "RCPC3", + "Enable Armv8.9-A RCPC instructions for A64 and Advanced SIMD and floating-point instruction set (FEAT_LRCPC3)", [FeatureRCPC_IMMO]>; -def FeatureTHE : SubtargetFeature<"the", "HasTHE", - "true", "Enable Armv8.9-A Translation Hardening Extension (FEAT_THE)">; +def FeatureTHE : Extension<"the", "THE", + "Enable Armv8.9-A Translation Hardening Extension (FEAT_THE)">; -def FeatureLSE128 : SubtargetFeature<"lse128", "HasLSE128", - "true", "Enable Armv9.4-A 128-bit Atomic Instructions (FEAT_LSE128)", +def FeatureLSE128 : Extension<"lse128", "LSE128", + "Enable Armv9.4-A 128-bit Atomic Instructions (FEAT_LSE128)", [FeatureLSE]>; // FEAT_D128, FEAT_LVA3, FEAT_SYSREG128, and FEAT_SYSINSTR128 are mutually implicit. // Therefore group them all under a single feature flag, d128: -def FeatureD128 : SubtargetFeature<"d128", "HasD128", - "true", "Enable Armv9.4-A 128-bit Page Table Descriptors, System Registers " +def FeatureD128 : Extension<"d128", "D128", + "Enable Armv9.4-A 128-bit Page Table Descriptors, System Registers " "and Instructions (FEAT_D128, FEAT_LVA3, FEAT_SYSREG128, FEAT_SYSINSTR128)", [FeatureLSE128]>; @@ -624,13 +635,13 @@ def FeatureStpAlignedOnly : SubtargetFeature<"stp-aligned-only", "HasStpAlignedO // AArch64 2023 Architecture Extensions (v9.5-A) -def FeatureCPA : SubtargetFeature<"cpa", "HasCPA", "true", +def FeatureCPA : Extension<"cpa", "CPA", "Enable Armv9.5-A Checked Pointer Arithmetic (FEAT_CPA)">; -def FeaturePAuthLR : SubtargetFeature<"pauth-lr", "HasPAuthLR", - "true", "Enable Armv9.5-A PAC enhancements (FEAT_PAuth_LR)">; +def FeaturePAuthLR : Extension<"pauth-lr", "PAuthLR", + "Enable Armv9.5-A PAC enhancements (FEAT_PAuth_LR)">; -def FeatureTLBIW : SubtargetFeature<"tlbiw", "HasTLBIW", "true", +def FeatureTLBIW : Extension<"tlbiw", "TLBIW", "Enable ARMv9.5-A TLBI VMALL for Dirty State (FEAT_TLBIW)">; diff --git a/llvm/lib/Target/ARM/ARMFeatures.td b/llvm/lib/Target/ARM/ARMFeatures.td index 111c87838291f6..84481af650be7a 100644 --- a/llvm/lib/Target/ARM/ARMFeatures.td +++ b/llvm/lib/Target/ARM/ARMFeatures.td @@ -15,6 +15,18 @@ def ModeSoftFloat : SubtargetFeature<"soft-float","UseSoftFloat", // ARM Subtarget features. // +// This is currently only used by AArch64, but is required here because ARM and +// AArch64 share a tablegen backend for TargetParser. +class Extension< + string TargetFeatureName, // String used for -target-feature. + string Spelling, // The XYZ in HasXYZ and AEK_XYZ. + string Desc, // Description. + list Implies = [] // List of dependent features. +> : SubtargetFeature +{ + string ArchExtKindSpelling = "AEK_" # Spelling; // ArchExtKind enum name. 
+} + // Floating Point, HW Division and Neon Support // FP loads/stores/moves, shared between VFP and MVE (even in the integer-only diff --git a/llvm/utils/TableGen/ARMTargetDefEmitter.cpp b/llvm/utils/TableGen/ARMTargetDefEmitter.cpp index db87ac3336c184..05aa146b57159d 100644 --- a/llvm/utils/TableGen/ARMTargetDefEmitter.cpp +++ b/llvm/utils/TableGen/ARMTargetDefEmitter.cpp @@ -56,6 +56,17 @@ static void EmitARMTargetDef(RecordKeeper &RK, raw_ostream &OS) { for (const StringRef &Arch : ARMArchVals.keys()) OS << "ARM_ARCHITECTURE(" << Arch << ")\n"; OS << "\n#undef ARM_ARCHITECTURE\n\n"; + + // Emit information for each defined Extension; used to build ArmExtKind. + OS << "#ifndef ARM_EXTENSION\n" + << "#define ARM_EXTENSION(NAME, ENUM)\n" + << "#endif\n\n"; + for (const Record *Rec : RK.getAllDerivedDefinitions("Extension")) { + StringRef Name = Rec->getValueAsString("Name"); + std::string Enum = Rec->getValueAsString("ArchExtKindSpelling").upper(); + OS << "ARM_EXTENSION(" << Name << ", " << Enum << ")\n"; + } + OS << "\n#undef ARM_EXTENSION\n\n"; } static TableGen::Emitter::Opt From f07a2edc64650f44bc592d74bb4c99ddde3772d3 Mon Sep 17 00:00:00 2001 From: Gleb Popov <6yearold@gmail.com> Date: Wed, 1 May 2024 17:18:55 +0300 Subject: [PATCH 17/48] [lldb] Teach LocateExecutableSymbolFile to look into LOCALBASE on FreeBSD (#81355) FreeBSD ports will now install debuginfo under $LOCALBASE/lib/debug/, where $LOCALBASE is typically /usr/local. On FreeBSD search this path in addition to existing debug info paths. Relevant change on the FreeBSD side: https://reviews.freebsd.org/D43515 --- .../Default/SymbolLocatorDefault.cpp | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/lldb/source/Plugins/SymbolLocator/Default/SymbolLocatorDefault.cpp b/lldb/source/Plugins/SymbolLocator/Default/SymbolLocatorDefault.cpp index 6f0126b16cdc00..edb1d59cf42f88 100644 --- a/lldb/source/Plugins/SymbolLocator/Default/SymbolLocatorDefault.cpp +++ b/lldb/source/Plugins/SymbolLocator/Default/SymbolLocatorDefault.cpp @@ -36,6 +36,10 @@ #include "llvm/Support/FileSystem.h" #include "llvm/Support/ThreadPool.h" +#if defined(__FreeBSD__) +#include +#endif + // From MacOSX system header "mach/machine.h" typedef int cpu_type_t; typedef int cpu_subtype_t; @@ -141,6 +145,24 @@ std::optional SymbolLocatorDefault::LocateExecutableSymbolFile( FileSystem::Instance().Resolve(file_spec); debug_file_search_paths.AppendIfUnique(file_spec); } +#if defined(__FreeBSD__) + // Add $LOCALBASE/lib/debug directory, where LOCALBASE is + // usually /usr/local, but may be adjusted by the end user. + { + int mib[2]; + char buf[PATH_MAX]; + size_t len = PATH_MAX; + + mib[0] = CTL_USER; + mib[1] = USER_LOCALBASE; + if (::sysctl(mib, 2, buf, &len, NULL, 0) == 0) { + FileSpec file_spec("/lib/debug"); + file_spec.PrependPathComponent(StringRef(buf)); + FileSystem::Instance().Resolve(file_spec); + debug_file_search_paths.AppendIfUnique(file_spec); + } + } +#endif // __FreeBSD__ #endif #endif // _WIN32 } From be5075ab8daf58a0e981e6bda9579a86fba9a748 Mon Sep 17 00:00:00 2001 From: "Yaxun (Sam) Liu" Date: Wed, 1 May 2024 10:24:23 -0400 Subject: [PATCH 18/48] [CUDA] make kernel stub ICF-proof (#90155) MSVC linker merges functions having comdat which have identical set of instructions. CUDA uses kernel stub function as key to look up kernels in device executables. If kernel stub function for different kernels are merged by ICF, incorrect kernels will be launched. 
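As an illustration only (this reproducer is not part of the patch; the
kernel and variable names are invented), the hazard can be pictured with
two instantiations of one kernel template: the device bodies differ, but
each host-side stub just marshals a single pointer argument and performs
the same launch sequence, so an ICF-capable linker may treat the two
stubs as identical.

  // Hypothetical reproducer, assuming a Windows/MSVC build with ICF enabled.
  #include <cuda_runtime.h>
  #include <cstdio>

  template <typename T>
  __global__ void set_val(T *p) { *p = T(1); }

  int main() {
    int *di = nullptr;
    float *df = nullptr;
    cudaMalloc(&di, sizeof(int));
    cudaMalloc(&df, sizeof(float));

    set_val<int><<<1, 1>>>(di);   // intended: the int instantiation
    set_val<float><<<1, 1>>>(df); // if the stubs were folded, the runtime could
                                  // look up and launch the int kernel instead
    cudaDeviceSynchronize();

    int hi = 0;
    float hf = 0.0f;
    cudaMemcpy(&hi, di, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&hf, df, sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("%d %f\n", hi, hf); // a wrong launch shows up as a bogus value
    return 0;
  }
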
To prevent ICF from merging kernel stub functions, an unique global variable is created for each kernel stub function having comdat and a store is added to the kernel stub function. This makes the set of instructions in each kernel function unique. Fixes: https://github.com/llvm/llvm-project/issues/88883 --- clang/lib/CodeGen/CGCUDANV.cpp | 27 ++++++ clang/test/CodeGenCUDA/kernel-stub-name.cu | 101 +++++++++++++-------- 2 files changed, 88 insertions(+), 40 deletions(-) diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp index 370642cb3d5364..670bc4bf72cecb 100644 --- a/clang/lib/CodeGen/CGCUDANV.cpp +++ b/clang/lib/CodeGen/CGCUDANV.cpp @@ -424,6 +424,33 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF, CGM.CreateRuntimeFunction(FTy, LaunchKernelName); CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(), LaunchKernelArgs); + + // To prevent CUDA device stub functions from being merged by ICF in MSVC + // environment, create an unique global variable for each kernel and write to + // the variable in the device stub. + if (CGM.getContext().getTargetInfo().getCXXABI().isMicrosoft() && + !CGF.getLangOpts().HIP) { + llvm::Function *KernelFunction = llvm::cast(Kernel); + std::string GlobalVarName = (KernelFunction->getName() + ".id").str(); + + llvm::GlobalVariable *HandleVar = + CGM.getModule().getNamedGlobal(GlobalVarName); + if (!HandleVar) { + HandleVar = new llvm::GlobalVariable( + CGM.getModule(), CGM.Int8Ty, + /*Constant=*/false, KernelFunction->getLinkage(), + llvm::ConstantInt::get(CGM.Int8Ty, 0), GlobalVarName); + HandleVar->setDSOLocal(KernelFunction->isDSOLocal()); + HandleVar->setVisibility(KernelFunction->getVisibility()); + if (KernelFunction->hasComdat()) + HandleVar->setComdat(CGM.getModule().getOrInsertComdat(GlobalVarName)); + } + + CGF.Builder.CreateAlignedStore(llvm::ConstantInt::get(CGM.Int8Ty, 1), + HandleVar, CharUnits::One(), + /*IsVolatile=*/true); + } + CGF.EmitBranch(EndBlock); CGF.EmitBlock(EndBlock); diff --git a/clang/test/CodeGenCUDA/kernel-stub-name.cu b/clang/test/CodeGenCUDA/kernel-stub-name.cu index 23df7f5d721b56..0faea75cbbe536 100644 --- a/clang/test/CodeGenCUDA/kernel-stub-name.cu +++ b/clang/test/CodeGenCUDA/kernel-stub-name.cu @@ -2,7 +2,7 @@ // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - -x hip\ -// RUN: | FileCheck -check-prefixes=CHECK,GNU %s +// RUN: | FileCheck -check-prefixes=CHECK,GNU,GNU-HIP,HIP %s // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - -x hip\ @@ -11,7 +11,12 @@ // RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \ // RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \ // RUN: %t -o - -x hip\ -// RUN: | FileCheck -check-prefixes=CHECK,MSVC %s +// RUN: | FileCheck -check-prefixes=CHECK,MSVC,MSVC-HIP,HIP %s + +// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \ +// RUN: -aux-triple nvptx64 -fcuda-include-gpubinary \ +// RUN: %t -target-sdk-version=9.2 -o - \ +// RUN: | FileCheck -check-prefixes=CHECK,MSVC,CUDA %s // RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \ // RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \ @@ -22,19 +27,23 @@ // Check kernel handles are emitted for non-MSVC target but not for MSVC target. 
-// GNU: @[[HCKERN:ckernel]] = constant ptr @[[CSTUB:__device_stub__ckernel]], align 8 -// GNU: @[[HNSKERN:_ZN2ns8nskernelEv]] = constant ptr @[[NSSTUB:_ZN2ns23__device_stub__nskernelEv]], align 8 -// GNU: @[[HTKERN:_Z10kernelfuncIiEvv]] = linkonce_odr constant ptr @[[TSTUB:_Z25__device_stub__kernelfuncIiEvv]], comdat, align 8 -// GNU: @[[HDKERN:_Z11kernel_declv]] = external constant ptr, align 8 -// GNU: @[[HTDKERN:_Z20template_kernel_declIiEvT_]] = external constant ptr, align 8 - -// MSVC: @[[HCKERN:ckernel]] = dso_local constant ptr @[[CSTUB:__device_stub__ckernel]], align 8 -// MSVC: @[[HNSKERN:"\?nskernel@ns@@YAXXZ.*"]] = dso_local constant ptr @[[NSSTUB:"\?__device_stub__nskernel@ns@@YAXXZ"]], align 8 -// MSVC: @[[HTKERN:"\?\?\$kernelfunc@H@@YAXXZ.*"]] = linkonce_odr dso_local constant ptr @[[TSTUB:"\?\?\$__device_stub__kernelfunc@H@@YAXXZ.*"]], comdat, align 8 -// MSVC: @[[HDKERN:"\?kernel_decl@@YAXXZ.*"]] = external dso_local constant ptr, align 8 -// MSVC: @[[HTDKERN:"\?\?\$template_kernel_decl@H@@YAXH.*"]] = external dso_local constant ptr, align 8 +// GNU-HIP: @[[HCKERN:ckernel]] = constant ptr @[[CSTUB:__device_stub__ckernel]], align 8 +// GNU-HIP: @[[HNSKERN:_ZN2ns8nskernelEv]] = constant ptr @[[NSSTUB:_ZN2ns23__device_stub__nskernelEv]], align 8 +// GNU-HIP: @[[HTKERN:_Z10kernelfuncIiEvv]] = linkonce_odr constant ptr @[[TSTUB:_Z25__device_stub__kernelfuncIiEvv]], comdat, align 8 +// GNU-HIP: @[[HDKERN:_Z11kernel_declv]] = external constant ptr, align 8 +// GNU-HIP: @[[HTDKERN:_Z20template_kernel_declIiEvT_]] = external constant ptr, align 8 + +// MSVC-HIP: @[[HCKERN:ckernel]] = dso_local constant ptr @[[CSTUB:__device_stub__ckernel]], align 8 +// MSVC-HIP: @[[HNSKERN:"\?nskernel@ns@@YAXXZ.*"]] = dso_local constant ptr @[[NSSTUB:"\?__device_stub__nskernel@ns@@YAXXZ"]], align 8 +// MSVC-HIP: @[[HTKERN:"\?\?\$kernelfunc@H@@YAXXZ.*"]] = linkonce_odr dso_local constant ptr @[[TSTUB:"\?\?\$__device_stub__kernelfunc@H@@YAXXZ.*"]], comdat, align 8 +// MSVC-HIP: @[[HDKERN:"\?kernel_decl@@YAXXZ.*"]] = external dso_local constant ptr, align 8 +// MSVC-HIP: @[[HTDKERN:"\?\?\$template_kernel_decl@H@@YAXH.*"]] = external dso_local constant ptr, align 8 extern "C" __global__ void ckernel() {} +// CUDA: @[[HCKERN:__device_stub__ckernel\.id]] = dso_local global i8 0 +// CUDA: @[[HNSKERN:"\?__device_stub__nskernel@ns@@YAXXZ\.id"]] = dso_local global i8 0 +// CUDA: @[[HTKERN:"\?\?\$__device_stub__kernelfunc@H@@YAXXZ\.id"]] = linkonce_odr dso_local global i8 0, comdat + namespace ns { __global__ void nskernel() {} } // namespace ns @@ -60,18 +69,27 @@ extern "C" void launch(void *kern); // Non-template kernel stub functions -// CHECK: define{{.*}}@[[CSTUB]] -// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HCKERN]] +// HIP: define{{.*}}@[[CSTUB]] +// CUDA: define{{.*}}@[[CSTUB:__device_stub__ckernel]] +// HIP: call{{.*}}@hipLaunchByPtr{{.*}}@[[HCKERN]] +// CUDA: call{{.*}}@cudaLaunch{{.*}}@[[CSTUB]] +// CUDA: store volatile i8 1, ptr @[[HCKERN]], align 1 +// CHECK: ret void -// CHECK: define{{.*}}@[[NSSTUB]] -// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HNSKERN]] +// HIP: define{{.*}}@[[NSSTUB]] +// CUDA: define{{.*}}@[[NSSTUB:"\?__device_stub__nskernel@ns@@YAXXZ"]] +// HIP: call{{.*}}@hipLaunchByPtr{{.*}}@[[HNSKERN]] +// CUDA: call{{.*}}@cudaLaunch{{.*}}@[[NSSTUB]] +// CUDA: store volatile i8 1, ptr @[[HNSKERN]], align 1 +// CHECK: ret void // Check kernel stub is called for triple chevron. 
// CHECK-LABEL: define{{.*}}@fun1() // CHECK: call void @[[CSTUB]]() // CHECK: call void @[[NSSTUB]]() -// CHECK: call void @[[TSTUB]]() +// HIP: call void @[[TSTUB]]() +// CUDA: call void @[[TSTUB:"\?\?\$__device_stub__kernelfunc@H@@YAXXZ.*"]]() // GNU: call void @[[DSTUB:_Z26__device_stub__kernel_declv]]() // GNU: call void @[[TDSTUB:_Z35__device_stub__template_kernel_declIiEvT_]]( // MSVC: call void @[[DSTUB:"\?__device_stub__kernel_decl@@YAXXZ"]]() @@ -88,7 +106,10 @@ extern "C" void fun1(void) { // Template kernel stub functions // CHECK: define{{.*}}@[[TSTUB]] -// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HTKERN]] +// HIP: call{{.*}}@hipLaunchByPtr{{.*}}@[[HTKERN]] +// CUDA: call{{.*}}@cudaLaunch{{.*}}@[[TSTUB]] +// CUDA: store volatile i8 1, ptr @[[HTKERN]], align 1 +// CHECK: ret void // Check declaration of stub function for external kernel. @@ -98,11 +119,11 @@ extern "C" void fun1(void) { // Check kernel handle is used for passing the kernel as a function pointer. // CHECK-LABEL: define{{.*}}@fun2() -// CHECK: call void @launch({{.*}}[[HCKERN]] -// CHECK: call void @launch({{.*}}[[HNSKERN]] -// CHECK: call void @launch({{.*}}[[HTKERN]] -// CHECK: call void @launch({{.*}}[[HDKERN]] -// CHECK: call void @launch({{.*}}[[HTDKERN]] +// HIP: call void @launch({{.*}}[[HCKERN]] +// HIP: call void @launch({{.*}}[[HNSKERN]] +// HIP: call void @launch({{.*}}[[HTKERN]] +// HIP: call void @launch({{.*}}[[HDKERN]] +// HIP: call void @launch({{.*}}[[HTDKERN]] extern "C" void fun2() { launch((void *)ckernel); launch((void *)ns::nskernel); @@ -114,10 +135,10 @@ extern "C" void fun2() { // Check kernel handle is used for assigning a kernel to a function pointer. // CHECK-LABEL: define{{.*}}@fun3() -// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr, align 8 -// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr, align 8 -// CHECK: store ptr @[[HCKERN]], ptr @void_ptr, align 8 -// CHECK: store ptr @[[HCKERN]], ptr @void_ptr, align 8 +// HIP: store ptr @[[HCKERN]], ptr @kernel_ptr, align 8 +// HIP: store ptr @[[HCKERN]], ptr @kernel_ptr, align 8 +// HIP: store ptr @[[HCKERN]], ptr @void_ptr, align 8 +// HIP: store ptr @[[HCKERN]], ptr @void_ptr, align 8 extern "C" void fun3() { kernel_ptr = ckernel; kernel_ptr = &ckernel; @@ -129,11 +150,11 @@ extern "C" void fun3() { // used with triple chevron. // CHECK-LABEL: define{{.*}}@fun4() -// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr -// CHECK: call noundef i32 @{{.*hipConfigureCall}} -// CHECK: %[[HANDLE:.*]] = load ptr, ptr @kernel_ptr, align 8 -// CHECK: %[[STUB:.*]] = load ptr, ptr %[[HANDLE]], align 8 -// CHECK: call void %[[STUB]]() +// HIP: store ptr @[[HCKERN]], ptr @kernel_ptr +// HIP: call noundef i32 @{{.*hipConfigureCall}} +// HIP: %[[HANDLE:.*]] = load ptr, ptr @kernel_ptr, align 8 +// HIP: %[[STUB:.*]] = load ptr, ptr %[[HANDLE]], align 8 +// HIP: call void %[[STUB]]() extern "C" void fun4() { kernel_ptr = ckernel; kernel_ptr<<<1,1>>>(); @@ -142,9 +163,9 @@ extern "C" void fun4() { // Check kernel handle is passed to a function. // CHECK-LABEL: define{{.*}}@fun5() -// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr -// CHECK: %[[HANDLE:.*]] = load ptr, ptr @kernel_ptr, align 8 -// CHECK: call void @launch(ptr noundef %[[HANDLE]]) +// HIP: store ptr @[[HCKERN]], ptr @kernel_ptr +// HIP: %[[HANDLE:.*]] = load ptr, ptr @kernel_ptr, align 8 +// HIP: call void @launch(ptr noundef %[[HANDLE]]) extern "C" void fun5() { kernel_ptr = ckernel; launch((void *)kernel_ptr); @@ -152,10 +173,10 @@ extern "C" void fun5() { // Check kernel handle is registered. 
-// CHECK-LABEL: define{{.*}}@__hip_register_globals -// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HCKERN]]{{.*}}@[[CKERN]] -// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HNSKERN]]{{.*}}@[[NSKERN]] -// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HTKERN]]{{.*}}@[[TKERN]] +// HIP-LABEL: define{{.*}}@__hip_register_globals +// HIP: call{{.*}}@__hipRegisterFunction{{.*}}@[[HCKERN]]{{.*}}@[[CKERN]] +// HIP: call{{.*}}@__hipRegisterFunction{{.*}}@[[HNSKERN]]{{.*}}@[[NSKERN]] +// HIP: call{{.*}}@__hipRegisterFunction{{.*}}@[[HTKERN]]{{.*}}@[[TKERN]] // NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}__device_stub // NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}kernel_decl // NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}template_kernel_decl From f050660f4a60415cd840f7fba7ac3698c38376d0 Mon Sep 17 00:00:00 2001 From: jyu2-git Date: Wed, 1 May 2024 07:34:10 -0700 Subject: [PATCH 19/48] [OpenMP][TR12] change property of map-type modifier. (#90499) map-type change to "default" instead "ultimate" from [OpenMP5.2] The change is allowed map-type to be placed any locations within map modifiers, besides the last location in the modifiers-list, also map-type can be omitted afterward. --- .../clang/Basic/DiagnosticParseKinds.td | 5 + clang/lib/Parse/ParseOpenMP.cpp | 44 +++++++- clang/test/OpenMP/target_ast_print.cpp | 58 ++++++++++ clang/test/OpenMP/target_map_messages.cpp | 105 ++++++++++-------- 4 files changed, 162 insertions(+), 50 deletions(-) diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td index fdffb35ea0d955..44bc4e0e130de8 100644 --- a/clang/include/clang/Basic/DiagnosticParseKinds.td +++ b/clang/include/clang/Basic/DiagnosticParseKinds.td @@ -1438,6 +1438,9 @@ def err_omp_decl_in_declare_simd_variant : Error< def err_omp_sink_and_source_iteration_not_allowd: Error<" '%0 %select{sink:|source:}1' must be with '%select{omp_cur_iteration - 1|omp_cur_iteration}1'">; def err_omp_unknown_map_type : Error< "incorrect map type, expected one of 'to', 'from', 'tofrom', 'alloc', 'release', or 'delete'">; +def err_omp_more_one_map_type : Error<"map type is already specified">; +def note_previous_map_type_specified_here + : Note<"map type '%0' is previous specified here">; def err_omp_unknown_map_type_modifier : Error< "incorrect map type modifier, expected one of: 'always', 'close', 'mapper'" "%select{|, 'present'|, 'present', 'iterator'}0%select{|, 'ompx_hold'}1">; @@ -1445,6 +1448,8 @@ def err_omp_map_type_missing : Error< "missing map type">; def err_omp_map_type_modifier_missing : Error< "missing map type modifier">; +def err_omp_map_modifier_specification_list : Error< + "empty modifier-specification-list is not allowed">; def err_omp_declare_simd_inbranch_notinbranch : Error< "unexpected '%0' clause, '%1' is specified already">; def err_omp_expected_clause_argument diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp index 480201bc06f613..53d89ce2fa3e99 100644 --- a/clang/lib/Parse/ParseOpenMP.cpp +++ b/clang/lib/Parse/ParseOpenMP.cpp @@ -4228,13 +4228,20 @@ bool Parser::parseMapperModifier(SemaOpenMP::OpenMPVarListDataTy &Data) { return T.consumeClose(); } +static OpenMPMapClauseKind isMapType(Parser &P); + /// Parse map-type-modifiers in map clause. -/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) +/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] 
[map-type] : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) | /// present +/// where, map-type ::= alloc | delete | from | release | to | tofrom bool Parser::parseMapTypeModifiers(SemaOpenMP::OpenMPVarListDataTy &Data) { + bool HasMapType = false; + SourceLocation PreMapLoc = Tok.getLocation(); + StringRef PreMapName = ""; while (getCurToken().isNot(tok::colon)) { OpenMPMapModifierKind TypeModifier = isMapModifier(*this); + OpenMPMapClauseKind MapKind = isMapType(*this); if (TypeModifier == OMPC_MAP_MODIFIER_always || TypeModifier == OMPC_MAP_MODIFIER_close || TypeModifier == OMPC_MAP_MODIFIER_present || @@ -4257,6 +4264,19 @@ bool Parser::parseMapTypeModifiers(SemaOpenMP::OpenMPVarListDataTy &Data) { Diag(Data.MapTypeModifiersLoc.back(), diag::err_omp_missing_comma) << "map type modifier"; + } else if (getLangOpts().OpenMP >= 60 && MapKind != OMPC_MAP_unknown) { + if (!HasMapType) { + HasMapType = true; + Data.ExtraModifier = MapKind; + MapKind = OMPC_MAP_unknown; + PreMapLoc = Tok.getLocation(); + PreMapName = Tok.getIdentifierInfo()->getName(); + } else { + Diag(Tok, diag::err_omp_more_one_map_type); + Diag(PreMapLoc, diag::note_previous_map_type_specified_here) + << PreMapName; + } + ConsumeToken(); } else { // For the case of unknown map-type-modifier or a map-type. // Map-type is followed by a colon; the function returns when it @@ -4267,8 +4287,14 @@ bool Parser::parseMapTypeModifiers(SemaOpenMP::OpenMPVarListDataTy &Data) { continue; } // Potential map-type token as it is followed by a colon. - if (PP.LookAhead(0).is(tok::colon)) - return false; + if (PP.LookAhead(0).is(tok::colon)) { + if (getLangOpts().OpenMP >= 60) { + break; + } else { + return false; + } + } + Diag(Tok, diag::err_omp_unknown_map_type_modifier) << (getLangOpts().OpenMP >= 51 ? (getLangOpts().OpenMP >= 52 ? 2 : 1) : 0) @@ -4278,6 +4304,14 @@ bool Parser::parseMapTypeModifiers(SemaOpenMP::OpenMPVarListDataTy &Data) { if (getCurToken().is(tok::comma)) ConsumeToken(); } + if (getLangOpts().OpenMP >= 60 && !HasMapType) { + if (!Tok.is(tok::colon)) { + Diag(Tok, diag::err_omp_unknown_map_type); + ConsumeToken(); + } else { + Data.ExtraModifier = OMPC_MAP_unknown; + } + } return false; } @@ -4675,8 +4709,10 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind, // Only parse map-type-modifier[s] and map-type if a colon is present in // the map clause. 
if (ColonPresent) { + if (getLangOpts().OpenMP >= 60 && getCurToken().is(tok::colon)) + Diag(Tok, diag::err_omp_map_modifier_specification_list); IsInvalidMapperModifier = parseMapTypeModifiers(Data); - if (!IsInvalidMapperModifier) + if (getLangOpts().OpenMP < 60 && !IsInvalidMapperModifier) parseMapType(*this, Data); else SkipUntil(tok::colon, tok::annot_pragma_openmp_end, StopBeforeMatch); diff --git a/clang/test/OpenMP/target_ast_print.cpp b/clang/test/OpenMP/target_ast_print.cpp index 45907e93321a82..4e066bcf5e43a4 100644 --- a/clang/test/OpenMP/target_ast_print.cpp +++ b/clang/test/OpenMP/target_ast_print.cpp @@ -1201,6 +1201,64 @@ foo(); } #endif // OMP52 +#ifdef OMP60 + +///==========================================================================/// +// RUN: %clang_cc1 -DOMP60 -verify -Wno-vla -fopenmp -fopenmp-version=60 -ast-print %s | FileCheck %s --check-prefix OMP60 +// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -emit-pch -o %t %s +// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -std=c++11 -include-pch %t -fsyntax-only -verify -Wno-vla %s -ast-print | FileCheck %s --check-prefix OMP60 + +// RUN: %clang_cc1 -DOMP60 -verify -Wno-vla -fopenmp-simd -fopenmp-version=60 -ast-print %s | FileCheck %s --check-prefix OMP60 +// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -emit-pch -o %t %s +// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -std=c++11 -include-pch %t -fsyntax-only -verify -Wno-vla %s -ast-print | FileCheck %s --check-prefix OMP60 + +void foo() {} +template +T tmain(T argc, T *argv) { + T i; +#pragma omp target map(from always: i) + foo(); +#pragma omp target map(from, close: i) + foo(); +#pragma omp target map(always,close: i) + foo(); + return 0; +} +//OMP60: template T tmain(T argc, T *argv) { +//OMP60-NEXT: T i; +//OMP60-NEXT: #pragma omp target map(always,from: i) +//OMP60-NEXT: foo(); +//OMP60-NEXT: #pragma omp target map(close,from: i) +//OMP60-NEXT: foo(); +//OMP60-NEXT: #pragma omp target map(always,close,tofrom: i) +//OMP60-NEXT: foo(); +//OMP60-NEXT: return 0; +//OMP60-NEXT:} +//OMP60: template<> int tmain(int argc, int *argv) { +//OMP60-NEXT: int i; +//OMP60-NEXT: #pragma omp target map(always,from: i) +//OMP60-NEXT: foo(); +//OMP60-NEXT: #pragma omp target map(close,from: i) +//OMP60-NEXT: foo(); +//OMP60-NEXT: #pragma omp target map(always,close,tofrom: i) +//OMP60-NEXT: foo(); +//OMP60-NEXT: return 0; +//OMP60-NEXT:} +//OMP60: template<> char tmain(char argc, char *argv) { +//OMP60-NEXT: char i; +//OMP60-NEXT: #pragma omp target map(always,from: i) +//OMP60-NEXT: foo(); +//OMP60-NEXT: #pragma omp target map(close,from: i) +//OMP60-NEXT: foo(); +//OMP60-NEXT: #pragma omp target map(always,close,tofrom: i) +//OMP60-NEXT: foo(); +//OMP60-NEXT: return 0; +//OMP60-NEXT:} +int main (int argc, char **argv) { + return tmain(argc, &argc) + tmain(argv[0][0], argv[0]); +} +#endif // OMP60 + #ifdef OMPX // RUN: %clang_cc1 -DOMPX -verify -Wno-vla -fopenmp -fopenmp-extensions -ast-print %s | FileCheck %s --check-prefix=OMPX diff --git a/clang/test/OpenMP/target_map_messages.cpp b/clang/test/OpenMP/target_map_messages.cpp index a6776ee12c0ee2..3bd432b47e637f 100644 --- a/clang/test/OpenMP/target_map_messages.cpp +++ b/clang/test/OpenMP/target_map_messages.cpp @@ -1,34 +1,35 @@ // -fopenmp, -fno-openmp-extensions -// RUN: %clang_cc1 -verify=expected,ge50,lt51,omp,lt51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: 
%clang_cc1 -verify=expected,lt50,lt51,omp,lt51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=40 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,lt50,lt51,omp,lt51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=45 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,ge50,lt51,omp,lt51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,ge50,ge51,omp,ge51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=51 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,ge50,ge51,ge52,omp,ge52-omp,omp52 -fopenmp -fno-openmp-extensions -fopenmp-version=52 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,lt51,lt60,omp,lt51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,lt50,lt51,lt60,omp,lt51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=40 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,lt50,lt51,lt60,omp,lt51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=45 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,lt51,lt60,omp,lt51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,ge51,lt60,omp,ge51-omp -fopenmp -fno-openmp-extensions -fopenmp-version=51 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,ge51,ge52,lt60,omp,ge52-omp,omp52 -fopenmp -fno-openmp-extensions -fopenmp-version=52 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,ge52,ge60,omp,ge60-omp,omp60 -fopenmp -fno-openmp-extensions -fopenmp-version=60 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla // RUN: %clang_cc1 -DCCODE -verify -fopenmp -fno-openmp-extensions -ferror-limit 300 -x c %s -Wno-openmp -Wuninitialized -Wno-vla // -fopenmp-simd, -fno-openmp-extensions -// RUN: %clang_cc1 -verify=expected,ge50,lt51,omp,lt51-omp -fopenmp-simd -fno-openmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,lt50,lt51,omp,lt51-omp -fopenmp-simd -fno-openmp-extensions -fopenmp-version=40 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,lt50,lt51,omp,lt51-omp -fopenmp-simd -fno-openmp-extensions -fopenmp-version=45 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,ge50,lt51,omp,lt51-omp -fopenmp-simd -fno-openmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,ge50,ge51,omp,ge51-omp -fopenmp-simd -fno-openmp-extensions -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,lt51,lt60,omp,lt51-omp -fopenmp-simd -fno-openmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,lt50,lt51,lt60,omp,lt51-omp -fopenmp-simd -fno-openmp-extensions -fopenmp-version=40 
-ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,lt50,lt51,lt60,omp,lt51-omp -fopenmp-simd -fno-openmp-extensions -fopenmp-version=45 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,lt51,lt60,omp,lt51-omp -fopenmp-simd -fno-openmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,ge51,lt60,omp,ge51-omp -fopenmp-simd -fno-openmp-extensions -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla // RUN: %clang_cc1 -DCCODE -verify -fopenmp-simd -fno-openmp-extensions -ferror-limit 300 -x c %s -Wno-openmp-mapping -Wuninitialized -Wno-vla // -fopenmp -fopenmp-extensions -// RUN: %clang_cc1 -verify=expected,ge50,lt51,ompx,lt51-ompx -fopenmp -fopenmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,lt50,lt51,ompx,lt51-ompx -fopenmp -fopenmp-extensions -fopenmp-version=40 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,lt50,lt51,ompx,lt51-ompx -fopenmp -fopenmp-extensions -fopenmp-version=45 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,ge50,lt51,ompx,lt51-ompx -fopenmp -fopenmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,ge50,ge51,ompx,ge51-ompx -fopenmp -fopenmp-extensions -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,lt51,lt60,ompx,lt51-ompx -fopenmp -fopenmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,lt50,lt51,lt60,ompx,lt51-ompx -fopenmp -fopenmp-extensions -fopenmp-version=40 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,lt50,lt51,lt60,ompx,lt51-ompx -fopenmp -fopenmp-extensions -fopenmp-version=45 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,lt51,lt60,ompx,lt51-ompx -fopenmp -fopenmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,ge51,lt60,ompx,ge51-ompx -fopenmp -fopenmp-extensions -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla // RUN: %clang_cc1 -DCCODE -verify -fopenmp -fopenmp-extensions -ferror-limit 300 -x c %s -Wno-openmp -Wuninitialized -Wno-vla // -fopenmp-simd -fopenmp-extensions -// RUN: %clang_cc1 -verify=expected,ge50,lt51,ompx,lt51-ompx -fopenmp-simd -fopenmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,lt50,lt51,ompx,lt51-ompx -fopenmp-simd -fopenmp-extensions -fopenmp-version=40 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,lt50,lt51,ompx,lt51-ompx -fopenmp-simd -fopenmp-extensions -fopenmp-version=45 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,ge50,lt51,ompx,lt51-ompx -fopenmp-simd -fopenmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla -// RUN: %clang_cc1 -verify=expected,ge50,ge51,ompx,ge51-ompx -fopenmp-simd -fopenmp-extensions -ferror-limit 300 %s -Wno-openmp-target 
-Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,lt51,lt60,ompx,lt51-ompx -fopenmp-simd -fopenmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,lt50,lt51,lt60,ompx,lt51-ompx -fopenmp-simd -fopenmp-extensions -fopenmp-version=40 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,lt50,lt51,lt60,ompx,lt51-ompx -fopenmp-simd -fopenmp-extensions -fopenmp-version=45 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,lt51,lt60,ompx,lt51-ompx -fopenmp-simd -fopenmp-extensions -fopenmp-version=50 -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla +// RUN: %clang_cc1 -verify=expected,ge50,ge51,lt60,ompx,ge51-ompx -fopenmp-simd -fopenmp-extensions -ferror-limit 300 %s -Wno-openmp-target -Wuninitialized -Wno-vla // RUN: %clang_cc1 -DCCODE -verify -fopenmp-simd -fopenmp-extensions -ferror-limit 300 -x c %s -Wno-openmp-mapping -Wuninitialized -Wno-vla // Check @@ -113,7 +114,7 @@ struct SA { #pragma omp target map(b[true:true]) {} - #pragma omp target map(: c,f) // expected-error {{missing map type}} + #pragma omp target map(: c,f) // lt60-error {{missing map type}} // ge60-error {{empty modifier-specification-list is not allowed}} {} #pragma omp target map(always, tofrom: c,f) {} @@ -159,28 +160,28 @@ struct SA { // expected-error@+1 {{use of undeclared identifier 'present'}} #pragma omp target map(present) {} - // ge52-omp-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} + // ge52-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} // ge51-omp-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-omp-error@+1 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} #pragma omp target map(ompx_hold, tofrom: c,f) {} - // ge52-omp-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} + // ge52-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} // ge51-omp-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-omp-error@+1 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} #pragma omp target map(ompx_hold, tofrom: c[1:2],f) {} - // ge52-omp-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} + // ge52-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} // ge51-omp-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-omp-error@+1 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} #pragma omp target map(ompx_hold, tofrom: c,f[1:2]) {} - // ge52-omp-error@+4 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} + // ge52-error@+4 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} // expected-error@+3 {{section length is unspecified and cannot be inferred because subscripted value is not an array}} // ge51-omp-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // 
lt51-omp-error@+1 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} #pragma omp target map(ompx_hold, tofrom: c[:],f) {} - // ge52-omp-error@+4 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} + // ge52-error@+4 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} // expected-error@+3 {{section length is unspecified and cannot be inferred because subscripted value is not an array}} // ge51-omp-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-omp-error@+1 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} @@ -193,19 +194,19 @@ struct SA { {} #pragma omp target map(always, close, always, close, tofrom: a) // expected-error 2 {{same map type modifier has been specified more than once}} {} + // ge60-error@+3 {{same map type modifier has been specified more than once}} // ge51-error@+2 {{same map type modifier has been specified more than once}} // lt51-error@+1 2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} #pragma omp target map(present, present, tofrom: a) {} - // ge52-omp-error@+5 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} - // ge52-omp-error@+4 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} + // ge52-error@+4 2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} // ompx-error@+3 {{same map type modifier has been specified more than once}} // ge51-omp-error@+2 2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-omp-error@+1 2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} #pragma omp target map(ompx_hold, ompx_hold, tofrom: a) {} - // ge52-omp-error@+9 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} - // ge52-omp-error@+8 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} + // ge60-error@+9 {{same map type modifier has been specified more than once}} + // ge52-error@+8 2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} // expected-error@+7 2 {{same map type modifier has been specified more than once}} // ge51-error@+6 {{same map type modifier has been specified more than once}} // lt51-ompx-error@+5 2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'ompx_hold'}} @@ -219,34 +220,45 @@ struct SA { {} #pragma omp target map( , , tofrom: a) // expected-error {{missing map type modifier}} expected-error {{missing map type modifier}} {} - #pragma omp target map( , , : a) // expected-error {{missing map type modifier}} expected-error {{missing map type modifier}} expected-error {{missing map type}} + #pragma omp target map( , , : a) // expected-error {{missing map type modifier}} expected-error {{missing map type modifier}} lt60-error {{missing map type}} {} + // ge60-error@+4 2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator'}} // ge51-error@+3 2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-error@+2 2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} // expected-error@+1 {{incorrect map type, expected one of 'to', 
'from', 'tofrom', 'alloc', 'release', or 'delete'}} #pragma omp target map( d, f, bf: a) {} + // ge60-error@+5 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'iterator}} // expected-error@+4 {{missing map type modifier}} // ge51-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} - // expected-error@+1 {{missing map type}} + // lt60-error@+1 {{missing map type}} #pragma omp target map( , f, : a) {} - #pragma omp target map(always close: a) // expected-error {{missing map type}} omp52-error{{missing ',' after map type modifier}} + #pragma omp target map(always close: a) // lt60-error {{missing map type}} ge52-error{{missing ',' after map type modifier}} {} - #pragma omp target map(always close bf: a) // omp52-error 2 {{missing ',' after map type modifier}} expected-error {{incorrect map type, expected one of 'to', 'from', 'tofrom', 'alloc', 'release', or 'delete'}} + #pragma omp target map(always close bf: a) // ge52-error 2 {{missing ',' after map type modifier}} expected-error {{incorrect map type, expected one of 'to', 'from', 'tofrom', 'alloc', 'release', or 'delete'}} {} - // omp52-error@+4 {{missing ',' after map type modifier}} + // ge52-error@+4 {{missing ',' after map type modifier}} // ge51-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} - // expected-error@+1 {{missing map type}} + // lt60-error@+1 {{missing map type}} #pragma omp target map(always tofrom close: a) {} + // ge60-note@+4 {{map type 'tofrom' is previous specified here}} + // ge60-error@+3 {{map type is already specified}} // ge51-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-error@+1 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} #pragma omp target map(tofrom from: a) {} - #pragma omp target map(close bf: a) // omp52-error {{missing ',' after map type modifier}} expected-error {{incorrect map type, expected one of 'to', 'from', 'tofrom', 'alloc', 'release', or 'delete'}} + // ge60-note@+5 {{map type 'to' is previous specified here}} + // ge60-error@+4 {{map type is already specified}} + // ge52-error@+3 {{missing ',' after map type modifier}} + // ge51-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} + // lt51-error@+1 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} + #pragma omp target map(to always from: a) + {} + #pragma omp target map(close bf: a) // ge52-error {{missing ',' after map type modifier}} expected-error {{incorrect map type, expected one of 'to', 'from', 'tofrom', 'alloc', 'release', or 'delete'}} {} #pragma omp target map(([b[I]][bf])f) // lt50-error {{expected ',' or ']' in lambda capture list}} lt50-error {{expected ')'}} lt50-note {{to match this '('}} {} @@ -266,6 +278,7 @@ struct SA { // lt51-omp-error@+1 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} #pragma omp target map(iterator(it=0:10, it=0:20), tofrom:a) {} + // ge60-error@+7 {{expected '(' after 'iterator'}} // ge51-ompx-error@+6 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present', 'ompx_hold'}} // lt51-ompx-error@+5 {{incorrect map type modifier, expected one of: 'always', 'close', 
'mapper', 'ompx_hold'}} // lt51-error@+4 {{expected '(' after 'iterator'}} @@ -694,20 +707,20 @@ T tmain(T argc) { foo(); #pragma omp target data map(always, tofrom: x) -#pragma omp target data map(always: x) // expected-error {{missing map type}} +#pragma omp target data map(always: x) // lt60-error {{missing map type}} // ge51-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} -// expected-error@+1 {{missing map type}} +// lt60-error@+1 {{missing map type}} #pragma omp target data map(tofrom, always: x) #pragma omp target data map(always, tofrom: always, tofrom, x) #pragma omp target map(tofrom j) // expected-error {{expected ',' or ')' in 'map' clause}} foo(); #pragma omp target data map(close, tofrom: x) -#pragma omp target data map(close: x) // expected-error {{missing map type}} +#pragma omp target data map(close: x) // lt60-error {{missing map type}} // ge51-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} -// expected-error@+1 {{missing map type}} +// lt60-error@+1 {{missing map type}} #pragma omp target data map(tofrom, close: x) #pragma omp target data map(close, tofrom: close, tofrom, x) foo(); @@ -829,19 +842,19 @@ int main(int argc, char **argv) { foo(); #pragma omp target data map(always, tofrom: x) -#pragma omp target data map(always: x) // expected-error {{missing map type}} +#pragma omp target data map(always: x) // lt60-error {{missing map type}} // ge51-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} -// expected-error@+1 {{missing map type}} +// lt60-error@+1 {{missing map type}} #pragma omp target data map(tofrom, always: x) #pragma omp target data map(always, tofrom: always, tofrom, x) #pragma omp target map(tofrom j) // expected-error {{expected ',' or ')' in 'map' clause}} foo(); #pragma omp target data map(close, tofrom: x) -#pragma omp target data map(close: x) // expected-error {{missing map type}} +#pragma omp target data map(close: x) // lt60-error {{missing map type}} // ge51-error@+3 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper', 'present'}} // lt51-error@+2 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} -// expected-error@+1 {{missing map type}} +// lt60-error@+1 {{missing map type}} #pragma omp target data map(tofrom, close: x) foo(); // lt51-error@+1 {{incorrect map type modifier, expected one of: 'always', 'close', 'mapper'}} From 78270cb81bded99bebc6fd8d515bf7cbeff62db4 Mon Sep 17 00:00:00 2001 From: annamthomas Date: Wed, 1 May 2024 10:38:22 -0400 Subject: [PATCH 20/48] [UndefOrPoison] [CompileTime] Avoid IDom walk unless required. NFC (#90092) If the value is not boolean and we are checking for `Undef` or `UndefOrPoison`, we can avoid the potentially expensive IDom walk. This should improve compile time for isGuaranteedNotToBeUndefOrPoison and isGuaranteedNotToBeUndef. 
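In effect the change just hoists a cheap test in front of the potentially long loop over immediate dominators. The following standalone sketch only illustrates that control-flow shape — the types and function names are invented for this example and are not LLVM APIs; the real guard is `!includesUndef(Kind) || V->getType()->isIntegerTy()`, as shown in the diff below.

```cpp
// Standalone illustration only: invented names, not LLVM APIs. It shows the
// shape of the change -- test a cheap predicate before paying for the walk up
// the immediate-dominator chain.
#include <cstdio>

enum class Kind { Poison, UndefOrPoison };
static bool includesUndef(Kind K) { return K == Kind::UndefOrPoison; }

struct DomNode {
  DomNode *IDom;         // parent in the dominator tree, null at the root
  bool HasProvingBranch; // stands in for "a dominating branch proves V is ok"
};

static bool dominatorWalkProves(DomNode *Start, Kind K, bool ValueIsInteger) {
  // For undef queries on non-integer values the walk cannot prove anything,
  // so skip it entirely (this mirrors the guard added in the patch).
  if (includesUndef(K) && !ValueIsInteger)
    return false;
  for (DomNode *N = Start; N; N = N->IDom)
    if (N->HasProvingBranch)
      return true;
  return false;
}

int main() {
  DomNode Root{nullptr, true};
  DomNode Leaf{&Root, false};
  std::printf("%d %d\n", dominatorWalkProves(&Leaf, Kind::Poison, false),
              dominatorWalkProves(&Leaf, Kind::UndefOrPoison, false));
  return 0;
}
```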
--- llvm/lib/Analysis/ValueTracking.cpp | 46 ++++++++++++++++------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 1b461e7cfd01f0..fed2061aae3a0d 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -7289,31 +7289,35 @@ static bool isGuaranteedNotToBeUndefOrPoison( // BB1: // CtxI ; V cannot be undef or poison here auto *Dominator = DNode->getIDom(); - while (Dominator) { - auto *TI = Dominator->getBlock()->getTerminator(); - - Value *Cond = nullptr; - if (auto BI = dyn_cast_or_null(TI)) { - if (BI->isConditional()) - Cond = BI->getCondition(); - } else if (auto SI = dyn_cast_or_null(TI)) { - Cond = SI->getCondition(); - } + // This check is purely for compile time reasons: we can skip the IDom walk + // if what we are checking for includes undef and the value is not an integer. + if (!includesUndef(Kind) || V->getType()->isIntegerTy()) + while (Dominator) { + auto *TI = Dominator->getBlock()->getTerminator(); + + Value *Cond = nullptr; + if (auto BI = dyn_cast_or_null(TI)) { + if (BI->isConditional()) + Cond = BI->getCondition(); + } else if (auto SI = dyn_cast_or_null(TI)) { + Cond = SI->getCondition(); + } - if (Cond) { - if (Cond == V) - return true; - else if (!includesUndef(Kind) && isa(Cond)) { - // For poison, we can analyze further - auto *Opr = cast(Cond); - if (any_of(Opr->operands(), - [V](const Use &U) { return V == U && propagatesPoison(U); })) + if (Cond) { + if (Cond == V) return true; + else if (!includesUndef(Kind) && isa(Cond)) { + // For poison, we can analyze further + auto *Opr = cast(Cond); + if (any_of(Opr->operands(), [V](const Use &U) { + return V == U && propagatesPoison(U); + })) + return true; + } } - } - Dominator = Dominator->getIDom(); - } + Dominator = Dominator->getIDom(); + } if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC)) return true; From e22ce615fe31a78857a8574c12a32bddc6da465e Mon Sep 17 00:00:00 2001 From: Sean Perry <39927768+perry-ca@users.noreply.github.com> Date: Wed, 1 May 2024 10:39:41 -0400 Subject: [PATCH 21/48] [z/OS] treat text files as text files so auto-conversion is done (#90128) To support auto-conversion on z/OS text files need to be opened as text files. These changes will fix a number of LIT failures due to text files not being converted to the internal code page. update a number of tools so they open the text files as text files add support in the cat.py to open a text file as a text file (Windows will continue to treat all files as binary so new lines are handled correctly) add env var definitions to enable auto-conversion in the lit config file. --- clang/tools/clang-format/ClangFormat.cpp | 7 ++++--- llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp | 3 ++- llvm/tools/llvm-cxxmap/llvm-cxxmap.cpp | 9 ++++++--- llvm/tools/yaml2obj/yaml2obj.cpp | 2 +- llvm/utils/lit/lit/builtin_commands/cat.py | 18 ++++++++++++++++-- llvm/utils/lit/lit/llvm/config.py | 7 +++++++ 6 files changed, 36 insertions(+), 10 deletions(-) diff --git a/clang/tools/clang-format/ClangFormat.cpp b/clang/tools/clang-format/ClangFormat.cpp index feb733fe3c9e0b..01f7c6047726e2 100644 --- a/clang/tools/clang-format/ClangFormat.cpp +++ b/clang/tools/clang-format/ClangFormat.cpp @@ -413,8 +413,9 @@ static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) { // On Windows, overwriting a file with an open file mapping doesn't work, // so read the whole file into memory when formatting in-place. 
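For out-of-tree tools that hit the same problem, the fix follows the same pattern as the hunks below: pass `/*IsText=*/true` when opening files that contain text. A minimal sketch of that pattern (illustrative only; `dumpAsText` and its `main` are invented for this example, not part of the patch):

```cpp
// Ask MemoryBuffer for a *text* stream so that z/OS auto-conversion can
// translate the file to the internal code page before it is read.
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>

using namespace llvm;

static bool dumpAsText(StringRef Path) {
  auto BufOrErr = MemoryBuffer::getFileOrSTDIN(Path, /*IsText=*/true);
  if (std::error_code EC = BufOrErr.getError()) {
    errs() << Path << ": " << EC.message() << "\n";
    return false;
  }
  outs() << (*BufOrErr)->getBuffer();
  return true;
}

int main(int argc, char **argv) {
  return (argc > 1 && dumpAsText(argv[1])) ? 0 : 1;
}
```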
ErrorOr> CodeOrErr = - !OutputXML && Inplace ? MemoryBuffer::getFileAsStream(FileName) - : MemoryBuffer::getFileOrSTDIN(FileName); + !OutputXML && Inplace + ? MemoryBuffer::getFileAsStream(FileName) + : MemoryBuffer::getFileOrSTDIN(FileName, /*IsText=*/true); if (std::error_code EC = CodeOrErr.getError()) { errs() << EC.message() << "\n"; return true; @@ -558,7 +559,7 @@ static int dumpConfig() { // Read in the code in case the filename alone isn't enough to detect the // language. ErrorOr> CodeOrErr = - MemoryBuffer::getFileOrSTDIN(FileNames[0]); + MemoryBuffer::getFileOrSTDIN(FileNames[0], /*IsText=*/true); if (std::error_code EC = CodeOrErr.getError()) { llvm::errs() << EC.message() << "\n"; return 1; diff --git a/llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp b/llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp index c3015d895230ea..40ee59c014b09f 100644 --- a/llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp +++ b/llvm/lib/ToolDrivers/llvm-lib/LibDriver.cpp @@ -95,7 +95,8 @@ static std::vector getSearchPaths(opt::InputArgList *Args, // Opens a file. Path has to be resolved already. (used for def file) std::unique_ptr openFile(const Twine &Path) { - ErrorOr> MB = MemoryBuffer::getFile(Path); + ErrorOr> MB = + MemoryBuffer::getFile(Path, /*IsText=*/true); if (std::error_code EC = MB.getError()) { llvm::errs() << "cannot open file " << Path << ": " << EC.message() << "\n"; diff --git a/llvm/tools/llvm-cxxmap/llvm-cxxmap.cpp b/llvm/tools/llvm-cxxmap/llvm-cxxmap.cpp index 6a5646965df2cf..c5ccd64f116539 100644 --- a/llvm/tools/llvm-cxxmap/llvm-cxxmap.cpp +++ b/llvm/tools/llvm-cxxmap/llvm-cxxmap.cpp @@ -144,15 +144,18 @@ int main(int argc, const char *argv[]) { cl::HideUnrelatedOptions({&CXXMapCategory, &getColorCategory()}); cl::ParseCommandLineOptions(argc, argv, "LLVM C++ mangled name remapper\n"); - auto OldSymbolBufOrError = MemoryBuffer::getFileOrSTDIN(OldSymbolFile); + auto OldSymbolBufOrError = + MemoryBuffer::getFileOrSTDIN(OldSymbolFile, /*IsText=*/true); if (!OldSymbolBufOrError) exitWithErrorCode(OldSymbolBufOrError.getError(), OldSymbolFile); - auto NewSymbolBufOrError = MemoryBuffer::getFileOrSTDIN(NewSymbolFile); + auto NewSymbolBufOrError = + MemoryBuffer::getFileOrSTDIN(NewSymbolFile, /*IsText=*/true); if (!NewSymbolBufOrError) exitWithErrorCode(NewSymbolBufOrError.getError(), NewSymbolFile); - auto RemappingBufOrError = MemoryBuffer::getFileOrSTDIN(RemappingFile); + auto RemappingBufOrError = + MemoryBuffer::getFileOrSTDIN(RemappingFile, /*IsText=*/true); if (!RemappingBufOrError) exitWithErrorCode(RemappingBufOrError.getError(), RemappingFile); diff --git a/llvm/tools/yaml2obj/yaml2obj.cpp b/llvm/tools/yaml2obj/yaml2obj.cpp index b7f5356e22a9e6..4a060e1aad427f 100644 --- a/llvm/tools/yaml2obj/yaml2obj.cpp +++ b/llvm/tools/yaml2obj/yaml2obj.cpp @@ -130,7 +130,7 @@ int main(int argc, char **argv) { } ErrorOr> Buf = - MemoryBuffer::getFileOrSTDIN(Input); + MemoryBuffer::getFileOrSTDIN(Input, /*IsText=*/true); if (!Buf) return 1; diff --git a/llvm/utils/lit/lit/builtin_commands/cat.py b/llvm/utils/lit/lit/builtin_commands/cat.py index 37f55c0aef210b..6fb2152ef9332d 100644 --- a/llvm/utils/lit/lit/builtin_commands/cat.py +++ b/llvm/utils/lit/lit/builtin_commands/cat.py @@ -55,10 +55,24 @@ def main(argv): msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) for filename in filenames: try: - fileToCat = open(filename, "rb") - contents = fileToCat.read() + contents = None + is_text = False + try: + if sys.platform != "win32": + fileToCat = open(filename, "r") + contents = fileToCat.read() + is_text = 
True + except: + pass + + if contents is None: + fileToCat = open(filename, "rb") + contents = fileToCat.read() + if show_nonprinting: contents = convertToCaretAndMNotation(contents) + elif is_text: + contents = contents.encode() writer.write(contents) sys.stdout.flush() fileToCat.close() diff --git a/llvm/utils/lit/lit/llvm/config.py b/llvm/utils/lit/lit/llvm/config.py index 96b4f7bc86772d..1d4babc99984bf 100644 --- a/llvm/utils/lit/lit/llvm/config.py +++ b/llvm/utils/lit/lit/llvm/config.py @@ -57,6 +57,13 @@ def __init__(self, lit_config, config): self.lit_config.note("using lit tools: {}".format(path)) lit_path_displayed = True + if platform.system() == "OS/390": + self.with_environment("_BPXK_AUTOCVT", "ON") + self.with_environment("_TAG_REDIR_IN", "TXT") + self.with_environment("_TAG_REDIR_OUT", "TXT") + self.with_environment("_TAG_REDIR_ERR", "TXT") + self.with_environment("_CEE_RUNOPTS", "FILETAG(AUTOCVT,AUTOTAG) POSIX(ON)") + # Choose between lit's internal shell pipeline runner and a real shell. # If LIT_USE_INTERNAL_SHELL is in the environment, we use that as an # override. From e83c6ddf46d088713a19d9a662ad8c30d5cd207d Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Wed, 1 May 2024 07:33:47 -0700 Subject: [PATCH 22/48] [SLP][NFC]Add a test with the non profitable masked gather loads. --- .../RISCV/combined-loads-stored.ll | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 llvm/test/Transforms/SLPVectorizer/RISCV/combined-loads-stored.ll diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/combined-loads-stored.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/combined-loads-stored.ll new file mode 100644 index 00000000000000..05c3151fca543a --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/combined-loads-stored.ll @@ -0,0 +1,35 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux -mattr=+v < %s | FileCheck %s + +define void @test(ptr noalias %p, ptr %p1) { +; CHECK-LABEL: define void @test( +; CHECK-SAME: ptr noalias [[P:%.*]], ptr [[P1:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[GEP799:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 16 +; CHECK-NEXT: [[L3:%.*]] = load i16, ptr [[GEP799]], align 2 +; CHECK-NEXT: [[GEP3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 18 +; CHECK-NEXT: [[L4:%.*]] = load i16, ptr [[GEP3]], align 2 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[P]], align 2 +; CHECK-NEXT: store <2 x i16> [[TMP1]], ptr [[P1]], align 2 +; CHECK-NEXT: [[GEPS2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 4 +; CHECK-NEXT: store i16 [[L3]], ptr [[GEPS2]], align 2 +; CHECK-NEXT: [[GEPS3:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 6 +; CHECK-NEXT: store i16 [[L4]], ptr [[GEPS3]], align 2 +; CHECK-NEXT: ret void +; + %l1 = load i16, ptr %p, align 2 + %gep1 = getelementptr inbounds i8, ptr %p, i64 2 + %l2 = load i16, ptr %gep1, align 2 + %gep2 = getelementptr inbounds i8, ptr %p, i64 16 + %l3 = load i16, ptr %gep2, align 2 + %gep3 = getelementptr inbounds i8, ptr %p, i64 18 + %l4 = load i16, ptr %gep3, align 2 + store i16 %l1, ptr %p1, align 2 + %geps1 = getelementptr inbounds i8, ptr %p1, i64 2 + store i16 %l2, ptr %geps1, align 2 + %geps2 = getelementptr inbounds i8, ptr %p1, i64 4 + store i16 %l3, ptr %geps2, align 2 + %geps3 = getelementptr inbounds i8, ptr %p1, i64 6 + store i16 %l4, ptr %geps3, align 2 + ret void +} + From 39e24bdd8ee5f7dd3cce4157167e41a48896e09f Mon Sep 17 00:00:00 2001 From: Matt 
Arsenault Date: Wed, 1 May 2024 16:52:04 +0200 Subject: [PATCH 23/48] MachineLICM: Allow hoisting REG_SEQUENCE (#90638) --- llvm/lib/CodeGen/MachineLICM.cpp | 45 +++--- .../AMDGPU/global_atomics_i64_system.ll | 10 +- .../AMDGPU/machinelicm-copy-like-instrs.mir | 134 ++++++++++++++++++ .../CodeGen/AMDGPU/optimize-negated-cond.ll | 4 +- .../CodeGen/Hexagon/expand-vstorerw-undef.ll | 1 + 5 files changed, 168 insertions(+), 26 deletions(-) create mode 100644 llvm/test/CodeGen/AMDGPU/machinelicm-copy-like-instrs.mir diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp index d569a082cebe06..727a98c41bce4c 100644 --- a/llvm/lib/CodeGen/MachineLICM.cpp +++ b/llvm/lib/CodeGen/MachineLICM.cpp @@ -1264,25 +1264,32 @@ bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI, // If we have a COPY with other uses in the loop, hoist to allow the users to // also be hoisted. - Register DefReg; - if (MI.isCopy() && (DefReg = MI.getOperand(0).getReg()).isVirtual() && - MI.getOperand(1).getReg().isVirtual() && - IsLoopInvariantInst(MI, CurLoop) && - any_of(MRI->use_nodbg_instructions(MI.getOperand(0).getReg()), - [&CurLoop, this, DefReg, Cost](MachineInstr &UseMI) { - if (!CurLoop->contains(&UseMI)) - return false; - - // COPY is a cheap instruction, but if moving it won't cause high - // RP we're fine to hoist it even if the user can't be hoisted - // later Otherwise we want to check the user if it's hoistable - if (CanCauseHighRegPressure(Cost, false) && - !CurLoop->isLoopInvariant(UseMI, DefReg)) - return false; - - return true; - })) - return true; + // TODO: Handle all isCopyLike? + if (MI.isCopy() || MI.isRegSequence()) { + Register DefReg = MI.getOperand(0).getReg(); + if (DefReg.isVirtual() && + all_of(MI.uses(), + [](const MachineOperand &UseOp) { + return !UseOp.isReg() || UseOp.getReg().isVirtual(); + }) && + IsLoopInvariantInst(MI, CurLoop) && + any_of(MRI->use_nodbg_instructions(DefReg), + [&CurLoop, this, DefReg, Cost](MachineInstr &UseMI) { + if (!CurLoop->contains(&UseMI)) + return false; + + // COPY is a cheap instruction, but if moving it won't cause + // high RP we're fine to hoist it even if the user can't be + // hoisted later Otherwise we want to check the user if it's + // hoistable + if (CanCauseHighRegPressure(Cost, false) && + !CurLoop->isLoopInvariant(UseMI, DefReg)) + return false; + + return true; + })) + return true; + } // High register pressure situation, only hoist if the instruction is going // to be remat'ed. 
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll index f5c2bd6286cb8e..41a883302e8f70 100644 --- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll +++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll @@ -8907,17 +8907,17 @@ define amdgpu_kernel void @atomic_min_i64(ptr addrspace(1) %out, i64 %in) { ; SI: ; %bb.0: ; %entry ; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x0 +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: v_mov_b32_e32 v4, s3 ; SI-NEXT: v_mov_b32_e32 v5, s2 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_mov_b32 s4, s0 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v2, s10 -; SI-NEXT: v_mov_b32_e32 v3, s11 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: v_mov_b32_e32 v3, s5 ; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_mov_b32 s4, s0 +; SI-NEXT: s_mov_b32 s5, s1 ; SI-NEXT: .LBB127_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] diff --git a/llvm/test/CodeGen/AMDGPU/machinelicm-copy-like-instrs.mir b/llvm/test/CodeGen/AMDGPU/machinelicm-copy-like-instrs.mir new file mode 100644 index 00000000000000..e9945f005d2645 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/machinelicm-copy-like-instrs.mir @@ -0,0 +1,134 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4 +# RUN: llc -mtriple=amdgcn -run-pass=early-machinelicm -simplify-mir -o - %s | FileCheck %s + +# Test to check machine LICM does not hoist convergent instructions, +# DS_PERMUTE_B32 in this example. + +--- +name: licm_reg_sequence +body: | + ; CHECK-LABEL: name: licm_reg_sequence + ; CHECK: bb.0: + ; CHECK-NEXT: liveins: $vgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: S_NOP 0, implicit [[REG_SEQUENCE]] + ; CHECK-NEXT: S_CBRANCH_SCC1 %bb.1, implicit undef $scc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]] + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + liveins: $vgpr0, $vgpr1 + successors: %bb.1 + + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + + bb.1: + successors: %bb.1, %bb.2 + + %3:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1 + S_NOP 0, implicit %3 + S_CBRANCH_SCC1 %bb.1, implicit undef $scc + S_BRANCH %bb.2 + + bb.2: + $vgpr0 = COPY %3 + S_ENDPGM 0 + +... + +# Don't bother handling reg_sequence with physreg uses (is there any +# reason for these to be legal)? 
+--- +name: licm_reg_sequence_physreg_use +body: | + ; CHECK-LABEL: name: licm_reg_sequence_physreg_use + ; CHECK: bb.0: + ; CHECK-NEXT: liveins: $vgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: liveins: $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, $vgpr1, %subreg.sub1 + ; CHECK-NEXT: S_NOP 0, implicit [[REG_SEQUENCE]] + ; CHECK-NEXT: S_CBRANCH_SCC1 %bb.1, implicit undef $scc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]] + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + liveins: $vgpr0, $vgpr1 + successors: %bb.1 + + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + + bb.1: + successors: %bb.1, %bb.2 + liveins: $vgpr0 + + %3:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, $vgpr1, %subreg.sub1 + S_NOP 0, implicit %3 + S_CBRANCH_SCC1 %bb.1, implicit undef $scc + S_BRANCH %bb.2 + + bb.2: + $vgpr0 = COPY %3 + S_ENDPGM 0 + +... + +--- +name: licm_insert_subreg +body: | + ; CHECK-LABEL: name: licm_insert_subreg + ; CHECK: bb.0: + ; CHECK-NEXT: liveins: $vgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF + ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vreg_64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub0 + ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vreg_64 = INSERT_SUBREG [[INSERT_SUBREG]], [[COPY1]], %subreg.sub1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: S_NOP 0, implicit [[INSERT_SUBREG1]] + ; CHECK-NEXT: S_CBRANCH_SCC1 %bb.1, implicit undef $scc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[INSERT_SUBREG1]] + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + liveins: $vgpr0, $vgpr1 + successors: %bb.1 + + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + + bb.1: + successors: %bb.1, %bb.2 + + %3:vreg_64 = IMPLICIT_DEF + %4:vreg_64 = INSERT_SUBREG %3, %0, %subreg.sub0 + %5:vreg_64 = INSERT_SUBREG %4, %1, %subreg.sub1 + S_NOP 0, implicit %5 + S_CBRANCH_SCC1 %bb.1, implicit undef $scc + S_BRANCH %bb.2 + + bb.2: + $vgpr0_vgpr1 = COPY %5 + S_ENDPGM 0 + +... 
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll index 7c351d2b8443b1..a50a0766f67c2c 100644 --- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll +++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll @@ -8,9 +8,10 @@ define amdgpu_kernel void @negated_cond(ptr addrspace(1) %arg1) { ; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_mov_b32 s10, -1 ; GCN-NEXT: s_mov_b32 s6, 0 +; GCN-NEXT: s_mov_b32 s11, s7 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b32 s9, s5 ; GCN-NEXT: s_mov_b32 s8, s4 +; GCN-NEXT: s_mov_b32 s9, s5 ; GCN-NEXT: v_mov_b32_e32 v0, 0 ; GCN-NEXT: s_branch .LBB0_2 ; GCN-NEXT: .LBB0_1: ; %loop.exit.guard @@ -20,7 +21,6 @@ define amdgpu_kernel void @negated_cond(ptr addrspace(1) %arg1) { ; GCN-NEXT: .LBB0_2: ; %bb1 ; GCN-NEXT: ; =>This Loop Header: Depth=1 ; GCN-NEXT: ; Child Loop BB0_4 Depth 2 -; GCN-NEXT: s_mov_b32 s11, s7 ; GCN-NEXT: buffer_load_dword v1, off, s[8:11], 0 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: v_cmp_ne_u32_e64 s[2:3], 0, v1 diff --git a/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll b/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll index 867ce3b930f8fc..69ba266227265c 100644 --- a/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll +++ b/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll @@ -69,6 +69,7 @@ b18: ; preds = %b16, %b7 br label %b22 b21: ; preds = %b22 + store volatile <64 x i32> %v20, ptr null tail call void @sammy() #3 br label %b7 From 0606747c9664b353fe592069c7b00067ba52d832 Mon Sep 17 00:00:00 2001 From: Jay Foad Date: Wed, 1 May 2024 16:04:12 +0100 Subject: [PATCH 24/48] [AMDGPU] Remove some pointless fallthrough annotations --- llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp | 2 +- llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp index f0c111eaf0600c..4fd9ed2a89279a 100644 --- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp +++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp @@ -2342,7 +2342,7 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) { case 8: NeedWaitStates = SMFMA16x16WritesVGPROverlappedSrcABWaitStates; break; - case 16: [[fallthrough]]; + case 16: default: NeedWaitStates = SMFMA32x32WritesVGPROverlappedSrcABWaitStates; } diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp index e2e70ba9733b5b..ba01b8513dca7b 100644 --- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -376,14 +376,14 @@ static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) { case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM: case AMDGPU::S_LOAD_DWORDX8_IMM: return 8; - case AMDGPU::DS_READ_B32: [[fallthrough]]; - case AMDGPU::DS_READ_B32_gfx9: [[fallthrough]]; - case AMDGPU::DS_WRITE_B32: [[fallthrough]]; + case AMDGPU::DS_READ_B32: + case AMDGPU::DS_READ_B32_gfx9: + case AMDGPU::DS_WRITE_B32: case AMDGPU::DS_WRITE_B32_gfx9: return 1; - case AMDGPU::DS_READ_B64: [[fallthrough]]; - case AMDGPU::DS_READ_B64_gfx9: [[fallthrough]]; - case AMDGPU::DS_WRITE_B64: [[fallthrough]]; + case AMDGPU::DS_READ_B64: + case AMDGPU::DS_READ_B64_gfx9: + case AMDGPU::DS_WRITE_B64: case AMDGPU::DS_WRITE_B64_gfx9: return 2; default: From 92266885964f8a906a1b877932da1b0c5f0af7ee Mon Sep 17 00:00:00 2001 From: Benjamin Maxwell Date: Wed, 1 May 2024 16:09:26 +0100 Subject: [PATCH 
25/48] [mlir][ArmSME] Add a tests showing liveness issues in the tile allocator (#90447) This test shows a few cases (not at all complete) where the current ArmSME tile allocator produces incorrect results. The plan is to resolve these issues with a future tile allocator that uses liveness information. --- .../ArmSME/tile-allocation-liveness.mlir | 183 ++++++++++++++++++ 1 file changed, 183 insertions(+) create mode 100644 mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir diff --git a/mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir b/mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir new file mode 100644 index 00000000000000..2dedcb2fbc24e4 --- /dev/null +++ b/mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir @@ -0,0 +1,183 @@ +// RUN: mlir-opt %s -allocate-arm-sme-tiles -split-input-file -verify-diagnostics | FileCheck %s --check-prefix=CHECK-BAD + +// This file tests some aspects of liveness issues in the SME tile allocator. +// These tests were designed with a new liveness-based tile allocator in mind +// (where the names of test cases make more sense), with the current tile +// allocator these tests all give incorrect results (which is documented by +// `CHECK-BAD`). + +// Incorrect result! The second `move_vector_to_tile_slice` overwrites the first (which is still live). +// +// CHECK-BAD-LABEL: @constant_with_multiple_users +// CHECK-BAD: %[[ZERO_TILE:.*]] = arm_sme.zero {tile_id = 0 : i32} : vector<[4]x[4]xf32> +// CHECK-BAD: %[[INSERT_TILE_1:.*]] = arm_sme.move_vector_to_tile_slice %{{.*}} {tile_id = 0 : i32} : vector<[4]xf32> into vector<[4]x[4]xf32> +// CHECK-BAD: %[[INSERT_TILE_0:.*]] = arm_sme.move_vector_to_tile_slice %{{.*}} {tile_id = 0 : i32} : vector<[4]xf32> into vector<[4]x[4]xf32> +func.func @constant_with_multiple_users(%a: vector<[4]xf32>, %b: vector<[4]xf32>, %index: index) { + %zero = arm_sme.zero : vector<[4]x[4]xf32> + %tile_a = arm_sme.move_vector_to_tile_slice %a, %zero, %index : vector<[4]xf32> into vector<[4]x[4]xf32> + %tile_b = arm_sme.move_vector_to_tile_slice %b, %zero, %index : vector<[4]xf32> into vector<[4]x[4]xf32> + "test.some_use"(%tile_a) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_b) : (vector<[4]x[4]xf32>) -> () + return +} + +// ----- + +// (No tile IDs -- the current tile allocator ignores this case) + +// CHECK-BAD-LABEL: @value_with_multiple_users +// CHECK-BAD-NOT: tile_id +func.func @value_with_multiple_users(%tile: vector<[4]x[4]xf32>, %a: vector<[4]xf32>, %b: vector<[4]xf32>, %index: index) { + // A future allocator should error here (as `%tile` would need to be copied). 
+ %tile_a = arm_sme.move_vector_to_tile_slice %a, %tile, %index : vector<[4]xf32> into vector<[4]x[4]xf32> + %tile_b = arm_sme.move_vector_to_tile_slice %b, %tile, %index : vector<[4]xf32> into vector<[4]x[4]xf32> + "test.some_use"(%tile_a) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_b) : (vector<[4]x[4]xf32>) -> () + return +} + +// ----- + +// CHECK-BAD-LABEL: @reuse_tiles_after_initial_use +func.func @reuse_tiles_after_initial_use() { + // CHECK-BAD: arm_sme.get_tile {tile_id = 0 : i32} + // CHECK-BAD: arm_sme.get_tile {tile_id = 1 : i32} + // CHECK-BAD: arm_sme.get_tile {tile_id = 2 : i32} + // CHECK-BAD: arm_sme.get_tile {tile_id = 3 : i32} + %tile_a = arm_sme.get_tile : vector<[4]x[4]xf32> + %tile_b = arm_sme.get_tile : vector<[4]x[4]xf32> + %tile_c = arm_sme.get_tile : vector<[4]x[4]xf32> + %tile_d = arm_sme.get_tile : vector<[4]x[4]xf32> + "test.dummy"(): () -> () + "test.dummy"(): () -> () + "test.dummy"(): () -> () + "test.some_use"(%tile_a) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_b) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_c) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_d) : (vector<[4]x[4]xf32>) -> () + // -> Spills after the fourth tile (unnecessary): + // CHECK-BAD: arm_sme.zero {tile_id = 16 : i32} + // CHECK-BAD: arm_sme.zero {tile_id = 17 : i32} + // CHECK-BAD: arm_sme.zero {tile_id = 18 : i32} + // CHECK-BAD: arm_sme.zero {tile_id = 19 : i32} + // Unnecessary spills: + // expected-warning @below {{failed to allocate SME virtual tile to operation, all tile operations will go through memory, expect degraded performance}} + %tile_1 = arm_sme.zero : vector<[4]x[4]xf32> + // expected-warning @below {{failed to allocate SME virtual tile to operation, all tile operations will go through memory, expect degraded performance}} + %tile_2 = arm_sme.zero : vector<[4]x[4]xf32> + // expected-warning @below {{failed to allocate SME virtual tile to operation, all tile operations will go through memory, expect degraded performance}} + %tile_3 = arm_sme.zero : vector<[4]x[4]xf32> + // expected-warning @below {{failed to allocate SME virtual tile to operation, all tile operations will go through memory, expect degraded performance}} + %tile_4 = arm_sme.zero : vector<[4]x[4]xf32> + "test.dummy"(): () -> () + "test.dummy"(): () -> () + "test.dummy"(): () -> () + "test.some_use"(%tile_1) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_2) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_3) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_4) : (vector<[4]x[4]xf32>) -> () + return +} + +// ----- + +// Incorrect result! Both branches should yield the result via the same tile. +// +// CHECK-BAD-LABEL: @non_overlapping_branches +// CHECK-BAD: arm_sme.zero {tile_id = 0 : i32} : vector<[4]x[4]xf32> +// CHECK-BAD: arm_sme.get_tile {tile_id = 1 : i32} : vector<[4]x[4]xf32> +func.func @non_overlapping_branches(%cond: i1) { + %tile = scf.if %cond -> vector<[4]x[4]xf32> { + %zero = arm_sme.zero : vector<[4]x[4]xf32> + scf.yield %zero : vector<[4]x[4]xf32> + } else { + %undef = arm_sme.get_tile : vector<[4]x[4]xf32> + scf.yield %undef : vector<[4]x[4]xf32> + } + "test.some_use"(%tile) : (vector<[4]x[4]xf32>) -> () + return +} + +// ----- + +// Incorrect result! Everything assigned to tile 0 (which means values that are still live are overwritten). 
+// +// CHECK-BAD-LABEL: @constant_loop_init_with_multiple_users +// CHECK-BAD: arm_sme.zero {tile_id = 0 : i32} : vector<[4]x[4]xf32> +// CHECK-BAD: arm_sme.move_vector_to_tile_slice {{.*}} {tile_id = 0 : i32} : vector<[4]xf32> into vector<[4]x[4]xf32> +// CHECK-BAD: arm_sme.move_vector_to_tile_slice {{.*}} {tile_id = 0 : i32} : vector<[4]xf32> into vector<[4]x[4]xf32> +func.func @constant_loop_init_with_multiple_users(%a: vector<[4]xf32>, %b: vector<[4]xf32>) { + %c0 = arith.constant 0 : index + %c1 = arith.constant 1 : index + %c10 = arith.constant 10 : index + %init = arm_sme.zero : vector<[4]x[4]xf32> + %tile_a = scf.for %i = %c0 to %c10 step %c1 iter_args(%iter = %init) -> vector<[4]x[4]xf32> { + %new_tile = arm_sme.move_vector_to_tile_slice %a, %iter, %i : vector<[4]xf32> into vector<[4]x[4]xf32> + scf.yield %new_tile : vector<[4]x[4]xf32> + } + %tile_b = scf.for %i = %c0 to %c10 step %c1 iter_args(%iter = %init) -> vector<[4]x[4]xf32> { + %new_tile = arm_sme.move_vector_to_tile_slice %a, %iter, %i : vector<[4]xf32> into vector<[4]x[4]xf32> + scf.yield %new_tile : vector<[4]x[4]xf32> + } + "test.some_use"(%tile_a) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_b) : (vector<[4]x[4]xf32>) -> () + return +} + +// ----- + +// Incorrect result! Everything assigned to tile 0 (which means values that are still live are overwritten). +// +// CHECK-BAD-LABEL: @run_out_of_tiles_but_avoid_spill +// CHECK-BAD: arm_sme.zero {tile_id = 0 : i32} +// CHECK-BAD-COUNT-4: arm_sme.move_vector_to_tile_slice {{.*}} {tile_id = 0 : i32} : vector<[4]xf32> into vector<[4]x[4]xf32> +func.func @run_out_of_tiles_but_avoid_spill(%a: vector<[4]xf32>, %b: vector<[4]xf32>, %c: vector<[4]xf32>, %d: vector<[4]xf32>) { + %init = arm_sme.zero : vector<[4]x[4]xf32> + %c0 = arith.constant 0 : index + %c1 = arith.constant 1 : index + %c10 = arith.constant 10 : index + scf.for %i = %c0 to %c10 step %c1 { + %tile_a, %tile_b, %tile_c, %tile_d = scf.for %j = %c0 to %c10 step %c1 + iter_args(%iter_a = %init, %iter_b = %init, %iter_c = %init, %iter_d = %init) + -> (vector<[4]x[4]xf32>, vector<[4]x[4]xf32> , vector<[4]x[4]xf32> , vector<[4]x[4]xf32>) { + %new_a = arm_sme.move_vector_to_tile_slice %a, %iter_a, %i : vector<[4]xf32> into vector<[4]x[4]xf32> + %new_b = arm_sme.move_vector_to_tile_slice %b, %iter_b, %i : vector<[4]xf32> into vector<[4]x[4]xf32> + %new_c = arm_sme.move_vector_to_tile_slice %c, %iter_c, %i : vector<[4]xf32> into vector<[4]x[4]xf32> + %new_d = arm_sme.move_vector_to_tile_slice %d, %iter_d, %i : vector<[4]xf32> into vector<[4]x[4]xf32> + scf.yield %new_a, %new_b, %new_c, %new_d : vector<[4]x[4]xf32>, vector<[4]x[4]xf32>, vector<[4]x[4]xf32>, vector<[4]x[4]xf32> + } + "test.some_use"(%tile_a) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_b) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_c) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_d) : (vector<[4]x[4]xf32>) -> () + } + return +} + +// ----- + +// Incorrect result! Everything other than zero assigned to tile 1 (which means values that are still live are overwritten). 
+// +// CHECK-BAD-LABEL: @avoidable_spill +// CHECK-BAD: arm_sme.zero {tile_id = 0 : i32} +// CHECK-BAD: arm_sme.get_tile {tile_id = 1 : i32} +// CHECK-BAD-COUNT-4: arm_sme.move_vector_to_tile_slice {{.*}} {tile_id = 1 : i32} +func.func @avoidable_spill(%a: vector<[4]xf32>, %b: vector<[4]xf32>, %c: vector<[4]xf32>, %d: vector<[4]xf32>) { + %zero = arm_sme.zero : vector<[4]x[4]xf32> + %tile = arm_sme.get_tile : vector<[4]x[4]xf32> + %c0 = arith.constant 0 : index + %c1 = arith.constant 1 : index + %c10 = arith.constant 10 : index + scf.for %i = %c0 to %c10 step %c1 { + "test.some_use"(%zero) : (vector<[4]x[4]xf32>) -> () + %tile_a = arm_sme.move_vector_to_tile_slice %a, %tile, %c0 : vector<[4]xf32> into vector<[4]x[4]xf32> + %tile_b = arm_sme.move_vector_to_tile_slice %b, %tile, %c0 : vector<[4]xf32> into vector<[4]x[4]xf32> + %tile_c = arm_sme.move_vector_to_tile_slice %c, %tile, %c0 : vector<[4]xf32> into vector<[4]x[4]xf32> + %tile_d = arm_sme.move_vector_to_tile_slice %d, %tile, %c0 : vector<[4]xf32> into vector<[4]x[4]xf32> + "test.some_use"(%tile_a) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_b) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_c) : (vector<[4]x[4]xf32>) -> () + "test.some_use"(%tile_d) : (vector<[4]x[4]xf32>) -> () + } + return +} From 167427f5db023308c2908b2e9a23d2de15527a07 Mon Sep 17 00:00:00 2001 From: Gang Chen Date: Wed, 1 May 2024 08:16:55 -0700 Subject: [PATCH 26/48] [AMDGPU] change order of fp and sp in kernel prologue (#90626) change order of fp and sp in kernel prologue also related codegen tests to make it easier to merge code into our downstream branches Signed-off-by: gangc --- llvm/lib/Target/AMDGPU/SIFrameLowering.cpp | 12 ++++++------ .../CodeGen/AMDGPU/GlobalISel/non-entry-alloca.ll | 4 ++-- llvm/test/CodeGen/AMDGPU/cc-update.ll | 12 ++++++------ .../AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll | 2 +- llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll | 8 ++++---- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp index 4f106bf0dfb114..eae666ab0e7d77 100644 --- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp @@ -683,6 +683,12 @@ void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF, } assert(ScratchWaveOffsetReg || !PreloadedScratchWaveOffsetReg); + if (hasFP(MF)) { + Register FPReg = MFI->getFrameOffsetReg(); + assert(FPReg != AMDGPU::FP_REG); + BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0); + } + if (requiresStackPointerReference(MF)) { Register SPReg = MFI->getStackPtrOffsetReg(); assert(SPReg != AMDGPU::SP_REG); @@ -690,12 +696,6 @@ void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF, .addImm(FrameInfo.getStackSize() * getScratchScaleFactor(ST)); } - if (hasFP(MF)) { - Register FPReg = MFI->getFrameOffsetReg(); - assert(FPReg != AMDGPU::FP_REG); - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0); - } - bool NeedsFlatScratchInit = MFI->getUserSGPRInfo().hasFlatScratchInit() && (MRI.isPhysRegUsed(AMDGPU::FLAT_SCR) || FrameInfo.hasCalls() || diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/non-entry-alloca.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/non-entry-alloca.ll index b940dc74839b26..eaaeb3dc77a419 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/non-entry-alloca.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/non-entry-alloca.ll @@ -16,8 +16,8 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache 
; GCN-NEXT: s_load_dword s6, s[4:5], 0x8 ; GCN-NEXT: s_add_u32 s0, s0, s9 ; GCN-NEXT: s_addc_u32 s1, s1, 0 -; GCN-NEXT: s_movk_i32 s32, 0x400 ; GCN-NEXT: s_mov_b32 s33, 0 +; GCN-NEXT: s_movk_i32 s32, 0x400 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: s_cmp_lg_u32 s6, 0 ; GCN-NEXT: s_cbranch_scc1 .LBB0_3 @@ -87,8 +87,8 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache ; GCN-NEXT: s_load_dword s6, s[4:5], 0x8 ; GCN-NEXT: s_add_u32 s0, s0, s9 ; GCN-NEXT: s_addc_u32 s1, s1, 0 -; GCN-NEXT: s_movk_i32 s32, 0x1000 ; GCN-NEXT: s_mov_b32 s33, 0 +; GCN-NEXT: s_movk_i32 s32, 0x1000 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: s_cmp_lg_u32 s6, 0 ; GCN-NEXT: s_cbranch_scc1 .LBB1_2 diff --git a/llvm/test/CodeGen/AMDGPU/cc-update.ll b/llvm/test/CodeGen/AMDGPU/cc-update.ll index c674aebabcc8d2..8e773cad3b3357 100644 --- a/llvm/test/CodeGen/AMDGPU/cc-update.ll +++ b/llvm/test/CodeGen/AMDGPU/cc-update.ll @@ -321,8 +321,8 @@ define amdgpu_kernel void @test_force_fp_kern_call() local_unnamed_addr #2 { ; GFX803-NEXT: s_mov_b64 s[10:11], s[8:9] ; GFX803-NEXT: v_or_b32_e32 v31, v0, v2 ; GFX803-NEXT: s_mov_b64 s[8:9], s[6:7] -; GFX803-NEXT: s_mov_b32 s32, 0 ; GFX803-NEXT: s_mov_b32 s33, 0 +; GFX803-NEXT: s_mov_b32 s32, 0 ; GFX803-NEXT: s_getpc_b64 s[16:17] ; GFX803-NEXT: s_add_u32 s16, s16, ex@rel32@lo+4 ; GFX803-NEXT: s_addc_u32 s17, s17, ex@rel32@hi+12 @@ -340,8 +340,8 @@ define amdgpu_kernel void @test_force_fp_kern_call() local_unnamed_addr #2 { ; GFX900-NEXT: s_mov_b64 s[10:11], s[8:9] ; GFX900-NEXT: v_or3_b32 v31, v0, v1, v2 ; GFX900-NEXT: s_mov_b64 s[8:9], s[6:7] -; GFX900-NEXT: s_mov_b32 s32, 0 ; GFX900-NEXT: s_mov_b32 s33, 0 +; GFX900-NEXT: s_mov_b32 s32, 0 ; GFX900-NEXT: s_getpc_b64 s[16:17] ; GFX900-NEXT: s_add_u32 s16, s16, ex@rel32@lo+4 ; GFX900-NEXT: s_addc_u32 s17, s17, ex@rel32@hi+12 @@ -351,8 +351,8 @@ define amdgpu_kernel void @test_force_fp_kern_call() local_unnamed_addr #2 { ; GFX1010-LABEL: test_force_fp_kern_call: ; GFX1010: ; %bb.0: ; %entry ; GFX1010-NEXT: s_add_u32 s10, s10, s15 -; GFX1010-NEXT: s_mov_b32 s32, 0 ; GFX1010-NEXT: s_mov_b32 s33, 0 +; GFX1010-NEXT: s_mov_b32 s32, 0 ; GFX1010-NEXT: s_addc_u32 s11, s11, 0 ; GFX1010-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s10 ; GFX1010-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s11 @@ -378,16 +378,16 @@ define amdgpu_kernel void @test_force_fp_kern_call() local_unnamed_addr #2 { ; GFX1100-NEXT: s_mov_b64 s[8:9], s[2:3] ; GFX1100-NEXT: s_mov_b32 s13, s14 ; GFX1100-NEXT: s_mov_b32 s14, s15 -; GFX1100-NEXT: s_mov_b32 s32, 0 ; GFX1100-NEXT: s_mov_b32 s33, 0 +; GFX1100-NEXT: s_mov_b32 s32, 0 ; GFX1100-NEXT: s_getpc_b64 s[6:7] ; GFX1100-NEXT: s_add_u32 s6, s6, ex@rel32@lo+4 ; GFX1100-NEXT: s_addc_u32 s7, s7, ex@rel32@hi+12 ; GFX1100-NEXT: s_swappc_b64 s[30:31], s[6:7] ; GFX1100-NEXT: s_endpgm ; GFX1010-NEXT s_add_u32 s12, s12, s17 -; GFX1010-NEXT s_mov_b32 s32, 0 ; GFX1010-NEXT s_mov_b32 s33, 0 +; GFX1010-NEXT s_mov_b32 s32, 0 ; GFX1010-NEXT s_addc_u32 s13, s13, 0 ; GFX1010-NEXT s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12 ; GFX1010-NEXT s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13 @@ -459,8 +459,8 @@ define amdgpu_kernel void @test_force_fp_kern_stack_and_call() local_unnamed_add ; GFX1010-LABEL: test_force_fp_kern_stack_and_call: ; GFX1010: ; %bb.0: ; %entry ; GFX1010-NEXT: s_add_u32 s10, s10, s15 -; GFX1010-NEXT: s_movk_i32 s32, 0x200 ; GFX1010-NEXT: s_mov_b32 s33, 0 +; GFX1010-NEXT: s_movk_i32 s32, 0x200 ; GFX1010-NEXT: s_addc_u32 s11, s11, 0 ; GFX1010-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s10 ; 
GFX1010-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s11 diff --git a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll index fbf2ee1145ae94..ec446f1f3bf27d 100644 --- a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll +++ b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll @@ -7,8 +7,8 @@ define amdgpu_kernel void @test_kernel(i32 %val) #0 { ; CHECK-LABEL: test_kernel: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_mov_b32 s32, 0x180000 ; CHECK-NEXT: s_mov_b32 s33, 0 +; CHECK-NEXT: s_mov_b32 s32, 0x180000 ; CHECK-NEXT: s_add_u32 flat_scratch_lo, s10, s15 ; CHECK-NEXT: s_addc_u32 flat_scratch_hi, s11, 0 ; CHECK-NEXT: s_add_u32 s0, s0, s15 diff --git a/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll b/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll index 125e6bc0f787f1..ba012b208c957a 100644 --- a/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll +++ b/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll @@ -21,8 +21,8 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache ; MUBUF-NEXT: s_add_u32 s0, s0, s9 ; MUBUF-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8 ; MUBUF-NEXT: s_addc_u32 s1, s1, 0 -; MUBUF-NEXT: s_movk_i32 s32, 0x400 ; MUBUF-NEXT: s_mov_b32 s33, 0 +; MUBUF-NEXT: s_movk_i32 s32, 0x400 ; MUBUF-NEXT: s_waitcnt lgkmcnt(0) ; MUBUF-NEXT: s_cmp_lg_u32 s8, 0 ; MUBUF-NEXT: s_cbranch_scc1 .LBB0_3 @@ -57,8 +57,8 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache ; FLATSCR-NEXT: s_add_u32 flat_scratch_lo, s2, s5 ; FLATSCR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8 ; FLATSCR-NEXT: s_addc_u32 flat_scratch_hi, s3, 0 -; FLATSCR-NEXT: s_mov_b32 s32, 16 ; FLATSCR-NEXT: s_mov_b32 s33, 0 +; FLATSCR-NEXT: s_mov_b32 s32, 16 ; FLATSCR-NEXT: s_waitcnt lgkmcnt(0) ; FLATSCR-NEXT: s_cmp_lg_u32 s4, 0 ; FLATSCR-NEXT: s_cbranch_scc1 .LBB0_3 @@ -125,8 +125,8 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache ; MUBUF-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x8 ; MUBUF-NEXT: s_add_u32 s0, s0, s9 ; MUBUF-NEXT: s_addc_u32 s1, s1, 0 -; MUBUF-NEXT: s_movk_i32 s32, 0x1000 ; MUBUF-NEXT: s_mov_b32 s33, 0 +; MUBUF-NEXT: s_movk_i32 s32, 0x1000 ; MUBUF-NEXT: s_waitcnt lgkmcnt(0) ; MUBUF-NEXT: s_cmp_lg_u32 s6, 0 ; MUBUF-NEXT: s_cbranch_scc1 .LBB1_2 @@ -159,8 +159,8 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache ; FLATSCR-NEXT: s_add_u32 flat_scratch_lo, s2, s5 ; FLATSCR-NEXT: s_addc_u32 flat_scratch_hi, s3, 0 ; FLATSCR-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8 -; FLATSCR-NEXT: s_mov_b32 s32, 64 ; FLATSCR-NEXT: s_mov_b32 s33, 0 +; FLATSCR-NEXT: s_mov_b32 s32, 64 ; FLATSCR-NEXT: s_waitcnt lgkmcnt(0) ; FLATSCR-NEXT: s_cmp_lg_u32 s2, 0 ; FLATSCR-NEXT: s_cbranch_scc1 .LBB1_2 From 2f01fd99eb8c8ab3db9aba72c4f00e31e9e60a05 Mon Sep 17 00:00:00 2001 From: Stephen Tozer Date: Wed, 1 May 2024 16:50:12 +0100 Subject: [PATCH 27/48] [RemoveDIs] Load into new debug info format by default in LLVM (#89799) This patch enables parsing and creating modules directly into the new debug info format. Prior to this patch, all modules were constructed with the old debug info format by default, and would be converted into the new format just before running LLVM passes. This is an important milestone, in that this means that every tool will now be exposed to debug records, rather than those that run LLVM passes. As far as I've tested, all LLVM tools/projects now either handle debug records, or convert them to the old intrinsic format. 
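For tools that take the second route, the conversion is a single call on the parsed module. A minimal hedged sketch (the helper name is invented for illustration; the `Module` calls it makes are existing LLVM API, and this is the same escape hatch suggested further down for downstream breakage):

```cpp
// Illustrative sketch only; parseForLegacyConsumer is an invented name. A
// downstream tool that still expects llvm.dbg.* intrinsics can convert a
// freshly parsed module back to the old representation before processing it.
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include <memory>

using namespace llvm;

std::unique_ptr<Module> parseForLegacyConsumer(StringRef IR, LLVMContext &Ctx) {
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(IR, Err, Ctx);
  if (M && M->IsNewDbgInfoFormat)
    M->convertFromNewDbgValues(); // back to debug intrinsics
  return M;
}
```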
There are a few unit tests that need updating for this patch; these are either cases of tests that previously needed to set the debug info format to function, or tests that depend on the old debug info format in some way. There should be no visible change in the output of any LLVM tool as a result of this patch, although the likelihood of this patch breaking downstream code means an NFC tag might be a little misleading, if not technically incorrect: This will probably break some downstream tools that don't already handle debug records. If your downstream code breaks as a result of this change, the simplest fix is to convert the module in question to the old debug format before you process it, using `Module::convertFromNewDbgValues()`. For more information about how to handle debug records or about what has changed, see the migration document: https://llvm.org/docs/RemoveDIsDebugInfo.html --- llvm/docs/ReleaseNotes.rst | 7 +++ llvm/include/llvm/AsmParser/LLParser.h | 1 - llvm/lib/AsmParser/LLParser.cpp | 34 ++++++----- llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 2 +- llvm/lib/IR/BasicBlock.cpp | 2 +- llvm/lib/IR/Function.cpp | 4 +- llvm/lib/IR/Module.cpp | 4 +- llvm/tools/llvm-as/llvm-as.cpp | 7 +-- llvm/tools/llvm-dis/llvm-dis.cpp | 2 +- llvm/tools/llvm-link/llvm-link.cpp | 8 +-- .../Analysis/IRSimilarityIdentifierTest.cpp | 32 +++++++++++ llvm/unittests/IR/BasicBlockDbgInfoTest.cpp | 17 ------ llvm/unittests/IR/DebugInfoTest.cpp | 18 +++--- llvm/unittests/IR/InstructionsTest.cpp | 5 ++ .../Transforms/Utils/CloningTest.cpp | 5 +- llvm/unittests/Transforms/Utils/LocalTest.cpp | 56 +++++++++++++++++++ 16 files changed, 143 insertions(+), 61 deletions(-) diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst index d8cc667723f554..4f01ba1920130c 100644 --- a/llvm/docs/ReleaseNotes.rst +++ b/llvm/docs/ReleaseNotes.rst @@ -170,6 +170,13 @@ Changes to the Metadata Info Changes to the Debug Info --------------------------------- +* LLVM has switched from using debug intrinsics internally to using debug + records by default. This should happen transparently when using the DIBuilder + to construct debug variable information, but will require changes for any code + that interacts with debug intrinsics directly. Debug intrinsics will only be + supported on a best-effort basis from here onwards; for more information, see + the `migration docs `_. + Changes to the LLVM tools --------------------------------- * llvm-nm and llvm-objdump can now print symbol information from linked diff --git a/llvm/include/llvm/AsmParser/LLParser.h b/llvm/include/llvm/AsmParser/LLParser.h index b2dcdfad0a04b4..e687254f6c4c70 100644 --- a/llvm/include/llvm/AsmParser/LLParser.h +++ b/llvm/include/llvm/AsmParser/LLParser.h @@ -337,7 +337,6 @@ namespace llvm { // Top-Level Entities bool parseTopLevelEntities(); - bool finalizeDebugInfoFormat(Module *M); void dropUnknownMetadataReferences(); bool validateEndOfModule(bool UpgradeDebugInfo); bool validateEndOfIndex(); diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index 2902bd9fe17c48..34053a5ca9c8e8 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -74,23 +74,6 @@ static std::string getTypeString(Type *T) { return Tmp.str(); } -// Whatever debug info format we parsed, we should convert to the expected debug -// info format immediately afterwards. 
-bool LLParser::finalizeDebugInfoFormat(Module *M) { - // We should have already returned an error if we observed both intrinsics and - // records in this IR. - assert(!(SeenNewDbgInfoFormat && SeenOldDbgInfoFormat) && - "Mixed debug intrinsics/records seen without a parsing error?"); - if (PreserveInputDbgFormat == cl::boolOrDefault::BOU_TRUE) { - UseNewDbgInfoFormat = SeenNewDbgInfoFormat; - WriteNewDbgInfoFormatToBitcode = SeenNewDbgInfoFormat; - WriteNewDbgInfoFormat = SeenNewDbgInfoFormat; - } else if (M) { - M->setIsNewDbgInfoFormat(false); - } - return false; -} - /// Run: module ::= toplevelentity* bool LLParser::Run(bool UpgradeDebugInfo, DataLayoutCallbackTy DataLayoutCallback) { @@ -108,7 +91,7 @@ bool LLParser::Run(bool UpgradeDebugInfo, } return parseTopLevelEntities() || validateEndOfModule(UpgradeDebugInfo) || - validateEndOfIndex() || finalizeDebugInfoFormat(M); + validateEndOfIndex(); } bool LLParser::parseStandaloneConstantValue(Constant *&C, @@ -207,6 +190,18 @@ void LLParser::dropUnknownMetadataReferences() { bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) { if (!M) return false; + + // We should have already returned an error if we observed both intrinsics and + // records in this IR. + assert(!(SeenNewDbgInfoFormat && SeenOldDbgInfoFormat) && + "Mixed debug intrinsics/records seen without a parsing error?"); + if (PreserveInputDbgFormat == cl::boolOrDefault::BOU_TRUE) { + UseNewDbgInfoFormat = SeenNewDbgInfoFormat; + WriteNewDbgInfoFormatToBitcode = SeenNewDbgInfoFormat; + WriteNewDbgInfoFormat = SeenNewDbgInfoFormat; + M->setNewDbgInfoFormatFlag(SeenNewDbgInfoFormat); + } + // Handle any function attribute group forward references. for (const auto &RAG : ForwardRefAttrGroups) { Value *V = RAG.first; @@ -439,6 +434,9 @@ bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) { UpgradeModuleFlags(*M); UpgradeSectionAttributes(*M); + if (PreserveInputDbgFormat != cl::boolOrDefault::BOU_TRUE) + M->setIsNewDbgInfoFormat(UseNewDbgInfoFormat); + if (!Slots) return false; // Initialize the slot mapping. 
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index 0b7fcd88418894..73fe63b5b8f6f7 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -4319,7 +4319,7 @@ Error BitcodeReader::parseModule(uint64_t ResumeBit, if (PreserveInputDbgFormat != cl::boolOrDefault::BOU_TRUE) { TheModule->IsNewDbgInfoFormat = UseNewDbgInfoFormat && - LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_TRUE; + LoadBitcodeIntoNewDbgInfoFormat != cl::boolOrDefault::BOU_FALSE; } this->ValueTypeCallback = std::move(Callbacks.ValueType); diff --git a/llvm/lib/IR/BasicBlock.cpp b/llvm/lib/IR/BasicBlock.cpp index 29f2cbf611fa3a..aea9425ebebaab 100644 --- a/llvm/lib/IR/BasicBlock.cpp +++ b/llvm/lib/IR/BasicBlock.cpp @@ -181,7 +181,7 @@ template class llvm::SymbolTableListTraits NonGlobalValueMaxNameSize( "non-global-value-max-name-size", cl::Hidden, cl::init(1024), cl::desc("Maximum size for the name of non-global values.")); +extern cl::opt UseNewDbgInfoFormat; + void Function::convertToNewDbgValues() { IsNewDbgInfoFormat = true; for (auto &BB : *this) { @@ -438,7 +440,7 @@ Function::Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, : GlobalObject(Ty, Value::FunctionVal, OperandTraits::op_begin(this), 0, Linkage, name, computeAddrSpace(AddrSpace, ParentModule)), - NumArgs(Ty->getNumParams()), IsNewDbgInfoFormat(false) { + NumArgs(Ty->getNumParams()), IsNewDbgInfoFormat(UseNewDbgInfoFormat) { assert(FunctionType::isValidReturnType(getReturnType()) && "invalid return type"); setGlobalObjectSubClassData(0); diff --git a/llvm/lib/IR/Module.cpp b/llvm/lib/IR/Module.cpp index a8696ed9e3ce5d..915fa5097383cc 100644 --- a/llvm/lib/IR/Module.cpp +++ b/llvm/lib/IR/Module.cpp @@ -54,6 +54,8 @@ using namespace llvm; +extern cl::opt UseNewDbgInfoFormat; + //===----------------------------------------------------------------------===// // Methods to implement the globals and functions lists. // @@ -72,7 +74,7 @@ template class llvm::SymbolTableListTraits; Module::Module(StringRef MID, LLVMContext &C) : Context(C), ValSymTab(std::make_unique(-1)), ModuleID(std::string(MID)), SourceFileName(std::string(MID)), DL(""), - IsNewDbgInfoFormat(false) { + IsNewDbgInfoFormat(UseNewDbgInfoFormat) { Context.addModule(this); } diff --git a/llvm/tools/llvm-as/llvm-as.cpp b/llvm/tools/llvm-as/llvm-as.cpp index e48e3f4d22c123..0958e16c2197ac 100644 --- a/llvm/tools/llvm-as/llvm-as.cpp +++ b/llvm/tools/llvm-as/llvm-as.cpp @@ -142,11 +142,10 @@ int main(int argc, char **argv) { } // Convert to new debug format if requested. - assert(!M->IsNewDbgInfoFormat && "Unexpectedly in new debug mode"); - if (UseNewDbgInfoFormat && WriteNewDbgInfoFormatToBitcode) { - M->convertToNewDbgValues(); + M->setIsNewDbgInfoFormat(UseNewDbgInfoFormat && + WriteNewDbgInfoFormatToBitcode); + if (M->IsNewDbgInfoFormat) M->removeDebugIntrinsicDeclarations(); - } std::unique_ptr Index = std::move(ModuleAndIndex.Index); diff --git a/llvm/tools/llvm-dis/llvm-dis.cpp b/llvm/tools/llvm-dis/llvm-dis.cpp index fbbb5506e43e05..d28af85bc739eb 100644 --- a/llvm/tools/llvm-dis/llvm-dis.cpp +++ b/llvm/tools/llvm-dis/llvm-dis.cpp @@ -258,7 +258,7 @@ int main(int argc, char **argv) { // All that llvm-dis does is write the assembly to a file. 
if (!DontPrint) { if (M) { - ScopedDbgInfoFormatSetter FormatSetter(*M, WriteNewDbgInfoFormat); + M->setIsNewDbgInfoFormat(WriteNewDbgInfoFormat); if (WriteNewDbgInfoFormat) M->removeDebugIntrinsicDeclarations(); M->print(Out->os(), Annotator.get(), PreserveAssemblyUseListOrder); diff --git a/llvm/tools/llvm-link/llvm-link.cpp b/llvm/tools/llvm-link/llvm-link.cpp index 7794f2d81ed064..b84469d1c757f8 100644 --- a/llvm/tools/llvm-link/llvm-link.cpp +++ b/llvm/tools/llvm-link/llvm-link.cpp @@ -489,12 +489,6 @@ int main(int argc, char **argv) { if (LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_UNSET) LoadBitcodeIntoNewDbgInfoFormat = cl::boolOrDefault::BOU_TRUE; - // RemoveDIs debug-info transition: tests may request that we /try/ to use the - // new debug-info format. - if (TryUseNewDbgInfoFormat) { - // Turn the new debug-info format on. - UseNewDbgInfoFormat = true; - } // Since llvm-link collects multiple IR modules together, for simplicity's // sake we disable the "PreserveInputDbgFormat" flag to enforce a single // debug info format. @@ -556,7 +550,7 @@ int main(int argc, char **argv) { SetFormat(WriteNewDbgInfoFormat); Composite->print(Out.os(), nullptr, PreserveAssemblyUseListOrder); } else if (Force || !CheckBitcodeOutputToConsole(Out.os())) { - SetFormat(WriteNewDbgInfoFormatToBitcode); + SetFormat(UseNewDbgInfoFormat && WriteNewDbgInfoFormatToBitcode); WriteBitcodeToFile(*Composite, Out.os(), PreserveBitcodeUseListOrder); } diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp index f6a053792f8529..0a08ca3cb99db9 100644 --- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp +++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "llvm/Analysis/IRSimilarityIdentifier.h" +#include "llvm/ADT/ScopeExit.h" #include "llvm/AsmParser/Parser.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" @@ -22,6 +23,27 @@ using namespace llvm; using namespace IRSimilarity; +extern llvm::cl::opt UseNewDbgInfoFormat; +extern cl::opt PreserveInputDbgFormat; +extern bool WriteNewDbgInfoFormatToBitcode; +extern cl::opt WriteNewDbgInfoFormat; + +// Backup all of the existing settings that may be modified when +// PreserveInputDbgFormat=true, so that when the test is finished we return them +// (and the "preserve" setting) to their original values. +static auto SaveDbgInfoFormat() { + return make_scope_exit( + [OldPreserveInputDbgFormat = PreserveInputDbgFormat.getValue(), + OldUseNewDbgInfoFormat = UseNewDbgInfoFormat.getValue(), + OldWriteNewDbgInfoFormatToBitcode = WriteNewDbgInfoFormatToBitcode, + OldWriteNewDbgInfoFormat = WriteNewDbgInfoFormat.getValue()] { + PreserveInputDbgFormat = OldPreserveInputDbgFormat; + UseNewDbgInfoFormat = OldUseNewDbgInfoFormat; + WriteNewDbgInfoFormatToBitcode = OldWriteNewDbgInfoFormatToBitcode; + WriteNewDbgInfoFormat = OldWriteNewDbgInfoFormat; + }); +} + static std::unique_ptr makeLLVMModule(LLVMContext &Context, StringRef ModuleStr) { SMDiagnostic Err; @@ -1308,6 +1330,9 @@ TEST(IRInstructionMapper, CallBrInstIllegal) { // Checks that an debuginfo intrinsics are mapped to be invisible. Since they // do not semantically change the program, they can be recognized as similar. 
+// FIXME: PreserveInputDbgFormat is set to true because this test contains +// malformed debug info that cannot be converted to the new debug info format; +// this test should be updated later to use valid debug info. TEST(IRInstructionMapper, DebugInfoInvisible) { StringRef ModuleString = R"( define i32 @f(i32 %a, i32 %b) { @@ -1320,6 +1345,8 @@ TEST(IRInstructionMapper, DebugInfoInvisible) { declare void @llvm.dbg.value(metadata) !0 = distinct !{!"test\00", i32 10})"; + auto SettingGuard = SaveDbgInfoFormat(); + PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; LLVMContext Context; std::unique_ptr M = makeLLVMModule(Context, ModuleString); @@ -1916,6 +1943,9 @@ TEST(IRSimilarityCandidate, CheckRegionsDifferentTypes) { // Check that debug instructions do not impact similarity. They are marked as // invisible. +// FIXME: PreserveInputDbgFormat is set to true because this test contains +// malformed debug info that cannot be converted to the new debug info format; +// this test should be updated later to use valid debug info. TEST(IRSimilarityCandidate, IdenticalWithDebug) { StringRef ModuleString = R"( define i32 @f(i32 %a, i32 %b) { @@ -1938,6 +1968,8 @@ TEST(IRSimilarityCandidate, IdenticalWithDebug) { declare void @llvm.dbg.value(metadata) !0 = distinct !{!"test\00", i32 10} !1 = distinct !{!"test\00", i32 11})"; + auto SettingGuard = SaveDbgInfoFormat(); + PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; LLVMContext Context; std::unique_ptr M = makeLLVMModule(Context, ModuleString); diff --git a/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp b/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp index f873bbd4293af5..26c00b8113f2b2 100644 --- a/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp +++ b/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp @@ -72,8 +72,6 @@ TEST(BasicBlockDbgInfoTest, InsertAfterSelf) { !11 = !DILocation(line: 1, column: 1, scope: !6) )"); - // Convert the module to "new" form debug-info. - M->convertToNewDbgValues(); // Fetch the entry block. BasicBlock &BB = M->getFunction("f")->getEntryBlock(); @@ -104,8 +102,6 @@ TEST(BasicBlockDbgInfoTest, InsertAfterSelf) { auto Range2 = RetInst->getDbgRecordRange(); EXPECT_EQ(std::distance(Range2.begin(), Range2.end()), 1u); - M->convertFromNewDbgValues(); - UseNewDbgInfoFormat = false; } @@ -196,8 +192,6 @@ TEST(BasicBlockDbgInfoTest, MarkerOperations) { // Fetch the entry block, BasicBlock &BB = M->getFunction("f")->getEntryBlock(); - // Convert the module to "new" form debug-info. - M->convertToNewDbgValues(); EXPECT_EQ(BB.size(), 2u); // Fetch out our two markers, @@ -332,8 +326,6 @@ TEST(BasicBlockDbgInfoTest, HeadBitOperations) { // Test that the movement of debug-data when using moveBefore etc and // insertBefore etc are governed by the "head" bit of iterators. BasicBlock &BB = M->getFunction("f")->getEntryBlock(); - // Convert the module to "new" form debug-info. - M->convertToNewDbgValues(); // Test that the head bit behaves as expected: it should be set when the // code wants the _start_ of the block, but not otherwise. @@ -441,8 +433,6 @@ TEST(BasicBlockDbgInfoTest, InstrDbgAccess) { // Check that DbgVariableRecords can be accessed from Instructions without // digging into the depths of DbgMarkers. BasicBlock &BB = M->getFunction("f")->getEntryBlock(); - // Convert the module to "new" form debug-info. 
- M->convertToNewDbgValues(); Instruction *BInst = &*BB.begin(); Instruction *CInst = BInst->getNextNode(); @@ -579,7 +569,6 @@ class DbgSpliceTest : public ::testing::Test { void SetUp() override { UseNewDbgInfoFormat = true; M = parseIR(C, SpliceTestIR.c_str()); - M->convertToNewDbgValues(); BBEntry = &M->getFunction("f")->getEntryBlock(); BBExit = BBEntry->getNextNode(); @@ -1219,7 +1208,6 @@ TEST(BasicBlockDbgInfoTest, DbgSpliceTrailing) { BasicBlock &Entry = M->getFunction("f")->getEntryBlock(); BasicBlock &Exit = *Entry.getNextNode(); - M->convertToNewDbgValues(); // Begin by forcing entry block to have dangling DbgVariableRecord. Entry.getTerminator()->eraseFromParent(); @@ -1273,7 +1261,6 @@ TEST(BasicBlockDbgInfoTest, RemoveInstAndReinsert) { )"); BasicBlock &Entry = M->getFunction("f")->getEntryBlock(); - M->convertToNewDbgValues(); // Fetch the relevant instructions from the converted function. Instruction *SubInst = &*Entry.begin(); @@ -1352,7 +1339,6 @@ TEST(BasicBlockDbgInfoTest, RemoveInstAndReinsertForOneDbgVariableRecord) { )"); BasicBlock &Entry = M->getFunction("f")->getEntryBlock(); - M->convertToNewDbgValues(); // Fetch the relevant instructions from the converted function. Instruction *SubInst = &*Entry.begin(); @@ -1436,7 +1422,6 @@ TEST(BasicBlockDbgInfoTest, DbgSpliceToEmpty1) { Function &F = *M->getFunction("f"); BasicBlock &Entry = F.getEntryBlock(); BasicBlock &Exit = *Entry.getNextNode(); - M->convertToNewDbgValues(); // Begin by forcing entry block to have dangling DbgVariableRecord. Entry.getTerminator()->eraseFromParent(); @@ -1506,7 +1491,6 @@ TEST(BasicBlockDbgInfoTest, DbgSpliceToEmpty2) { Function &F = *M->getFunction("f"); BasicBlock &Entry = F.getEntryBlock(); BasicBlock &Exit = *Entry.getNextNode(); - M->convertToNewDbgValues(); // Begin by forcing entry block to have dangling DbgVariableRecord. Entry.getTerminator()->eraseFromParent(); @@ -1576,7 +1560,6 @@ TEST(BasicBlockDbgInfoTest, DbgMoveToEnd) { Function &F = *M->getFunction("f"); BasicBlock &Entry = F.getEntryBlock(); BasicBlock &Exit = *Entry.getNextNode(); - M->convertToNewDbgValues(); // Move the return to the end of the entry block. Instruction *Br = Entry.getTerminator(); diff --git a/llvm/unittests/IR/DebugInfoTest.cpp b/llvm/unittests/IR/DebugInfoTest.cpp index d06b979bf4a1c4..ef6aa7fc10df27 100644 --- a/llvm/unittests/IR/DebugInfoTest.cpp +++ b/llvm/unittests/IR/DebugInfoTest.cpp @@ -237,6 +237,9 @@ TEST(DbgVariableIntrinsic, EmptyMDIsKillLocation) { // Duplicate of above test, but in DbgVariableRecord representation. TEST(MetadataTest, DeleteInstUsedByDbgVariableRecord) { LLVMContext C; + bool OldDbgValueMode = UseNewDbgInfoFormat; + UseNewDbgInfoFormat = true; + std::unique_ptr M = parseIR(C, R"( define i16 @f(i16 %a) !dbg !6 { %b = add i16 %a, 1, !dbg !11 @@ -262,10 +265,7 @@ TEST(MetadataTest, DeleteInstUsedByDbgVariableRecord) { !11 = !DILocation(line: 1, column: 1, scope: !6) )"); - bool OldDbgValueMode = UseNewDbgInfoFormat; - UseNewDbgInfoFormat = true; Instruction &I = *M->getFunction("f")->getEntryBlock().getFirstNonPHI(); - M->convertToNewDbgValues(); // Find the DbgVariableRecords using %b. SmallVector DVIs; @@ -1044,9 +1044,8 @@ TEST(MetadataTest, ConvertDbgToDbgVariableRecord) { TEST(MetadataTest, DbgVariableRecordConversionRoutines) { LLVMContext C; - // For the purpose of this test, set and un-set the command line option - // corresponding to UseNewDbgInfoFormat. 
- UseNewDbgInfoFormat = true; + bool OldDbgValueMode = UseNewDbgInfoFormat; + UseNewDbgInfoFormat = false; std::unique_ptr M = parseIR(C, R"( define i16 @f(i16 %a) !dbg !6 { @@ -1077,6 +1076,11 @@ TEST(MetadataTest, DbgVariableRecordConversionRoutines) { !11 = !DILocation(line: 1, column: 1, scope: !6) )"); + // For the purpose of this test, set and un-set the command line option + // corresponding to UseNewDbgInfoFormat, but only after parsing, to ensure + // that the IR starts off in the old format. + UseNewDbgInfoFormat = true; + // Check that the conversion routines and utilities between dbg.value // debug-info format and DbgVariableRecords works. Function *F = M->getFunction("f"); @@ -1181,7 +1185,7 @@ TEST(MetadataTest, DbgVariableRecordConversionRoutines) { EXPECT_EQ(DVI2->getVariable(), DLV2); EXPECT_EQ(DVI2->getExpression(), Expr2); - UseNewDbgInfoFormat = false; + UseNewDbgInfoFormat = OldDbgValueMode; } } // end namespace diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp index b47c73f0b329ae..6c4debf4dbec8c 100644 --- a/llvm/unittests/IR/InstructionsTest.cpp +++ b/llvm/unittests/IR/InstructionsTest.cpp @@ -31,6 +31,8 @@ #include "gtest/gtest.h" #include +extern llvm::cl::opt PreserveInputDbgFormat; + namespace llvm { namespace { @@ -1460,6 +1462,8 @@ TEST(InstructionsTest, GetSplat) { TEST(InstructionsTest, SkipDebug) { LLVMContext C; + cl::boolOrDefault OldDbgFormat = PreserveInputDbgFormat; + PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; std::unique_ptr M = parseIR(C, R"( declare void @llvm.dbg.value(metadata, metadata, metadata) @@ -1495,6 +1499,7 @@ TEST(InstructionsTest, SkipDebug) { // After the terminator, there are no non-debug instructions. EXPECT_EQ(nullptr, Term->getNextNonDebugInstruction()); + PreserveInputDbgFormat = OldDbgFormat; } TEST(InstructionsTest, PhiMightNotBeFPMathOperator) { diff --git a/llvm/unittests/Transforms/Utils/CloningTest.cpp b/llvm/unittests/Transforms/Utils/CloningTest.cpp index 025771f07ce5d4..6f4e860d604680 100644 --- a/llvm/unittests/Transforms/Utils/CloningTest.cpp +++ b/llvm/unittests/Transforms/Utils/CloningTest.cpp @@ -844,8 +844,9 @@ TEST(CloneFunction, CloneFunctionWithInlinedSubprograms) { EXPECT_FALSE(verifyModule(*ImplModule, &errs())); // Check that DILexicalBlock of inlined function was not cloned. 
- auto DbgDeclareI = Func->begin()->begin(); - auto ClonedDbgDeclareI = ClonedFunc->begin()->begin(); + auto DbgDeclareI = Func->begin()->begin()->getDbgRecordRange().begin(); + auto ClonedDbgDeclareI = + ClonedFunc->begin()->begin()->getDbgRecordRange().begin(); const DebugLoc &DbgLoc = DbgDeclareI->getDebugLoc(); const DebugLoc &ClonedDbgLoc = ClonedDbgDeclareI->getDebugLoc(); EXPECT_NE(DbgLoc.get(), ClonedDbgLoc.get()); diff --git a/llvm/unittests/Transforms/Utils/LocalTest.cpp b/llvm/unittests/Transforms/Utils/LocalTest.cpp index a0119ed5159d5a..cf1ccd5607b2fd 100644 --- a/llvm/unittests/Transforms/Utils/LocalTest.cpp +++ b/llvm/unittests/Transforms/Utils/LocalTest.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "llvm/Transforms/Utils/Local.h" +#include "llvm/ADT/ScopeExit.h" #include "llvm/Analysis/DomTreeUpdater.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/PostDominators.h" @@ -26,6 +27,27 @@ using namespace llvm; +extern llvm::cl::opt UseNewDbgInfoFormat; +extern cl::opt PreserveInputDbgFormat; +extern bool WriteNewDbgInfoFormatToBitcode; +extern cl::opt WriteNewDbgInfoFormat; + +// Backup all of the existing settings that may be modified when +// PreserveInputDbgFormat=true, so that when the test is finished we return them +// (and the "preserve" setting) to their original values. +static auto SaveDbgInfoFormat() { + return make_scope_exit( + [OldPreserveInputDbgFormat = PreserveInputDbgFormat.getValue(), + OldUseNewDbgInfoFormat = UseNewDbgInfoFormat.getValue(), + OldWriteNewDbgInfoFormatToBitcode = WriteNewDbgInfoFormatToBitcode, + OldWriteNewDbgInfoFormat = WriteNewDbgInfoFormat.getValue()] { + PreserveInputDbgFormat = OldPreserveInputDbgFormat; + UseNewDbgInfoFormat = OldUseNewDbgInfoFormat; + WriteNewDbgInfoFormatToBitcode = OldWriteNewDbgInfoFormatToBitcode; + WriteNewDbgInfoFormat = OldWriteNewDbgInfoFormat; + }); +} + TEST(Local, RecursivelyDeleteDeadPHINodes) { LLVMContext C; @@ -116,6 +138,11 @@ static std::unique_ptr parseIR(LLVMContext &C, const char *IR) { TEST(Local, ReplaceDbgDeclare) { LLVMContext C; + // FIXME: PreserveInputDbgFormat is set to true because this test has + // been written to expect debug intrinsics rather than debug records; use the + // intrinsic format until we update the test checks. + auto SettingGuard = SaveDbgInfoFormat(); + PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; // Original C source to get debug info for a local variable: // void f() { int x; } @@ -493,6 +520,14 @@ struct SalvageDebugInfoTest : ::testing::Test { Function *F = nullptr; void SetUp() override { + // FIXME: PreserveInputDbgFormat is set to true because this test has + // been written to expect debug intrinsics rather than debug records; use + // the intrinsic format until we update the test checks. Note that the + // temporary setting of this flag only needs to cover the parsing step, not + // the test body itself. + auto SettingGuard = SaveDbgInfoFormat(); + PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; + M = parseIR(C, R"( define void @f() !dbg !8 { @@ -591,6 +626,12 @@ TEST_F(SalvageDebugInfoTest, RecursiveBlockSimplification) { TEST(Local, wouldInstructionBeTriviallyDead) { LLVMContext Ctx; + // FIXME: PreserveInputDbgFormat is set to true because this test has + // been written to expect debug intrinsics rather than debug records. + // TODO: This test doesn't have a DbgRecord equivalent form so delete + // it when debug intrinsics are removed. 
+ auto SettingGuard = SaveDbgInfoFormat(); + PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; std::unique_ptr M = parseIR(Ctx, R"( define dso_local void @fun() local_unnamed_addr #0 !dbg !9 { @@ -680,6 +721,11 @@ TEST(Local, ChangeToUnreachable) { TEST(Local, FindDbgUsers) { LLVMContext Ctx; + // FIXME: PreserveInputDbgFormat is set to true because this test has + // been written to expect debug intrinsics rather than debug records; use the + // intrinsic format until we update the test checks. + auto SettingGuard = SaveDbgInfoFormat(); + PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; std::unique_ptr M = parseIR(Ctx, R"( define dso_local void @fun(ptr %a) #0 !dbg !11 { @@ -797,6 +843,11 @@ TEST(Local, ReplaceAllDbgUsesWith) { using namespace llvm::dwarf; LLVMContext Ctx; + // FIXME: PreserveInputDbgFormat is set to true because this test has + // been written to expect debug intrinsics rather than debug records; use the + // intrinsic format until we update the test checks. + auto SettingGuard = SaveDbgInfoFormat(); + PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; // Note: The datalayout simulates Darwin/x86_64. std::unique_ptr M = parseIR(Ctx, @@ -1345,6 +1396,11 @@ TEST(Local, ExpressionForConstant) { TEST(Local, ReplaceDbgVariableRecord) { LLVMContext C; + // FIXME: PreserveInputDbgFormat is set to true because this test has + // been written to expect debug intrinsics rather than debug records; use the + // intrinsic format until we update the test checks. + auto SettingGuard = SaveDbgInfoFormat(); + PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; // Test that RAUW also replaces the operands of DbgVariableRecord objects, // i.e. non-instruction stored debugging information. From 00821fed09969305b0003d3313c44d1e761a7131 Mon Sep 17 00:00:00 2001 From: Stephen Tozer Date: Wed, 1 May 2024 16:56:34 +0100 Subject: [PATCH 28/48] Revert "[RemoveDIs] Load into new debug info format by default in LLVM (#89799)" A unit test was broken by the above commit: https://lab.llvm.org/buildbot/#/builders/139/builds/64627 This reverts commit 2f01fd99eb8c8ab3db9aba72c4f00e31e9e60a05. --- llvm/docs/ReleaseNotes.rst | 7 --- llvm/include/llvm/AsmParser/LLParser.h | 1 + llvm/lib/AsmParser/LLParser.cpp | 34 +++++------ llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 2 +- llvm/lib/IR/BasicBlock.cpp | 2 +- llvm/lib/IR/Function.cpp | 4 +- llvm/lib/IR/Module.cpp | 4 +- llvm/tools/llvm-as/llvm-as.cpp | 7 ++- llvm/tools/llvm-dis/llvm-dis.cpp | 2 +- llvm/tools/llvm-link/llvm-link.cpp | 8 ++- .../Analysis/IRSimilarityIdentifierTest.cpp | 32 ----------- llvm/unittests/IR/BasicBlockDbgInfoTest.cpp | 17 ++++++ llvm/unittests/IR/DebugInfoTest.cpp | 18 +++--- llvm/unittests/IR/InstructionsTest.cpp | 5 -- .../Transforms/Utils/CloningTest.cpp | 5 +- llvm/unittests/Transforms/Utils/LocalTest.cpp | 56 ------------------- 16 files changed, 61 insertions(+), 143 deletions(-) diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst index 4f01ba1920130c..d8cc667723f554 100644 --- a/llvm/docs/ReleaseNotes.rst +++ b/llvm/docs/ReleaseNotes.rst @@ -170,13 +170,6 @@ Changes to the Metadata Info Changes to the Debug Info --------------------------------- -* LLVM has switched from using debug intrinsics internally to using debug - records by default. This should happen transparently when using the DIBuilder - to construct debug variable information, but will require changes for any code - that interacts with debug intrinsics directly. 
Debug intrinsics will only be - supported on a best-effort basis from here onwards; for more information, see - the `migration docs `_. - Changes to the LLVM tools --------------------------------- * llvm-nm and llvm-objdump can now print symbol information from linked diff --git a/llvm/include/llvm/AsmParser/LLParser.h b/llvm/include/llvm/AsmParser/LLParser.h index e687254f6c4c70..b2dcdfad0a04b4 100644 --- a/llvm/include/llvm/AsmParser/LLParser.h +++ b/llvm/include/llvm/AsmParser/LLParser.h @@ -337,6 +337,7 @@ namespace llvm { // Top-Level Entities bool parseTopLevelEntities(); + bool finalizeDebugInfoFormat(Module *M); void dropUnknownMetadataReferences(); bool validateEndOfModule(bool UpgradeDebugInfo); bool validateEndOfIndex(); diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index 34053a5ca9c8e8..2902bd9fe17c48 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -74,6 +74,23 @@ static std::string getTypeString(Type *T) { return Tmp.str(); } +// Whatever debug info format we parsed, we should convert to the expected debug +// info format immediately afterwards. +bool LLParser::finalizeDebugInfoFormat(Module *M) { + // We should have already returned an error if we observed both intrinsics and + // records in this IR. + assert(!(SeenNewDbgInfoFormat && SeenOldDbgInfoFormat) && + "Mixed debug intrinsics/records seen without a parsing error?"); + if (PreserveInputDbgFormat == cl::boolOrDefault::BOU_TRUE) { + UseNewDbgInfoFormat = SeenNewDbgInfoFormat; + WriteNewDbgInfoFormatToBitcode = SeenNewDbgInfoFormat; + WriteNewDbgInfoFormat = SeenNewDbgInfoFormat; + } else if (M) { + M->setIsNewDbgInfoFormat(false); + } + return false; +} + /// Run: module ::= toplevelentity* bool LLParser::Run(bool UpgradeDebugInfo, DataLayoutCallbackTy DataLayoutCallback) { @@ -91,7 +108,7 @@ bool LLParser::Run(bool UpgradeDebugInfo, } return parseTopLevelEntities() || validateEndOfModule(UpgradeDebugInfo) || - validateEndOfIndex(); + validateEndOfIndex() || finalizeDebugInfoFormat(M); } bool LLParser::parseStandaloneConstantValue(Constant *&C, @@ -190,18 +207,6 @@ void LLParser::dropUnknownMetadataReferences() { bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) { if (!M) return false; - - // We should have already returned an error if we observed both intrinsics and - // records in this IR. - assert(!(SeenNewDbgInfoFormat && SeenOldDbgInfoFormat) && - "Mixed debug intrinsics/records seen without a parsing error?"); - if (PreserveInputDbgFormat == cl::boolOrDefault::BOU_TRUE) { - UseNewDbgInfoFormat = SeenNewDbgInfoFormat; - WriteNewDbgInfoFormatToBitcode = SeenNewDbgInfoFormat; - WriteNewDbgInfoFormat = SeenNewDbgInfoFormat; - M->setNewDbgInfoFormatFlag(SeenNewDbgInfoFormat); - } - // Handle any function attribute group forward references. for (const auto &RAG : ForwardRefAttrGroups) { Value *V = RAG.first; @@ -434,9 +439,6 @@ bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) { UpgradeModuleFlags(*M); UpgradeSectionAttributes(*M); - if (PreserveInputDbgFormat != cl::boolOrDefault::BOU_TRUE) - M->setIsNewDbgInfoFormat(UseNewDbgInfoFormat); - if (!Slots) return false; // Initialize the slot mapping. 
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index 73fe63b5b8f6f7..0b7fcd88418894 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -4319,7 +4319,7 @@ Error BitcodeReader::parseModule(uint64_t ResumeBit, if (PreserveInputDbgFormat != cl::boolOrDefault::BOU_TRUE) { TheModule->IsNewDbgInfoFormat = UseNewDbgInfoFormat && - LoadBitcodeIntoNewDbgInfoFormat != cl::boolOrDefault::BOU_FALSE; + LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_TRUE; } this->ValueTypeCallback = std::move(Callbacks.ValueType); diff --git a/llvm/lib/IR/BasicBlock.cpp b/llvm/lib/IR/BasicBlock.cpp index aea9425ebebaab..29f2cbf611fa3a 100644 --- a/llvm/lib/IR/BasicBlock.cpp +++ b/llvm/lib/IR/BasicBlock.cpp @@ -181,7 +181,7 @@ template class llvm::SymbolTableListTraits NonGlobalValueMaxNameSize( "non-global-value-max-name-size", cl::Hidden, cl::init(1024), cl::desc("Maximum size for the name of non-global values.")); -extern cl::opt UseNewDbgInfoFormat; - void Function::convertToNewDbgValues() { IsNewDbgInfoFormat = true; for (auto &BB : *this) { @@ -440,7 +438,7 @@ Function::Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, : GlobalObject(Ty, Value::FunctionVal, OperandTraits::op_begin(this), 0, Linkage, name, computeAddrSpace(AddrSpace, ParentModule)), - NumArgs(Ty->getNumParams()), IsNewDbgInfoFormat(UseNewDbgInfoFormat) { + NumArgs(Ty->getNumParams()), IsNewDbgInfoFormat(false) { assert(FunctionType::isValidReturnType(getReturnType()) && "invalid return type"); setGlobalObjectSubClassData(0); diff --git a/llvm/lib/IR/Module.cpp b/llvm/lib/IR/Module.cpp index 915fa5097383cc..a8696ed9e3ce5d 100644 --- a/llvm/lib/IR/Module.cpp +++ b/llvm/lib/IR/Module.cpp @@ -54,8 +54,6 @@ using namespace llvm; -extern cl::opt UseNewDbgInfoFormat; - //===----------------------------------------------------------------------===// // Methods to implement the globals and functions lists. // @@ -74,7 +72,7 @@ template class llvm::SymbolTableListTraits; Module::Module(StringRef MID, LLVMContext &C) : Context(C), ValSymTab(std::make_unique(-1)), ModuleID(std::string(MID)), SourceFileName(std::string(MID)), DL(""), - IsNewDbgInfoFormat(UseNewDbgInfoFormat) { + IsNewDbgInfoFormat(false) { Context.addModule(this); } diff --git a/llvm/tools/llvm-as/llvm-as.cpp b/llvm/tools/llvm-as/llvm-as.cpp index 0958e16c2197ac..e48e3f4d22c123 100644 --- a/llvm/tools/llvm-as/llvm-as.cpp +++ b/llvm/tools/llvm-as/llvm-as.cpp @@ -142,10 +142,11 @@ int main(int argc, char **argv) { } // Convert to new debug format if requested. - M->setIsNewDbgInfoFormat(UseNewDbgInfoFormat && - WriteNewDbgInfoFormatToBitcode); - if (M->IsNewDbgInfoFormat) + assert(!M->IsNewDbgInfoFormat && "Unexpectedly in new debug mode"); + if (UseNewDbgInfoFormat && WriteNewDbgInfoFormatToBitcode) { + M->convertToNewDbgValues(); M->removeDebugIntrinsicDeclarations(); + } std::unique_ptr Index = std::move(ModuleAndIndex.Index); diff --git a/llvm/tools/llvm-dis/llvm-dis.cpp b/llvm/tools/llvm-dis/llvm-dis.cpp index d28af85bc739eb..fbbb5506e43e05 100644 --- a/llvm/tools/llvm-dis/llvm-dis.cpp +++ b/llvm/tools/llvm-dis/llvm-dis.cpp @@ -258,7 +258,7 @@ int main(int argc, char **argv) { // All that llvm-dis does is write the assembly to a file. 
if (!DontPrint) { if (M) { - M->setIsNewDbgInfoFormat(WriteNewDbgInfoFormat); + ScopedDbgInfoFormatSetter FormatSetter(*M, WriteNewDbgInfoFormat); if (WriteNewDbgInfoFormat) M->removeDebugIntrinsicDeclarations(); M->print(Out->os(), Annotator.get(), PreserveAssemblyUseListOrder); diff --git a/llvm/tools/llvm-link/llvm-link.cpp b/llvm/tools/llvm-link/llvm-link.cpp index b84469d1c757f8..7794f2d81ed064 100644 --- a/llvm/tools/llvm-link/llvm-link.cpp +++ b/llvm/tools/llvm-link/llvm-link.cpp @@ -489,6 +489,12 @@ int main(int argc, char **argv) { if (LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_UNSET) LoadBitcodeIntoNewDbgInfoFormat = cl::boolOrDefault::BOU_TRUE; + // RemoveDIs debug-info transition: tests may request that we /try/ to use the + // new debug-info format. + if (TryUseNewDbgInfoFormat) { + // Turn the new debug-info format on. + UseNewDbgInfoFormat = true; + } // Since llvm-link collects multiple IR modules together, for simplicity's // sake we disable the "PreserveInputDbgFormat" flag to enforce a single // debug info format. @@ -550,7 +556,7 @@ int main(int argc, char **argv) { SetFormat(WriteNewDbgInfoFormat); Composite->print(Out.os(), nullptr, PreserveAssemblyUseListOrder); } else if (Force || !CheckBitcodeOutputToConsole(Out.os())) { - SetFormat(UseNewDbgInfoFormat && WriteNewDbgInfoFormatToBitcode); + SetFormat(WriteNewDbgInfoFormatToBitcode); WriteBitcodeToFile(*Composite, Out.os(), PreserveBitcodeUseListOrder); } diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp index 0a08ca3cb99db9..f6a053792f8529 100644 --- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp +++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp @@ -12,7 +12,6 @@ //===----------------------------------------------------------------------===// #include "llvm/Analysis/IRSimilarityIdentifier.h" -#include "llvm/ADT/ScopeExit.h" #include "llvm/AsmParser/Parser.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" @@ -23,27 +22,6 @@ using namespace llvm; using namespace IRSimilarity; -extern llvm::cl::opt UseNewDbgInfoFormat; -extern cl::opt PreserveInputDbgFormat; -extern bool WriteNewDbgInfoFormatToBitcode; -extern cl::opt WriteNewDbgInfoFormat; - -// Backup all of the existing settings that may be modified when -// PreserveInputDbgFormat=true, so that when the test is finished we return them -// (and the "preserve" setting) to their original values. -static auto SaveDbgInfoFormat() { - return make_scope_exit( - [OldPreserveInputDbgFormat = PreserveInputDbgFormat.getValue(), - OldUseNewDbgInfoFormat = UseNewDbgInfoFormat.getValue(), - OldWriteNewDbgInfoFormatToBitcode = WriteNewDbgInfoFormatToBitcode, - OldWriteNewDbgInfoFormat = WriteNewDbgInfoFormat.getValue()] { - PreserveInputDbgFormat = OldPreserveInputDbgFormat; - UseNewDbgInfoFormat = OldUseNewDbgInfoFormat; - WriteNewDbgInfoFormatToBitcode = OldWriteNewDbgInfoFormatToBitcode; - WriteNewDbgInfoFormat = OldWriteNewDbgInfoFormat; - }); -} - static std::unique_ptr makeLLVMModule(LLVMContext &Context, StringRef ModuleStr) { SMDiagnostic Err; @@ -1330,9 +1308,6 @@ TEST(IRInstructionMapper, CallBrInstIllegal) { // Checks that an debuginfo intrinsics are mapped to be invisible. Since they // do not semantically change the program, they can be recognized as similar. 
-// FIXME: PreserveInputDbgFormat is set to true because this test contains -// malformed debug info that cannot be converted to the new debug info format; -// this test should be updated later to use valid debug info. TEST(IRInstructionMapper, DebugInfoInvisible) { StringRef ModuleString = R"( define i32 @f(i32 %a, i32 %b) { @@ -1345,8 +1320,6 @@ TEST(IRInstructionMapper, DebugInfoInvisible) { declare void @llvm.dbg.value(metadata) !0 = distinct !{!"test\00", i32 10})"; - auto SettingGuard = SaveDbgInfoFormat(); - PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; LLVMContext Context; std::unique_ptr M = makeLLVMModule(Context, ModuleString); @@ -1943,9 +1916,6 @@ TEST(IRSimilarityCandidate, CheckRegionsDifferentTypes) { // Check that debug instructions do not impact similarity. They are marked as // invisible. -// FIXME: PreserveInputDbgFormat is set to true because this test contains -// malformed debug info that cannot be converted to the new debug info format; -// this test should be updated later to use valid debug info. TEST(IRSimilarityCandidate, IdenticalWithDebug) { StringRef ModuleString = R"( define i32 @f(i32 %a, i32 %b) { @@ -1968,8 +1938,6 @@ TEST(IRSimilarityCandidate, IdenticalWithDebug) { declare void @llvm.dbg.value(metadata) !0 = distinct !{!"test\00", i32 10} !1 = distinct !{!"test\00", i32 11})"; - auto SettingGuard = SaveDbgInfoFormat(); - PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; LLVMContext Context; std::unique_ptr M = makeLLVMModule(Context, ModuleString); diff --git a/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp b/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp index 26c00b8113f2b2..f873bbd4293af5 100644 --- a/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp +++ b/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp @@ -72,6 +72,8 @@ TEST(BasicBlockDbgInfoTest, InsertAfterSelf) { !11 = !DILocation(line: 1, column: 1, scope: !6) )"); + // Convert the module to "new" form debug-info. + M->convertToNewDbgValues(); // Fetch the entry block. BasicBlock &BB = M->getFunction("f")->getEntryBlock(); @@ -102,6 +104,8 @@ TEST(BasicBlockDbgInfoTest, InsertAfterSelf) { auto Range2 = RetInst->getDbgRecordRange(); EXPECT_EQ(std::distance(Range2.begin(), Range2.end()), 1u); + M->convertFromNewDbgValues(); + UseNewDbgInfoFormat = false; } @@ -192,6 +196,8 @@ TEST(BasicBlockDbgInfoTest, MarkerOperations) { // Fetch the entry block, BasicBlock &BB = M->getFunction("f")->getEntryBlock(); + // Convert the module to "new" form debug-info. + M->convertToNewDbgValues(); EXPECT_EQ(BB.size(), 2u); // Fetch out our two markers, @@ -326,6 +332,8 @@ TEST(BasicBlockDbgInfoTest, HeadBitOperations) { // Test that the movement of debug-data when using moveBefore etc and // insertBefore etc are governed by the "head" bit of iterators. BasicBlock &BB = M->getFunction("f")->getEntryBlock(); + // Convert the module to "new" form debug-info. + M->convertToNewDbgValues(); // Test that the head bit behaves as expected: it should be set when the // code wants the _start_ of the block, but not otherwise. @@ -433,6 +441,8 @@ TEST(BasicBlockDbgInfoTest, InstrDbgAccess) { // Check that DbgVariableRecords can be accessed from Instructions without // digging into the depths of DbgMarkers. BasicBlock &BB = M->getFunction("f")->getEntryBlock(); + // Convert the module to "new" form debug-info. 
+ M->convertToNewDbgValues(); Instruction *BInst = &*BB.begin(); Instruction *CInst = BInst->getNextNode(); @@ -569,6 +579,7 @@ class DbgSpliceTest : public ::testing::Test { void SetUp() override { UseNewDbgInfoFormat = true; M = parseIR(C, SpliceTestIR.c_str()); + M->convertToNewDbgValues(); BBEntry = &M->getFunction("f")->getEntryBlock(); BBExit = BBEntry->getNextNode(); @@ -1208,6 +1219,7 @@ TEST(BasicBlockDbgInfoTest, DbgSpliceTrailing) { BasicBlock &Entry = M->getFunction("f")->getEntryBlock(); BasicBlock &Exit = *Entry.getNextNode(); + M->convertToNewDbgValues(); // Begin by forcing entry block to have dangling DbgVariableRecord. Entry.getTerminator()->eraseFromParent(); @@ -1261,6 +1273,7 @@ TEST(BasicBlockDbgInfoTest, RemoveInstAndReinsert) { )"); BasicBlock &Entry = M->getFunction("f")->getEntryBlock(); + M->convertToNewDbgValues(); // Fetch the relevant instructions from the converted function. Instruction *SubInst = &*Entry.begin(); @@ -1339,6 +1352,7 @@ TEST(BasicBlockDbgInfoTest, RemoveInstAndReinsertForOneDbgVariableRecord) { )"); BasicBlock &Entry = M->getFunction("f")->getEntryBlock(); + M->convertToNewDbgValues(); // Fetch the relevant instructions from the converted function. Instruction *SubInst = &*Entry.begin(); @@ -1422,6 +1436,7 @@ TEST(BasicBlockDbgInfoTest, DbgSpliceToEmpty1) { Function &F = *M->getFunction("f"); BasicBlock &Entry = F.getEntryBlock(); BasicBlock &Exit = *Entry.getNextNode(); + M->convertToNewDbgValues(); // Begin by forcing entry block to have dangling DbgVariableRecord. Entry.getTerminator()->eraseFromParent(); @@ -1491,6 +1506,7 @@ TEST(BasicBlockDbgInfoTest, DbgSpliceToEmpty2) { Function &F = *M->getFunction("f"); BasicBlock &Entry = F.getEntryBlock(); BasicBlock &Exit = *Entry.getNextNode(); + M->convertToNewDbgValues(); // Begin by forcing entry block to have dangling DbgVariableRecord. Entry.getTerminator()->eraseFromParent(); @@ -1560,6 +1576,7 @@ TEST(BasicBlockDbgInfoTest, DbgMoveToEnd) { Function &F = *M->getFunction("f"); BasicBlock &Entry = F.getEntryBlock(); BasicBlock &Exit = *Entry.getNextNode(); + M->convertToNewDbgValues(); // Move the return to the end of the entry block. Instruction *Br = Entry.getTerminator(); diff --git a/llvm/unittests/IR/DebugInfoTest.cpp b/llvm/unittests/IR/DebugInfoTest.cpp index ef6aa7fc10df27..d06b979bf4a1c4 100644 --- a/llvm/unittests/IR/DebugInfoTest.cpp +++ b/llvm/unittests/IR/DebugInfoTest.cpp @@ -237,9 +237,6 @@ TEST(DbgVariableIntrinsic, EmptyMDIsKillLocation) { // Duplicate of above test, but in DbgVariableRecord representation. TEST(MetadataTest, DeleteInstUsedByDbgVariableRecord) { LLVMContext C; - bool OldDbgValueMode = UseNewDbgInfoFormat; - UseNewDbgInfoFormat = true; - std::unique_ptr M = parseIR(C, R"( define i16 @f(i16 %a) !dbg !6 { %b = add i16 %a, 1, !dbg !11 @@ -265,7 +262,10 @@ TEST(MetadataTest, DeleteInstUsedByDbgVariableRecord) { !11 = !DILocation(line: 1, column: 1, scope: !6) )"); + bool OldDbgValueMode = UseNewDbgInfoFormat; + UseNewDbgInfoFormat = true; Instruction &I = *M->getFunction("f")->getEntryBlock().getFirstNonPHI(); + M->convertToNewDbgValues(); // Find the DbgVariableRecords using %b. SmallVector DVIs; @@ -1044,8 +1044,9 @@ TEST(MetadataTest, ConvertDbgToDbgVariableRecord) { TEST(MetadataTest, DbgVariableRecordConversionRoutines) { LLVMContext C; - bool OldDbgValueMode = UseNewDbgInfoFormat; - UseNewDbgInfoFormat = false; + // For the purpose of this test, set and un-set the command line option + // corresponding to UseNewDbgInfoFormat. 
+ UseNewDbgInfoFormat = true; std::unique_ptr M = parseIR(C, R"( define i16 @f(i16 %a) !dbg !6 { @@ -1076,11 +1077,6 @@ TEST(MetadataTest, DbgVariableRecordConversionRoutines) { !11 = !DILocation(line: 1, column: 1, scope: !6) )"); - // For the purpose of this test, set and un-set the command line option - // corresponding to UseNewDbgInfoFormat, but only after parsing, to ensure - // that the IR starts off in the old format. - UseNewDbgInfoFormat = true; - // Check that the conversion routines and utilities between dbg.value // debug-info format and DbgVariableRecords works. Function *F = M->getFunction("f"); @@ -1185,7 +1181,7 @@ TEST(MetadataTest, DbgVariableRecordConversionRoutines) { EXPECT_EQ(DVI2->getVariable(), DLV2); EXPECT_EQ(DVI2->getExpression(), Expr2); - UseNewDbgInfoFormat = OldDbgValueMode; + UseNewDbgInfoFormat = false; } } // end namespace diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp index 6c4debf4dbec8c..b47c73f0b329ae 100644 --- a/llvm/unittests/IR/InstructionsTest.cpp +++ b/llvm/unittests/IR/InstructionsTest.cpp @@ -31,8 +31,6 @@ #include "gtest/gtest.h" #include -extern llvm::cl::opt PreserveInputDbgFormat; - namespace llvm { namespace { @@ -1462,8 +1460,6 @@ TEST(InstructionsTest, GetSplat) { TEST(InstructionsTest, SkipDebug) { LLVMContext C; - cl::boolOrDefault OldDbgFormat = PreserveInputDbgFormat; - PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; std::unique_ptr M = parseIR(C, R"( declare void @llvm.dbg.value(metadata, metadata, metadata) @@ -1499,7 +1495,6 @@ TEST(InstructionsTest, SkipDebug) { // After the terminator, there are no non-debug instructions. EXPECT_EQ(nullptr, Term->getNextNonDebugInstruction()); - PreserveInputDbgFormat = OldDbgFormat; } TEST(InstructionsTest, PhiMightNotBeFPMathOperator) { diff --git a/llvm/unittests/Transforms/Utils/CloningTest.cpp b/llvm/unittests/Transforms/Utils/CloningTest.cpp index 6f4e860d604680..025771f07ce5d4 100644 --- a/llvm/unittests/Transforms/Utils/CloningTest.cpp +++ b/llvm/unittests/Transforms/Utils/CloningTest.cpp @@ -844,9 +844,8 @@ TEST(CloneFunction, CloneFunctionWithInlinedSubprograms) { EXPECT_FALSE(verifyModule(*ImplModule, &errs())); // Check that DILexicalBlock of inlined function was not cloned. 
- auto DbgDeclareI = Func->begin()->begin()->getDbgRecordRange().begin(); - auto ClonedDbgDeclareI = - ClonedFunc->begin()->begin()->getDbgRecordRange().begin(); + auto DbgDeclareI = Func->begin()->begin(); + auto ClonedDbgDeclareI = ClonedFunc->begin()->begin(); const DebugLoc &DbgLoc = DbgDeclareI->getDebugLoc(); const DebugLoc &ClonedDbgLoc = ClonedDbgDeclareI->getDebugLoc(); EXPECT_NE(DbgLoc.get(), ClonedDbgLoc.get()); diff --git a/llvm/unittests/Transforms/Utils/LocalTest.cpp b/llvm/unittests/Transforms/Utils/LocalTest.cpp index cf1ccd5607b2fd..a0119ed5159d5a 100644 --- a/llvm/unittests/Transforms/Utils/LocalTest.cpp +++ b/llvm/unittests/Transforms/Utils/LocalTest.cpp @@ -7,7 +7,6 @@ //===----------------------------------------------------------------------===// #include "llvm/Transforms/Utils/Local.h" -#include "llvm/ADT/ScopeExit.h" #include "llvm/Analysis/DomTreeUpdater.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/PostDominators.h" @@ -27,27 +26,6 @@ using namespace llvm; -extern llvm::cl::opt UseNewDbgInfoFormat; -extern cl::opt PreserveInputDbgFormat; -extern bool WriteNewDbgInfoFormatToBitcode; -extern cl::opt WriteNewDbgInfoFormat; - -// Backup all of the existing settings that may be modified when -// PreserveInputDbgFormat=true, so that when the test is finished we return them -// (and the "preserve" setting) to their original values. -static auto SaveDbgInfoFormat() { - return make_scope_exit( - [OldPreserveInputDbgFormat = PreserveInputDbgFormat.getValue(), - OldUseNewDbgInfoFormat = UseNewDbgInfoFormat.getValue(), - OldWriteNewDbgInfoFormatToBitcode = WriteNewDbgInfoFormatToBitcode, - OldWriteNewDbgInfoFormat = WriteNewDbgInfoFormat.getValue()] { - PreserveInputDbgFormat = OldPreserveInputDbgFormat; - UseNewDbgInfoFormat = OldUseNewDbgInfoFormat; - WriteNewDbgInfoFormatToBitcode = OldWriteNewDbgInfoFormatToBitcode; - WriteNewDbgInfoFormat = OldWriteNewDbgInfoFormat; - }); -} - TEST(Local, RecursivelyDeleteDeadPHINodes) { LLVMContext C; @@ -138,11 +116,6 @@ static std::unique_ptr parseIR(LLVMContext &C, const char *IR) { TEST(Local, ReplaceDbgDeclare) { LLVMContext C; - // FIXME: PreserveInputDbgFormat is set to true because this test has - // been written to expect debug intrinsics rather than debug records; use the - // intrinsic format until we update the test checks. - auto SettingGuard = SaveDbgInfoFormat(); - PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; // Original C source to get debug info for a local variable: // void f() { int x; } @@ -520,14 +493,6 @@ struct SalvageDebugInfoTest : ::testing::Test { Function *F = nullptr; void SetUp() override { - // FIXME: PreserveInputDbgFormat is set to true because this test has - // been written to expect debug intrinsics rather than debug records; use - // the intrinsic format until we update the test checks. Note that the - // temporary setting of this flag only needs to cover the parsing step, not - // the test body itself. - auto SettingGuard = SaveDbgInfoFormat(); - PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; - M = parseIR(C, R"( define void @f() !dbg !8 { @@ -626,12 +591,6 @@ TEST_F(SalvageDebugInfoTest, RecursiveBlockSimplification) { TEST(Local, wouldInstructionBeTriviallyDead) { LLVMContext Ctx; - // FIXME: PreserveInputDbgFormat is set to true because this test has - // been written to expect debug intrinsics rather than debug records. - // TODO: This test doesn't have a DbgRecord equivalent form so delete - // it when debug intrinsics are removed. 
- auto SettingGuard = SaveDbgInfoFormat(); - PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; std::unique_ptr M = parseIR(Ctx, R"( define dso_local void @fun() local_unnamed_addr #0 !dbg !9 { @@ -721,11 +680,6 @@ TEST(Local, ChangeToUnreachable) { TEST(Local, FindDbgUsers) { LLVMContext Ctx; - // FIXME: PreserveInputDbgFormat is set to true because this test has - // been written to expect debug intrinsics rather than debug records; use the - // intrinsic format until we update the test checks. - auto SettingGuard = SaveDbgInfoFormat(); - PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; std::unique_ptr M = parseIR(Ctx, R"( define dso_local void @fun(ptr %a) #0 !dbg !11 { @@ -843,11 +797,6 @@ TEST(Local, ReplaceAllDbgUsesWith) { using namespace llvm::dwarf; LLVMContext Ctx; - // FIXME: PreserveInputDbgFormat is set to true because this test has - // been written to expect debug intrinsics rather than debug records; use the - // intrinsic format until we update the test checks. - auto SettingGuard = SaveDbgInfoFormat(); - PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; // Note: The datalayout simulates Darwin/x86_64. std::unique_ptr M = parseIR(Ctx, @@ -1396,11 +1345,6 @@ TEST(Local, ExpressionForConstant) { TEST(Local, ReplaceDbgVariableRecord) { LLVMContext C; - // FIXME: PreserveInputDbgFormat is set to true because this test has - // been written to expect debug intrinsics rather than debug records; use the - // intrinsic format until we update the test checks. - auto SettingGuard = SaveDbgInfoFormat(); - PreserveInputDbgFormat = cl::boolOrDefault::BOU_TRUE; // Test that RAUW also replaces the operands of DbgVariableRecord objects, // i.e. non-instruction stored debugging information. From fa535452b2508e2878b2697fabf546c997d9ca5d Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Wed, 1 May 2024 09:01:46 -0700 Subject: [PATCH 29/48] [llvm-install-name-tool] Error on non-Mach-O binaries (#90351) Previously if you passed an ELF binary it would be silently copied with no changes. --- .../llvm-objcopy/MachO/install-name-tool.test | 19 +++++++++++++++++++ llvm/tools/llvm-objcopy/ObjcopyOptions.cpp | 12 ++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 llvm/test/tools/llvm-objcopy/MachO/install-name-tool.test diff --git a/llvm/test/tools/llvm-objcopy/MachO/install-name-tool.test b/llvm/test/tools/llvm-objcopy/MachO/install-name-tool.test new file mode 100644 index 00000000000000..b56543bd8cfc8e --- /dev/null +++ b/llvm/test/tools/llvm-objcopy/MachO/install-name-tool.test @@ -0,0 +1,19 @@ +## This test checks general llvm-install-name-tool behavior. 
+ +# RUN: yaml2obj %s -o %t + +## Passing something that doesn't exist +# RUN: not llvm-install-name-tool -add_rpath foo non-existent-binary 2>&1 | FileCheck %s --check-prefix=DOES_NOT_EXIST + +# DOES_NOT_EXIST: {{.*}}non-existent-binary + +## Passing a non-Mach-O binary +# RUN: not llvm-install-name-tool -add_rpath foo %t 2>&1 | FileCheck %s --check-prefix=NON_MACH_O -DFILE=%t + +# NON_MACH_O: error: input file: [[FILE]] is not a Mach-O file + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC diff --git a/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp b/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp index 70e85460d3df0d..a1897334cff2ed 100644 --- a/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp +++ b/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp @@ -15,6 +15,7 @@ #include "llvm/ObjCopy/CommonConfig.h" #include "llvm/ObjCopy/ConfigManager.h" #include "llvm/ObjCopy/MachO/MachOConfig.h" +#include "llvm/Object/Binary.h" #include "llvm/Option/Arg.h" #include "llvm/Option/ArgList.h" #include "llvm/Support/CRC.h" @@ -26,6 +27,7 @@ using namespace llvm; using namespace llvm::objcopy; +using namespace llvm::object; using namespace llvm::opt; namespace { @@ -1242,6 +1244,16 @@ objcopy::parseInstallNameToolOptions(ArrayRef ArgsArr) { Config.InputFilename = Positional[0]; Config.OutputFilename = Positional[0]; + Expected> BinaryOrErr = + createBinary(Config.InputFilename); + if (!BinaryOrErr) + return createFileError(Config.InputFilename, BinaryOrErr.takeError()); + auto *Binary = (*BinaryOrErr).getBinary(); + if (!Binary->isMachO() && !Binary->isMachOUniversalBinary()) + return createStringError(errc::invalid_argument, + "input file: %s is not a Mach-O file", + Config.InputFilename.str().c_str()); + DC.CopyConfigs.push_back(std::move(ConfigMgr)); return std::move(DC); } From 6e31714d249f857f15262518327b0f0c9509db72 Mon Sep 17 00:00:00 2001 From: Congcong Cai Date: Thu, 2 May 2024 00:11:32 +0800 Subject: [PATCH 30/48] [analysis] assume expr is not mutated after analysis to avoid recursive (#90581) Fixes: #89376. --- clang-tools-extra/docs/ReleaseNotes.rst | 3 ++- clang/lib/Analysis/ExprMutationAnalyzer.cpp | 6 +++-- .../Analysis/ExprMutationAnalyzerTest.cpp | 26 +++++++++++++++++-- 3 files changed, 30 insertions(+), 5 deletions(-) diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst index 3038d2b125f20d..5956ccb925485c 100644 --- a/clang-tools-extra/docs/ReleaseNotes.rst +++ b/clang-tools-extra/docs/ReleaseNotes.rst @@ -261,7 +261,8 @@ Changes in existing checks - Improved :doc:`misc-const-correctness ` check by avoiding infinite recursion - for recursive forwarding reference. + for recursive functions with forwarding reference parameters and reference + variables which refer to themselves. - Improved :doc:`misc-definitions-in-headers ` check by replacing the local diff --git a/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/clang/lib/Analysis/ExprMutationAnalyzer.cpp index 941322be8f870b..3b3782fa1db9a0 100644 --- a/clang/lib/Analysis/ExprMutationAnalyzer.cpp +++ b/clang/lib/Analysis/ExprMutationAnalyzer.cpp @@ -235,15 +235,17 @@ const Stmt *ExprMutationAnalyzer::Analyzer::findMutationMemoized( if (Memoized != MemoizedResults.end()) return Memoized->second; + // Assume Exp is not mutated before analyzing Exp. 
+ MemoizedResults[Exp] = nullptr; if (isUnevaluated(Exp)) - return MemoizedResults[Exp] = nullptr; + return nullptr; for (const auto &Finder : Finders) { if (const Stmt *S = (this->*Finder)(Exp)) return MemoizedResults[Exp] = S; } - return MemoizedResults[Exp] = nullptr; + return nullptr; } const Stmt * diff --git a/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp b/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp index e94bc0d6039791..9c4ec07e139a12 100644 --- a/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp +++ b/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp @@ -10,8 +10,8 @@ #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/ASTMatchers/ASTMatchers.h" +#include "clang/Frontend/ASTUnit.h" #include "clang/Tooling/Tooling.h" -#include "llvm/ADT/SmallString.h" #include "gmock/gmock.h" #include "gtest/gtest.h" #include @@ -43,7 +43,7 @@ std::unique_ptr buildASTFromCode(const Twine &Code) { } ExprMatcher declRefTo(StringRef Name) { - return declRefExpr(to(namedDecl(hasName(Name)))); + return declRefExpr(to(namedDecl(hasName(Name)).bind("decl"))); } StmtMatcher withEnclosingCompound(ExprMatcher Matcher) { @@ -57,6 +57,13 @@ bool isMutated(const SmallVectorImpl &Results, ASTUnit *AST) { return ExprMutationAnalyzer(*S, AST->getASTContext()).isMutated(E); } +bool isDeclMutated(const SmallVectorImpl &Results, ASTUnit *AST) { + const auto *const S = selectFirst("stmt", Results); + const auto *const D = selectFirst("decl", Results); + TraversalKindScope RAII(AST->getASTContext(), TK_AsIs); + return ExprMutationAnalyzer(*S, AST->getASTContext()).isMutated(D); +} + SmallVector mutatedBy(const SmallVectorImpl &Results, ASTUnit *AST) { const auto *const S = selectFirst("stmt", Results); @@ -1552,6 +1559,21 @@ TEST(ExprMutationAnalyzerTest, UniquePtr) { // section: complex problems detected on real code +TEST(ExprMutationAnalyzerTest, SelfRef) { + std::unique_ptr AST{}; + SmallVector Results{}; + + AST = buildASTFromCodeWithArgs("void f() { int &x = x; }", + {"-Wno-unused-value", "-Wno-uninitialized"}); + Results = match(withEnclosingCompound(declRefTo("x")), AST->getASTContext()); + EXPECT_FALSE(isDeclMutated(Results, AST.get())); + + AST = buildASTFromCodeWithArgs("void f() { int &x = x; x = 1; }", + {"-Wno-unused-value", "-Wno-uninitialized"}); + Results = match(withEnclosingCompound(declRefTo("x")), AST->getASTContext()); + EXPECT_TRUE(isDeclMutated(Results, AST.get())); +} + TEST(ExprMutationAnalyzerTest, UnevaluatedContext) { const std::string Example = "template " From 4cbe7607c75486dd17a048a45dd8c72c3dbf7e62 Mon Sep 17 00:00:00 2001 From: Alastair Houghton Date: Wed, 1 May 2024 17:17:03 +0100 Subject: [PATCH 31/48] [LLDB][ELF] Fix section unification to not just use names. (#90099) Section unification cannot just use names, because it's valid for ELF binaries to have multiple sections with the same name. We should check other section properties too. Fixes #88001. 
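For reference, a minimal self-contained sketch of the matching criteria applied below (a hypothetical `SectionProps` struct stands in for LLDB's `Section`; the real `FindMatchingSection` in this patch additionally walks child sections recursively):

```cpp
// Hypothetical, simplified model -- not LLDB's actual API. Two sections are
// considered the same only when all of these properties agree, not just the
// name, which is what lets duplicate ".text" sections be told apart.
#include <cstdint>
#include <string>

struct SectionProps {
  std::string Name;     // e.g. ".text"
  uint64_t FileAddress; // virtual address of the section
  uint64_t ByteSize;    // size in bytes
  bool ThreadSpecific;  // TLS section or not
  uint32_t Permissions; // r/w/x flags
  uint32_t Log2Align;   // alignment as a power of two
};

static bool SectionsMatch(const SectionProps &A, const SectionProps &B) {
  return A.Name == B.Name && A.ThreadSpecific == B.ThreadSpecific &&
         A.Permissions == B.Permissions && A.ByteSize == B.ByteSize &&
         A.FileAddress == B.FileAddress && A.Log2Align == B.Log2Align;
}
```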
rdar://124467787 --- .../Plugins/ObjectFile/ELF/ObjectFileELF.cpp | 59 +++++++++++++++---- .../ObjectFile/ELF/two-text-sections.yaml | 48 +++++++++++++++ 2 files changed, 95 insertions(+), 12 deletions(-) create mode 100644 lldb/test/Shell/ObjectFile/ELF/two-text-sections.yaml diff --git a/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp b/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp index 0d95a1c12bde35..16f6d2e884b577 100644 --- a/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp +++ b/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp @@ -1854,6 +1854,39 @@ class VMAddressProvider { }; } +// We have to do this because ELF doesn't have section IDs, and also +// doesn't require section names to be unique. (We use the section index +// for section IDs, but that isn't guaranteed to be the same in separate +// debug images.) +static SectionSP FindMatchingSection(const SectionList §ion_list, + SectionSP section) { + SectionSP sect_sp; + + addr_t vm_addr = section->GetFileAddress(); + ConstString name = section->GetName(); + offset_t byte_size = section->GetByteSize(); + bool thread_specific = section->IsThreadSpecific(); + uint32_t permissions = section->GetPermissions(); + uint32_t alignment = section->GetLog2Align(); + + for (auto sect : section_list) { + if (sect->GetName() == name && + sect->IsThreadSpecific() == thread_specific && + sect->GetPermissions() == permissions && + sect->GetByteSize() == byte_size && sect->GetFileAddress() == vm_addr && + sect->GetLog2Align() == alignment) { + sect_sp = sect; + break; + } else { + sect_sp = FindMatchingSection(sect->GetChildren(), section); + if (sect_sp) + break; + } + } + + return sect_sp; +} + void ObjectFileELF::CreateSections(SectionList &unified_section_list) { if (m_sections_up) return; @@ -2067,10 +2100,12 @@ unsigned ObjectFileELF::ParseSymbols(Symtab *symtab, user_id_t start_id, SectionList *module_section_list = module_sp ? module_sp->GetSectionList() : nullptr; - // Local cache to avoid doing a FindSectionByName for each symbol. The "const - // char*" key must came from a ConstString object so they can be compared by - // pointer - std::unordered_map section_name_to_section; + // We might have debug information in a separate object, in which case + // we need to map the sections from that object to the sections in the + // main object during symbol lookup. If we had to compare the sections + // for every single symbol, that would be expensive, so this map is + // used to accelerate the process. 
+ std::unordered_map section_map; unsigned i; for (i = 0; i < num_symbols; ++i) { @@ -2275,14 +2310,14 @@ unsigned ObjectFileELF::ParseSymbols(Symtab *symtab, user_id_t start_id, if (symbol_section_sp && module_section_list && module_section_list != section_list) { - ConstString sect_name = symbol_section_sp->GetName(); - auto section_it = section_name_to_section.find(sect_name.GetCString()); - if (section_it == section_name_to_section.end()) - section_it = - section_name_to_section - .emplace(sect_name.GetCString(), - module_section_list->FindSectionByName(sect_name)) - .first; + auto section_it = section_map.find(symbol_section_sp); + if (section_it == section_map.end()) { + section_it = section_map + .emplace(symbol_section_sp, + FindMatchingSection(*module_section_list, + symbol_section_sp)) + .first; + } if (section_it->second) symbol_section_sp = section_it->second; } diff --git a/lldb/test/Shell/ObjectFile/ELF/two-text-sections.yaml b/lldb/test/Shell/ObjectFile/ELF/two-text-sections.yaml new file mode 100644 index 00000000000000..8b2fd47df1a1fa --- /dev/null +++ b/lldb/test/Shell/ObjectFile/ELF/two-text-sections.yaml @@ -0,0 +1,48 @@ +# Test handling of object files that have duplicate sections. This is legal, +# according to the System V ABI (Edition 4.1); see 4-20 where it says: +# +# Section names with a dot (.) prefix are reserved for the system, +# although applications may use these sections if their existing +# meanings are satisfactory. ... **An object file may have more than +# one section with the same name.** +# +# (See https://github.com/llvm/llvm-project/issues/88001) + +# RUN: yaml2obj %s -o %t +# RUN: lldb-test symbols %t | FileCheck %s + +# CHECK: 0x0000000000400010 {{.*}} my_function +# CHECK: 0x0000000000401020 {{.*}} my_other_function + +!ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +ProgramHeaders: + - Type: PT_LOAD + Flags: [ PF_X, PF_R ] + FirstSec: .text + LastSec: '.text (1)' + VAddr: 0x400000 + Align: 0x1000 + Offset: 0x0 +Sections: + - Name: .text + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x400010 + AddressAlign: 0x10 + - Name: '.text (1)' + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR, SHF_GNU_RETAIN ] + Address: 0x401000 + AddressAlign: 0x10 +Symbols: + - Name: my_function + Section: .text + Value: 0x400010 + - Name: my_other_function + Section: '.text (1)' + Value: 0x401020 From a00bbcbe7c7b1d5fb437d530555a6940c0b8d06a Mon Sep 17 00:00:00 2001 From: Louis Dionne Date: Wed, 1 May 2024 10:26:38 -0600 Subject: [PATCH 32/48] [libc++] Remove _LIBCPP_DISABLE_ADDITIONAL_DIAGNOSTICS (#90512) I strongly suspect nobody ever used that macro since it wasn't very well known. Furthermore, it only affects a handful of diagnostics and I think it makes sense to either provide them unconditionally, or to not provided them at all. --- libcxx/docs/ReleaseNotes/19.rst | 3 +++ libcxx/docs/UsingLibcxx.rst | 9 --------- libcxx/include/__config | 2 +- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/libcxx/docs/ReleaseNotes/19.rst b/libcxx/docs/ReleaseNotes/19.rst index ac4fd0ecc122bd..5a07b11cbcd509 100644 --- a/libcxx/docs/ReleaseNotes/19.rst +++ b/libcxx/docs/ReleaseNotes/19.rst @@ -118,6 +118,9 @@ Deprecations and Removals a ``std::basic_*fstream`` from a ``std::basic_string_view``, a input-iterator or a C-string, instead you can construct a temporary ``std::basic_string``. This change has been applied to C++17 and later. 
+- The ``_LIBCPP_DISABLE_ADDITIONAL_DIAGNOSTICS`` macro has been removed and is not honored anymore. Additional + warnings provided by libc++ as a matter of QoI will now be provided unconditionally. + Upcoming Deprecations and Removals ---------------------------------- diff --git a/libcxx/docs/UsingLibcxx.rst b/libcxx/docs/UsingLibcxx.rst index e7aaf4e1fbcf9c..0fdaeb433ac6a0 100644 --- a/libcxx/docs/UsingLibcxx.rst +++ b/libcxx/docs/UsingLibcxx.rst @@ -167,15 +167,6 @@ safety annotations. build of libc++ which does not export any symbols, which can be useful when building statically for inclusion into another library. -**_LIBCPP_DISABLE_ADDITIONAL_DIAGNOSTICS**: - This macro disables the additional diagnostics generated by libc++ using the - `diagnose_if` attribute. These additional diagnostics include checks for: - - * Giving `set`, `map`, `multiset`, `multimap` and their `unordered_` - counterparts a comparator which is not const callable. - * Giving an unordered associative container a hasher that is not const - callable. - **_LIBCPP_NO_VCRUNTIME**: Microsoft's C and C++ headers are fairly entangled, and some of their C++ headers are fairly hard to avoid. In particular, `vcruntime_new.h` gets pulled diff --git a/libcxx/include/__config b/libcxx/include/__config index 97cdd020c55d1f..e4c5c685a45645 100644 --- a/libcxx/include/__config +++ b/libcxx/include/__config @@ -1390,7 +1390,7 @@ typedef __char32_t char32_t; # define _LIBCPP_NO_DESTROY # endif -# if __has_attribute(__diagnose_if__) && !defined(_LIBCPP_DISABLE_ADDITIONAL_DIAGNOSTICS) +# if __has_attribute(__diagnose_if__) # define _LIBCPP_DIAGNOSE_WARNING(...) __attribute__((__diagnose_if__(__VA_ARGS__, "warning"))) # else # define _LIBCPP_DIAGNOSE_WARNING(...) From 6dfaecf077ade4bf003345501fdcfcebc8409ff7 Mon Sep 17 00:00:00 2001 From: Kojo Acquah Date: Wed, 1 May 2024 12:32:20 -0400 Subject: [PATCH 33/48] [mlir][Vector] Add patterns for efficient unsigned i4 -> i8 conversion emulation (#89131) This PR builds on https://github.com/llvm/llvm-project/pull/79494 with an additional path for efficient unsigned `i4 ->i8` type extension for 1D/2D operations. This will impact any i4 -> i8/i16/i32/i64 unsigned extensions as well as sitofp i4 -> f8/f16/f32/f64. --- .../Transforms/VectorEmulateNarrowType.cpp | 70 ++++++++++++++++--- .../Vector/vector-rewrite-narrow-types.mlir | 42 ++++++++++- 2 files changed, 101 insertions(+), 11 deletions(-) diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp index d24721f3defa65..a301b919dc5232 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp @@ -880,6 +880,38 @@ static Value rewriteI4ToI8SignedExt(PatternRewriter &rewriter, Location loc, return rewriter.create(loc, low, high); } +/// Rewrite the i4 -> i8 unsigned extension into a sequence of shuffles and +/// bitwise ops that take advantage of high-level information to avoid leaving +/// LLVM to scramble with peephole optimizations. +static Value rewriteI4ToI8UnsignedExt(PatternRewriter &rewriter, Location loc, + Value srcValue) { + VectorType srcVecType = cast(srcValue.getType()); + assert(srcVecType.getElementType().isSignlessInteger(4) && + "Expected i4 type"); + + // 1. Generate a bitcast vector -> vector. 
+ SmallVector i8VecShape = llvm::to_vector(srcVecType.getShape()); + constexpr int64_t i4Toi8BitwidthFactor = 2; + i8VecShape.back() = i8VecShape.back() / i4Toi8BitwidthFactor; + auto i8VecType = VectorType::get(i8VecShape, rewriter.getI8Type()); + Value i8Vector = rewriter.create(loc, i8VecType, srcValue); + + // 2 Extend the i4 elements using shifts & masking. Low i4 elements of each + // byte are placed in one vector and the high i4 elements in another vector. + constexpr uint8_t lowBitsMask = 15; // Equivalent to [00001111] bit mask + auto lowBitsMaskValues = rewriter.create( + loc, DenseElementsAttr::get(i8VecType, lowBitsMask)); + Value low = rewriter.create(loc, i8VecType, i8Vector, + lowBitsMaskValues); + constexpr int8_t highBitsToShift = 4; + auto highShiftValues = rewriter.create( + loc, DenseElementsAttr::get(i8VecType, highBitsToShift)); + Value high = rewriter.create(loc, i8Vector, highShiftValues); + + // 3. Interleave low and high i8 elements. + return rewriter.create(loc, low, high); +} + /// Rewrite the i8 -> i4 truncation into a sequence of shuffles and bitwise ops /// that take advantage of high-level information to avoid leaving LLVM to /// scramble with peephole optimizations. @@ -1048,9 +1080,10 @@ struct RewriteExtOfBitCast : OpRewritePattern { /// Rewrite the i4 -> i8 part of any conversion into a sequence of shuffles and /// bitwise ops that take advantage of high-level information to avoid leaving -/// LLVM to scramble with peephole optimizations. +/// LLVM to scramble with peephole optimizations. Templated to choose between +/// signed and unsigned conversions. /// -/// For example: +/// For example (signed): /// arith.extsi %in : vector<8xi4> to vector<8xi32> /// is rewriten as /// %0 = vector.bitcast %in : vector<8xi4> to vector<4xi8> @@ -1069,16 +1102,25 @@ struct RewriteExtOfBitCast : OpRewritePattern { /// %4 = vector.interleave %2, %3 : vector<4xi8> /// %5 = arith.sitofp %4 : vector<8xi8> to vector<8xf32> /// -template -struct RewriteAlignedSubByteIntSignedExt : OpRewritePattern { +/// Example (unsigned): +/// arith.extui %in : vector<8xi4> to vector<8xi32> +/// is rewritten as +/// %0 = vector.bitcast %in : vector<8xi4> to vector<4xi8> +/// %1 = arith.andi %0, 15 : vector<4xi8> +/// %2 = arith.shrui %0, 4 : vector<4xi8> +/// %3 = vector.interleave %1, %2 : vector<4xi8> +/// %4 = arith.extui %3 : vector<8xi8> to vector<8xi32> +/// +template +struct RewriteAlignedSubByteIntExt : OpRewritePattern { using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(ConversionOpType conversionOp, PatternRewriter &rewriter) const override { // Verify the preconditions. Value srcValue = conversionOp.getIn(); - auto srcVecType = dyn_cast(srcValue.getType()); - auto dstVecType = dyn_cast(conversionOp.getType()); + auto srcVecType = cast(srcValue.getType()); + auto dstVecType = cast(conversionOp.getType()); if (failed( commonConversionPrecondition(rewriter, dstVecType, conversionOp))) return failure(); @@ -1089,8 +1131,14 @@ struct RewriteAlignedSubByteIntSignedExt : OpRewritePattern { return failure(); // Perform the rewrite. - Value subByteExt = - rewriteI4ToI8SignedExt(rewriter, conversionOp.getLoc(), srcValue); + Value subByteExt; + if (isSigned) { + subByteExt = + rewriteI4ToI8SignedExt(rewriter, conversionOp.getLoc(), srcValue); + } else { + subByteExt = + rewriteI4ToI8UnsignedExt(rewriter, conversionOp.getLoc(), srcValue); + } // Finalize the rewrite. 
rewriter.replaceOpWithNewOp( @@ -1229,10 +1277,12 @@ void vector::populateVectorNarrowTypeRewritePatterns( // Patterns for aligned cases. We set higher priority as they are expected to // generate better performance for aligned cases. - patterns.add, - RewriteAlignedSubByteIntSignedExt, + patterns.add, + RewriteAlignedSubByteIntExt, RewriteAlignedSubByteIntTrunc>(patterns.getContext(), benefit.getBenefit() + 1); + patterns.add>( + patterns.getContext(), benefit.getBenefit() + 1); } void vector::populateVectorTransposeNarrowTypeRewritePatterns( diff --git a/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir b/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir index 8f0148119806c9..614b2d4945348b 100644 --- a/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir +++ b/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir @@ -324,6 +324,47 @@ func.func @i7_transpose(%a: vector<8x16xi7>) -> vector<16x8xi7> { return %0 : vector<16x8xi7> } +// CHECK-LABEL: func.func @aligned_extui( +func.func @aligned_extui(%a: vector<8xi4>) -> vector<8xi32> { +// CHECK-SAME: %[[IN:.*]]: vector<8xi4>) -> vector<8xi32> { +// CHECK: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<4xi8> +// CHECK: %[[LOWBITS_MASK:.*]] = arith.constant dense<15> : vector<4xi8> +// CHECK: %[[BITCAST:.*]] = vector.bitcast %[[IN]] : vector<8xi4> to vector<4xi8> +// CHECK: %[[LOW:.*]] = arith.andi %[[BITCAST]], %[[LOWBITS_MASK]] : vector<4xi8> +// CHECK: %[[HIGH:.*]] = arith.shrui %[[BITCAST]], %[[I4_BITS]] : vector<4xi8> +// CHECK: %[[INTERLEAVE:.*]] = vector.interleave %[[LOW]], %[[HIGH]] : vector<4xi8> +// CHECK: %[[I32:.*]] = arith.extui %[[INTERLEAVE]] : vector<8xi8> to vector<8xi32> + %0 = arith.extui %a : vector<8xi4> to vector<8xi32> + return %0 : vector<8xi32> +} + +// CHECK-LABEL: func.func @aligned_extui_2d( +func.func @aligned_extui_2d(%a: vector<8x32xi4>) -> vector<8x32xi32> { +// CHECK-SAME: %[[VAL_0:.*]]: vector<8x32xi4>) -> vector<8x32xi32> { +// CHECK: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<8x16xi8> +// CHECK: %[[LOWBITS_MASK:.*]] = arith.constant dense<15> : vector<8x16xi8> +// CHECK: %[[BITCAST:.*]] = vector.bitcast %[[VAL_0]] : vector<8x32xi4> to vector<8x16xi8> +// CHECK: %[[LOW:.*]] = arith.andi %[[BITCAST]], %[[LOWBITS_MASK]] : vector<8x16xi8> +// CHECK: %[[HIGH:.*]] = arith.shrui %[[BITCAST]], %[[I4_BITS]] : vector<8x16xi8> +// CHECK: %[[INTERLEAVE:.*]] = vector.interleave %[[LOW]], %[[HIGH]] : vector<8x16xi8> +// CHECK: %[[I32:.*]] = arith.extui %[[INTERLEAVE]] : vector<8x32xi8> to vector<8x32xi32> + %0 = arith.extui %a : vector<8x32xi4> to vector<8x32xi32> + return %0 : vector<8x32xi32> +} + +// CHECK-LABEL: func.func @aligned_extui_base_case( +func.func @aligned_extui_base_case(%a: vector<8xi4>) -> vector<8xi8> { +// CHECK-SAME: %[[IN:.*]]: vector<8xi4>) -> vector<8xi8> { +// CHECK: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<4xi8> +// CHECK: %[[LOWBITS_MASK:.*]] = arith.constant dense<15> : vector<4xi8> +// CHECK: %[[BITCAST:.*]] = vector.bitcast %[[IN]] : vector<8xi4> to vector<4xi8> +// CHECK: %[[LOW:.*]] = arith.andi %[[BITCAST]], %[[LOWBITS_MASK]] : vector<4xi8> +// CHECK: %[[HIGH:.*]] = arith.shrui %[[BITCAST]], %[[I4_BITS]] : vector<4xi8> +// CHECK: %[[INTERLEAVE:.*]] = vector.interleave %[[LOW]], %[[HIGH]] : vector<4xi8> + %0 = arith.extui %a : vector<8xi4> to vector<8xi8> + return %0 : vector<8xi8> +} + module attributes {transform.with_named_sequence} { transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) { %f = 
transform.structured.match ops{["func.func"]} in %module_op @@ -335,4 +376,3 @@ module attributes {transform.with_named_sequence} { transform.yield } } - From a764f49b4ae80daa5ba56cf0892bf0ebce48e2b3 Mon Sep 17 00:00:00 2001 From: Xiang Li Date: Wed, 1 May 2024 09:33:58 -0700 Subject: [PATCH 34/48] [DirectX backend] generate ISG1, OSG1 part for compute shader (#90508) Empty ISG1 and OSG1 parts are generated for compute shader since there's no signature for compute shader. Fixes #88778 --- .../lib/Target/DirectX/DXContainerGlobals.cpp | 51 +++++++++++++++---- .../DirectX/ContainerData/EmptySignature.ll | 26 ++++++++++ 2 files changed, 66 insertions(+), 11 deletions(-) create mode 100644 llvm/test/CodeGen/DirectX/ContainerData/EmptySignature.ll diff --git a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp index 65cf1dfdb4031b..67e04c212a6921 100644 --- a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp +++ b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp @@ -18,18 +18,25 @@ #include "llvm/CodeGen/Passes.h" #include "llvm/IR/Constants.h" #include "llvm/InitializePasses.h" +#include "llvm/MC/DXContainerPSVInfo.h" #include "llvm/Pass.h" #include "llvm/Support/MD5.h" #include "llvm/Transforms/Utils/ModuleUtils.h" using namespace llvm; using namespace llvm::dxil; +using namespace llvm::mcdxbc; namespace { class DXContainerGlobals : public llvm::ModulePass { + GlobalVariable *buildContainerGlobal(Module &M, Constant *Content, + StringRef Name, StringRef SectionName); GlobalVariable *getFeatureFlags(Module &M); GlobalVariable *computeShaderHash(Module &M); + GlobalVariable *buildSignature(Module &M, Signature &Sig, StringRef Name, + StringRef SectionName); + void addSignature(Module &M, SmallVector &Globals); public: static char ID; // Pass identification, replacement for typeid @@ -55,7 +62,7 @@ bool DXContainerGlobals::runOnModule(Module &M) { llvm::SmallVector Globals; Globals.push_back(getFeatureFlags(M)); Globals.push_back(computeShaderHash(M)); - + addSignature(M, Globals); appendToCompilerUsed(M, Globals); return true; } @@ -68,12 +75,7 @@ GlobalVariable *DXContainerGlobals::getFeatureFlags(Module &M) { Constant *FeatureFlagsConstant = ConstantInt::get(M.getContext(), APInt(64, FeatureFlags)); - auto *GV = new llvm::GlobalVariable(M, FeatureFlagsConstant->getType(), true, - GlobalValue::PrivateLinkage, - FeatureFlagsConstant, "dx.sfi0"); - GV->setSection("SFI0"); - GV->setAlignment(Align(4)); - return GV; + return buildContainerGlobal(M, FeatureFlagsConstant, "dx.sfi0", "SFI0"); } GlobalVariable *DXContainerGlobals::computeShaderHash(Module &M) { @@ -96,14 +98,41 @@ GlobalVariable *DXContainerGlobals::computeShaderHash(Module &M) { Constant *ModuleConstant = ConstantDataArray::get(M.getContext(), arrayRefFromStringRef(Data)); - auto *GV = new llvm::GlobalVariable(M, ModuleConstant->getType(), true, - GlobalValue::PrivateLinkage, - ModuleConstant, "dx.hash"); - GV->setSection("HASH"); + return buildContainerGlobal(M, ModuleConstant, "dx.hash", "HASH"); +} + +GlobalVariable *DXContainerGlobals::buildContainerGlobal( + Module &M, Constant *Content, StringRef Name, StringRef SectionName) { + auto *GV = new llvm::GlobalVariable( + M, Content->getType(), true, GlobalValue::PrivateLinkage, Content, Name); + GV->setSection(SectionName); GV->setAlignment(Align(4)); return GV; } +GlobalVariable *DXContainerGlobals::buildSignature(Module &M, Signature &Sig, + StringRef Name, + StringRef SectionName) { + SmallString<256> Data; + raw_svector_ostream OS(Data); 
+ Sig.write(OS); + Constant *Constant = + ConstantDataArray::getString(M.getContext(), Data, /*AddNull*/ false); + return buildContainerGlobal(M, Constant, Name, SectionName); +} + +void DXContainerGlobals::addSignature(Module &M, + SmallVector &Globals) { + // FIXME: support graphics shader. + // see issue https://github.com/llvm/llvm-project/issues/90504. + + Signature InputSig; + Globals.emplace_back(buildSignature(M, InputSig, "dx.isg1", "ISG1")); + + Signature OutputSig; + Globals.emplace_back(buildSignature(M, OutputSig, "dx.osg1", "OSG1")); +} + char DXContainerGlobals::ID = 0; INITIALIZE_PASS_BEGIN(DXContainerGlobals, "dxil-globals", "DXContainer Global Emitter", false, true) diff --git a/llvm/test/CodeGen/DirectX/ContainerData/EmptySignature.ll b/llvm/test/CodeGen/DirectX/ContainerData/EmptySignature.ll new file mode 100644 index 00000000000000..796f7942a80b12 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/EmptySignature.ll @@ -0,0 +1,26 @@ +; RUN: opt %s -dxil-embed -dxil-globals -S -o - | FileCheck %s +; RUN: llc %s --filetype=obj -o - | obj2yaml | FileCheck %s --check-prefix=DXC +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK: @dx.isg1 = private constant [8 x i8] c"\00\00\00\00\08\00\00\00", section "ISG1", align 4 +; CHECK: @dx.osg1 = private constant [8 x i8] c"\00\00\00\00\08\00\00\00", section "OSG1", align 4 + +define void @main() #0 { +entry: + ret void +} + +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!dx.valver = !{!0} + +!0 = !{i32 1, i32 7} + +; DXC: - Name: ISG1 +; DXC-NEXT: Size: 8 +; DXC-NEXT: Signature: +; DXC-NEXT: Parameters: [] +; DXC: - Name: OSG1 +; DXC-NEXT: Size: 8 +; DXC-NEXT: Signature: +; DXC-NEXT: Parameters: [] From 754072e9a5c0785560953e237229d0fbdd504d04 Mon Sep 17 00:00:00 2001 From: Mark de Wever Date: Wed, 1 May 2024 18:32:14 +0200 Subject: [PATCH 35/48] [NFC][libc++] Fixes comment indention. The output on eel.is has similar oddities, so I expect this was copy pasted. --- libcxx/include/stdexcept | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/libcxx/include/stdexcept b/libcxx/include/stdexcept index 4e4cd22a6a64d2..853c185187c778 100644 --- a/libcxx/include/stdexcept +++ b/libcxx/include/stdexcept @@ -17,14 +17,14 @@ namespace std { class logic_error; - class domain_error; - class invalid_argument; - class length_error; - class out_of_range; +class domain_error; +class invalid_argument; +class length_error; +class out_of_range; class runtime_error; - class range_error; - class overflow_error; - class underflow_error; +class range_error; +class overflow_error; +class underflow_error; for each class xxx_error: From 477c705cb0d7cc857fad8184faac523f8ef72c84 Mon Sep 17 00:00:00 2001 From: Jan Svoboda Date: Wed, 1 May 2024 09:38:16 -0700 Subject: [PATCH 36/48] [clang][modules] Allow including module maps to be non-affecting (#89992) The dependency scanner only puts top-level affecting module map files on the command line for explicitly building a module. This is done because any affecting child module map files should be referenced by the top-level one, meaning listing them explicitly does not have any meaning and only makes the command lines longer. However, a problem arises whenever the definition of an affecting module lives in a module map that is not top-level. Considering the rules explained above, such module map file would not make it to the command line. 
That's why 83973cf157f7850eb133a4bbfa0f8b7958bad215 started marking the parents of an affecting module map file as affecting too. This way, the top-level file does make it into the command line. This can be problematic, though. On macOS, for example, the Darwin module lives in "/usr/include/Darwin.modulemap" one of many module map files included by "/usr/include/module.modulemap". Reporting the parent on the command line forces explicit builds to parse all the other module map files included by it, which is not necessary and can get expensive in terms of file system traffic. This patch solves that performance issue by stopping marking parent module map files as affecting, and marking module map files as top-level whenever they are top-level among the set of affecting files, not among the set of all known files. This means that the top-level "/usr/include/module.modulemap" is now not marked as affecting and "/usr/include/Darwin.modulemap" is. --- clang/include/clang/Serialization/ASTWriter.h | 9 ++ clang/lib/Serialization/ASTWriter.cpp | 90 ++++++++++++------- .../ClangScanDeps/modules-extern-unrelated.m | 20 ++--- 3 files changed, 75 insertions(+), 44 deletions(-) diff --git a/clang/include/clang/Serialization/ASTWriter.h b/clang/include/clang/Serialization/ASTWriter.h index 436acbb1803ca7..a55dfd327670f6 100644 --- a/clang/include/clang/Serialization/ASTWriter.h +++ b/clang/include/clang/Serialization/ASTWriter.h @@ -76,6 +76,10 @@ class StoredDeclsList; class SwitchCase; class Token; +namespace SrcMgr { +class FileInfo; +} // namespace SrcMgr + /// Writes an AST file containing the contents of a translation unit. /// /// The ASTWriter class produces a bitstream containing the serialized @@ -490,6 +494,11 @@ class ASTWriter : public ASTDeserializationListener, /// during \c SourceManager serialization. void computeNonAffectingInputFiles(); + /// Some affecting files can be included from files that are not affecting. + /// This function erases source locations pointing into such files. + SourceLocation getAffectingIncludeLoc(const SourceManager &SourceMgr, + const SrcMgr::FileInfo &File); + /// Returns an adjusted \c FileID, accounting for any non-affecting input /// files. FileID getAdjustedFileID(FileID FID) const; diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index b80b4ff14bd3c5..80c7ce643088b6 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -173,54 +173,50 @@ GetAffectingModuleMaps(const Preprocessor &PP, Module *RootModule) { const HeaderSearch &HS = PP.getHeaderSearchInfo(); const ModuleMap &MM = HS.getModuleMap(); - const SourceManager &SourceMgr = PP.getSourceManager(); std::set ModuleMaps; - auto CollectIncludingModuleMaps = [&](FileID FID, FileEntryRef F) { - if (!ModuleMaps.insert(F).second) + std::set ProcessedModules; + auto CollectModuleMapsForHierarchy = [&](const Module *M) { + M = M->getTopLevelModule(); + + if (!ProcessedModules.insert(M).second) return; - SourceLocation Loc = SourceMgr.getIncludeLoc(FID); - // The include location of inferred module maps can point into the header - // file that triggered the inferring. Cut off the walk if that's the case. 
- while (Loc.isValid() && isModuleMap(SourceMgr.getFileCharacteristic(Loc))) { - FID = SourceMgr.getFileID(Loc); - F = *SourceMgr.getFileEntryRefForID(FID); - if (!ModuleMaps.insert(F).second) - break; - Loc = SourceMgr.getIncludeLoc(FID); - } - }; - std::set ProcessedModules; - auto CollectIncludingMapsFromAncestors = [&](const Module *M) { - for (const Module *Mod = M; Mod; Mod = Mod->Parent) { - if (!ProcessedModules.insert(Mod).second) - break; + std::queue Q; + Q.push(M); + while (!Q.empty()) { + const Module *Mod = Q.front(); + Q.pop(); + // The containing module map is affecting, because it's being pointed // into by Module::DefinitionLoc. - if (FileID FID = MM.getContainingModuleMapFileID(Mod); FID.isValid()) - CollectIncludingModuleMaps(FID, *SourceMgr.getFileEntryRefForID(FID)); - // For inferred modules, the module map that allowed inferring is not in - // the include chain of the virtual containing module map file. It did - // affect the compilation, though. - if (FileID FID = MM.getModuleMapFileIDForUniquing(Mod); FID.isValid()) - CollectIncludingModuleMaps(FID, *SourceMgr.getFileEntryRefForID(FID)); + if (auto FE = MM.getContainingModuleMapFile(Mod)) + ModuleMaps.insert(*FE); + // For inferred modules, the module map that allowed inferring is not + // related to the virtual containing module map file. It did affect the + // compilation, though. + if (auto FE = MM.getModuleMapFileForUniquing(Mod)) + ModuleMaps.insert(*FE); + + for (auto *SubM : Mod->submodules()) + Q.push(SubM); } }; // Handle all the affecting modules referenced from the root module. + CollectModuleMapsForHierarchy(RootModule); + std::queue Q; Q.push(RootModule); while (!Q.empty()) { const Module *CurrentModule = Q.front(); Q.pop(); - CollectIncludingMapsFromAncestors(CurrentModule); for (const Module *ImportedModule : CurrentModule->Imports) - CollectIncludingMapsFromAncestors(ImportedModule); + CollectModuleMapsForHierarchy(ImportedModule); for (const Module *UndeclaredModule : CurrentModule->UndeclaredUses) - CollectIncludingMapsFromAncestors(UndeclaredModule); + CollectModuleMapsForHierarchy(UndeclaredModule); for (auto *M : CurrentModule->submodules()) Q.push(M); @@ -249,9 +245,27 @@ GetAffectingModuleMaps(const Preprocessor &PP, Module *RootModule) { for (const auto &KH : HS.findResolvedModulesForHeader(*File)) if (const Module *M = KH.getModule()) - CollectIncludingMapsFromAncestors(M); + CollectModuleMapsForHierarchy(M); } + // FIXME: This algorithm is not correct for module map hierarchies where + // module map file defining a (sub)module of a top-level module X includes + // a module map file that defines a (sub)module of another top-level module Y. + // Whenever X is affecting and Y is not, "replaying" this PCM file will fail + // when parsing module map files for X due to not knowing about the `extern` + // module map for Y. + // + // We don't have a good way to fix it here. We could mark all children of + // affecting module map files as being affecting as well, but that's + // expensive. SourceManager does not model the edge from parent to child + // SLocEntries, so instead, we would need to iterate over leaf module map + // files, walk up their include hierarchy and check whether we arrive at an + // affecting module map. + // + // Instead of complicating and slowing down this function, we should probably + // just ban module map hierarchies where module map defining a (sub)module X + // includes a module map defining a module that's not a submodule of X. 
+ return ModuleMaps; } @@ -1665,6 +1679,18 @@ struct InputFileEntry { } // namespace +SourceLocation ASTWriter::getAffectingIncludeLoc(const SourceManager &SourceMgr, + const SrcMgr::FileInfo &File) { + SourceLocation IncludeLoc = File.getIncludeLoc(); + if (IncludeLoc.isValid()) { + FileID IncludeFID = SourceMgr.getFileID(IncludeLoc); + assert(IncludeFID.isValid() && "IncludeLoc in invalid file"); + if (!IsSLocAffecting[IncludeFID.ID]) + IncludeLoc = SourceLocation(); + } + return IncludeLoc; +} + void ASTWriter::WriteInputFiles(SourceManager &SourceMgr, HeaderSearchOptions &HSOpts) { using namespace llvm; @@ -1718,7 +1744,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr, Entry.IsSystemFile = isSystem(File.getFileCharacteristic()); Entry.IsTransient = Cache->IsTransient; Entry.BufferOverridden = Cache->BufferOverridden; - Entry.IsTopLevel = File.getIncludeLoc().isInvalid(); + Entry.IsTopLevel = getAffectingIncludeLoc(SourceMgr, File).isInvalid(); Entry.IsModuleMap = isModuleMap(File.getFileCharacteristic()); auto ContentHash = hash_code(-1); @@ -2245,7 +2271,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr, SLocEntryOffsets.push_back(Offset); // Starting offset of this entry within this module, so skip the dummy. Record.push_back(getAdjustedOffset(SLoc->getOffset()) - 2); - AddSourceLocation(File.getIncludeLoc(), Record); + AddSourceLocation(getAffectingIncludeLoc(SourceMgr, File), Record); Record.push_back(File.getFileCharacteristic()); // FIXME: stable encoding Record.push_back(File.hasLineDirectives()); diff --git a/clang/test/ClangScanDeps/modules-extern-unrelated.m b/clang/test/ClangScanDeps/modules-extern-unrelated.m index 76611c596d3eff..957132fd5b1854 100644 --- a/clang/test/ClangScanDeps/modules-extern-unrelated.m +++ b/clang/test/ClangScanDeps/modules-extern-unrelated.m @@ -1,3 +1,6 @@ +// This test checks that only module map files defining affecting modules are +// affecting. 
+ // RUN: rm -rf %t // RUN: split-file %s %t @@ -22,15 +25,8 @@ //--- second/second.h #include "first_other.h" -//--- cdb.json.template -[{ - "directory": "DIR", - "file": "DIR/tu.m", - "command": "clang -fmodules -fmodules-cache-path=DIR/cache -I DIR/zeroth -I DIR/first -I DIR/second -c DIR/tu.m -o DIR/tu.o" -}] - -// RUN: sed -e "s|DIR|%/t|g" -e "s|INPUTS|%/S/Inputs|g" %t/cdb.json.template > %t/cdb.json -// RUN: clang-scan-deps -compilation-database %t/cdb.json -format experimental-full > %t/result.json +// RUN: clang-scan-deps -format experimental-full -o %t/result.json \ +// RUN: -- %clang -fmodules -fmodules-cache-path=%t/cache -I %t/zeroth -I %t/first -I %t/second -c %t/tu.m -o %t/tu.o // RUN: cat %t/result.json | sed 's:\\\\\?:/:g' | FileCheck %s -DPREFIX=%/t // CHECK: { @@ -67,11 +63,11 @@ // CHECK-NEXT: ], // CHECK-NEXT: "clang-modulemap-file": "[[PREFIX]]/second/second.modulemap", // CHECK-NEXT: "command-line": [ +// CHECK-NOT: "-fmodule-map-file=[[PREFIX]]/second/module.modulemap" // CHECK: ], // CHECK-NEXT: "context-hash": "{{.*}}", // CHECK-NEXT: "file-deps": [ // CHECK-NEXT: "[[PREFIX]]/first/module.modulemap", -// CHECK-NEXT: "[[PREFIX]]/second/module.modulemap", // CHECK-NEXT: "[[PREFIX]]/second/second.h", // CHECK-NEXT: "[[PREFIX]]/second/second.modulemap" // CHECK-NEXT: ], @@ -90,11 +86,11 @@ // CHECK-NEXT: ], // CHECK-NEXT: "clang-modulemap-file": "[[PREFIX]]/zeroth/module.modulemap", // CHECK-NEXT: "command-line": [ +// CHECK-NOT: "-fmodule-map-file=[[PREFIX]]/second/module.modulemap" // CHECK: ], // CHECK-NEXT: "context-hash": "{{.*}}", // CHECK-NEXT: "file-deps": [ // CHECK-NEXT: "[[PREFIX]]/first/module.modulemap", -// CHECK-NEXT: "[[PREFIX]]/second/module.modulemap", // CHECK-NEXT: "[[PREFIX]]/second/second.modulemap", // CHECK-NEXT: "[[PREFIX]]/zeroth/module.modulemap", // CHECK-NEXT: "[[PREFIX]]/zeroth/zeroth.h" @@ -115,7 +111,7 @@ // CHECK-NEXT: ], // CHECK-NEXT: "command-line": [ // CHECK: ], -// CHECK-NEXT: "executable": "clang", +// CHECK-NEXT: "executable": "{{.*}}", // CHECK-NEXT: "file-deps": [ // CHECK-NEXT: "[[PREFIX]]/tu.m" // CHECK-NEXT: ], From 987c036f5413a94aab58bd5e27b653f740a5f7e2 Mon Sep 17 00:00:00 2001 From: Benjamin Kramer Date: Wed, 1 May 2024 18:40:33 +0200 Subject: [PATCH 37/48] [bazel][clang] Add missing dependency for 6e31714d249f857f15262518327b0f0c9509db72 --- utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel index 9823027b766c22..884a6055cf4e0c 100644 --- a/utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel @@ -110,6 +110,7 @@ cc_test( "//clang:ast", "//clang:ast_matchers", "//clang:basic", + "//clang:frontend", "//clang:lex", "//clang:parse", "//clang:tooling", From 6c369cf937b7d9acb98a1fc46b1340cef7703e12 Mon Sep 17 00:00:00 2001 From: Tomas Matheson Date: Wed, 1 May 2024 18:01:14 +0100 Subject: [PATCH 38/48] [AArch64] Changes missing from cfca97742723 (#90314) --- clang/lib/Basic/CMakeLists.txt | 3 +++ clang/lib/CodeGen/CMakeLists.txt | 3 +++ clang/lib/Driver/CMakeLists.txt | 3 +++ clang/tools/driver/CMakeLists.txt | 3 +++ llvm/include/module.install.modulemap | 2 ++ llvm/include/module.modulemap | 5 +++++ 6 files changed, 19 insertions(+) diff --git a/clang/lib/Basic/CMakeLists.txt b/clang/lib/Basic/CMakeLists.txt index 2e218ba7c84cca..824d4a0e2eee57 100644 --- 
a/clang/lib/Basic/CMakeLists.txt +++ b/clang/lib/Basic/CMakeLists.txt @@ -130,6 +130,9 @@ add_clang_library(clangBasic DEPENDS omp_gen ClangDriverOptions + # These generated headers are included transitively. + ARMTargetParserTableGen + AArch64TargetParserTableGen ) target_link_libraries(clangBasic diff --git a/clang/lib/CodeGen/CMakeLists.txt b/clang/lib/CodeGen/CMakeLists.txt index 52216d93a302bb..7a933d0ed0d0d7 100644 --- a/clang/lib/CodeGen/CMakeLists.txt +++ b/clang/lib/CodeGen/CMakeLists.txt @@ -143,6 +143,9 @@ add_clang_library(clangCodeGen DEPENDS intrinsics_gen ClangDriverOptions + # These generated headers are included transitively. + ARMTargetParserTableGen + AArch64TargetParserTableGen LINK_LIBS clangAST diff --git a/clang/lib/Driver/CMakeLists.txt b/clang/lib/Driver/CMakeLists.txt index 58427e3f83c420..32a4378ab499fa 100644 --- a/clang/lib/Driver/CMakeLists.txt +++ b/clang/lib/Driver/CMakeLists.txt @@ -90,6 +90,9 @@ add_clang_library(clangDriver DEPENDS ClangDriverOptions + # These generated headers are included transitively. + ARMTargetParserTableGen + AArch64TargetParserTableGen LINK_LIBS clangBasic diff --git a/clang/tools/driver/CMakeLists.txt b/clang/tools/driver/CMakeLists.txt index d70b92b0984e52..290bf2a42536dd 100644 --- a/clang/tools/driver/CMakeLists.txt +++ b/clang/tools/driver/CMakeLists.txt @@ -31,6 +31,9 @@ add_clang_tool(clang DEPENDS intrinsics_gen + # These generated headers are included transitively. + ARMTargetParserTableGen + AArch64TargetParserTableGen ${support_plugins} GENERATE_DRIVER ) diff --git a/llvm/include/module.install.modulemap b/llvm/include/module.install.modulemap index f7302830f561de..b917cddc78034c 100644 --- a/llvm/include/module.install.modulemap +++ b/llvm/include/module.install.modulemap @@ -31,5 +31,7 @@ module LLVM_Extern_Utils_DataTypes { } module LLVM_Extern_TargetParser_Gen { + textual header "llvm/TargetParser/ARMTargetParserDef.inc" + textual header "llvm/TargetParser/AArch64TargetParserDef.inc" textual header "llvm/TargetParser/RISCVTargetParserDef.inc" } diff --git a/llvm/include/module.modulemap b/llvm/include/module.modulemap index e60e03a282ac6e..b00da6d7cd28c7 100644 --- a/llvm/include/module.modulemap +++ b/llvm/include/module.modulemap @@ -345,6 +345,11 @@ extern module LLVM_Extern_Utils_DataTypes "module.extern.modulemap" // Build the module with the tablegen-generated files needed by the // TargetParser module before building the TargetParser module itself. module TargetParserGen { + module AArch64TargetParserDef { + header "llvm/TargetParser/AArch64TargetParser.h" + extern module LLVM_Extern_TargetParser_Gen "module.extern.modulemap" + export * + } module RISCVTargetParserDef { header "llvm/TargetParser/RISCVTargetParser.h" extern module LLVM_Extern_TargetParser_Gen "module.extern.modulemap" From cf2f32c97f8fece105557c2357be4809cb9c14a1 Mon Sep 17 00:00:00 2001 From: David Tellenbach Date: Wed, 1 May 2024 10:07:51 -0700 Subject: [PATCH 39/48] [MIR] Serialize MachineFrameInfo::isCalleeSavedInfoValid() (#90561) In case of functions without a stack frame no "stack" field is serialized into MIR which leads to isCalleeSavedInfoValid being false when reading a MIR file back in. To fix this we should serialize MachineFrameInfo::isCalleeSavedInfoValid() into MIR. 
--- llvm/include/llvm/CodeGen/MIRYamlMapping.h | 6 ++- llvm/lib/CodeGen/MIRParser/MIRParser.cpp | 1 + llvm/lib/CodeGen/MIRPrinter.cpp | 1 + .../MIR/AArch64/calleesavedinfovalid.mir | 41 +++++++++++++++++++ llvm/test/CodeGen/MIR/Generic/frame-info.mir | 1 + 5 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 llvm/test/CodeGen/MIR/AArch64/calleesavedinfovalid.mir diff --git a/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/llvm/include/llvm/CodeGen/MIRYamlMapping.h index bb8dbb0478ff54..304db57eca4994 100644 --- a/llvm/include/llvm/CodeGen/MIRYamlMapping.h +++ b/llvm/include/llvm/CodeGen/MIRYamlMapping.h @@ -640,6 +640,7 @@ struct MachineFrameInfo { bool HasVAStart = false; bool HasMustTailInVarArgFunc = false; bool HasTailCall = false; + bool IsCalleeSavedInfoValid = false; unsigned LocalFrameSize = 0; StringValue SavePoint; StringValue RestorePoint; @@ -663,7 +664,8 @@ struct MachineFrameInfo { HasMustTailInVarArgFunc == Other.HasMustTailInVarArgFunc && HasTailCall == Other.HasTailCall && LocalFrameSize == Other.LocalFrameSize && - SavePoint == Other.SavePoint && RestorePoint == Other.RestorePoint; + SavePoint == Other.SavePoint && RestorePoint == Other.RestorePoint && + IsCalleeSavedInfoValid == Other.IsCalleeSavedInfoValid; } }; @@ -691,6 +693,8 @@ template <> struct MappingTraits { YamlIO.mapOptional("hasMustTailInVarArgFunc", MFI.HasMustTailInVarArgFunc, false); YamlIO.mapOptional("hasTailCall", MFI.HasTailCall, false); + YamlIO.mapOptional("isCalleeSavedInfoValid", MFI.IsCalleeSavedInfoValid, + false); YamlIO.mapOptional("localFrameSize", MFI.LocalFrameSize, (unsigned)0); YamlIO.mapOptional("savePoint", MFI.SavePoint, StringValue()); // Don't print it out when it's empty. diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp index b65fc8cf5099b8..a5d6a40392d0cb 100644 --- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp @@ -779,6 +779,7 @@ bool MIRParserImpl::initializeFrameInfo(PerFunctionMIParsingState &PFS, MFI.setHasVAStart(YamlMFI.HasVAStart); MFI.setHasMustTailInVarArgFunc(YamlMFI.HasMustTailInVarArgFunc); MFI.setHasTailCall(YamlMFI.HasTailCall); + MFI.setCalleeSavedInfoValid(YamlMFI.IsCalleeSavedInfoValid); MFI.setLocalFrameSize(YamlMFI.LocalFrameSize); if (!YamlMFI.SavePoint.Value.empty()) { MachineBasicBlock *MBB = nullptr; diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp index 6751fcf97087c7..26d534f369ae5f 100644 --- a/llvm/lib/CodeGen/MIRPrinter.cpp +++ b/llvm/lib/CodeGen/MIRPrinter.cpp @@ -368,6 +368,7 @@ void MIRPrinter::convert(ModuleSlotTracker &MST, YamlMFI.HasVAStart = MFI.hasVAStart(); YamlMFI.HasMustTailInVarArgFunc = MFI.hasMustTailInVarArgFunc(); YamlMFI.HasTailCall = MFI.hasTailCall(); + YamlMFI.IsCalleeSavedInfoValid = MFI.isCalleeSavedInfoValid(); YamlMFI.LocalFrameSize = MFI.getLocalFrameSize(); if (MFI.getSavePoint()) { raw_string_ostream StrOS(YamlMFI.SavePoint.Value); diff --git a/llvm/test/CodeGen/MIR/AArch64/calleesavedinfovalid.mir b/llvm/test/CodeGen/MIR/AArch64/calleesavedinfovalid.mir new file mode 100644 index 00000000000000..829eccddb27f3e --- /dev/null +++ b/llvm/test/CodeGen/MIR/AArch64/calleesavedinfovalid.mir @@ -0,0 +1,41 @@ + +# RUN: llc -run-pass=none -mtriple=aarch64-- -o - %s | FileCheck %s + +--- +# CHECK-LABEL: name: no_stack_no_calleesavedinfo +# CHECK: isCalleeSavedInfoValid: false +name: no_stack_no_calleesavedinfo +frameInfo: + isCalleeSavedInfoValid: false +stack: [] + +... 
+--- +# CHECK-LABEL: name: no_stack_calleesavedinfo +# CHECK: isCalleeSavedInfoValid: true +name: no_stack_calleesavedinfo +frameInfo: + isCalleeSavedInfoValid: true +stack: [] + +... +--- +# CHECK-LABEL: name: stack_no_calleesavedinfo +# CHECK: isCalleeSavedInfoValid: true +name: stack_no_calleesavedinfo +frameInfo: + isCalleeSavedInfoValid: false +stack: + - { id: 0, type: spill-slot, offset: -8, size: 8, alignment: 8, callee-saved-register: '$lr' } + +... +--- +# CHECK-LABEL: name: stack_calleesavedinfo +# CHECK: isCalleeSavedInfoValid: true +name: stack_calleesavedinfo +frameInfo: + isCalleeSavedInfoValid: true +stack: + - { id: 0, type: spill-slot, offset: -8, size: 8, alignment: 8, callee-saved-register: '$lr' } + +... diff --git a/llvm/test/CodeGen/MIR/Generic/frame-info.mir b/llvm/test/CodeGen/MIR/Generic/frame-info.mir index 4a897a9ec5e3bb..d5e014cf629915 100644 --- a/llvm/test/CodeGen/MIR/Generic/frame-info.mir +++ b/llvm/test/CodeGen/MIR/Generic/frame-info.mir @@ -44,6 +44,7 @@ tracksRegLiveness: true # CHECK-NEXT: hasVAStart: false # CHECK-NEXT: hasMustTailInVarArgFunc: false # CHECK-NEXT: hasTailCall: false +# CHECK-NEXT: isCalleeSavedInfoValid: false # CHECK-NEXT: localFrameSize: 0 # CHECK-NEXT: savePoint: '' # CHECK-NEXT: restorePoint: '' From 7396ab1210a2aeee6bab5b73ec6d02975ba51b93 Mon Sep 17 00:00:00 2001 From: Nicolas Miller Date: Wed, 1 May 2024 18:15:52 +0100 Subject: [PATCH 40/48] [NVPTX] Fix 64 bits rotations with large shift values (#89399) ROTL and ROTR can take a shift amount larger than the element size, in which case the effective shift amount should be the shift amount modulo the element size. This patch adds the modulo step when the shift amount isn't known at compile time. Without it the existing implementation would end up shifting beyond the type size and give incorrect results. 
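
For reference, a small host-side sketch of the intended semantics (illustrative only; the
helper name rotl64 is made up here and this is not the PTX sequence the backend emits):

  #include <cassert>
  #include <cstdint>

  // Rotate-left of a 64-bit value; the shift amount is reduced modulo 64 first.
  uint64_t rotl64(uint64_t X, uint32_t N) {
    N &= 63;            // effective amount = amount modulo the element size
    if (N == 0)
      return X;         // avoid shifting a 64-bit value by 64 bits
    return (X << N) | (X >> (64 - N));
  }

  int main() {
    uint64_t V = 0x0123456789abcdefULL;
    assert(rotl64(V, 66) == rotl64(V, 2)); // amounts 66 and 2 rotate identically
    return 0;
  }

The same masking applies to the rotate-right path; the updated tests below cover both the
variable-amount and immediate cases.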
--- llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 10 +- llvm/test/CodeGen/NVPTX/rotate.ll | 339 ++++++++++++++++++++++-- 2 files changed, 320 insertions(+), 29 deletions(-) diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td index 897ee89323f083..142dd64ddea9dc 100644 --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -1752,8 +1752,9 @@ def ROTL64reg_sw : ".reg .b64 %lhs;\n\t" ".reg .b64 %rhs;\n\t" ".reg .u32 %amt2;\n\t" - "shl.b64 \t%lhs, $src, $amt;\n\t" - "sub.u32 \t%amt2, 64, $amt;\n\t" + "and.b32 \t%amt2, $amt, 63;\n\t" + "shl.b64 \t%lhs, $src, %amt2;\n\t" + "sub.u32 \t%amt2, 64, %amt2;\n\t" "shr.b64 \t%rhs, $src, %amt2;\n\t" "add.u64 \t$dst, %lhs, %rhs;\n\t" "}}", @@ -1765,8 +1766,9 @@ def ROTR64reg_sw : ".reg .b64 %lhs;\n\t" ".reg .b64 %rhs;\n\t" ".reg .u32 %amt2;\n\t" - "shr.b64 \t%lhs, $src, $amt;\n\t" - "sub.u32 \t%amt2, 64, $amt;\n\t" + "and.b32 \t%amt2, $amt, 63;\n\t" + "shr.b64 \t%lhs, $src, %amt2;\n\t" + "sub.u32 \t%amt2, 64, %amt2;\n\t" "shl.b64 \t%rhs, $src, %amt2;\n\t" "add.u64 \t$dst, %lhs, %rhs;\n\t" "}}", diff --git a/llvm/test/CodeGen/NVPTX/rotate.ll b/llvm/test/CodeGen/NVPTX/rotate.ll index 9d058662c27174..20c7ae5908d29f 100644 --- a/llvm/test/CodeGen/NVPTX/rotate.ll +++ b/llvm/test/CodeGen/NVPTX/rotate.ll @@ -1,7 +1,8 @@ -; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck --check-prefix=SM20 %s -; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck --check-prefix=SM35 %s -; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %} -; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_35 | %ptxas-verify %} +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc < %s --mtriple=nvptx64 -mcpu=sm_20 | FileCheck --check-prefix=SM20 %s +; RUN: llc < %s --mtriple=nvptx64 -mcpu=sm_35 | FileCheck --check-prefix=SM35 %s +; RUN: %if ptxas %{ llc < %s --mtriple=nvptx64 -mcpu=sm_20 | %ptxas-verify %} +; RUN: %if ptxas %{ llc < %s --mtriple=nvptx64 -mcpu=sm_35 | %ptxas-verify %} declare i32 @llvm.nvvm.rotate.b32(i32, i32) @@ -11,11 +12,35 @@ declare i64 @llvm.nvvm.rotate.right.b64(i64, i32) ; SM20: rotate32 ; SM35: rotate32 define i32 @rotate32(i32 %a, i32 %b) { -; SM20: shl.b32 -; SM20: sub.s32 -; SM20: shr.b32 -; SM20: add.u32 -; SM35: shf.l.wrap.b32 +; SM20-LABEL: rotate32( +; SM20: { +; SM20-NEXT: .reg .b32 %r<4>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.u32 %r1, [rotate32_param_0]; +; SM20-NEXT: ld.param.u32 %r2, [rotate32_param_1]; +; SM20-NEXT: { +; SM20-NEXT: .reg .b32 %lhs; +; SM20-NEXT: .reg .b32 %rhs; +; SM20-NEXT: .reg .b32 %amt2; +; SM20-NEXT: shl.b32 %lhs, %r1, %r2; +; SM20-NEXT: sub.s32 %amt2, 32, %r2; +; SM20-NEXT: shr.b32 %rhs, %r1, %amt2; +; SM20-NEXT: add.u32 %r3, %lhs, %rhs; +; SM20-NEXT: } +; SM20-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM20-NEXT: ret; +; +; SM35-LABEL: rotate32( +; SM35: { +; SM35-NEXT: .reg .b32 %r<4>; +; SM35-EMPTY: +; SM35-NEXT: // %bb.0: +; SM35-NEXT: ld.param.u32 %r1, [rotate32_param_0]; +; SM35-NEXT: ld.param.u32 %r2, [rotate32_param_1]; +; SM35-NEXT: shf.l.wrap.b32 %r3, %r1, %r1, %r2; +; SM35-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM35-NEXT: ret; %val = tail call i32 @llvm.nvvm.rotate.b32(i32 %a, i32 %b) ret i32 %val } @@ -23,12 +48,48 @@ define i32 @rotate32(i32 %a, i32 %b) { ; SM20: rotate64 ; SM35: rotate64 define i64 @rotate64(i64 %a, i32 %b) { -; SM20: shl.b64 -; SM20: sub.u32 -; SM20: shr.b64 -; SM20: add.u64 -; SM35: shf.l.wrap.b32 -; SM35: 
shf.l.wrap.b32 +; SM20-LABEL: rotate64( +; SM20: { +; SM20-NEXT: .reg .b32 %r<2>; +; SM20-NEXT: .reg .b64 %rd<3>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.u64 %rd1, [rotate64_param_0]; +; SM20-NEXT: ld.param.u32 %r1, [rotate64_param_1]; +; SM20-NEXT: { +; SM20-NEXT: .reg .b64 %lhs; +; SM20-NEXT: .reg .b64 %rhs; +; SM20-NEXT: .reg .u32 %amt2; +; SM20-NEXT: and.b32 %amt2, %r1, 63; +; SM20-NEXT: shl.b64 %lhs, %rd1, %amt2; +; SM20-NEXT: sub.u32 %amt2, 64, %amt2; +; SM20-NEXT: shr.b64 %rhs, %rd1, %amt2; +; SM20-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM20-NEXT: } +; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM20-NEXT: ret; +; +; SM35-LABEL: rotate64( +; SM35: { +; SM35-NEXT: .reg .b32 %r<6>; +; SM35-NEXT: .reg .b64 %rd<3>; +; SM35-EMPTY: +; SM35-NEXT: // %bb.0: +; SM35-NEXT: ld.param.u64 %rd1, [rotate64_param_0]; +; SM35-NEXT: { +; SM35-NEXT: .reg .b32 %dummy; +; SM35-NEXT: mov.b64 {%dummy,%r1}, %rd1; +; SM35-NEXT: } +; SM35-NEXT: { +; SM35-NEXT: .reg .b32 %dummy; +; SM35-NEXT: mov.b64 {%r2,%dummy}, %rd1; +; SM35-NEXT: } +; SM35-NEXT: ld.param.u32 %r3, [rotate64_param_1]; +; SM35-NEXT: shf.l.wrap.b32 %r4, %r2, %r1, %r3; +; SM35-NEXT: shf.l.wrap.b32 %r5, %r1, %r2, %r3; +; SM35-NEXT: mov.b64 %rd2, {%r5, %r4}; +; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM35-NEXT: ret; %val = tail call i64 @llvm.nvvm.rotate.b64(i64 %a, i32 %b) ret i64 %val } @@ -36,12 +97,48 @@ define i64 @rotate64(i64 %a, i32 %b) { ; SM20: rotateright64 ; SM35: rotateright64 define i64 @rotateright64(i64 %a, i32 %b) { -; SM20: shr.b64 -; SM20: sub.u32 -; SM20: shl.b64 -; SM20: add.u64 -; SM35: shf.r.wrap.b32 -; SM35: shf.r.wrap.b32 +; SM20-LABEL: rotateright64( +; SM20: { +; SM20-NEXT: .reg .b32 %r<2>; +; SM20-NEXT: .reg .b64 %rd<3>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.u64 %rd1, [rotateright64_param_0]; +; SM20-NEXT: ld.param.u32 %r1, [rotateright64_param_1]; +; SM20-NEXT: { +; SM20-NEXT: .reg .b64 %lhs; +; SM20-NEXT: .reg .b64 %rhs; +; SM20-NEXT: .reg .u32 %amt2; +; SM20-NEXT: and.b32 %amt2, %r1, 63; +; SM20-NEXT: shr.b64 %lhs, %rd1, %amt2; +; SM20-NEXT: sub.u32 %amt2, 64, %amt2; +; SM20-NEXT: shl.b64 %rhs, %rd1, %amt2; +; SM20-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM20-NEXT: } +; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM20-NEXT: ret; +; +; SM35-LABEL: rotateright64( +; SM35: { +; SM35-NEXT: .reg .b32 %r<6>; +; SM35-NEXT: .reg .b64 %rd<3>; +; SM35-EMPTY: +; SM35-NEXT: // %bb.0: +; SM35-NEXT: ld.param.u64 %rd1, [rotateright64_param_0]; +; SM35-NEXT: { +; SM35-NEXT: .reg .b32 %dummy; +; SM35-NEXT: mov.b64 {%r1,%dummy}, %rd1; +; SM35-NEXT: } +; SM35-NEXT: { +; SM35-NEXT: .reg .b32 %dummy; +; SM35-NEXT: mov.b64 {%dummy,%r2}, %rd1; +; SM35-NEXT: } +; SM35-NEXT: ld.param.u32 %r3, [rotateright64_param_1]; +; SM35-NEXT: shf.r.wrap.b32 %r4, %r2, %r1, %r3; +; SM35-NEXT: shf.r.wrap.b32 %r5, %r1, %r2, %r3; +; SM35-NEXT: mov.b64 %rd2, {%r5, %r4}; +; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM35-NEXT: ret; %val = tail call i64 @llvm.nvvm.rotate.right.b64(i64 %a, i32 %b) ret i64 %val } @@ -49,12 +146,204 @@ define i64 @rotateright64(i64 %a, i32 %b) { ; SM20: rotl0 ; SM35: rotl0 define i32 @rotl0(i32 %x) { -; SM20: shl.b32 -; SM20: shr.b32 -; SM20: add.u32 -; SM35: shf.l.wrap.b32 +; SM20-LABEL: rotl0( +; SM20: { +; SM20-NEXT: .reg .b32 %r<3>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.u32 %r1, [rotl0_param_0]; +; SM20-NEXT: { +; SM20-NEXT: .reg .b32 %lhs; +; SM20-NEXT: .reg .b32 %rhs; +; SM20-NEXT: shl.b32 %lhs, %r1, 8; +; SM20-NEXT: shr.b32 %rhs, 
%r1, 24; +; SM20-NEXT: add.u32 %r2, %lhs, %rhs; +; SM20-NEXT: } +; SM20-NEXT: st.param.b32 [func_retval0+0], %r2; +; SM20-NEXT: ret; +; +; SM35-LABEL: rotl0( +; SM35: { +; SM35-NEXT: .reg .b32 %r<3>; +; SM35-EMPTY: +; SM35-NEXT: // %bb.0: +; SM35-NEXT: ld.param.u32 %r1, [rotl0_param_0]; +; SM35-NEXT: shf.l.wrap.b32 %r2, %r1, %r1, 8; +; SM35-NEXT: st.param.b32 [func_retval0+0], %r2; +; SM35-NEXT: ret; %t0 = shl i32 %x, 8 %t1 = lshr i32 %x, 24 %t2 = or i32 %t0, %t1 ret i32 %t2 } + +declare i64 @llvm.fshl.i64(i64, i64, i64) +declare i64 @llvm.fshr.i64(i64, i64, i64) + +; SM35: rotl64 +define i64 @rotl64(i64 %a, i64 %n) { +; SM20-LABEL: rotl64( +; SM20: { +; SM20-NEXT: .reg .b32 %r<2>; +; SM20-NEXT: .reg .b64 %rd<3>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.u64 %rd1, [rotl64_param_0]; +; SM20-NEXT: ld.param.u32 %r1, [rotl64_param_1]; +; SM20-NEXT: { +; SM20-NEXT: .reg .b64 %lhs; +; SM20-NEXT: .reg .b64 %rhs; +; SM20-NEXT: .reg .u32 %amt2; +; SM20-NEXT: and.b32 %amt2, %r1, 63; +; SM20-NEXT: shl.b64 %lhs, %rd1, %amt2; +; SM20-NEXT: sub.u32 %amt2, 64, %amt2; +; SM20-NEXT: shr.b64 %rhs, %rd1, %amt2; +; SM20-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM20-NEXT: } +; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM20-NEXT: ret; +; +; SM35-LABEL: rotl64( +; SM35: { +; SM35-NEXT: .reg .b32 %r<2>; +; SM35-NEXT: .reg .b64 %rd<3>; +; SM35-EMPTY: +; SM35-NEXT: // %bb.0: +; SM35-NEXT: ld.param.u64 %rd1, [rotl64_param_0]; +; SM35-NEXT: ld.param.u32 %r1, [rotl64_param_1]; +; SM35-NEXT: { +; SM35-NEXT: .reg .b64 %lhs; +; SM35-NEXT: .reg .b64 %rhs; +; SM35-NEXT: .reg .u32 %amt2; +; SM35-NEXT: and.b32 %amt2, %r1, 63; +; SM35-NEXT: shl.b64 %lhs, %rd1, %amt2; +; SM35-NEXT: sub.u32 %amt2, 64, %amt2; +; SM35-NEXT: shr.b64 %rhs, %rd1, %amt2; +; SM35-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM35-NEXT: } +; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM35-NEXT: ret; + %val = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %n) + ret i64 %val +} + +; SM35: rotl64_imm +define i64 @rotl64_imm(i64 %a) { +; SM20-LABEL: rotl64_imm( +; SM20: { +; SM20-NEXT: .reg .b64 %rd<3>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.u64 %rd1, [rotl64_imm_param_0]; +; SM20-NEXT: { +; SM20-NEXT: .reg .b64 %lhs; +; SM20-NEXT: .reg .b64 %rhs; +; SM20-NEXT: shl.b64 %lhs, %rd1, 2; +; SM20-NEXT: shr.b64 %rhs, %rd1, 62; +; SM20-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM20-NEXT: } +; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM20-NEXT: ret; +; +; SM35-LABEL: rotl64_imm( +; SM35: { +; SM35-NEXT: .reg .b64 %rd<3>; +; SM35-EMPTY: +; SM35-NEXT: // %bb.0: +; SM35-NEXT: ld.param.u64 %rd1, [rotl64_imm_param_0]; +; SM35-NEXT: { +; SM35-NEXT: .reg .b64 %lhs; +; SM35-NEXT: .reg .b64 %rhs; +; SM35-NEXT: shl.b64 %lhs, %rd1, 2; +; SM35-NEXT: shr.b64 %rhs, %rd1, 62; +; SM35-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM35-NEXT: } +; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM35-NEXT: ret; + %val = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 66) + ret i64 %val +} + +; SM35: rotr64 +define i64 @rotr64(i64 %a, i64 %n) { +; SM20-LABEL: rotr64( +; SM20: { +; SM20-NEXT: .reg .b32 %r<2>; +; SM20-NEXT: .reg .b64 %rd<3>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.u64 %rd1, [rotr64_param_0]; +; SM20-NEXT: ld.param.u32 %r1, [rotr64_param_1]; +; SM20-NEXT: { +; SM20-NEXT: .reg .b64 %lhs; +; SM20-NEXT: .reg .b64 %rhs; +; SM20-NEXT: .reg .u32 %amt2; +; SM20-NEXT: and.b32 %amt2, %r1, 63; +; SM20-NEXT: shr.b64 %lhs, %rd1, %amt2; +; SM20-NEXT: sub.u32 %amt2, 64, %amt2; +; SM20-NEXT: shl.b64 %rhs, %rd1, %amt2; +; 
SM20-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM20-NEXT: } +; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM20-NEXT: ret; +; +; SM35-LABEL: rotr64( +; SM35: { +; SM35-NEXT: .reg .b32 %r<2>; +; SM35-NEXT: .reg .b64 %rd<3>; +; SM35-EMPTY: +; SM35-NEXT: // %bb.0: +; SM35-NEXT: ld.param.u64 %rd1, [rotr64_param_0]; +; SM35-NEXT: ld.param.u32 %r1, [rotr64_param_1]; +; SM35-NEXT: { +; SM35-NEXT: .reg .b64 %lhs; +; SM35-NEXT: .reg .b64 %rhs; +; SM35-NEXT: .reg .u32 %amt2; +; SM35-NEXT: and.b32 %amt2, %r1, 63; +; SM35-NEXT: shr.b64 %lhs, %rd1, %amt2; +; SM35-NEXT: sub.u32 %amt2, 64, %amt2; +; SM35-NEXT: shl.b64 %rhs, %rd1, %amt2; +; SM35-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM35-NEXT: } +; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM35-NEXT: ret; + %val = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %n) + ret i64 %val +} + +; SM35: rotr64_imm +define i64 @rotr64_imm(i64 %a) { +; SM20-LABEL: rotr64_imm( +; SM20: { +; SM20-NEXT: .reg .b64 %rd<3>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.u64 %rd1, [rotr64_imm_param_0]; +; SM20-NEXT: { +; SM20-NEXT: .reg .b64 %lhs; +; SM20-NEXT: .reg .b64 %rhs; +; SM20-NEXT: shl.b64 %lhs, %rd1, 62; +; SM20-NEXT: shr.b64 %rhs, %rd1, 2; +; SM20-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM20-NEXT: } +; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM20-NEXT: ret; +; +; SM35-LABEL: rotr64_imm( +; SM35: { +; SM35-NEXT: .reg .b64 %rd<3>; +; SM35-EMPTY: +; SM35-NEXT: // %bb.0: +; SM35-NEXT: ld.param.u64 %rd1, [rotr64_imm_param_0]; +; SM35-NEXT: { +; SM35-NEXT: .reg .b64 %lhs; +; SM35-NEXT: .reg .b64 %rhs; +; SM35-NEXT: shl.b64 %lhs, %rd1, 62; +; SM35-NEXT: shr.b64 %rhs, %rd1, 2; +; SM35-NEXT: add.u64 %rd2, %lhs, %rhs; +; SM35-NEXT: } +; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2; +; SM35-NEXT: ret; + %val = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 66) + ret i64 %val +} From 09f4b06dde65adcd077bd1d10f1165083c1fe410 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 1 May 2024 10:39:24 -0700 Subject: [PATCH 41/48] [RISCV] Refactor profile selection in RISCVISAInfo::parseArchString. (#90700) Instead of hardcoding the 4 current profile prefixes, treat profile selection as a fallback if we don't find "rv32" or "rv64". Update the error message accordingly. 
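
A simplified sketch of the new ordering (the classify helper below is hypothetical and only
illustrates the fallback; the real logic lives in RISCVISAInfo::parseArchString and also
handles versions and extension parsing):

  #include <string>
  #include <string_view>
  #include <vector>

  // rv32/rv64 are matched first; profile names are only tried as a fallback.
  std::string classify(std::string_view Arch,
                       const std::vector<std::string> &ProfileNames) {
    if (Arch.substr(0, 4) == "rv32" || Arch.substr(0, 4) == "rv64")
      return "plain ISA string";
    for (const std::string &P : ProfileNames)
      if (Arch.substr(0, P.size()) == P) {
        std::string_view Rest = Arch.substr(P.size());
        if (!Rest.empty() && Rest.front() != '_')
          return "error: additional extensions must be after separator '_'";
        return "profile, expanded to the profile's own -march string";
      }
    return "error: string must begin with rv32{i,e,g}, rv64{i,e,g}, or a "
           "supported profile name";
  }

For example, "rv64imac" takes the first path, "rva22u64_zfa" takes the profile fallback, and
an unknown prefix such as "rva19u64_zfa" now reports the combined error message checked by
the updated tests.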
--- clang/test/Driver/riscv-arch.c | 2 +- clang/test/Driver/riscv-profiles.c | 2 +- llvm/lib/TargetParser/RISCVISAInfo.cpp | 45 +++++++++---------- llvm/test/MC/RISCV/invalid-attribute.s | 2 +- .../TargetParser/RISCVISAInfoTest.cpp | 6 ++- 5 files changed, 29 insertions(+), 28 deletions(-) diff --git a/clang/test/Driver/riscv-arch.c b/clang/test/Driver/riscv-arch.c index abbe8612b3780a..8c701a736fc7e0 100644 --- a/clang/test/Driver/riscv-arch.c +++ b/clang/test/Driver/riscv-arch.c @@ -204,7 +204,7 @@ // RUN: not %clang --target=riscv32-unknown-elf -march=unknown -### %s \ // RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-STR %s // RV32-STR: error: invalid arch name 'unknown', -// RV32-STR: string must begin with rv32{i,e,g} or rv64{i,e,g} +// RV32-STR: string must begin with rv32{i,e,g}, rv64{i,e,g}, or a supported profile name // RUN: not %clang --target=riscv32-unknown-elf -march=rv32q -### %s \ // RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-LETTER %s diff --git a/clang/test/Driver/riscv-profiles.c b/clang/test/Driver/riscv-profiles.c index 647567d4c971f4..298f301de3feb6 100644 --- a/clang/test/Driver/riscv-profiles.c +++ b/clang/test/Driver/riscv-profiles.c @@ -318,7 +318,7 @@ // PROFILE-WITH-ADDITIONAL: "-target-feature" "+zkt" // RUN: not %clang --target=riscv64 -### -c %s 2>&1 -march=rva19u64_zfa | FileCheck -check-prefix=INVALID-PROFILE %s -// INVALID-PROFILE: error: invalid arch name 'rva19u64_zfa', unsupported profile +// INVALID-PROFILE: error: invalid arch name 'rva19u64_zfa', string must begin with rv32{i,e,g}, rv64{i,e,g}, or a supported profile name // RUN: not %clang --target=riscv64 -### -c %s 2>&1 -march=rva22u64zfa | FileCheck -check-prefix=INVALID-ADDITIONAL %s // INVALID-ADDITIONAL: error: invalid arch name 'rva22u64zfa', additional extensions must be after separator '_' diff --git a/llvm/lib/TargetParser/RISCVISAInfo.cpp b/llvm/lib/TargetParser/RISCVISAInfo.cpp index 3b0cf8fab25f46..d154c00a785927 100644 --- a/llvm/lib/TargetParser/RISCVISAInfo.cpp +++ b/llvm/lib/TargetParser/RISCVISAInfo.cpp @@ -592,40 +592,39 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension, return createStringError(errc::invalid_argument, "string must be lowercase"); - if (Arch.starts_with("rvi") || Arch.starts_with("rva") || - Arch.starts_with("rvb") || Arch.starts_with("rvm")) { + // ISA string must begin with rv32, rv64, or a profile. + unsigned XLen = 0; + if (Arch.consume_front("rv32")) { + XLen = 32; + } else if (Arch.consume_front("rv64")) { + XLen = 64; + } else { + // Try parsing as a profile. 
const auto *FoundProfile = llvm::find_if(SupportedProfiles, [Arch](const RISCVProfile &Profile) { return Arch.starts_with(Profile.Name); }); - if (FoundProfile == std::end(SupportedProfiles)) - return createStringError(errc::invalid_argument, "unsupported profile"); - - std::string NewArch = FoundProfile->MArch.str(); - StringRef ArchWithoutProfile = Arch.substr(FoundProfile->Name.size()); - if (!ArchWithoutProfile.empty()) { - if (!ArchWithoutProfile.starts_with("_")) - return createStringError( - errc::invalid_argument, - "additional extensions must be after separator '_'"); - NewArch += ArchWithoutProfile.str(); + if (FoundProfile != std::end(SupportedProfiles)) { + std::string NewArch = FoundProfile->MArch.str(); + StringRef ArchWithoutProfile = Arch.drop_front(FoundProfile->Name.size()); + if (!ArchWithoutProfile.empty()) { + if (ArchWithoutProfile.front() != '_') + return createStringError( + errc::invalid_argument, + "additional extensions must be after separator '_'"); + NewArch += ArchWithoutProfile.str(); + } + return parseArchString(NewArch, EnableExperimentalExtension, + ExperimentalExtensionVersionCheck, IgnoreUnknown); } - return parseArchString(NewArch, EnableExperimentalExtension, - ExperimentalExtensionVersionCheck, IgnoreUnknown); } - // ISA string must begin with rv32 or rv64. - unsigned XLen = 0; - if (Arch.consume_front("rv32")) - XLen = 32; - else if (Arch.consume_front("rv64")) - XLen = 64; - if (XLen == 0 || Arch.empty()) return createStringError( errc::invalid_argument, - "string must begin with rv32{i,e,g} or rv64{i,e,g}"); + "string must begin with rv32{i,e,g}, rv64{i,e,g}, or a supported " + "profile name"); std::unique_ptr ISAInfo(new RISCVISAInfo(XLen)); MapVector Date: Wed, 1 May 2024 10:44:10 -0700 Subject: [PATCH 42/48] [RISCV] Merge RISCVISAInfo::updateFLen/MinVLen/MaxELen into a single function. (#90665) This simplifies the callers. --- llvm/include/llvm/TargetParser/RISCVISAInfo.h | 15 +++-- llvm/lib/TargetParser/RISCVISAInfo.cpp | 63 +++++++++---------- 2 files changed, 37 insertions(+), 41 deletions(-) diff --git a/llvm/include/llvm/TargetParser/RISCVISAInfo.h b/llvm/include/llvm/TargetParser/RISCVISAInfo.h index 0d5637155daa96..36617a9b625972 100644 --- a/llvm/include/llvm/TargetParser/RISCVISAInfo.h +++ b/llvm/include/llvm/TargetParser/RISCVISAInfo.h @@ -78,13 +78,12 @@ class RISCVISAInfo { static std::string getTargetFeatureForExtension(StringRef Ext); private: - RISCVISAInfo(unsigned XLen) - : XLen(XLen), FLen(0), MinVLen(0), MaxELen(0), MaxELenFp(0) {} + RISCVISAInfo(unsigned XLen) : XLen(XLen) {} unsigned XLen; - unsigned FLen; - unsigned MinVLen; - unsigned MaxELen, MaxELenFp; + unsigned FLen = 0; + unsigned MinVLen = 0; + unsigned MaxELen = 0, MaxELenFp = 0; RISCVISAUtils::OrderedExtensionMap Exts; @@ -94,9 +93,9 @@ class RISCVISAInfo { void updateImplication(); void updateCombination(); - void updateFLen(); - void updateMinVLen(); - void updateMaxELen(); + + /// Update FLen, MinVLen, MaxELen, and MaxELenFp. 
+ void updateImpliedLengths(); }; } // namespace llvm diff --git a/llvm/lib/TargetParser/RISCVISAInfo.cpp b/llvm/lib/TargetParser/RISCVISAInfo.cpp index d154c00a785927..64405ca8cb9ff1 100644 --- a/llvm/lib/TargetParser/RISCVISAInfo.cpp +++ b/llvm/lib/TargetParser/RISCVISAInfo.cpp @@ -478,9 +478,7 @@ RISCVISAInfo::parseNormalizedArchString(StringRef Arch) { "failed to parse major version number"); ISAInfo->addExtension(ExtName, {MajorVersion, MinorVersion}); } - ISAInfo->updateFLen(); - ISAInfo->updateMinVLen(); - ISAInfo->updateMaxELen(); + ISAInfo->updateImpliedLengths(); return std::move(ISAInfo); } @@ -906,50 +904,51 @@ void RISCVISAInfo::updateCombination() { } while (MadeChange); } -void RISCVISAInfo::updateFLen() { - FLen = 0; +void RISCVISAInfo::updateImpliedLengths() { + assert(FLen == 0 && MaxELenFp == 0 && MaxELen == 0 && MinVLen == 0 && + "Expected lengths to be initialied to zero"); + // TODO: Handle q extension. if (Exts.count("d")) FLen = 64; else if (Exts.count("f")) FLen = 32; -} -void RISCVISAInfo::updateMinVLen() { - for (auto const &Ext : Exts) { - StringRef ExtName = Ext.first; - bool IsZvlExt = ExtName.consume_front("zvl") && ExtName.consume_back("b"); - if (IsZvlExt) { - unsigned ZvlLen; - if (!ExtName.getAsInteger(10, ZvlLen)) - MinVLen = std::max(MinVLen, ZvlLen); - } - } -} - -void RISCVISAInfo::updateMaxELen() { - assert(MaxELenFp == 0 && MaxELen == 0); if (Exts.count("v")) { MaxELenFp = std::max(MaxELenFp, 64u); MaxELen = std::max(MaxELen, 64u); } - // handles EEW restriction by sub-extension zve for (auto const &Ext : Exts) { StringRef ExtName = Ext.first; - bool IsZveExt = ExtName.consume_front("zve"); - if (IsZveExt) { - if (ExtName.back() == 'f') + // Infer MaxELen and MaxELenFp from Zve(32/64)(x/f/d) + if (ExtName.consume_front("zve")) { + unsigned ZveELen; + if (ExtName.consumeInteger(10, ZveELen)) + continue; + + if (ExtName == "f") MaxELenFp = std::max(MaxELenFp, 32u); - else if (ExtName.back() == 'd') + else if (ExtName == "d") MaxELenFp = std::max(MaxELenFp, 64u); - else if (ExtName.back() != 'x') + else if (ExtName != "x") continue; - ExtName = ExtName.drop_back(); - unsigned ZveELen; - if (!ExtName.getAsInteger(10, ZveELen)) - MaxELen = std::max(MaxELen, ZveELen); + MaxELen = std::max(MaxELen, ZveELen); + continue; + } + + // Infer MinVLen from zvl*b. + if (ExtName.consume_front("zvl")) { + unsigned ZvlLen; + if (ExtName.consumeInteger(10, ZvlLen)) + continue; + + if (ExtName != "b") + continue; + + MinVLen = std::max(MinVLen, ZvlLen); + continue; } } } @@ -975,9 +974,7 @@ llvm::Expected> RISCVISAInfo::postProcessAndChecking(std::unique_ptr &&ISAInfo) { ISAInfo->updateImplication(); ISAInfo->updateCombination(); - ISAInfo->updateFLen(); - ISAInfo->updateMinVLen(); - ISAInfo->updateMaxELen(); + ISAInfo->updateImpliedLengths(); if (Error Result = ISAInfo->checkDependency()) return std::move(Result); From 28869a704ef59471cee6c750f2566b133b0ff391 Mon Sep 17 00:00:00 2001 From: Jan Voung Date: Wed, 1 May 2024 13:58:05 -0400 Subject: [PATCH 43/48] Reapply "Use an abbrev to reduce size of VALUE_GUID records in ThinLTO summaries" (#90610) (#90692) This reverts commit 2aabfc811670beb843074c765c056fff4a7b443b. Add fixes to LLD and Gold tests missed in original change. 
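
The size saving comes from emitting each 64-bit GUID through an abbreviation as two fixed
32-bit operands (GUIDs typically use most of their 64 bits, so this beats the default VBR
encoding). A minimal sketch of the split and reassembly the writer/reader changes rely on,
with an example value only:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t GUID = 0x123456789abcdef0ULL;   // example value, not a real GUID
    // Writer side: emit the GUID as two fixed 32-bit record fields.
    uint32_t Hi = static_cast<uint32_t>(GUID >> 32);
    uint32_t Lo = static_cast<uint32_t>(GUID);
    // Reader side: reassemble the 64-bit GUID from the two fields.
    uint64_t Roundtrip = (static_cast<uint64_t>(Hi) << 32) | Lo;
    assert(Roundtrip == GUID);
    return 0;
  }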
Co-authored-by: Jan Voung --- lld/test/COFF/thinlto-index-only.ll | 6 +- lld/test/ELF/lto/thinlto-emit-index.ll | 6 +- lld/test/ELF/lto/thinlto-index-only.ll | 6 +- lld/test/MachO/thinlto-emit-index.ll | 6 +- lld/test/MachO/thinlto-index-only.ll | 6 +- llvm/include/llvm/IR/ModuleSummaryIndex.h | 2 +- llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 9 ++- llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 30 +++++++- llvm/test/Assembler/thinlto-summary.ll | 53 +++++++------ llvm/test/Bitcode/summary_version.ll | 2 +- llvm/test/Bitcode/thinlto-alias.ll | 4 +- .../thinlto-func-summary-vtableref-pgo.ll | 8 +- ...allgraph-partial-sample-profile-summary.ll | 2 +- .../thinlto-function-summary-callgraph-pgo.ll | 2 +- ...ction-summary-callgraph-profile-summary.ll | 2 +- ...ummary-callgraph-sample-profile-summary.ll | 2 +- .../thinlto-function-summary-callgraph.ll | 2 +- .../thinlto-function-summary-originalnames.ll | 6 +- .../thinlto-function-summary-paramaccess.ll | 76 +++++++++---------- llvm/test/ThinLTO/X86/distributed_indexes.ll | 16 ++-- llvm/test/tools/gold/X86/thinlto.ll | 10 +-- llvm/test/tools/llvm-lto/thinlto.ll | 4 +- 22 files changed, 146 insertions(+), 114 deletions(-) diff --git a/lld/test/COFF/thinlto-index-only.ll b/lld/test/COFF/thinlto-index-only.ll index 8ef981d6090f12..f99134143e4dc5 100644 --- a/lld/test/COFF/thinlto-index-only.ll +++ b/lld/test/COFF/thinlto-index-only.ll @@ -22,8 +22,8 @@ ; BACKEND1: = 10) { + RefGUID = Record[1] << 32 | Record[2]; + } else { + RefGUID = Record[1]; + } ValueIdToValueInfoMap[ValueID] = std::make_tuple( TheIndex.getOrInsertValueInfo(RefGUID), RefGUID, RefGUID); break; diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp index 6d01e3b4d82189..1aaf160e91ca18 100644 --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -4299,9 +4299,20 @@ void ModuleBitcodeWriterBase::writePerModuleGlobalValueSummary() { return; } + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FS_VALUE_GUID)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); + // GUIDS often use up most of 64-bits, so encode as two Fixed 32. + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); + unsigned ValueGuidAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + for (const auto &GVI : valueIds()) { Stream.EmitRecord(bitc::FS_VALUE_GUID, - ArrayRef{GVI.second, GVI.first}); + ArrayRef{GVI.second, + static_cast(GVI.first >> 32), + static_cast(GVI.first)}, + ValueGuidAbbrev); } if (!Index->stackIds().empty()) { @@ -4315,7 +4326,7 @@ void ModuleBitcodeWriterBase::writePerModuleGlobalValueSummary() { } // Abbrev for FS_PERMODULE_PROFILE. - auto Abbv = std::make_shared(); + Abbv = std::make_shared(); Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_PROFILE)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // flags @@ -4467,9 +4478,20 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() { // Write the index flags. Stream.EmitRecord(bitc::FS_FLAGS, ArrayRef{Index.getFlags()}); + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FS_VALUE_GUID)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); + // GUIDS often use up most of 64-bits, so encode as two Fixed 32. 
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); + unsigned ValueGuidAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + for (const auto &GVI : valueIds()) { Stream.EmitRecord(bitc::FS_VALUE_GUID, - ArrayRef{GVI.second, GVI.first}); + ArrayRef{GVI.second, + static_cast(GVI.first >> 32), + static_cast(GVI.first)}, + ValueGuidAbbrev); } if (!StackIdIndices.empty()) { @@ -4488,7 +4510,7 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() { } // Abbrev for FS_COMBINED_PROFILE. - auto Abbv = std::make_shared(); + Abbv = std::make_shared(); Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_PROFILE)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // modid diff --git a/llvm/test/Assembler/thinlto-summary.ll b/llvm/test/Assembler/thinlto-summary.ll index 05dad2c7acad46..e0d866da0d8a22 100644 --- a/llvm/test/Assembler/thinlto-summary.ll +++ b/llvm/test/Assembler/thinlto-summary.ll @@ -46,28 +46,32 @@ ^18 = gv: (guid: 17, summaries: (alias: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 1), aliasee: ^14))) ; Test all types of TypeIdInfo on function summaries. -^19 = gv: (guid: 18, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 4, typeIdInfo: (typeTests: (^25, ^27))))) -^20 = gv: (guid: 19, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 8, typeIdInfo: (typeTestAssumeVCalls: (vFuncId: (^28, offset: 16)))))) -^21 = gv: (guid: 20, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 5, typeIdInfo: (typeCheckedLoadVCalls: (vFuncId: (^26, offset: 16)))))) -^22 = gv: (guid: 21, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 15, typeIdInfo: (typeTestAssumeConstVCalls: ((vFuncId: (^28, offset: 16), args: (42)), (vFuncId: (^28, offset: 24))))))) -^23 = gv: (guid: 22, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 5, typeIdInfo: (typeCheckedLoadConstVCalls: ((vFuncId: (^29, offset: 16), args: (42))))))) +^19 = gv: (guid: 18, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 4, typeIdInfo: (typeTests: (^26, ^28))))) +^20 = gv: (guid: 19, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 8, typeIdInfo: (typeTestAssumeVCalls: (vFuncId: (^29, offset: 16)))))) +^21 = gv: (guid: 20, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 5, typeIdInfo: (typeCheckedLoadVCalls: (vFuncId: (^27, offset: 16)))))) +^22 = gv: (guid: 21, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 15, typeIdInfo: (typeTestAssumeConstVCalls: ((vFuncId: (^29, offset: 16), args: (42)), (vFuncId: (^29, offset: 24))))))) +^23 = gv: (guid: 22, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0), insts: 5, 
typeIdInfo: (typeCheckedLoadConstVCalls: ((vFuncId: (^30, offset: 16), args: (42))))))) ; Function summary with an import type of declaration ^24 = gv: (guid: 23, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, importType: declaration), insts: 5))) +; GUID that are 64-bit + +^25 = gv: (guid: 9123456789101112131, summaries: (function: (module: ^0, flags: (linkage: internal, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 1, importType: definition), insts: 1))) + ; Test TypeId summaries: -^25 = typeid: (name: "_ZTS1C", summary: (typeTestRes: (kind: single, sizeM1BitWidth: 0))) +^26 = typeid: (name: "_ZTS1C", summary: (typeTestRes: (kind: single, sizeM1BitWidth: 0))) ; Test TypeId with other optional fields (alignLog2/sizeM1/bitMask/inlineBits) -^26 = typeid: (name: "_ZTS1B", summary: (typeTestRes: (kind: inline, sizeM1BitWidth: 0, alignLog2: 1, sizeM1: 2, bitMask: 3, inlineBits: 4))) +^27 = typeid: (name: "_ZTS1B", summary: (typeTestRes: (kind: inline, sizeM1BitWidth: 0, alignLog2: 1, sizeM1: 2, bitMask: 3, inlineBits: 4))) ; Test the AllOnes resolution, and all kinds of WholeProgramDevirtResolution ; types, including all optional resolution by argument kinds. -^27 = typeid: (name: "_ZTS1A", summary: (typeTestRes: (kind: allOnes, sizeM1BitWidth: 7), wpdResolutions: ((offset: 0, wpdRes: (kind: branchFunnel)), (offset: 8, wpdRes: (kind: singleImpl, singleImplName: "_ZN1A1nEi")), (offset: 16, wpdRes: (kind: indir, resByArg: (args: (1, 2), byArg: (kind: indir, byte: 2, bit: 3), args: (3), byArg: (kind: uniformRetVal, info: 1), args: (4), byArg: (kind: uniqueRetVal, info: 1), args: (5), byArg: (kind: virtualConstProp))))))) +^28 = typeid: (name: "_ZTS1A", summary: (typeTestRes: (kind: allOnes, sizeM1BitWidth: 7), wpdResolutions: ((offset: 0, wpdRes: (kind: branchFunnel)), (offset: 8, wpdRes: (kind: singleImpl, singleImplName: "_ZN1A1nEi")), (offset: 16, wpdRes: (kind: indir, resByArg: (args: (1, 2), byArg: (kind: indir, byte: 2, bit: 3), args: (3), byArg: (kind: uniformRetVal, info: 1), args: (4), byArg: (kind: uniqueRetVal, info: 1), args: (5), byArg: (kind: virtualConstProp))))))) ; Test the other kinds of type test resoultions -^28 = typeid: (name: "_ZTS1D", summary: (typeTestRes: (kind: byteArray, sizeM1BitWidth: 0))) -^29 = typeid: (name: "_ZTS1E", summary: (typeTestRes: (kind: unsat, sizeM1BitWidth: 0))) -^30 = flags: 8 -^31 = blockcount: 1888 +^29 = typeid: (name: "_ZTS1D", summary: (typeTestRes: (kind: byteArray, sizeM1BitWidth: 0))) +^30 = typeid: (name: "_ZTS1E", summary: (typeTestRes: (kind: unsat, sizeM1BitWidth: 0))) +^31 = flags: 8 +^32 = blockcount: 1888 ; Make sure we get back from llvm-dis essentially what we put in via llvm-as. 
; CHECK: ^0 = module: (path: "thinlto-summary1.o", hash: (1369602428, 2747878711, 259090915, 2507395659, 1141468049)) @@ -91,19 +95,20 @@ ; CHECK: ^16 = gv: (guid: 15, summaries: (function: (module: ^1, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 1, funcFlags: (readNone: 1, readOnly: 0, noRecurse: 1, returnDoesNotAlias: 0, noInline: 0, alwaysInline: 1, noUnwind: 1, mayThrow: 1, hasUnknownCall: 1, mustBeUnreachable: 0)))) ; CHECK: ^17 = gv: (guid: 16, summaries: (function: (module: ^1, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 1, funcFlags: (readNone: 0, readOnly: 1, noRecurse: 0, returnDoesNotAlias: 1, noInline: 0, alwaysInline: 0, noUnwind: 0, mayThrow: 0, hasUnknownCall: 0, mustBeUnreachable: 1), calls: ((callee: ^15))))) ; CHECK: ^18 = gv: (guid: 17, summaries: (alias: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 1, canAutoHide: 0, importType: definition), aliasee: ^14))) -; CHECK: ^19 = gv: (guid: 18, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 4, typeIdInfo: (typeTests: (^25, ^27))))) -; CHECK: ^20 = gv: (guid: 19, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 8, typeIdInfo: (typeTestAssumeVCalls: (vFuncId: (^28, offset: 16)))))) -; CHECK: ^21 = gv: (guid: 20, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 5, typeIdInfo: (typeCheckedLoadVCalls: (vFuncId: (^26, offset: 16)))))) -; CHECK: ^22 = gv: (guid: 21, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 15, typeIdInfo: (typeTestAssumeConstVCalls: ((vFuncId: (^28, offset: 16), args: (42)), (vFuncId: (^28, offset: 24))))))) -; CHECK: ^23 = gv: (guid: 22, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 5, typeIdInfo: (typeCheckedLoadConstVCalls: ((vFuncId: (^29, offset: 16), args: (42))))))) +; CHECK: ^19 = gv: (guid: 18, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 4, typeIdInfo: (typeTests: (^26, ^28))))) +; CHECK: ^20 = gv: (guid: 19, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 8, typeIdInfo: (typeTestAssumeVCalls: (vFuncId: (^29, offset: 16)))))) +; CHECK: ^21 = gv: (guid: 20, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 5, typeIdInfo: (typeCheckedLoadVCalls: (vFuncId: (^27, offset: 16)))))) +; CHECK: ^22 = gv: (guid: 21, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, 
canAutoHide: 0, importType: definition), insts: 15, typeIdInfo: (typeTestAssumeConstVCalls: ((vFuncId: (^29, offset: 16), args: (42)), (vFuncId: (^29, offset: 24))))))) +; CHECK: ^23 = gv: (guid: 22, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: definition), insts: 5, typeIdInfo: (typeCheckedLoadConstVCalls: ((vFuncId: (^30, offset: 16), args: (42))))))) ; CHECK: ^24 = gv: (guid: 23, summaries: (function: (module: ^0, flags: (linkage: external, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 0, canAutoHide: 0, importType: declaration), insts: 5))) -; CHECK: ^25 = typeid: (name: "_ZTS1C", summary: (typeTestRes: (kind: single, sizeM1BitWidth: 0))) ; guid = 1884921850105019584 -; CHECK: ^26 = typeid: (name: "_ZTS1B", summary: (typeTestRes: (kind: inline, sizeM1BitWidth: 0, alignLog2: 1, sizeM1: 2, bitMask: 3, inlineBits: 4))) ; guid = 6203814149063363976 -; CHECK: ^27 = typeid: (name: "_ZTS1A", summary: (typeTestRes: (kind: allOnes, sizeM1BitWidth: 7), wpdResolutions: ((offset: 0, wpdRes: (kind: branchFunnel)), (offset: 8, wpdRes: (kind: singleImpl, singleImplName: "_ZN1A1nEi")), (offset: 16, wpdRes: (kind: indir, resByArg: (args: (1, 2), byArg: (kind: indir, byte: 2, bit: 3), args: (3), byArg: (kind: uniformRetVal, info: 1), args: (4), byArg: (kind: uniqueRetVal, info: 1), args: (5), byArg: (kind: virtualConstProp))))))) ; guid = 7004155349499253778 -; CHECK: ^28 = typeid: (name: "_ZTS1D", summary: (typeTestRes: (kind: byteArray, sizeM1BitWidth: 0))) ; guid = 9614786172484273522 -; CHECK: ^29 = typeid: (name: "_ZTS1E", summary: (typeTestRes: (kind: unsat, sizeM1BitWidth: 0))) ; guid = 17437243864166745132 -; CHECK: ^30 = flags: 8 -; CHECK: ^31 = blockcount: 1888 +; CHECK: ^25 = gv: (guid: 9123456789101112131, summaries: (function: (module: ^0, flags: (linkage: internal, visibility: default, notEligibleToImport: 0, live: 0, dsoLocal: 1, canAutoHide: 0, importType: definition), insts: 1))) +; CHECK: ^26 = typeid: (name: "_ZTS1C", summary: (typeTestRes: (kind: single, sizeM1BitWidth: 0))) ; guid = 1884921850105019584 +; CHECK: ^27 = typeid: (name: "_ZTS1B", summary: (typeTestRes: (kind: inline, sizeM1BitWidth: 0, alignLog2: 1, sizeM1: 2, bitMask: 3, inlineBits: 4))) ; guid = 6203814149063363976 +; CHECK: ^28 = typeid: (name: "_ZTS1A", summary: (typeTestRes: (kind: allOnes, sizeM1BitWidth: 7), wpdResolutions: ((offset: 0, wpdRes: (kind: branchFunnel)), (offset: 8, wpdRes: (kind: singleImpl, singleImplName: "_ZN1A1nEi")), (offset: 16, wpdRes: (kind: indir, resByArg: (args: (1, 2), byArg: (kind: indir, byte: 2, bit: 3), args: (3), byArg: (kind: uniformRetVal, info: 1), args: (4), byArg: (kind: uniqueRetVal, info: 1), args: (5), byArg: (kind: virtualConstProp))))))) ; guid = 7004155349499253778 +; CHECK: ^29 = typeid: (name: "_ZTS1D", summary: (typeTestRes: (kind: byteArray, sizeM1BitWidth: 0))) ; guid = 9614786172484273522 +; CHECK: ^30 = typeid: (name: "_ZTS1E", summary: (typeTestRes: (kind: unsat, sizeM1BitWidth: 0))) ; guid = 17437243864166745132 +; CHECK: ^31 = flags: 8 +; CHECK: ^32 = blockcount: 1888 ; Make sure parsing of a non-summary entry containing a ":" does not fail ; after summary parsing, which handles colons differently. 
diff --git a/llvm/test/Bitcode/summary_version.ll b/llvm/test/Bitcode/summary_version.ll index 98feab6fe2f995..26c64f81a773f1 100644 --- a/llvm/test/Bitcode/summary_version.ll +++ b/llvm/test/Bitcode/summary_version.ll @@ -2,7 +2,7 @@ ; RUN: opt -module-summary %s -o - | llvm-bcanalyzer -dump | FileCheck %s ; CHECK: +; CHECK: diff --git a/llvm/test/Bitcode/thinlto-alias.ll b/llvm/test/Bitcode/thinlto-alias.ll index 5dfff0f796198d..7deb2d8259e328 100644 --- a/llvm/test/Bitcode/thinlto-alias.ll +++ b/llvm/test/Bitcode/thinlto-alias.ll @@ -31,9 +31,9 @@ ; COMBINED-NEXT: +; COMBINED-NEXT: ; COMBINED-NEXT: +; COMBINED-NEXT: ; COMBINED-NEXT: +; CHECK-NEXT: ; CHECK-NEXT: ; The `VALUE_GUID` below represents the "_ZTV4Base" referenced by the instruction ; that loads vtable pointers. -; CHECK-NEXT: +; CHECK-NEXT: ; The `VALUE_GUID` below represents the "_ZN4Base4funcEv" referenced by the ; indirect call instruction. -; CHECK-NEXT: +; CHECK-NEXT: ; NOTE vtables and functions from Derived class is dropped because ; `-icp-max-num-vtables` and `-icp-max-prom` are both set to one. ; has the format [valueid, flags, instcount, funcflags, ; numrefs, rorefcnt, worefcnt, ; m x valueid, ; n x (valueid, hotness+tailcall)] -; CHECK-NEXT: +; CHECK-NEXT: ; CHECK-NEXT: target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Bitcode/thinlto-function-summary-callgraph-partial-sample-profile-summary.ll b/llvm/test/Bitcode/thinlto-function-summary-callgraph-partial-sample-profile-summary.ll index d44ee24694be56..0c3ab9b2089310 100644 --- a/llvm/test/Bitcode/thinlto-function-summary-callgraph-partial-sample-profile-summary.ll +++ b/llvm/test/Bitcode/thinlto-function-summary-callgraph-partial-sample-profile-summary.ll @@ -30,7 +30,7 @@ ; CHECK-LABEL: +; CHECK-NEXT: ; op4=none1 op6=hot1 op8=cold1 op10=none2 op12=hot2 op14=cold2 op16=none3 op18=hot3 op20=cold3 op22=123 ; CHECK-NEXT: ; CHECK-NEXT: diff --git a/llvm/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll b/llvm/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll index 2bbab0c6bb0d07..ed3c716288d6f5 100644 --- a/llvm/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll +++ b/llvm/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll @@ -26,7 +26,7 @@ ; COMBINED: +; COMBINED-NEXT: ; COMBINED-NEXT: +; CHECK-NEXT: ; op4=hot1 op6=cold op8=hot2 op10=hot4 op12=none1 op14=hot3 op16=none2 op18=none3 op20=123 ; CHECK-NEXT: ; CHECK-NEXT: diff --git a/llvm/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll b/llvm/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll index 601bebd39267d0..2e9b362d39bb4b 100644 --- a/llvm/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll +++ b/llvm/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll @@ -30,7 +30,7 @@ ; CHECK-LABEL: +; CHECK-NEXT: ; op4=none1 op6=hot1 op8=cold1 op10=none2 op12=hot2 op14=cold2 op16=none3 op18=hot3 op20=cold3 op22=123 ; CHECK-NEXT: ; CHECK-NEXT: diff --git a/llvm/test/Bitcode/thinlto-function-summary-callgraph.ll b/llvm/test/Bitcode/thinlto-function-summary-callgraph.ll index 542b400f8e33b8..becbc4a32dd92b 100644 --- a/llvm/test/Bitcode/thinlto-function-summary-callgraph.ll +++ b/llvm/test/Bitcode/thinlto-function-summary-callgraph.ll @@ -30,7 +30,7 @@ ; COMBINED-NEXT: +; COMBINED-NEXT: ; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: ; COMBINED-DAG: ; COMBINED-DAG: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; 
COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: +; COMBINED-NEXT: ; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: -; COMBINED-NEXT: Date: Wed, 1 May 2024 19:04:51 +0100 Subject: [PATCH 44/48] [flang] always run PolymorphicOpConversion sequentially (#90721) It was pointed out in post commit review of https://github.com/llvm/llvm-project/pull/90597 that the pass should never have been run in parallel over all functions (and now other top level operations) in the first place. The mutex used in the pass was ineffective at preventing races since each instance of the pass would have a different mutex. --- .../flang/Optimizer/Transforms/Passes.td | 2 +- flang/include/flang/Tools/CLOptions.inc | 2 +- .../Transforms/PolymorphicOpConversion.cpp | 20 ++++--------------- flang/test/Driver/bbc-mlir-pass-pipeline.f90 | 6 ++---- .../test/Driver/mlir-debug-pass-pipeline.f90 | 6 ++---- flang/test/Driver/mlir-pass-pipeline.f90 | 19 ++++-------------- flang/test/Fir/basic-program.fir | 11 +--------- 7 files changed, 15 insertions(+), 51 deletions(-) diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td index dcb7037e2991be..1eaaa32a508a03 100644 --- a/flang/include/flang/Optimizer/Transforms/Passes.td +++ b/flang/include/flang/Optimizer/Transforms/Passes.td @@ -298,7 +298,7 @@ def AlgebraicSimplification : Pass<"flang-algebraic-simplification"> { let constructor = "::fir::createAlgebraicSimplificationPass()"; } -def PolymorphicOpConversion : Pass<"fir-polymorphic-op"> { +def PolymorphicOpConversion : Pass<"fir-polymorphic-op", "mlir::ModuleOp"> { let summary = "Simplify operations on polymorphic types"; let description = [{ diff --git a/flang/include/flang/Tools/CLOptions.inc b/flang/include/flang/Tools/CLOptions.inc index bd60c66b61ad24..d8543648987036 100644 --- a/flang/include/flang/Tools/CLOptions.inc +++ b/flang/include/flang/Tools/CLOptions.inc @@ -271,7 +271,7 @@ inline void createDefaultFIROptimizerPassPipeline( pm.addPass(mlir::createCSEPass()); // Polymorphic types - addNestedPassToAllTopLevelOperations(pm, fir::createPolymorphicOpConversion); + pm.addPass(fir::createPolymorphicOpConversion()); if (pc.AliasAnalysis && !disableFirAliasTags && !useOldAliasTags) pm.addPass(fir::createAliasTagsPass()); diff --git a/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp b/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp index 0f5c43882ee30a..76c12d2de5c444 100644 --- a/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp +++ b/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp @@ -29,7 +29,6 @@ #include "mlir/Transforms/DialectConversion.h" #include "llvm/ADT/SmallSet.h" #include "llvm/Support/CommandLine.h" -#include namespace fir { #define 
GEN_PASS_DEF_POLYMORPHICOPCONVERSION @@ -48,9 +47,8 @@ class SelectTypeConv : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; - SelectTypeConv(mlir::MLIRContext *ctx, std::mutex *moduleMutex) - : mlir::OpConversionPattern(ctx), - moduleMutex(moduleMutex) {} + SelectTypeConv(mlir::MLIRContext *ctx) + : mlir::OpConversionPattern(ctx) {} mlir::LogicalResult matchAndRewrite(fir::SelectTypeOp selectType, OpAdaptor adaptor, @@ -72,9 +70,6 @@ class SelectTypeConv : public OpConversionPattern { llvm::SmallSet collectAncestors(fir::TypeInfoOp dt, mlir::ModuleOp mod) const; - - // Mutex used to guard insertion of mlir::func::FuncOp in the module. - std::mutex *moduleMutex; }; /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch @@ -223,21 +218,18 @@ class PolymorphicOpConversion : public fir::impl::PolymorphicOpConversionBase { public: mlir::LogicalResult initialize(mlir::MLIRContext *ctx) override { - moduleMutex = new std::mutex(); return mlir::success(); } void runOnOperation() override { auto *context = &getContext(); - auto mod = mlir::dyn_cast_or_null(getOperation()); - if (!mod) - mod = getOperation()->getParentOfType(); + mlir::ModuleOp mod = getOperation(); mlir::RewritePatternSet patterns(context); BindingTables bindingTables; buildBindingTables(bindingTables, mod); - patterns.insert(context, moduleMutex); + patterns.insert(context); patterns.insert(context, bindingTables); mlir::ConversionTarget target(*context); target.addLegalDialect lock(*moduleMutex); callee = fir::createFuncOp(rewriter.getUnknownLoc(), mod, fctName, rewriter.getFunctionType({descNoneTy, typeDescTy}, diff --git a/flang/test/Driver/bbc-mlir-pass-pipeline.f90 b/flang/test/Driver/bbc-mlir-pass-pipeline.f90 index 07b68bfe03b336..2cc25b3c473feb 100644 --- a/flang/test/Driver/bbc-mlir-pass-pipeline.f90 +++ b/flang/test/Driver/bbc-mlir-pass-pipeline.f90 @@ -45,18 +45,16 @@ ! CHECK-NEXT: (S) 0 num-cse'd - Number of operations CSE'd ! CHECK-NEXT: (S) 0 num-dce'd - Number of operations DCE'd +! CHECK-NEXT: PolymorphicOpConversion + ! CHECK-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private'] ! CHECK-NEXT: 'fir.global' Pipeline -! CHECK-NEXT: PolymorphicOpConversion ! CHECK-NEXT: CFGConversion ! CHECK-NEXT: 'func.func' Pipeline -! CHECK-NEXT: PolymorphicOpConversion ! CHECK-NEXT: CFGConversion ! CHECK-NEXT: 'omp.declare_reduction' Pipeline -! CHECK-NEXT: PolymorphicOpConversion ! CHECK-NEXT: CFGConversion ! CHECK-NEXT: 'omp.private' Pipeline -! CHECK-NEXT: PolymorphicOpConversion ! CHECK-NEXT: CFGConversion ! CHECK-NEXT: SCFToControlFlow diff --git a/flang/test/Driver/mlir-debug-pass-pipeline.f90 b/flang/test/Driver/mlir-debug-pass-pipeline.f90 index cad7415a3b528d..a9980e3c932c81 100644 --- a/flang/test/Driver/mlir-debug-pass-pipeline.f90 +++ b/flang/test/Driver/mlir-debug-pass-pipeline.f90 @@ -65,18 +65,16 @@ ! ALL-NEXT: (S) 0 num-cse'd - Number of operations CSE'd ! ALL-NEXT: (S) 0 num-dce'd - Number of operations DCE'd +! ALL-NEXT: PolymorphicOpConversion + ! ALL-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private'] ! ALL-NEXT: 'fir.global' Pipeline -! ALL-NEXT: PolymorphicOpConversion ! ALL-NEXT: CFGConversion ! ALL-NEXT: 'func.func' Pipeline -! ALL-NEXT: PolymorphicOpConversion ! ALL-NEXT: CFGConversion ! ALL-NEXT: 'omp.declare_reduction' Pipeline -! ALL-NEXT: PolymorphicOpConversion ! ALL-NEXT: CFGConversion ! ALL-NEXT: 'omp.private' Pipeline -! 
ALL-NEXT: PolymorphicOpConversion ! ALL-NEXT: CFGConversion ! ALL-NEXT: SCFToControlFlow ! ALL-NEXT: Canonicalizer diff --git a/flang/test/Driver/mlir-pass-pipeline.f90 b/flang/test/Driver/mlir-pass-pipeline.f90 index 7f63f946c2fbd9..4ebac7c3fb65c1 100644 --- a/flang/test/Driver/mlir-pass-pipeline.f90 +++ b/flang/test/Driver/mlir-pass-pipeline.f90 @@ -1,8 +1,8 @@ ! Test the MLIR pass pipeline -! RUN: %flang_fc1 -S -mmlir --mlir-pass-statistics -mmlir --mlir-pass-statistics-display=pipeline -o /dev/null %s 2>&1 | FileCheck --check-prefixes=ALL,NOTO2 %s +! RUN: %flang_fc1 -S -mmlir --mlir-pass-statistics -mmlir --mlir-pass-statistics-display=pipeline -o /dev/null %s 2>&1 | FileCheck --check-prefixes=ALL %s ! -O0 is the default: -! RUN: %flang_fc1 -S -mmlir --mlir-pass-statistics -mmlir --mlir-pass-statistics-display=pipeline %s -O0 -o /dev/null 2>&1 | FileCheck --check-prefixes=ALL,NOTO2 %s +! RUN: %flang_fc1 -S -mmlir --mlir-pass-statistics -mmlir --mlir-pass-statistics-display=pipeline %s -O0 -o /dev/null 2>&1 | FileCheck --check-prefixes=ALL %s ! RUN: %flang_fc1 -S -mmlir --mlir-pass-statistics -mmlir --mlir-pass-statistics-display=pipeline %s -O2 -o /dev/null 2>&1 | FileCheck --check-prefixes=ALL,O2 %s ! REQUIRES: asserts @@ -56,28 +56,17 @@ ! ALL-NEXT: (S) 0 num-cse'd - Number of operations CSE'd ! ALL-NEXT: (S) 0 num-dce'd - Number of operations DCE'd -! O2-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private'] -! O2-NEXT: 'fir.global' Pipeline -! O2-NEXT: PolymorphicOpConversion -! O2-NEXT: 'func.func' Pipeline -! O2-NEXT: PolymorphicOpConversion -! O2-NEXT: 'omp.declare_reduction' Pipeline -! O2-NEXT: PolymorphicOpConversion -! O2-NEXT: 'omp.private' Pipeline -! O2-NEXT: PolymorphicOpConversion +! ALL-NEXT: PolymorphicOpConversion ! O2-NEXT: AddAliasTags + ! ALL-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private'] ! ALL-NEXT: 'fir.global' Pipeline -! NOTO2-NEXT: PolymorphicOpConversion ! ALL-NEXT: CFGConversion ! ALL-NEXT: 'func.func' Pipeline -! NOTO2-NEXT: PolymorphicOpConversion ! ALL-NEXT: CFGConversion ! ALL-NEXT: 'omp.declare_reduction' Pipeline -! NOTO2-NEXT: PolymorphicOpConversion ! ALL-NEXT: CFGConversion ! ALL-NEXT: 'omp.private' Pipeline -! NOTO2-NEXT: PolymorphicOpConversion ! ALL-NEXT: CFGConversion ! 
ALL-NEXT: SCFToControlFlow diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir index 67a9c56ed9acb5..02fb84ed8c873d 100644 --- a/flang/test/Fir/basic-program.fir +++ b/flang/test/Fir/basic-program.fir @@ -62,16 +62,7 @@ func.func @_QQmain() { // PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd // PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd -// PASSES-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private'] -// PASSES-NEXT: 'fir.global' Pipeline -// PASSES-NEXT: PolymorphicOpConversion -// PASSES-NEXT: 'func.func' Pipeline -// PASSES-NEXT: PolymorphicOpConversion -// PASSES-NEXT: 'omp.declare_reduction' Pipeline -// PASSES-NEXT: PolymorphicOpConversion -// PASSES-NEXT: 'omp.private' Pipeline -// PASSES-NEXT: PolymorphicOpConversion - +// PASSES-NEXT: PolymorphicOpConversion // PASSES-NEXT: AddAliasTags // PASSES-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private'] From aca511734f5ff6204fdc65427566c8bd3b810a24 Mon Sep 17 00:00:00 2001 From: Vinayak Dev <104419489+vinayakdsci@users.noreply.github.com> Date: Wed, 1 May 2024 23:48:44 +0530 Subject: [PATCH 45/48] [libc] Implement fcntl() function (#89507) Fixes #84968. Implements the `fcntl()` function defined in the `fcntl.h` header. --- libc/config/linux/aarch64/entrypoints.txt | 1 + libc/config/linux/riscv/entrypoints.txt | 1 + libc/config/linux/x86_64/entrypoints.txt | 1 + libc/hdr/CMakeLists.txt | 9 + libc/hdr/fcntl_macros.h | 22 +++ libc/hdr/types/CMakeLists.txt | 24 +++ libc/hdr/types/struct_f_owner_ex.h | 21 +++ libc/hdr/types/struct_flock.h | 21 +++ libc/hdr/types/struct_flock64.h | 21 +++ libc/include/CMakeLists.txt | 4 + .../llvm-libc-macros/linux/fcntl-macros.h | 31 ++++ libc/include/llvm-libc-types/CMakeLists.txt | 3 + .../llvm-libc-types/struct_f_owner_ex.h | 25 +++ libc/include/llvm-libc-types/struct_flock.h | 25 +++ libc/include/llvm-libc-types/struct_flock64.h | 25 +++ libc/spec/posix.td | 5 + libc/src/fcntl/CMakeLists.txt | 7 + libc/src/fcntl/fcntl.h | 18 ++ libc/src/fcntl/linux/CMakeLists.txt | 16 ++ libc/src/fcntl/linux/fcntl.cpp | 93 +++++++++++ libc/test/src/fcntl/CMakeLists.txt | 17 ++ libc/test/src/fcntl/fcntl_test.cpp | 155 ++++++++++++++++++ 22 files changed, 545 insertions(+) create mode 100644 libc/hdr/fcntl_macros.h create mode 100644 libc/hdr/types/struct_f_owner_ex.h create mode 100644 libc/hdr/types/struct_flock.h create mode 100644 libc/hdr/types/struct_flock64.h create mode 100644 libc/include/llvm-libc-types/struct_f_owner_ex.h create mode 100644 libc/include/llvm-libc-types/struct_flock.h create mode 100644 libc/include/llvm-libc-types/struct_flock64.h create mode 100644 libc/src/fcntl/fcntl.h create mode 100644 libc/src/fcntl/linux/fcntl.cpp create mode 100644 libc/test/src/fcntl/fcntl_test.cpp diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt index eedd9342a09f02..ad50b6f59cdcce 100644 --- a/libc/config/linux/aarch64/entrypoints.txt +++ b/libc/config/linux/aarch64/entrypoints.txt @@ -22,6 +22,7 @@ set(TARGET_LIBC_ENTRYPOINTS # fcntl.h entrypoints libc.src.fcntl.creat + libc.src.fcntl.fcntl libc.src.fcntl.open libc.src.fcntl.openat diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt index 4ddc1fb365e155..479af40b5b26b9 100644 --- a/libc/config/linux/riscv/entrypoints.txt +++ b/libc/config/linux/riscv/entrypoints.txt @@ -22,6 +22,7 @@ set(TARGET_LIBC_ENTRYPOINTS # fcntl.h entrypoints 
libc.src.fcntl.creat + libc.src.fcntl.fcntl libc.src.fcntl.open libc.src.fcntl.openat diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt index 2576e4a92e85e2..5e3ddd34fb4dcb 100644 --- a/libc/config/linux/x86_64/entrypoints.txt +++ b/libc/config/linux/x86_64/entrypoints.txt @@ -22,6 +22,7 @@ set(TARGET_LIBC_ENTRYPOINTS # fcntl.h entrypoints libc.src.fcntl.creat + libc.src.fcntl.fcntl libc.src.fcntl.open libc.src.fcntl.openat diff --git a/libc/hdr/CMakeLists.txt b/libc/hdr/CMakeLists.txt index fb7c342f92b78f..179b05e6ee966d 100644 --- a/libc/hdr/CMakeLists.txt +++ b/libc/hdr/CMakeLists.txt @@ -32,6 +32,15 @@ add_proxy_header_library( libc.include.math ) +add_proxy_header_library( + fcntl_macros + HDRS + fcntl_macros.h + FULL_BUILD_DEPENDS + libc.include.llvm-libc-macros.fcntl_macros + libc.include.fcntl +) + add_proxy_header_library( fenv_macros HDRS diff --git a/libc/hdr/fcntl_macros.h b/libc/hdr/fcntl_macros.h new file mode 100644 index 00000000000000..828cb984c0cb14 --- /dev/null +++ b/libc/hdr/fcntl_macros.h @@ -0,0 +1,22 @@ +//===-- Definition of macros from fcntl/fcntl.h ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_HDR_FCNTL_MACROS_H +#define LLVM_LIBC_HDR_FCNTL_MACROS_H + +#ifdef LIBC_FULL_BUILD + +#include "include/llvm-libc-macros/fcntl-macros.h" + +#else // Overlay mode + +#include + +#endif // LLVM_LIBC_FULL_BUILD + +#endif // LLVM_LIBC_HDR_FCNTL_MACROS_H diff --git a/libc/hdr/types/CMakeLists.txt b/libc/hdr/types/CMakeLists.txt index f53766777e7530..46a66ec590202d 100644 --- a/libc/hdr/types/CMakeLists.txt +++ b/libc/hdr/types/CMakeLists.txt @@ -14,6 +14,30 @@ add_proxy_header_library( libc.include.llvm-libc-types.struct_epoll_event ) +add_proxy_header_library( + struct_flock + HDRS + struct_flock.h + FULL_BUILD_DEPENDS + libc.include.llvm-libc-types.struct_flock +) + +add_proxy_header_library( + struct_flock64 + HDRS + struct_flock64.h + FULL_BUILD_DEPENDS + libc.include.llvm-libc-types.struct_flock64 +) + +add_proxy_header_library( + struct_f_owner_ex + HDRS + struct_f_owner_ex.h + FULL_BUILD_DEPENDS + libc.include.llvm-libc-types.struct_f_owner_ex +) + add_proxy_header_library( struct_timespec HDRS diff --git a/libc/hdr/types/struct_f_owner_ex.h b/libc/hdr/types/struct_f_owner_ex.h new file mode 100644 index 00000000000000..49985115ae4bb2 --- /dev/null +++ b/libc/hdr/types/struct_f_owner_ex.h @@ -0,0 +1,21 @@ +//===-- Proxy for struct f_owner_ex --------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#ifndef LLVM_LIBC_HDR_TYPES_STRUCT_F_OWNER_EX_H +#define LLVM_LIBC_HDR_TYPES_STRUCT_F_OWNER_EX_H + +#ifdef LIBC_FULL_BUILD + +#include "include/llvm-libc-types/struct_f_owner_ex.h" + +#else + +#include + +#endif // LIBC_FULL_BUILD + +#endif // LLVM_LIBC_HDR_TYPES_STRUCT_F_OWNER_EX_H diff --git a/libc/hdr/types/struct_flock.h b/libc/hdr/types/struct_flock.h new file mode 100644 index 00000000000000..a552b91c432b38 --- /dev/null +++ b/libc/hdr/types/struct_flock.h @@ -0,0 +1,21 @@ +//===-- Proxy for struct flock -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#ifndef LLVM_LIBC_HDR_TYPES_STRUCT_FLOCK_H +#define LLVM_LIBC_HDR_TYPES_STRUCT_FLOCK_H + +#ifdef LIBC_FULL_BUILD + +#include "include/llvm-libc-types/struct_flock.h" + +#else + +#include + +#endif // LIBC_FULL_BUILD + +#endif // LLVM_LIBC_HDR_TYPES_STRUCT_FLOCK_H diff --git a/libc/hdr/types/struct_flock64.h b/libc/hdr/types/struct_flock64.h new file mode 100644 index 00000000000000..84fe67816c3372 --- /dev/null +++ b/libc/hdr/types/struct_flock64.h @@ -0,0 +1,21 @@ +//===-- Proxy for struct flock64 -----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#ifndef LLVM_LIBC_HDR_TYPES_STRUCT_FLOCK64_H +#define LLVM_LIBC_HDR_TYPES_STRUCT_FLOCK64_H + +#ifdef LIBC_FULL_BUILD + +#include "include/llvm-libc-types/struct_flock64.h" + +#else + +#include + +#endif // LIBC_FULL_BUILD + +#endif // LLVM_LIBC_HDR_TYPES_STRUCT_FLOCK64_H diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt index 6dea8e539969d0..6101ec136b2639 100644 --- a/libc/include/CMakeLists.txt +++ b/libc/include/CMakeLists.txt @@ -43,6 +43,10 @@ add_gen_header( DEPENDS .llvm-libc-macros.fcntl_macros .llvm-libc-types.mode_t + .llvm-libc-types.struct_flock + .llvm-libc-types.struct_flock64 + .llvm-libc-types.off64_t + .llvm-libc-types.pid_t .llvm-libc-types.off_t .llvm_libc_common_h ) diff --git a/libc/include/llvm-libc-macros/linux/fcntl-macros.h b/libc/include/llvm-libc-macros/linux/fcntl-macros.h index 1d4e5bbbdc770a..8ee95863728e15 100644 --- a/libc/include/llvm-libc-macros/linux/fcntl-macros.h +++ b/libc/include/llvm-libc-macros/linux/fcntl-macros.h @@ -67,5 +67,36 @@ #define F_SETFD 2 #define F_GETFL 3 #define F_SETFL 4 +#define F_GETLK 5 +#define F_SETLK 6 +#define F_SETLKW 7 +#define F_SETOWN 8 +#define F_GETOWN 9 +#define F_SETSIG 10 +#define F_GETSIG 11 +#define F_GETLK64 12 +#define F_SETLK64 13 +#define F_SETLKW64 14 +#define F_SETOWN_EX 15 +#define F_GETOWN_EX 16 + +// Open File Description Locks. 
+#define F_OFD_GETLK 36 +#define F_OFD_SETLK 37 +#define F_OFD_SETLKW 38 + +// Close on succesful +#define F_CLOEXEC 1 + +#define F_RDLCK 0 +#define F_WRLCK 1 +#define F_UNLCK 2 + +// For Large File Support +#if defined(_LARGEFILE64_SOURCE) +#define F_GETLK F_GETLK64 +#define F_SETLK F_SETLK64 +#define F_SETLKW F_SETLKW64 +#endif #endif // LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H diff --git a/libc/include/llvm-libc-types/CMakeLists.txt b/libc/include/llvm-libc-types/CMakeLists.txt index 16e343d6f3448c..018b6c58316c32 100644 --- a/libc/include/llvm-libc-types/CMakeLists.txt +++ b/libc/include/llvm-libc-types/CMakeLists.txt @@ -60,6 +60,9 @@ add_header(rlim_t HDR rlim_t.h) add_header(time_t HDR time_t.h) add_header(stack_t HDR stack_t.h) add_header(suseconds_t HDR suseconds_t.h) +add_header(struct_flock HDR struct_flock.h DEPENDS .off_t .pid_t) +add_header(struct_flock64 HDR struct_flock64.h DEPENDS .off64_t .pid_t) +add_header(struct_f_owner_ex HDR struct_f_owner_ex.h DEPENDS .pid_t) add_header(struct_timeval HDR struct_timeval.h DEPENDS .suseconds_t .time_t) add_header(struct_rlimit HDR struct_rlimit.h DEPENDS .rlim_t) add_header(struct_rusage HDR struct_rusage.h DEPENDS .struct_timeval) diff --git a/libc/include/llvm-libc-types/struct_f_owner_ex.h b/libc/include/llvm-libc-types/struct_f_owner_ex.h new file mode 100644 index 00000000000000..c9cc85f69d2b45 --- /dev/null +++ b/libc/include/llvm-libc-types/struct_f_owner_ex.h @@ -0,0 +1,25 @@ +//===-- Definition of type struct f_owner_ex ------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_TYPES_STRUCT_F_OWNER_EX_H +#define LLVM_LIBC_TYPES_STRUCT_F_OWNER_EX_H + +#include "llvm-libc-types/pid_t.h" + +enum pid_type { + F_OWNER_TID = 0, + F_OWNER_PID, + F_OWNER_PGRP, +}; + +struct f_owner_ex { + enum pid_type type; + pid_t pid; +}; + +#endif // LLVM_LIBC_TYPES_STRUCT_F_OWNER_EX_H diff --git a/libc/include/llvm-libc-types/struct_flock.h b/libc/include/llvm-libc-types/struct_flock.h new file mode 100644 index 00000000000000..51c9d27640ea1a --- /dev/null +++ b/libc/include/llvm-libc-types/struct_flock.h @@ -0,0 +1,25 @@ +//===-- Definition of type struct flock64 ---------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_TYPES_STRUCT_FLOCK_H +#define LLVM_LIBC_TYPES_STRUCT_FLOCK_H + +#include "llvm-libc-types/off_t.h" +#include "llvm-libc-types/pid_t.h" + +#include + +struct flock { + int16_t l_type; + int16_t l_whence; + off_t l_start; + off_t l_len; + pid_t l_pid; +}; + +#endif // LLVM_LIBC_TYPES_STRUCT_FLOCK_H diff --git a/libc/include/llvm-libc-types/struct_flock64.h b/libc/include/llvm-libc-types/struct_flock64.h new file mode 100644 index 00000000000000..ac50003ca62f60 --- /dev/null +++ b/libc/include/llvm-libc-types/struct_flock64.h @@ -0,0 +1,25 @@ +//===-- Definition of type struct flock64 ---------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_TYPES_STRUCT_FLOCK64_H +#define LLVM_LIBC_TYPES_STRUCT_FLOCK64_H + +#include "llvm-libc-types/off64_t.h" +#include "llvm-libc-types/pid_t.h" + +#include + +struct flock64 { + int16_t l_type; + int16_t l_whence; + off64_t l_start; + off64_t l_len; + pid_t l_pid; +}; + +#endif // LLVM_LIBC_TYPES_STRUCT_FLOCK64_H diff --git a/libc/spec/posix.td b/libc/spec/posix.td index d428d54e32a331..e7a0cf883c6077 100644 --- a/libc/spec/posix.td +++ b/libc/spec/posix.td @@ -230,6 +230,11 @@ def POSIX : StandardSpec<"POSIX"> { RetValSpec, [ArgSpec, ArgSpec] >, + FunctionSpec< + "fcntl", + RetValSpec, + [ArgSpec, ArgSpec, ArgSpec] + >, FunctionSpec< "open", RetValSpec, diff --git a/libc/src/fcntl/CMakeLists.txt b/libc/src/fcntl/CMakeLists.txt index 0b9ee47c4f7c13..77400e9050d08b 100644 --- a/libc/src/fcntl/CMakeLists.txt +++ b/libc/src/fcntl/CMakeLists.txt @@ -9,6 +9,13 @@ add_entrypoint_object( .${LIBC_TARGET_OS}.creat ) +add_entrypoint_object( + fcntl + ALIAS + DEPENDS + .${LIBC_TARGET_OS}.fcntl +) + add_entrypoint_object( open ALIAS diff --git a/libc/src/fcntl/fcntl.h b/libc/src/fcntl/fcntl.h new file mode 100644 index 00000000000000..8fe3fb3146b91d --- /dev/null +++ b/libc/src/fcntl/fcntl.h @@ -0,0 +1,18 @@ +//===-- Implementation header of fcntl --------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_FCNTL_FCNTL_H +#define LLVM_LIBC_SRC_FCNTL_FCNTL_H + +namespace LIBC_NAMESPACE { + +int fcntl(int fd, int cmd, ...); + +} // namespace LIBC_NAMESPACE + +#endif // LLVM_LIBC_SRC_FCNTL_FCNTL_H diff --git a/libc/src/fcntl/linux/CMakeLists.txt b/libc/src/fcntl/linux/CMakeLists.txt index 87b8d4695c4fc5..732b7beac41bfb 100644 --- a/libc/src/fcntl/linux/CMakeLists.txt +++ b/libc/src/fcntl/linux/CMakeLists.txt @@ -10,6 +10,22 @@ add_entrypoint_object( libc.src.errno.errno ) +add_entrypoint_object( + fcntl + SRCS + fcntl.cpp + HDRS + ../fcntl.h + DEPENDS + libc.include.fcntl + libc.hdr.types.struct_flock + libc.hdr.types.struct_flock64 + libc.hdr.types.struct_f_owner_ex + libc.hdr.fcntl_macros + libc.src.__support.OSUtil.osutil + libc.src.errno.errno +) + add_entrypoint_object( open SRCS diff --git a/libc/src/fcntl/linux/fcntl.cpp b/libc/src/fcntl/linux/fcntl.cpp new file mode 100644 index 00000000000000..24a20fb364109b --- /dev/null +++ b/libc/src/fcntl/linux/fcntl.cpp @@ -0,0 +1,93 @@ +//===-- Implementation of fcntl -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/fcntl/fcntl.h" + +#include "hdr/fcntl_macros.h" +#include "hdr/types/struct_f_owner_ex.h" +#include "hdr/types/struct_flock.h" +#include "hdr/types/struct_flock64.h" +#include "src/__support/OSUtil/syscall.h" // For internal syscall function. 
+#include "src/__support/common.h" +#include "src/errno/libc_errno.h" + +#include +#include // For syscall numbers. + +// The OFD file locks require special handling for LARGEFILES +namespace LIBC_NAMESPACE { +LLVM_LIBC_FUNCTION(int, fcntl, (int fd, int cmd, ...)) { + void *arg; + va_list varargs; + va_start(varargs, cmd); + arg = va_arg(varargs, void *); + va_end(varargs); + + switch (cmd) { + case F_SETLKW: + return syscall_impl(SYS_fcntl, fd, cmd, arg); + case F_OFD_SETLKW: { + struct flock *flk = reinterpret_cast(arg); + // convert the struct to a flock64 + struct flock64 flk64; + flk64.l_type = flk->l_type; + flk64.l_whence = flk->l_whence; + flk64.l_start = flk->l_start; + flk64.l_len = flk->l_len; + flk64.l_pid = flk->l_pid; + // create a syscall + return syscall_impl(SYS_fcntl, fd, cmd, &flk64); + } + case F_OFD_GETLK: + case F_OFD_SETLK: { + struct flock *flk = reinterpret_cast(arg); + // convert the struct to a flock64 + struct flock64 flk64; + flk64.l_type = flk->l_type; + flk64.l_whence = flk->l_whence; + flk64.l_start = flk->l_start; + flk64.l_len = flk->l_len; + flk64.l_pid = flk->l_pid; + // create a syscall + int retVal = syscall_impl(SYS_fcntl, fd, cmd, &flk64); + // On failure, return + if (retVal == -1) + return -1; + // Check for overflow, i.e. the offsets are not the same when cast + // to off_t from off64_t. + if (static_cast(flk64.l_len) != flk64.l_len || + static_cast(flk64.l_start) != flk64.l_start) { + libc_errno = EOVERFLOW; + return -1; + } + // Now copy back into flk, in case flk64 got modified + flk->l_type = flk64.l_type; + flk->l_whence = flk64.l_whence; + flk->l_start = flk64.l_start; + flk->l_len = flk64.l_len; + flk->l_pid = flk64.l_pid; + return retVal; + } + case F_GETOWN: { + struct f_owner_ex fex; + int retVal = syscall_impl(SYS_fcntl, fd, F_GETOWN_EX, &fex); + if (retVal == -EINVAL) + return syscall_impl(SYS_fcntl, fd, cmd, + reinterpret_cast(arg)); + if (static_cast(retVal) <= -4096UL) + return fex.type == F_OWNER_PGRP ? -fex.pid : fex.pid; + + libc_errno = -retVal; + return -1; + } + // The general case + default: + return syscall_impl(SYS_fcntl, fd, cmd, reinterpret_cast(arg)); + } +} +} // namespace LIBC_NAMESPACE diff --git a/libc/test/src/fcntl/CMakeLists.txt b/libc/test/src/fcntl/CMakeLists.txt index ae39d8d5f878c5..aae296f074bea0 100644 --- a/libc/test/src/fcntl/CMakeLists.txt +++ b/libc/test/src/fcntl/CMakeLists.txt @@ -17,6 +17,23 @@ add_libc_unittest( libc.test.UnitTest.ErrnoSetterMatcher ) +add_libc_unittest( + fcntl_test + SUITE + libc_fcntl_unittests + SRCS + fcntl_test.cpp + DEPENDS + libc.include.fcntl + libc.src.errno.errno + libc.src.fcntl.fcntl + libc.src.fcntl.open + libc.src.unistd.close + libc.hdr.types.struct_flock + libc.hdr.fcntl_macros + libc.test.UnitTest.ErrnoSetterMatcher +) + add_libc_unittest( openat_test SUITE diff --git a/libc/test/src/fcntl/fcntl_test.cpp b/libc/test/src/fcntl/fcntl_test.cpp new file mode 100644 index 00000000000000..c5cbb61b4ed8a5 --- /dev/null +++ b/libc/test/src/fcntl/fcntl_test.cpp @@ -0,0 +1,155 @@ +//===-- Unittest for fcntl ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "hdr/fcntl_macros.h" +#include "hdr/types/struct_flock.h" +#include "src/errno/libc_errno.h" +#include "src/fcntl/fcntl.h" +#include "src/fcntl/open.h" +#include "src/unistd/close.h" +#include "test/UnitTest/ErrnoSetterMatcher.h" +#include "test/UnitTest/Test.h" + +#include +#include // For S_IRWXU + +TEST(LlvmLibcFcntlTest, FcntlDupfd) { + using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; + constexpr const char *TEST_FILE_NAME = "testdata/fcntl_dup.test"; + auto TEST_FILE = libc_make_test_file_path(TEST_FILE_NAME); + int fd2, fd3; + int fd = LIBC_NAMESPACE::open(TEST_FILE, O_CREAT | O_TRUNC, S_IRWXU); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd, 0); + + fd2 = LIBC_NAMESPACE::fcntl(fd, F_DUPFD, 0); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd2, 0); + + fd3 = LIBC_NAMESPACE::fcntl(fd, F_DUPFD, 10); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd3, 0); + + ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0)); + ASSERT_THAT(LIBC_NAMESPACE::close(fd2), Succeeds(0)); + ASSERT_THAT(LIBC_NAMESPACE::close(fd3), Succeeds(0)); +} + +TEST(LlvmLibcFcntlTest, FcntlGetFl) { + using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; + constexpr const char *TEST_FILE_NAME = "testdata/fcntl_getfl.test"; + auto TEST_FILE = libc_make_test_file_path(TEST_FILE_NAME); + int retVal; + int fd = LIBC_NAMESPACE::open(TEST_FILE, O_CREAT | O_TRUNC, S_IRWXU); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd, 0); + + retVal = LIBC_NAMESPACE::fcntl(fd, F_GETFL); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(retVal, -1); + + ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0)); +} + +TEST(LlvmLibcFcntlTest, FcntlSetFl) { + using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; + constexpr const char *TEST_FILE_NAME = "testdata/fcntl_setfl.test"; + auto TEST_FILE = libc_make_test_file_path(TEST_FILE_NAME); + + int retVal; + int fd = LIBC_NAMESPACE::open(TEST_FILE, O_CREAT | O_TRUNC | O_RDWR, S_IRWXU); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd, 0); + + retVal = LIBC_NAMESPACE::fcntl(fd, F_GETFL); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(retVal, -1); + + int oldFlags = LIBC_NAMESPACE::fcntl(fd, F_GETFL, 0); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(oldFlags, 0); + + // Add the APPEND flag; + oldFlags |= O_APPEND; + + retVal = LIBC_NAMESPACE::fcntl(fd, F_SETFL, oldFlags); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(retVal, -1); + + // Remove the APPEND flag; + oldFlags = -oldFlags & O_APPEND; + + retVal = LIBC_NAMESPACE::fcntl(fd, F_SETFL, oldFlags); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(retVal, -1); + + ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0)); +} + +TEST(LlvmLibcFcntlTest, FcntlGetLkRead) { + using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; + constexpr const char *TEST_FILE_NAME = "testdata/fcntl_getlkread.test"; + auto TEST_FILE = libc_make_test_file_path(TEST_FILE_NAME); + + struct flock flk, svflk; + int retVal; + int fd = + LIBC_NAMESPACE::open(TEST_FILE, O_CREAT | O_TRUNC | O_RDONLY, S_IRWXU); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd, 0); + + flk.l_type = F_RDLCK; + flk.l_start = 0; + flk.l_whence = SEEK_SET; + flk.l_len = 50; + + // copy flk into svflk + svflk = flk; + + retVal = LIBC_NAMESPACE::fcntl(fd, F_GETLK, &svflk); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(retVal, -1); + ASSERT_NE((int)flk.l_type, F_WRLCK); // File should not be write locked. 
+ + retVal = LIBC_NAMESPACE::fcntl(fd, F_SETLK, &svflk); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(retVal, -1); + + ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0)); +} + +TEST(LlvmLibcFcntlTest, FcntlGetLkWrite) { + using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; + constexpr const char *TEST_FILE_NAME = "testdata/fcntl_getlkwrite.test"; + auto TEST_FILE = libc_make_test_file_path(TEST_FILE_NAME); + + struct flock flk, svflk; + int retVal; + int fd = LIBC_NAMESPACE::open(TEST_FILE, O_CREAT | O_TRUNC | O_RDWR, S_IRWXU); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd, 0); + + flk.l_type = F_WRLCK; + flk.l_start = 0; + flk.l_whence = SEEK_SET; + flk.l_len = 0; + + // copy flk into svflk + svflk = flk; + + retVal = LIBC_NAMESPACE::fcntl(fd, F_GETLK, &svflk); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(retVal, -1); + ASSERT_NE((int)flk.l_type, F_RDLCK); // File should not be read locked. + + retVal = LIBC_NAMESPACE::fcntl(fd, F_SETLK, &svflk); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(retVal, -1); + + ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0)); +} From 1ca600586a61cee41e117a188d880417de3e1c00 Mon Sep 17 00:00:00 2001 From: Ryosuke Niwa Date: Wed, 1 May 2024 11:27:45 -0700 Subject: [PATCH 46/48] [alpha.webkit.UncountedCallArgsChecker] Support more trivial expressions. (#90414) Treat a compound operator such as |=, array subscription, sizeof, and non-type template parameter as trivial so long as subexpressions are also trivial. Also treat true/false boolean literal as trivial. --- .../Checkers/WebKit/PtrTypesSemantics.cpp | 23 ++++++++++++++++++- .../Checkers/WebKit/uncounted-obj-arg.cpp | 23 +++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp index 287f6a52870056..6901dbb415bf76 100644 --- a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp @@ -311,7 +311,7 @@ class TrivialFunctionAnalysisVisitor bool VisitUnaryOperator(const UnaryOperator *UO) { // Operator '*' and '!' are allowed as long as the operand is trivial. auto op = UO->getOpcode(); - if (op == UO_Deref || op == UO_AddrOf || op == UO_LNot) + if (op == UO_Deref || op == UO_AddrOf || op == UO_LNot || op == UO_Not) return Visit(UO->getSubExpr()); if (UO->isIncrementOp() || UO->isDecrementOp()) { @@ -331,6 +331,16 @@ class TrivialFunctionAnalysisVisitor return Visit(BO->getLHS()) && Visit(BO->getRHS()); } + bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { + // Compound assignment operator such as |= is trivial if its + // subexpresssions are trivial. + return VisitChildren(CAO); + } + + bool VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { + return VisitChildren(ASE); + } + bool VisitConditionalOperator(const ConditionalOperator *CO) { // Ternary operators are trivial if their conditions & values are trivial. return VisitChildren(CO); @@ -360,6 +370,16 @@ class TrivialFunctionAnalysisVisitor return TrivialFunctionAnalysis::isTrivialImpl(Callee, Cache); } + bool + VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E) { + // Non-type template paramter is compile time constant and trivial. + return true; + } + + bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E) { + return VisitChildren(E); + } + bool VisitPredefinedExpr(const PredefinedExpr *E) { // A predefined identifier such as "func" is considered trivial. 
return true; @@ -463,6 +483,7 @@ class TrivialFunctionAnalysisVisitor bool VisitFixedPointLiteral(const FixedPointLiteral *E) { return true; } bool VisitCharacterLiteral(const CharacterLiteral *E) { return true; } bool VisitStringLiteral(const StringLiteral *E) { return true; } + bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { return true; } bool VisitConstantExpr(const ConstantExpr *CE) { // Constant expressions are trivial. diff --git a/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp b/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp index 80a9a263dab140..63a68a994a5c64 100644 --- a/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp +++ b/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp @@ -201,6 +201,13 @@ class RefCounted { unsigned trivial25() const { return __c11_atomic_load((volatile _Atomic(unsigned) *)&v, __ATOMIC_RELAXED); } bool trivial26() { bool hasValue = v; return !hasValue; } bool trivial27(int v) { bool value; value = v ? 1 : 0; return value; } + bool trivial28() { return true; } + bool trivial29() { return false; } + unsigned trivial30(unsigned v) { unsigned r = 0xff; r |= v; return r; } + int trivial31(int* v) { return v[0]; } + unsigned trivial32() { return sizeof(int); } + unsigned trivial33() { return ~0xff; } + template unsigned trivial34() { return v; } static RefCounted& singleton() { static RefCounted s_RefCounted; @@ -273,6 +280,9 @@ class RefCounted { return val; } + int nonTrivial13() { return ~otherFunction(); } + int nonTrivial14() { int r = 0xff; r |= otherFunction(); return r; } + unsigned v { 0 }; Number* number { nullptr }; Enum enumValue { Enum::Value1 }; @@ -322,6 +332,15 @@ class UnrelatedClass { getFieldTrivial().trivial25(); // no-warning getFieldTrivial().trivial26(); // no-warning getFieldTrivial().trivial27(5); // no-warning + getFieldTrivial().trivial28(); // no-warning + getFieldTrivial().trivial29(); // no-warning + getFieldTrivial().trivial30(7); // no-warning + int a[] = {1, 2}; + getFieldTrivial().trivial31(a); // no-warning + getFieldTrivial().trivial32(); // no-warning + getFieldTrivial().trivial33(); // no-warning + getFieldTrivial().trivial34<7>(); // no-warning + RefCounted::singleton().trivial18(); // no-warning RefCounted::singleton().someFunction(); // no-warning @@ -351,6 +370,10 @@ class UnrelatedClass { // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}} getFieldTrivial().nonTrivial12(); // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}} + getFieldTrivial().nonTrivial13(); + // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}} + getFieldTrivial().nonTrivial14(); + // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}} } }; From 91fef0013f2668d1dc0623ede21cf4048d9a733e Mon Sep 17 00:00:00 2001 From: Fangrui Song Date: Wed, 1 May 2024 11:32:04 -0700 Subject: [PATCH 47/48] [ELF] Catch zlib deflateInit2 error The function may return Z_MEM_ERROR or Z_STREAM_ERR. The former does not have a good way of testing. The latter will be possible with a pending change that allows setting the compression level, which will come with a test. 
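(Illustration only, not part of the patch; assumes a system zlib and linking with -lz.) deflateInit2 reports invalid parameters with Z_STREAM_ERROR and allocation failure with Z_MEM_ERROR, so an out-of-range windowBits value is an easy way to observe a nonzero return:

    #include <cstdio>
    #include <zlib.h>

    int main() {
      z_stream s = {};
      // windowBits must be 8..15 (or the negated / gzip-offset forms), so 99 is rejected.
      int res = deflateInit2(&s, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                             /*windowBits=*/99, /*memLevel=*/8, Z_DEFAULT_STRATEGY);
      std::printf("deflateInit2 returned %d (Z_OK=%d, Z_STREAM_ERROR=%d)\n",
                  res, Z_OK, Z_STREAM_ERROR);
      return 0;
    }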
--- lld/ELF/OutputSections.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp index 3e58ed4bda2d3c..1b09e5b0a55742 100644 --- a/lld/ELF/OutputSections.cpp +++ b/lld/ELF/OutputSections.cpp @@ -301,7 +301,11 @@ static SmallVector deflateShard(ArrayRef in, int level, // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate // data with no zlib header or trailer. z_stream s = {}; - deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY); + auto res = deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY); + if (res != 0) { + errorOrWarn("--compress-sections: deflateInit2 returned " + Twine(res)); + return {}; + } s.next_in = const_cast(in.data()); s.avail_in = in.size(); From 6d44a1ef55b559e59d725b07ffe1da988b4e5f1c Mon Sep 17 00:00:00 2001 From: Fangrui Song Date: Wed, 1 May 2024 11:40:46 -0700 Subject: [PATCH 48/48] [ELF] Adjust --compress-sections to support compression level zstd excels at scaling from low-ratio-very-fast to high-ratio-pretty-slow. Some users prioritize speed and prefer disk read speed, while others focus on achieving the highest compression ratio possible, similar to traditional high-ratio codecs like LZMA. Add an optional `level` to `--compress-sections` (#84855) to cater to these diverse needs. While we initially aimed for a one-size-fits-all approach, this no longer seems to work. (https://richg42.blogspot.com/2015/11/the-lossless-decompression-pareto.html) When --compress-debug-sections is used together, make --compress-sections take precedence since --compress-sections is usually more specific. Remove the level distinction between -O/-O1 and -O2 for --compress-debug-sections=zlib for a more consistent user experience. 
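For example (hypothetical invocation; object and output names made up), a user who favors compression ratio over link speed could now write:

    ld.lld a.o -o a.out --compress-sections='.debug_*=zstd:12'

while omitting the level keeps the previous speed-focused default:

    ld.lld a.o -o a.out --compress-sections='.debug_*=zstd'

As before, only matching sections without the SHF_ALLOC flag are compressed.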
Pull Request: https://github.com/llvm/llvm-project/pull/90567 --- lld/ELF/Config.h | 3 ++- lld/ELF/Driver.cpp | 12 ++++++++++-- lld/ELF/Options.td | 5 +++-- lld/ELF/OutputSections.cpp | 22 +++++++++++----------- lld/docs/ReleaseNotes.rst | 5 ++++- lld/docs/ld.lld.1 | 12 ++++++------ lld/test/ELF/compress-sections.s | 18 ++++++++++++++---- lld/test/ELF/compressed-debug-level.test | 14 ++++++-------- 8 files changed, 56 insertions(+), 35 deletions(-) diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h index 33bfa42b0fcbf0..c55b547a733c77 100644 --- a/lld/ELF/Config.h +++ b/lld/ELF/Config.h @@ -224,7 +224,8 @@ struct Config { bool checkSections; bool checkDynamicRelocs; std::optional compressDebugSections; - llvm::SmallVector, 0> + llvm::SmallVector< + std::tuple, 0> compressSections; bool cref; llvm::SmallVector, 0> diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp index a5b47f020f8726..b29e1e1a67f139 100644 --- a/lld/ELF/Driver.cpp +++ b/lld/ELF/Driver.cpp @@ -1533,9 +1533,17 @@ static void readConfigs(opt::InputArgList &args) { ": parse error, not 'section-glob=[none|zlib|zstd]'"); continue; } - auto type = getCompressionType(fields[1], arg->getSpelling()); + auto [typeStr, levelStr] = fields[1].split(':'); + auto type = getCompressionType(typeStr, arg->getSpelling()); + unsigned level = 0; + if (fields[1].size() != typeStr.size() && + !llvm::to_integer(levelStr, level)) { + error(arg->getSpelling() + + ": expected a non-negative integer compression level, but got '" + + levelStr + "'"); + } if (Expected pat = GlobPattern::create(fields[0])) { - config->compressSections.emplace_back(std::move(*pat), type); + config->compressSections.emplace_back(std::move(*pat), type, level); } else { error(arg->getSpelling() + ": " + toString(pat.takeError())); continue; diff --git a/lld/ELF/Options.td b/lld/ELF/Options.td index 72eaf157a181cf..73a4f9662a561f 100644 --- a/lld/ELF/Options.td +++ b/lld/ELF/Options.td @@ -68,8 +68,9 @@ defm compress_debug_sections: MetaVarName<"[none,zlib,zstd]">; defm compress_sections: EEq<"compress-sections", - "Compress non-SHF_ALLOC output sections matching ">, - MetaVarName<"=[none|zlib|zstd]">; + "Compress output sections that match the glob and do not have the SHF_ALLOC flag." + "The compression level is (if specified) or a default speed-focused level">, + MetaVarName<"={none,zlib,zstd}[:level]">; defm defsym: Eq<"defsym", "Define a symbol alias">, MetaVarName<"=">; diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp index 1b09e5b0a55742..2dbbff06a89087 100644 --- a/lld/ELF/OutputSections.cpp +++ b/lld/ELF/OutputSections.cpp @@ -339,12 +339,13 @@ template void OutputSection::maybeCompress() { (void)sizeof(Elf_Chdr); DebugCompressionType ctype = DebugCompressionType::None; - for (auto &[glob, t] : config->compressSections) - if (glob.match(name)) - ctype = t; + unsigned level = 0; // default compression level if (!(flags & SHF_ALLOC) && config->compressDebugSections && name.starts_with(".debug_") && size) ctype = *config->compressDebugSections; + for (auto &[glob, t, l] : config->compressSections) + if (glob.match(name)) + std::tie(ctype, level) = {t, l}; if (ctype == DebugCompressionType::None) return; if (flags & SHF_ALLOC) { @@ -376,13 +377,14 @@ template void OutputSection::maybeCompress() { auto shardsOut = std::make_unique[]>(numShards); #if LLVM_ENABLE_ZSTD - // Use ZSTD's streaming compression API which permits parallel workers working - // on the stream. See http://facebook.github.io/zstd/zstd_manual.html - // "Streaming compression - HowTo". 
+ // Use ZSTD's streaming compression API. See + // http://facebook.github.io/zstd/zstd_manual.html "Streaming compression - + // HowTo". if (ctype == DebugCompressionType::Zstd) { parallelFor(0, numShards, [&](size_t i) { SmallVector out; ZSTD_CCtx *cctx = ZSTD_createCCtx(); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level); ZSTD_inBuffer zib = {shardsIn[i].data(), shardsIn[i].size(), 0}; ZSTD_outBuffer zob = {nullptr, 0, 0}; size_t size; @@ -410,12 +412,10 @@ template void OutputSection::maybeCompress() { #if LLVM_ENABLE_ZLIB // We chose 1 (Z_BEST_SPEED) as the default compression level because it is - // the fastest. If -O2 is given, we use level 6 to compress debug info more by - // ~15%. We found that level 7 to 9 doesn't make much difference (~1% more - // compression) while they take significant amount of time (~2x), so level 6 - // seems enough. + // fast and provides decent compression ratios. if (ctype == DebugCompressionType::Zlib) { - const int level = config->optimize >= 2 ? 6 : Z_BEST_SPEED; + if (!level) + level = Z_BEST_SPEED; // Compress shards and compute Alder-32 checksums. Use Z_SYNC_FLUSH for all // shards but the last to flush the output to a byte boundary to be diff --git a/lld/docs/ReleaseNotes.rst b/lld/docs/ReleaseNotes.rst index a7ed49726fd99a..f8fdebfeaecf26 100644 --- a/lld/docs/ReleaseNotes.rst +++ b/lld/docs/ReleaseNotes.rst @@ -26,9 +26,12 @@ Non-comprehensive list of changes in this release ELF Improvements ---------------- -* ``--compress-sections =[none|zlib|zstd]`` is added to compress +* ``--compress-sections ={none,zlib,zstd}[:level]`` is added to compress matched output sections without the ``SHF_ALLOC`` flag. (`#84855 `_) + (`#90567 `_) +* The default compression level for zlib is now independent of linker + optimization level (``Z_BEST_SPEED``). * ``GNU_PROPERTY_AARCH64_FEATURE_PAUTH`` notes, ``R_AARCH64_AUTH_ABS64`` and ``R_AARCH64_AUTH_RELATIVE`` relocations are now supported. (`#72714 `_) diff --git a/lld/docs/ld.lld.1 b/lld/docs/ld.lld.1 index 3861120915e8bc..9ea1a9c52f2a13 100644 --- a/lld/docs/ld.lld.1 +++ b/lld/docs/ld.lld.1 @@ -156,16 +156,16 @@ may be No compression. .It Cm zlib The default compression level is 1 (fastest) as the debug info usually -compresses well at that level. If you want to compress it more, -you can specify -.Fl O2 -to set the compression level to 6. +compresses well at that level. .It Cm zstd -The compression level is 5. +Use the default compression level in zstd. .El .Pp -.It Fl -compress-sections Ns = Ns Ar section-glob=[none|zlib|zstd] +.It Fl -compress-sections Ns = Ns Ar section-glob={none,zlib,zstd}[:level] Compress output sections that match the glob and do not have the SHF_ALLOC flag. +The compression level is +.Cm level +(if specified) or a default speed-focused level. This is like a generalized .Cm --compress-debug-sections. 
.It Fl -cref diff --git a/lld/test/ELF/compress-sections.s b/lld/test/ELF/compress-sections.s index 59b5408c9624ac..aa30c7a90474f6 100644 --- a/lld/test/ELF/compress-sections.s +++ b/lld/test/ELF/compress-sections.s @@ -16,7 +16,7 @@ # CHECK1: 0000000000000010 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc0) sym0 # CHECK1: 0000000000000008 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc1) sym1 -# RUN: ld.lld -pie a.o --compress-sections '*c0=zlib' --compress-sections .debug_str=zstd -o out2 +# RUN: ld.lld -pie a.o --compress-sections '*c0=zlib' --compress-sections .debug_str=zstd:3 -o out2 # RUN: llvm-readelf -SrsX -x nonalloc0 -x .debug_str out2 | FileCheck %s --check-prefix=CHECK2 # CHECK2: Name Type Address Off Size ES Flg Lk Inf Al @@ -39,11 +39,11 @@ # CHECK2-NEXT: 02000000 00000000 38000000 00000000 # CHECK2-NEXT: 01000000 00000000 {{.*}} -## --compress-debug-sections=none takes precedence. -# RUN: ld.lld a.o --compress-debug-sections=none --compress-sections .debug_str=zstd -o out3 +## --compress-sections takes precedence. +# RUN: ld.lld a.o --compress-sections .debug_str=zstd --compress-debug-sections=none -o out3 # RUN: llvm-readelf -S out3 | FileCheck %s --check-prefix=CHECK3 -# CHECK3: .debug_str PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 01 MS 0 0 1 +# CHECK3: .debug_str PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 01 MSC 0 0 1 # RUN: not ld.lld a.o --compress-sections '*0=zlib' 2>&1 | \ # RUN: FileCheck %s --check-prefix=ERR-ALLOC --implicit-check-not=error: @@ -62,6 +62,16 @@ # ERR3: unknown --compress-sections value: zlib-gabi # ERR3-NEXT: --compress-sections: parse error, not 'section-glob=[none|zlib|zstd]' +# RUN: not ld.lld a.o --compress-sections='a=zlib:' --compress-sections='a=zlib:-1' 2>&1 | \ +# RUN: FileCheck %s --check-prefix=ERR4 --implicit-check-not=error: +# ERR4: error: --compress-sections: expected a non-negative integer compression level, but got '' +# ERR4: error: --compress-sections: expected a non-negative integer compression level, but got '-1' + +## Invalid compression level for zlib. +# RUN: not ld.lld a.o --compress-sections='.debug*=zlib:99' 2>&1 | \ +# RUN: FileCheck %s --check-prefix=ERR6 --implicit-check-not=error: +# ERR6: error: --compress-sections: deflateInit2 returned -2 + .globl _start _start: ret diff --git a/lld/test/ELF/compressed-debug-level.test b/lld/test/ELF/compressed-debug-level.test index ee95f126799722..ce3a194bd7c2b9 100644 --- a/lld/test/ELF/compressed-debug-level.test +++ b/lld/test/ELF/compressed-debug-level.test @@ -2,22 +2,20 @@ # RUN: yaml2obj %s -o %t.o +## LLD uses zlib compression of level 1 by default. Unlike previous versions, +## -O does not change the level. # RUN: ld.lld %t.o -o %t.default --compress-debug-sections=zlib # RUN: llvm-readelf --sections %t.default | FileCheck -check-prefixes=HEADER,LEVEL1 %s # RUN: ld.lld -O0 %t.o -o %t.O0 --compress-debug-sections=zlib -# RUN: llvm-readelf --sections %t.O0 | FileCheck -check-prefixes=HEADER,LEVEL1 %s # RUN: cmp %t.default %t.O0 -# RUN: ld.lld -O1 %t.o -o %t.O1 --compress-debug-sections=zlib -# RUN: llvm-readelf --sections %t.O1 | FileCheck -check-prefixes=HEADER,LEVEL1 %s -# RUN: cmp %t.default %t.O1 - # RUN: ld.lld -O2 %t.o -o %t.O2 --compress-debug-sections=zlib -# RUN: llvm-readelf --sections %t.O2 | FileCheck -check-prefixes=HEADER,LEVEL6 %s +# RUN: cmp %t.default %t.O2 -## LLD uses zlib compression of level 1 when -O0, -O1 and level 6 when -O2. -## Here we check how -O flag affects the size of compressed sections produced. +## --compression-level specifies the level. 
+# RUN: ld.lld %t.o -o %t.6 --compress-sections=.debug_info=zlib:6
+# RUN: llvm-readelf --sections %t.6 | FileCheck -check-prefixes=HEADER,LEVEL6 %s

 # HEADER: [Nr] Name        Type     Address  Off    Size
 # LEVEL1: [ 1] .debug_info PROGBITS 00000000 000094 00001{{[bc]}}