[llvm] 27f3d51 - [SVE] Lower fixed length vector fneg and fsqrt operations.
Paul Walker via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 6 02:56:53 PDT 2020
Author: Paul Walker
Date: 2020-10-06T10:48:16+01:00
New Revision: 27f3d51b4ef9bf4dc9cfeed88f66fcfc7ee99ee9
URL: https://github.com/llvm/llvm-project/commit/27f3d51b4ef9bf4dc9cfeed88f66fcfc7ee99ee9
DIFF: https://github.com/llvm/llvm-project/commit/27f3d51b4ef9bf4dc9cfeed88f66fcfc7ee99ee9.diff
LOG: [SVE] Lower fixed length vector fneg and fsqrt operations.
Also updates sve-fp.ll to use fneg directly.
Differential Revision: https://reviews.llvm.org/D88683
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
llvm/test/CodeGen/AArch64/sve-fp.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d76918d8ace4a..308628a7f8348 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1234,8 +1234,10 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
setOperationAction(ISD::FMINNUM, VT, Custom);
setOperationAction(ISD::FMUL, VT, Custom);
setOperationAction(ISD::FNEARBYINT, VT, Custom);
+ setOperationAction(ISD::FNEG, VT, Custom);
setOperationAction(ISD::FRINT, VT, Custom);
setOperationAction(ISD::FROUND, VT, Custom);
+ setOperationAction(ISD::FSQRT, VT, Custom);
setOperationAction(ISD::FSUB, VT, Custom);
setOperationAction(ISD::FTRUNC, VT, Custom);
setOperationAction(ISD::LOAD, VT, Custom);
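Marking these nodes Custom routes them through AArch64TargetLowering::LowerOperation, which is expected to reuse the predicated lowering already in place for scalable vectors. A minimal sketch of that dispatch, assuming the pre-existing LowerToPredicatedOp helper and the FNEG/FSQRT merge-passthru ISD nodes (this part is outside the quoted hunk):

    // Hypothetical shape of the LowerOperation dispatch: forward fneg and
    // fsqrt to the merging predicated SVE nodes used by the scalable path.
    case ISD::FNEG:
      return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEG_MERGE_PASSTHRU);
    case ISD::FSQRT:
      return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSQRT_MERGE_PASSTHRU);

The helper wraps a fixed-length operand in a scalable container, applies the operation under a ptrue predicate sized to the fixed vector length, and extracts the fixed-length result, which matches the ptrue/ld1/op/st1 sequences the new tests below check for.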
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
index f8a37e7d81fe0..1806b4945ec91 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
@@ -1044,6 +1044,434 @@ define void @fmul_v32f64(<32 x double>* %a, <32 x double>* %b) #0 {
ret void
}
+;
+; FNEG
+;
+
+; Don't use SVE for 64-bit vectors.
+define <4 x half> @fneg_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: fneg_v4f16:
+; CHECK: fneg v0.4h, v0.4h
+; CHECK: ret
+ %res = fneg <4 x half> %op
+ ret <4 x half> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <8 x half> @fneg_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: fneg_v8f16:
+; CHECK: fneg v0.8h, v0.8h
+; CHECK: ret
+ %res = fneg <8 x half> %op
+ ret <8 x half> %res
+}
+
+define void @fneg_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: fneg_v16f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
+; CHECK: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <16 x half>, <16 x half>* %a
+ %res = fneg <16 x half> %op
+ store <16 x half> %res, <16 x half>* %a
+ ret void
+}
+
+define void @fneg_v32f16(<32 x half>* %a) #0 {
+; CHECK-LABEL: fneg_v32f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),32)]]
+; CHECK: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <32 x half>, <32 x half>* %a
+ %res = fneg <32 x half> %op
+ store <32 x half> %res, <32 x half>* %a
+ ret void
+}
+
+define void @fneg_v64f16(<64 x half>* %a) #0 {
+; CHECK-LABEL: fneg_v64f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),64)]]
+; CHECK: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <64 x half>, <64 x half>* %a
+ %res = fneg <64 x half> %op
+ store <64 x half> %res, <64 x half>* %a
+ ret void
+}
+
+define void @fneg_v128f16(<128 x half>* %a) #0 {
+; CHECK-LABEL: fneg_v128f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),128)]]
+; CHECK: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <128 x half>, <128 x half>* %a
+ %res = fneg <128 x half> %op
+ store <128 x half> %res, <128 x half>* %a
+ ret void
+}
+
+; Don't use SVE for 64-bit vectors.
+define <2 x float> @fneg_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: fneg_v2f32:
+; CHECK: fneg v0.2s, v0.2s
+; CHECK: ret
+ %res = fneg <2 x float> %op
+ ret <2 x float> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <4 x float> @fneg_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: fneg_v4f32:
+; CHECK: fneg v0.4s, v0.4s
+; CHECK: ret
+ %res = fneg <4 x float> %op
+ ret <4 x float> %res
+}
+
+define void @fneg_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: fneg_v8f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
+; CHECK: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <8 x float>, <8 x float>* %a
+ %res = fneg <8 x float> %op
+ store <8 x float> %res, <8 x float>* %a
+ ret void
+}
+
+define void @fneg_v16f32(<16 x float>* %a) #0 {
+; CHECK-LABEL: fneg_v16f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
+; CHECK: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <16 x float>, <16 x float>* %a
+ %res = fneg <16 x float> %op
+ store <16 x float> %res, <16 x float>* %a
+ ret void
+}
+
+define void @fneg_v32f32(<32 x float>* %a) #0 {
+; CHECK-LABEL: fneg_v32f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
+; CHECK: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <32 x float>, <32 x float>* %a
+ %res = fneg <32 x float> %op
+ store <32 x float> %res, <32 x float>* %a
+ ret void
+}
+
+define void @fneg_v64f32(<64 x float>* %a) #0 {
+; CHECK-LABEL: fneg_v64f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
+; CHECK: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <64 x float>, <64 x float>* %a
+ %res = fneg <64 x float> %op
+ store <64 x float> %res, <64 x float>* %a
+ ret void
+}
+
+; Don't use SVE for 64-bit vectors.
+define <1 x double> @fneg_v1f64(<1 x double> %op) #0 {
+; CHECK-LABEL: fneg_v1f64:
+; CHECK: fneg d0, d0
+; CHECK: ret
+ %res = fneg <1 x double> %op
+ ret <1 x double> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <2 x double> @fneg_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: fneg_v2f64:
+; CHECK: fneg v0.2d, v0.2d
+; CHECK: ret
+ %res = fneg <2 x double> %op
+ ret <2 x double> %res
+}
+
+define void @fneg_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: fneg_v4f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
+; CHECK: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <4 x double>, <4 x double>* %a
+ %res = fneg <4 x double> %op
+ store <4 x double> %res, <4 x double>* %a
+ ret void
+}
+
+define void @fneg_v8f64(<8 x double>* %a) #0 {
+; CHECK-LABEL: fneg_v8f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),8)]]
+; CHECK: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <8 x double>, <8 x double>* %a
+ %res = fneg <8 x double> %op
+ store <8 x double> %res, <8 x double>* %a
+ ret void
+}
+
+define void @fneg_v16f64(<16 x double>* %a) #0 {
+; CHECK-LABEL: fneg_v16f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),16)]]
+; CHECK: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <16 x double>, <16 x double>* %a
+ %res = fneg <16 x double> %op
+ store <16 x double> %res, <16 x double>* %a
+ ret void
+}
+
+define void @fneg_v32f64(<32 x double>* %a) #0 {
+; CHECK-LABEL: fneg_v32f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),32)]]
+; CHECK: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK: fneg [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <32 x double>, <32 x double>* %a
+ %res = fneg <32 x double> %op
+ store <32 x double> %res, <32 x double>* %a
+ ret void
+}
+
+;
+; FSQRT
+;
+
+; Don't use SVE for 64-bit vectors.
+define <4 x half> @fsqrt_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: fsqrt_v4f16:
+; CHECK: fsqrt v0.4h, v0.4h
+; CHECK: ret
+ %res = call <4 x half> @llvm.sqrt.v4f16(<4 x half> %op)
+ ret <4 x half> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <8 x half> @fsqrt_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: fsqrt_v8f16:
+; CHECK: fsqrt v0.8h, v0.8h
+; CHECK: ret
+ %res = call <8 x half> @llvm.sqrt.v8f16(<8 x half> %op)
+ ret <8 x half> %res
+}
+
+define void @fsqrt_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: fsqrt_v16f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
+; CHECK: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <16 x half>, <16 x half>* %a
+ %res = call <16 x half> @llvm.sqrt.v16f16(<16 x half> %op)
+ store <16 x half> %res, <16 x half>* %a
+ ret void
+}
+
+define void @fsqrt_v32f16(<32 x half>* %a) #0 {
+; CHECK-LABEL: fsqrt_v32f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),32)]]
+; CHECK: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <32 x half>, <32 x half>* %a
+ %res = call <32 x half> @llvm.sqrt.v32f16(<32 x half> %op)
+ store <32 x half> %res, <32 x half>* %a
+ ret void
+}
+
+define void @fsqrt_v64f16(<64 x half>* %a) #0 {
+; CHECK-LABEL: fsqrt_v64f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),64)]]
+; CHECK: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <64 x half>, <64 x half>* %a
+ %res = call <64 x half> @llvm.sqrt.v64f16(<64 x half> %op)
+ store <64 x half> %res, <64 x half>* %a
+ ret void
+}
+
+define void @fsqrt_v128f16(<128 x half>* %a) #0 {
+; CHECK-LABEL: fsqrt_v128f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),128)]]
+; CHECK: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <128 x half>, <128 x half>* %a
+ %res = call <128 x half> @llvm.sqrt.v128f16(<128 x half> %op)
+ store <128 x half> %res, <128 x half>* %a
+ ret void
+}
+
+; Don't use SVE for 64-bit vectors.
+define <2 x float> @fsqrt_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: fsqrt_v2f32:
+; CHECK: fsqrt v0.2s, v0.2s
+; CHECK: ret
+ %res = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %op)
+ ret <2 x float> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <4 x float> @fsqrt_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: fsqrt_v4f32:
+; CHECK: fsqrt v0.4s, v0.4s
+; CHECK: ret
+ %res = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %op)
+ ret <4 x float> %res
+}
+
+define void @fsqrt_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: fsqrt_v8f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
+; CHECK: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <8 x float>, <8 x float>* %a
+ %res = call <8 x float> @llvm.sqrt.v8f32(<8 x float> %op)
+ store <8 x float> %res, <8 x float>* %a
+ ret void
+}
+
+define void @fsqrt_v16f32(<16 x float>* %a) #0 {
+; CHECK-LABEL: fsqrt_v16f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
+; CHECK: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <16 x float>, <16 x float>* %a
+ %res = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %op)
+ store <16 x float> %res, <16 x float>* %a
+ ret void
+}
+
+define void @fsqrt_v32f32(<32 x float>* %a) #0 {
+; CHECK-LABEL: fsqrt_v32f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
+; CHECK: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <32 x float>, <32 x float>* %a
+ %res = call <32 x float> @llvm.sqrt.v32f32(<32 x float> %op)
+ store <32 x float> %res, <32 x float>* %a
+ ret void
+}
+
+define void @fsqrt_v64f32(<64 x float>* %a) #0 {
+; CHECK-LABEL: fsqrt_v64f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
+; CHECK: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <64 x float>, <64 x float>* %a
+ %res = call <64 x float> @llvm.sqrt.v64f32(<64 x float> %op)
+ store <64 x float> %res, <64 x float>* %a
+ ret void
+}
+
+; Don't use SVE for 64-bit vectors.
+define <1 x double> @fsqrt_v1f64(<1 x double> %op) #0 {
+; CHECK-LABEL: fsqrt_v1f64:
+; CHECK: fsqrt d0, d0
+; CHECK: ret
+ %res = call <1 x double> @llvm.sqrt.v1f64(<1 x double> %op)
+ ret <1 x double> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <2 x double> @fsqrt_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: fsqrt_v2f64:
+; CHECK: fsqrt v0.2d, v0.2d
+; CHECK: ret
+ %res = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %op)
+ ret <2 x double> %res
+}
+
+define void @fsqrt_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: fsqrt_v4f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
+; CHECK: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <4 x double>, <4 x double>* %a
+ %res = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %op)
+ store <4 x double> %res, <4 x double>* %a
+ ret void
+}
+
+define void @fsqrt_v8f64(<8 x double>* %a) #0 {
+; CHECK-LABEL: fsqrt_v8f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),8)]]
+; CHECK: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <8 x double>, <8 x double>* %a
+ %res = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %op)
+ store <8 x double> %res, <8 x double>* %a
+ ret void
+}
+
+define void @fsqrt_v16f64(<16 x double>* %a) #0 {
+; CHECK-LABEL: fsqrt_v16f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),16)]]
+; CHECK: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <16 x double>, <16 x double>* %a
+ %res = call <16 x double> @llvm.sqrt.v16f64(<16 x double> %op)
+ store <16 x double> %res, <16 x double>* %a
+ ret void
+}
+
+define void @fsqrt_v32f64(<32 x double>* %a) #0 {
+; CHECK-LABEL: fsqrt_v32f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),32)]]
+; CHECK: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK: fsqrt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+ %op = load <32 x double>, <32 x double>* %a
+ %res = call <32 x double> @llvm.sqrt.v32f64(<32 x double> %op)
+ store <32 x double> %res, <32 x double>* %a
+ ret void
+}
+
;
; FSUB
;
@@ -1302,3 +1730,22 @@ declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>)
declare <16 x double> @llvm.fma.v16f64(<16 x double>, <16 x double>, <16 x double>)
declare <32 x double> @llvm.fma.v32f64(<32 x double>, <32 x double>, <32 x double>)
+
+declare <4 x half> @llvm.sqrt.v4f16(<4 x half>)
+declare <8 x half> @llvm.sqrt.v8f16(<8 x half>)
+declare <16 x half> @llvm.sqrt.v16f16(<16 x half>)
+declare <32 x half> @llvm.sqrt.v32f16(<32 x half>)
+declare <64 x half> @llvm.sqrt.v64f16(<64 x half>)
+declare <128 x half> @llvm.sqrt.v128f16(<128 x half>)
+declare <2 x float> @llvm.sqrt.v2f32(<2 x float>)
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
+declare <8 x float> @llvm.sqrt.v8f32(<8 x float>)
+declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
+declare <32 x float> @llvm.sqrt.v32f32(<32 x float>)
+declare <64 x float> @llvm.sqrt.v64f32(<64 x float>)
+declare <1 x double> @llvm.sqrt.v1f64(<1 x double>)
+declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)
+declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
+declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
+declare <16 x double> @llvm.sqrt.v16f64(<16 x double>)
+declare <32 x double> @llvm.sqrt.v32f64(<32 x double>)
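About the vl[[#min(div(VBYTES,2),16)]] patterns above: these are FileCheck numeric expressions, with VBYTES (the SVE register width in bytes) presumably defined with -D#VBYTES=<n> on the test's RUN lines, which fall outside the quoted hunks. A hypothetical RUN line for a 256-bit configuration might look like:

    ; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -D#VBYTES=32

With VBYTES=32, the fneg_v16f16 check expands to ptrue p0.h, vl16 (min(32/2, 16) = 16); min(...) clamps the expected predicate length to the vector's element count whenever the fixed-length vector is smaller than a full SVE register.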
diff --git a/llvm/test/CodeGen/AArch64/sve-fp.ll b/llvm/test/CodeGen/AArch64/sve-fp.ll
index 7ca1fdee7f32f..a54412cee9bf0 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp.ll
@@ -329,10 +329,8 @@ define <vscale x 8 x half> @fneg_nxv8f16(<vscale x 8 x half> %a) {
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fneg z0.h, p0/m, z0.h
; CHECK-NEXT: ret
- %minus.one = insertelement <vscale x 8 x half> undef, half -1.0, i64 0
- %minus.one.vec = shufflevector <vscale x 8 x half> %minus.one, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
- %neg = fmul <vscale x 8 x half> %a, %minus.one.vec
- ret <vscale x 8 x half> %neg
+ %res = fneg <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
}
define <vscale x 4 x half> @fneg_nxv4f16(<vscale x 4 x half> %a) {
@@ -341,10 +339,8 @@ define <vscale x 4 x half> @fneg_nxv4f16(<vscale x 4 x half> %a) {
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fneg z0.h, p0/m, z0.h
; CHECK-NEXT: ret
- %minus.one = insertelement <vscale x 4 x half> undef, half -1.0, i64 0
- %minus.one.vec = shufflevector <vscale x 4 x half> %minus.one, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
- %neg = fmul <vscale x 4 x half> %a, %minus.one.vec
- ret <vscale x 4 x half> %neg
+ %res = fneg <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
}
define <vscale x 2 x half> @fneg_nxv2f16(<vscale x 2 x half> %a) {
@@ -353,10 +349,8 @@ define <vscale x 2 x half> @fneg_nxv2f16(<vscale x 2 x half> %a) {
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fneg z0.h, p0/m, z0.h
; CHECK-NEXT: ret
- %minus.one = insertelement <vscale x 2 x half> undef, half -1.0, i64 0
- %minus.one.vec = shufflevector <vscale x 2 x half> %minus.one, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
- %neg = fmul <vscale x 2 x half> %a, %minus.one.vec
- ret <vscale x 2 x half> %neg
+ %res = fneg <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
}
define <vscale x 4 x float> @fneg_nxv4f32(<vscale x 4 x float> %a) {
@@ -365,10 +359,8 @@ define <vscale x 4 x float> @fneg_nxv4f32(<vscale x 4 x float> %a) {
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fneg z0.s, p0/m, z0.s
; CHECK-NEXT: ret
- %minus.one = insertelement <vscale x 4 x float> undef, float -1.0, i64 0
- %minus.one.vec = shufflevector <vscale x 4 x float> %minus.one, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
- %neg = fmul <vscale x 4 x float> %a, %minus.one.vec
- ret <vscale x 4 x float> %neg
+ %res = fneg <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
}
define <vscale x 2 x float> @fneg_nxv2f32(<vscale x 2 x float> %a) {
@@ -377,10 +369,8 @@ define <vscale x 2 x float> @fneg_nxv2f32(<vscale x 2 x float> %a) {
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fneg z0.s, p0/m, z0.s
; CHECK-NEXT: ret
- %minus.one = insertelement <vscale x 2 x float> undef, float -1.0, i64 0
- %minus.one.vec = shufflevector <vscale x 2 x float> %minus.one, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
- %neg = fmul <vscale x 2 x float> %a, %minus.one.vec
- ret <vscale x 2 x float> %neg
+ %res = fneg <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
}
define <vscale x 2 x double> @fneg_nxv2f64(<vscale x 2 x double> %a) {
@@ -389,10 +379,8 @@ define <vscale x 2 x double> @fneg_nxv2f64(<vscale x 2 x double> %a) {
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fneg z0.d, p0/m, z0.d
; CHECK-NEXT: ret
- %minus.one = insertelement <vscale x 2 x double> undef, double -1.0, i64 0
- %minus.one.vec = shufflevector <vscale x 2 x double> %minus.one, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
- %neg = fmul <vscale x 2 x double> %a, %minus.one.vec
- ret <vscale x 2 x double> %neg
+ %res = fneg <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
}
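The sve-fp.ll update is a pure test cleanup: each fneg_* test previously synthesised negation as an insertelement/shufflevector splat of -1.0 followed by an fmul, relying on the combiner to fold that into a negate. Writing %res = fneg <vscale x 8 x half> %a (and likewise for the other element types) exercises the fneg lowering directly, and fneg is also the semantically precise operation here, since it only flips the sign bit rather than performing a multiply.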
define <vscale x 8 x half> @frecps_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {