[llvm] 89e7f4d - [LV] Teach the vectorizer to cost and vectorize modf and sincospi intrinsics (#129064)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 28 04:56:16 PST 2025
Author: Benjamin Maxwell
Date: 2025-02-28T12:56:12Z
New Revision: 89e7f4d31b2673fd3bfaf065f930ca9139d92e10
URL: https://github.com/llvm/llvm-project/commit/89e7f4d31b2673fd3bfaf065f930ca9139d92e10
DIFF: https://github.com/llvm/llvm-project/commit/89e7f4d31b2673fd3bfaf065f930ca9139d92e10.diff
LOG: [LV] Teach the vectorizer to cost and vectorize modf and sincospi intrinsics (#129064)
Follow-on to #128035. This is a small extension to also support vectorizing
`llvm.modf.*` and `llvm.sincospi.*`.
The test files are renamed from `sincos.ll` to
`multiple-result-intrinsics.ll` to group the similar tests together
(these tests make up most of this PR).
Added:
llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
Modified:
llvm/include/llvm/CodeGen/BasicTTIImpl.h
llvm/lib/Analysis/VectorUtils.cpp
Removed:
llvm/test/Transforms/LoopVectorize/AArch64/sincos.ll
llvm/test/Transforms/LoopVectorize/sincos.ll
################################################################################
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index d679409770ca1..563953516a354 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -2056,12 +2056,33 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
}
case Intrinsic::experimental_vector_match:
return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
- case Intrinsic::sincos: {
+ case Intrinsic::modf:
+ case Intrinsic::sincos:
+ case Intrinsic::sincospi: {
Type *Ty = getContainedTypes(RetTy).front();
EVT VT = getTLI()->getValueType(DL, Ty);
- RTLIB::Libcall LC = RTLIB::getSINCOS(VT.getScalarType());
- if (auto Cost =
- getMultipleResultIntrinsicVectorLibCallCost(ICA, CostKind, LC))
+
+ RTLIB::Libcall LC = [&] {
+ switch (ICA.getID()) {
+ case Intrinsic::modf:
+ return RTLIB::getMODF;
+ case Intrinsic::sincos:
+ return RTLIB::getSINCOS;
+ case Intrinsic::sincospi:
+ return RTLIB::getSINCOSPI;
+ default:
+ llvm_unreachable("unexpected intrinsic");
+ }
+ }()(VT.getScalarType());
+
+ std::optional<unsigned> CallRetElementIndex;
+ // The first element of the modf result is returned by value in the
+ // libcall.
+ if (ICA.getID() == Intrinsic::modf)
+ CallRetElementIndex = 0;
+
+ if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
+ ICA, CostKind, LC, CallRetElementIndex))
return *Cost;
// Otherwise, fallback to default scalarization cost.
break;
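
For context on the `CallRetElementIndex` handling above: it mirrors the shape of the
underlying scalar C library routines. The sketch below is illustrative only (it is not
part of this patch), and the `sincospif` signature mentioned in a comment is an assumed
vendor-extension shape rather than ISO C.

```c
/* Illustrative only: scalar libcall shapes behind the widened intrinsics. */
#include <math.h>
#include <stdio.h>

int main(void) {
  float ipart;
  /* modff returns the fractional part by value and writes the integral part
     through the pointer, so only element 0 of llvm.modf's { frac, int }
     result comes back in the call's return value -- hence
     CallRetElementIndex = 0 for Intrinsic::modf. */
  float frac = modff(2.75f, &ipart);

  /* sincosf (a GNU/vendor extension) returns both results via out-pointers:
        void sincosf(float x, float *sinp, float *cosp);
     sincospi libcalls are assumed to follow the same out-pointer shape, so
     neither intrinsic sets CallRetElementIndex. */
  printf("frac=%f int=%f\n", frac, ipart);
  return 0;
}
```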
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index dcfd3d5a8bd6e..ede0fca4d51b0 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -73,6 +73,7 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
case Intrinsic::sin:
case Intrinsic::cos:
case Intrinsic::sincos:
+ case Intrinsic::sincospi:
case Intrinsic::tan:
case Intrinsic::sinh:
case Intrinsic::cosh:
@@ -88,6 +89,7 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
case Intrinsic::maxnum:
case Intrinsic::minimum:
case Intrinsic::maximum:
+ case Intrinsic::modf:
case Intrinsic::copysign:
case Intrinsic::floor:
case Intrinsic::ceil:
@@ -186,7 +188,9 @@ bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
case Intrinsic::ucmp:
case Intrinsic::scmp:
return OpdIdx == -1 || OpdIdx == 0;
+ case Intrinsic::modf:
case Intrinsic::sincos:
+ case Intrinsic::sincospi:
case Intrinsic::is_fpclass:
case Intrinsic::vp_is_fpclass:
return OpdIdx == 0;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
new file mode 100644
index 0000000000000..544ef5c82c7ac
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
@@ -0,0 +1,579 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(:|sincos|modf|extractvalue|store)" --version 5
+; RUN: opt -passes=loop-vectorize -mtriple=aarch64-gnu-linux -mcpu=neoverse-v1 -mattr=+sve < %s -S -o - -debug-only=loop-vectorize 2>%t.1 | FileCheck %s --check-prefix=CHECK
+; RUN: opt -passes=loop-vectorize -mtriple=aarch64-gnu-linux -mcpu=neoverse-v1 -mattr=+sve -vector-library=ArmPL < %s -S -o - -debug-only=loop-vectorize 2>%t.2 | FileCheck %s --check-prefix=CHECK-ARMPL
+; RUN: FileCheck --input-file=%t.1 --check-prefix=CHECK-COST %s
+; RUN: FileCheck --input-file=%t.2 --check-prefix=CHECK-COST-ARMPL %s
+; REQUIRES: asserts
+
+; CHECK-COST-LABEL: sincos_f32
+; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
+; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of 58 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+
+; CHECK-COST-ARMPL-LABEL: sincos_f32
+; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
+; CHECK-COST-ARMPL: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 12 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 13 for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+
+define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @sincos_f32(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
+; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
+; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK: [[EXIT:.*:]]
+;
+; CHECK-ARMPL-LABEL: define void @sincos_f32(
+; CHECK-ARMPL-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-ARMPL: [[ENTRY:.*:]]
+; CHECK-ARMPL: [[VECTOR_PH:.*:]]
+; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
+; CHECK-ARMPL: [[TMP12:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincos.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD:%.*]])
+; CHECK-ARMPL: [[TMP13:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincos.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD1:%.*]])
+; CHECK-ARMPL: [[TMP14:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP12]], 0
+; CHECK-ARMPL: [[TMP15:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP13]], 0
+; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP12]], 1
+; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP13]], 1
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP14]], ptr [[TMP19:%.*]], align 4
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP15]], ptr [[TMP22:%.*]], align 4
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP16]], ptr [[TMP24:%.*]], align 4
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP17]], ptr [[TMP27:%.*]], align 4
+; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
+; CHECK-ARMPL: [[SCALAR_PH:.*:]]
+; CHECK-ARMPL: [[FOR_BODY:.*:]]
+; CHECK-ARMPL: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
+; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK-ARMPL: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK-ARMPL: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK-ARMPL: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
+ %in_val = load float, ptr %arrayidx, align 4
+ %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
+ %extract_a = extractvalue { float, float } %call, 0
+ %extract_b = extractvalue { float, float } %call, 1
+ %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
+ store float %extract_a, ptr %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
+ store float %extract_b, ptr %arrayidx4, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+; CHECK-COST-LABEL: sincos_f64
+; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { double, double } @llvm.sincos.f64(double %in_val)
+; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+
+; CHECK-COST-ARMPL-LABEL: sincos_f64
+; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { double, double } @llvm.sincos.f64(double %in_val)
+; CHECK-COST-ARMPL: Cost of 12 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 13 for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+
+define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @sincos_f64(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x double>, <2 x double> } @llvm.sincos.v2f64(<2 x double> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
+; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
+; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
+; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
+; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
+; CHECK: [[EXIT:.*:]]
+;
+; CHECK-ARMPL-LABEL: define void @sincos_f64(
+; CHECK-ARMPL-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK-ARMPL: [[ENTRY:.*:]]
+; CHECK-ARMPL: [[VECTOR_PH:.*:]]
+; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
+; CHECK-ARMPL: [[TMP12:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.sincos.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD:%.*]])
+; CHECK-ARMPL: [[TMP13:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.sincos.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD1:%.*]])
+; CHECK-ARMPL: [[TMP14:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP12]], 0
+; CHECK-ARMPL: [[TMP15:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP13]], 0
+; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP12]], 1
+; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP13]], 1
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP14]], ptr [[TMP19:%.*]], align 8
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP15]], ptr [[TMP22:%.*]], align 8
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP16]], ptr [[TMP24:%.*]], align 8
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP17]], ptr [[TMP27:%.*]], align 8
+; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
+; CHECK-ARMPL: [[SCALAR_PH:.*:]]
+; CHECK-ARMPL: [[FOR_BODY:.*:]]
+; CHECK-ARMPL: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]])
+; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
+; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
+; CHECK-ARMPL: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
+; CHECK-ARMPL: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
+; CHECK-ARMPL: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %iv
+ %in_val = load double, ptr %arrayidx, align 8
+ %call = tail call { double, double } @llvm.sincos.f64(double %in_val)
+ %extract_a = extractvalue { double, double } %call, 0
+ %extract_b = extractvalue { double, double } %call, 1
+ %arrayidx2 = getelementptr inbounds double, ptr %out_a, i64 %iv
+ store double %extract_a, ptr %arrayidx2, align 8
+ %arrayidx4 = getelementptr inbounds double, ptr %out_b, i64 %iv
+ store double %extract_b, ptr %arrayidx4, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+; CHECK-COST-LABEL: predicated_sincos
+; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
+; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of 58 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+
+; CHECK-COST-ARMPL-LABEL: predicated_sincos
+; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
+; CHECK-COST-ARMPL: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 12 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 13 for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
+
+define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @predicated_sincos(
+; CHECK-SAME: float [[X:%.*]], ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[IF_THEN:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK: [[IF_MERGE:.*:]]
+; CHECK: [[FOR_END:.*:]]
+;
+; CHECK-ARMPL-LABEL: define void @predicated_sincos(
+; CHECK-ARMPL-SAME: float [[X:%.*]], ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK-ARMPL: [[ENTRY:.*:]]
+; CHECK-ARMPL: [[VECTOR_PH:.*:]]
+; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
+; CHECK-ARMPL: [[TMP15:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincos.nxv4f32(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]])
+; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP15]], 0
+; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP15]], 1
+; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP16]], ptr [[TMP19:%.*]], i32 4, <vscale x 4 x i1> [[TMP14:%.*]])
+; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP17]], ptr [[TMP21:%.*]], i32 4, <vscale x 4 x i1> [[TMP14]])
+; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
+; CHECK-ARMPL: [[SCALAR_PH:.*:]]
+; CHECK-ARMPL: [[FOR_BODY:.*:]]
+; CHECK-ARMPL: [[IF_THEN:.*:]]
+; CHECK-ARMPL: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
+; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK-ARMPL: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK-ARMPL: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK-ARMPL: [[IF_MERGE:.*:]]
+; CHECK-ARMPL: [[FOR_END:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %iv.next, %if.merge ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
+ %in_val = load float, ptr %arrayidx, align 4
+ %if_cond = fcmp olt float %in_val, %x
+ br i1 %if_cond, label %if.then, label %if.merge
+
+if.then:
+ %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
+ %extract_a = extractvalue { float, float } %call, 0
+ %extract_b = extractvalue { float, float } %call, 1
+ %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
+ store float %extract_a, ptr %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
+ store float %extract_b, ptr %arrayidx4, align 4
+ br label %if.merge
+
+if.merge:
+ %iv.next = add nuw nsw i64 %iv, 1
+ %cond = icmp slt i64 %iv.next, 1024
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+; CHECK-COST-LABEL: modf_f32
+; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.modf.f32(float %in_val)
+; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST: Cost of 58 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+
+; CHECK-COST-ARMPL-LABEL: modf_f32
+; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.modf.f32(float %in_val)
+; CHECK-COST-ARMPL: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 11 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 12 for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+
+define void @modf_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @modf_f32(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x float>, <2 x float> } @llvm.modf.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
+; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
+; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.modf.f32(float [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK: [[EXIT:.*:]]
+;
+; CHECK-ARMPL-LABEL: define void @modf_f32(
+; CHECK-ARMPL-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK-ARMPL: [[ENTRY:.*:]]
+; CHECK-ARMPL: [[VECTOR_PH:.*:]]
+; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
+; CHECK-ARMPL: [[TMP12:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.modf.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD:%.*]])
+; CHECK-ARMPL: [[TMP13:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.modf.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD1:%.*]])
+; CHECK-ARMPL: [[TMP14:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP12]], 0
+; CHECK-ARMPL: [[TMP15:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP13]], 0
+; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP12]], 1
+; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP13]], 1
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP14]], ptr [[TMP19:%.*]], align 4
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP15]], ptr [[TMP22:%.*]], align 4
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP16]], ptr [[TMP24:%.*]], align 4
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP17]], ptr [[TMP27:%.*]], align 4
+; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
+; CHECK-ARMPL: [[SCALAR_PH:.*:]]
+; CHECK-ARMPL: [[FOR_BODY:.*:]]
+; CHECK-ARMPL: [[CALL:%.*]] = tail call { float, float } @llvm.modf.f32(float [[IN_VAL:%.*]])
+; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK-ARMPL: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK-ARMPL: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK-ARMPL: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
+ %in_val = load float, ptr %arrayidx, align 4
+ %call = tail call { float, float } @llvm.modf.f32(float %in_val)
+ %extract_a = extractvalue { float, float } %call, 0
+ %extract_b = extractvalue { float, float } %call, 1
+ %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
+ store float %extract_a, ptr %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
+ store float %extract_b, ptr %arrayidx4, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+; CHECK-COST-LABEL: modf_f64
+; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { double, double } @llvm.modf.f64(double %in_val)
+; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+
+; CHECK-COST-ARMPL-LABEL: modf_f64
+; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { double, double } @llvm.modf.f64(double %in_val)
+; CHECK-COST-ARMPL: Cost of 11 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 12 for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.modf(ir<%in_val>)
+
+define void @modf_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @modf_f64(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
+; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
+; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.modf.f64(double [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
+; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
+; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
+; CHECK: [[EXIT:.*:]]
+;
+; CHECK-ARMPL-LABEL: define void @modf_f64(
+; CHECK-ARMPL-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK-ARMPL: [[ENTRY:.*:]]
+; CHECK-ARMPL: [[VECTOR_PH:.*:]]
+; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
+; CHECK-ARMPL: [[TMP12:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.modf.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD:%.*]])
+; CHECK-ARMPL: [[TMP13:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.modf.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD1:%.*]])
+; CHECK-ARMPL: [[TMP14:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP12]], 0
+; CHECK-ARMPL: [[TMP15:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP13]], 0
+; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP12]], 1
+; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP13]], 1
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP14]], ptr [[TMP19:%.*]], align 8
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP15]], ptr [[TMP22:%.*]], align 8
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP16]], ptr [[TMP24:%.*]], align 8
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP17]], ptr [[TMP27:%.*]], align 8
+; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
+; CHECK-ARMPL: [[SCALAR_PH:.*:]]
+; CHECK-ARMPL: [[FOR_BODY:.*:]]
+; CHECK-ARMPL: [[CALL:%.*]] = tail call { double, double } @llvm.modf.f64(double [[IN_VAL:%.*]])
+; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
+; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
+; CHECK-ARMPL: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
+; CHECK-ARMPL: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
+; CHECK-ARMPL: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %iv
+ %in_val = load double, ptr %arrayidx, align 8
+ %call = tail call { double, double } @llvm.modf.f64(double %in_val)
+ %extract_a = extractvalue { double, double } %call, 0
+ %extract_b = extractvalue { double, double } %call, 1
+ %arrayidx2 = getelementptr inbounds double, ptr %out_a, i64 %iv
+ store double %extract_a, ptr %arrayidx2, align 8
+ %arrayidx4 = getelementptr inbounds double, ptr %out_b, i64 %iv
+ store double %extract_b, ptr %arrayidx4, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+; CHECK-COST-LABEL: sincospi_f32
+; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincospi.f32(float %in_val)
+; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST: Cost of 58 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+
+; CHECK-COST-ARMPL-LABEL: sincospi_f32
+; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincospi.f32(float %in_val)
+; CHECK-COST-ARMPL: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 12 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 13 for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+
+define void @sincospi_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @sincospi_f32(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
+; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
+; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincospi.f32(float [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK: [[EXIT:.*:]]
+;
+; CHECK-ARMPL-LABEL: define void @sincospi_f32(
+; CHECK-ARMPL-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK-ARMPL: [[ENTRY:.*:]]
+; CHECK-ARMPL: [[VECTOR_PH:.*:]]
+; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
+; CHECK-ARMPL: [[TMP12:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincospi.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD:%.*]])
+; CHECK-ARMPL: [[TMP13:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincospi.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD1:%.*]])
+; CHECK-ARMPL: [[TMP14:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP12]], 0
+; CHECK-ARMPL: [[TMP15:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP13]], 0
+; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP12]], 1
+; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP13]], 1
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP14]], ptr [[TMP19:%.*]], align 4
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP15]], ptr [[TMP22:%.*]], align 4
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP16]], ptr [[TMP24:%.*]], align 4
+; CHECK-ARMPL: store <vscale x 4 x float> [[TMP17]], ptr [[TMP27:%.*]], align 4
+; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
+; CHECK-ARMPL: [[SCALAR_PH:.*:]]
+; CHECK-ARMPL: [[FOR_BODY:.*:]]
+; CHECK-ARMPL: [[CALL:%.*]] = tail call { float, float } @llvm.sincospi.f32(float [[IN_VAL:%.*]])
+; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK-ARMPL: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK-ARMPL: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK-ARMPL: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
+ %in_val = load float, ptr %arrayidx, align 4
+ %call = tail call { float, float } @llvm.sincospi.f32(float %in_val)
+ %extract_a = extractvalue { float, float } %call, 0
+ %extract_b = extractvalue { float, float } %call, 1
+ %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
+ store float %extract_a, ptr %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
+ store float %extract_b, ptr %arrayidx4, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+; CHECK-COST-LABEL: sincospi_f64
+; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { double, double } @llvm.sincospi.f64(double %in_val)
+; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+
+; CHECK-COST-ARMPL-LABEL: sincospi_f64
+; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { double, double } @llvm.sincospi.f64(double %in_val)
+; CHECK-COST-ARMPL: Cost of 12 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+; CHECK-COST-ARMPL: Cost of 13 for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincospi(ir<%in_val>)
+
+define void @sincospi_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @sincospi_f64(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
+; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
+; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincospi.f64(double [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
+; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
+; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
+; CHECK: [[EXIT:.*:]]
+;
+; CHECK-ARMPL-LABEL: define void @sincospi_f64(
+; CHECK-ARMPL-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK-ARMPL: [[ENTRY:.*:]]
+; CHECK-ARMPL: [[VECTOR_PH:.*:]]
+; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
+; CHECK-ARMPL: [[TMP12:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.sincospi.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD:%.*]])
+; CHECK-ARMPL: [[TMP13:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.sincospi.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD1:%.*]])
+; CHECK-ARMPL: [[TMP14:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP12]], 0
+; CHECK-ARMPL: [[TMP15:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP13]], 0
+; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP12]], 1
+; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP13]], 1
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP14]], ptr [[TMP19:%.*]], align 8
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP15]], ptr [[TMP22:%.*]], align 8
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP16]], ptr [[TMP24:%.*]], align 8
+; CHECK-ARMPL: store <vscale x 2 x double> [[TMP17]], ptr [[TMP27:%.*]], align 8
+; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
+; CHECK-ARMPL: [[SCALAR_PH:.*:]]
+; CHECK-ARMPL: [[FOR_BODY:.*:]]
+; CHECK-ARMPL: [[CALL:%.*]] = tail call { double, double } @llvm.sincospi.f64(double [[IN_VAL:%.*]])
+; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
+; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
+; CHECK-ARMPL: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
+; CHECK-ARMPL: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
+; CHECK-ARMPL: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %iv
+ %in_val = load double, ptr %arrayidx, align 8
+ %call = tail call { double, double } @llvm.sincospi.f64(double %in_val)
+ %extract_a = extractvalue { double, double } %call, 0
+ %extract_b = extractvalue { double, double } %call, 1
+ %arrayidx2 = getelementptr inbounds double, ptr %out_a, i64 %iv
+ store double %extract_a, ptr %arrayidx2, align 8
+ %arrayidx4 = getelementptr inbounds double, ptr %out_b, i64 %iv
+ store double %extract_b, ptr %arrayidx4, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sincos.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sincos.ll
deleted file mode 100644
index a7e949838f762..0000000000000
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sincos.ll
+++ /dev/null
@@ -1,251 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(:|sincos|extractvalue|store)" --version 5
-; RUN: opt -passes=loop-vectorize -mtriple=aarch64-gnu-linux -mcpu=neoverse-v1 -mattr=+sve < %s -S -o - -debug-only=loop-vectorize 2>%t.1 | FileCheck %s --check-prefix=CHECK
-; RUN: opt -passes=loop-vectorize -mtriple=aarch64-gnu-linux -mcpu=neoverse-v1 -mattr=+sve -vector-library=ArmPL < %s -S -o - -debug-only=loop-vectorize 2>%t.2 | FileCheck %s --check-prefix=CHECK-ARMPL
-; RUN: FileCheck --input-file=%t.1 --check-prefix=CHECK-COST %s
-; RUN: FileCheck --input-file=%t.2 --check-prefix=CHECK-COST-ARMPL %s
-; REQUIRES: asserts
-
-; CHECK-COST-LABEL: sincos_f32
-; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
-; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of 58 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of Invalid for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-
-; CHECK-COST-ARMPL-LABEL: sincos_f32
-; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
-; CHECK-COST-ARMPL: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of 12 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of 13 for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-
-define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @sincos_f32(
-; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK: [[ENTRY:.*:]]
-; CHECK: [[VECTOR_PH:.*:]]
-; CHECK: [[VECTOR_BODY:.*:]]
-; CHECK: [[TMP3:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
-; CHECK: [[TMP4:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 0
-; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
-; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
-; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
-; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
-; CHECK: [[EXIT:.*:]]
-;
-; CHECK-ARMPL-LABEL: define void @sincos_f32(
-; CHECK-ARMPL-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-ARMPL: [[ENTRY:.*:]]
-; CHECK-ARMPL: [[VECTOR_PH:.*:]]
-; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
-; CHECK-ARMPL: [[TMP12:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincos.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD:%.*]])
-; CHECK-ARMPL: [[TMP13:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincos.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD1:%.*]])
-; CHECK-ARMPL: [[TMP14:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP12]], 0
-; CHECK-ARMPL: [[TMP15:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP13]], 0
-; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP12]], 1
-; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP13]], 1
-; CHECK-ARMPL: store <vscale x 4 x float> [[TMP14]], ptr [[TMP19:%.*]], align 4
-; CHECK-ARMPL: store <vscale x 4 x float> [[TMP15]], ptr [[TMP22:%.*]], align 4
-; CHECK-ARMPL: store <vscale x 4 x float> [[TMP16]], ptr [[TMP24:%.*]], align 4
-; CHECK-ARMPL: store <vscale x 4 x float> [[TMP17]], ptr [[TMP27:%.*]], align 4
-; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
-; CHECK-ARMPL: [[SCALAR_PH:.*:]]
-; CHECK-ARMPL: [[FOR_BODY:.*:]]
-; CHECK-ARMPL: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK-ARMPL: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK-ARMPL: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
-; CHECK-ARMPL: [[EXIT:.*:]]
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
- %in_val = load float, ptr %arrayidx, align 4
- %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
- %extract_a = extractvalue { float, float } %call, 0
- %extract_b = extractvalue { float, float } %call, 1
- %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
- store float %extract_a, ptr %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
- store float %extract_b, ptr %arrayidx4, align 4
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, 1024
- br i1 %exitcond.not, label %exit, label %for.body
-
-exit:
- ret void
-}
-
-; CHECK-COST-LABEL: sincos_f64
-; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { double, double } @llvm.sincos.f64(double %in_val)
-; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-
-; CHECK-COST-ARMPL-LABEL: sincos_f64
-; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { double, double } @llvm.sincos.f64(double %in_val)
-; CHECK-COST-ARMPL: Cost of 12 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of 13 for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-
-define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @sincos_f64(
-; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
-; CHECK: [[ENTRY:.*:]]
-; CHECK: [[VECTOR_PH:.*:]]
-; CHECK: [[VECTOR_BODY:.*:]]
-; CHECK: [[TMP3:%.*]] = call { <2 x double>, <2 x double> } @llvm.sincos.v2f64(<2 x double> [[WIDE_LOAD:%.*]])
-; CHECK: [[TMP4:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 0
-; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
-; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
-; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
-; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
-; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
-; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
-; CHECK: [[EXIT:.*:]]
-;
-; CHECK-ARMPL-LABEL: define void @sincos_f64(
-; CHECK-ARMPL-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
-; CHECK-ARMPL: [[ENTRY:.*:]]
-; CHECK-ARMPL: [[VECTOR_PH:.*:]]
-; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
-; CHECK-ARMPL: [[TMP12:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.sincos.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD:%.*]])
-; CHECK-ARMPL: [[TMP13:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.sincos.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD1:%.*]])
-; CHECK-ARMPL: [[TMP14:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP12]], 0
-; CHECK-ARMPL: [[TMP15:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP13]], 0
-; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP12]], 1
-; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP13]], 1
-; CHECK-ARMPL: store <vscale x 2 x double> [[TMP14]], ptr [[TMP19:%.*]], align 8
-; CHECK-ARMPL: store <vscale x 2 x double> [[TMP15]], ptr [[TMP22:%.*]], align 8
-; CHECK-ARMPL: store <vscale x 2 x double> [[TMP16]], ptr [[TMP24:%.*]], align 8
-; CHECK-ARMPL: store <vscale x 2 x double> [[TMP17]], ptr [[TMP27:%.*]], align 8
-; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
-; CHECK-ARMPL: [[SCALAR_PH:.*:]]
-; CHECK-ARMPL: [[FOR_BODY:.*:]]
-; CHECK-ARMPL: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]])
-; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
-; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
-; CHECK-ARMPL: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
-; CHECK-ARMPL: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
-; CHECK-ARMPL: [[EXIT:.*:]]
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %in, i64 %iv
- %in_val = load double, ptr %arrayidx, align 8
- %call = tail call { double, double } @llvm.sincos.f64(double %in_val)
- %extract_a = extractvalue { double, double } %call, 0
- %extract_b = extractvalue { double, double } %call, 1
- %arrayidx2 = getelementptr inbounds double, ptr %out_a, i64 %iv
- store double %extract_a, ptr %arrayidx2, align 8
- %arrayidx4 = getelementptr inbounds double, ptr %out_b, i64 %iv
- store double %extract_b, ptr %arrayidx4, align 8
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, 1024
- br i1 %exitcond.not, label %exit, label %for.body
-
-exit:
- ret void
-}
-
-; CHECK-COST-LABEL: predicated_sincos
-; CHECK-COST: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
-; CHECK-COST: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of 58 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST: Cost of Invalid for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-
-; CHECK-COST-ARMPL-LABEL: predicated_sincos
-; CHECK-COST-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
-; CHECK-COST-ARMPL: Cost of 26 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of 12 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-; CHECK-COST-ARMPL: Cost of 13 for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sincos(ir<%in_val>)
-
-define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @predicated_sincos(
-; CHECK-SAME: float [[X:%.*]], ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
-; CHECK: [[ENTRY:.*:]]
-; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[IF_THEN:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
-; CHECK: [[IF_MERGE:.*:]]
-; CHECK: [[FOR_END:.*:]]
-;
-; CHECK-ARMPL-LABEL: define void @predicated_sincos(
-; CHECK-ARMPL-SAME: float [[X:%.*]], ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
-; CHECK-ARMPL: [[ENTRY:.*:]]
-; CHECK-ARMPL: [[VECTOR_PH:.*:]]
-; CHECK-ARMPL: [[VECTOR_BODY:.*:]]
-; CHECK-ARMPL: [[TMP15:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincos.nxv4f32(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]])
-; CHECK-ARMPL: [[TMP16:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP15]], 0
-; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP15]], 1
-; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP16]], ptr [[TMP19:%.*]], i32 4, <vscale x 4 x i1> [[TMP14:%.*]])
-; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP17]], ptr [[TMP21:%.*]], i32 4, <vscale x 4 x i1> [[TMP14]])
-; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
-; CHECK-ARMPL: [[SCALAR_PH:.*:]]
-; CHECK-ARMPL: [[FOR_BODY:.*:]]
-; CHECK-ARMPL: [[IF_THEN:.*:]]
-; CHECK-ARMPL: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK-ARMPL: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK-ARMPL: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
-; CHECK-ARMPL: [[IF_MERGE:.*:]]
-; CHECK-ARMPL: [[FOR_END:.*:]]
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ %iv.next, %if.merge ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
- %in_val = load float, ptr %arrayidx, align 4
- %if_cond = fcmp olt float %in_val, %x
- br i1 %if_cond, label %if.then, label %if.merge
-
-if.then:
- %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
- %extract_a = extractvalue { float, float } %call, 0
- %extract_b = extractvalue { float, float } %call, 1
- %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
- store float %extract_a, ptr %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
- store float %extract_b, ptr %arrayidx4, align 4
- br label %if.merge
-
-if.merge:
- %iv.next = add nuw nsw i64 %iv, 1
- %cond = icmp slt i64 %iv.next, 1024
- br i1 %cond, label %for.body, label %for.end
-
-for.end:
- ret void
-}
diff --git a/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
new file mode 100644
index 0000000000000..d928a4b7ebe4b
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
@@ -0,0 +1,330 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(:|sincos|modf|extract|store)" --version 5
+; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 < %s -S -o - | FileCheck %s
+
+define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @sincos_f32(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
+; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
+; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
+ %in_val = load float, ptr %arrayidx, align 4
+ %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
+ %extract_a = extractvalue { float, float } %call, 0
+ %extract_b = extractvalue { float, float } %call, 1
+ %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
+ store float %extract_a, ptr %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
+ store float %extract_b, ptr %arrayidx4, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @sincos_f64(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x double>, <2 x double> } @llvm.sincos.v2f64(<2 x double> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
+; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
+; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
+; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
+; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
+; CHECK: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %iv
+ %in_val = load double, ptr %arrayidx, align 8
+ %call = tail call { double, double } @llvm.sincos.f64(double %in_val)
+ %extract_a = extractvalue { double, double } %call, 0
+ %extract_b = extractvalue { double, double } %call, 1
+ %arrayidx2 = getelementptr inbounds double, ptr %out_a, i64 %iv
+ store double %extract_a, ptr %arrayidx2, align 8
+ %arrayidx4 = getelementptr inbounds double, ptr %out_b, i64 %iv
+ store double %extract_b, ptr %arrayidx4, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @predicated_sincos(
+; CHECK-SAME: float [[X:%.*]], ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_BODY1:.*]]:
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_BODY1]] ], [ [[INDEX_NEXT:%.*]], %[[IF_THEN2:.*]] ]
+; CHECK: [[TMP4:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 0
+; CHECK: [[TMP6:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 1
+; CHECK: [[TMP7:%.*]] = extractelement <2 x i1> [[TMP3:%.*]], i32 0
+; CHECK: br i1 [[TMP7]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; CHECK: [[PRED_STORE_IF]]:
+; CHECK: [[TMP9:%.*]] = extractelement <2 x float> [[TMP5]], i32 0
+; CHECK: store float [[TMP9]], ptr [[TMP8:%.*]], align 4
+; CHECK: [[TMP11:%.*]] = extractelement <2 x float> [[TMP6]], i32 0
+; CHECK: store float [[TMP11]], ptr [[TMP10:%.*]], align 4
+; CHECK: br label %[[PRED_STORE_CONTINUE]]
+; CHECK: [[PRED_STORE_CONTINUE]]:
+; CHECK: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1
+; CHECK: br i1 [[TMP12]], label %[[PRED_STORE_IF1:.*]], label %[[IF_THEN2]]
+; CHECK: [[PRED_STORE_IF1]]:
+; CHECK: [[TMP15:%.*]] = extractelement <2 x float> [[TMP5]], i32 1
+; CHECK: store float [[TMP15]], ptr [[TMP14:%.*]], align 4
+; CHECK: [[TMP17:%.*]] = extractelement <2 x float> [[TMP6]], i32 1
+; CHECK: store float [[TMP17]], ptr [[TMP16:%.*]], align 4
+; CHECK: br label %[[IF_THEN2]]
+; CHECK: [[IF_THEN2]]:
+; CHECK: [[IF_THEN:.*:]]
+; CHECK: [[IF_THEN3:.*:]]
+; CHECK: [[IF_THEN4:.*:]]
+; CHECK: [[IF_THEN1:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK: [[IF_MERGE:.*:]]
+; CHECK: [[FOR_END:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %iv.next, %if.merge ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
+ %in_val = load float, ptr %arrayidx, align 4
+ %if_cond = fcmp olt float %in_val, %x
+ br i1 %if_cond, label %if.then, label %if.merge
+
+if.then:
+ %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
+ %extract_a = extractvalue { float, float } %call, 0
+ %extract_b = extractvalue { float, float } %call, 1
+ %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
+ store float %extract_a, ptr %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
+ store float %extract_b, ptr %arrayidx4, align 4
+ br label %if.merge
+
+if.merge:
+ %iv.next = add nuw nsw i64 %iv, 1
+ %cond = icmp slt i64 %iv.next, 1024
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+define void @modf_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @modf_f32(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x float>, <2 x float> } @llvm.modf.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
+; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
+; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.modf.f32(float [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
+ %in_val = load float, ptr %arrayidx, align 4
+ %call = tail call { float, float } @llvm.modf.f32(float %in_val)
+ %extract_a = extractvalue { float, float } %call, 0
+ %extract_b = extractvalue { float, float } %call, 1
+ %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
+ store float %extract_a, ptr %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
+ store float %extract_b, ptr %arrayidx4, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+define void @modf_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @modf_f64(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
+; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
+; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.modf.f64(double [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
+; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
+; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
+; CHECK: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %iv
+ %in_val = load double, ptr %arrayidx, align 8
+ %call = tail call { double, double } @llvm.modf.f64(double %in_val)
+ %extract_a = extractvalue { double, double } %call, 0
+ %extract_b = extractvalue { double, double } %call, 1
+ %arrayidx2 = getelementptr inbounds double, ptr %out_a, i64 %iv
+ store double %extract_a, ptr %arrayidx2, align 8
+ %arrayidx4 = getelementptr inbounds double, ptr %out_b, i64 %iv
+ store double %extract_b, ptr %arrayidx4, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+define void @sincospi_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @sincospi_f32(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
+; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
+; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincospi.f32(float [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
+; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
+ %in_val = load float, ptr %arrayidx, align 4
+ %call = tail call { float, float } @llvm.sincospi.f32(float %in_val)
+ %extract_a = extractvalue { float, float } %call, 0
+ %extract_b = extractvalue { float, float } %call, 1
+ %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
+ store float %extract_a, ptr %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
+ store float %extract_b, ptr %arrayidx4, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+define void @sincospi_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
+; CHECK-LABEL: define void @sincospi_f64(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK: [[ENTRY:.*:]]
+; CHECK: [[VECTOR_PH:.*:]]
+; CHECK: [[VECTOR_BODY:.*:]]
+; CHECK: [[TMP3:%.*]] = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> [[WIDE_LOAD:%.*]])
+; CHECK: [[TMP4:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 0
+; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
+; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
+; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK: [[MIDDLE_BLOCK:.*:]]
+; CHECK: [[SCALAR_PH:.*:]]
+; CHECK: [[FOR_BODY:.*:]]
+; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincospi.f64(double [[IN_VAL:%.*]])
+; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
+; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
+; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
+; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
+; CHECK: [[EXIT:.*:]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %iv
+ %in_val = load double, ptr %arrayidx, align 8
+ %call = tail call { double, double } @llvm.sincospi.f64(double %in_val)
+ %extract_a = extractvalue { double, double } %call, 0
+ %extract_b = extractvalue { double, double } %call, 1
+ %arrayidx2 = getelementptr inbounds double, ptr %out_a, i64 %iv
+ store double %extract_a, ptr %arrayidx2, align 8
+ %arrayidx4 = getelementptr inbounds double, ptr %out_b, i64 %iv
+ store double %extract_b, ptr %arrayidx4, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
diff --git a/llvm/test/Transforms/LoopVectorize/sincos.ll b/llvm/test/Transforms/LoopVectorize/sincos.ll
deleted file mode 100644
index c2936eb8bb8b2..0000000000000
--- a/llvm/test/Transforms/LoopVectorize/sincos.ll
+++ /dev/null
@@ -1,157 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(:|sincos|extract|store)" --version 5
-; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 < %s -S -o - | FileCheck %s
-
-define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @sincos_f32(
-; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
-; CHECK: [[ENTRY:.*:]]
-; CHECK: [[VECTOR_PH:.*:]]
-; CHECK: [[VECTOR_BODY:.*:]]
-; CHECK: [[TMP3:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
-; CHECK: [[TMP4:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 0
-; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
-; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
-; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
-; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
-; CHECK: [[EXIT:.*:]]
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
- %in_val = load float, ptr %arrayidx, align 4
- %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
- %extract_a = extractvalue { float, float } %call, 0
- %extract_b = extractvalue { float, float } %call, 1
- %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
- store float %extract_a, ptr %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
- store float %extract_b, ptr %arrayidx4, align 4
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, 1024
- br i1 %exitcond.not, label %exit, label %for.body
-
-exit:
- ret void
-}
-
-define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @sincos_f64(
-; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
-; CHECK: [[ENTRY:.*:]]
-; CHECK: [[VECTOR_PH:.*:]]
-; CHECK: [[VECTOR_BODY:.*:]]
-; CHECK: [[TMP3:%.*]] = call { <2 x double>, <2 x double> } @llvm.sincos.v2f64(<2 x double> [[WIDE_LOAD:%.*]])
-; CHECK: [[TMP4:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 0
-; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
-; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
-; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
-; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
-; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
-; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
-; CHECK: [[EXIT:.*:]]
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %in, i64 %iv
- %in_val = load double, ptr %arrayidx, align 8
- %call = tail call { double, double } @llvm.sincos.f64(double %in_val)
- %extract_a = extractvalue { double, double } %call, 0
- %extract_b = extractvalue { double, double } %call, 1
- %arrayidx2 = getelementptr inbounds double, ptr %out_a, i64 %iv
- store double %extract_a, ptr %arrayidx2, align 8
- %arrayidx4 = getelementptr inbounds double, ptr %out_b, i64 %iv
- store double %extract_b, ptr %arrayidx4, align 8
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, 1024
- br i1 %exitcond.not, label %exit, label %for.body
-
-exit:
- ret void
-}
-
-define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @predicated_sincos(
-; CHECK-SAME: float [[X:%.*]], ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
-; CHECK: [[ENTRY:.*:]]
-; CHECK: [[VECTOR_BODY1:.*]]:
-; CHECK: [[VECTOR_BODY:.*:]]
-; CHECK: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_BODY1]] ], [ [[INDEX_NEXT:%.*]], %[[FOR_BODY1:.*]] ]
-; CHECK: [[TMP4:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
-; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 0
-; CHECK: [[TMP6:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 1
-; CHECK: [[TMP7:%.*]] = extractelement <2 x i1> [[TMP3:%.*]], i32 0
-; CHECK: br i1 [[TMP7]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; CHECK: [[PRED_STORE_IF]]:
-; CHECK: [[TMP9:%.*]] = extractelement <2 x float> [[TMP5]], i32 0
-; CHECK: store float [[TMP9]], ptr [[TMP8:%.*]], align 4
-; CHECK: [[TMP11:%.*]] = extractelement <2 x float> [[TMP6]], i32 0
-; CHECK: store float [[TMP11]], ptr [[TMP10:%.*]], align 4
-; CHECK: br label %[[PRED_STORE_CONTINUE]]
-; CHECK: [[PRED_STORE_CONTINUE]]:
-; CHECK: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1
-; CHECK: br i1 [[TMP12]], label %[[PRED_STORE_IF1:.*]], label %[[FOR_BODY1]]
-; CHECK: [[PRED_STORE_IF1]]:
-; CHECK: [[TMP15:%.*]] = extractelement <2 x float> [[TMP5]], i32 1
-; CHECK: store float [[TMP15]], ptr [[TMP14:%.*]], align 4
-; CHECK: [[TMP17:%.*]] = extractelement <2 x float> [[TMP6]], i32 1
-; CHECK: store float [[TMP17]], ptr [[TMP16:%.*]], align 4
-; CHECK: br label %[[FOR_BODY1]]
-; CHECK: [[FOR_BODY1]]:
-; CHECK: [[IF_THEN1:.*:]]
-; CHECK: [[IF_THEN2:.*:]]
-; CHECK: [[IF_THEN:.*:]]
-; CHECK: [[IF_THEN3:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
-; CHECK: [[IF_MERGE:.*:]]
-; CHECK: [[FOR_END:.*:]]
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ %iv.next, %if.merge ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float, ptr %in, i64 %iv
- %in_val = load float, ptr %arrayidx, align 4
- %if_cond = fcmp olt float %in_val, %x
- br i1 %if_cond, label %if.then, label %if.merge
-
-if.then:
- %call = tail call { float, float } @llvm.sincos.f32(float %in_val)
- %extract_a = extractvalue { float, float } %call, 0
- %extract_b = extractvalue { float, float } %call, 1
- %arrayidx2 = getelementptr inbounds float, ptr %out_a, i64 %iv
- store float %extract_a, ptr %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds float, ptr %out_b, i64 %iv
- store float %extract_b, ptr %arrayidx4, align 4
- br label %if.merge
-
-if.merge:
- %iv.next = add nuw nsw i64 %iv, 1
- %cond = icmp slt i64 %iv.next, 1024
- br i1 %cond, label %for.body, label %for.end
-
-for.end:
- ret void
-}
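The CHECK lines in the tests above are autogenerated (see the NOTE/UTC_ARGS line at the top of the new test). As a rough sketch, assuming a built LLVM checkout with the opt binary under build/bin, they can be regenerated from the source tree root with the same arguments, for example:

  # build/bin/opt is an assumed build location; adjust to your build directory.
  llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
    --filter "(:|sincos|modf|extract|store)" --version 5 \
    llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll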