[llvm] 0dff8a9 - [RISCV] Handle vmv.x.s intrinsic for i64 vectors on RV32.
Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Thu Mar 11 09:40:17 PST 2021
Author: Craig Topper
Date: 2021-03-11T09:39:50-08:00
New Revision: 0dff8a96278fcafccb9b5d5181f7f8d37f4c3217
URL: https://github.com/llvm/llvm-project/commit/0dff8a96278fcafccb9b5d5181f7f8d37f4c3217
DIFF: https://github.com/llvm/llvm-project/commit/0dff8a96278fcafccb9b5d5181f7f8d37f4c3217.diff
LOG: [RISCV] Handle vmv.x.s intrinsic for i64 vectors on RV32.
Reviewed By: frasercrmck
Differential Revision: https://reviews.llvm.org/D98372
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2c51964c2780..9885feda1e9b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3557,12 +3557,38 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
"Don't know how to custom type legalize this intrinsic!");
case Intrinsic::riscv_vmv_x_s: {
EVT VT = N->getValueType(0);
- assert((VT == MVT::i8 || VT == MVT::i16 ||
- (Subtarget.is64Bit() && VT == MVT::i32)) &&
- "Unexpected custom legalisation!");
- SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
- Subtarget.getXLenVT(), N->getOperand(1));
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
+ MVT XLenVT = Subtarget.getXLenVT();
+ if (VT.bitsLT(XLenVT)) {
+ // Simple case just extract using vmv.x.s and truncate.
+ SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
+ Subtarget.getXLenVT(), N->getOperand(1));
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
+ return;
+ }
+
+ assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
+ "Unexpected custom legalization");
+
+ // We need to do the move in two steps.
+ SDValue Vec = N->getOperand(1);
+ MVT VecVT = Vec.getSimpleValueType();
+
+ // First extract the lower XLEN bits of the element.
+ SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
+
+ // To extract the upper XLEN bits of the vector element, shift the first
+ // element right by 32 bits and re-extract the lower XLEN bits.
+ SDValue VL = DAG.getConstant(1, DL, XLenVT);
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
+ SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
+ SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
+ DAG.getConstant(32, DL, XLenVT), VL);
+ SDValue LShr32 =
+ DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
+ SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
+
+ Results.push_back(
+ DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
break;
}
}
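For reference, a minimal host-side C++ sketch of what the two-step extraction above computes (illustrative only, not LLVM code; the element value is an arbitrary example): EltLo and EltHi mirror the two VMV_X_S results, and the BUILD_PAIR concatenates them into the i64 result.

#include <cassert>
#include <cstdint>

// Scalar model of the lowering above: on RV32 the i64 element is produced as
// two XLEN-sized (32-bit) halves and then recombined via BUILD_PAIR.
int main() {
  uint64_t Elt = 0x1122334455667788ULL;               // first element of the vector
  uint32_t EltLo = static_cast<uint32_t>(Elt);        // vmv.x.s on the element
  uint32_t EltHi = static_cast<uint32_t>(Elt >> 32);  // vmv.x.s after vsrl.vx by 32
  uint64_t Pair = (static_cast<uint64_t>(EltHi) << 32) | EltLo;  // BUILD_PAIR(EltLo, EltHi)
  assert(Pair == Elt);
  return 0;
}

The same pairing shows up in the tests below: the low half is returned in a0 and the high half in a1, matching the ILP32 calling convention for i64 return values.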
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
index 0051d90b6288..bdc5b1548ab6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
@@ -234,3 +234,67 @@ entry:
%a = call i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32> %0)
ret i32 %a
}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv1i64( <vscale x 1 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv1i64(<vscale x 1 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi a0, zero, 32
+; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v8, a0
+; CHECK-NEXT: vmv.x.s a1, v25
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64( <vscale x 1 x i64> %0)
+ ret i64 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv2i64( <vscale x 2 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv2i64(<vscale x 2 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi a0, zero, 32
+; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
+; CHECK-NEXT: vsrl.vx v26, v8, a0
+; CHECK-NEXT: vmv.x.s a1, v26
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call i64 @llvm.riscv.vmv.x.s.nxv2i64( <vscale x 2 x i64> %0)
+ ret i64 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv4i64( <vscale x 4 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv4i64(<vscale x 4 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi a0, zero, 32
+; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
+; CHECK-NEXT: vsrl.vx v28, v8, a0
+; CHECK-NEXT: vmv.x.s a1, v28
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call i64 @llvm.riscv.vmv.x.s.nxv4i64( <vscale x 4 x i64> %0)
+ ret i64 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv8i64(<vscale x 8 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi a0, zero, 32
+; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
+; CHECK-NEXT: vsrl.vx v16, v8, a0
+; CHECK-NEXT: vmv.x.s a1, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> %0)
+ ret i64 %a
+}