[llvm] 05f82dc - [RISCV] Fix incorrect cases of vfmv.s.f in the VSETVLI insert pass.

via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 30 22:17:20 PST 2021


Author: jacquesguan
Date: 2021-12-31T14:17:03+08:00
New Revision: 05f82dc877a81b99c91a3f2e81dae895361ce1c7

URL: https://github.com/llvm/llvm-project/commit/05f82dc877a81b99c91a3f2e81dae895361ce1c7
DIFF: https://github.com/llvm/llvm-project/commit/05f82dc877a81b99c91a3f2e81dae895361ce1c7.diff

LOG: [RISCV] Fix incorrect cases of vfmv.s.f in the VSETVLI insert pass.

Fix incorrect cases of vfmv.s.f and add test cases for it.

Differential Revision: https://reviews.llvm.org/D116432

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index eab9ee916fd2e..15a75ba411c04 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -467,27 +467,27 @@ static bool isScalarMoveInstr(const MachineInstr &MI) {
   case RISCV::PseudoVMV_S_X_MF2:
   case RISCV::PseudoVMV_S_X_MF4:
   case RISCV::PseudoVMV_S_X_MF8:
-  case RISCV::PseudoVFMV_F16_S_M1:
-  case RISCV::PseudoVFMV_F16_S_M2:
-  case RISCV::PseudoVFMV_F16_S_M4:
-  case RISCV::PseudoVFMV_F16_S_M8:
-  case RISCV::PseudoVFMV_F16_S_MF2:
-  case RISCV::PseudoVFMV_F16_S_MF4:
-  case RISCV::PseudoVFMV_F16_S_MF8:
-  case RISCV::PseudoVFMV_F32_S_M1:
-  case RISCV::PseudoVFMV_F32_S_M2:
-  case RISCV::PseudoVFMV_F32_S_M4:
-  case RISCV::PseudoVFMV_F32_S_M8:
-  case RISCV::PseudoVFMV_F32_S_MF2:
-  case RISCV::PseudoVFMV_F32_S_MF4:
-  case RISCV::PseudoVFMV_F32_S_MF8:
-  case RISCV::PseudoVFMV_F64_S_M1:
-  case RISCV::PseudoVFMV_F64_S_M2:
-  case RISCV::PseudoVFMV_F64_S_M4:
-  case RISCV::PseudoVFMV_F64_S_M8:
-  case RISCV::PseudoVFMV_F64_S_MF2:
-  case RISCV::PseudoVFMV_F64_S_MF4:
-  case RISCV::PseudoVFMV_F64_S_MF8:
+  case RISCV::PseudoVFMV_S_F16_M1:
+  case RISCV::PseudoVFMV_S_F16_M2:
+  case RISCV::PseudoVFMV_S_F16_M4:
+  case RISCV::PseudoVFMV_S_F16_M8:
+  case RISCV::PseudoVFMV_S_F16_MF2:
+  case RISCV::PseudoVFMV_S_F16_MF4:
+  case RISCV::PseudoVFMV_S_F16_MF8:
+  case RISCV::PseudoVFMV_S_F32_M1:
+  case RISCV::PseudoVFMV_S_F32_M2:
+  case RISCV::PseudoVFMV_S_F32_M4:
+  case RISCV::PseudoVFMV_S_F32_M8:
+  case RISCV::PseudoVFMV_S_F32_MF2:
+  case RISCV::PseudoVFMV_S_F32_MF4:
+  case RISCV::PseudoVFMV_S_F32_MF8:
+  case RISCV::PseudoVFMV_S_F64_M1:
+  case RISCV::PseudoVFMV_S_F64_M2:
+  case RISCV::PseudoVFMV_S_F64_M4:
+  case RISCV::PseudoVFMV_S_F64_M8:
+  case RISCV::PseudoVFMV_S_F64_MF2:
+  case RISCV::PseudoVFMV_S_F64_MF4:
+  case RISCV::PseudoVFMV_S_F64_MF8:
     return true;
   }
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 7b97b72c95870..c8c50ac8dca9a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -190,6 +190,55 @@ entry:
   ret <vscale x 1 x i64> %y
 }
 
+define <vscale x 1 x double> @test10(<vscale x 1 x double> %a, double %b) nounwind {
+; CHECK-LABEL: test10:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    ret
+entry:
+  %x = tail call i64 @llvm.riscv.vsetvlimax(i64 3, i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %a, double %b, i64 1)
+  ret <vscale x 1 x double> %y
+}
+
+define <vscale x 1 x double> @test11(<vscale x 1 x double> %a, double %b) nounwind {
+; CHECK-LABEL: test11:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetivli a0, 6, e64, m1, tu, mu
+; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    ret
+entry:
+  %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %a, double %b, i64 2)
+  ret <vscale x 1 x double> %y
+}
+
+define <vscale x 1 x double> @test12(<vscale x 1 x double> %a, double %b, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: test12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetivli zero, 9, e64, m1, tu, mu
+; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    ret
+entry:
+  %x = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %a,
+    <vscale x 1 x i1> %mask,
+    i64 9,
+    i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %x, double %b, i64 2)
+  ret <vscale x 1 x double> %y
+}
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -198,10 +247,24 @@ declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   i64,
   i64);
 
+declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64,
+  i64);
+
 declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(
   <vscale x 1 x i64>,
   i64,
   i64);
+
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64
+  (<vscale x 1 x double>,
+  double,
+  i64)
+
 declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
 declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>* nocapture, i64)
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)


        


More information about the llvm-commits mailing list