[llvm] 278a3ea - [RISCV] Use vmv.v.i vd, 0 instead of vmv.v.x vd, x0 for llvm.riscv.vfmv.v.f with 0.0

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 11 15:08:30 PST 2021


Author: Craig Topper
Date: 2021-01-11T15:08:05-08:00
New Revision: 278a3ea1b27089dbc7060e02f612c8a3bf137bb2

URL: https://github.com/llvm/llvm-project/commit/278a3ea1b27089dbc7060e02f612c8a3bf137bb2
DIFF: https://github.com/llvm/llvm-project/commit/278a3ea1b27089dbc7060e02f612c8a3bf137bb2.diff

LOG: [RISCV] Use vmv.v.i vd, 0 instead of vmv.v.x vd, x0 for llvm.riscv.vfmv.v.f with 0.0

This matches what we use for integer 0. It's also consistent with
the scalar 'mv' pseudo that uses addi rather than add with x0.
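
For illustration only (not part of the commit), a minimal IR example in the
style of the tests below shows the effect of the change; the destination
register v8 is an assumption, the tests themselves only match {{v[0-9]+}}:

declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(float, i32)

define <vscale x 1 x float> @splat_zero(i32 %vl) nounwind {
  %v = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
    float 0.0,
    i32 %vl)
  ret <vscale x 1 x float> %v
}

; Before this patch:          After this patch:
;   vmv.v.x v8, zero            vmv.v.i v8, 0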

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 8f494d54ee64..32d208046174 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3363,8 +3363,8 @@ foreach fvti = AllFloatVectors in {
-  // If we're splatting fpimm0, use vmv.v.x vd, x0.
+  // If we're splatting fpimm0, use vmv.v.i vd, 0.
   def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
                          (fvti.Scalar (fpimm0)), GPR:$vl)),
-            (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
-             X0, (NoX0 GPR:$vl), fvti.SEW)>;
+            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
+             0, (NoX0 GPR:$vl), fvti.SEW)>;
 
   def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
                          (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
index b3a095d34572..1a49ad3f67a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
@@ -244,7 +244,7 @@ define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
     i32 %0)
@@ -252,11 +252,11 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16(i32 %0) nounwind {
+define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
     i32 %0)
@@ -264,11 +264,11 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16(i32 %0) nounwind {
+define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
     i32 %0)
@@ -276,11 +276,11 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16(i32 %0) nounwind {
+define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
     i32 %0)
@@ -288,11 +288,11 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16(i32 %0) nounwind {
+define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
     i32 %0)
@@ -300,11 +300,11 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16(i32 %0) nounwind {
+define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
     i32 %0)
@@ -312,11 +312,11 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32(i32 %0) nounwind {
+define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
     i32 %0)
@@ -324,11 +324,11 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32(i32 %0) nounwind {
+define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
     i32 %0)
@@ -336,11 +336,11 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32(i32 %0) nounwind {
+define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
     i32 %0)
@@ -348,11 +348,11 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32(i32 %0) nounwind {
+define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
     i32 %0)
@@ -360,11 +360,11 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32(i32 %0) nounwind {
+define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
     i32 %0)
@@ -372,11 +372,11 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64(i32 %0) nounwind {
+define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m1,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
     i32 %0)
@@ -384,11 +384,11 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64(i32 %0) nounwind {
+define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
     i32 %0)
@@ -396,11 +396,11 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64(i32 %0) nounwind {
+define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m4,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
     i32 %0)
@@ -408,11 +408,11 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64(i32 %0) nounwind {
+define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m8,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,
     i32 %0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
index f781c1f0397b..4c6b7a494449 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
@@ -244,7 +244,7 @@ define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i64 %0) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
     i64 %0)
@@ -252,11 +252,11 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16(i64 %0) nounwind {
+define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
     i64 %0)
@@ -264,11 +264,11 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16(i64 %0) nounwind {
+define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
     i64 %0)
@@ -276,11 +276,11 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16(i64 %0) nounwind {
+define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
     i64 %0)
@@ -288,11 +288,11 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16(i64 %0) nounwind {
+define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
     i64 %0)
@@ -300,11 +300,11 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16(i64 %0) nounwind {
+define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
     i64 %0)
@@ -312,11 +312,11 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32(i64 %0) nounwind {
+define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
     i64 %0)
@@ -324,11 +324,11 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32(i64 %0) nounwind {
+define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
     i64 %0)
@@ -336,11 +336,11 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32(i64 %0) nounwind {
+define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
     i64 %0)
@@ -348,11 +348,11 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32(i64 %0) nounwind {
+define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
     i64 %0)
@@ -360,11 +360,11 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32(i64 %0) nounwind {
+define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
     i64 %0)
@@ -372,11 +372,11 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64(i64 %0) nounwind {
+define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m1,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
     i64 %0)
@@ -384,11 +384,11 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64(i64 %0) nounwind {
+define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m2,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
     i64 %0)
@@ -396,11 +396,11 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64(i64 %0) nounwind {
+define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m4,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
     i64 %0)
@@ -408,11 +408,11 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64(i64 %0) nounwind {
+define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m8,ta,mu
-; CHECK:       vmv.v.x {{v[0-9]+}}, zero
+; CHECK:       vmv.v.i {{v[0-9]+}}, 0
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,
     i64 %0)
