[llvm] [RISCV] Fold PseudoVMV_V_V with undef passthru (PR #106840)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Sat Aug 31 02:32:50 PDT 2024
https://github.com/lukel97 created https://github.com/llvm/llvm-project/pull/106840
If a vmv.v.v has an undef passthru, we can simply replace it with its input operand, since the tail is completely undefined.
The intrinsic tests in vmv.v.v.ll were updated to take a passthru operand so that they aren't all folded away.
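For illustration only (not part of the patch), here is a minimal IR-level sketch of the fold, modelled on the undef_passthru test added below. The function name is made up, and iXLen is written as i64 here (i.e. assuming RV64); the intrinsic and its signature are the ones already declared in vmv.v.v.ll, and the actual fold happens later on the PseudoVMV_V_V machine instruction.

  ; Sketch: the passthru is undef, so every tail element is already
  ; undefined and the whole vmv.v.v copy can be elided.
  define <vscale x 1 x i64> @sketch_undef_passthru(<vscale x 1 x i64> %v, i64 %avl) {
    %x = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
                <vscale x 1 x i64> undef, <vscale x 1 x i64> %v, i64 %avl)
    ret <vscale x 1 x i64> %x
  }

With the peephole, the vmv.v.v disappears and the function lowers to a bare ret (the value stays in v8), which is exactly what the updated CHECK lines in vmv.v.v-peephole.ll show at the end of this patch.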
From b89b57696565b6fe0f91dc1d39e74837ba6f0695 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Sat, 31 Aug 2024 17:20:17 +0800
Subject: [PATCH 1/2] Precommit test, add passthru to existing vmv.v.v
intrinsic tests
---
.../CodeGen/RISCV/rvv/vmv.v.v-peephole.ll | 10 +
llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll | 430 +++++++++---------
2 files changed, 225 insertions(+), 215 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
index 3952e48c5c28fc..683786004e0871 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
@@ -194,3 +194,13 @@ define <vscale x 2 x i32> @unfoldable_mismatched_sew(<vscale x 2 x i32> %passthr
%b = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a.bitcast, iXLen %avl)
ret <vscale x 2 x i32> %b
}
+
+define <vscale x 1 x i64> @undef_passthru(<vscale x 1 x i64> %v, iXLen %avl) {
+; CHECK-LABEL: undef_passthru:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: ret
+ %x = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.vnxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %v, iXLen %avl)
+ ret <vscale x 1 x i64> %x
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll
index 7217c2cfafca29..784b807a6a2e54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll
@@ -9,17 +9,17 @@ declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
<vscale x 1 x i8>,
iXLen);
-define <vscale x 1 x i8> @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
- <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
- iXLen %1)
+ <vscale x 1 x i8> %1,
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -29,17 +29,17 @@ declare <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
<vscale x 2 x i8>,
iXLen);
-define <vscale x 2 x i8> @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
- <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
- iXLen %1)
+ <vscale x 2 x i8> %1,
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -49,17 +49,17 @@ declare <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
<vscale x 4 x i8>,
iXLen);
-define <vscale x 4 x i8> @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
- <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
- iXLen %1)
+ <vscale x 4 x i8> %1,
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -69,17 +69,17 @@ declare <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
<vscale x 8 x i8>,
iXLen);
-define <vscale x 8 x i8> @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
- <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
- iXLen %1)
+ <vscale x 8 x i8> %1,
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -89,17 +89,17 @@ declare <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
<vscale x 16 x i8>,
iXLen);
-define <vscale x 16 x i8> @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
- <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
- iXLen %1)
+ <vscale x 16 x i8> %1,
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -109,17 +109,17 @@ declare <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
<vscale x 32 x i8>,
iXLen);
-define <vscale x 32 x i8> @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
- <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
- iXLen %1)
+ <vscale x 32 x i8> %1,
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -129,17 +129,17 @@ declare <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
<vscale x 64 x i8>,
iXLen);
-define <vscale x 64 x i8> @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
- <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
- iXLen %1)
+ <vscale x 64 x i8> %1,
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -149,17 +149,17 @@ declare <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
<vscale x 1 x i16>,
iXLen);
-define <vscale x 1 x i16> @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
- <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
- iXLen %1)
+ <vscale x 1 x i16> %1,
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -169,17 +169,17 @@ declare <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
<vscale x 2 x i16>,
iXLen);
-define <vscale x 2 x i16> @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
- <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
- iXLen %1)
+ <vscale x 2 x i16> %1,
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -189,17 +189,17 @@ declare <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
<vscale x 4 x i16>,
iXLen);
-define <vscale x 4 x i16> @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
- <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
- iXLen %1)
+ <vscale x 4 x i16> %1,
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -209,17 +209,17 @@ declare <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
<vscale x 8 x i16>,
iXLen);
-define <vscale x 8 x i16> @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
- <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
- iXLen %1)
+ <vscale x 8 x i16> %1,
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -229,17 +229,17 @@ declare <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
<vscale x 16 x i16>,
iXLen);
-define <vscale x 16 x i16> @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
- <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
- iXLen %1)
+ <vscale x 16 x i16> %1,
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -249,17 +249,17 @@ declare <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
<vscale x 32 x i16>,
iXLen);
-define <vscale x 32 x i16> @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
- <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
- iXLen %1)
+ <vscale x 32 x i16> %1,
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -269,17 +269,17 @@ declare <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
<vscale x 1 x i32>,
iXLen);
-define <vscale x 1 x i32> @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
- <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
- iXLen %1)
+ <vscale x 1 x i32> %1,
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -289,17 +289,17 @@ declare <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
<vscale x 2 x i32>,
iXLen);
-define <vscale x 2 x i32> @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
- <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
- iXLen %1)
+ <vscale x 2 x i32> %1,
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -309,17 +309,17 @@ declare <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
<vscale x 4 x i32>,
iXLen);
-define <vscale x 4 x i32> @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
- <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
- iXLen %1)
+ <vscale x 4 x i32> %1,
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -329,17 +329,17 @@ declare <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
<vscale x 8 x i32>,
iXLen);
-define <vscale x 8 x i32> @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
- <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
- iXLen %1)
+ <vscale x 8 x i32> %1,
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -349,17 +349,17 @@ declare <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
<vscale x 16 x i32>,
iXLen);
-define <vscale x 16 x i32> @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
- <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
- iXLen %1)
+ <vscale x 16 x i32> %1,
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -369,17 +369,17 @@ declare <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
<vscale x 1 x i64>,
iXLen);
-define <vscale x 1 x i64> @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
- <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
- iXLen %1)
+ <vscale x 1 x i64> %1,
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -389,17 +389,17 @@ declare <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
<vscale x 2 x i64>,
iXLen);
-define <vscale x 2 x i64> @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
- <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
- iXLen %1)
+ <vscale x 2 x i64> %1,
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -409,17 +409,17 @@ declare <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
<vscale x 4 x i64>,
iXLen);
-define <vscale x 4 x i64> @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
- <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
- iXLen %1)
+ <vscale x 4 x i64> %1,
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -429,17 +429,17 @@ declare <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
<vscale x 8 x i64>,
iXLen);
-define <vscale x 8 x i64> @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
- <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
- iXLen %1)
+ <vscale x 8 x i64> %1,
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -449,17 +449,17 @@ declare <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
<vscale x 1 x half>,
iXLen);
-define <vscale x 1 x half> @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
- <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
- iXLen %1)
+ <vscale x 1 x half> %1,
+ iXLen %2)
ret <vscale x 1 x half> %a
}
@@ -469,17 +469,17 @@ declare <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
<vscale x 2 x half>,
iXLen);
-define <vscale x 2 x half> @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
- <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
- iXLen %1)
+ <vscale x 2 x half> %1,
+ iXLen %2)
ret <vscale x 2 x half> %a
}
@@ -489,17 +489,17 @@ declare <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
<vscale x 4 x half>,
iXLen);
-define <vscale x 4 x half> @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
- <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
- iXLen %1)
+ <vscale x 4 x half> %1,
+ iXLen %2)
ret <vscale x 4 x half> %a
}
@@ -509,17 +509,17 @@ declare <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
<vscale x 8 x half>,
iXLen);
-define <vscale x 8 x half> @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
- <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
- iXLen %1)
+ <vscale x 8 x half> %1,
+ iXLen %2)
ret <vscale x 8 x half> %a
}
@@ -529,17 +529,17 @@ declare <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
<vscale x 16 x half>,
iXLen);
-define <vscale x 16 x half> @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
- <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
- iXLen %1)
+ <vscale x 16 x half> %1,
+ iXLen %2)
ret <vscale x 16 x half> %a
}
@@ -549,17 +549,17 @@ declare <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
<vscale x 32 x half>,
iXLen);
-define <vscale x 32 x half> @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
- <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
- iXLen %1)
+ <vscale x 32 x half> %1,
+ iXLen %2)
ret <vscale x 32 x half> %a
}
@@ -569,17 +569,17 @@ declare <vscale x 1 x bfloat> @llvm.riscv.vmv.v.v.nxv1bf16(
<vscale x 1 x bfloat>,
iXLen);
-define <vscale x 1 x bfloat> @intrinsic_vmv.v.v_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+define <vscale x 1 x bfloat> @intrinsic_vmv.v.v_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1bf16_nxv1bf16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x bfloat> @llvm.riscv.vmv.v.v.nxv1bf16(
- <vscale x 1 x bfloat> undef,
<vscale x 1 x bfloat> %0,
- iXLen %1)
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
ret <vscale x 1 x bfloat> %a
}
@@ -589,17 +589,17 @@ declare <vscale x 2 x bfloat> @llvm.riscv.vmv.v.v.nxv2bf16(
<vscale x 2 x bfloat>,
iXLen);
-define <vscale x 2 x bfloat> @intrinsic_vmv.v.v_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+define <vscale x 2 x bfloat> @intrinsic_vmv.v.v_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2bf16_nxv2bf16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x bfloat> @llvm.riscv.vmv.v.v.nxv2bf16(
- <vscale x 2 x bfloat> undef,
<vscale x 2 x bfloat> %0,
- iXLen %1)
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
ret <vscale x 2 x bfloat> %a
}
@@ -609,17 +609,17 @@ declare <vscale x 4 x bfloat> @llvm.riscv.vmv.v.v.nxv4bf16(
<vscale x 4 x bfloat>,
iXLen);
-define <vscale x 4 x bfloat> @intrinsic_vmv.v.v_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+define <vscale x 4 x bfloat> @intrinsic_vmv.v.v_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4bf16_nxv4bf16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x bfloat> @llvm.riscv.vmv.v.v.nxv4bf16(
- <vscale x 4 x bfloat> undef,
<vscale x 4 x bfloat> %0,
- iXLen %1)
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
ret <vscale x 4 x bfloat> %a
}
@@ -629,17 +629,17 @@ declare <vscale x 8 x bfloat> @llvm.riscv.vmv.v.v.nxv8bf16(
<vscale x 8 x bfloat>,
iXLen);
-define <vscale x 8 x bfloat> @intrinsic_vmv.v.v_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+define <vscale x 8 x bfloat> @intrinsic_vmv.v.v_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8bf16_nxv8bf16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x bfloat> @llvm.riscv.vmv.v.v.nxv8bf16(
- <vscale x 8 x bfloat> undef,
<vscale x 8 x bfloat> %0,
- iXLen %1)
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
ret <vscale x 8 x bfloat> %a
}
@@ -649,17 +649,17 @@ declare <vscale x 16 x bfloat> @llvm.riscv.vmv.v.v.nxv16bf16(
<vscale x 16 x bfloat>,
iXLen);
-define <vscale x 16 x bfloat> @intrinsic_vmv.v.v_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+define <vscale x 16 x bfloat> @intrinsic_vmv.v.v_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16bf16_nxv16bf16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x bfloat> @llvm.riscv.vmv.v.v.nxv16bf16(
- <vscale x 16 x bfloat> undef,
<vscale x 16 x bfloat> %0,
- iXLen %1)
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
ret <vscale x 16 x bfloat> %a
}
@@ -669,17 +669,17 @@ declare <vscale x 32 x bfloat> @llvm.riscv.vmv.v.v.nxv32bf16(
<vscale x 32 x bfloat>,
iXLen);
-define <vscale x 32 x bfloat> @intrinsic_vmv.v.v_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+define <vscale x 32 x bfloat> @intrinsic_vmv.v.v_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32bf16_nxv32bf16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x bfloat> @llvm.riscv.vmv.v.v.nxv32bf16(
- <vscale x 32 x bfloat> undef,
<vscale x 32 x bfloat> %0,
- iXLen %1)
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
ret <vscale x 32 x bfloat> %a
}
@@ -689,17 +689,17 @@ declare <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
<vscale x 1 x float>,
iXLen);
-define <vscale x 1 x float> @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
- <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
- iXLen %1)
+ <vscale x 1 x float> %1,
+ iXLen %2)
ret <vscale x 1 x float> %a
}
@@ -709,17 +709,17 @@ declare <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
<vscale x 2 x float>,
iXLen);
-define <vscale x 2 x float> @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
- <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
- iXLen %1)
+ <vscale x 2 x float> %1,
+ iXLen %2)
ret <vscale x 2 x float> %a
}
@@ -729,17 +729,17 @@ declare <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
<vscale x 4 x float>,
iXLen);
-define <vscale x 4 x float> @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
- <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
- iXLen %1)
+ <vscale x 4 x float> %1,
+ iXLen %2)
ret <vscale x 4 x float> %a
}
@@ -749,17 +749,17 @@ declare <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
<vscale x 8 x float>,
iXLen);
-define <vscale x 8 x float> @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
- <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
- iXLen %1)
+ <vscale x 8 x float> %1,
+ iXLen %2)
ret <vscale x 8 x float> %a
}
@@ -769,17 +769,17 @@ declare <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
<vscale x 16 x float>,
iXLen);
-define <vscale x 16 x float> @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
- <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
- iXLen %1)
+ <vscale x 16 x float> %1,
+ iXLen %2)
ret <vscale x 16 x float> %a
}
@@ -789,17 +789,17 @@ declare <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
<vscale x 1 x double>,
iXLen);
-define <vscale x 1 x double> @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
- <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
- iXLen %1)
+ <vscale x 1 x double> %1,
+ iXLen %2)
ret <vscale x 1 x double> %a
}
@@ -809,17 +809,17 @@ declare <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
<vscale x 2 x double>,
iXLen);
-define <vscale x 2 x double> @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
- <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
- iXLen %1)
+ <vscale x 2 x double> %1,
+ iXLen %2)
ret <vscale x 2 x double> %a
}
@@ -829,17 +829,17 @@ declare <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
<vscale x 4 x double>,
iXLen);
-define <vscale x 4 x double> @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
- <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
- iXLen %1)
+ <vscale x 4 x double> %1,
+ iXLen %2)
ret <vscale x 4 x double> %a
}
@@ -849,17 +849,17 @@ declare <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
<vscale x 8 x double>,
iXLen);
-define <vscale x 8 x double> @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
- <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
- iXLen %1)
+ <vscale x 8 x double> %1,
+ iXLen %2)
ret <vscale x 8 x double> %a
}
From 540f0598622b1dfcd700cb19d952e51abb46373e Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Sat, 31 Aug 2024 17:29:01 +0800
Subject: [PATCH 2/2] [RISCV] Fold PseudoVMV_V_V with undef passthru
If a vmv.v.v has an undef passthru, we can simply replace it with its input operand, since the tail is completely undefined.
The intrinsic tests in vmv.v.v.ll were updated to take a passthru operand so that they aren't all folded away.
---
llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp | 16 ++++++++++++++++
.../RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir | 1 -
llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll | 2 --
3 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index 412fd790061a37..2c421c3ca105ab 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -66,6 +66,7 @@ class RISCVVectorPeephole : public MachineFunctionPass {
bool convertToWholeRegister(MachineInstr &MI) const;
bool convertToUnmasked(MachineInstr &MI) const;
bool convertVMergeToVMv(MachineInstr &MI) const;
+ bool foldUndefPassthruVMV_V_V(MachineInstr &MI);
bool foldVMV_V_V(MachineInstr &MI);
bool isAllOnesMask(const MachineInstr *MaskDef) const;
@@ -472,6 +473,20 @@ bool RISCVVectorPeephole::ensureDominates(const MachineOperand &MO,
return true;
}
+/// If a PseudoVMV_V_V's passthru is undef then we can replace it with its input
+bool RISCVVectorPeephole::foldUndefPassthruVMV_V_V(MachineInstr &MI) {
+ if (RISCV::getRVVMCOpcode(MI.getOpcode()) != RISCV::VMV_V_V)
+ return false;
+
+ if (MI.getOperand(1).getReg() != RISCV::NoRegister)
+ return false;
+
+ MRI->replaceRegWith(MI.getOperand(0).getReg(), MI.getOperand(2).getReg());
+ MI.eraseFromParent();
+ V0Defs.erase(&MI);
+ return true;
+}
+
/// If a PseudoVMV_V_V is the only user of its input, fold its passthru and VL
/// into it.
///
@@ -581,6 +596,7 @@ bool RISCVVectorPeephole::runOnMachineFunction(MachineFunction &MF) {
Changed |= convertToUnmasked(MI);
Changed |= convertToWholeRegister(MI);
Changed |= convertVMergeToVMv(MI);
+ Changed |= foldUndefPassthruVMV_V_V(MI);
Changed |= foldVMV_V_V(MI);
}
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
index 1419eede6ca9d1..19a918148e6eb8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
@@ -15,7 +15,6 @@ body: |
; CHECK-NEXT: %avl:gprnox0 = COPY $x1
; CHECK-NEXT: %mask:vmv0 = PseudoVMSET_M_B8 %avl, 5 /* e32 */
; CHECK-NEXT: $v0 = COPY %mask
- ; CHECK-NEXT: %x:vr = PseudoVMV_V_V_M1 $noreg, %true, %avl, 5 /* e32 */, 0 /* tu, mu */
%false:vr = COPY $v8
%true:vr = COPY $v9
%avl:gprnox0 = COPY $x1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
index 683786004e0871..ef827e1839c939 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
@@ -198,8 +198,6 @@ define <vscale x 2 x i32> @unfoldable_mismatched_sew(<vscale x 2 x i32> %passthr
define <vscale x 1 x i64> @undef_passthru(<vscale x 1 x i64> %v, iXLen %avl) {
; CHECK-LABEL: undef_passthru:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.v v8, v8
; CHECK-NEXT: ret
%x = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.vnxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %v, iXLen %avl)
ret <vscale x 1 x i64> %x