[llvm] 5cc2501 - [RISCV] Merge rv32-vsetvli-intrinsics.ll and rv64-vsetvli-intrinsics.ll into a single test using sed. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 2 11:26:28 PST 2023


Author: Craig Topper
Date: 2023-02-02T11:26:12-08:00
New Revision: 5cc2501136c7676c0fc9aabdb9a6c5b50bbe2e41

URL: https://github.com/llvm/llvm-project/commit/5cc2501136c7676c0fc9aabdb9a6c5b50bbe2e41
DIFF: https://github.com/llvm/llvm-project/commit/5cc2501136c7676c0fc9aabdb9a6c5b50bbe2e41.diff

LOG: [RISCV] Merge rv32-vsetvli-intrinsics.ll and rv64-vsetvli-intrinsics.ll into a single test using sed. NFC

Added: 
    llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
    llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
deleted file mode 100644
index cc9a55d3d5201..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
+++ /dev/null
@@ -1,127 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-
-declare i32 @llvm.riscv.vsetvli.i32(i32, i32, i32)
-declare i32 @llvm.riscv.vsetvlimax.i32(i32, i32)
-declare i32 @llvm.riscv.vsetvli.opt.i32(i32, i32, i32)
-declare i32 @llvm.riscv.vsetvlimax.opt.i32(i32, i32)
-
-define void @test_vsetvli_e64mf8(i32 %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_e64mf8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, mf8, ta, mu
-; CHECK-NEXT:    ret
-  call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 3, i32 5)
-  ret void
-}
-
-define void @test_vsetvli_e8mf2_zero_avl() nounwind {
-; CHECK-LABEL: test_vsetvli_e8mf2_zero_avl:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e8, mf2, ta, mu
-; CHECK-NEXT:    ret
-  call i32 @llvm.riscv.vsetvli.i32(i32 0, i32 0, i32 7)
-  ret void
-}
-
-define void @test_vsetvlimax_e64m8() nounwind {
-; CHECK-LABEL: test_vsetvlimax_e64m8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    ret
-  call i32 @llvm.riscv.vsetvlimax.i32(i32 3, i32 3)
-  ret void
-}
-
-define i32 @test_vsetvli_opt_e8m1(i32 %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e8m1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i32 @llvm.riscv.vsetvli.opt.i32(i32 %avl, i32 0, i32 0)
-  ret i32 %vl
-}
-
-; Check that we remove the intrinsic if it's unused.
-define void @test_vsetvli_opt_e8m1_nouse(i32 %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e8m1_nouse:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ret
-  call i32 @llvm.riscv.vsetvli.opt.i32(i32 %avl, i32 0, i32 0)
-  ret void
-}
-
-define i32 @test_vsetvli_opt_e16mf4(i32 %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e16mf4:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i32 @llvm.riscv.vsetvli.opt.i32(i32 %avl, i32 1, i32 6)
-  ret i32 %vl
-}
-
-define i32 @test_vsetvli_opt_e32mf8_zero_avl() nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e32mf8_zero_avl:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i32 @llvm.riscv.vsetvli.opt.i32(i32 0, i32 1, i32 6)
-  ret i32 %vl
-}
-
-define i32 @test_vsetvlimax_opt_e32m2() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e32m2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 2, i32 1)
-  ret i32 %vl
-}
-
-define void @test_vsetvlimax_opt_e32m2_nouse() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e32m2_nouse:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ret
-  call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 2, i32 1)
-  ret void
-}
-
-define i32 @test_vsetvlimax_opt_e64m4() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e64m4:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 3, i32 2)
-  ret i32 %vl
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32)
-
-; Check that we remove the redundant vsetvli when followed by another operation
-define <vscale x 4 x i32> @redundant_vsetvli(i32 %avl, <vscale x 4 x i32>* %ptr) nounwind {
-; CHECK-LABEL: redundant_vsetvli:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    ret
-  %vl = call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 2, i32 1)
-  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i32(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr, i32 %vl)
-  ret <vscale x 4 x i32> %x
-}
-
-; Check that we remove the repeated/redundant vsetvli when followed by another
-; operation
-; FIXME: We don't catch the second vsetvli because it has a use of its output.
-; We could replace it with the output of the first vsetvli.
-define <vscale x 4 x i32> @repeated_vsetvli(i32 %avl, <vscale x 4 x i32>* %ptr) nounwind {
-; CHECK-LABEL: repeated_vsetvli:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    ret
-  %vl0 = call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 2, i32 1)
-  %vl1 = call i32 @llvm.riscv.vsetvli.i32(i32 %vl0, i32 2, i32 1)
-  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i32(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr, i32 %vl1)
-  ret <vscale x 4 x i32> %x
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll
deleted file mode 100644
index 0aea057b3bbd7..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll
+++ /dev/null
@@ -1,145 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-
-declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64)
-declare i64 @llvm.riscv.vsetvlimax.i64(i64, i64)
-declare i64 @llvm.riscv.vsetvli.opt.i64(i64, i64, i64)
-declare i64 @llvm.riscv.vsetvlimax.opt.i64(i64, i64)
-
-define void @test_vsetvli_e8m1(i64 %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_e8m1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    ret
-  call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 0, i64 0)
-  ret void
-}
-
-define void @test_vsetvli_e16mf4(i64 %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_e16mf4:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 1, i64 6)
-  ret void
-}
-
-define void @test_vsetvli_e32mf8_zero_avl() nounwind {
-; CHECK-LABEL: test_vsetvli_e32mf8_zero_avl:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  call i64 @llvm.riscv.vsetvli.i64(i64 0, i64 1, i64 6)
-  ret void
-}
-
-define void @test_vsetvlimax_e32m2() nounwind {
-; CHECK-LABEL: test_vsetvlimax_e32m2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    ret
-  call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 1)
-  ret void
-}
-
-define void @test_vsetvlimax_e64m4() nounwind {
-; CHECK-LABEL: test_vsetvlimax_e64m4:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    ret
-  call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 2)
-  ret void
-}
-
-define i64 @test_vsetvli_opt_e8m1(i64 %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e8m1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i64 @llvm.riscv.vsetvli.opt.i64(i64 %avl, i64 0, i64 0)
-  ret i64 %vl
-}
-
-; Check that we remove the intrinsic if it's unused.
-define void @test_vsetvli_opt_e8m1_nouse(i64 %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e8m1_nouse:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ret
-  call i64 @llvm.riscv.vsetvli.opt.i64(i64 %avl, i64 0, i64 0)
-  ret void
-}
-
-define i64 @test_vsetvli_opt_e16mf4(i64 %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e16mf4:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i64 @llvm.riscv.vsetvli.opt.i64(i64 %avl, i64 1, i64 6)
-  ret i64 %vl
-}
-
-define i64 @test_vsetvli_opt_e32mf8_zero_avl() nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e32mf8_zero_avl:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i64 @llvm.riscv.vsetvli.opt.i64(i64 0, i64 1, i64 6)
-  ret i64 %vl
-}
-
-define i64 @test_vsetvlimax_opt_e32m2() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e32m2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 2, i64 1)
-  ret i64 %vl
-}
-
-define void @test_vsetvlimax_opt_e32m2_nouse() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e32m2_nouse:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ret
-  call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 2, i64 1)
-  ret void
-}
-
-define i64 @test_vsetvlimax_opt_e64m4() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e64m4:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 3, i64 2)
-  ret i64 %vl
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32>, <vscale x 4 x i32>*, i64)
-
-; Check that we remove the redundant vsetvli when followed by another operation
-define <vscale x 4 x i32> @redundant_vsetvli(i64 %avl, <vscale x 4 x i32>* %ptr) nounwind {
-; CHECK-LABEL: redundant_vsetvli:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    ret
-  %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)
-  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr, i64 %vl)
-  ret <vscale x 4 x i32> %x
-}
-
-; Check that we remove the repeated/redundant vsetvli when followed by another
-; operation
-; FIXME: We don't catch the second vsetvli because it has a use of its output.
-; We could replace it with the output of the first vsetvli.
-define <vscale x 4 x i32> @repeated_vsetvli(i64 %avl, <vscale x 4 x i32>* %ptr) nounwind {
-; CHECK-LABEL: repeated_vsetvli:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    ret
-  %vl0 = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)
-  %vl1 = call i64 @llvm.riscv.vsetvli.i64(i64 %vl0, i64 2, i64 1)
-  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr, i64 %vl1)
-  ret <vscale x 4 x i32> %x
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
new file mode 100644
index 0000000000000..c5fdcd72c8fa2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
@@ -0,0 +1,175 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+
+declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen, iXLen)
+declare iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen, iXLen)
+declare iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen, iXLen, iXLen)
+declare iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen, iXLen)
+
+define void @test_vsetvli_e8m1(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_e8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+  ret void
+}
+
+define void @test_vsetvli_e16mf4(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_e16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 1, iXLen 6)
+  ret void
+}
+
+define void @test_vsetvli_e64mf8(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_e64mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, mf8, ta, mu
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 3, iXLen 5)
+  ret void
+}
+
+define void @test_vsetvli_e8mf2_zero_avl() nounwind {
+; CHECK-LABEL: test_vsetvli_e8mf2_zero_avl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e8, mf2, ta, mu
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 0, iXLen 7)
+  ret void
+}
+
+define void @test_vsetvli_e32mf8_zero_avl() nounwind {
+; CHECK-LABEL: test_vsetvli_e32mf8_zero_avl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 1, iXLen 6)
+  ret void
+}
+
+define void @test_vsetvlimax_e32m2() nounwind {
+; CHECK-LABEL: test_vsetvlimax_e32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
+  ret void
+}
+
+define void @test_vsetvlimax_e64m4() nounwind {
+; CHECK-LABEL: test_vsetvlimax_e64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 2)
+  ret void
+}
+
+define void @test_vsetvlimax_e64m8() nounwind {
+; CHECK-LABEL: test_vsetvlimax_e64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 3)
+  ret void
+}
+
+define iXLen @test_vsetvli_opt_e8m1(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_opt_e8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, mu
+; CHECK-NEXT:    ret
+  %vl = call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+  ret iXLen %vl
+}
+
+; Check that we remove the intrinsic if it's unused.
+define void @test_vsetvli_opt_e8m1_nouse(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_opt_e8m1_nouse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+  ret void
+}
+
+define iXLen @test_vsetvli_opt_e16mf4(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_opt_e16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    ret
+  %vl = call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen %avl, iXLen 1, iXLen 6)
+  ret iXLen %vl
+}
+
+define iXLen @test_vsetvli_opt_e32mf8_zero_avl() nounwind {
+; CHECK-LABEL: test_vsetvli_opt_e32mf8_zero_avl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, mu
+; CHECK-NEXT:    ret
+  %vl = call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen 0, iXLen 1, iXLen 6)
+  ret iXLen %vl
+}
+
+define iXLen @test_vsetvlimax_opt_e32m2() nounwind {
+; CHECK-LABEL: test_vsetvlimax_opt_e32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    ret
+  %vl = call iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen 2, iXLen 1)
+  ret iXLen %vl
+}
+
+define void @test_vsetvlimax_opt_e32m2_nouse() nounwind {
+; CHECK-LABEL: test_vsetvlimax_opt_e32m2_nouse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
+  call iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen 2, iXLen 1)
+  ret void
+}
+
+define iXLen @test_vsetvlimax_opt_e64m4() nounwind {
+; CHECK-LABEL: test_vsetvlimax_opt_e64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    ret
+  %vl = call iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen 3, iXLen 2)
+  ret iXLen %vl
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32>, <vscale x 4 x i32>*, iXLen)
+
+; Check that we remove the redundant vsetvli when followed by another operation
+define <vscale x 4 x i32> @redundant_vsetvli(iXLen %avl, <vscale x 4 x i32>* %ptr) nounwind {
+; CHECK-LABEL: redundant_vsetvli:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
+  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr, iXLen %vl)
+  ret <vscale x 4 x i32> %x
+}
+
+; Check that we remove the repeated/redundant vsetvli when followed by another
+; operation
+; FIXME: We don't catch the second vsetvli because it has a use of its output.
+; We could replace it with the output of the first vsetvli.
+define <vscale x 4 x i32> @repeated_vsetvli(iXLen %avl, <vscale x 4 x i32>* %ptr) nounwind {
+; CHECK-LABEL: repeated_vsetvli:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %vl0 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
+  %vl1 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %vl0, iXLen 2, iXLen 1)
+  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr, iXLen %vl1)
+  ret <vscale x 4 x i32> %x
+}


        


More information about the llvm-commits mailing list