[llvm] a4ac847 - [RISCV] Add some tests showing vsetvli cleanup opportunities

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 2 01:50:02 PDT 2021


Author: Fraser Cormack
Date: 2021-04-02T09:43:04+01:00
New Revision: a4ac847c8ef0cf9c04c7ea2be0b34bb565cc7056

URL: https://github.com/llvm/llvm-project/commit/a4ac847c8ef0cf9c04c7ea2be0b34bb565cc7056
DIFF: https://github.com/llvm/llvm-project/commit/a4ac847c8ef0cf9c04c7ea2be0b34bb565cc7056.diff

LOG: [RISCV] Add some tests showing vsetvli cleanup opportunities

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D99717

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
    llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
index 5e97df06470c..6975cf7909a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
@@ -30,3 +30,36 @@ define void @test_vsetvlimax_e64m8() nounwind {
   call i32 @llvm.riscv.vsetvlimax.i32(i32 3, i32 3)
   ret void
 }
+
+declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i32(<vscale x 4 x i32>*, i32)
+
+; Check that we remove the redundant vsetvli when followed by another operation
+; FIXME: We don't
+define <vscale x 4 x i32> @redundant_vsetvli(i32 %avl, <vscale x 4 x i32>* %ptr) nounwind {
+; CHECK-LABEL: redundant_vsetvli:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %vl = call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 2, i32 1)
+  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i32(<vscale x 4 x i32>* %ptr, i32 %vl)
+  ret <vscale x 4 x i32> %x
+}
+
+; Check that we remove the repeated/redundant vsetvli when followed by another
+; operation
+; FIXME: We don't
+define <vscale x 4 x i32> @repeated_vsetvli(i32 %avl, <vscale x 4 x i32>* %ptr) nounwind {
+; CHECK-LABEL: repeated_vsetvli:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %vl0 = call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 2, i32 1)
+  %vl1 = call i32 @llvm.riscv.vsetvli.i32(i32 %vl0, i32 2, i32 1)
+  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i32(<vscale x 4 x i32>* %ptr, i32 %vl1)
+  ret <vscale x 4 x i32> %x
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll
index 78d1008ce28b..9e693a876de8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll
@@ -48,3 +48,36 @@ define void @test_vsetvlimax_e64m4() nounwind {
   call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 2)
   ret void
 }
+
+declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32>*, i64)
+
+; Check that we remove the redundant vsetvli when followed by another operation
+; FIXME: We don't
+define <vscale x 4 x i32> @redundant_vsetvli(i64 %avl, <vscale x 4 x i32>* %ptr) nounwind {
+; CHECK-LABEL: redundant_vsetvli:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)
+  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32>* %ptr, i64 %vl)
+  ret <vscale x 4 x i32> %x
+}
+
+; Check that we remove the repeated/redundant vsetvli when followed by another
+; operation
+; FIXME: We don't
+define <vscale x 4 x i32> @repeated_vsetvli(i64 %avl, <vscale x 4 x i32>* %ptr) nounwind {
+; CHECK-LABEL: repeated_vsetvli:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %vl0 = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)
+  %vl1 = call i64 @llvm.riscv.vsetvli.i64(i64 %vl0, i64 2, i64 1)
+  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32>* %ptr, i64 %vl1)
+  ret <vscale x 4 x i32> %x
+}


        


More information about the llvm-commits mailing list