[llvm] 29bb7f7 - [RISCV] Add test coverage for profitable vsetvli a0, zero, <vtype> cases

Philip Reames via llvm-commits <llvm-commits at lists.llvm.org>
Wed Dec 13 12:58:34 PST 2023


Author: Philip Reames
Date: 2023-12-13T12:58:25-08:00
New Revision: 29bb7f762bdaffcb22010a8bb92fe0afd6c61cdf

URL: https://github.com/llvm/llvm-project/commit/29bb7f762bdaffcb22010a8bb92fe0afd6c61cdf
DIFF: https://github.com/llvm/llvm-project/commit/29bb7f762bdaffcb22010a8bb92fe0afd6c61cdf.diff

LOG: [RISCV] Add test coverage for profitable vsetvli a0, zero, <vtype> cases

Test coverage for an upcoming change: we can avoid materializing an immediate
in a register if we know the immediate is equal to VLMAX.
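
For illustration (not part of this commit): vscale_range(2,2) pins vscale to 2,
and on RISC-V vscale = VLEN/64, so VLEN = 128 and VLMAX = (VLEN / SEW) * LMUL is
a compile-time constant. A minimal sketch of the intended before/after codegen
for the e8/m8 tests below, assuming the change uses the x0-AVL form of vsetvli
(which sets vl = VLMAX per the V spec); the scratch register choice here is
illustrative:

    # Before: an AVL of 128 does not fit vsetivli's 5-bit immediate, so
    # it is first materialized in a register.
    li      a1, 128
    vsetvli zero, a1, e8, m8, ta, ma

    # After: 128 equals VLMAX for e8/m8 at VLEN = 128, so the immediate
    # can be dropped. rd must be non-x0 for the vl = VLMAX semantics;
    # any scratch GPR works.
    vsetvli a1, zero, e8, m8, ta, ma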

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
index caf0ae603fda9c..ed27c9c7eb3445 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
@@ -135,3 +135,56 @@ define <6 x i1> @load_v6i1(ptr %p) {
   %x = load <6 x i1>, ptr %p
   ret <6 x i1> %x
 }
+
+
+define <4 x i32> @exact_vlen_i32_m1(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i32_m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %p
+  ret <4 x i32> %v
+}
+
+define <16 x i8> @exact_vlen_i8_m1(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, ptr %p
+  ret <16 x i8> %v
+}
+
+define <32 x i8> @exact_vlen_i8_m2(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, ptr %p
+  ret <32 x i8> %v
+}
+
+define <128 x i8> @exact_vlen_i8_m8(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <128 x i8>, ptr %p
+  ret <128 x i8> %v
+}
+
+define <16 x i64> @exact_vlen_i64_m8(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i64_m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <16 x i64>, ptr %p
+  ret <16 x i64> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
index 32d26827f989e0..7c6c70221d851a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
@@ -237,6 +237,65 @@ define void @store_constant_v2i8_volatile(ptr %p) {
   store volatile <2 x i8> <i8 1, i8 1>, ptr %p
   ret void
 }
+
+
+define void @exact_vlen_i32_m1(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i32_m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <4 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @exact_vlen_i8_m1(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <16 x i8> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @exact_vlen_i8_m2(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <32 x i8> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @exact_vlen_i8_m8(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <128 x i8> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @exact_vlen_i64_m8(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i64_m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <16 x i64> zeroinitializer, ptr %p
+  ret void
+}
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; RV32: {{.*}}
 ; RV64: {{.*}}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 1114c7657c63d3..1ea8925ba415bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -594,6 +594,40 @@ bb:
   ret i64 %tmp2
 }
 
+
+define void @add_v128i8(ptr %x, ptr %y) vscale_range(2,2) {
+; CHECK-LABEL: add_v128i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 128
+; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v16, (a1)
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <128 x i8>, ptr %x
+  %b = load <128 x i8>, ptr %y
+  %c = add <128 x i8> %a, %b
+  store <128 x i8> %c, ptr %x
+  ret void
+}
+
+define void @add_v16i64(ptr %x, ptr %y) vscale_range(2,2) {
+; CHECK-LABEL: add_v16i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vle64.v v16, (a1)
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <16 x i64>, ptr %x
+  %b = load <16 x i64>, ptr %y
+  %c = add <16 x i64> %a, %b
+  store <16 x i64> %c, ptr %x
+  ret void
+}
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,


        

