[llvm] d58ded4 - [RISCV][test] Precommit test for non-power-of-2 VLS type code gen
Kito Cheng via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 27 23:23:27 PDT 2024
Author: Kito Cheng
Date: 2024-06-28T14:23:13+08:00
New Revision: d58ded4e5b9625cba23a5865822100bfd998972b
URL: https://github.com/llvm/llvm-project/commit/d58ded4e5b9625cba23a5865822100bfd998972b
DIFF: https://github.com/llvm/llvm-project/commit/d58ded4e5b9625cba23a5865822100bfd998972b.diff
LOG: [RISCV][test] Precommit test for non-power-of-2 VLS type code gen
Added:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-non-power-of-2.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-non-power-of-2.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-non-power-of-2.ll
new file mode 100644
index 0000000000000..4aa60897f5064
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-non-power-of-2.ll
@@ -0,0 +1,149 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+
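+; Each test loads and stores a fixed-length vector whose element count is not
+; a power of 2. The CHECK lines show the current lowering: the loads and
+; stores use the exact element count (e.g. vsetivli with VL=3), while the
+; vadd is performed at the widened power-of-2 container size (e.g. VL=4).
+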
+define void @vls3i8(ptr align 8 %array) {
+; CHECK-LABEL: vls3i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 3, e8, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsetivli zero, 3, e8, mf4, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %arr = getelementptr inbounds <3 x i8>, ptr %array, i64 0
+ %1 = load <3 x i8>, ptr %array, align 1
+  %2 = add <3 x i8> %1, %1
+ store <3 x i8> %2, ptr %array, align 1
+ ret void
+}
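+
+; With -mattr=+v, llc assumes the architectural minimum VLEN of 128, so the
+; widened power-of-2 container type determines the LMUL seen in the checks:
+; <4 x i8> uses mf4, <4 x i32> m1, <8 x i32> m2, and <16 x i32> m4.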
+
+define void @vls3(ptr align 8 %array) {
+; CHECK-LABEL: vls3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %arr = getelementptr inbounds <3 x i32>, ptr %array, i64 0
+ %1 = load <3 x i32>, ptr %array, align 4
+  %2 = add <3 x i32> %1, %1
+ store <3 x i32> %2, ptr %array, align 4
+ ret void
+}
+
+define void @vls5(ptr align 8 %array) {
+; CHECK-LABEL: vls5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 5, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsetivli zero, 5, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %arr = getelementptr inbounds <5 x i32>, ptr %array, i64 0
+ %1 = load <5 x i32>, ptr %array, align 4
+  %2 = add <5 x i32> %1, %1
+ store <5 x i32> %2, ptr %array, align 4
+ ret void
+}
+
+define void @vls6(ptr align 8 %array) {
+; CHECK-LABEL: vls6:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %arr = getelementptr inbounds <6 x i32>, ptr %array, i64 0
+ %1 = load <6 x i32>, ptr %array, align 4
+  %2 = add <6 x i32> %1, %1
+ store <6 x i32> %2, ptr %array, align 4
+ ret void
+}
+
+define void @vls7(ptr align 8 %array) {
+; CHECK-LABEL: vls7:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 7, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsetivli zero, 7, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %arr = getelementptr inbounds <7 x i32>, ptr %array, i64 0
+ %1 = load <7 x i32>, ptr %array, align 4
+  %2 = add <7 x i32> %1, %1
+ store <7 x i32> %2, ptr %array, align 4
+ ret void
+}
+
+define void @vls9(ptr align 8 %array) {
+; CHECK-LABEL: vls9:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 9, e32, m4, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsetivli zero, 9, e32, m4, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %arr = getelementptr inbounds <9 x i32>, ptr %array, i64 0
+ %1 = load <9 x i32>, ptr %array, align 4
+  %2 = add <9 x i32> %1, %1
+ store <9 x i32> %2, ptr %array, align 4
+ ret void
+}
+
+define void @vls10(ptr align 8 %array) {
+; CHECK-LABEL: vls10:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 10, e32, m4, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsetivli zero, 10, e32, m4, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %arr = getelementptr inbounds <10 x i32>, ptr %array, i64 0
+ %1 = load <10 x i32>, ptr %array, align 4
+  %2 = add <10 x i32> %1, %1
+ store <10 x i32> %2, ptr %array, align 4
+ ret void
+}
+
+define void @vls11(ptr align 8 %array) {
+; CHECK-LABEL: vls11:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 11, e32, m4, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsetivli zero, 11, e32, m4, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %arr = getelementptr inbounds <11 x i32>, ptr %array, i64 0
+ %1 = load <11 x i32>, ptr %array, align 4
+  %2 = add <11 x i32> %1, %1
+ store <11 x i32> %2, ptr %array, align 4
+ ret void
+}