[llvm] 552fc39 - [RISCV] Add test for small vector copies

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Wed May 24 02:25:51 PDT 2023


Author: Luke Lau
Date: 2023-05-24T10:25:34+01:00
New Revision: 552fc3928a807d9bd549bae9845d2ab787a90ab6

URL: https://github.com/llvm/llvm-project/commit/552fc3928a807d9bd549bae9845d2ab787a90ab6
DIFF: https://github.com/llvm/llvm-project/commit/552fc3928a807d9bd549bae9845d2ab787a90ab6.diff

LOG: [RISCV] Add test for small vector copies

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D151211

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load-store.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load-store.ll
new file mode 100644
index 0000000000000..d60b3adb47378
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load-store.ll
@@ -0,0 +1,283 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s
+
+define void @v2i8(ptr %p, ptr %q) {
+; CHECK-LABEL: v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %p
+  store <2 x i8> %v, ptr %q
+  ret void
+}
+
+define void @v2i16(ptr %p, ptr %q) {
+; CHECK-LABEL: v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x i16>, ptr %p
+  store <2 x i16> %v, ptr %q
+  ret void
+}
+
+define void @v2i32(ptr %p, ptr %q) {
+; CHECK-LABEL: v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vse32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x i32>, ptr %p
+  store <2 x i32> %v, ptr %q
+  ret void
+}
+
+define void @v2i64(ptr %p, ptr %q) {
+; CHECK-LABEL: v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x i64>, ptr %p
+  store <2 x i64> %v, ptr %q
+  ret void
+}
+
+define void @v2f16(ptr %p, ptr %q) {
+; CHECK-LABEL: v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x half>, ptr %p
+  store <2 x half> %v, ptr %q
+  ret void
+}
+
+define void @v2f32(ptr %p, ptr %q) {
+; CHECK-LABEL: v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vse32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x float>, ptr %p
+  store <2 x float> %v, ptr %q
+  ret void
+}
+
+define void @v2f64(ptr %p, ptr %q) {
+; CHECK-LABEL: v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x double>, ptr %p
+  store <2 x double> %v, ptr %q
+  ret void
+}
+
+define void @v4i8(ptr %p, ptr %q) {
+; CHECK-LABEL: v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <4 x i8>, ptr %p
+  store <4 x i8> %v, ptr %q
+  ret void
+}
+
+define void @v4i16(ptr %p, ptr %q) {
+; CHECK-LABEL: v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, ptr %p
+  store <4 x i16> %v, ptr %q
+  ret void
+}
+
+define void @v4i32(ptr %p, ptr %q) {
+; CHECK-LABEL: v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vse32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %p
+  store <4 x i32> %v, ptr %q
+  ret void
+}
+
+define void @v4i64(ptr %p, ptr %q) {
+; CHECK-LABEL: v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <4 x i64>, ptr %p
+  store <4 x i64> %v, ptr %q
+  ret void
+}
+
+define void @v4f16(ptr %p, ptr %q) {
+; CHECK-LABEL: v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <4 x half>, ptr %p
+  store <4 x half> %v, ptr %q
+  ret void
+}
+
+define void @v4f32(ptr %p, ptr %q) {
+; CHECK-LABEL: v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vse32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <4 x float>, ptr %p
+  store <4 x float> %v, ptr %q
+  ret void
+}
+
+define void @v4f64(ptr %p, ptr %q) {
+; CHECK-LABEL: v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <4 x double>, ptr %p
+  store <4 x double> %v, ptr %q
+  ret void
+}
+
+define void @v8i8(ptr %p, ptr %q) {
+; CHECK-LABEL: v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <8 x i8>, ptr %p
+  store <8 x i8> %v, ptr %q
+  ret void
+}
+
+define void @v8i16(ptr %p, ptr %q) {
+; CHECK-LABEL: v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <8 x i16>, ptr %p
+  store <8 x i16> %v, ptr %q
+  ret void
+}
+
+define void @v8i32(ptr %p, ptr %q) {
+; CHECK-LABEL: v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vse32.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <8 x i32>, ptr %p
+  store <8 x i32> %v, ptr %q
+  ret void
+}
+
+define void @v8i64(ptr %p, ptr %q) {
+; CHECK-LABEL: v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <8 x i64>, ptr %p
+  store <8 x i64> %v, ptr %q
+  ret void
+}
+
+define void @v2i8_align1(ptr %p, ptr %q) {
+; CHECK-LABEL: v2i8_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %p, align 1
+  store <2 x i8> %v, ptr %q
+  ret void
+}
+
+define void @v2i8_align2(ptr %p, ptr %q) {
+; CHECK-LABEL: v2i8_align2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %p, align 2
+  store <2 x i8> %v, ptr %q
+  ret void
+}
+
+define void @v2i8_align4(ptr %p, ptr %q) {
+; CHECK-LABEL: v2i8_align4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %p, align 4
+  store <2 x i8> %v, ptr %q
+  ret void
+}
+
+define void @v2i8_volatile_load(ptr %p, ptr %q) {
+; CHECK-LABEL: v2i8_volatile_load:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load volatile <2 x i8>, ptr %p
+  store <2 x i8> %v, ptr %q
+  ret void
+}
+
+define void @v2i8_volatile_store(ptr %p, ptr %q) {
+; CHECK-LABEL: v2i8_volatile_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %p
+  store volatile <2 x i8> %v, ptr %q
+  ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}


        


More information about the llvm-commits mailing list