[llvm] [RISCV] Add tests for widening fixed vector masked loads/stores. NFC (PR #140949)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed May 21 11:50:07 PDT 2025


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/140949

None
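For context, the new tests cover non-power-of-two <7 x ...> masked loads and stores. As the subject says, these get widened to 8-element fixed vectors, and the CHECK lines show the current lowering clearing the padding lane by ANDing the incoming mask with the constant 127 (li a1, 127 / vmv.s.x / vmand.mm) before the masked vle/vse. A minimal IR sketch of the widened form follows; the @widened_masked_load_v8f32 wrapper is purely illustrative and is not part of this patch:

  ; Conceptually, the <7 x float> masked load is legalized to an
  ; <8 x float> masked load whose top lane is forced inactive,
  ; matching the vmand.mm with 0b0111_1111 seen in the CHECK lines.
  declare <8 x float> @llvm.masked.load.v8f32.p0(ptr, i32, <8 x i1>, <8 x float>)

  define <8 x float> @widened_masked_load_v8f32(ptr %a, <8 x i1> %mask) {
    ; Clear the 8th (padding) lane of the mask.
    %clear_top = and <8 x i1> %mask, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false>
    %load = call <8 x float> @llvm.masked.load.v8f32.p0(ptr %a, i32 8, <8 x i1> %clear_top, <8 x float> poison)
    ret <8 x float> %load
  }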

From 45d58d6e6be6b44db8a7bd5acec3120db3d5ec97 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 21 May 2025 11:00:34 -0700
Subject: [PATCH] [RISCV] Add tests for widening fixed vector masked
 loads/stores. NFC

---
 .../RISCV/rvv/fixed-vectors-masked-load-fp.ll       | 12 ++++++++++++
 .../RISCV/rvv/fixed-vectors-masked-load-int.ll      | 13 +++++++++++++
 .../RISCV/rvv/fixed-vectors-masked-store-fp.ll      | 12 ++++++++++++
 .../RISCV/rvv/fixed-vectors-masked-store-int.ll     | 12 ++++++++++++
 4 files changed, 49 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
index dbbec96445e3e..636af9535f6fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
@@ -322,3 +322,15 @@ define <128 x half> @masked_load_v128f16(ptr %a, <128 x i1> %mask) {
   ret <128 x half> %load
 }
 
+define <7 x float> @masked_load_v7f32(ptr %a, <7 x i1> %mask) {
+; CHECK-LABEL: masked_load_v7f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 127
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v8, a1
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <7 x float> @llvm.masked.load.v7f32(ptr %a, i32 8, <7 x i1> %mask, <7 x float> undef)
+  ret <7 x float> %load
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
index bca3544d8f032..f8f8a0c22d212 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
@@ -331,3 +331,16 @@ define <256 x i8> @masked_load_v256i8(ptr %a, <256 x i1> %mask) {
   ret <256 x i8> %load
 }
 
+define <7 x i8> @masked_load_v7i8(ptr %a, <7 x i1> %mask) {
+; CHECK-LABEL: masked_load_v7i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 127
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v8, a1
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <7 x i8> @llvm.masked.load.v7i8(ptr %a, i32 8, <7 x i1> %mask, <7 x i8> undef)
+  ret <7 x i8> %load
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
index f7e311d06c03a..f2ff4b284c293 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
@@ -322,3 +322,15 @@ define void @masked_store_v128f16(<128 x half> %val, ptr %a, <128 x i1> %mask) {
   ret void
 }
 
+define void @masked_store_v7f32(<7 x float> %val, ptr %a, <7 x i1> %mask) {
+; CHECK-LABEL: masked_store_v7f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 127
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v10, a1
+; CHECK-NEXT:    vmand.mm v0, v0, v10
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v7f32.p0(<7 x float> %val, ptr %a, i32 8, <7 x i1> %mask)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
index 6914a86726af4..a6bbaab97c8bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
@@ -330,3 +330,15 @@ define void @masked_store_v256i8(<256 x i8> %val, ptr %a, <256 x i1> %mask) {
   ret void
 }
 
+define void @masked_store_v7i8(<7 x i8> %val, ptr %a, <7 x i1> %mask) {
+; CHECK-LABEL: masked_store_v7i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 127
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a1
+; CHECK-NEXT:    vmand.mm v0, v0, v9
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v7i8.p0(<7 x i8> %val, ptr %a, i32 8, <7 x i1> %mask)
+  ret void
+}


