[llvm] aaba552 - [RISCV] Add coverage for VP div[u]/rem[u] with non-power-of-2 vectors

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 20 09:18:41 PDT 2024


Author: Philip Reames
Date: 2024-08-20T09:18:18-07:00
New Revision: aaba552f51e1a96f829afa8b422ca8d7ace55781

URL: https://github.com/llvm/llvm-project/commit/aaba552f51e1a96f829afa8b422ca8d7ace55781
DIFF: https://github.com/llvm/llvm-project/commit/aaba552f51e1a96f829afa8b422ca8d7ace55781.diff

LOG: [RISCV] Add coverage for VP div[u]/rem[u] with non-power-of-2 vectors

This already works, just adding coverage to show that before a change
which depends on this functionality.

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
index e3c7d02462cc7f..e626727ffb8b4c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
@@ -879,3 +879,48 @@ define <16 x i64> @vdiv_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
   %v = call <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
+
+
+declare <3 x i8> @llvm.vp.sdiv.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32)
+
+define <3 x i8> @vdiv_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vdiv_vv_v3i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.sdiv.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> splat (i1 true), i32 %evl)
+  ret <3 x i8> %v
+}
+
+define <3 x i8> @vdiv_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) {
+; CHECK-LABEL: vdiv_vv_v3i8_unmasked_avl3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, ta, ma
+; CHECK-NEXT:    vdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.sdiv.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> splat (i1 true), i32 3)
+  ret <3 x i8> %v
+}
+
+declare <7 x i8> @llvm.vp.sdiv.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)
+
+define <7 x i8> @vdiv_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vdiv_vv_v7i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.sdiv.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> splat (i1 true), i32 %evl)
+  ret <7 x i8> %v
+}
+
+define <7 x i8> @vdiv_vv_v7i8_unmasked_avl7(<7 x i8> %va, <7 x i8> %b) {
+; CHECK-LABEL: vdiv_vv_v7i8_unmasked_avl7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 7, e8, mf2, ta, ma
+; CHECK-NEXT:    vdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.sdiv.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> splat (i1 true), i32 7)
+  ret <7 x i8> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
index 03bd85bf5e69e2..3715449ef27f06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
@@ -878,3 +878,68 @@ define <16 x i64> @vdivu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
   %v = call <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
+
+
+define <8 x i8> @vdivu_vv_v8i8_unmasked_avl3(<8 x i8> %va, <8 x i8> %b) {
+; CHECK-LABEL: vdivu_vv_v8i8_unmasked_avl3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, ta, ma
+; CHECK-NEXT:    vdivu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 3)
+  ret <8 x i8> %v
+}
+
+define <8 x i8> @vdivu_vv_v8i8_unmasked_avl7(<8 x i8> %va, <8 x i8> %b) {
+; CHECK-LABEL: vdivu_vv_v8i8_unmasked_avl7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 7, e8, mf2, ta, ma
+; CHECK-NEXT:    vdivu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 7)
+  ret <8 x i8> %v
+}
+
+declare <3 x i8> @llvm.vp.udiv.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32)
+
+define <3 x i8> @vdivu_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vdivu_vv_v3i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vdivu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.udiv.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> splat (i1 true), i32 %evl)
+  ret <3 x i8> %v
+}
+
+define <3 x i8> @vdivu_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) {
+; CHECK-LABEL: vdivu_vv_v3i8_unmasked_avl3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, ta, ma
+; CHECK-NEXT:    vdivu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.udiv.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> splat (i1 true), i32 3)
+  ret <3 x i8> %v
+}
+
+declare <7 x i8> @llvm.vp.udiv.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)
+
+define <7 x i8> @vdivu_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vdivu_vv_v7i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vdivu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.udiv.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> splat (i1 true), i32 %evl)
+  ret <7 x i8> %v
+}
+
+define <7 x i8> @vdivu_vv_v7i8_unmasked_avl7(<7 x i8> %va, <7 x i8> %b) {
+; CHECK-LABEL: vdivu_vv_v7i8_unmasked_avl7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 7, e8, mf2, ta, ma
+; CHECK-NEXT:    vdivu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.udiv.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> splat (i1 true), i32 7)
+  ret <7 x i8> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
index ff8a63e371c8ef..aa76324f3804f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
@@ -879,3 +879,48 @@ define <16 x i64> @vrem_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
   %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
+
+
+declare <3 x i8> @llvm.vp.srem.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32)
+
+define <3 x i8> @vrem_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v3i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.srem.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> splat (i1 true), i32 %evl)
+  ret <3 x i8> %v
+}
+
+define <3 x i8> @vrem_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) {
+; CHECK-LABEL: vrem_vv_v3i8_unmasked_avl3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.srem.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> splat (i1 true), i32 3)
+  ret <3 x i8> %v
+}
+
+declare <7 x i8> @llvm.vp.srem.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)
+
+define <7 x i8> @vrem_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v7i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.srem.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> splat (i1 true), i32 %evl)
+  ret <7 x i8> %v
+}
+
+define <7 x i8> @vrem_vv_v7i8_unmasked_avl7(<7 x i8> %va, <7 x i8> %b) {
+; CHECK-LABEL: vrem_vv_v7i8_unmasked_avl7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 7, e8, mf2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.srem.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> splat (i1 true), i32 7)
+  ret <7 x i8> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
index b5eec4142c7824..24fa9357f91660 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
@@ -878,3 +878,48 @@ define <16 x i64> @vremu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
   %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
+
+
+declare <3 x i8> @llvm.vp.urem.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32)
+
+define <3 x i8> @vremu_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v3i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vremu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.urem.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> splat (i1 true), i32 %evl)
+  ret <3 x i8> %v
+}
+
+define <3 x i8> @vremu_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) {
+; CHECK-LABEL: vremu_vv_v3i8_unmasked_avl3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, ta, ma
+; CHECK-NEXT:    vremu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.urem.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> splat (i1 true), i32 3)
+  ret <3 x i8> %v
+}
+
+declare <7 x i8> @llvm.vp.urem.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)
+
+define <7 x i8> @vremu_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v7i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vremu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.urem.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> splat (i1 true), i32 %evl)
+  ret <7 x i8> %v
+}
+
+define <7 x i8> @vremu_vv_v7i8_unmasked_avl7(<7 x i8> %va, <7 x i8> %b) {
+; CHECK-LABEL: vremu_vv_v7i8_unmasked_avl7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 7, e8, mf2, ta, ma
+; CHECK-NEXT:    vremu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.urem.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> splat (i1 true), i32 7)
+  ret <7 x i8> %v
+}


        


More information about the llvm-commits mailing list