[llvm] 8d32654 - [RISCV] Add coverage for an upcoming set of vector narrowing changes

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 14 13:34:17 PST 2024


Author: Philip Reames
Date: 2024-02-14T13:34:08-08:00
New Revision: 8d326542926d4fba89cfb0ec01a0c1a1bd0789d6

URL: https://github.com/llvm/llvm-project/commit/8d326542926d4fba89cfb0ec01a0c1a1bd0789d6
DIFF: https://github.com/llvm/llvm-project/commit/8d326542926d4fba89cfb0ec01a0c1a1bd0789d6.diff

LOG: [RISCV] Add coverage for an upcoming set of vector narrowing changes

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index 37d05f08d0ff3d..d2e0113e69b900 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -148,3 +148,54 @@ define void @abs_v4i64(ptr %x) {
   ret void
 }
 declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+
+define void @abs_v4i64_of_sext_v4i8(ptr %x) {
+; CHECK-LABEL: abs_v4i64_of_sext_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsext.vf8 v10, v8
+; CHECK-NEXT:    vrsub.vi v8, v10, 0
+; CHECK-NEXT:    vmax.vv v8, v10, v8
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <4 x i8>, ptr %x
+  %a.ext = sext <4 x i8> %a to <4 x i64>
+  %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
+  store <4 x i64> %b, ptr %x
+  ret void
+}
+
+define void @abs_v4i64_of_sext_v4i16(ptr %x) {
+; CHECK-LABEL: abs_v4i64_of_sext_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsext.vf4 v10, v8
+; CHECK-NEXT:    vrsub.vi v8, v10, 0
+; CHECK-NEXT:    vmax.vv v8, v10, v8
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <4 x i16>, ptr %x
+  %a.ext = sext <4 x i16> %a to <4 x i64>
+  %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
+  store <4 x i64> %b, ptr %x
+  ret void
+}
+
+define void @abs_v4i64_of_sext_v4i32(ptr %x) {
+; CHECK-LABEL: abs_v4i64_of_sext_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vrsub.vi v8, v10, 0
+; CHECK-NEXT:    vmax.vv v8, v10, v8
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <4 x i32>, ptr %x
+  %a.ext = sext <4 x i32> %a to <4 x i64>
+  %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
+  store <4 x i64> %b, ptr %x
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index c8de041a26f430..7bffbaa1c21ea6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -880,3 +880,57 @@ define <2 x i64> @vwadd_vx_v2i64_i64(ptr %x, ptr %y) nounwind {
   %g = add <2 x i64> %e, %f
   ret <2 x i64> %g
 }
+
+define <2 x i32> @vwadd_v2i32_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwadd_v2i32_of_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v11, v9
+; CHECK-NEXT:    vwadd.vv v8, v11, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, ptr %x
+  %b = load <2 x i8>, ptr %y
+  %c = sext <2 x i8> %a to <2 x i32>
+  %d = sext <2 x i8> %b to <2 x i32>
+  %e = add <2 x i32> %c, %d
+  ret <2 x i32> %e
+}
+
+define <2 x i64> @vwadd_v2i64_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwadd_v2i64_of_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vsext.vf4 v10, v8
+; CHECK-NEXT:    vsext.vf4 v11, v9
+; CHECK-NEXT:    vwadd.vv v8, v11, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, ptr %x
+  %b = load <2 x i8>, ptr %y
+  %c = sext <2 x i8> %a to <2 x i64>
+  %d = sext <2 x i8> %b to <2 x i64>
+  %e = add <2 x i64> %c, %d
+  ret <2 x i64> %e
+}
+
+define <2 x i64> @vwadd_v2i64_of_v2i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vwadd_v2i64_of_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v11, v9
+; CHECK-NEXT:    vwadd.vv v8, v11, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i16>, ptr %x
+  %b = load <2 x i16>, ptr %y
+  %c = sext <2 x i16> %a to <2 x i64>
+  %d = sext <2 x i16> %b to <2 x i64>
+  %e = add <2 x i64> %c, %d
+  ret <2 x i64> %e
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index e51ca9f153dcb1..8779c6dd9fc38a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -908,3 +908,57 @@ define <4 x i64> @crash(<4 x i16> %x, <4 x i16> %y) {
   %c = add <4 x i64> %a, %b
   ret <4 x i64> %c
 }
+
+define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwaddu_v2i32_of_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v11, v9
+; CHECK-NEXT:    vwaddu.vv v8, v11, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, ptr %x
+  %b = load <2 x i8>, ptr %y
+  %c = zext <2 x i8> %a to <2 x i32>
+  %d = zext <2 x i8> %b to <2 x i32>
+  %e = add <2 x i32> %c, %d
+  ret <2 x i32> %e
+}
+
+define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwaddu_v2i64_of_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vzext.vf4 v10, v8
+; CHECK-NEXT:    vzext.vf4 v11, v9
+; CHECK-NEXT:    vwaddu.vv v8, v11, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, ptr %x
+  %b = load <2 x i8>, ptr %y
+  %c = zext <2 x i8> %a to <2 x i64>
+  %d = zext <2 x i8> %b to <2 x i64>
+  %e = add <2 x i64> %c, %d
+  ret <2 x i64> %e
+}
+
+define <2 x i64> @vwaddu_v2i64_of_v2i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vwaddu_v2i64_of_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v11, v9
+; CHECK-NEXT:    vwaddu.vv v8, v11, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i16>, ptr %x
+  %b = load <2 x i16>, ptr %y
+  %c = zext <2 x i16> %a to <2 x i64>
+  %d = zext <2 x i16> %b to <2 x i64>
+  %e = add <2 x i64> %c, %d
+  ret <2 x i64> %e
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index cf00fe14858d91..d2d54796069bb1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -895,3 +895,57 @@ define <2 x i64> @vwsubu_vx_v2i64_i64(ptr %x, ptr %y) nounwind {
   %g = sub <2 x i64> %e, %f
   ret <2 x i64> %g
 }
+
+define <2 x i32> @vwsubu_v2i32_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwsubu_v2i32_of_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v11, v9
+; CHECK-NEXT:    vwsubu.vv v8, v11, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, ptr %x
+  %b = load <2 x i8>, ptr %y
+  %c = zext <2 x i8> %a to <2 x i32>
+  %d = zext <2 x i8> %b to <2 x i32>
+  %e = sub <2 x i32> %c, %d
+  ret <2 x i32> %e
+}
+
+define <2 x i64> @vwsubu_v2i64_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwsubu_v2i64_of_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vzext.vf4 v10, v8
+; CHECK-NEXT:    vzext.vf4 v11, v9
+; CHECK-NEXT:    vwsubu.vv v8, v11, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, ptr %x
+  %b = load <2 x i8>, ptr %y
+  %c = zext <2 x i8> %a to <2 x i64>
+  %d = zext <2 x i8> %b to <2 x i64>
+  %e = sub <2 x i64> %c, %d
+  ret <2 x i64> %e
+}
+
+define <2 x i64> @vwsubu_v2i64_of_v2i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vwsubu_v2i64_of_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v11, v9
+; CHECK-NEXT:    vwsubu.vv v8, v11, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i16>, ptr %x
+  %b = load <2 x i16>, ptr %y
+  %c = zext <2 x i16> %a to <2 x i64>
+  %d = zext <2 x i16> %b to <2 x i64>
+  %e = sub <2 x i64> %c, %d
+  ret <2 x i64> %e
+}


        


More information about the llvm-commits mailing list