[llvm] 439783d - [RISCV] Adjust fixed vector coverage for get.active.lane.mask

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 7 13:28:41 PDT 2022


Author: Philip Reames
Date: 2022-07-07T13:28:29-07:00
New Revision: 439783da010ebaf27c09de5092df6eadc8d2be2a

URL: https://github.com/llvm/llvm-project/commit/439783da010ebaf27c09de5092df6eadc8d2be2a
DIFF: https://github.com/llvm/llvm-project/commit/439783da010ebaf27c09de5092df6eadc8d2be2a.diff

LOG: [RISCV] Adjust fixed vector coverage for get.active.lane.mask

Make sure we include at least one case where the vsaddu/vmsltu lowering
requires only LMUL=1.  We should be able to generate all of the fixed
vector variants from scalar-to-vector idioms, but that is probably not
very important right now given that the fixed-length variants we'd
actually use when vectorizing with LMUL=1 are reasonable.
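
For context (not part of the patch): get.active.lane.mask roughly computes
mask[i] = (index + i) <u tc, with the add done so that lanes cannot wrap,
which is what the vsaddu/vmsltu sequence in the CHECK lines realizes. Below
is a rough sketch of the equivalent scalar-to-vector idiom in plain IR,
assuming the usual splat / step-vector / saturating-add / unsigned-compare
expansion; the function name fv2_expanded is hypothetical.

; Sketch only: expand a <2 x i1> active lane mask by hand.
define <2 x i1> @fv2_expanded(i64 %index, i64 %tc) {
  ; splat the scalar index into both lanes
  %head = insertelement <2 x i64> poison, i64 %index, i64 0
  %splat = shufflevector <2 x i64> %head, <2 x i64> poison, <2 x i32> zeroinitializer
  ; add the lane numbers with unsigned saturation so the lanes cannot wrap
  %lanes = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %splat, <2 x i64> <i64 0, i64 1>)
  ; a lane is active while (index + i) <u tc
  %tc.head = insertelement <2 x i64> poison, i64 %tc, i64 0
  %tc.splat = shufflevector <2 x i64> %tc.head, <2 x i64> poison, <2 x i32> zeroinitializer
  %mask = icmp ult <2 x i64> %lanes, %tc.splat
  ret <2 x i1> %mask
}

declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)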

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
index fd96785e1e61..d4a72933b539 100644
--- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
@@ -76,6 +76,19 @@ define <vscale x 1 x i1> @above_maxvl(ptr %p) {
   ret <vscale x 1 x i1> %mask
 }
 
+define <2 x i1> @fv2(ptr %p, i64 %index, i64 %tc) {
+; CHECK-LABEL: fv2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vmv.v.x v8, a1
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    vsaddu.vv v8, v8, v9
+; CHECK-NEXT:    vmsltu.vx v0, v8, a2
+; CHECK-NEXT:    ret
+  %mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 %index, i64 %tc)
+  ret <2 x i1> %mask
+}
+
 define <8 x i1> @fv8(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv8:
 ; CHECK:       # %bb.0:
@@ -93,8 +106,8 @@ define <32 x i1> @fv32(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI7_0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmv.v.x v16, a1
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v8
@@ -113,8 +126,8 @@ define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_0)
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vsaddu.vv v16, v8, v16
@@ -124,16 +137,16 @@ define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-NEXT:    vmsltu.vx v0, v16, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v24, 2
-; CHECK-NEXT:    lui a0, %hi(.LCPI8_1)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_1)
+; CHECK-NEXT:    lui a0, %hi(.LCPI9_1)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_1)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vsaddu.vv v16, v8, v16
 ; CHECK-NEXT:    vmsltu.vx v24, v16, a2
 ; CHECK-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v24, 4
-; CHECK-NEXT:    lui a0, %hi(.LCPI8_2)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_2)
+; CHECK-NEXT:    lui a0, %hi(.LCPI9_2)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_2)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
@@ -149,8 +162,8 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vsaddu.vv v16, v8, v16
@@ -160,48 +173,48 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-NEXT:    vmsltu.vx v0, v16, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v24, 2
-; CHECK-NEXT:    lui a0, %hi(.LCPI9_1)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_1)
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_1)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_1)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vsaddu.vv v16, v8, v16
 ; CHECK-NEXT:    vmsltu.vx v24, v16, a2
 ; CHECK-NEXT:    vsetivli zero, 6, e8, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v24, 4
-; CHECK-NEXT:    lui a0, %hi(.LCPI9_2)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_2)
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_2)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_2)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vsaddu.vv v16, v8, v16
 ; CHECK-NEXT:    vmsltu.vx v24, v16, a2
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v24, 6
-; CHECK-NEXT:    lui a0, %hi(.LCPI9_3)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_3)
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_3)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_3)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vsaddu.vv v16, v8, v16
 ; CHECK-NEXT:    vmsltu.vx v24, v16, a2
 ; CHECK-NEXT:    vsetivli zero, 10, e8, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v24, 8
-; CHECK-NEXT:    lui a0, %hi(.LCPI9_4)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_4)
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_4)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_4)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vsaddu.vv v16, v8, v16
 ; CHECK-NEXT:    vmsltu.vx v24, v16, a2
 ; CHECK-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v24, 10
-; CHECK-NEXT:    lui a0, %hi(.LCPI9_5)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_5)
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_5)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_5)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vsaddu.vv v16, v8, v16
 ; CHECK-NEXT:    vmsltu.vx v24, v16, a2
 ; CHECK-NEXT:    vsetivli zero, 14, e8, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v24, 12
-; CHECK-NEXT:    lui a0, %hi(.LCPI9_6)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_6)
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_6)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_6)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
@@ -215,6 +228,7 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
 
 
 declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64, i64)
+declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64, i64)
 declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64, i64)
 declare <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64, i64)
 declare <64 x i1> @llvm.get.active.lane.mask.v64i1.i64(i64, i64)
