[llvm] [AArch64] Add custom lowering of nxv32i1 get.active.lane.mask nodes (PR #141969)

David Sherwood via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 4 02:28:11 PDT 2025


================
@@ -151,6 +151,154 @@ define void @test_fixed_extract(i64 %i, i64 %n) #0 {
     ret void
 }
 
+; Illegal Types
+
+define void @test_2x16bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) #0 {
+; CHECK-SVE-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count:
+; CHECK-SVE:       // %bb.0:
+; CHECK-SVE-NEXT:    rdvl x8, #1
+; CHECK-SVE-NEXT:    adds w8, w0, w8
+; CHECK-SVE-NEXT:    csinv w8, w8, wzr, lo
+; CHECK-SVE-NEXT:    whilelo p0.b, w0, w1
+; CHECK-SVE-NEXT:    whilelo p1.b, w8, w1
+; CHECK-SVE-NEXT:    b use
+;
+; CHECK-SVE2p1-LABEL: test_2x16bit_mask_with_32bit_index_and_trip_count:
+; CHECK-SVE2p1:       // %bb.0:
+; CHECK-SVE2p1-NEXT:    mov w8, w1
+; CHECK-SVE2p1-NEXT:    mov w9, w0
+; CHECK-SVE2p1-NEXT:    whilelo { p0.b, p1.b }, x9, x8
+; CHECK-SVE2p1-NEXT:    b use
+  %r = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i32(i32 %i, i32 %n)
+  %v0 = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1.nxv32i1.i64(<vscale x 32 x i1> %r, i64 0)
+  %v1 = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1.nxv32i1.i64(<vscale x 32 x i1> %r, i64 16)
+  tail call void @use(<vscale x 16 x i1> %v0, <vscale x 16 x i1> %v1)
+  ret void
+}
+
+define void @test_2x32bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) #0 {
+; CHECK-SVE-LABEL: test_2x32bit_mask_with_32bit_index_and_trip_count:
+; CHECK-SVE:       // %bb.0:
+; CHECK-SVE-NEXT:    rdvl x8, #2
+; CHECK-SVE-NEXT:    rdvl x9, #1
+; CHECK-SVE-NEXT:    adds w8, w0, w8
+; CHECK-SVE-NEXT:    csinv w8, w8, wzr, lo
+; CHECK-SVE-NEXT:    adds w10, w8, w9
+; CHECK-SVE-NEXT:    csinv w10, w10, wzr, lo
+; CHECK-SVE-NEXT:    whilelo p3.b, w10, w1
+; CHECK-SVE-NEXT:    adds w9, w0, w9
+; CHECK-SVE-NEXT:    csinv w9, w9, wzr, lo
+; CHECK-SVE-NEXT:    whilelo p0.b, w0, w1
+; CHECK-SVE-NEXT:    whilelo p1.b, w9, w1
+; CHECK-SVE-NEXT:    whilelo p2.b, w8, w1
+; CHECK-SVE-NEXT:    b use
+;
+; CHECK-SVE2p1-LABEL: test_2x32bit_mask_with_32bit_index_and_trip_count:
+; CHECK-SVE2p1:       // %bb.0:
+; CHECK-SVE2p1-NEXT:    rdvl x8, #2
+; CHECK-SVE2p1-NEXT:    mov w9, w1
+; CHECK-SVE2p1-NEXT:    mov w10, w0
+; CHECK-SVE2p1-NEXT:    adds w8, w0, w8
+; CHECK-SVE2p1-NEXT:    csinv w8, w8, wzr, lo
+; CHECK-SVE2p1-NEXT:    whilelo { p0.b, p1.b }, x10, x9
+; CHECK-SVE2p1-NEXT:    whilelo { p2.b, p3.b }, x8, x9
+; CHECK-SVE2p1-NEXT:    b use
+  %r = call <vscale x 64 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 %i, i32 %n)
----------------
david-arm wrote:

nit: Perhaps change the intrinsic to `@llvm.get.active.lane.mask.nxv64i1.i32`, so the mangled name matches the `<vscale x 64 x i1>` return type?
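
For reference, a sketch of how that line would look after the rename (only the mangling suffix changes; the arguments are unchanged):

```llvm
  %r = call <vscale x 64 x i1> @llvm.get.active.lane.mask.nxv64i1.i32(i32 %i, i32 %n)
```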

https://github.com/llvm/llvm-project/pull/141969

