[llvm] [AArch64][InstCombine] Canonicalize whilelo intrinsic (PR #151553)

Matthew Devereau via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 1 07:28:02 PDT 2025


https://github.com/MDevereau updated https://github.com/llvm/llvm-project/pull/151553

From f4711c667c5534f1144794079c0995998ab6b856 Mon Sep 17 00:00:00 2001
From: Matthew Devereau <matthew.devereau at arm.com>
Date: Wed, 23 Jul 2025 02:15:52 +0000
Subject: [PATCH 1/2] [AArch64][InstCombine] Canonicalize whilelo intrinsic

Canonicalize calls to the llvm.aarch64.sve.whilelo intrinsic to the generic
llvm.get.active.lane.mask intrinsic during InstCombine.
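
As a sketch of the rewrite (mirroring the tests added below), a call such as

  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %a, i32 %b)

becomes

  %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 %a, i32 %b)

where the result type and the type of the first operand determine the
overload of the generic intrinsic.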
---
 .../AArch64/AArch64TargetTransformInfo.cpp    | 11 ++++
 .../AArch64/sve-intrinsic-whilelo.ll          | 66 +++++++++++++++++++
 2 files changed, 77 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-whilelo.ll

diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 18ca22fc9f211..1220a0fc8ee82 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -2696,6 +2696,15 @@ static std::optional<Instruction *> instCombineDMB(InstCombiner &IC,
   return std::nullopt;
 }
 
+static std::optional<Instruction *> instCombineWhilelo(InstCombiner &IC,
+                                                       IntrinsicInst &II) {
+  return IC.replaceInstUsesWith(
+      II,
+      IC.Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
+                                 {II.getType(), II.getOperand(0)->getType()},
+                                 {II.getOperand(0), II.getOperand(1)}));
+}
+
 static std::optional<Instruction *> instCombinePTrue(InstCombiner &IC,
                                                      IntrinsicInst &II) {
   if (match(II.getOperand(0), m_ConstantInt<AArch64SVEPredPattern::all>()))
@@ -2830,6 +2839,8 @@ AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
     return instCombineSVEDupqLane(IC, II);
   case Intrinsic::aarch64_sve_insr:
     return instCombineSVEInsr(IC, II);
+  case Intrinsic::aarch64_sve_whilelo:
+    return instCombineWhilelo(IC, II);
   case Intrinsic::aarch64_sve_ptrue:
     return instCombinePTrue(IC, II);
   case Intrinsic::aarch64_sve_uxtb:
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-whilelo.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-whilelo.ll
new file mode 100644
index 0000000000000..9dde171217432
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-whilelo.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+
+define <vscale x 4 x float> @const_whilelo_nxv4i32(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 4 x float> @const_whilelo_nxv4i32(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 4 x float> [[LOAD]]
+;
+  %mask = tail call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 0, i32 4)
+  %load = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr nonnull %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer)
+  ret <vscale x 4 x float> %load
+}
+
+define <vscale x 8 x float> @const_whilelo_nxv8f32(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 8 x float> @const_whilelo_nxv8f32(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 0, i32 8)
+; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 8 x i1> [[MASK]], <vscale x 8 x float> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 8 x float> [[LOAD]]
+;
+  %mask = tail call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 0, i32 8)
+  %load = tail call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr nonnull %0, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x float> zeroinitializer)
+  ret <vscale x 8 x float> %load
+}
+
+define <vscale x 8 x i16> @const_whilelo_nxv8i16(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 8 x i16> @const_whilelo_nxv8i16(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 0, i32 8)
+; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 8 x i1> [[MASK]], <vscale x 8 x i16> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 8 x i16> [[LOAD]]
+;
+  %mask = tail call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i16(i32 0, i32 8)
+  %load = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr nonnull %0, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i16> zeroinitializer)
+  ret <vscale x 8 x i16> %load
+}
+
+define <vscale x 16 x i8> @const_whilelo_nxv16i8(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 16 x i8> @const_whilelo_nxv16i8(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 16)
+; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 16 x i1> [[MASK]], <vscale x 16 x i8> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[LOAD]]
+;
+  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i8(i32 0, i32 16)
+  %load = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr nonnull %0, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> zeroinitializer)
+  ret <vscale x 16 x i8> %load
+}
+
+
+define <vscale x 16 x i8> @whilelo_nxv16i8(ptr %0, i32 %a, i32 %b) #0 {
+; CHECK-LABEL: define <vscale x 16 x i8> @whilelo_nxv16i8(
+; CHECK-SAME: ptr [[TMP0:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 16 x i1> [[MASK]], <vscale x 16 x i8> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[LOAD]]
+;
+  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i8(i32 %a, i32 %b)
+  %load = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr nonnull %0, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> zeroinitializer)
+  ret <vscale x 16 x i8> %load
+}

From 90ba31b80443de3acd72026caedcc0b77cc4a880 Mon Sep 17 00:00:00 2001
From: Matthew Devereau <matthew.devereau at arm.com>
Date: Fri, 1 Aug 2025 14:27:19 +0000
Subject: [PATCH 2/2] Add i64 test, rename tests, add nxv2i1 test

---
 .../AArch64/sve-intrinsic-whilelo.ll          | 51 ++++++++++++++-----
 1 file changed, 38 insertions(+), 13 deletions(-)

diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-whilelo.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-whilelo.ll
index 9dde171217432..5c47066c87373 100644
--- a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-whilelo.ll
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-whilelo.ll
@@ -4,8 +4,20 @@
 target triple = "aarch64-unknown-linux-gnu"
 
 
-define <vscale x 4 x float> @const_whilelo_nxv4i32(ptr %0) #0 {
-; CHECK-LABEL: define <vscale x 4 x float> @const_whilelo_nxv4i32(
+define <vscale x 2 x i64> @const_whilelo_nxv2i1.64(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 2 x i64> @const_whilelo_nxv2i1.64(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 4)
+; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 2 x i1> [[MASK]], <vscale x 2 x i64> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 2 x i64> [[LOAD]]
+;
+  %mask = tail call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 0, i64 4)
+  %load = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr nonnull %0, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i64> zeroinitializer)
+  ret <vscale x 2 x i64> %load
+}
+
+define <vscale x 4 x float> @const_whilelo_nxv4i1.i32(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 4 x float> @const_whilelo_nxv4i1.i32(
 ; CHECK-SAME: ptr [[TMP0:%.*]]) {
 ; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
 ; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> zeroinitializer)
@@ -16,8 +28,21 @@ define <vscale x 4 x float> @const_whilelo_nxv4i32(ptr %0) #0 {
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 8 x float> @const_whilelo_nxv8f32(ptr %0) #0 {
-; CHECK-LABEL: define <vscale x 8 x float> @const_whilelo_nxv8f32(
+define <vscale x 2 x i32> @const_whilelo_nxv2i1.i32(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 2 x i32> @const_whilelo_nxv2i1.i32(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 0, i32 2)
+; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 2 x i1> [[MASK]], <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    ret <vscale x 2 x i32> [[LOAD]]
+;
+  %mask = tail call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 0, i32 2)
+  %load = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr nonnull %0, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> zeroinitializer)
+  ret <vscale x 2 x i32> %load
+}
+
+
+define <vscale x 8 x float> @const_whilelo_nxv8i1.i32_nxv8f32(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 8 x float> @const_whilelo_nxv8i1.i32_nxv8f32(
 ; CHECK-SAME: ptr [[TMP0:%.*]]) {
 ; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 0, i32 8)
 ; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 8 x i1> [[MASK]], <vscale x 8 x float> zeroinitializer)
@@ -28,39 +53,39 @@ define <vscale x 8 x float> @const_whilelo_nxv8f32(ptr %0) #0 {
   ret <vscale x 8 x float> %load
 }
 
-define <vscale x 8 x i16> @const_whilelo_nxv8i16(ptr %0) #0 {
-; CHECK-LABEL: define <vscale x 8 x i16> @const_whilelo_nxv8i16(
+define <vscale x 8 x i16> @const_whilelo_nxv8i1.i32_nxv8i16(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 8 x i16> @const_whilelo_nxv8i1.i32_nxv8i16(
 ; CHECK-SAME: ptr [[TMP0:%.*]]) {
 ; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 0, i32 8)
 ; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 8 x i1> [[MASK]], <vscale x 8 x i16> zeroinitializer)
 ; CHECK-NEXT:    ret <vscale x 8 x i16> [[LOAD]]
 ;
-  %mask = tail call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i16(i32 0, i32 8)
+  %mask = tail call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 0, i32 8)
   %load = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr nonnull %0, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i16> zeroinitializer)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 16 x i8> @const_whilelo_nxv16i8(ptr %0) #0 {
-; CHECK-LABEL: define <vscale x 16 x i8> @const_whilelo_nxv16i8(
+define <vscale x 16 x i8> @const_whilelo_nxv16i1.i32(ptr %0) #0 {
+; CHECK-LABEL: define <vscale x 16 x i8> @const_whilelo_nxv16i1.i32(
 ; CHECK-SAME: ptr [[TMP0:%.*]]) {
 ; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 16)
 ; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 16 x i1> [[MASK]], <vscale x 16 x i8> zeroinitializer)
 ; CHECK-NEXT:    ret <vscale x 16 x i8> [[LOAD]]
 ;
-  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i8(i32 0, i32 16)
+  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 0, i32 16)
   %load = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr nonnull %0, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> zeroinitializer)
   ret <vscale x 16 x i8> %load
 }
 
 
-define <vscale x 16 x i8> @whilelo_nxv16i8(ptr %0, i32 %a, i32 %b) #0 {
-; CHECK-LABEL: define <vscale x 16 x i8> @whilelo_nxv16i8(
+define <vscale x 16 x i8> @whilelo_nxv16i1.i32(ptr %0, i32 %a, i32 %b) #0 {
+; CHECK-LABEL: define <vscale x 16 x i8> @whilelo_nxv16i1.i32(
 ; CHECK-SAME: ptr [[TMP0:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) {
 ; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[A]], i32 [[B]])
 ; CHECK-NEXT:    [[LOAD:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr nonnull [[TMP0]], i32 1, <vscale x 16 x i1> [[MASK]], <vscale x 16 x i8> zeroinitializer)
 ; CHECK-NEXT:    ret <vscale x 16 x i8> [[LOAD]]
 ;
-  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i8(i32 %a, i32 %b)
+  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %a, i32 %b)
   %load = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr nonnull %0, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> zeroinitializer)
   ret <vscale x 16 x i8> %load
 }



More information about the llvm-commits mailing list