[llvm] [AArch64][InstCombine] Canonicalize whilelo intrinsic (PR #151553)

David Sherwood via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 11 06:21:40 PDT 2025


================
@@ -0,0 +1,129 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define <vscale x 2 x i1> @whilelo_nxv2i1.i32(i32 %a, i32 %b) {
+; CHECK-LABEL: define <vscale x 2 x i1> @whilelo_nxv2i1.i32(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret <vscale x 2 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %mask
+}
+
+define <vscale x 4 x i1> @whilelo_nxv4i1.i32(i32 %a, i32 %b) {
+; CHECK-LABEL: define <vscale x 4 x i1> @whilelo_nxv4i1.i32(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret <vscale x 4 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %mask
+}
+
+define <vscale x 8 x i1> @whilelo_nxv8i1.i32(i32 %a, i32 %b) {
+; CHECK-LABEL: define <vscale x 8 x i1> @whilelo_nxv8i1.i32(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret <vscale x 8 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %mask
+}
+
+define <vscale x 16 x i1> @whilelo_nxv16i1.i32(i32 %a, i32 %b) {
+; CHECK-LABEL: define <vscale x 16 x i1> @whilelo_nxv16i1.i32(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret <vscale x 16 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %mask
+}
+
+define <vscale x 2 x i1> @whilelo_nxv2i1.i64(i64 %a, i64 %b) {
+; CHECK-LABEL: define <vscale x 2 x i1> @whilelo_nxv2i1.i64(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT:    ret <vscale x 2 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %mask
+}
+
+define <vscale x 4 x i1> @whilelo_nxv4i1.i64(i64 %a, i64 %b) {
+; CHECK-LABEL: define <vscale x 4 x i1> @whilelo_nxv4i1.i64(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT:    ret <vscale x 4 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %mask
+}
+
+define <vscale x 8 x i1> @whilelo_nxv8i1.i64(i64 %a, i64 %b) {
+; CHECK-LABEL: define <vscale x 8 x i1> @whilelo_nxv8i1.i64(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT:    ret <vscale x 8 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %mask
+}
+
+define <vscale x 16 x i1> @whilelo_nxv16i1.i64(i64 %a, i64 %b) {
+; CHECK-LABEL: define <vscale x 16 x i1> @whilelo_nxv16i1.i64(
+; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT:    ret <vscale x 16 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %mask
+}
+
+define <vscale x 16 x i1> @whilelo_nxv16i1.i64_const() {
+; CHECK-LABEL: define <vscale x 16 x i1> @whilelo_nxv16i1.i64_const() {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 16)
+; CHECK-NEXT:    ret <vscale x 16 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 0, i64 16)
+  ret <vscale x 16 x i1> %mask
+}
+
+define <vscale x 16 x i1> @whilelo_nxv16i1.i32_const() {
+; CHECK-LABEL: define <vscale x 16 x i1> @whilelo_nxv16i1.i32_const() {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 16)
+; CHECK-NEXT:    ret <vscale x 16 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 0, i32 16)
+  ret <vscale x 16 x i1> %mask
+}
+
+define <vscale x 16 x i1> @whilelo_nxv16i1.i16_const() {
+; CHECK-LABEL: define <vscale x 16 x i1> @whilelo_nxv16i1.i16_const() {
+; CHECK-NEXT:    [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 0, i16 16)
+; CHECK-NEXT:    ret <vscale x 16 x i1> [[MASK]]
+;
+  %mask = tail call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i16(i16 0, i16 16)
----------------
david-arm wrote:

I think there are only 32- and 64-bit versions of the whilelo builtins, so you can drop this function and the ones below.
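
For reference, the ACLE only exposes 32- and 64-bit operand forms of the
WHILE builtins. A sketch of the relevant prototypes (spellings per my
reading of arm_sve.h; the unsigned less-than forms are the ones that
lower to WHILELO):

  svbool_t svwhilelt_b8_u32(uint32_t op1, uint32_t op2);
  svbool_t svwhilelt_b8_u64(uint64_t op1, uint64_t op2);

so clang never emits the narrower overloads exercised by the functions
below.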

https://github.com/llvm/llvm-project/pull/151553
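
For anyone following along, the canonicalization being tested boils down
to replacing the target intrinsic with the generic lane-mask intrinsic.
A minimal sketch in the style of the AArch64 instcombine hooks
(instCombineWhileLo is a hypothetical name here, not necessarily what
the patch uses):

  #include "llvm/Transforms/InstCombine/InstCombiner.h"
  #include <optional>

  using namespace llvm;

  // Sketch only: fold @llvm.aarch64.sve.whilelo.* into the equivalent
  // @llvm.get.active.lane.mask.* call with matching result and operand
  // types, then replace all uses of the original intrinsic.
  static std::optional<Instruction *>
  instCombineWhileLo(InstCombiner &IC, IntrinsicInst &II) {
    Value *Base = II.getArgOperand(0);
    Value *Limit = II.getArgOperand(1);
    CallInst *Mask = IC.Builder.CreateIntrinsic(
        Intrinsic::get_active_lane_mask,
        {II.getType(), Base->getType()}, // overloads: result, operand
        {Base, Limit});
    return IC.replaceInstUsesWith(II, Mask);
  }

Something like this would be dispatched from the switch over intrinsic
IDs in AArch64TTIImpl::instCombineIntrinsic, presumably under
case Intrinsic::aarch64_sve_whilelo, which is why only the i32 and i64
overloads need test coverage.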

