[llvm] 75099c2 - [ConstantFolding] Fold scalable get_active_lane_masks (#156659)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 11 02:53:21 PDT 2025
Author: Matthew Devereau
Date: 2025-09-11T10:53:18+01:00
New Revision: 75099c224632b7e424e2c59e3fdee980c1483348
URL: https://github.com/llvm/llvm-project/commit/75099c224632b7e424e2c59e3fdee980c1483348
DIFF: https://github.com/llvm/llvm-project/commit/75099c224632b7e424e2c59e3fdee980c1483348.diff
LOG: [ConstantFolding] Fold scalable get_active_lane_masks (#156659)
Scalable get_active_lane_mask intrinsics whose range is empty, i.e. whose base
operand is greater than or equal to the trip count under an unsigned
comparison, can be constant-folded to zeroinitializer. This helps remove
no-op scalable masked stores and loads.
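
The fold follows from the intrinsic's per-lane semantics: lane i of
llvm.get.active.lane.mask(%base, %n) is active iff %base + i < %n, compared
as unsigned without wrapping, so an empty range yields an all-false mask for
every value of vscale. A minimal standalone C++ sketch of that reference
semantics (the activeLaneMask helper and the explicit lane count are
illustrative only, not part of this patch):

  #include <cstdint>
  #include <vector>

  // Reference model: lane I of get_active_lane_mask(Base, N) is active iff
  // Base + I < N (unsigned, no wrap). If Base >= N the half-open range
  // [Base, N) is empty, so every lane is false -- i.e. zeroinitializer,
  // independent of how many lanes the scalable vector ends up having.
  std::vector<bool> activeLaneMask(uint64_t Base, uint64_t N, unsigned NumLanes) {
    std::vector<bool> Mask(NumLanes, false);
    if (Base >= N)
      return Mask;                  // empty range: all-false mask
    for (unsigned I = 0; I != NumLanes; ++I)
      Mask[I] = (I < N - Base);     // same as Base + I < N, without overflow
    return Mask;
  }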
Added:
Modified:
llvm/lib/Analysis/ConstantFolding.cpp
llvm/test/Transforms/InstSimplify/ConstProp/active-lane-mask.ll
Removed:
################################################################################
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index a136e8718435b..07c6ba8ae7d9e 100755
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -4252,6 +4252,13 @@ static Constant *ConstantFoldScalableVectorCall(
     return ConstantInt::getFalse(SVTy);
   }
+  case Intrinsic::get_active_lane_mask: {
+    auto Op0 = cast<ConstantInt>(Operands[0])->getValue();
+    auto Op1 = cast<ConstantInt>(Operands[1])->getValue();
+    if (Op0.uge(Op1))
+      return ConstantVector::getNullValue(SVTy);
+    break;
+  }
   default:
     break;
   }
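
Note that the guard compares the two immediate operands as unsigned values via
APInt::uge, so the fold also covers bases strictly greater than the trip count,
as in the nxv4i1_8_4 test below. A tiny standalone sketch of just that check
(the isEmptyLaneRange name is hypothetical, not from the patch):

  #include "llvm/ADT/APInt.h"

  // True when get_active_lane_mask(Base, N) is known to be all-false, i.e. the
  // half-open range [Base, N) is empty under an unsigned comparison.
  static bool isEmptyLaneRange(const llvm::APInt &Base, const llvm::APInt &N) {
    return Base.uge(N); // mirrors the Op0.uge(Op1) guard above
  }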
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/active-lane-mask.ll b/llvm/test/Transforms/InstSimplify/ConstProp/active-lane-mask.ll
index a904e697cc975..ed26deb58eae4 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/active-lane-mask.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/active-lane-mask.ll
@@ -307,6 +307,39 @@ entry:
ret <4 x float> %var33
}
+define <vscale x 4 x i1> @nxv4i1_12_12() {
+; CHECK-LABEL: @nxv4i1_12_12(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret <vscale x 4 x i1> zeroinitializer
+;
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 12, i32 12)
+ ret <vscale x 4 x i1> %mask
+}
+
+define <vscale x 4 x i1> @nxv4i1_8_4() {
+; CHECK-LABEL: @nxv4i1_8_4(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret <vscale x 4 x i1> zeroinitializer
+;
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 8, i32 4)
+ ret <vscale x 4 x i1> %mask
+}
+
+define <vscale x 16 x i1> @nxv16i1_0_0() {
+; CHECK-LABEL: @nxv16i1_0_0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
+;
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 0)
+ ret <vscale x 16 x i1> %mask
+}
+
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
+
+declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64, i64)