[llvm] [LLVM][SCEV] Look through common multiplicand when simplifying compares. (PR #141798)

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Fri May 30 08:37:32 PDT 2025


https://github.com/paulwalker-arm updated https://github.com/llvm/llvm-project/pull/141798

From 47a151e427eb6b5102608efaef78ec8ce1d382a7 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Fri, 30 May 2025 14:41:18 +0100
Subject: [PATCH 1/3] Add dedicated SCEV tests.

---
 .../ScalarEvolution/simplify-icmp-ops.ll      | 416 ++++++++++++++++++
 1 file changed, 416 insertions(+)
 create mode 100644 llvm/test/Analysis/ScalarEvolution/simplify-icmp-ops.ll

diff --git a/llvm/test/Analysis/ScalarEvolution/simplify-icmp-ops.ll b/llvm/test/Analysis/ScalarEvolution/simplify-icmp-ops.ll
new file mode 100644
index 0000000000000..87fedaa5c3556
--- /dev/null
+++ b/llvm/test/Analysis/ScalarEvolution/simplify-icmp-ops.ll
@@ -0,0 +1,416 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=indvars < %s | FileCheck %s
+
+; Verify ScalarEvolution can simplify comparisons of the form:
+;    (X * Z) icmp (Y * Z) ==> X icmp Y
+; which allows IndVarSimplify to "remove" control flow.
+
+define void @signed_icmp_mul_common_multiplicand(ptr %loc) vscale_range(1,1073741824) {
+; CHECK-LABEL: define void @signed_icmp_mul_common_multiplicand(
+; CHECK-SAME: ptr [[LOC:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 9, [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = mul nsw i32 5, [[Z]]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %z = call i32 @llvm.vscale.i32()
+  %x = mul nsw i32 9, %z
+  %y = mul nsw i32 5, %z
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp slt i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @signed_icmp_mul_common_multiplicand_commuted(ptr %loc) vscale_range(1,1073741824) {
+; CHECK-LABEL: define void @signed_icmp_mul_common_multiplicand_commuted(
+; CHECK-SAME: ptr [[LOC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 [[Z]], 9
+; CHECK-NEXT:    [[Y:%.*]] = mul nsw i32 [[Z]], 5
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %z = call i32 @llvm.vscale.i32()
+  %x = mul nsw i32 %z, 9
+  %y = mul nsw i32 %z, 5
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp slt i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @signed_icmp_mul_common_multiplicand_mixed_arith(ptr %loc) vscale_range(1,1073741824) {
+; CHECK-LABEL: define void @signed_icmp_mul_common_multiplicand_mixed_arith(
+; CHECK-SAME: ptr [[LOC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[VS1:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[VS2:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 9, [[VS1]]
+; CHECK-NEXT:    [[Y:%.*]] = shl nsw i32 [[VS2]], 2
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %vs1 = call i32 @llvm.vscale.i32()
+  %vs2 = call i32 @llvm.vscale.i32()
+  %x = mul nsw i32 9, %vs1
+  %y = shl nsw i32 %vs2, 2
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp slt i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @signed_icmp_mul_common_multiplicand_potential_wrapping(ptr %loc) vscale_range(1,1073741824) {
+; CHECK-LABEL: define void @signed_icmp_mul_common_multiplicand_potential_wrapping(
+; CHECK-SAME: ptr [[LOC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 5, [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = mul i32 9, [[Z]]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp sgt i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %z = call i32 @llvm.vscale.i32()
+  %x = mul nsw i32 5, %z
+  %y = mul i32 9, %z
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp sgt i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @signed_icmp_mul_common_multiplicand_potential_wrapping_2(ptr %loc) vscale_range(1,1073741824) {
+; CHECK-LABEL: define void @signed_icmp_mul_common_multiplicand_potential_wrapping_2(
+; CHECK-SAME: ptr [[LOC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul i32 9, [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = mul nsw i32 5, [[Z]]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %z = call i32 @llvm.vscale.i32()
+  %x = mul i32 9, %z
+  %y = mul nsw i32 5, %z
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp slt i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @signed_icmp_mul_common_but_potentially_non_positive_multiplicand(ptr %loc, i32 %z) {
+; CHECK-LABEL: define void @signed_icmp_mul_common_but_potentially_non_positive_multiplicand(
+; CHECK-SAME: ptr [[LOC:%.*]], i32 [[Z:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 9, [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = mul nsw i32 5, [[Z]]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %x = mul nsw i32 9, %z
+  %y = mul nsw i32 5, %z
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp slt i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @unsigned_icmp_mul_common_multiplicand(ptr %loc) {
+; CHECK-LABEL: define void @unsigned_icmp_mul_common_multiplicand(
+; CHECK-SAME: ptr [[LOC:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 9, [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = mul nuw i32 5, [[Z]]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %z = call i32 @llvm.vscale.i32()
+  %x = mul nuw i32 9, %z
+  %y = mul nuw i32 5, %z
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp ult i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @unsigned_icmp_mul_common_multiplicand_commuted(ptr %loc) {
+; CHECK-LABEL: define void @unsigned_icmp_mul_common_multiplicand_commuted(
+; CHECK-SAME: ptr [[LOC:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 [[Z]], 9
+; CHECK-NEXT:    [[Y:%.*]] = mul nuw i32 [[Z]], 5
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %z = call i32 @llvm.vscale.i32()
+  %x = mul nuw i32 %z, 9
+  %y = mul nuw i32 %z, 5
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp ult i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @unsigned_icmp_mul_common_multiplicand_mixed_arith(ptr %loc) {
+; CHECK-LABEL: define void @unsigned_icmp_mul_common_multiplicand_mixed_arith(
+; CHECK-SAME: ptr [[LOC:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[VS1:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[VS2:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 9, [[VS1]]
+; CHECK-NEXT:    [[Y:%.*]] = shl nuw i32 [[VS2]], 2
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %vs1 = call i32 @llvm.vscale.i32()
+  %vs2 = call i32 @llvm.vscale.i32()
+  %x = mul nuw i32 9, %vs1
+  %y = shl nuw i32 %vs2, 2
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp ult i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @unsigned_icmp_mul_common_multiplicand_potential_wrapping(ptr %loc) {
+; CHECK-LABEL: define void @unsigned_icmp_mul_common_multiplicand_potential_wrapping(
+; CHECK-SAME: ptr [[LOC:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 5, [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = mul i32 9, [[Z]]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp ugt i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %z = call i32 @llvm.vscale.i32()
+  %x = mul nuw i32 5, %z
+  %y = mul i32 9, %z
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp ugt i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @unsigned_icmp_mul_common_multiplicand_potential_wrapping_2(ptr %loc) {
+; CHECK-LABEL: define void @unsigned_icmp_mul_common_multiplicand_potential_wrapping_2(
+; CHECK-SAME: ptr [[LOC:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[X:%.*]] = mul i32 9, [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = mul nuw i32 5, [[Z]]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %z = call i32 @llvm.vscale.i32()
+  %x = mul i32 9, %z
+  %y = mul nuw i32 5, %z
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp ult i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @unsigned_icmp_mul_common_but_potentially_zero_multiplicand(ptr %loc, i32 %z) {
+; CHECK-LABEL: define void @unsigned_icmp_mul_common_but_potentially_zero_multiplicand(
+; CHECK-SAME: ptr [[LOC:%.*]], i32 [[Z:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 9, [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = mul nuw i32 5, [[Z]]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
+; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[X]], [[Y]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %x = mul nuw i32 9, %z
+  %y = mul nuw i32 5, %z
+  br label %loop
+
+loop:
+  %idx = phi i32 [ 0, %entry ], [ %idx.dec, %loop ]
+  store i32 %idx, ptr %loc
+  %idx.dec = add nuw i32 %idx, 1
+  %cond = icmp ult i32 %x, %y
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+declare i32 @llvm.vscale.i32()

From cfcd9dfcca54e3d6180c6e10fb4959e81cf6a0b9 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Thu, 15 May 2025 17:43:23 +0100
Subject: [PATCH 2/3] [LLVM][SCEV] Look through common multiplicand when
 simplifying compares.

My use case is simplifying the control flow generated by LoopVectorize
when vectorising loops whose trip count is a function of the runtime
vector length. This can be problematic because:

* CSE is a pre-LoopVectorize transform and so it's common for an IR
function to include several calls to llvm.vscale(). (NOTE: Code
generation will typically remove the duplicates.)
* Pre-LoopVectorize InstCombine runs will rewrite some multiplies as
shifts. This leads to a mismatch between the VL-based maths of the
scalar loop and the maths created for the vector loop, which prevents
some obvious simplifications.

SCEV does not suffer from these issues because it effectively performs
CSE during construction and represents shifts as multiplies.
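
As a rough illustration of the second point (not part of the patch, and
assuming SCEV's usual canonicalisation of shifts into multiplies), the
same vscale-based quantity can reach ScalarEvolution in two syntactic
forms:

  %vs = call i64 @llvm.vscale.i64()
  %a = mul nuw nsw i64 %vs, 4   ; as written for the scalar loop
  %b = shl nuw nsw i64 %vs, 2   ; as rewritten by instcombine
  ; Both %a and %b should map to roughly the same SCEV, (4 * vscale),
  ; so a compare between them can be simplified even though the IR differs.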
---
 llvm/lib/Analysis/ScalarEvolution.cpp         |  16 +++
 .../AArch64/sve-vscale-based-trip-counts.ll   | 104 ++++--------------
 2 files changed, 38 insertions(+), 82 deletions(-)

diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 4bd5a4c3ab75c..545cd27f54360 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -10748,6 +10748,22 @@ bool ScalarEvolution::SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS,
   if (Depth >= 3)
     return false;
 
+  // (X * Z) icmp (Y * Z) ==> X icmp Y
+  //     when neither multiply wraps and Z is positive.
+  if (isa<SCEVMulExpr>(LHS) && isa<SCEVMulExpr>(RHS)) {
+    const SCEVMulExpr *LMul = cast<SCEVMulExpr>(LHS);
+    const SCEVMulExpr *RMul = cast<SCEVMulExpr>(RHS);
+
+    if (LMul->getNumOperands() == 2 && RMul->getNumOperands() == 2 &&
+        LMul->getOperand(1) == RMul->getOperand(1) &&
+        isKnownPositive(LMul->getOperand(1)) && ICmpInst::isUnsigned(Pred) &&
+        LMul->hasNoUnsignedWrap() && RMul->hasNoUnsignedWrap()) {
+      LHS = LMul->getOperand(0);
+      RHS = RMul->getOperand(0);
+      Changed = true;
+    }
+  }
+
   // Canonicalize a constant to the right side.
   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
     // Check for both operands constant.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll
index 685516a57680f..488e374104c05 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll
@@ -9,54 +9,13 @@ define void @vscale_mul_4(ptr noalias noundef readonly captures(none) %a, ptr no
 ; CHECK-NEXT:  [[ENTRY:.*]]:
 ; CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 8
-; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK:       [[VECTOR_PH]]:
-; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 8
-; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
-; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP10]], 8
-; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
-; CHECK:       [[VECTOR_BODY]]:
-; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i32 0
-; CHECK-NEXT:    [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP18]], 4
-; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP11]]
-; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4
-; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP26]], align 4
-; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i32 0
-; CHECK-NEXT:    [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP15]], 4
-; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP16]]
-; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>, ptr [[TMP17]], align 4
-; CHECK-NEXT:    [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>, ptr [[TMP27]], align 4
-; CHECK-NEXT:    [[TMP19:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD3]]
-; CHECK-NEXT:    [[TMP28:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD1]], [[WIDE_LOAD4]]
-; CHECK-NEXT:    [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP21:%.*]] = mul i64 [[TMP20]], 4
-; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP21]]
-; CHECK-NEXT:    store <vscale x 4 x float> [[TMP19]], ptr [[TMP17]], align 4
-; CHECK-NEXT:    store <vscale x 4 x float> [[TMP28]], ptr [[TMP22]], align 4
-; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT:    [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK:       [[MIDDLE_BLOCK]]:
-; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
-; CHECK:       [[SCALAR_PH]]:
-; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
 ; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
-; CHECK:       [[FOR_COND_CLEANUP]]:
+; CHECK:       [[FOR_COND_CLEANUP:.*]]:
 ; CHECK-NEXT:    ret void
 ; CHECK:       [[FOR_BODY]]:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[TMP24:%.*]] = load float, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
@@ -65,7 +24,7 @@ define void @vscale_mul_4(ptr noalias noundef readonly captures(none) %a, ptr no
 ; CHECK-NEXT:    store float [[MUL4]], ptr [[ARRAYIDX3]], align 4
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[TMP1]]
-; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ;
 entry:
   %0 = tail call i64 @llvm.vscale.i64()
@@ -136,7 +95,7 @@ define  void @vscale_mul_8(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    store float [[MUL5]], ptr [[ARRAYIDX4]], align 4
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[MUL1]]
-; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ;
 entry:
   %0 = tail call i64 @llvm.vscale.i64()
@@ -167,9 +126,6 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    [[MUL1:%.*]] = mul nuw nsw i64 [[TMP0]], 12
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 8
-; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[MUL1]], [[TMP2]]
-; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK:       [[VECTOR_PH]]:
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 8
 ; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
@@ -178,7 +134,7 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 8
 ; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
 ; CHECK:       [[VECTOR_BODY]]:
-; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP7]], i32 0
 ; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
@@ -202,17 +158,14 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    store <vscale x 4 x float> [[TMP25]], ptr [[TMP21]], align 4
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[MUL1]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
-; CHECK:       [[SCALAR_PH]]:
-; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY:.*]]
 ; CHECK:       [[FOR_COND_CLEANUP]]:
 ; CHECK-NEXT:    ret void
 ; CHECK:       [[FOR_BODY]]:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[TMP23:%.*]] = load float, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
@@ -221,7 +174,7 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    store float [[MUL5]], ptr [[ARRAYIDX4]], align 4
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[MUL1]]
-; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
 ;
 entry:
   %0 = tail call i64 @llvm.vscale.i64()
@@ -252,9 +205,6 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    [[MUL1:%.*]] = mul nuw nsw i64 [[TMP0]], 31
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 8
-; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[MUL1]], [[TMP2]]
-; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK:       [[VECTOR_PH]]:
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 8
 ; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
@@ -263,7 +213,7 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 8
 ; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
 ; CHECK:       [[VECTOR_BODY]]:
-; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP7]], i32 0
 ; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
@@ -287,17 +237,14 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    store <vscale x 4 x float> [[TMP18]], ptr [[TMP21]], align 4
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[MUL1]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
-; CHECK:       [[SCALAR_PH]]:
-; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY:.*]]
 ; CHECK:       [[FOR_COND_CLEANUP]]:
 ; CHECK-NEXT:    ret void
 ; CHECK:       [[FOR_BODY]]:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[TMP23:%.*]] = load float, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
@@ -306,7 +253,7 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    store float [[MUL5]], ptr [[ARRAYIDX4]], align 4
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[MUL1]]
-; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
 ;
 entry:
   %0 = tail call i64 @llvm.vscale.i64()
@@ -337,9 +284,6 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    [[MUL1:%.*]] = mul nuw nsw i64 [[TMP0]], 64
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 8
-; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[MUL1]], [[TMP2]]
-; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK:       [[VECTOR_PH]]:
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 8
 ; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
@@ -348,7 +292,7 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 8
 ; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
 ; CHECK:       [[VECTOR_BODY]]:
-; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP7]], i32 0
 ; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
@@ -372,17 +316,14 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    store <vscale x 4 x float> [[TMP18]], ptr [[TMP21]], align 4
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[MUL1]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
-; CHECK:       [[SCALAR_PH]]:
-; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY:.*]]
 ; CHECK:       [[FOR_COND_CLEANUP]]:
 ; CHECK-NEXT:    ret void
 ; CHECK:       [[FOR_BODY]]:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[TMP23:%.*]] = load float, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
@@ -391,7 +332,7 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
 ; CHECK-NEXT:    store float [[MUL5]], ptr [[ARRAYIDX4]], align 4
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[MUL1]]
-; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
 ;
 entry:
   %0 = tail call i64 @llvm.vscale.i64()
@@ -419,14 +360,13 @@ declare i64 @llvm.vscale.i64()
 attributes #0 = { vscale_range(1,16) "target-features"="+sve" }
 ;.
 ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
 ; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]}
 ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
 ; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]}
 ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
 ; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
 ; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META2]], [[META1]]}
 ;.

From c1a013513c303094ea28c483ac34ba64792f110b Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Fri, 30 May 2025 14:40:11 +0100
Subject: [PATCH 3/3] Extend to cover signed comparisons.

---
 llvm/lib/Analysis/ScalarEvolution.cpp         | 26 +++++--
 .../ScalarEvolution/simplify-icmp-ops.ll      | 74 +++++--------------
 2 files changed, 36 insertions(+), 64 deletions(-)

diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 545cd27f54360..485e242227858 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -10748,19 +10748,29 @@ bool ScalarEvolution::SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS,
   if (Depth >= 3)
     return false;
 
-  // (X * Z) icmp (Y * Z) ==> X icmp Y
-  //     when neither multiply wraps and Z is positive.
   if (isa<SCEVMulExpr>(LHS) && isa<SCEVMulExpr>(RHS)) {
     const SCEVMulExpr *LMul = cast<SCEVMulExpr>(LHS);
     const SCEVMulExpr *RMul = cast<SCEVMulExpr>(RHS);
 
     if (LMul->getNumOperands() == 2 && RMul->getNumOperands() == 2 &&
-        LMul->getOperand(1) == RMul->getOperand(1) &&
-        isKnownPositive(LMul->getOperand(1)) && ICmpInst::isUnsigned(Pred) &&
-        LMul->hasNoUnsignedWrap() && RMul->hasNoUnsignedWrap()) {
-      LHS = LMul->getOperand(0);
-      RHS = RMul->getOperand(0);
-      Changed = true;
+        LMul->getOperand(1) == RMul->getOperand(1)) {
+      // (X * Z) uicmp (Y * Z) ==> X uicmp Y
+      //     when neither multiply wraps and Z is non-zero.
+      if (ICmpInst::isUnsigned(Pred) && isKnownNonZero(LMul->getOperand(1)) &&
+          LMul->hasNoUnsignedWrap() && RMul->hasNoUnsignedWrap()) {
+        LHS = LMul->getOperand(0);
+        RHS = RMul->getOperand(0);
+        Changed = true;
+      }
+      // (X * Z) sicmp (Y * Z) ==> X sicmp Y
+      //     when neither multiply wraps and Z is positive.
+      else if (ICmpInst::isSigned(Pred) &&
+               isKnownPositive(LMul->getOperand(1)) &&
+               LMul->hasNoSignedWrap() && RMul->hasNoSignedWrap()) {
+        LHS = LMul->getOperand(0);
+        RHS = RMul->getOperand(0);
+        Changed = true;
+      }
     }
   }
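
As a sketch of what the signed path above enables (the IR below mirrors
the new simplify-icmp-ops.ll tests rather than being part of this patch):
with vscale_range making the common multiplicand known positive, both
operands are nsw multiplies of the same value, so the predicate reduces
to a compare of the constant factors and IndVarSimplify can fold the
exit branch:

  %z = call i32 @llvm.vscale.i32()   ; known positive via vscale_range
  %x = mul nsw i32 9, %z             ; SCEV: roughly (9 * vscale)<nsw>
  %y = mul nsw i32 5, %z             ; SCEV: roughly (5 * vscale)<nsw>
  %cond = icmp slt i32 %x, %y        ; becomes icmp slt i32 9, 5, i.e. false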
 
diff --git a/llvm/test/Analysis/ScalarEvolution/simplify-icmp-ops.ll b/llvm/test/Analysis/ScalarEvolution/simplify-icmp-ops.ll
index 87fedaa5c3556..3497991ec37d1 100644
--- a/llvm/test/Analysis/ScalarEvolution/simplify-icmp-ops.ll
+++ b/llvm/test/Analysis/ScalarEvolution/simplify-icmp-ops.ll
@@ -8,17 +8,11 @@
 define void @signed_icmp_mul_common_multiplicand(ptr %loc) vscale_range(1,1073741824) {
 ; CHECK-LABEL: define void @signed_icmp_mul_common_multiplicand(
 ; CHECK-SAME: ptr [[LOC:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*]]:
-; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 9, [[Z]]
-; CHECK-NEXT:    [[Y:%.*]] = mul nsw i32 5, [[Z]]
+; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    br label %[[LOOP:.*]]
 ; CHECK:       [[LOOP]]:
-; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
-; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
-; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 [[X]], [[Y]]
-; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK-NEXT:    store i32 0, ptr [[LOC]], align 4
+; CHECK-NEXT:    br i1 false, label %[[LOOP]], label %[[EXIT:.*]]
 ; CHECK:       [[EXIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -42,17 +36,11 @@ exit:
 define void @signed_icmp_mul_common_multiplicand_commuted(ptr %loc) vscale_range(1,1073741824) {
 ; CHECK-LABEL: define void @signed_icmp_mul_common_multiplicand_commuted(
 ; CHECK-SAME: ptr [[LOC:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:  [[ENTRY:.*]]:
-; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 [[Z]], 9
-; CHECK-NEXT:    [[Y:%.*]] = mul nsw i32 [[Z]], 5
+; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    br label %[[LOOP:.*]]
 ; CHECK:       [[LOOP]]:
-; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
-; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
-; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 [[X]], [[Y]]
-; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK-NEXT:    store i32 0, ptr [[LOC]], align 4
+; CHECK-NEXT:    br i1 false, label %[[LOOP]], label %[[EXIT:.*]]
 ; CHECK:       [[EXIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -76,18 +64,11 @@ exit:
 define void @signed_icmp_mul_common_multiplicand_mixed_arith(ptr %loc) vscale_range(1,1073741824) {
 ; CHECK-LABEL: define void @signed_icmp_mul_common_multiplicand_mixed_arith(
 ; CHECK-SAME: ptr [[LOC:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:  [[ENTRY:.*]]:
-; CHECK-NEXT:    [[VS1:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[VS2:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 9, [[VS1]]
-; CHECK-NEXT:    [[Y:%.*]] = shl nsw i32 [[VS2]], 2
+; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    br label %[[LOOP:.*]]
 ; CHECK:       [[LOOP]]:
-; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
-; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
-; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 [[X]], [[Y]]
-; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK-NEXT:    store i32 0, ptr [[LOC]], align 4
+; CHECK-NEXT:    br i1 false, label %[[LOOP]], label %[[EXIT:.*]]
 ; CHECK:       [[EXIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -212,17 +193,11 @@ exit:
 define void @unsigned_icmp_mul_common_multiplicand(ptr %loc) {
 ; CHECK-LABEL: define void @unsigned_icmp_mul_common_multiplicand(
 ; CHECK-SAME: ptr [[LOC:%.*]]) {
-; CHECK-NEXT:  [[ENTRY:.*]]:
-; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 9, [[Z]]
-; CHECK-NEXT:    [[Y:%.*]] = mul nuw i32 5, [[Z]]
+; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    br label %[[LOOP:.*]]
 ; CHECK:       [[LOOP]]:
-; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
-; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
-; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[X]], [[Y]]
-; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK-NEXT:    store i32 0, ptr [[LOC]], align 4
+; CHECK-NEXT:    br i1 false, label %[[LOOP]], label %[[EXIT:.*]]
 ; CHECK:       [[EXIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -246,17 +221,11 @@ exit:
 define void @unsigned_icmp_mul_common_multiplicand_commuted(ptr %loc) {
 ; CHECK-LABEL: define void @unsigned_icmp_mul_common_multiplicand_commuted(
 ; CHECK-SAME: ptr [[LOC:%.*]]) {
-; CHECK-NEXT:  [[ENTRY:.*]]:
-; CHECK-NEXT:    [[Z:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 [[Z]], 9
-; CHECK-NEXT:    [[Y:%.*]] = mul nuw i32 [[Z]], 5
+; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    br label %[[LOOP:.*]]
 ; CHECK:       [[LOOP]]:
-; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
-; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
-; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[X]], [[Y]]
-; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK-NEXT:    store i32 0, ptr [[LOC]], align 4
+; CHECK-NEXT:    br i1 false, label %[[LOOP]], label %[[EXIT:.*]]
 ; CHECK:       [[EXIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -280,18 +249,11 @@ exit:
 define void @unsigned_icmp_mul_common_multiplicand_mixed_arith(ptr %loc) {
 ; CHECK-LABEL: define void @unsigned_icmp_mul_common_multiplicand_mixed_arith(
 ; CHECK-SAME: ptr [[LOC:%.*]]) {
-; CHECK-NEXT:  [[ENTRY:.*]]:
-; CHECK-NEXT:    [[VS1:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[VS2:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 9, [[VS1]]
-; CHECK-NEXT:    [[Y:%.*]] = shl nuw i32 [[VS2]], 2
+; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    br label %[[LOOP:.*]]
 ; CHECK:       [[LOOP]]:
-; CHECK-NEXT:    [[IDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IDX_DEC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT:    store i32 [[IDX]], ptr [[LOC]], align 4
-; CHECK-NEXT:    [[IDX_DEC]] = add nuw i32 [[IDX]], 1
-; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[X]], [[Y]]
-; CHECK-NEXT:    br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK-NEXT:    store i32 0, ptr [[LOC]], align 4
+; CHECK-NEXT:    br i1 false, label %[[LOOP]], label %[[EXIT:.*]]
 ; CHECK:       [[EXIT]]:
 ; CHECK-NEXT:    ret void
 ;


