[llvm] [AArch64] Allow peephole to optimize AND + signed compare with 0 (PR #153608)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 7 13:36:21 PST 2025
https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/153608
From 2b48dfd352e118adc9f43ee5e7c0911bf847a219 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Wed, 13 Aug 2025 19:37:23 -0400
Subject: [PATCH 1/2] Pre-commit test (NFC)
---
.../AArch64/arm64-regress-opt-cmp-signed.mir | 56 +++
llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll | 344 ++++++++++++++++++
2 files changed, 400 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
diff --git a/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
new file mode 100644
index 0000000000000..777b65f24a057
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
@@ -0,0 +1,56 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64-linux-gnu -run-pass peephole-opt -o - %s | FileCheck %s
+--- |
+ define i32 @test01() nounwind {
+ entry:
+ %0 = select i1 true, i32 1, i32 0
+ %1 = and i32 %0, 65535
+ %2 = icmp sgt i32 %1, 0
+ br i1 %2, label %if.then, label %if.end
+
+ if.then: ; preds = %entry
+ ret i32 1
+
+ if.end: ; preds = %entry
+ ret i32 0
+ }
+...
+---
+name: test01
+registers:
+ - { id: 0, class: gpr32 }
+ - { id: 1, class: gpr32common }
+body: |
+ ; CHECK-LABEL: name: test01
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK-NEXT: [[ANDWri:%[0-9]+]]:gpr32common = ANDWri killed [[ANDWri]], 15
+ ; CHECK-NEXT: $wzr = SUBSWri killed [[ANDWri]], 0, 0, implicit-def $nzcv
+ ; CHECK-NEXT: Bcc 12, %bb.2, implicit $nzcv
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.if.then:
+ ; CHECK-NEXT: $w0 = MOVi32imm 1
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.if.end:
+ ; CHECK-NEXT: $w0 = MOVi32imm 0
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ bb.0.entry:
+ successors: %bb.2.if.end, %bb.1.if.then
+
+ %0 = MOVi32imm 1
+ %1 = ANDWri killed %1, 15
+ $wzr = SUBSWri killed %1, 0, 0, implicit-def $nzcv
+ Bcc 12, %bb.2.if.end, implicit $nzcv
+
+ bb.1.if.then:
+ $w0 = MOVi32imm 1
+ RET_ReallyLR implicit $w0
+
+ bb.2.if.end:
+ $w0 = MOVi32imm 0
+ RET_ReallyLR implicit $w0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll b/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
index 33c5ba7987974..4f5431f66d9eb 100644
--- a/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
+++ b/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
@@ -161,6 +161,350 @@ define i1 @lt64_u16_and_23(i64 %0) {
ret i1 %3
}
+define i1 @test_disjoint(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp eq i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp sgt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint3:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp slt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint4(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: cmp w8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp sle i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse_4(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint_inverse_4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bic w8, w9, w8
+; CHECK-NEXT: cmp w8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp sle i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint_inverse:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bic w8, w9, w8
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp eq i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2_inverse(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint2_inverse:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bic w8, w9, w8
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp sgt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3_inverse(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint3_inverse:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bic w8, w9, w8
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp slt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: and x8, x9, x8
+; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp eq i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint2_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: and x8, x9, x8
+; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp sgt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint3_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: and x8, x9, x8
+; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp slt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint4_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint4_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: and x8, x9, x8
+; CHECK-NEXT: cmp x8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp sle i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse_4_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint_inverse_4_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bic x8, x9, x8
+; CHECK-NEXT: cmp x8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp sle i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint_inverse_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bic x8, x9, x8
+; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp eq i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2_inverse_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint2_inverse_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bic x8, x9, x8
+; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp sgt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3_inverse_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint3_inverse_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bic x8, x9, x8
+; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp slt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
; negative test
define i1 @lt3_u8(i8 %0) {
; CHECK-LABEL: lt3_u8:
From 0985fd414a8b2a94e855de56f9392d9793ea6a35 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Fri, 22 Aug 2025 16:38:34 -0400
Subject: [PATCH 2/2] [AArch64] Allow peephole to optimize AND + signed compare
with 0
This should be the peephole's job. Because ANDS sets the V flag to 0, signed comparisons with 0 are safe to replace with TST. Note that this applies only to AArch64: on ARM, ANDS leaves the V flag unchanged.
Fixes: #154387
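
For readers skimming the patch, an illustrative before/after sketch of the rewrite this enables (register names and the label are hypothetical, not taken from the patch). On AArch64, ANDS sets N and Z from the result and clears C and V, so the signed conditions simplify: GT (Z==0 && N==V) becomes "result > 0" and MI (N==1) becomes "result < 0".

    // before: flag-less AND plus an explicit compare against zero
    and   w8, w9, w10
    cmp   w8, #0          // alias of: subs wzr, w8, #0
    b.gt  .Ltaken
    // after: the flag-setting form subsumes the compare
    ands  w8, w9, w10     // N,Z from the result; C and V cleared
    b.gt  .Ltaken         // GT = (Z==0 && N==V); with V==0, "result > 0"

When the AND result itself is unused, the flag-setting form with a zero-register destination is printed as TST (or BICS with wzr/xzr), which is what the updated CHECK lines below show.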
---
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 68 ++++++++++++++++++-
.../AArch64/arm64-regress-opt-cmp-signed.mir | 3 +-
llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll | 36 ++++------
3 files changed, 80 insertions(+), 27 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 4b4073365483e..97dd9dc6b2736 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1478,6 +1478,26 @@ static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
case AArch64::SUBSXrx:
return AArch64::SUBXrx;
+ case AArch64::ANDSWri:
+ return AArch64::ANDWri;
+ case AArch64::ANDSWrr:
+ return AArch64::ANDWrr;
+ case AArch64::ANDSWrs:
+ return AArch64::ANDWrs;
+ case AArch64::ANDSXri:
+ return AArch64::ANDXri;
+ case AArch64::ANDSXrr:
+ return AArch64::ANDXrr;
+ case AArch64::ANDSXrs:
+ return AArch64::ANDXrs;
+ case AArch64::BICSWrr:
+ return AArch64::BICWrr;
+ case AArch64::BICSXrr:
+ return AArch64::BICXrr;
+ case AArch64::BICSWrs:
+ return AArch64::BICWrs;
+ case AArch64::BICSXrs:
+ return AArch64::BICXrs;
}
}
@@ -1780,6 +1800,16 @@ static unsigned sForm(MachineInstr &Instr) {
case AArch64::SUBSWri:
case AArch64::SUBSXrr:
case AArch64::SUBSXri:
+ case AArch64::ANDSWri:
+ case AArch64::ANDSWrr:
+ case AArch64::ANDSWrs:
+ case AArch64::ANDSXri:
+ case AArch64::ANDSXrr:
+ case AArch64::ANDSXrs:
+ case AArch64::BICSWrr:
+ case AArch64::BICSXrr:
+ case AArch64::BICSWrs:
+ case AArch64::BICSXrs:
return Instr.getOpcode();
case AArch64::ADDWrr:
@@ -1810,6 +1840,22 @@ static unsigned sForm(MachineInstr &Instr) {
return AArch64::ANDSWri;
case AArch64::ANDXri:
return AArch64::ANDSXri;
+ case AArch64::ANDWrr:
+ return AArch64::ANDSWrr;
+ case AArch64::ANDWrs:
+ return AArch64::ANDSWrs;
+ case AArch64::ANDXrr:
+ return AArch64::ANDSXrr;
+ case AArch64::ANDXrs:
+ return AArch64::ANDSXrs;
+ case AArch64::BICWrr:
+ return AArch64::BICSWrr;
+ case AArch64::BICXrr:
+ return AArch64::BICSXrr;
+ case AArch64::BICWrs:
+ return AArch64::BICSWrs;
+ case AArch64::BICXrs:
+ return AArch64::BICSXrs;
}
}
@@ -1947,6 +1993,25 @@ static bool isSUBSRegImm(unsigned Opcode) {
return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
}
+static bool isANDOpcode(MachineInstr &MI) {
+ unsigned Opc = sForm(MI);
+ switch (Opc) {
+ case AArch64::ANDSWri:
+ case AArch64::ANDSWrr:
+ case AArch64::ANDSWrs:
+ case AArch64::ANDSXri:
+ case AArch64::ANDSXrr:
+ case AArch64::ANDSXrs:
+ case AArch64::BICSWrr:
+ case AArch64::BICSXrr:
+ case AArch64::BICSWrs:
+ case AArch64::BICSXrs:
+ return true;
+ default:
+ return false;
+ }
+}
+
/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted:
@@ -1984,7 +2049,8 @@ static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr,
// 1) MI and CmpInstr set N and V to the same value.
// 2) If MI is add/sub with no-signed-wrap, it produces a poison value when
// signed overflow occurs, so CmpInstr could still be simplified away.
- if (NZVCUsed->V && !MI.getFlag(MachineInstr::NoSWrap))
+  // Note that ANDS and BICS instructions always clear the V flag.
+ if (NZVCUsed->V && !MI.getFlag(MachineInstr::NoSWrap) && !isANDOpcode(MI))
return false;
AccessKind AccessToCheck = AK_Write;
diff --git a/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
index 777b65f24a057..8c31e7c2d1cec 100644
--- a/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
+++ b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
@@ -26,8 +26,7 @@ body: |
; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
- ; CHECK-NEXT: [[ANDWri:%[0-9]+]]:gpr32common = ANDWri killed [[ANDWri]], 15
- ; CHECK-NEXT: $wzr = SUBSWri killed [[ANDWri]], 0, 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32common = ANDSWri killed [[ANDSWri]], 15, implicit-def $nzcv
; CHECK-NEXT: Bcc 12, %bb.2, implicit $nzcv
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.if.then:
diff --git a/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll b/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
index 4f5431f66d9eb..8297fa2d4e3f9 100644
--- a/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
+++ b/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
@@ -167,8 +167,7 @@ define i1 @test_disjoint(i1 %0, i32 %1, i32 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr w9, w2, #0x800000
; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: and w8, w9, w8
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: tst w9, w8
; CHECK-NEXT: cset w8, eq
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -188,8 +187,7 @@ define i1 @test_disjoint2(i1 %0, i32 %1, i32 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr w9, w2, #0x800000
; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: and w8, w9, w8
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: tst w9, w8
; CHECK-NEXT: cset w8, gt
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -209,8 +207,7 @@ define i1 @test_disjoint3(i1 %0, i32 %1, i32 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr w9, w2, #0x800000
; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: and w8, w9, w8
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: tst w9, w8
; CHECK-NEXT: cset w8, mi
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -273,8 +270,7 @@ define i1 @test_disjoint_inverse(i1 %0, i32 %1, i32 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr w9, w2, #0x800000
; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: bic w8, w9, w8
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: bics wzr, w9, w8
; CHECK-NEXT: cset w8, eq
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -295,8 +291,7 @@ define i1 @test_disjoint2_inverse(i1 %0, i32 %1, i32 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr w9, w2, #0x800000
; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: bic w8, w9, w8
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: bics wzr, w9, w8
; CHECK-NEXT: cset w8, gt
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -317,8 +312,7 @@ define i1 @test_disjoint3_inverse(i1 %0, i32 %1, i32 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr w9, w2, #0x800000
; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: bic w8, w9, w8
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: bics wzr, w9, w8
; CHECK-NEXT: cset w8, mi
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -339,8 +333,7 @@ define i1 @test_disjoint_64(i1 %0, i64 %1, i64 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr x9, x2, #0x80000000000000
; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: and x8, x9, x8
-; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: tst x9, x8
; CHECK-NEXT: cset w8, eq
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -360,8 +353,7 @@ define i1 @test_disjoint2_64(i1 %0, i64 %1, i64 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr x9, x2, #0x80000000000000
; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: and x8, x9, x8
-; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: tst x9, x8
; CHECK-NEXT: cset w8, gt
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -381,8 +373,7 @@ define i1 @test_disjoint3_64(i1 %0, i64 %1, i64 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr x9, x2, #0x80000000000000
; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: and x8, x9, x8
-; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: tst x9, x8
; CHECK-NEXT: cset w8, mi
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -445,8 +436,7 @@ define i1 @test_disjoint_inverse_64(i1 %0, i64 %1, i64 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr x9, x2, #0x80000000000000
; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: bic x8, x9, x8
-; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: bics xzr, x9, x8
; CHECK-NEXT: cset w8, eq
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -467,8 +457,7 @@ define i1 @test_disjoint2_inverse_64(i1 %0, i64 %1, i64 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr x9, x2, #0x80000000000000
; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: bic x8, x9, x8
-; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: bics xzr, x9, x8
; CHECK-NEXT: cset w8, gt
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1
@@ -489,8 +478,7 @@ define i1 @test_disjoint3_inverse_64(i1 %0, i64 %1, i64 %2) {
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: orr x9, x2, #0x80000000000000
; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: bic x8, x9, x8
-; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: bics xzr, x9, x8
; CHECK-NEXT: cset w8, mi
; CHECK-NEXT: orr w8, w0, w8
; CHECK-NEXT: and w0, w8, #0x1