[llvm] [AArch64] Use the same fast math preservation for MachineCombiner reassociation as X86/PowerPC/RISCV. (PR #72820)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Nov 19 19:26:01 PST 2023
https://github.com/topperc created https://github.com/llvm/llvm-project/pull/72820
Don't blindly copy the original flags from the pre-reassociated instructions.
This copied the integer poison flags, which are not safe to preserve after reassociation.
For the FP flags, I think we should only keep the intersection of
the flags. Override setSpecialOperandAttr to do this.
Fixes #72777.
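As a reduced, hand-written illustration (not taken from the patch or the issue) of why the
integer flags cannot simply be copied: in IR like the following, only the outer add carries
nsw, and that guarantee is tied to the original association of the operands.

  define i64 @example(i64 %x, i64 %y) {
    %a = add i64 %x, %y        ; may wrap
    %b = add nsw i64 %a, %x    ; no-signed-wrap holds only for ((%x + %y) + %x)
    ret i64 %b
  }

If the MachineCombiner reassociates this into (%x + %x) + %y and blindly copies the original
flags, the new inner add of %x + %x ends up marked nsw even though nothing in the original
code rules out that addition overflowing; a later flag-aware optimization can then miscompile
the function, which is what the pr72777.ll test added below exercises.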
From d8b126fe5cd349140a4d17d4f0e9fffa6206f54d Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sun, 19 Nov 2023 18:10:52 -0800
Subject: [PATCH 1/2] [AArch64] Add test case for pr72777. NFC
---
llvm/test/CodeGen/AArch64/pr72777.ll | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/pr72777.ll
diff --git a/llvm/test/CodeGen/AArch64/pr72777.ll b/llvm/test/CodeGen/AArch64/pr72777.ll
new file mode 100644
index 000000000000000..4fcb54456bc0391
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr72777.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+define i64 @f(i64 %0, i64 %1) {
+; CHECK-LABEL: f:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr x8, x1, #0x1
+; CHECK-NEXT: add x9, x0, x0
+; CHECK-NEXT: mov x10, #-9223372036854775808 // =0x8000000000000000
+; CHECK-NEXT: adds x8, x8, x9
+; CHECK-NEXT: lsl x9, x8, #1
+; CHECK-NEXT: cinv x10, x10, ge
+; CHECK-NEXT: cmp x8, x9, asr #1
+; CHECK-NEXT: csel x0, x10, x9, ne
+; CHECK-NEXT: ret
+ %3 = or i64 1, %1
+ %4 = add i64 %3, %0
+ %5 = add nsw i64 %4, %0
+ %6 = call i64 @llvm.sshl.sat.i64(i64 %5, i64 1)
+ ret i64 %6
+}
+
+declare i64 @llvm.sshl.sat.i64(i64, i64)
From 947d0a03a6a267cfd5cb81a038c70fd613fb210b Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sun, 19 Nov 2023 19:19:32 -0800
Subject: [PATCH 2/2] [AArch64] Use the same fast math preservation for
MachineCombiner reassociation as X86/PowerPC/RISCV.
Don't blindly copy the original flags from the pre-reassociated instructions.
This copies the integer poison flags, which are not safe to preserve after reassociation.
For the FP flags, I think we should only keep the intersection of
the flags. Override setSpecialOperandAttr to do this.
Fixes #72777.
---
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 21 +++++++++++++++++++
llvm/lib/Target/AArch64/AArch64InstrInfo.h | 5 +++++
.../AArch64/machine-combiner-reassociate.mir | 16 +++++++-------
llvm/test/CodeGen/AArch64/pr72777.ll | 17 ++++++++-------
4 files changed, 43 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 6fdf5363bae2928..1bc4b71dedb656f 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -7828,6 +7828,27 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
MI->setFlags(Flags);
}
+/// This is an architecture-specific helper function of reassociateOps.
+/// Set special operand attributes for new instructions after reassociation.
+void AArch64InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
+ MachineInstr &OldMI2,
+ MachineInstr &NewMI1,
+ MachineInstr &NewMI2) const {
+ // Propagate FP flags from the original instructions.
+ // But clear poison-generating flags because those may not be valid now.
+ // TODO: There should be a helper function for copying only fast-math-flags.
+ uint32_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
+ NewMI1.setFlags(IntersectedFlags);
+ NewMI1.clearFlag(MachineInstr::MIFlag::NoSWrap);
+ NewMI1.clearFlag(MachineInstr::MIFlag::NoUWrap);
+ NewMI1.clearFlag(MachineInstr::MIFlag::IsExact);
+
+ NewMI2.setFlags(IntersectedFlags);
+ NewMI2.clearFlag(MachineInstr::MIFlag::NoSWrap);
+ NewMI2.clearFlag(MachineInstr::MIFlag::NoUWrap);
+ NewMI2.clearFlag(MachineInstr::MIFlag::IsExact);
+}
+
/// Replace csincr-branch sequence by simple conditional branch
///
/// Examples:
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index a934103c90cbf92..c78370fe6022caa 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -299,6 +299,11 @@ class AArch64InstrInfo final : public AArch64GenInstrInfo {
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
+
+ void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
+ MachineInstr &NewMI1,
+ MachineInstr &NewMI2) const override;
+
/// AArch64 supports MachineCombiner.
bool useMachineCombiner() const override;
diff --git a/llvm/test/CodeGen/AArch64/machine-combiner-reassociate.mir b/llvm/test/CodeGen/AArch64/machine-combiner-reassociate.mir
index 4a2fc7e87bb99a3..525f6dd05c6c658 100644
--- a/llvm/test/CodeGen/AArch64/machine-combiner-reassociate.mir
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-reassociate.mir
@@ -91,11 +91,11 @@ body: |
# Check that flags on the instructions are preserved after reassociation.
# CHECK-LABEL: name: fadd_flags
-# CHECK: [[ADD1:%[0-9]+]]:fpr32 = nsz FADDSrr %0, %1, implicit $fpcr
-# CHECK-SAFE-NEXT: [[ADD2:%[0-9]+]]:fpr32 = nnan FADDSrr killed [[ADD1]], %2, implicit $fpcr
-# CHECK-SAFE-NEXT: [[ADD3:%[0-9]+]]:fpr32 = ninf FADDSrr killed [[ADD2]], %3, implicit $fpcr
-# CHECK-UNSAFE-NEXT: [[ADD2:%[0-9]+]]:fpr32 = nnan FADDSrr %2, %3, implicit $fpcr
-# CHECK-UNSAFE-NEXT: [[ADD3:%[0-9]+]]:fpr32 = ninf FADDSrr killed [[ADD1]], killed [[ADD2]], implicit $fpcr
+# CHECK: [[ADD1:%[0-9]+]]:fpr32 = nnan ninf nsz FADDSrr %0, %1, implicit $fpcr
+# CHECK-SAFE-NEXT: [[ADD2:%[0-9]+]]:fpr32 = nnan nsz FADDSrr killed [[ADD1]], %2, implicit $fpcr
+# CHECK-SAFE-NEXT: [[ADD3:%[0-9]+]]:fpr32 = ninf nsz FADDSrr killed [[ADD2]], %3, implicit $fpcr
+# CHECK-UNSAFE-NEXT: [[ADD2:%[0-9]+]]:fpr32 = nsz FADDSrr %2, %3, implicit $fpcr
+# CHECK-UNSAFE-NEXT: [[ADD3:%[0-9]+]]:fpr32 = nsz FADDSrr killed [[ADD1]], killed [[ADD2]], implicit $fpcr
---
name: fadd_flags
alignment: 4
@@ -125,8 +125,8 @@ body: |
%2:fpr32 = COPY $s2
%1:fpr32 = COPY $s1
%0:fpr32 = COPY $s0
- %4:fpr32 = nsz FADDSrr %0, %1, implicit $fpcr
- %5:fpr32 = nnan FADDSrr killed %4, %2, implicit $fpcr
- %6:fpr32 = ninf FADDSrr killed %5, %3, implicit $fpcr
+ %4:fpr32 = nsz nnan ninf FADDSrr %0, %1, implicit $fpcr
+ %5:fpr32 = nsz nnan FADDSrr killed %4, %2, implicit $fpcr
+ %6:fpr32 = nsz ninf FADDSrr killed %5, %3, implicit $fpcr
$s0 = COPY %6
RET_ReallyLR implicit $s0
diff --git a/llvm/test/CodeGen/AArch64/pr72777.ll b/llvm/test/CodeGen/AArch64/pr72777.ll
index 4fcb54456bc0391..e9021d605f1fe4a 100644
--- a/llvm/test/CodeGen/AArch64/pr72777.ll
+++ b/llvm/test/CodeGen/AArch64/pr72777.ll
@@ -4,14 +4,15 @@
define i64 @f(i64 %0, i64 %1) {
; CHECK-LABEL: f:
; CHECK: // %bb.0:
-; CHECK-NEXT: orr x8, x1, #0x1
-; CHECK-NEXT: add x9, x0, x0
-; CHECK-NEXT: mov x10, #-9223372036854775808 // =0x8000000000000000
-; CHECK-NEXT: adds x8, x8, x9
-; CHECK-NEXT: lsl x9, x8, #1
-; CHECK-NEXT: cinv x10, x10, ge
-; CHECK-NEXT: cmp x8, x9, asr #1
-; CHECK-NEXT: csel x0, x10, x9, ne
+; CHECK-NEXT: orr x9, x1, #0x1
+; CHECK-NEXT: add x10, x0, x0
+; CHECK-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-NEXT: add x9, x9, x10
+; CHECK-NEXT: lsl x10, x9, #1
+; CHECK-NEXT: cmp x9, #0
+; CHECK-NEXT: cinv x8, x8, ge
+; CHECK-NEXT: cmp x9, x10, asr #1
+; CHECK-NEXT: csel x0, x8, x10, ne
; CHECK-NEXT: ret
%3 = or i64 1, %1
%4 = add i64 %3, %0