[llvm] [AArch64] Use the same fast math preservation for MachineCombiner reassociation as X86/PowerPC/RISCV. (PR #72820)

via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 19 19:26:31 PST 2023


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-aarch64

Author: Craig Topper (topperc)

<details>
<summary>Changes</summary>

Don't blindly copy the original flags from the pre-reassociated instructions.
This copied the integer poison flags, which are not safe to preserve after reassociation.
    
For the FP flags, I think we should only keep the intersection of
the flags. Override setSpecialOperandAttr to do this.

Fixes #<!-- -->72777.

---
Full diff: https://github.com/llvm/llvm-project/pull/72820.diff


4 Files Affected:

- (modified) llvm/lib/Target/AArch64/AArch64InstrInfo.cpp (+21) 
- (modified) llvm/lib/Target/AArch64/AArch64InstrInfo.h (+5) 
- (modified) llvm/test/CodeGen/AArch64/machine-combiner-reassociate.mir (+8-8) 
- (added) llvm/test/CodeGen/AArch64/pr72777.ll (+24) 


``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 6fdf5363bae2928..1bc4b71dedb656f 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -7828,6 +7828,27 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     MI->setFlags(Flags);
 }
 
+/// This is an architecture-specific helper function of reassociateOps.
+/// Set special operand attributes for new instructions after reassociation.
+void AArch64InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
+                                             MachineInstr &OldMI2,
+                                             MachineInstr &NewMI1,
+                                             MachineInstr &NewMI2) const {
+  // Propagate FP flags from the original instructions.
+  // But clear poison-generating flags because those may not be valid now.
+  // TODO: There should be a helper function for copying only fast-math-flags.
+  uint32_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
+  NewMI1.setFlags(IntersectedFlags);
+  NewMI1.clearFlag(MachineInstr::MIFlag::NoSWrap);
+  NewMI1.clearFlag(MachineInstr::MIFlag::NoUWrap);
+  NewMI1.clearFlag(MachineInstr::MIFlag::IsExact);
+
+  NewMI2.setFlags(IntersectedFlags);
+  NewMI2.clearFlag(MachineInstr::MIFlag::NoSWrap);
+  NewMI2.clearFlag(MachineInstr::MIFlag::NoUWrap);
+  NewMI2.clearFlag(MachineInstr::MIFlag::IsExact);
+}
+
 /// Replace csincr-branch sequence by simple conditional branch
 ///
 /// Examples:
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index a934103c90cbf92..c78370fe6022caa 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -299,6 +299,11 @@ class AArch64InstrInfo final : public AArch64GenInstrInfo {
       SmallVectorImpl<MachineInstr *> &InsInstrs,
       SmallVectorImpl<MachineInstr *> &DelInstrs,
       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
+
+  void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
+                             MachineInstr &NewMI1,
+                             MachineInstr &NewMI2) const override;
+
   /// AArch64 supports MachineCombiner.
   bool useMachineCombiner() const override;
 
diff --git a/llvm/test/CodeGen/AArch64/machine-combiner-reassociate.mir b/llvm/test/CodeGen/AArch64/machine-combiner-reassociate.mir
index 4a2fc7e87bb99a3..525f6dd05c6c658 100644
--- a/llvm/test/CodeGen/AArch64/machine-combiner-reassociate.mir
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-reassociate.mir
@@ -91,11 +91,11 @@ body:             |
 
 # Check that flags on the instructions are preserved after reassociation.
 # CHECK-LABEL: name: fadd_flags
-# CHECK:             [[ADD1:%[0-9]+]]:fpr32 = nsz FADDSrr %0, %1, implicit $fpcr
-# CHECK-SAFE-NEXT:   [[ADD2:%[0-9]+]]:fpr32 = nnan FADDSrr killed [[ADD1]], %2, implicit $fpcr
-# CHECK-SAFE-NEXT:   [[ADD3:%[0-9]+]]:fpr32 = ninf FADDSrr killed [[ADD2]], %3, implicit $fpcr
-# CHECK-UNSAFE-NEXT: [[ADD2:%[0-9]+]]:fpr32 = nnan FADDSrr %2, %3, implicit $fpcr
-# CHECK-UNSAFE-NEXT: [[ADD3:%[0-9]+]]:fpr32 = ninf FADDSrr killed [[ADD1]], killed [[ADD2]], implicit $fpcr
+# CHECK:             [[ADD1:%[0-9]+]]:fpr32 = nnan ninf nsz FADDSrr %0, %1, implicit $fpcr
+# CHECK-SAFE-NEXT:   [[ADD2:%[0-9]+]]:fpr32 = nnan nsz FADDSrr killed [[ADD1]], %2, implicit $fpcr
+# CHECK-SAFE-NEXT:   [[ADD3:%[0-9]+]]:fpr32 = ninf nsz FADDSrr killed [[ADD2]], %3, implicit $fpcr
+# CHECK-UNSAFE-NEXT: [[ADD2:%[0-9]+]]:fpr32 = nsz FADDSrr %2, %3, implicit $fpcr
+# CHECK-UNSAFE-NEXT: [[ADD3:%[0-9]+]]:fpr32 = nsz FADDSrr killed [[ADD1]], killed [[ADD2]], implicit $fpcr
 ---
 name:            fadd_flags
 alignment:       4
@@ -125,8 +125,8 @@ body:             |
     %2:fpr32 = COPY $s2
     %1:fpr32 = COPY $s1
     %0:fpr32 = COPY $s0
-    %4:fpr32 = nsz FADDSrr %0, %1, implicit $fpcr
-    %5:fpr32 = nnan FADDSrr killed %4, %2, implicit $fpcr
-    %6:fpr32 = ninf FADDSrr killed %5, %3, implicit $fpcr
+    %4:fpr32 = nsz nnan ninf FADDSrr %0, %1, implicit $fpcr
+    %5:fpr32 = nsz nnan FADDSrr killed %4, %2, implicit $fpcr
+    %6:fpr32 = nsz ninf FADDSrr killed %5, %3, implicit $fpcr
     $s0 = COPY %6
     RET_ReallyLR implicit $s0
diff --git a/llvm/test/CodeGen/AArch64/pr72777.ll b/llvm/test/CodeGen/AArch64/pr72777.ll
new file mode 100644
index 000000000000000..e9021d605f1fe4a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr72777.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+define i64 @f(i64 %0, i64 %1) {
+; CHECK-LABEL: f:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr x9, x1, #0x1
+; CHECK-NEXT:    add x10, x0, x0
+; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-NEXT:    add x9, x9, x10
+; CHECK-NEXT:    lsl x10, x9, #1
+; CHECK-NEXT:    cmp x9, #0
+; CHECK-NEXT:    cinv x8, x8, ge
+; CHECK-NEXT:    cmp x9, x10, asr #1
+; CHECK-NEXT:    csel x0, x8, x10, ne
+; CHECK-NEXT:    ret
+  %3 = or i64 1, %1
+  %4 = add i64 %3, %0
+  %5 = add nsw i64 %4, %0
+  %6 = call i64 @llvm.sshl.sat.i64(i64 %5, i64 1)
+  ret i64 %6
+}
+
+declare i64 @llvm.sshl.sat.i64(i64, i64)

``````````

</details>


https://github.com/llvm/llvm-project/pull/72820


More information about the llvm-commits mailing list