[llvm] ad2f7fd - [AtomicExpand] Make floating point conversion happen before fence insertion

Kai Luo via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 30 18:55:09 PDT 2022


Author: Kai Luo
Date: 2022-08-31T09:54:58+08:00
New Revision: ad2f7fd286f15b6ff10f35bc3a9e069e48fb98de

URL: https://github.com/llvm/llvm-project/commit/ad2f7fd286f15b6ff10f35bc3a9e069e48fb98de
DIFF: https://github.com/llvm/llvm-project/commit/ad2f7fd286f15b6ff10f35bc3a9e069e48fb98de.diff

LOG: [AtomicExpand] Make floating point conversion happen before fence insertion

IIUC, the conversion part is not part of atomic operations and fences should be put around converted atomic operations.
This also fixes atomic load of floating point values which requires fence on PowerPC.

Reviewed By: efriedma

Differential Revision: https://reviews.llvm.org/D127609

Added: 
    

Modified: 
    llvm/lib/CodeGen/AtomicExpandPass.cpp
    llvm/test/CodeGen/PowerPC/cfence-double.ll
    llvm/test/CodeGen/PowerPC/cfence-float.ll
    llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
    llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index ad51bab8f30b3..0c8956081f7ce 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -221,6 +221,31 @@ bool AtomicExpand::runOnFunction(Function &F) {
       }
     }
 
+    if (LI && TLI->shouldCastAtomicLoadInIR(LI) ==
+                  TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
+      I = LI = convertAtomicLoadToIntegerType(LI);
+      MadeChange = true;
+    } else if (SI &&
+               TLI->shouldCastAtomicStoreInIR(SI) ==
+                   TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
+      I = SI = convertAtomicStoreToIntegerType(SI);
+      MadeChange = true;
+    } else if (RMWI &&
+               TLI->shouldCastAtomicRMWIInIR(RMWI) ==
+                   TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
+      I = RMWI = convertAtomicXchgToIntegerType(RMWI);
+      MadeChange = true;
+    } else if (CASI) {
+      // TODO: when we're ready to make the change at the IR level, we can
+      // extend convertCmpXchgToInteger for floating point too.
+      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
+        // TODO: add a TLI hook to control this so that each target can
+        // convert to lowering the original type one at a time.
+        I = CASI = convertCmpXchgToIntegerType(CASI);
+        MadeChange = true;
+      }
+    }
+
     if (TLI->shouldInsertFencesForAtomic(I)) {
       auto FenceOrdering = AtomicOrdering::Monotonic;
       if (LI && isAcquireOrStronger(LI->getOrdering())) {
@@ -253,31 +278,11 @@ bool AtomicExpand::runOnFunction(Function &F) {
       }
     }
 
-    if (LI) {
-      if (TLI->shouldCastAtomicLoadInIR(LI) ==
-          TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
-        // TODO: add a TLI hook to control this so that each target can
-        // convert to lowering the original type one at a time.
-        LI = convertAtomicLoadToIntegerType(LI);
-        assert(LI->getType()->isIntegerTy() && "invariant broken");
-        MadeChange = true;
-      }
-
+    if (LI)
       MadeChange |= tryExpandAtomicLoad(LI);
-    } else if (SI) {
-      if (TLI->shouldCastAtomicStoreInIR(SI) ==
-          TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
-        // TODO: add a TLI hook to control this so that each target can
-        // convert to lowering the original type one at a time.
-        SI = convertAtomicStoreToIntegerType(SI);
-        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
-               "invariant broken");
-        MadeChange = true;
-      }
-
-      if (tryExpandAtomicStore(SI))
-        MadeChange = true;
-    } else if (RMWI) {
+    else if (SI)
+      MadeChange |= tryExpandAtomicStore(SI);
+    else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
       // - into a load if it is idempotent
       // - into a Cmpxchg/LL-SC loop otherwise
@@ -287,15 +292,6 @@ bool AtomicExpand::runOnFunction(Function &F) {
         MadeChange = true;
       } else {
         AtomicRMWInst::BinOp Op = RMWI->getOperation();
-        if (TLI->shouldCastAtomicRMWIInIR(RMWI) ==
-            TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
-          // TODO: add a TLI hook to control this so that each target can
-          // convert to lowering the original type one at a time.
-          RMWI = convertAtomicXchgToIntegerType(RMWI);
-          assert(RMWI->getValOperand()->getType()->isIntegerTy() &&
-                 "invariant broken");
-          MadeChange = true;
-        }
         unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
         unsigned ValueSize = getAtomicOpSize(RMWI);
         if (ValueSize < MinCASSize &&
@@ -307,22 +303,8 @@ bool AtomicExpand::runOnFunction(Function &F) {
 
         MadeChange |= tryExpandAtomicRMW(RMWI);
       }
-    } else if (CASI) {
-      // TODO: when we're ready to make the change at the IR level, we can
-      // extend convertCmpXchgToInteger for floating point too.
-      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
-             "unimplemented - floating point not legal at IR level");
-      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
-        // TODO: add a TLI hook to control this so that each target can
-        // convert to lowering the original type one at a time.
-        CASI = convertCmpXchgToIntegerType(CASI);
-        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
-               "invariant broken");
-        MadeChange = true;
-      }
-
+    } else if (CASI)
       MadeChange |= tryExpandAtomicCmpXchg(CASI);
-    }
   }
   return MadeChange;
 }

diff --git a/llvm/test/CodeGen/PowerPC/cfence-double.ll b/llvm/test/CodeGen/PowerPC/cfence-double.ll
index dfff8ab5a824b..bfd408acefcc5 100644
--- a/llvm/test/CodeGen/PowerPC/cfence-double.ll
+++ b/llvm/test/CodeGen/PowerPC/cfence-double.ll
@@ -1,12 +1,28 @@
-; REQUIRES: asserts
-; RUN: not --crash llc -opaque-pointers -mtriple=powerpc64le-unknown-unknown \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -opaque-pointers -mtriple=powerpc64le-unknown-unknown \
+; RUN:   < %s 2>&1 | FileCheck --check-prefix=CHECK-LE %s
+; RUN: llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
-; RUN:   < %s 2>&1 | FileCheck %s
-
-; CHECK: Assertion{{.*}}VT.isInteger() && Operand.getValueType().isInteger() && "Invalid ANY_EXTEND!"
 
 define double @foo(double* %dp) {
+; CHECK-LE-LABEL: foo:
+; CHECK-LE:       # %bb.0: # %entry
+; CHECK-LE-NEXT:    ld 3, 0(3)
+; CHECK-LE-NEXT:    cmpd 7, 3, 3
+; CHECK-LE-NEXT:    mtfprd 1, 3
+; CHECK-LE-NEXT:    bne- 7, .+4
+; CHECK-LE-NEXT:    isync
+; CHECK-LE-NEXT:    blr
+;
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ld 3, 0(3)
+; CHECK-NEXT:    cmpd 7, 3, 3
+; CHECK-NEXT:    bne- 7, .+4
+; CHECK-NEXT:    isync
+; CHECK-NEXT:    std 3, -8(1)
+; CHECK-NEXT:    lfd 1, -8(1)
+; CHECK-NEXT:    blr
 entry:
   %0 = load atomic double, double* %dp acquire, align 8
   ret double %0

diff --git a/llvm/test/CodeGen/PowerPC/cfence-float.ll b/llvm/test/CodeGen/PowerPC/cfence-float.ll
index b85112536b3d4..2849da6c33fff 100644
--- a/llvm/test/CodeGen/PowerPC/cfence-float.ll
+++ b/llvm/test/CodeGen/PowerPC/cfence-float.ll
@@ -1,12 +1,30 @@
-; REQUIRES: asserts
-; RUN: not --crash llc -opaque-pointers -mtriple=powerpc64le-unknown-unknown \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -opaque-pointers -mtriple=powerpc64le-unknown-unknown \
+; RUN:   < %s 2>&1 | FileCheck --check-prefix=CHECK-LE %s
+; RUN: llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
 ; RUN:   < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
-; RUN:   < %s 2>&1 | FileCheck %s
-
-; CHECK: Assertion{{.*}}VT.isInteger() && Operand.getValueType().isInteger() && "Invalid ANY_EXTEND!"
 
 define float @bar(float* %fp) {
+; CHECK-LE-LABEL: bar:
+; CHECK-LE:       # %bb.0: # %entry
+; CHECK-LE-NEXT:    lwz 3, 0(3)
+; CHECK-LE-NEXT:    mtfprd 0, 3
+; CHECK-LE-NEXT:    cmpd 7, 3, 3
+; CHECK-LE-NEXT:    xxsldwi 0, 0, 0, 1
+; CHECK-LE-NEXT:    bne- 7, .+4
+; CHECK-LE-NEXT:    isync
+; CHECK-LE-NEXT:    xscvspdpn 1, 0
+; CHECK-LE-NEXT:    blr
+;
+; CHECK-LABEL: bar:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lwz 3, 0(3)
+; CHECK-NEXT:    cmpd 7, 3, 3
+; CHECK-NEXT:    bne- 7, .+4
+; CHECK-NEXT:    isync
+; CHECK-NEXT:    stw 3, -4(1)
+; CHECK-NEXT:    lfs 1, -4(1)
+; CHECK-NEXT:    blr
 entry:
   %0 = load atomic float, float* %fp acquire, align 4
   ret float %0

diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
index b3ad5f7a8a27b..92b75d0006f24 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
@@ -8,8 +8,8 @@ define double @foo(double* %dp) {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i64, ptr [[DP:%.*]] monotonic, align 8
+; CHECK-NEXT:    call void @llvm.ppc.cfence.i64(i64 [[TMP0]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[TMP0]] to double
-; CHECK-NEXT:    call void @llvm.ppc.cfence.f64(double [[TMP1]])
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
 entry:

diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
index b918ae3fca9ed..dfbaef9c8e1bf 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
@@ -8,8 +8,8 @@ define float @bar(float* %fp) {
 ; CHECK-LABEL: @bar(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i32, ptr [[FP:%.*]] monotonic, align 4
+; CHECK-NEXT:    call void @llvm.ppc.cfence.i32(i32 [[TMP0]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 [[TMP0]] to float
-; CHECK-NEXT:    call void @llvm.ppc.cfence.f32(float [[TMP1]])
 ; CHECK-NEXT:    ret float [[TMP1]]
 ;
 entry:


        


More information about the llvm-commits mailing list