[llvm] [AtomicExpand][RISCV] Call shouldExpandAtomicRMWInIR before widenPartwordAtomicRMW (PR #80947)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 6 23:41:53 PST 2024


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/80947

This gives the target a chance to keep an atomicrmw op that is smaller than the minimum cmpxchg size. This is needed to support the Zabha extension for RISC-V, which provides i8/i16 atomicrmw operations but does not provide i8/i16 cmpxchg or LR/SC instructions.
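
For illustration, the target-side hook could look roughly like the sketch below. This is not part of this patch, and MyTargetLowering and Subtarget.hasZabha() are hypothetical stand-ins for a real target class and subtarget feature query:

    // A hedged sketch of a target override; MyTargetLowering and
    // hasZabha() are hypothetical.
    #include "llvm/CodeGen/TargetLowering.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    TargetLowering::AtomicExpansionKind
    MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
      unsigned Size = AI->getType()->getPrimitiveSizeInBits();
      if (Size == 8 || Size == 16) {
        // With a Zabha-like feature, i8/i16 atomicrmw maps directly to
        // an AMO instruction, so no expansion is needed even though the
        // minimum cmpxchg size is 32 bits.
        if (Subtarget.hasZabha())
          return AtomicExpansionKind::None;
        // Otherwise request the masked-intrinsic expansion; with this
        // patch, the pass widens And/Or/Xor first and asks again.
        return AtomicExpansionKind::MaskedIntrinsic;
      }
      return AtomicExpansionKind::None;
    }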

This delays the widening until after the target has requested LLSC/CmpXChg/MaskedIntrinsic expansion. Once we widen, we call shouldExpandAtomicRMWInIR again to give the target another chance to decide how to handle the widened operation.
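
Why only And/Or/Xor can be widened this way: the widened word-sized operation must leave the bytes surrounding the accessed part unchanged, and for these three ops that can be arranged purely by how the operand is extended, which is what widenPartwordAtomicRMW does. A standalone sketch of the operand construction, for illustration only:

    #include <cstdint>

    // Widen an i8 'and' operand at byte offset Ofs into an i32 word
    // operand. The untouched bytes are ANDed with all-ones, so the
    // wider atomic op preserves them.
    uint32_t widenAndOperand(uint8_t V, unsigned Ofs) {
      return (uint32_t(V) << (Ofs * 8)) | ~(uint32_t(0xFF) << (Ofs * 8));
    }

    // For 'or' and 'xor', the untouched bytes are OR/XORed with zeros,
    // which likewise leaves them unchanged.
    uint32_t widenOrXorOperand(uint8_t V, unsigned Ofs) {
      return uint32_t(V) << (Ofs * 8);
    }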

I considered making the targets return AtomicExpansionKind::Expand or a new expansion kind for And/Or/Xor, but that would have required the targets to special-case And/Or/Xor, which they do not currently do.

This should make it easier to implement #80192.

From 9ff0961fc519eb524b7a7334107a4b32f4dfca37 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 6 Feb 2024 22:43:14 -0800
Subject: [PATCH] [AtomicExpand][RISCV] Call shouldExpandAtomicRMWInIR before
 widenPartwordAtomicRMW.

This gives the target a chance to keep an atomicrmw op that is
smaller than the minimum cmpxchg size. This is needed to support
the Zabha extension for RISC-V, which provides i8/i16 atomicrmw
operations but does not provide i8/i16 cmpxchg or LR/SC instructions.

This delays the widening until after the target has requested
LLSC/CmpXChg/MaskedIntrinsic expansion. Once we widen, we call
shouldExpandAtomicRMWInIR again to give the target another chance
to decide how to handle the widened operation.

I considered making the targets return AtomicExpansionKind::Expand
or a new expansion kind for And/Or/Xor, but that would have required
the targets to special-case And/Or/Xor, which they do not currently do.

This should make it easier to implement #80192.
---
 llvm/lib/CodeGen/AtomicExpandPass.cpp | 39 ++++++++++++++++-----------
 1 file changed, 23 insertions(+), 16 deletions(-)

diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index ccf3e9ec64921..faa3edb2b03c8 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -322,16 +322,6 @@ bool AtomicExpand::runOnFunction(Function &F) {
       if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
         MadeChange = true;
       } else {
-        AtomicRMWInst::BinOp Op = RMWI->getOperation();
-        unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
-        unsigned ValueSize = getAtomicOpSize(RMWI);
-        if (ValueSize < MinCASSize &&
-            (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
-             Op == AtomicRMWInst::And)) {
-          RMWI = widenPartwordAtomicRMW(RMWI);
-          MadeChange = true;
-        }
-
         MadeChange |= tryExpandAtomicRMW(RMWI);
       }
     } else if (CASI)
@@ -607,6 +597,17 @@ bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
     return true;
   }
   case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
+    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
+    unsigned ValueSize = getAtomicOpSize(AI);
+    if (ValueSize < MinCASSize) {
+      AtomicRMWInst::BinOp Op = AI->getOperation();
+      // Widen And/Or/Xor and give the target another chance at expanding it.
+      if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
+          Op == AtomicRMWInst::And) {
+        tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
+        return true;
+      }
+    }
     expandAtomicRMWToMaskedIntrinsic(AI);
     return true;
   }
@@ -845,6 +846,14 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
 /// part of the value.
 void AtomicExpand::expandPartwordAtomicRMW(
     AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
+  // Widen And/Or/Xor and give the target another chance at expanding it.
+  AtomicRMWInst::BinOp Op = AI->getOperation();
+  if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
+      Op == AtomicRMWInst::And) {
+    tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
+    return;
+  }
+
   AtomicOrdering MemOpOrder = AI->getOrdering();
   SyncScope::ID SSID = AI->getSyncScopeID();
 
@@ -855,18 +864,16 @@ void AtomicExpand::expandPartwordAtomicRMW(
                        AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
 
   Value *ValOperand_Shifted = nullptr;
-  if (AI->getOperation() == AtomicRMWInst::Xchg ||
-      AI->getOperation() == AtomicRMWInst::Add ||
-      AI->getOperation() == AtomicRMWInst::Sub ||
-      AI->getOperation() == AtomicRMWInst::Nand) {
+  if (Op == AtomicRMWInst::Xchg || Op == AtomicRMWInst::Add ||
+      Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Nand) {
     ValOperand_Shifted =
         Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                           PMV.ShiftAmt, "ValOperand_Shifted");
   }
 
   auto PerformPartwordOp = [&](IRBuilderBase &Builder, Value *Loaded) {
-    return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
-                                 ValOperand_Shifted, AI->getValOperand(), PMV);
+    return performMaskedAtomicOp(Op, Builder, Loaded, ValOperand_Shifted,
+                                 AI->getValOperand(), PMV);
   };
 
   Value *OldResult;


