[llvm] [RISCV] Add a pass to remove ADDI by reassociating to fold into load/store address. (PR #127151)

via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 13 17:05:21 PST 2025


llvmbot wrote:



@llvm/pr-subscribers-backend-risc-v

Author: Craig Topper (topperc)

<details>
<summary>Changes</summary>

SelectionDAG will not reassociate adds to the end of a chain if
there are multiple users of later additions. This prevents isel
from folding the immediate into a load/store address.
    
One easy way to see this is accessing an array in a struct with
two different indices. An ADDI will be used to get to the start
of the array, then two different SHXADD instructions will be used
to add the scaled indices. Finally, each SHXADD result will be
used by a different load instruction. We can remove the ADDI by
folding the offset into each load, as sketched below.
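
A hypothetical C++ reduction of that pattern (register names and exact
scheduling are illustrative; it mirrors the test_sh3add case in the new
fold-mem-offset.ll test, where the array sits at byte offset 400):

```cpp
struct S {
  long pad[50];  // places 'arr' at byte offset 400 on rv64
  long arr[100];
};

long sum(S *p, long i, long j) {
  // Before this pass (rv64 + Zba), roughly:
  //   addi   a3, a0, 400    # base of p->arr
  //   sh3add a1, a1, a3     # &p->arr[i]
  //   sh3add a0, a2, a3     # &p->arr[j]
  //   ld     a1, 0(a1)
  //   ld     a0, 0(a0)
  // After the pass, the ADDI is gone and each load absorbs the 400:
  //   sh3add a1, a1, a0
  //   sh3add a0, a2, a0
  //   ld     a1, 400(a1)
  //   ld     a0, 400(a0)
  return p->arr[i] + p->arr[j];
}
```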
    
This patch adds a new pass that analyzes how an ADDI constant
propagates through address arithmetic. If the arithmetic is only
used by load/store addresses and the offset is small enough, we
can adjust the load/store offsets and remove the ADDI. For
simplicity, we replace the ADDI with a COPY from its input
register, which can be cleaned up by other passes.
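
As a hypothetical trace of the analysis on code like the example above
(virtual register names invented; the map is the pass's RegToOffsetMap):

```cpp
//   %base = ADDI %p, 400        // map[%base] = 400
//   %a    = SH3ADD %i, %base    // %base is the unscaled operand: map[%a] = 400
//   %b    = SH3ADD %j, %base    // map[%b] = 400
//   LD %v0, %a, 0               // foldable: 0 + 400 = 400 fits in 12 bits
//   LD %v1, %b, 0               // foldable: 0 + 400 = 400 fits in 12 bits
//
// All uses of %base and of the derived registers are accounted for, and
// every leaf is a load/store address, so the loads are rewritten to offset
// 400 and %base becomes a COPY of %p. Had %base fed the *scaled* operand of
// an SH3ADD instead, its contribution would have been 400 << 3 = 3200, which
// does not fit in a signed 12-bit immediate, and the fold would be rejected.
```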
    
This pass is placed before MachineCSE to allow cleanups if some
instructions become common after removing offsets from their inputs.
    
RISCVMergeBaseOffset is modified to prevent a regression by handling
an ADDI becoming a COPY from X0.
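
The shape of that regression is visible in the fold-addi-loadstore.ll diff
below; a hypothetical before/after of the relevant lines:

```cpp
// before:  li  a0, 1           // ADDI a0, x0, 1
//          add a0, a1, a0
//          lbu a0, 400(a0)
//
// after:   add a0, a0, zero    // leftover COPY from x0
//          lbu a0, 401(a0)     // the 1 has been folded into the offset
```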

This pass gives a ~3% improvement in dynamic instruction count on 541.leela_r from SPEC2017 with the train data set.

---

Patch is 40.89 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/127151.diff


10 Files Affected:

- (modified) llvm/lib/Target/RISCV/CMakeLists.txt (+1) 
- (modified) llvm/lib/Target/RISCV/RISCV.h (+3) 
- (added) llvm/lib/Target/RISCV/RISCVFoldMemOffset.cpp (+289) 
- (modified) llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp (+7) 
- (modified) llvm/lib/Target/RISCV/RISCVTargetMachine.cpp (+2) 
- (modified) llvm/test/CodeGen/RISCV/O3-pipeline.ll (+1) 
- (modified) llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll (+4-5) 
- (added) llvm/test/CodeGen/RISCV/fold-mem-offset.ll (+733) 
- (modified) llvm/test/CodeGen/RISCV/split-offsets.ll (+10-13) 
- (modified) llvm/test/CodeGen/RISCV/xtheadmemidx.ll (+2-3) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index 9b23a5ab521c8..5d1ea50eba494 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -37,6 +37,7 @@ add_llvm_target(RISCVCodeGen
   RISCVMakeCompressible.cpp
   RISCVExpandAtomicPseudoInsts.cpp
   RISCVExpandPseudoInsts.cpp
+  RISCVFoldMemOffset.cpp
   RISCVFrameLowering.cpp
   RISCVGatherScatterLowering.cpp
   RISCVIndirectBranchTracking.cpp
diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index 851eea1352852..641e2eb4094f9 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -52,6 +52,9 @@ void initializeRISCVVectorPeepholePass(PassRegistry &);
 FunctionPass *createRISCVOptWInstrsPass();
 void initializeRISCVOptWInstrsPass(PassRegistry &);
 
+FunctionPass *createRISCVFoldMemOffsetPass();
+void initializeRISCVFoldMemOffsetPass(PassRegistry &);
+
 FunctionPass *createRISCVMergeBaseOffsetOptPass();
 void initializeRISCVMergeBaseOffsetOptPass(PassRegistry &);
 
diff --git a/llvm/lib/Target/RISCV/RISCVFoldMemOffset.cpp b/llvm/lib/Target/RISCV/RISCVFoldMemOffset.cpp
new file mode 100644
index 0000000000000..b61eda499fcd0
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVFoldMemOffset.cpp
@@ -0,0 +1,289 @@
+//===- RISCVFoldMemOffset.cpp - Fold ADDI into memory offsets ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// Look for ADDIs that can be removed by folding their immediate into later
+// load/store addresses. There may be other arithmetic instructions between the
+// ADDI and the load/store that we need to reassociate through. If the final
+// result of the arithmetic is only used by load/store addresses, we can fold
+// the offset into all of the loads/stores as long as it doesn't create an
+// offset that is too large.
+//
+//===---------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include <queue>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-fold-mem-offset"
+#define RISCV_FOLD_MEM_OFFSET_NAME "RISC-V Fold Memory Offset"
+
+namespace {
+
+class RISCVFoldMemOffset : public MachineFunctionPass {
+public:
+  static char ID;
+
+  RISCVFoldMemOffset() : MachineFunctionPass(ID) {}
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  bool foldOffset(Register OrigReg, int64_t InitialOffset,
+                  const MachineRegisterInfo &MRI,
+                  DenseMap<MachineInstr *, int64_t> &FoldableInstrs);
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override { return RISCV_FOLD_MEM_OFFSET_NAME; }
+};
+
+// Wrapper class around a std::optional to allow accumulation.
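+// Arithmetic on the offset is done in uint64_t so that overflow during
+// accumulation is well-defined; final offsets are range-checked with
+// isInt<12> before being used.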
+class FoldableOffset {
+  std::optional<int64_t> Offset;
+
+public:
+  bool hasValue() const { return Offset.has_value(); }
+  int64_t getValue() const { return *Offset; }
+
+  FoldableOffset &operator=(int64_t RHS) {
+    Offset = RHS;
+    return *this;
+  }
+
+  FoldableOffset &operator+=(int64_t RHS) {
+    if (!Offset)
+      Offset = RHS;
+    else
+      Offset = (uint64_t)*Offset + (uint64_t)RHS;
+    return *this;
+  }
+
+  FoldableOffset &operator-=(int64_t RHS) {
+    if (!Offset)
+      Offset = -(uint64_t)RHS;
+    else
+      Offset = (uint64_t)*Offset - (uint64_t)RHS;
+    return *this;
+  }
+
+  int64_t operator*() { return *Offset; }
+};
+
+} // end anonymous namespace
+
+char RISCVFoldMemOffset::ID = 0;
+INITIALIZE_PASS(RISCVFoldMemOffset, DEBUG_TYPE, RISCV_FOLD_MEM_OFFSET_NAME,
+                false, false)
+
+FunctionPass *llvm::createRISCVFoldMemOffsetPass() {
+  return new RISCVFoldMemOffset();
+}
+
+// Walk forward from the ADDI looking for arithmetic instructions we can
+// analyze or memory instructions that use it as part of their address
+// calculation. For each arithmetic instruction, we look up how the offset
+// contributes to the value in each register operand and use that information
+// to calculate the contribution to the output of this instruction.
+// Only addition and left shift are supported.
+// FIXME: Add multiplication by constant. The constant will be in a register.
+bool RISCVFoldMemOffset::foldOffset(
+    Register OrigReg, int64_t InitialOffset, const MachineRegisterInfo &MRI,
+    DenseMap<MachineInstr *, int64_t> &FoldableInstrs) {
+  // Map to hold how much the offset contributes to the value of this register.
+  DenseMap<Register, int64_t> RegToOffsetMap;
+
+  // Insert root offset into the map.
+  RegToOffsetMap[OrigReg] = InitialOffset;
+
+  std::queue<Register> Worklist;
+  Worklist.push(OrigReg);
+
+  while (!Worklist.empty()) {
+    Register Reg = Worklist.front();
+    Worklist.pop();
+
+    for (auto &User : MRI.use_nodbg_instructions(Reg)) {
+      FoldableOffset Offset;
+
+      switch (User.getOpcode()) {
+      default:
+        return false;
+      case RISCV::ADD:
+        if (auto I = RegToOffsetMap.find(User.getOperand(1).getReg());
+            I != RegToOffsetMap.end())
+          Offset = I->second;
+        if (auto I = RegToOffsetMap.find(User.getOperand(2).getReg());
+            I != RegToOffsetMap.end())
+          Offset += I->second;
+        break;
+      case RISCV::SH1ADD:
+        if (auto I = RegToOffsetMap.find(User.getOperand(1).getReg());
+            I != RegToOffsetMap.end())
+          Offset = (uint64_t)I->second << 1;
+        if (auto I = RegToOffsetMap.find(User.getOperand(2).getReg());
+            I != RegToOffsetMap.end())
+          Offset += I->second;
+        break;
+      case RISCV::SH2ADD:
+        if (auto I = RegToOffsetMap.find(User.getOperand(1).getReg());
+            I != RegToOffsetMap.end())
+          Offset = (uint64_t)I->second << 2;
+        if (auto I = RegToOffsetMap.find(User.getOperand(2).getReg());
+            I != RegToOffsetMap.end())
+          Offset += I->second;
+        break;
+      case RISCV::SH3ADD:
+        if (auto I = RegToOffsetMap.find(User.getOperand(1).getReg());
+            I != RegToOffsetMap.end())
+          Offset = (uint64_t)I->second << 3;
+        if (auto I = RegToOffsetMap.find(User.getOperand(2).getReg());
+            I != RegToOffsetMap.end())
+          Offset += I->second;
+        break;
+      case RISCV::ADD_UW:
+      case RISCV::SH1ADD_UW:
+      case RISCV::SH2ADD_UW:
+      case RISCV::SH3ADD_UW:
+        // Don't fold through the zero-extended input.
+        if (User.getOperand(1).getReg() == Reg)
+          return false;
+        if (auto I = RegToOffsetMap.find(User.getOperand(2).getReg());
+            I != RegToOffsetMap.end())
+          Offset = I->second;
+        break;
+      case RISCV::SLLI: {
+        unsigned ShAmt = User.getOperand(2).getImm();
+        if (auto I = RegToOffsetMap.find(User.getOperand(1).getReg());
+            I != RegToOffsetMap.end())
+          Offset = (uint64_t)I->second << ShAmt;
+        break;
+      }
+      case RISCV::LB:
+      case RISCV::LBU:
+      case RISCV::SB:
+      case RISCV::LH:
+      case RISCV::LH_INX:
+      case RISCV::LHU:
+      case RISCV::FLH:
+      case RISCV::SH:
+      case RISCV::SH_INX:
+      case RISCV::FSH:
+      case RISCV::LW:
+      case RISCV::LW_INX:
+      case RISCV::LWU:
+      case RISCV::FLW:
+      case RISCV::SW:
+      case RISCV::SW_INX:
+      case RISCV::FSW:
+      case RISCV::LD:
+      case RISCV::FLD:
+      case RISCV::SD:
+      case RISCV::FSD: {
+        // Can't fold into store value.
+        if (User.getOperand(0).getReg() == Reg)
+          return false;
+
+        // Existing offset must be immediate.
+        if (!User.getOperand(2).isImm())
+          return false;
+
+        // Require at least one operation between the ADDI and the load/store.
+        // We have other optimizations that should handle the simple case.
+        if (User.getOperand(1).getReg() == OrigReg)
+          return false;
+
+        auto I = RegToOffsetMap.find(User.getOperand(1).getReg());
+        if (I == RegToOffsetMap.end())
+          return false;
+
+        int64_t LocalOffset = User.getOperand(2).getImm();
+        assert(isInt<12>(LocalOffset));
+        int64_t CombinedOffset = (uint64_t)LocalOffset + (uint64_t)I->second;
+        if (!isInt<12>(CombinedOffset))
+          return false;
+
+        FoldableInstrs[&User] = CombinedOffset;
+        continue;
+      }
+      }
+
+      // If we reach here, we should have an accumulated offset.
+      assert(Offset.hasValue() && "Expected an offset");
+
+      // If the offset is new or changed, add the destination register to the
+      // work list.
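+      // A register may be revisited if it is reachable from the ADDI along
+      // more than one path (e.g. an ADD whose operands both carry offsets).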
+      int64_t OffsetVal = Offset.getValue();
+      auto P = RegToOffsetMap.try_emplace(User.getOperand(0).getReg(),
+                                          OffsetVal);
+      if (P.second) {
+        Worklist.push(User.getOperand(0).getReg());
+      } else if (P.first->second != OffsetVal) {
+        P.first->second = OffsetVal;
+        Worklist.push(User.getOperand(0).getReg());
+      }
+    }
+  }
+
+  return true;
+}
+
+bool RISCVFoldMemOffset::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(MF.getFunction()))
+    return false;
+
+  // This optimization may increase size by preventing compression.
+  if (MF.getFunction().hasOptSize())
+    return false;
+
+  const MachineRegisterInfo &MRI = MF.getRegInfo();
+  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+  const RISCVInstrInfo &TII = *ST.getInstrInfo();
+
+  bool MadeChange = false;
+  for (MachineBasicBlock &MBB : MF) {
+    for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
+      // FIXME: We can support ADDIW from an LUI+ADDIW pair if the result is
+      // equivalent to LUI+ADDI.
+      if (MI.getOpcode() != RISCV::ADDI)
+        continue;
+
+      // We only want to optimize register ADDIs.
+      if (!MI.getOperand(1).isReg() || !MI.getOperand(2).isImm())
+        continue;
+
+      int64_t Offset = MI.getOperand(2).getImm();
+      assert(isInt<12>(Offset));
+
+      DenseMap<MachineInstr *, int64_t> FoldableInstrs;
+
+      if (!foldOffset(MI.getOperand(0).getReg(), Offset, MRI, FoldableInstrs))
+        continue;
+
+      if (FoldableInstrs.empty())
+        continue;
+
+      // We can fold this ADDI.
+      // Rewrite all the instructions.
+      for (auto [MemMI, NewOffset] : FoldableInstrs)
+        MemMI->getOperand(2).setImm(NewOffset);
+
+      // Replace ADDI with a copy.
+      BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::COPY))
+          .add(MI.getOperand(0))
+          .add(MI.getOperand(1));
+      MI.eraseFromParent();
+      MadeChange = true;
+    }
+  }
+
+  return MadeChange;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
index bbbb1e1595982..3dab7d9bb0912 100644
--- a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
@@ -238,6 +238,13 @@ bool RISCVMergeBaseOffsetOpt::foldLargeOffset(MachineInstr &Hi,
     foldOffset(Hi, Lo, TailAdd, Offset);
     OffsetTail.eraseFromParent();
     return true;
+  } else if (OffsetTail.getOpcode() == RISCV::COPY &&
+             OffsetTail.getOperand(1).getReg() == RISCV::X0) {
+    // The fold-mem-offset pass can leave a COPY from X0 in place of an ADDI,
+    // and it might not have been eliminated yet.
+    foldOffset(Hi, Lo, TailAdd, 0);
+    OffsetTail.eraseFromParent();
+    return true;
   }
   return false;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 167dbb53c5950..89e017807363b 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -133,6 +133,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   initializeRISCVPostRAExpandPseudoPass(*PR);
   initializeRISCVMergeBaseOffsetOptPass(*PR);
   initializeRISCVOptWInstrsPass(*PR);
+  initializeRISCVFoldMemOffsetPass(*PR);
   initializeRISCVPreRAExpandPseudoPass(*PR);
   initializeRISCVExpandPseudoPass(*PR);
   initializeRISCVVectorPeepholePass(*PR);
@@ -590,6 +591,7 @@ void RISCVPassConfig::addMachineSSAOptimization() {
   addPass(createRISCVVectorPeepholePass());
   // TODO: Move this to pre regalloc
   addPass(createRISCVVMV0EliminationPass());
+  addPass(createRISCVFoldMemOffsetPass());
 
   TargetPassConfig::addMachineSSAOptimization();
 
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index 2646dfeca4eb6..194223eee69eb 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -98,6 +98,7 @@
 ; CHECK-NEXT:       Finalize ISel and expand pseudo-instructions
 ; CHECK-NEXT:       RISC-V Vector Peephole Optimization
 ; CHECK-NEXT:       RISC-V VMV0 Elimination
+; CHECK-NEXT:       RISC-V Fold Memory Offset
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       Early Tail Duplication
 ; CHECK-NEXT:       Optimize machine instruction PHIs
diff --git a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
index 59ba3652c89e9..80ee5776f76f1 100644
--- a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
+++ b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
@@ -1205,12 +1205,11 @@ define i32 @crash() {
 ;
 ; RV64I-LARGE-LABEL: crash:
 ; RV64I-LARGE:       # %bb.0: # %entry
-; RV64I-LARGE-NEXT:    li a0, 1
 ; RV64I-LARGE-NEXT:  .Lpcrel_hi15:
-; RV64I-LARGE-NEXT:    auipc a1, %pcrel_hi(.LCPI21_0)
-; RV64I-LARGE-NEXT:    ld a1, %pcrel_lo(.Lpcrel_hi15)(a1)
-; RV64I-LARGE-NEXT:    add a0, a1, a0
-; RV64I-LARGE-NEXT:    lbu a0, 400(a0)
+; RV64I-LARGE-NEXT:    auipc a0, %pcrel_hi(.LCPI21_0)
+; RV64I-LARGE-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi15)(a0)
+; RV64I-LARGE-NEXT:    add a0, a0, zero
+; RV64I-LARGE-NEXT:    lbu a0, 401(a0)
 ; RV64I-LARGE-NEXT:    seqz a0, a0
 ; RV64I-LARGE-NEXT:    sw a0, 0(zero)
 ; RV64I-LARGE-NEXT:    li a0, 0
diff --git a/llvm/test/CodeGen/RISCV/fold-mem-offset.ll b/llvm/test/CodeGen/RISCV/fold-mem-offset.ll
new file mode 100644
index 0000000000000..b12fa509b0bea
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/fold-mem-offset.ll
@@ -0,0 +1,733 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 | FileCheck %s --check-prefixes=CHECK,RV32I
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 | FileCheck %s --check-prefixes=CHECK,RV64I
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zba | FileCheck %s --check-prefixes=ZBA,RV32ZBA
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zba | FileCheck %s --check-prefixes=ZBA,RV64ZBA
+
+define i64 @test_sh3add(ptr %p, iXLen %x, iXLen %y) {
+; RV32I-LABEL: test_sh3add:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    slli a1, a1, 3
+; RV32I-NEXT:    slli a2, a2, 3
+; RV32I-NEXT:    add a1, a1, a0
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    lw a2, 480(a1)
+; RV32I-NEXT:    lw a1, 484(a1)
+; RV32I-NEXT:    lw a3, 400(a0)
+; RV32I-NEXT:    lw a0, 404(a0)
+; RV32I-NEXT:    add a1, a0, a1
+; RV32I-NEXT:    add a0, a3, a2
+; RV32I-NEXT:    sltu a2, a0, a3
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_sh3add:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    slli a1, a1, 3
+; RV64I-NEXT:    slli a2, a2, 3
+; RV64I-NEXT:    add a1, a1, a0
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    ld a1, 480(a1)
+; RV64I-NEXT:    ld a0, 400(a0)
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBA-LABEL: test_sh3add:
+; RV32ZBA:       # %bb.0: # %entry
+; RV32ZBA-NEXT:    sh3add a1, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a2, a0
+; RV32ZBA-NEXT:    lw a2, 480(a1)
+; RV32ZBA-NEXT:    lw a1, 484(a1)
+; RV32ZBA-NEXT:    lw a3, 400(a0)
+; RV32ZBA-NEXT:    lw a0, 404(a0)
+; RV32ZBA-NEXT:    add a1, a0, a1
+; RV32ZBA-NEXT:    add a0, a3, a2
+; RV32ZBA-NEXT:    sltu a2, a0, a3
+; RV32ZBA-NEXT:    add a1, a1, a2
+; RV32ZBA-NEXT:    ret
+;
+; RV64ZBA-LABEL: test_sh3add:
+; RV64ZBA:       # %bb.0: # %entry
+; RV64ZBA-NEXT:    sh3add a1, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a2, a0
+; RV64ZBA-NEXT:    ld a1, 480(a1)
+; RV64ZBA-NEXT:    ld a0, 400(a0)
+; RV64ZBA-NEXT:    add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+entry:
+  %b = getelementptr inbounds nuw i8, ptr %p, i64 400
+  %add = add iXLen %x, 10
+  %arrayidx = getelementptr inbounds nuw [100 x i64], ptr %b, i64 0, iXLen %add
+  %0 = load i64, ptr %arrayidx, align 8
+  %arrayidx2 = getelementptr inbounds nuw [100 x i64], ptr %b, i64 0, iXLen %y
+  %1 = load i64, ptr %arrayidx2, align 8
+  %add3 = add nsw i64 %1, %0
+  ret i64 %add3
+}
+
+define signext i32 @test_sh2add(ptr %p, iXLen %x, iXLen %y) {
+; RV32I-LABEL: test_sh2add:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    slli a2, a2, 2
+; RV32I-NEXT:    add a1, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    lw a1, 1200(a1)
+; RV32I-NEXT:    lw a0, 1240(a0)
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_sh2add:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    slli a1, a1, 2
+; RV64I-NEXT:    slli a2, a2, 2
+; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    lw a1, 1200(a1)
+; RV64I-NEXT:    lw a0, 1240(a0)
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBA-LABEL: test_sh2add:
+; RV32ZBA:       # %bb.0: # %entry
+; RV32ZBA-NEXT:    sh2add a1, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a2, a0
+; RV32ZBA-NEXT:    lw a1, 1200(a1)
+; RV32ZBA-NEXT:    lw a0, 1240(a0)
+; RV32ZBA-NEXT:    add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+;
+; RV64ZBA-LABEL: test_sh2add:
+; RV64ZBA:       # %bb.0: # %entry
+; RV64ZBA-NEXT:    sh2add a1, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a2, a0
+; RV64ZBA-NEXT:    lw a1, 1200(a1)
+; RV64ZBA-NEXT:    lw a0, 1240(a0)
+; RV64ZBA-NEXT:    addw a0, a0, a1
+; RV64ZBA-NEXT:    ret
+entry:
+  %c = getelementptr inbounds nuw i8, ptr %p, i64 1200
+  %arrayidx = getelementptr inbounds nuw [100 x i32], ptr %c, i64 0, iXLen %x
+  %0 = load i32, ptr %arrayidx, align 4
+  %add = add iXLen %y, 10
+  %arrayidx2 = getelementptr inbounds nuw [100 x i32], ptr %c, i64 0, iXLen %add
+  %1 = load i32, ptr %arrayidx2, align 4
+  %add3 = add nsw i32 %1, %0
+  ret i32 %add3
+}
+
+define signext i16 @test_sh1add(ptr %p, iXLen %x, iXLen %y) {
+; RV32I-LABEL: test_sh1add:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    slli a2, a2, 1
+; RV32I-NEXT:    add a1, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    lh a1, 1600(a1)
+; RV32I-NEXT:    lh a0, 1620(a0)
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_sh1add:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    slli a2, a2, 1
+; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    lh a1, 1600(a1)
+; RV64I-NEXT:    lh a0, 1620(a0)
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    ret
+;
+; RV32ZBA-LABEL: test_sh1add:
+; RV32ZBA:       # %bb.0: # %entry
+; RV32ZBA-NEXT:    sh1add a1, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a2, a0
+; RV32ZBA-NEXT:    lh a1, 1600(a1)
+; RV32ZBA-NEXT:    lh a0, 1620(a0)
+; RV32ZBA-NEXT:    add a0, a0, a1
+; RV32ZBA-NEXT:    slli a0, a0, 16
+; RV32ZBA-NEXT:    srai a0, a0, 16
+; RV32ZBA-NEXT:    ret
+;
+; RV64ZBA-LABEL: test_sh1add:
+; RV64ZBA:       # %bb.0: # %entry
+; RV64ZBA-NEXT:    sh1add a1, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a2, a0
+; RV64ZBA-NEXT:    lh a1, 1600(a1)
+; RV64ZBA-NEXT:    lh a0, 1620(a0)
+; RV64ZBA-NEXT:    add a0, a0, a1
+; RV64ZBA-NEXT:    slli a0, a0, 48
+; RV64ZBA-NEXT:    srai a0, a0, 48
+; RV64ZBA-NEXT:    ret
+entry:
+  %d = getelementptr inbounds nuw i8, ptr %p, i64 1600
+  %arrayidx = getelementptr inbounds nuw [100 x i16], ptr %d, i64 0, iXLen %x
+  %0 = load i16, ptr %arrayidx, align 2
+  %add = add iXLen %y, 10
+  %arrayidx2 = getelementptr inbounds nuw [100 x i16], ptr %d, i64 0...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/127151

