[llvm] c9e08fa - [RISCV] Add a pass to merge moving parameter registers instructions for Zcmp

via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 21 00:41:57 PDT 2023


Author: WuXinlong
Date: 2023-06-21T15:41:51+08:00
New Revision: c9e08fa6066649d96cff8c20da42eb0b44dc878b

URL: https://github.com/llvm/llvm-project/commit/c9e08fa6066649d96cff8c20da42eb0b44dc878b
DIFF: https://github.com/llvm/llvm-project/commit/c9e08fa6066649d96cff8c20da42eb0b44dc878b.diff

LOG: [RISCV] Add a pass to merge moving parameter registers instructions for Zcmp

This patch adds a pass to generate `cm.mvsa01` & `cm.mva01s`.

RISCVMoveMerger.cpp combines two mv instructions into one cm.mva01s or cm.mvsa01.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D150415

Added: 
    llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
    llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll

Modified: 
    llvm/lib/Target/RISCV/CMakeLists.txt
    llvm/lib/Target/RISCV/RISCV.h
    llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
    llvm/test/CodeGen/RISCV/O3-pipeline.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index aea1150a17ae5..5203c00415bce 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -36,6 +36,7 @@ add_llvm_target(RISCVCodeGen
   RISCVMergeBaseOffset.cpp
+  RISCVMoveMerger.cpp
   RISCVOptWInstrs.cpp
   RISCVRedundantCopyElimination.cpp
   RISCVRegisterInfo.cpp
   RISCVRVVInitUndef.cpp
   RISCVSubtarget.cpp

diff  --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index 9efd95d2effce..3f5966dd8538f 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -70,6 +70,9 @@ FunctionPass *createRISCVInitUndefPass();
 void initializeRISCVInitUndefPass(PassRegistry &);
 extern char &RISCVInitUndefID;
 
+FunctionPass *createRISCVMoveMergePass();
+void initializeRISCVMoveMergePass(PassRegistry &);
+
 InstructionSelector *createRISCVInstructionSelector(const RISCVTargetMachine &,
                                                     RISCVSubtarget &,
                                                     RISCVRegisterBankInfo &);

diff  --git a/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp b/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
new file mode 100644
index 0000000000000..daf75acff97df
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
@@ -0,0 +1,238 @@
+//===---------- RISCVMoveMerger.cpp - RISCV move merge pass ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that performs move related peephole optimizations
+// as Zcmp has specified. This pass should be run after register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVInstrInfo.h"
+#include "RISCVMachineFunctionInfo.h"
+
+using namespace llvm;
+
+#define RISCV_MOVE_MERGE_NAME "RISC-V Zcmp move merging pass"
+
+namespace {
+struct RISCVMoveMerge : public MachineFunctionPass {
+  static char ID;
+
+  RISCVMoveMerge() : MachineFunctionPass(ID) {
+    initializeRISCVMoveMergePass(*PassRegistry::getPassRegistry());
+  }
+
+  // Cached per-function target hooks, set in runOnMachineFunction.
+  const RISCVInstrInfo *TII;
+  const TargetRegisterInfo *TRI;
+
+  // Track which register units have been modified and used.
+  LiveRegUnits ModifiedRegUnits, UsedRegUnits;
+
+  // Check whether the copy's registers satisfy the operand constraints of
+  // CM.MVA01S / CM.MVSA01, respectively.
+  bool isCandidateToMergeMVA01S(const DestSourcePair &RegPair);
+  bool isCandidateToMergeMVSA01(const DestSourcePair &RegPair);
+  // Merge the two instructions indicated into a single pair instruction.
+  MachineBasicBlock::iterator
+  mergePairedInsns(MachineBasicBlock::iterator I,
+                   MachineBasicBlock::iterator Paired, unsigned Opcode);
+
+  // Look for C.MV instruction that can be combined with
+  // the given instruction into CM.MVA01S or CM.MVSA01. Return the matching
+  // instruction if one exists.
+  MachineBasicBlock::iterator
+  findMatchingInst(MachineBasicBlock::iterator &MBBI, unsigned InstOpcode,
+                   const DestSourcePair &RegPair);
+  // Apply the merge optimization to one basic block; returns true if any
+  // instruction was changed.
+  bool mergeMoveSARegPair(MachineBasicBlock &MBB);
+  bool runOnMachineFunction(MachineFunction &Fn) override;
+
+  StringRef getPassName() const override { return RISCV_MOVE_MERGE_NAME; }
+};
+
+char RISCVMoveMerge::ID = 0;
+
+} // end of anonymous namespace
+
+INITIALIZE_PASS(RISCVMoveMerge, "riscv-move-merge", RISCV_MOVE_MERGE_NAME,
+                false, false)
+
+// Check if registers meet CM.MVA01S constraints.
+bool RISCVMoveMerge::isCandidateToMergeMVA01S(const DestSourcePair &RegPair) {
+  Register Destination = RegPair.Destination->getReg();
+  Register Source = RegPair.Source->getReg();
+  // CM.MVA01S copies saved registers into argument registers: the
+  // destination must be a0 (X10) or a1 (X11) and the source must be one of
+  // s0-s7 (the SR07 register class).
+  if ((Destination == RISCV::X10 || Destination == RISCV::X11) &&
+      RISCV::SR07RegClass.contains(Source))
+    return true;
+  return false;
+}
+
+// Check if registers meet CM.MVSA01 constraints.
+bool RISCVMoveMerge::isCandidateToMergeMVSA01(const DestSourcePair &RegPair) {
+  Register Destination = RegPair.Destination->getReg();
+  Register Source = RegPair.Source->getReg();
+  // CM.MVSA01 copies argument registers into saved registers: the source
+  // must be a0 (X10) or a1 (X11) and the destination must be one of s0-s7
+  // (the SR07 register class).
+  if ((Source == RISCV::X10 || Source == RISCV::X11) &&
+      RISCV::SR07RegClass.contains(Destination))
+    return true;
+  return false;
+}
+
+// Replace the two copies at I and Paired with a single paired instruction of
+// the given Opcode, inserted at I. Returns an iterator to the first
+// instruction after the pair that survives the merge.
+MachineBasicBlock::iterator
+RISCVMoveMerge::mergePairedInsns(MachineBasicBlock::iterator I,
+                                 MachineBasicBlock::iterator Paired,
+                                 unsigned Opcode) {
+  const MachineOperand *Sreg1, *Sreg2;
+  MachineBasicBlock::iterator E = I->getParent()->end();
+  MachineBasicBlock::iterator NextI = next_nodbg(I, E);
+  DestSourcePair FirstPair = TII->isCopyInstrImpl(*I).value();
+  DestSourcePair PairedRegs = TII->isCopyInstrImpl(*Paired).value();
+  // For CM.MVA01S the a-register is the destination of the copy; for
+  // CM.MVSA01 it is the source.
+  Register ARegInFirstPair = Opcode == RISCV::CM_MVA01S
+                                 ? FirstPair.Destination->getReg()
+                                 : FirstPair.Source->getReg();
+
+  // If Paired immediately follows I, step past it so NextI points at the
+  // first instruction that survives the erase below.
+  if (NextI == Paired)
+    NextI = next_nodbg(NextI, E);
+  DebugLoc DL = I->getDebugLoc();
+
+  // The order of S-reg depends on which instruction holds A0, instead of
+  // the order of register pair.
+  // e.g.
+  //   mv a1, s1
+  //   mv a0, s2    =>  cm.mva01s s2,s1
+  //
+  //   mv a0, s2
+  //   mv a1, s1    =>  cm.mva01s s2,s1
+  bool StartWithX10 = ARegInFirstPair == RISCV::X10;
+  if (Opcode == RISCV::CM_MVA01S) {
+    Sreg1 = StartWithX10 ? FirstPair.Source : PairedRegs.Source;
+    Sreg2 = StartWithX10 ? PairedRegs.Source : FirstPair.Source;
+  } else {
+    Sreg1 = StartWithX10 ? FirstPair.Destination : PairedRegs.Destination;
+    Sreg2 = StartWithX10 ? PairedRegs.Destination : FirstPair.Destination;
+  }
+
+  // Emit the merged instruction before I, then delete both original moves.
+  BuildMI(*I->getParent(), I, DL, TII->get(Opcode)).add(*Sreg1).add(*Sreg2);
+
+  I->eraseFromParent();
+  Paired->eraseFromParent();
+  return NextI;
+}
+
+// Scan forward from MBBI for a second copy that, together with the copy at
+// MBBI, forms a valid InstOpcode (CM.MVA01S / CM.MVSA01) pair. Returns the
+// matching iterator, or the block's end iterator if no safe match exists.
+MachineBasicBlock::iterator
+RISCVMoveMerge::findMatchingInst(MachineBasicBlock::iterator &MBBI,
+                                 unsigned InstOpcode,
+                                 const DestSourcePair &RegPair) {
+  MachineBasicBlock::iterator E = MBBI->getParent()->end();
+
+  // Track which register units have been modified and used between the first
+  // insn and the second insn.
+  ModifiedRegUnits.clear();
+  UsedRegUnits.clear();
+
+  for (MachineBasicBlock::iterator I = next_nodbg(MBBI, E); I != E;
+       I = next_nodbg(I, E)) {
+
+    MachineInstr &MI = *I;
+
+    if (auto SecondPair = TII->isCopyInstrImpl(MI)) {
+      Register SourceReg = SecondPair->Source->getReg();
+      Register DestReg = SecondPair->Destination->getReg();
+
+      if (InstOpcode == RISCV::CM_MVA01S &&
+          isCandidateToMergeMVA01S(*SecondPair)) {
+        // The two moves must write different destination registers.
+        if ((RegPair.Destination->getReg() == DestReg))
+          return E;
+
+        //  If paired destination register was modified or used, the source reg
+        //  was modified, there is no possibility of finding matching
+        //  instruction so exit early.
+        if (!ModifiedRegUnits.available(DestReg) ||
+            !UsedRegUnits.available(DestReg) ||
+            !ModifiedRegUnits.available(SourceReg))
+          return E;
+
+        return I;
+      } else if (InstOpcode == RISCV::CM_MVSA01 &&
+                 isCandidateToMergeMVSA01(*SecondPair)) {
+        // Both the source and the destination registers must differ between
+        // the two moves.
+        if ((RegPair.Source->getReg() == SourceReg) ||
+            (RegPair.Destination->getReg() == DestReg))
+          return E;
+
+        // Same early-exit safety check as the CM.MVA01S case: no intervening
+        // def/use of the paired destination and no def of the source.
+        if (!ModifiedRegUnits.available(DestReg) ||
+            !UsedRegUnits.available(DestReg) ||
+            !ModifiedRegUnits.available(SourceReg))
+          return E;
+
+        return I;
+      }
+    }
+    // Update modified / used register units.
+    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
+  }
+  return E;
+}
+
+// Finds instructions, which could be represented as C.MV instructions and
+// merged into CM.MVA01S or CM.MVSA01.
+bool RISCVMoveMerge::mergeMoveSARegPair(MachineBasicBlock &MBB) {
+  bool Modified = false;
+
+  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+       MBBI != E;) {
+    // Check if the instruction can be compressed to C.MV instruction. If it
+    // can, return Dest/Src register pair.
+    auto RegPair = TII->isCopyInstrImpl(*MBBI);
+    if (RegPair.has_value()) {
+      unsigned Opcode = 0;
+
+      // Decide which paired opcode this copy could seed, if any.
+      if (isCandidateToMergeMVA01S(*RegPair))
+        Opcode = RISCV::CM_MVA01S;
+      else if (isCandidateToMergeMVSA01(*RegPair))
+        Opcode = RISCV::CM_MVSA01;
+      else {
+        ++MBBI;
+        continue;
+      }
+
+      MachineBasicBlock::iterator Paired =
+          findMatchingInst(MBBI, Opcode, RegPair.value());
+      // If matching instruction can be found merge them.
+      if (Paired != E) {
+        // mergePairedInsns erases both moves and returns the next iterator.
+        MBBI = mergePairedInsns(MBBI, Paired, Opcode);
+        Modified = true;
+        continue;
+      }
+    }
+    ++MBBI;
+  }
+  return Modified;
+}
+
+bool RISCVMoveMerge::runOnMachineFunction(MachineFunction &Fn) {
+  if (skipFunction(Fn.getFunction()))
+    return false;
+
+  // The paired move instructions only exist in the Zcmp extension.
+  const RISCVSubtarget *Subtarget = &Fn.getSubtarget<RISCVSubtarget>();
+  if (!Subtarget->hasStdExtZcmp())
+    return false;
+
+  TII = Subtarget->getInstrInfo();
+  TRI = Subtarget->getRegisterInfo();
+  // Resize the modified and used register unit trackers.  We do this once
+  // per function and then clear the register units each time we optimize a
+  // move.
+  ModifiedRegUnits.init(*TRI);
+  UsedRegUnits.init(*TRI);
+  bool Modified = false;
+  for (auto &MBB : Fn)
+    Modified |= mergeMoveSARegPair(MBB);
+  return Modified;
+}
+
+/// createRISCVMoveMergePass - returns an instance of the
+/// Zcmp move merge pass.
+FunctionPass *llvm::createRISCVMoveMergePass() { return new RISCVMoveMerge(); }

diff  --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 90f9696c90563..9f3649ed10985 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -87,6 +87,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   initializeRISCVInsertReadWriteCSRPass(*PR);
   initializeRISCVDAGToDAGISelPass(*PR);
   initializeRISCVInitUndefPass(*PR);
+  initializeRISCVMoveMergePass(*PR);
 }
 
 static StringRef computeDataLayout(const Triple &TT) {
@@ -348,6 +349,8 @@ void RISCVPassConfig::addPreEmitPass() {
 }
 
 void RISCVPassConfig::addPreEmitPass2() {
+  if (TM->getOptLevel() != CodeGenOpt::None)
+    addPass(createRISCVMoveMergePass());
   addPass(createRISCVExpandPseudoPass());
 
   // Schedule the expansion of AMOs at the last possible moment, avoiding the

diff  --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index b0ebfb15d0515..aa92f2d8bc32c 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -177,6 +177,7 @@
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       Machine Optimization Remark Emitter
 ; CHECK-NEXT:       Stack Frame Layout Analysis
+; CHECK-NEXT:       RISC-V Zcmp move merging pass
 ; CHECK-NEXT:       RISC-V pseudo instruction expansion pass
 ; CHECK-NEXT:       RISC-V atomic pseudo instruction expansion pass
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis

diff  --git a/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
new file mode 100644
index 0000000000000..8ac6d6de638c1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
@@ -0,0 +1,197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=CHECK32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zcmp -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=CHECK32ZCMP %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=CHECK64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zcmp -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=CHECK64ZCMP %s
+
+declare i32 @foo(i32)
+declare i32 @func(i32,i32)
+
+define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
+; CHECK32I-LABEL: zcmp_mv:
+; CHECK32I:       # %bb.0:
+; CHECK32I-NEXT:    addi sp, sp, -16
+; CHECK32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; CHECK32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; CHECK32I-NEXT:    mv s0, a1
+; CHECK32I-NEXT:    mv s1, a0
+; CHECK32I-NEXT:    call func at plt
+; CHECK32I-NEXT:    mv s2, a0
+; CHECK32I-NEXT:    mv a0, s1
+; CHECK32I-NEXT:    mv a1, s0
+; CHECK32I-NEXT:    call func at plt
+; CHECK32I-NEXT:    add a0, s2, s0
+; CHECK32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; CHECK32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; CHECK32I-NEXT:    addi sp, sp, 16
+; CHECK32I-NEXT:    ret
+;
+; CHECK32ZCMP-LABEL: zcmp_mv:
+; CHECK32ZCMP:       # %bb.0:
+; CHECK32ZCMP-NEXT:    addi sp, sp, -16
+; CHECK32ZCMP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32ZCMP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK32ZCMP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; CHECK32ZCMP-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; CHECK32ZCMP-NEXT:    cm.mvsa01 s1, s0
+; CHECK32ZCMP-NEXT:    call func at plt
+; CHECK32ZCMP-NEXT:    mv s2, a0
+; CHECK32ZCMP-NEXT:    cm.mva01s s1, s0
+; CHECK32ZCMP-NEXT:    call func at plt
+; CHECK32ZCMP-NEXT:    add a0, s2, s0
+; CHECK32ZCMP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32ZCMP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK32ZCMP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; CHECK32ZCMP-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; CHECK32ZCMP-NEXT:    addi sp, sp, 16
+; CHECK32ZCMP-NEXT:    ret
+;
+; CHECK64I-LABEL: zcmp_mv:
+; CHECK64I:       # %bb.0:
+; CHECK64I-NEXT:    addi sp, sp, -32
+; CHECK64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; CHECK64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; CHECK64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; CHECK64I-NEXT:    mv s0, a1
+; CHECK64I-NEXT:    mv s1, a0
+; CHECK64I-NEXT:    call func at plt
+; CHECK64I-NEXT:    mv s2, a0
+; CHECK64I-NEXT:    mv a0, s1
+; CHECK64I-NEXT:    mv a1, s0
+; CHECK64I-NEXT:    call func at plt
+; CHECK64I-NEXT:    addw a0, s2, s0
+; CHECK64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; CHECK64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; CHECK64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; CHECK64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK64I-NEXT:    addi sp, sp, 32
+; CHECK64I-NEXT:    ret
+;
+; CHECK64ZCMP-LABEL: zcmp_mv:
+; CHECK64ZCMP:       # %bb.0:
+; CHECK64ZCMP-NEXT:    addi sp, sp, -32
+; CHECK64ZCMP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK64ZCMP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; CHECK64ZCMP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; CHECK64ZCMP-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; CHECK64ZCMP-NEXT:    cm.mvsa01 s1, s0
+; CHECK64ZCMP-NEXT:    call func at plt
+; CHECK64ZCMP-NEXT:    mv s2, a0
+; CHECK64ZCMP-NEXT:    cm.mva01s s1, s0
+; CHECK64ZCMP-NEXT:    call func at plt
+; CHECK64ZCMP-NEXT:    addw a0, s2, s0
+; CHECK64ZCMP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; CHECK64ZCMP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; CHECK64ZCMP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; CHECK64ZCMP-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK64ZCMP-NEXT:    addi sp, sp, 32
+; CHECK64ZCMP-NEXT:    ret
+  %call = call i32 @func(i32 %num, i32 %f)
+  %call1 = call i32 @func(i32 %num, i32 %f)
+  %res = add i32 %call, %f
+  ret i32 %res
+}
+
+define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
+; CHECK32I-LABEL: not_zcmp_mv:
+; CHECK32I:       # %bb.0:
+; CHECK32I-NEXT:    addi sp, sp, -16
+; CHECK32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; CHECK32I-NEXT:    mv s0, a1
+; CHECK32I-NEXT:    call foo at plt
+; CHECK32I-NEXT:    mv s1, a0
+; CHECK32I-NEXT:    mv a0, s0
+; CHECK32I-NEXT:    call foo at plt
+; CHECK32I-NEXT:    mv a0, s1
+; CHECK32I-NEXT:    call foo at plt
+; CHECK32I-NEXT:    li a0, 1
+; CHECK32I-NEXT:    mv a1, s0
+; CHECK32I-NEXT:    call func at plt
+; CHECK32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; CHECK32I-NEXT:    addi sp, sp, 16
+; CHECK32I-NEXT:    ret
+;
+; CHECK32ZCMP-LABEL: not_zcmp_mv:
+; CHECK32ZCMP:       # %bb.0:
+; CHECK32ZCMP-NEXT:    addi sp, sp, -16
+; CHECK32ZCMP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32ZCMP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK32ZCMP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; CHECK32ZCMP-NEXT:    mv s0, a1
+; CHECK32ZCMP-NEXT:    call foo at plt
+; CHECK32ZCMP-NEXT:    mv s1, a0
+; CHECK32ZCMP-NEXT:    mv a0, s0
+; CHECK32ZCMP-NEXT:    call foo at plt
+; CHECK32ZCMP-NEXT:    mv a0, s1
+; CHECK32ZCMP-NEXT:    call foo at plt
+; CHECK32ZCMP-NEXT:    li a0, 1
+; CHECK32ZCMP-NEXT:    mv a1, s0
+; CHECK32ZCMP-NEXT:    call func at plt
+; CHECK32ZCMP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32ZCMP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK32ZCMP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; CHECK32ZCMP-NEXT:    addi sp, sp, 16
+; CHECK32ZCMP-NEXT:    ret
+;
+; CHECK64I-LABEL: not_zcmp_mv:
+; CHECK64I:       # %bb.0:
+; CHECK64I-NEXT:    addi sp, sp, -32
+; CHECK64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; CHECK64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; CHECK64I-NEXT:    mv s0, a1
+; CHECK64I-NEXT:    call foo at plt
+; CHECK64I-NEXT:    mv s1, a0
+; CHECK64I-NEXT:    mv a0, s0
+; CHECK64I-NEXT:    call foo at plt
+; CHECK64I-NEXT:    mv a0, s1
+; CHECK64I-NEXT:    call foo at plt
+; CHECK64I-NEXT:    li a0, 1
+; CHECK64I-NEXT:    mv a1, s0
+; CHECK64I-NEXT:    call func at plt
+; CHECK64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; CHECK64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; CHECK64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; CHECK64I-NEXT:    addi sp, sp, 32
+; CHECK64I-NEXT:    ret
+;
+; CHECK64ZCMP-LABEL: not_zcmp_mv:
+; CHECK64ZCMP:       # %bb.0:
+; CHECK64ZCMP-NEXT:    addi sp, sp, -32
+; CHECK64ZCMP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK64ZCMP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; CHECK64ZCMP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; CHECK64ZCMP-NEXT:    mv s0, a1
+; CHECK64ZCMP-NEXT:    call foo at plt
+; CHECK64ZCMP-NEXT:    mv s1, a0
+; CHECK64ZCMP-NEXT:    mv a0, s0
+; CHECK64ZCMP-NEXT:    call foo at plt
+; CHECK64ZCMP-NEXT:    mv a0, s1
+; CHECK64ZCMP-NEXT:    call foo at plt
+; CHECK64ZCMP-NEXT:    li a0, 1
+; CHECK64ZCMP-NEXT:    mv a1, s0
+; CHECK64ZCMP-NEXT:    call func at plt
+; CHECK64ZCMP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; CHECK64ZCMP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; CHECK64ZCMP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; CHECK64ZCMP-NEXT:    addi sp, sp, 32
+; CHECK64ZCMP-NEXT:    ret
+  %call = call i32 @foo(i32 %num)
+  %call1 = call i32 @foo(i32 %f)
+  %tmp = call i32 @foo(i32 %call)
+  %res = call i32 @func(i32 1, i32 %f)
+  ret i32 %res
+}


        


More information about the llvm-commits mailing list