[llvm] [RISCV] Reduce constant pool usage without FP extension (PR #158346)

Shaoce SUN via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 12 11:52:56 PDT 2025


https://github.com/sunshaoce created https://github.com/llvm/llvm-project/pull/158346

For now, only the constants 0.0 and 1.0 are recognized and materialized in GPRs instead of being loaded from the constant pool; the recognition range can be extended later.
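For reference, a minimal reproducer sketch (the function name and exact llc invocation are illustrative, not taken from the patch):

  ; reduce.ll -- store an FP immediate on a target without F/D/Zfh
  define void @store_fp_one(ptr %p) {
    store double 1.0, ptr %p
    ret void
  }

Compiled with something like llc -mtriple=riscv64 -global-isel, this previously loaded the value from a .LCPI constant pool entry; with this patch the bit pattern is built directly in a GPR (li a1, 1023; slli a1, a1, 52), as the constpool_f64 test update below shows.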

From 5b538f8f3c4e9a65f3fc904ff6ae8c8d9b8249a8 Mon Sep 17 00:00:00 2001
From: Shaoce SUN <sunshaoce at outlook.com>
Date: Sat, 13 Sep 2025 02:51:48 +0800
Subject: [PATCH] [RISCV] Reduce constant pool usage without FP extension

For now, only the constants 0.0 and 1.0 are recognized; the recognition range can be extended later.
---
 .../RISCV/GISel/RISCVInstructionSelector.cpp  |  17 ++-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  17 ++-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.h   |   1 +
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |  33 +++---
 .../CodeGen/RISCV/GlobalISel/constantpool.ll  |  44 +++----
 .../CodeGen/RISCV/GlobalISel/double-arith.ll  |  54 ++++-----
 .../CodeGen/RISCV/GlobalISel/float-arith.ll   | 112 +++++++-----------
 llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll  |   8 +-
 8 files changed, 135 insertions(+), 151 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 7df1b7e580002..29633481b72be 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -745,10 +745,19 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
       if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
         return false;
 
-      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
-                        : Size == 32 ? RISCV::FMV_W_X
-                                     : RISCV::FMV_H_X;
-      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
+      unsigned Opcode = RISCV::INIT_UNDEF;
+      MachineInstrBuilder FMV;
+      if (Subtarget->hasStdExtF() || Subtarget->hasStdExtD() ||
+          Subtarget->hasStdExtZfh()) {
+        Opcode = Size == 64   ? RISCV::FMV_D_X
+                 : Size == 32 ? RISCV::FMV_W_X
+                              : RISCV::FMV_H_X;
+        FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
+      } else {
+        Opcode =
+            (Subtarget->is64Bit() && Size == 32) ? RISCV::ADDW : RISCV::ADD;
+        FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg, Register(RISCV::X0)});
+      }
       if (!FMV.constrainAllUses(TII, TRI, RBI))
         return false;
     } else {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 16f34a89a52ec..61543a28d80c2 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -572,7 +572,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
       .legalFor(ST.hasStdExtF(), {s32})
       .legalFor(ST.hasStdExtD(), {s64})
       .legalFor(ST.hasStdExtZfh(), {s16})
-      .lowerFor({s32, s64, s128});
+      .customFor(!ST.is64Bit(), {s32})
+      .customFor(ST.is64Bit(), {s32, s64})
+      .lowerFor({s64, s128});
 
   getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
       .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
@@ -869,6 +871,12 @@ bool RISCVLegalizerInfo::shouldBeInConstantPool(const APInt &APImm,
   return !(!SeqLo.empty() && (SeqLo.size() + 2) <= STI.getMaxBuildIntsCost());
 }
 
+bool RISCVLegalizerInfo::shouldBeInFConstantPool(const APFloat &APImm) const {
+  if (APImm.isZero() || APImm.isExactlyValue(1.0))
+    return false;
+  return true;
+}
+
 bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
                                         MachineIRBuilder &MIB) const {
   const LLT XLenTy(STI.getXLenVT());
@@ -1358,7 +1366,12 @@ bool RISCVLegalizerInfo::legalizeCustom(
     return false;
   case TargetOpcode::G_ABS:
     return Helper.lowerAbsToMaxNeg(MI);
-  // TODO: G_FCONSTANT
+  case TargetOpcode::G_FCONSTANT: {
+    const ConstantFP *ConstVal = MI.getOperand(1).getFPImm();
+    if (!shouldBeInFConstantPool(ConstVal->getValue()))
+      return true;
+    return Helper.lowerFConstant(MI);
+  }
   case TargetOpcode::G_CONSTANT: {
     const Function &F = MF.getFunction();
     // TODO: if PSI and BFI are present, add " ||
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 4451866745194..bd6d1665849c8 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -39,6 +39,7 @@ class RISCVLegalizerInfo : public LegalizerInfo {
 
 private:
   bool shouldBeInConstantPool(const APInt &APImm, bool ShouldOptForSize) const;
+  bool shouldBeInFConstantPool(const APFloat &APImm) const;
   bool legalizeShlAshrLshr(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
                            GISelChangeObserver &Observer) const;
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index a082b18867666..b0d0ce7bdf529 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -112,7 +112,8 @@ using namespace llvm;
 RISCVRegisterBankInfo::RISCVRegisterBankInfo(unsigned HwMode)
     : RISCVGenRegisterBankInfo(HwMode) {}
 
-static const RegisterBankInfo::ValueMapping *getFPValueMapping(unsigned Size) {
+static const RegisterBankInfo::ValueMapping *
+getFPValueMapping(unsigned Size, bool HasFPExt = true) {
   unsigned Idx;
   switch (Size) {
   default:
@@ -121,10 +122,10 @@ static const RegisterBankInfo::ValueMapping *getFPValueMapping(unsigned Size) {
     Idx = RISCV::FPRB16Idx;
     break;
   case 32:
-    Idx = RISCV::FPRB32Idx;
+    Idx = HasFPExt ? RISCV::FPRB32Idx : RISCV::GPRB32Idx;
     break;
   case 64:
-    Idx = RISCV::FPRB64Idx;
+    Idx = HasFPExt ? RISCV::FPRB64Idx : RISCV::GPRB64Idx;
     break;
   }
   return &RISCV::ValueMappings[Idx];
@@ -219,6 +220,10 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   const TargetSubtargetInfo &STI = MF.getSubtarget();
   const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
 
+  bool HasFPExt = STI.hasFeature(RISCV::FeatureStdExtF) ||
+                  STI.hasFeature(RISCV::FeatureStdExtD) ||
+                  STI.hasFeature(RISCV::FeatureStdExtZfh);
+
   unsigned GPRSize = getMaximumSize(RISCV::GPRBRegBankID);
   assert((GPRSize == 32 || GPRSize == 64) && "Unexpected GPR size");
 
@@ -266,7 +271,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     if (Ty.isVector())
       Mapping = getVRBValueMapping(Size.getKnownMinValue());
     else if (isPreISelGenericFloatingPointOpcode(Opc))
-      Mapping = getFPValueMapping(Size.getFixedValue());
+      Mapping = getFPValueMapping(Size.getFixedValue(), HasFPExt);
     else
       Mapping = GPRValueMapping;
 
@@ -301,7 +306,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     if (DstTy.isVector())
       Mapping = getVRBValueMapping(DstMinSize);
     else if (anyUseOnlyUseFP(Dst, MRI, TRI))
-      Mapping = getFPValueMapping(DstMinSize);
+      Mapping = getFPValueMapping(DstMinSize, HasFPExt);
 
     return getInstructionMapping(DefaultMappingID, /*Cost=*/1, Mapping,
                                  NumOperands);
@@ -339,7 +344,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
       // assume this was a floating point load in the IR. If it was
       // not, we would have had a bitcast before reaching that
       // instruction.
-      OpdsMapping[0] = getFPValueMapping(Size);
+      OpdsMapping[0] = getFPValueMapping(Size, HasFPExt);
       break;
     }
 
@@ -367,7 +372,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
 
     MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(0).getReg());
     if (onlyDefinesFP(*DefMI, MRI, TRI))
-      OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
+      OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits(), HasFPExt);
     break;
   }
   case TargetOpcode::G_SELECT: {
@@ -432,7 +437,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
 
     const ValueMapping *Mapping = GPRValueMapping;
     if (NumFP >= 2)
-      Mapping = getFPValueMapping(Ty.getSizeInBits());
+      Mapping = getFPValueMapping(Ty.getSizeInBits(), HasFPExt);
 
     OpdsMapping[0] = OpdsMapping[2] = OpdsMapping[3] = Mapping;
     break;
@@ -444,13 +449,13 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   case RISCV::G_FCLASS: {
     LLT Ty = MRI.getType(MI.getOperand(1).getReg());
     OpdsMapping[0] = GPRValueMapping;
-    OpdsMapping[1] = getFPValueMapping(Ty.getSizeInBits());
+    OpdsMapping[1] = getFPValueMapping(Ty.getSizeInBits(), HasFPExt);
     break;
   }
   case TargetOpcode::G_SITOFP:
   case TargetOpcode::G_UITOFP: {
     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
-    OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
+    OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits(), HasFPExt);
     OpdsMapping[1] = GPRValueMapping;
     break;
   }
@@ -468,7 +473,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
     if (GPRSize == 32 && Ty.getSizeInBits() == 64) {
       assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
-      OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
+      OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits(), HasFPExt);
       OpdsMapping[1] = GPRValueMapping;
       OpdsMapping[2] = GPRValueMapping;
     }
@@ -481,7 +486,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
       assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
       OpdsMapping[0] = GPRValueMapping;
       OpdsMapping[1] = GPRValueMapping;
-      OpdsMapping[2] = getFPValueMapping(Ty.getSizeInBits());
+      OpdsMapping[2] = getFPValueMapping(Ty.getSizeInBits(), HasFPExt);
     }
     break;
   }
@@ -495,7 +500,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     if ((GPRSize == 32 && ScalarTy.getSizeInBits() == 64) ||
         onlyDefinesFP(*DefMI, MRI, TRI)) {
       assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
-      OpdsMapping[1] = getFPValueMapping(ScalarTy.getSizeInBits());
+      OpdsMapping[1] = getFPValueMapping(ScalarTy.getSizeInBits(), HasFPExt);
     } else
       OpdsMapping[1] = GPRValueMapping;
     break;
@@ -514,7 +519,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
          OpdsMapping[Idx] =
              getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue());
        else if (isPreISelGenericFloatingPointOpcode(Opc))
-         OpdsMapping[Idx] = getFPValueMapping(Ty.getSizeInBits());
+         OpdsMapping[Idx] = getFPValueMapping(Ty.getSizeInBits(), HasFPExt);
        else
          OpdsMapping[Idx] = GPRValueMapping;
     }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/constantpool.ll b/llvm/test/CodeGen/RISCV/GlobalISel/constantpool.ll
index 1eeeb60c2eb40..849bb13009e16 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/constantpool.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/constantpool.ll
@@ -15,47 +15,37 @@
 define void @constpool_f32(ptr %p) {
 ; RV32-SMALL-LABEL: constpool_f32:
 ; RV32-SMALL:       # %bb.0:
-; RV32-SMALL-NEXT:    lui a1, %hi(.LCPI0_0)
-; RV32-SMALL-NEXT:    lw a1, %lo(.LCPI0_0)(a1)
+; RV32-SMALL-NEXT:    lui a1, 260096
 ; RV32-SMALL-NEXT:    sw a1, 0(a0)
 ; RV32-SMALL-NEXT:    ret
 ;
 ; RV32-MEDIUM-LABEL: constpool_f32:
 ; RV32-MEDIUM:       # %bb.0:
-; RV32-MEDIUM-NEXT:  .Lpcrel_hi0:
-; RV32-MEDIUM-NEXT:    auipc a1, %pcrel_hi(.LCPI0_0)
-; RV32-MEDIUM-NEXT:    lw a1, %pcrel_lo(.Lpcrel_hi0)(a1)
+; RV32-MEDIUM-NEXT:    lui a1, 260096
 ; RV32-MEDIUM-NEXT:    sw a1, 0(a0)
 ; RV32-MEDIUM-NEXT:    ret
 ;
 ; RV32-PIC-LABEL: constpool_f32:
 ; RV32-PIC:       # %bb.0:
-; RV32-PIC-NEXT:  .Lpcrel_hi0:
-; RV32-PIC-NEXT:    auipc a1, %pcrel_hi(.LCPI0_0)
-; RV32-PIC-NEXT:    lw a1, %pcrel_lo(.Lpcrel_hi0)(a1)
+; RV32-PIC-NEXT:    lui a1, 260096
 ; RV32-PIC-NEXT:    sw a1, 0(a0)
 ; RV32-PIC-NEXT:    ret
 ;
 ; RV64-SMALL-LABEL: constpool_f32:
 ; RV64-SMALL:       # %bb.0:
-; RV64-SMALL-NEXT:    lui a1, %hi(.LCPI0_0)
-; RV64-SMALL-NEXT:    lw a1, %lo(.LCPI0_0)(a1)
+; RV64-SMALL-NEXT:    lui a1, 260096
 ; RV64-SMALL-NEXT:    sw a1, 0(a0)
 ; RV64-SMALL-NEXT:    ret
 ;
 ; RV64-MEDIUM-LABEL: constpool_f32:
 ; RV64-MEDIUM:       # %bb.0:
-; RV64-MEDIUM-NEXT:  .Lpcrel_hi0:
-; RV64-MEDIUM-NEXT:    auipc a1, %pcrel_hi(.LCPI0_0)
-; RV64-MEDIUM-NEXT:    lw a1, %pcrel_lo(.Lpcrel_hi0)(a1)
+; RV64-MEDIUM-NEXT:    lui a1, 260096
 ; RV64-MEDIUM-NEXT:    sw a1, 0(a0)
 ; RV64-MEDIUM-NEXT:    ret
 ;
 ; RV64-PIC-LABEL: constpool_f32:
 ; RV64-PIC:       # %bb.0:
-; RV64-PIC-NEXT:  .Lpcrel_hi0:
-; RV64-PIC-NEXT:    auipc a1, %pcrel_hi(.LCPI0_0)
-; RV64-PIC-NEXT:    lw a1, %pcrel_lo(.Lpcrel_hi0)(a1)
+; RV64-PIC-NEXT:    lui a1, 260096
 ; RV64-PIC-NEXT:    sw a1, 0(a0)
 ; RV64-PIC-NEXT:    ret
   store float 1.0, ptr %p
@@ -75,9 +65,9 @@ define void @constpool_f64(ptr %p) {
 ;
 ; RV32-MEDIUM-LABEL: constpool_f64:
 ; RV32-MEDIUM:       # %bb.0:
-; RV32-MEDIUM-NEXT:  .Lpcrel_hi1:
+; RV32-MEDIUM-NEXT:  .Lpcrel_hi0:
 ; RV32-MEDIUM-NEXT:    auipc a1, %pcrel_hi(.LCPI1_0)
-; RV32-MEDIUM-NEXT:    addi a1, a1, %pcrel_lo(.Lpcrel_hi1)
+; RV32-MEDIUM-NEXT:    addi a1, a1, %pcrel_lo(.Lpcrel_hi0)
 ; RV32-MEDIUM-NEXT:    lw a2, 0(a1)
 ; RV32-MEDIUM-NEXT:    lw a1, 4(a1)
 ; RV32-MEDIUM-NEXT:    sw a2, 0(a0)
@@ -86,9 +76,9 @@ define void @constpool_f64(ptr %p) {
 ;
 ; RV32-PIC-LABEL: constpool_f64:
 ; RV32-PIC:       # %bb.0:
-; RV32-PIC-NEXT:  .Lpcrel_hi1:
+; RV32-PIC-NEXT:  .Lpcrel_hi0:
 ; RV32-PIC-NEXT:    auipc a1, %pcrel_hi(.LCPI1_0)
-; RV32-PIC-NEXT:    addi a1, a1, %pcrel_lo(.Lpcrel_hi1)
+; RV32-PIC-NEXT:    addi a1, a1, %pcrel_lo(.Lpcrel_hi0)
 ; RV32-PIC-NEXT:    lw a2, 0(a1)
 ; RV32-PIC-NEXT:    lw a1, 4(a1)
 ; RV32-PIC-NEXT:    sw a2, 0(a0)
@@ -97,24 +87,22 @@ define void @constpool_f64(ptr %p) {
 ;
 ; RV64-SMALL-LABEL: constpool_f64:
 ; RV64-SMALL:       # %bb.0:
-; RV64-SMALL-NEXT:    lui a1, %hi(.LCPI1_0)
-; RV64-SMALL-NEXT:    ld a1, %lo(.LCPI1_0)(a1)
+; RV64-SMALL-NEXT:    li a1, 1023
+; RV64-SMALL-NEXT:    slli a1, a1, 52
 ; RV64-SMALL-NEXT:    sd a1, 0(a0)
 ; RV64-SMALL-NEXT:    ret
 ;
 ; RV64-MEDIUM-LABEL: constpool_f64:
 ; RV64-MEDIUM:       # %bb.0:
-; RV64-MEDIUM-NEXT:  .Lpcrel_hi1:
-; RV64-MEDIUM-NEXT:    auipc a1, %pcrel_hi(.LCPI1_0)
-; RV64-MEDIUM-NEXT:    ld a1, %pcrel_lo(.Lpcrel_hi1)(a1)
+; RV64-MEDIUM-NEXT:    li a1, 1023
+; RV64-MEDIUM-NEXT:    slli a1, a1, 52
 ; RV64-MEDIUM-NEXT:    sd a1, 0(a0)
 ; RV64-MEDIUM-NEXT:    ret
 ;
 ; RV64-PIC-LABEL: constpool_f64:
 ; RV64-PIC:       # %bb.0:
-; RV64-PIC-NEXT:  .Lpcrel_hi1:
-; RV64-PIC-NEXT:    auipc a1, %pcrel_hi(.LCPI1_0)
-; RV64-PIC-NEXT:    ld a1, %pcrel_lo(.Lpcrel_hi1)(a1)
+; RV64-PIC-NEXT:    li a1, 1023
+; RV64-PIC-NEXT:    slli a1, a1, 52
 ; RV64-PIC-NEXT:    sd a1, 0(a0)
 ; RV64-PIC-NEXT:    ret
   store double 1.0, ptr %p
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
index 12684f30dbee0..c4dcf72ae6606 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
@@ -508,9 +508,8 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    lui a0, %hi(.LCPI14_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI14_0)(a0)
 ; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
@@ -606,14 +605,12 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI15_0)
-; RV64I-NEXT:    ld s1, %lo(.LCPI15_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
@@ -716,14 +713,12 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV64I-NEXT:    ld s1, %lo(.LCPI16_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
@@ -869,9 +864,8 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI19_0)(a1)
 ; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
@@ -948,9 +942,8 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI20_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI20_0)(a1)
 ; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
@@ -1078,9 +1071,8 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    lui a0, %hi(.LCPI22_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI22_0)(a0)
 ; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
@@ -1193,18 +1185,16 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV64I-NEXT:    ld s1, %lo(.LCPI23_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s3
@@ -1309,20 +1299,18 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI24_0)
-; RV64I-NEXT:    ld s1, %lo(.LCPI24_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __subdf3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll
index 739f225ad1525..763b6d6544747 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll
@@ -472,9 +472,8 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    lui a0, %hi(.LCPI14_0)
-; RV32I-NEXT:    lw a1, %lo(.LCPI14_0)(a0)
 ; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a2, a0, a2
@@ -495,9 +494,8 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    lui a0, %hi(.LCPI14_0)
-; RV64I-NEXT:    lw a1, %lo(.LCPI14_0)(a0)
 ; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    sext.w a1, zero
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a2, a0, a2
@@ -533,14 +531,12 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
-; RV32I-NEXT:    lui a1, %hi(.LCPI15_0)
-; RV32I-NEXT:    lw s1, %lo(.LCPI15_0)(a1)
-; RV32I-NEXT:    mv s2, a2
-; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, s3, a2
@@ -565,14 +561,13 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI15_0)
-; RV64I-NEXT:    lw s1, %lo(.LCPI15_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    sext.w s2, zero
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a1, s3, a2
@@ -614,14 +609,12 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32I-NEXT:    lw s1, %lo(.LCPI16_0)(a1)
-; RV32I-NEXT:    mv s2, a2
-; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, s3, a2
@@ -646,14 +639,13 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV64I-NEXT:    lw s1, %lo(.LCPI16_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    sext.w s2, zero
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a1, s3, a2
@@ -778,9 +770,8 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
-; RV32I-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32I-NEXT:    lw a1, %lo(.LCPI19_0)(a1)
 ; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
@@ -800,9 +791,8 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV64I-NEXT:    lw a1, %lo(.LCPI19_0)(a1)
 ; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    sext.w a1, zero
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
@@ -836,9 +826,8 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lui a1, %hi(.LCPI20_0)
-; RV32I-NEXT:    lw a1, %lo(.LCPI20_0)(a1)
 ; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a1, a0, a1
@@ -859,9 +848,8 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI20_0)
-; RV64I-NEXT:    lw a1, %lo(.LCPI20_0)(a1)
 ; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    sext.w a1, zero
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a1, a0, a1
@@ -935,9 +923,8 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    lui a0, %hi(.LCPI22_0)
-; RV32I-NEXT:    lw a1, %lo(.LCPI22_0)(a0)
 ; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
@@ -961,9 +948,8 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    lui a0, %hi(.LCPI22_0)
-; RV64I-NEXT:    lw a1, %lo(.LCPI22_0)(a0)
 ; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    sext.w a1, zero
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
@@ -1004,18 +990,16 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
-; RV32I-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32I-NEXT:    lw s1, %lo(.LCPI23_0)(a1)
-; RV32I-NEXT:    mv s2, a2
-; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s3
@@ -1042,18 +1026,17 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV64I-NEXT:    lw s1, %lo(.LCPI23_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    sext.w s2, zero
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s3
@@ -1097,20 +1080,18 @@ define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
-; RV32I-NEXT:    lui a1, %hi(.LCPI24_0)
-; RV32I-NEXT:    lw s1, %lo(.LCPI24_0)(a1)
-; RV32I-NEXT:    mv s2, a2
-; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __mulsf3
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __subsf3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -1129,20 +1110,19 @@ define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI24_0)
-; RV64I-NEXT:    lw s1, %lo(.LCPI24_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv s1, a2
+; RV64I-NEXT:    sext.w s2, zero
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __mulsf3
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __subsf3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
index d9ddf655c283a..f43b255a679e3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -437,8 +437,8 @@ define void @va1_caller() nounwind {
 ; LP64:       # %bb.0:
 ; LP64-NEXT:    addi sp, sp, -16
 ; LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-NEXT:    lui a0, %hi(.LCPI3_0)
-; LP64-NEXT:    ld a1, %lo(.LCPI3_0)(a0)
+; LP64-NEXT:    li a0, 1023
+; LP64-NEXT:    slli a1, a0, 52
 ; LP64-NEXT:    li a2, 2
 ; LP64-NEXT:    call va1
 ; LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -494,8 +494,8 @@ define void @va1_caller() nounwind {
 ; RV64-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64-WITHFP-NEXT:    addi s0, sp, 16
-; RV64-WITHFP-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV64-WITHFP-NEXT:    ld a1, %lo(.LCPI3_0)(a0)
+; RV64-WITHFP-NEXT:    li a0, 1023
+; RV64-WITHFP-NEXT:    slli a1, a0, 52
 ; RV64-WITHFP-NEXT:    li a2, 2
 ; RV64-WITHFP-NEXT:    call va1
 ; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload


