[llvm] 8286378 - LoongArch: Remove VK_CALL/VK_CALL_PLT and don't print %plt

via llvm-commits llvm-commits at lists.llvm.org
Tue May 6 23:30:51 PDT 2025


Author: Fangrui Song
Date: 2025-05-07T14:30:47+08:00
New Revision: 82863783a2518b814e7ffc03b0e40a185a39d1b8

URL: https://github.com/llvm/llvm-project/commit/82863783a2518b814e7ffc03b0e40a185a39d1b8
DIFF: https://github.com/llvm/llvm-project/commit/82863783a2518b814e7ffc03b0e40a185a39d1b8.diff

LOG: LoongArch: Remove VK_CALL/VK_CALL_PLT and don't print %plt

`%plt` is a redundant relocation specifier: `bl %plt(foo)` is identical
to `bl foo`. Let's replace VK_CALL/VK_CALL_PLT with R_LARCH_B26 and
remove the two now-redundant specifier constants.

Pull Request: https://github.com/llvm/llvm-project/pull/138632

Added: 
    

Modified: 
    llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
    llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
    llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp
    llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.cpp
    llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.h
    llvm/test/CodeGen/LoongArch/addrspacecast.ll
    llvm/test/CodeGen/LoongArch/alloca.ll
    llvm/test/CodeGen/LoongArch/bnez-beqz.ll
    llvm/test/CodeGen/LoongArch/code-models.ll
    llvm/test/CodeGen/LoongArch/double-br-fcmp.ll
    llvm/test/CodeGen/LoongArch/eh-dwarf-cfa.ll
    llvm/test/CodeGen/LoongArch/exception-pointer-register.ll
    llvm/test/CodeGen/LoongArch/fdiv-reciprocal-estimate.ll
    llvm/test/CodeGen/LoongArch/float-br-fcmp.ll
    llvm/test/CodeGen/LoongArch/fp-expand.ll
    llvm/test/CodeGen/LoongArch/fp-max-min.ll
    llvm/test/CodeGen/LoongArch/fp-maximumnum-minimumnum.ll
    llvm/test/CodeGen/LoongArch/fp-reciprocal.ll
    llvm/test/CodeGen/LoongArch/fp-trunc-store.ll
    llvm/test/CodeGen/LoongArch/fp16-promote.ll
    llvm/test/CodeGen/LoongArch/frint.ll
    llvm/test/CodeGen/LoongArch/fsqrt-reciprocal-estimate.ll
    llvm/test/CodeGen/LoongArch/fsqrt.ll
    llvm/test/CodeGen/LoongArch/intrinsic-csr-side-effects.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/call.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
    llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
    llvm/test/CodeGen/LoongArch/numeric-reg-names.ll
    llvm/test/CodeGen/LoongArch/soft-fp-to-int.ll
    llvm/test/CodeGen/LoongArch/spill-reload-cfr.ll
    llvm/test/CodeGen/LoongArch/stack-realignment-with-variable-sized-objects.ll
    llvm/test/CodeGen/LoongArch/stack-realignment.ll
    llvm/test/CodeGen/LoongArch/statepoint-call-lowering.ll
    llvm/test/CodeGen/LoongArch/tls-models.ll
    llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll
    llvm/test/CodeGen/LoongArch/vector-fp-imm.ll
    llvm/test/MC/LoongArch/Relocations/relocations.s

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp b/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
index faac1a221cb59..39c5e034f2a48 100644
--- a/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
+++ b/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
@@ -517,9 +517,7 @@ class LoongArchOperand : public MCParsedAsmOperand {
     int64_t Imm;
     LoongArchMCExpr::Specifier VK = LoongArchMCExpr::VK_None;
     bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
-    bool IsValidKind =
-        VK == LoongArchMCExpr::VK_None || VK == LoongArchMCExpr::VK_CALL ||
-        VK == LoongArchMCExpr::VK_CALL_PLT || VK == ELF::R_LARCH_B26;
+    bool IsValidKind = VK == LoongArchMCExpr::VK_None || VK == ELF::R_LARCH_B26;
     return IsConstantImm
                ? isShiftedInt<26, 2>(Imm) && IsValidKind
                : LoongArchAsmParser::classifySymbolRef(getImm(), VK) &&
@@ -793,7 +791,6 @@ ParseStatus LoongArchAsmParser::parseSImm26Operand(OperandVector &Operands) {
 
   MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
   Res = MCSymbolRefExpr::create(Sym, getContext());
-  Res = LoongArchMCExpr::create(Res, LoongArchMCExpr::VK_CALL, getContext());
   Operands.push_back(LoongArchOperand::createImm(Res, S, E));
   return ParseStatus::Success;
 }

diff  --git a/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp b/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
index ae76463e7ebc8..24bf5a18cfb00 100644
--- a/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
@@ -35,10 +35,8 @@ static MCOperand lowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym,
     Kind = LoongArchMCExpr::VK_None;
     break;
   case LoongArchII::MO_CALL:
-    Kind = LoongArchMCExpr::VK_CALL;
-    break;
   case LoongArchII::MO_CALL_PLT:
-    Kind = LoongArchMCExpr::VK_CALL_PLT;
+    Kind = ELF::R_LARCH_B26;
     break;
   case LoongArchII::MO_PCREL_HI:
     Kind = ELF::R_LARCH_PCALA_HI20;

diff  --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp
index d16fb5c52b8e8..5770a76b9f214 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp
@@ -147,8 +147,6 @@ LoongArchMCCodeEmitter::getExprOpValue(const MCInst &MI, const MCOperand &MO,
       FixupKind = LoongArch::fixup_loongarch_b21;
       break;
     case ELF::R_LARCH_B26:
-    case LoongArchMCExpr::VK_CALL:
-    case LoongArchMCExpr::VK_CALL_PLT:
       FixupKind = LoongArch::fixup_loongarch_b26;
       break;
     case ELF::R_LARCH_ABS_HI20:

diff  --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.cpp
index ac39b1fa411f2..994d2489a8278 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.cpp
@@ -32,7 +32,7 @@ const LoongArchMCExpr *LoongArchMCExpr::create(const MCExpr *Expr, uint16_t S,
 
 void LoongArchMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
   Specifier S = getSpecifier();
-  bool HasVariant = S != VK_None && S != VK_CALL;
+  bool HasVariant = S != VK_None && S != ELF::R_LARCH_B26;
 
   if (HasVariant)
     OS << '%' << getSpecifierName(specifier) << '(';
@@ -63,14 +63,10 @@ StringRef LoongArchMCExpr::getSpecifierName(uint16_t S) {
   switch (S) {
   default:
     llvm_unreachable("Invalid ELF symbol kind");
-  case VK_CALL_PLT:
-    return "plt";
   case ELF::R_LARCH_B16:
     return "b16";
   case ELF::R_LARCH_B21:
     return "b21";
-  case ELF::R_LARCH_B26:
-    return "b26";
   case ELF::R_LARCH_ABS_HI20:
     return "abs_hi20";
   case ELF::R_LARCH_ABS_LO12:
@@ -176,7 +172,7 @@ StringRef LoongArchMCExpr::getSpecifierName(uint16_t S) {
 
 LoongArchMCExpr::Specifier LoongArchMCExpr::parseSpecifier(StringRef name) {
   return StringSwitch<LoongArchMCExpr::Specifier>(name)
-      .Case("plt", VK_CALL_PLT)
+      .Case("plt", ELF::R_LARCH_B26)
       .Case("b16", ELF::R_LARCH_B16)
       .Case("b21", ELF::R_LARCH_B21)
       .Case("b26", ELF::R_LARCH_B26)

diff  --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.h b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.h
index aac49979fb60a..06ebbc034042b 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.h
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.h
@@ -23,11 +23,7 @@ class StringRef;
 class LoongArchMCExpr : public MCTargetExpr {
 public:
   using Specifier = uint16_t;
-  enum {
-    VK_None,
-    VK_CALL = 1000, // larger than relocation types
-    VK_CALL_PLT,
-  };
+  enum { VK_None };
 
 private:
   const MCExpr *Expr;

diff  --git a/llvm/test/CodeGen/LoongArch/addrspacecast.ll b/llvm/test/CodeGen/LoongArch/addrspacecast.ll
index b177e8fc17dd6..d41c90b386de7 100644
--- a/llvm/test/CodeGen/LoongArch/addrspacecast.ll
+++ b/llvm/test/CodeGen/LoongArch/addrspacecast.ll
@@ -24,7 +24,7 @@ define void @cast1(ptr %ptr) {
 ; LA32-NEXT:    .cfi_def_cfa_offset 16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
-; LA32-NEXT:    bl %plt(foo)
+; LA32-NEXT:    bl foo
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/alloca.ll b/llvm/test/CodeGen/LoongArch/alloca.ll
index effd7daffe610..8a3b2aefaee6a 100644
--- a/llvm/test/CodeGen/LoongArch/alloca.ll
+++ b/llvm/test/CodeGen/LoongArch/alloca.ll
@@ -20,7 +20,7 @@ define void @simple_alloca(i32 %n) nounwind {
 ; LA32-NEXT:    bstrins.w $a0, $zero, 3, 0
 ; LA32-NEXT:    sub.w $a0, $sp, $a0
 ; LA32-NEXT:    move $sp, $a0
-; LA32-NEXT:    bl %plt(notdead)
+; LA32-NEXT:    bl notdead
 ; LA32-NEXT:    addi.w $sp, $fp, -16
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -67,7 +67,7 @@ define void @scoped_alloca(i32 %n) nounwind {
 ; LA32-NEXT:    bstrins.w $a0, $zero, 3, 0
 ; LA32-NEXT:    sub.w $a0, $sp, $a0
 ; LA32-NEXT:    move $sp, $a0
-; LA32-NEXT:    bl %plt(notdead)
+; LA32-NEXT:    bl notdead
 ; LA32-NEXT:    move $sp, $s0
 ; LA32-NEXT:    addi.w $sp, $fp, -16
 ; LA32-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
@@ -137,7 +137,7 @@ define void @alloca_callframe(i32 %n) nounwind {
 ; LA32-NEXT:    ori $a6, $zero, 7
 ; LA32-NEXT:    ori $a7, $zero, 8
 ; LA32-NEXT:    st.w $t0, $sp, 0
-; LA32-NEXT:    bl %plt(func)
+; LA32-NEXT:    bl func
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    addi.w $sp, $fp, -16
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload

diff  --git a/llvm/test/CodeGen/LoongArch/bnez-beqz.ll b/llvm/test/CodeGen/LoongArch/bnez-beqz.ll
index 93bbcbbf2bf66..3b1dabaf6ea14 100644
--- a/llvm/test/CodeGen/LoongArch/bnez-beqz.ll
+++ b/llvm/test/CodeGen/LoongArch/bnez-beqz.ll
@@ -11,7 +11,7 @@ define void @bnez_i32(i32 signext %0) nounwind {
 ; LA32-NEXT:  # %bb.1: # %f
 ; LA32-NEXT:    ret
 ; LA32-NEXT:  .LBB0_2: # %t
-; LA32-NEXT:    b %plt(bar)
+; LA32-NEXT:    b bar
 ;
 ; LA64-LABEL: bnez_i32:
 ; LA64:       # %bb.0: # %start
@@ -38,7 +38,7 @@ define void @beqz_i32(i32 signext %0) nounwind {
 ; LA32:       # %bb.0: # %start
 ; LA32-NEXT:    beqz $a0, .LBB1_2
 ; LA32-NEXT:  # %bb.1: # %t
-; LA32-NEXT:    b %plt(bar)
+; LA32-NEXT:    b bar
 ; LA32-NEXT:  .LBB1_2: # %f
 ; LA32-NEXT:    ret
 ;
@@ -70,7 +70,7 @@ define void @bnez_i64(i64 %0) nounwind {
 ; LA32-NEXT:  # %bb.1: # %f
 ; LA32-NEXT:    ret
 ; LA32-NEXT:  .LBB2_2: # %t
-; LA32-NEXT:    b %plt(bar)
+; LA32-NEXT:    b bar
 ;
 ; LA64-LABEL: bnez_i64:
 ; LA64:       # %bb.0: # %start
@@ -98,7 +98,7 @@ define void @beqz_i64(i64 %0) nounwind {
 ; LA32-NEXT:    or $a0, $a0, $a1
 ; LA32-NEXT:    beqz $a0, .LBB3_2
 ; LA32-NEXT:  # %bb.1: # %t
-; LA32-NEXT:    b %plt(bar)
+; LA32-NEXT:    b bar
 ; LA32-NEXT:  .LBB3_2: # %f
 ; LA32-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/LoongArch/code-models.ll b/llvm/test/CodeGen/LoongArch/code-models.ll
index c012068862334..f4459655e6138 100644
--- a/llvm/test/CodeGen/LoongArch/code-models.ll
+++ b/llvm/test/CodeGen/LoongArch/code-models.ll
@@ -14,7 +14,7 @@ define i32 @call_globaladdress(i32 %a) nounwind {
 ; SMALL:       # %bb.0:
 ; SMALL-NEXT:    addi.d $sp, $sp, -16
 ; SMALL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; SMALL-NEXT:    bl %plt(callee)
+; SMALL-NEXT:    bl callee
 ; SMALL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; SMALL-NEXT:    addi.d $sp, $sp, 16
 ; SMALL-NEXT:    ret
@@ -55,7 +55,7 @@ define void @call_external_sym(ptr %dst) {
 ; SMALL-NEXT:    .cfi_offset 1, -8
 ; SMALL-NEXT:    ori $a2, $zero, 1000
 ; SMALL-NEXT:    move $a1, $zero
-; SMALL-NEXT:    bl %plt(memset)
+; SMALL-NEXT:    bl memset
 ; SMALL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; SMALL-NEXT:    addi.d $sp, $sp, 16
 ; SMALL-NEXT:    ret
@@ -101,7 +101,7 @@ declare i32 @callee_tail(i32 %i)
 define i32 @caller_tail(i32 %i) nounwind {
 ; SMALL-LABEL: caller_tail:
 ; SMALL:       # %bb.0: # %entry
-; SMALL-NEXT:    b %plt(callee_tail)
+; SMALL-NEXT:    b callee_tail
 ;
 ; MEDIUM-LABEL: caller_tail:
 ; MEDIUM:       # %bb.0: # %entry

diff  --git a/llvm/test/CodeGen/LoongArch/double-br-fcmp.ll b/llvm/test/CodeGen/LoongArch/double-br-fcmp.ll
index 6a5b856a42b2e..cb89bcd097495 100644
--- a/llvm/test/CodeGen/LoongArch/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/LoongArch/double-br-fcmp.ll
@@ -14,7 +14,7 @@ define void @br_fcmp_oeq_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB0_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_oeq_bcnez:
 ; LA64:       # %bb.0:
@@ -46,7 +46,7 @@ define void @br_fcmp_oeq_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB1_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_oeq_bceqz:
 ; LA64:       # %bb.0:
@@ -78,7 +78,7 @@ define void @br_fcmp_ogt_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB2_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ogt_bcnez:
 ; LA64:       # %bb.0:
@@ -110,7 +110,7 @@ define void @br_fcmp_ogt_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB3_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ogt_bceqz:
 ; LA64:       # %bb.0:
@@ -142,7 +142,7 @@ define void @br_fcmp_oge_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB4_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_oge_bcnez:
 ; LA64:       # %bb.0:
@@ -174,7 +174,7 @@ define void @br_fcmp_oge_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB5_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_oge_bceqz:
 ; LA64:       # %bb.0:
@@ -206,7 +206,7 @@ define void @br_fcmp_olt_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB6_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_olt_bcnez:
 ; LA64:       # %bb.0:
@@ -238,7 +238,7 @@ define void @br_fcmp_olt_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB7_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_olt_bceqz:
 ; LA64:       # %bb.0:
@@ -270,7 +270,7 @@ define void @br_fcmp_ole_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB8_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ole_bcnez:
 ; LA64:       # %bb.0:
@@ -302,7 +302,7 @@ define void @br_fcmp_ole_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB9_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ole_bceqz:
 ; LA64:       # %bb.0:
@@ -334,7 +334,7 @@ define void @br_fcmp_one_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB10_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_one_bcnez:
 ; LA64:       # %bb.0:
@@ -366,7 +366,7 @@ define void @br_fcmp_one_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB11_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_one_bceqz:
 ; LA64:       # %bb.0:
@@ -398,7 +398,7 @@ define void @br_fcmp_ord_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB12_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ord_bcnez:
 ; LA64:       # %bb.0:
@@ -430,7 +430,7 @@ define void @br_fcmp_ord_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB13_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ord_bceqz:
 ; LA64:       # %bb.0:
@@ -462,7 +462,7 @@ define void @br_fcmp_ueq_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB14_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ueq_bcnez:
 ; LA64:       # %bb.0:
@@ -494,7 +494,7 @@ define void @br_fcmp_ueq_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB15_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ueq_bceqz:
 ; LA64:       # %bb.0:
@@ -526,7 +526,7 @@ define void @br_fcmp_ugt_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB16_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ugt_bcnez:
 ; LA64:       # %bb.0:
@@ -558,7 +558,7 @@ define void @br_fcmp_ugt_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB17_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ugt_bceqz:
 ; LA64:       # %bb.0:
@@ -590,7 +590,7 @@ define void @br_fcmp_uge_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB18_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_uge_bcnez:
 ; LA64:       # %bb.0:
@@ -622,7 +622,7 @@ define void @br_fcmp_uge_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB19_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_uge_bceqz:
 ; LA64:       # %bb.0:
@@ -654,7 +654,7 @@ define void @br_fcmp_ult_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB20_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ult_bcnez:
 ; LA64:       # %bb.0:
@@ -686,7 +686,7 @@ define void @br_fcmp_ult_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB21_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ult_bceqz:
 ; LA64:       # %bb.0:
@@ -718,7 +718,7 @@ define void @br_fcmp_ule_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB22_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ule_bcnez:
 ; LA64:       # %bb.0:
@@ -750,7 +750,7 @@ define void @br_fcmp_ule_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB23_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ule_bceqz:
 ; LA64:       # %bb.0:
@@ -782,7 +782,7 @@ define void @br_fcmp_une_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB24_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_une_bcnez:
 ; LA64:       # %bb.0:
@@ -814,7 +814,7 @@ define void @br_fcmp_une_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB25_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_une_bceqz:
 ; LA64:       # %bb.0:
@@ -846,7 +846,7 @@ define void @br_fcmp_uno_bcnez(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB26_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_uno_bcnez:
 ; LA64:       # %bb.0:
@@ -878,7 +878,7 @@ define void @br_fcmp_uno_bceqz(double %a, double %b) nounwind {
 ; LA32-NEXT:  .LBB27_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_uno_bceqz:
 ; LA64:       # %bb.0:

diff  --git a/llvm/test/CodeGen/LoongArch/eh-dwarf-cfa.ll b/llvm/test/CodeGen/LoongArch/eh-dwarf-cfa.ll
index f23c5364b93e0..224755eb73d38 100644
--- a/llvm/test/CodeGen/LoongArch/eh-dwarf-cfa.ll
+++ b/llvm/test/CodeGen/LoongArch/eh-dwarf-cfa.ll
@@ -10,7 +10,7 @@ define void @dwarf() {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    addi.w $a0, $sp, 16
-; LA32-NEXT:    bl %plt(foo)
+; LA32-NEXT:    bl foo
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/exception-pointer-register.ll b/llvm/test/CodeGen/LoongArch/exception-pointer-register.ll
index 91fa34aa3acfb..11cd573641071 100644
--- a/llvm/test/CodeGen/LoongArch/exception-pointer-register.ll
+++ b/llvm/test/CodeGen/LoongArch/exception-pointer-register.ll
@@ -28,13 +28,13 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; LA32-NEXT:  # %bb.1: # %bb2
 ; LA32-NEXT:  .Ltmp0:
 ; LA32-NEXT:    move $a0, $fp
-; LA32-NEXT:    bl %plt(bar)
+; LA32-NEXT:    bl bar
 ; LA32-NEXT:  .Ltmp1:
 ; LA32-NEXT:    b .LBB0_3
 ; LA32-NEXT:  .LBB0_2: # %bb1
 ; LA32-NEXT:  .Ltmp2:
 ; LA32-NEXT:    move $a0, $fp
-; LA32-NEXT:    bl %plt(foo)
+; LA32-NEXT:    bl foo
 ; LA32-NEXT:  .Ltmp3:
 ; LA32-NEXT:  .LBB0_3: # %end2
 ; LA32-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
@@ -48,7 +48,7 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; LA32-NEXT:    move $a0, $fp
 ; LA32-NEXT:    bl callee
 ; LA32-NEXT:    move $a0, $s0
-; LA32-NEXT:    bl %plt(_Unwind_Resume)
+; LA32-NEXT:    bl _Unwind_Resume
 ;
 ; LA64-LABEL: caller:
 ; LA64:       # %bb.0: # %entry

diff  --git a/llvm/test/CodeGen/LoongArch/fdiv-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/fdiv-reciprocal-estimate.ll
index 50f2d21a9cc84..63c26bd016b0e 100644
--- a/llvm/test/CodeGen/LoongArch/fdiv-reciprocal-estimate.ll
+++ b/llvm/test/CodeGen/LoongArch/fdiv-reciprocal-estimate.ll
@@ -9,15 +9,15 @@
 define float @fdiv_s(float %x, float %y) {
 ; LA32F-LABEL: fdiv_s:
 ; LA32F:       # %bb.0:
-; LA32F-NEXT:    fdiv.s	$fa0, $fa0, $fa1
+; LA32F-NEXT:    fdiv.s $fa0, $fa0, $fa1
 ; LA32F-NEXT:    ret
 ;
 ; LA32F-FRECIPE-LABEL: fdiv_s:
 ; LA32F-FRECIPE:       # %bb.0:
-; LA32F-FRECIPE-NEXT:    frecipe.s	$fa2, $fa1
-; LA32F-FRECIPE-NEXT:    fmul.s	$fa3, $fa0, $fa2
-; LA32F-FRECIPE-NEXT:    fnmsub.s	$fa0, $fa1, $fa3, $fa0
-; LA32F-FRECIPE-NEXT:    fmadd.s	$fa0, $fa2, $fa0, $fa3
+; LA32F-FRECIPE-NEXT:    frecipe.s $fa2, $fa1
+; LA32F-FRECIPE-NEXT:    fmul.s $fa3, $fa0, $fa2
+; LA32F-FRECIPE-NEXT:    fnmsub.s $fa0, $fa1, $fa3, $fa0
+; LA32F-FRECIPE-NEXT:    fmadd.s $fa0, $fa2, $fa0, $fa3
 ; LA32F-FRECIPE-NEXT:    ret
 ;
 ; LA64D-LABEL: fdiv_s:
@@ -27,10 +27,10 @@ define float @fdiv_s(float %x, float %y) {
 ;
 ; LA64D-FRECIPE-LABEL: fdiv_s:
 ; LA64D-FRECIPE:       # %bb.0:
-; LA64D-FRECIPE-NEXT:    frecipe.s	$fa2, $fa1
-; LA64D-FRECIPE-NEXT:    fmul.s	$fa3, $fa0, $fa2
-; LA64D-FRECIPE-NEXT:    fnmsub.s	$fa0, $fa1, $fa3, $fa0
-; LA64D-FRECIPE-NEXT:    fmadd.s	$fa0, $fa2, $fa0, $fa3
+; LA64D-FRECIPE-NEXT:    frecipe.s $fa2, $fa1
+; LA64D-FRECIPE-NEXT:    fmul.s $fa3, $fa0, $fa2
+; LA64D-FRECIPE-NEXT:    fnmsub.s $fa0, $fa1, $fa3, $fa0
+; LA64D-FRECIPE-NEXT:    fmadd.s $fa0, $fa2, $fa0, $fa3
 ; LA64D-FRECIPE-NEXT:    ret
   %div = fdiv fast float %x, %y
   ret float %div
@@ -39,24 +39,24 @@ define float @fdiv_s(float %x, float %y) {
 define double @fdiv_d(double %x, double %y) {
 ; LA32F-LABEL: fdiv_d:
 ; LA32F:       # %bb.0:
-; LA32F-NEXT:    addi.w	$sp, $sp, -16
+; LA32F-NEXT:    addi.w $sp, $sp, -16
 ; LA32F-NEXT:    .cfi_def_cfa_offset 16
-; LA32F-NEXT:    st.w	$ra, $sp, 12                    # 4-byte Folded Spill
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    .cfi_offset 1, -4
-; LA32F-NEXT:    bl	%plt(__divdf3)
-; LA32F-NEXT:    ld.w	$ra, $sp, 12                    # 4-byte Folded Reload
-; LA32F-NEXT:    addi.w	$sp, $sp, 16
+; LA32F-NEXT:    bl __divdf3
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
 ;
 ; LA32F-FRECIPE-LABEL: fdiv_d:
 ; LA32F-FRECIPE:       # %bb.0:
-; LA32F-FRECIPE-NEXT:    addi.w	$sp, $sp, -16
+; LA32F-FRECIPE-NEXT:    addi.w $sp, $sp, -16
 ; LA32F-FRECIPE-NEXT:    .cfi_def_cfa_offset 16
-; LA32F-FRECIPE-NEXT:    st.w	$ra, $sp, 12                    # 4-byte Folded Spill
+; LA32F-FRECIPE-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-FRECIPE-NEXT:    .cfi_offset 1, -4
-; LA32F-FRECIPE-NEXT:    bl	%plt(__divdf3)
-; LA32F-FRECIPE-NEXT:    ld.w	$ra, $sp, 12                    # 4-byte Folded Reload
-; LA32F-FRECIPE-NEXT:    addi.w	$sp, $sp, 16
+; LA32F-FRECIPE-NEXT:    bl __divdf3
+; LA32F-FRECIPE-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-FRECIPE-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-FRECIPE-NEXT:    ret
 ;
 ; LA64D-LABEL: fdiv_d:

diff  --git a/llvm/test/CodeGen/LoongArch/float-br-fcmp.ll b/llvm/test/CodeGen/LoongArch/float-br-fcmp.ll
index 316cd7c37f217..a761bffcacfc8 100644
--- a/llvm/test/CodeGen/LoongArch/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/LoongArch/float-br-fcmp.ll
@@ -14,7 +14,7 @@ define void @br_fcmp_oeq_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB0_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_oeq_bcnez_float:
 ; LA64:       # %bb.0:
@@ -46,7 +46,7 @@ define void @br_fcmp_oeq_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB1_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_oeq_bceqz_float:
 ; LA64:       # %bb.0:
@@ -78,7 +78,7 @@ define void @br_fcmp_ogt_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB2_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ogt_bcnez_float:
 ; LA64:       # %bb.0:
@@ -110,7 +110,7 @@ define void @br_fcmp_ogt_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB3_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ogt_bceqz_float:
 ; LA64:       # %bb.0:
@@ -142,7 +142,7 @@ define void @br_fcmp_oge_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB4_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_oge_bcnez_float:
 ; LA64:       # %bb.0:
@@ -174,7 +174,7 @@ define void @br_fcmp_oge_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB5_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_oge_bceqz_float:
 ; LA64:       # %bb.0:
@@ -206,7 +206,7 @@ define void @br_fcmp_olt_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB6_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_olt_bcnez_float:
 ; LA64:       # %bb.0:
@@ -238,7 +238,7 @@ define void @br_fcmp_olt_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB7_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_olt_bceqz_float:
 ; LA64:       # %bb.0:
@@ -270,7 +270,7 @@ define void @br_fcmp_ole_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB8_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ole_bcnez_float:
 ; LA64:       # %bb.0:
@@ -302,7 +302,7 @@ define void @br_fcmp_ole_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB9_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ole_bceqz_float:
 ; LA64:       # %bb.0:
@@ -334,7 +334,7 @@ define void @br_fcmp_one_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB10_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_one_bcnez_float:
 ; LA64:       # %bb.0:
@@ -366,7 +366,7 @@ define void @br_fcmp_one_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB11_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_one_bceqz_float:
 ; LA64:       # %bb.0:
@@ -398,7 +398,7 @@ define void @br_fcmp_ord_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB12_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ord_bcnez_float:
 ; LA64:       # %bb.0:
@@ -430,7 +430,7 @@ define void @br_fcmp_ord_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB13_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ord_bceqz_float:
 ; LA64:       # %bb.0:
@@ -462,7 +462,7 @@ define void @br_fcmp_ueq_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB14_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ueq_bcnez_float:
 ; LA64:       # %bb.0:
@@ -494,7 +494,7 @@ define void @br_fcmp_ueq_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB15_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ueq_bceqz_float:
 ; LA64:       # %bb.0:
@@ -526,7 +526,7 @@ define void @br_fcmp_ugt_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB16_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ugt_bcnez_float:
 ; LA64:       # %bb.0:
@@ -558,7 +558,7 @@ define void @br_fcmp_ugt_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB17_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ugt_bceqz_float:
 ; LA64:       # %bb.0:
@@ -590,7 +590,7 @@ define void @br_fcmp_uge_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB18_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_uge_bcnez_float:
 ; LA64:       # %bb.0:
@@ -622,7 +622,7 @@ define void @br_fcmp_uge_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB19_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_uge_bceqz_float:
 ; LA64:       # %bb.0:
@@ -654,7 +654,7 @@ define void @br_fcmp_ult_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB20_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ult_bcnez_float:
 ; LA64:       # %bb.0:
@@ -686,7 +686,7 @@ define void @br_fcmp_ult_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB21_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ult_bceqz_float:
 ; LA64:       # %bb.0:
@@ -718,7 +718,7 @@ define void @br_fcmp_ule_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB22_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ule_bcnez_float:
 ; LA64:       # %bb.0:
@@ -750,7 +750,7 @@ define void @br_fcmp_ule_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB23_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_ule_bceqz_float:
 ; LA64:       # %bb.0:
@@ -782,7 +782,7 @@ define void @br_fcmp_une_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB24_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_une_bcnez_float:
 ; LA64:       # %bb.0:
@@ -814,7 +814,7 @@ define void @br_fcmp_une_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB25_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_une_bceqz_float:
 ; LA64:       # %bb.0:
@@ -846,7 +846,7 @@ define void @br_fcmp_uno_bcnez_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB26_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_uno_bcnez_float:
 ; LA64:       # %bb.0:
@@ -878,7 +878,7 @@ define void @br_fcmp_uno_bceqz_float(float %a, float %b) nounwind {
 ; LA32-NEXT:  .LBB27_2: # %if.then
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(abort)
+; LA32-NEXT:    bl abort
 ;
 ; LA64-LABEL: br_fcmp_uno_bceqz_float:
 ; LA64:       # %bb.0:

diff  --git a/llvm/test/CodeGen/LoongArch/fp-expand.ll b/llvm/test/CodeGen/LoongArch/fp-expand.ll
index 0939094dae23a..1eefdf24c6664 100644
--- a/llvm/test/CodeGen/LoongArch/fp-expand.ll
+++ b/llvm/test/CodeGen/LoongArch/fp-expand.ll
@@ -14,7 +14,7 @@ declare double @llvm.pow.f64(double, double)
 define float @sin_f32(float %a) nounwind {
 ; LA32-LABEL: sin_f32:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    b %plt(sinf)
+; LA32-NEXT:    b sinf
 ;
 ; LA64-LABEL: sin_f32:
 ; LA64:       # %bb.0:
@@ -27,7 +27,7 @@ define float @sin_f32(float %a) nounwind {
 define float @cos_f32(float %a) nounwind {
 ; LA32-LABEL: cos_f32:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    b %plt(cosf)
+; LA32-NEXT:    b cosf
 ;
 ; LA64-LABEL: cos_f32:
 ; LA64:       # %bb.0:
@@ -45,10 +45,10 @@ define float @sincos_f32(float %a) nounwind {
 ; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
 ; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    bl %plt(sinf)
+; LA32-NEXT:    bl sinf
 ; LA32-NEXT:    fmov.s $fs1, $fa0
 ; LA32-NEXT:    fmov.s $fa0, $fs0
-; LA32-NEXT:    bl %plt(cosf)
+; LA32-NEXT:    bl cosf
 ; LA32-NEXT:    fadd.s $fa0, $fs1, $fa0
 ; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
 ; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
@@ -84,7 +84,7 @@ define float @sincos_f32(float %a) nounwind {
 define float @pow_f32(float %a, float %b) nounwind {
 ; LA32-LABEL: pow_f32:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    b %plt(powf)
+; LA32-NEXT:    b powf
 ;
 ; LA64-LABEL: pow_f32:
 ; LA64:       # %bb.0:
@@ -97,7 +97,7 @@ define float @pow_f32(float %a, float %b) nounwind {
 define float @frem_f32(float %a, float %b) nounwind {
 ; LA32-LABEL: frem_f32:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    b %plt(fmodf)
+; LA32-NEXT:    b fmodf
 ;
 ; LA64-LABEL: frem_f32:
 ; LA64:       # %bb.0:
@@ -110,7 +110,7 @@ define float @frem_f32(float %a, float %b) nounwind {
 define double @sin_f64(double %a) nounwind {
 ; LA32-LABEL: sin_f64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    b %plt(sin)
+; LA32-NEXT:    b sin
 ;
 ; LA64-LABEL: sin_f64:
 ; LA64:       # %bb.0:
@@ -123,7 +123,7 @@ define double @sin_f64(double %a) nounwind {
 define double @cos_f64(double %a) nounwind {
 ; LA32-LABEL: cos_f64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    b %plt(cos)
+; LA32-NEXT:    b cos
 ;
 ; LA64-LABEL: cos_f64:
 ; LA64:       # %bb.0:
@@ -141,10 +141,10 @@ define double @sincos_f64(double %a) nounwind {
 ; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
 ; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    fmov.d $fs0, $fa0
-; LA32-NEXT:    bl %plt(sin)
+; LA32-NEXT:    bl sin
 ; LA32-NEXT:    fmov.d $fs1, $fa0
 ; LA32-NEXT:    fmov.d $fa0, $fs0
-; LA32-NEXT:    bl %plt(cos)
+; LA32-NEXT:    bl cos
 ; LA32-NEXT:    fadd.d $fa0, $fs1, $fa0
 ; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
 ; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
@@ -180,7 +180,7 @@ define double @sincos_f64(double %a) nounwind {
 define double @pow_f64(double %a, double %b) nounwind {
 ; LA32-LABEL: pow_f64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    b %plt(pow)
+; LA32-NEXT:    b pow
 ;
 ; LA64-LABEL: pow_f64:
 ; LA64:       # %bb.0:
@@ -193,7 +193,7 @@ define double @pow_f64(double %a, double %b) nounwind {
 define double @frem_f64(double %a, double %b) nounwind {
 ; LA32-LABEL: frem_f64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    b %plt(fmod)
+; LA32-NEXT:    b fmod
 ;
 ; LA64-LABEL: frem_f64:
 ; LA64:       # %bb.0:

diff  --git a/llvm/test/CodeGen/LoongArch/fp-max-min.ll b/llvm/test/CodeGen/LoongArch/fp-max-min.ll
index 1adf4273b3158..9bf3e6c4c0fa0 100644
--- a/llvm/test/CodeGen/LoongArch/fp-max-min.ll
+++ b/llvm/test/CodeGen/LoongArch/fp-max-min.ll
@@ -48,7 +48,7 @@ define double @maxnum_double(double %x, double %y) {
 ; LA32F-NEXT:    .cfi_def_cfa_offset 16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    .cfi_offset 1, -4
-; LA32F-NEXT:    bl %plt(fmax)
+; LA32F-NEXT:    bl fmax
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -121,7 +121,7 @@ define double @minnum_double(double %x, double %y) {
 ; LA32F-NEXT:    .cfi_def_cfa_offset 16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    .cfi_offset 1, -4
-; LA32F-NEXT:    bl %plt(fmin)
+; LA32F-NEXT:    bl fmin
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/fp-maximumnum-minimumnum.ll b/llvm/test/CodeGen/LoongArch/fp-maximumnum-minimumnum.ll
index 607e50cb5a6c6..8718d61196d38 100644
--- a/llvm/test/CodeGen/LoongArch/fp-maximumnum-minimumnum.ll
+++ b/llvm/test/CodeGen/LoongArch/fp-maximumnum-minimumnum.ll
@@ -109,7 +109,7 @@ define double @maximumnum_double(double %x, double %y) {
 ; LA32F-NEXT:    .cfi_def_cfa_offset 16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    .cfi_offset 1, -4
-; LA32F-NEXT:    bl %plt(fmaximum_num)
+; LA32F-NEXT:    bl fmaximum_num
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -151,7 +151,7 @@ define double @maximumnum_double_nsz(double %x, double %y) {
 ; LA32F-NEXT:    .cfi_def_cfa_offset 16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    .cfi_offset 1, -4
-; LA32F-NEXT:    bl %plt(fmaximum_num)
+; LA32F-NEXT:    bl fmaximum_num
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -193,7 +193,7 @@ define double @maximumnum_double_nnan(double %x, double %y) {
 ; LA32F-NEXT:    .cfi_def_cfa_offset 16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    .cfi_offset 1, -4
-; LA32F-NEXT:    bl %plt(fmaximum_num)
+; LA32F-NEXT:    bl fmaximum_num
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -322,7 +322,7 @@ define double @minimumnum_double(double %x, double %y) {
 ; LA32F-NEXT:    .cfi_def_cfa_offset 16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    .cfi_offset 1, -4
-; LA32F-NEXT:    bl %plt(fminimum_num)
+; LA32F-NEXT:    bl fminimum_num
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -364,7 +364,7 @@ define double @minimumnum_double_nsz(double %x, double %y) {
 ; LA32F-NEXT:    .cfi_def_cfa_offset 16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    .cfi_offset 1, -4
-; LA32F-NEXT:    bl %plt(fminimum_num)
+; LA32F-NEXT:    bl fminimum_num
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -406,7 +406,7 @@ define double @minimumnum_double_nnan(double %x, double %y) {
 ; LA32F-NEXT:    .cfi_def_cfa_offset 16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    .cfi_offset 1, -4
-; LA32F-NEXT:    bl %plt(fminimum_num)
+; LA32F-NEXT:    bl fminimum_num
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/fp-reciprocal.ll b/llvm/test/CodeGen/LoongArch/fp-reciprocal.ll
index 04caf2555fca6..11e246eafe4ff 100644
--- a/llvm/test/CodeGen/LoongArch/fp-reciprocal.ll
+++ b/llvm/test/CodeGen/LoongArch/fp-reciprocal.ll
@@ -38,7 +38,7 @@ define double @f64_reciprocal(double %a) nounwind {
 ; LA32F-NEXT:    move $a2, $a0
 ; LA32F-NEXT:    lu12i.w $a1, 261888
 ; LA32F-NEXT:    move $a0, $zero
-; LA32F-NEXT:    bl %plt(__divdf3)
+; LA32F-NEXT:    bl __divdf3
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/fp-trunc-store.ll b/llvm/test/CodeGen/LoongArch/fp-trunc-store.ll
index 2db3bdb234eb0..644f7cc03fa94 100644
--- a/llvm/test/CodeGen/LoongArch/fp-trunc-store.ll
+++ b/llvm/test/CodeGen/LoongArch/fp-trunc-store.ll
@@ -13,7 +13,7 @@ define void @fp_trunc(ptr %a, double %b) nounwind {
 ; LA32F-NEXT:    move $fp, $a0
 ; LA32F-NEXT:    move $a0, $a1
 ; LA32F-NEXT:    move $a1, $a2
-; LA32F-NEXT:    bl %plt(__truncdfsf2)
+; LA32F-NEXT:    bl __truncdfsf2
 ; LA32F-NEXT:    fst.s $fa0, $fp, 0
 ; LA32F-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload

diff  --git a/llvm/test/CodeGen/LoongArch/fp16-promote.ll b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
index 61a371629557e..6a1610c27937d 100644
--- a/llvm/test/CodeGen/LoongArch/fp16-promote.ll
+++ b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
@@ -23,7 +23,7 @@ define float @test_fpextend_float(ptr %p) nounwind {
 ; LA32-LABEL: test_fpextend_float:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ld.hu $a0, $a0, 0
-; LA32-NEXT:    b %plt(__extendhfsf2)
+; LA32-NEXT:    b __extendhfsf2
 ;
 ; LA64-LABEL: test_fpextend_float:
 ; LA64:       # %bb.0:
@@ -41,7 +41,7 @@ define double @test_fpextend_double(ptr %p) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ld.hu $a0, $a0, 0
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fcvt.d.s $fa0, $fa0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
@@ -70,7 +70,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    bl %plt(__truncsfhf2)
+; LA32-NEXT:    bl __truncsfhf2
 ; LA32-NEXT:    st.h $a0, $fp, 0
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -102,7 +102,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    bl %plt(__truncdfhf2)
+; LA32-NEXT:    bl __truncdfhf2
 ; LA32-NEXT:    st.h $a0, $fp, 0
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -136,12 +136,12 @@ define half @test_fadd_reg(half %a, half %b) nounwind {
 ; LA32-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
 ; LA32-NEXT:    move $a0, $a1
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fmov.s $fs0, $fa0
 ; LA32-NEXT:    move $a0, $fp
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fs0
-; LA32-NEXT:    bl %plt(__truncsfhf2)
+; LA32-NEXT:    bl __truncsfhf2
 ; LA32-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -185,12 +185,12 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
 ; LA32-NEXT:    move $fp, $a0
 ; LA32-NEXT:    ld.hu $s0, $a0, 0
 ; LA32-NEXT:    ld.hu $a0, $a1, 0
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fmov.s $fs0, $fa0
 ; LA32-NEXT:    move $a0, $s0
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fs0
-; LA32-NEXT:    bl %plt(__truncsfhf2)
+; LA32-NEXT:    bl __truncsfhf2
 ; LA32-NEXT:    st.h $a0, $fp, 0
 ; LA32-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
 ; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
@@ -241,12 +241,12 @@ define half @test_fmul_reg(half %a, half %b) nounwind {
 ; LA32-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
 ; LA32-NEXT:    move $a0, $a1
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fmov.s $fs0, $fa0
 ; LA32-NEXT:    move $a0, $fp
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fmul.s $fa0, $fa0, $fs0
-; LA32-NEXT:    bl %plt(__truncsfhf2)
+; LA32-NEXT:    bl __truncsfhf2
 ; LA32-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -290,12 +290,12 @@ define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
 ; LA32-NEXT:    move $fp, $a0
 ; LA32-NEXT:    ld.hu $s0, $a0, 0
 ; LA32-NEXT:    ld.hu $a0, $a1, 0
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fmov.s $fs0, $fa0
 ; LA32-NEXT:    move $a0, $s0
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fmul.s $fa0, $fa0, $fs0
-; LA32-NEXT:    bl %plt(__truncsfhf2)
+; LA32-NEXT:    bl __truncsfhf2
 ; LA32-NEXT:    st.h $a0, $fp, 0
 ; LA32-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
 ; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
@@ -343,10 +343,10 @@ define half @freeze_half_undef() nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    movgr2fr.w $fa0, $zero
-; LA32-NEXT:    bl %plt(__truncsfhf2)
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fa0
-; LA32-NEXT:    bl %plt(__truncsfhf2)
+; LA32-NEXT:    bl __truncsfhf2
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -376,9 +376,9 @@ define half @freeze_half_poison(half %maybe.poison) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fa0
-; LA32-NEXT:    bl %plt(__truncsfhf2)
+; LA32-NEXT:    bl __truncsfhf2
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -405,7 +405,7 @@ define signext i32 @test_half_to_s32(half %a) nounwind {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -433,7 +433,7 @@ define zeroext i32 @test_half_to_s32_u32(half %a) nounwind {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__extendhfsf2)
+; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -462,8 +462,8 @@ define i64 @test_half_to_i64(half %a) nounwind {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__extendhfsf2)
-; LA32-NEXT:    bl %plt(__fixsfdi)
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    bl __fixsfdi
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/frint.ll b/llvm/test/CodeGen/LoongArch/frint.ll
index 48b1ff86ecf9c..42c0a57c35d03 100644
--- a/llvm/test/CodeGen/LoongArch/frint.ll
+++ b/llvm/test/CodeGen/LoongArch/frint.ll
@@ -7,11 +7,11 @@
 define float @rint_f32(float %f) nounwind {
 ; LA32F-LABEL: rint_f32:
 ; LA32F:       # %bb.0: # %entry
-; LA32F-NEXT:    b %plt(rintf)
+; LA32F-NEXT:    b rintf
 ;
 ; LA32D-LABEL: rint_f32:
 ; LA32D:       # %bb.0: # %entry
-; LA32D-NEXT:    b %plt(rintf)
+; LA32D-NEXT:    b rintf
 ;
 ; LA64F-LABEL: rint_f32:
 ; LA64F:       # %bb.0: # %entry
@@ -34,14 +34,14 @@ define double @rint_f64(double %d) nounwind {
 ; LA32F:       # %bb.0: # %entry
 ; LA32F-NEXT:    addi.w $sp, $sp, -16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32F-NEXT:    bl %plt(rint)
+; LA32F-NEXT:    bl rint
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: rint_f64:
 ; LA32D:       # %bb.0: # %entry
-; LA32D-NEXT:    b %plt(rint)
+; LA32D-NEXT:    b rint
 ;
 ; LA64F-LABEL: rint_f64:
 ; LA64F:       # %bb.0: # %entry

diff  --git a/llvm/test/CodeGen/LoongArch/fsqrt-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/fsqrt-reciprocal-estimate.ll
index 5f14352fccd60..e5c848e0f1542 100644
--- a/llvm/test/CodeGen/LoongArch/fsqrt-reciprocal-estimate.ll
+++ b/llvm/test/CodeGen/LoongArch/fsqrt-reciprocal-estimate.ll
@@ -55,12 +55,12 @@ define double @frsqrt_f64(double %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    addi.w	$sp, $sp, -16
 ; LA32F-NEXT:    st.w	$ra, $sp, 12 
-; LA32F-NEXT:    bl	%plt(sqrt)
+; LA32F-NEXT:    bl	sqrt
 ; LA32F-NEXT:    move	$a2, $a0
 ; LA32F-NEXT:    move	$a3, $a1
 ; LA32F-NEXT:    lu12i.w	$a1, 261888
 ; LA32F-NEXT:    move	$a0, $zero
-; LA32F-NEXT:    bl	%plt(__divdf3)
+; LA32F-NEXT:    bl	__divdf3
 ; LA32F-NEXT:    ld.w	$ra, $sp, 12  
 ; LA32F-NEXT:    addi.w	$sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -69,12 +69,12 @@ define double @frsqrt_f64(double %a) nounwind {
 ; LA32F-FRECIPE:       # %bb.0:
 ; LA32F-FRECIPE-NEXT:    addi.w	$sp, $sp, -16
 ; LA32F-FRECIPE-NEXT:    st.w	$ra, $sp, 12                    # 4-byte Folded Spill
-; LA32F-FRECIPE-NEXT:    bl	%plt(sqrt)
+; LA32F-FRECIPE-NEXT:    bl	sqrt
 ; LA32F-FRECIPE-NEXT:    move	$a2, $a0
 ; LA32F-FRECIPE-NEXT:    move	$a3, $a1
 ; LA32F-FRECIPE-NEXT:    lu12i.w	$a1, 261888
 ; LA32F-FRECIPE-NEXT:    move	$a0, $zero
-; LA32F-FRECIPE-NEXT:    bl	%plt(__divdf3)
+; LA32F-FRECIPE-NEXT:    bl	__divdf3
 ; LA32F-FRECIPE-NEXT:    ld.w	$ra, $sp, 12                    # 4-byte Folded Reload
 ; LA32F-FRECIPE-NEXT:    addi.w	$sp, $sp, 16
 ; LA32F-FRECIPE-NEXT:    ret
@@ -117,21 +117,21 @@ define double @sqrt_simplify_before_recip_3_uses_f64(double %x, ptr %p1, ptr %p2
 ; LA32F-NEXT:    st.w	$s4, $sp, 4                     # 4-byte Folded Spill
 ; LA32F-NEXT:    move	$fp, $a3
 ; LA32F-NEXT:    move	$s0, $a2
-; LA32F-NEXT:    bl	%plt(sqrt)
+; LA32F-NEXT:    bl	sqrt
 ; LA32F-NEXT:    move	$s1, $a0
 ; LA32F-NEXT:    move	$s2, $a1
 ; LA32F-NEXT:    lu12i.w	$a1, 261888
 ; LA32F-NEXT:    move	$a0, $zero
 ; LA32F-NEXT:    move	$a2, $s1
 ; LA32F-NEXT:    move	$a3, $s2
-; LA32F-NEXT:    bl	%plt(__divdf3)
+; LA32F-NEXT:    bl	__divdf3
 ; LA32F-NEXT:    move	$s3, $a0
 ; LA32F-NEXT:    move	$s4, $a1
 ; LA32F-NEXT:    lu12i.w	$a1, 263248
 ; LA32F-NEXT:    move	$a0, $zero
 ; LA32F-NEXT:    move	$a2, $s1
 ; LA32F-NEXT:    move	$a3, $s2
-; LA32F-NEXT:    bl	%plt(__divdf3)
+; LA32F-NEXT:    bl	__divdf3
 ; LA32F-NEXT:    st.w	$s3, $s0, 0
 ; LA32F-NEXT:    st.w	$s4, $s0, 4
 ; LA32F-NEXT:    st.w	$a0, $fp, 0
@@ -160,21 +160,21 @@ define double @sqrt_simplify_before_recip_3_uses_f64(double %x, ptr %p1, ptr %p2
 ; LA32F-FRECIPE-NEXT:    st.w	$s4, $sp, 4                     # 4-byte Folded Spill
 ; LA32F-FRECIPE-NEXT:    move	$fp, $a3
 ; LA32F-FRECIPE-NEXT:    move	$s0, $a2
-; LA32F-FRECIPE-NEXT:    bl	%plt(sqrt)
+; LA32F-FRECIPE-NEXT:    bl	sqrt
 ; LA32F-FRECIPE-NEXT:    move	$s1, $a0
 ; LA32F-FRECIPE-NEXT:    move	$s2, $a1
 ; LA32F-FRECIPE-NEXT:    lu12i.w	$a1, 261888
 ; LA32F-FRECIPE-NEXT:    move	$a0, $zero
 ; LA32F-FRECIPE-NEXT:    move	$a2, $s1
 ; LA32F-FRECIPE-NEXT:    move	$a3, $s2
-; LA32F-FRECIPE-NEXT:    bl	%plt(__divdf3)
+; LA32F-FRECIPE-NEXT:    bl	__divdf3
 ; LA32F-FRECIPE-NEXT:    move	$s3, $a0
 ; LA32F-FRECIPE-NEXT:    move	$s4, $a1
 ; LA32F-FRECIPE-NEXT:    lu12i.w	$a1, 263248
 ; LA32F-FRECIPE-NEXT:    move	$a0, $zero
 ; LA32F-FRECIPE-NEXT:    move	$a2, $s1
 ; LA32F-FRECIPE-NEXT:    move	$a3, $s2
-; LA32F-FRECIPE-NEXT:    bl	%plt(__divdf3)
+; LA32F-FRECIPE-NEXT:    bl	__divdf3
 ; LA32F-FRECIPE-NEXT:    st.w	$s3, $s0, 0
 ; LA32F-FRECIPE-NEXT:    st.w	$s4, $s0, 4
 ; LA32F-FRECIPE-NEXT:    st.w	$a0, $fp, 0
@@ -247,21 +247,21 @@ define double @sqrt_simplify_before_recip_3_uses_order_f64(double %x, ptr %p1, p
 ; LA32F-NEXT:    st.w	$s4, $sp, 4                     # 4-byte Folded Spill
 ; LA32F-NEXT:    move	$fp, $a3
 ; LA32F-NEXT:    move	$s0, $a2
-; LA32F-NEXT:    bl	%plt(sqrt)
+; LA32F-NEXT:    bl	sqrt
 ; LA32F-NEXT:    move	$s1, $a0
 ; LA32F-NEXT:    move	$s2, $a1
 ; LA32F-NEXT:    lu12i.w	$a1, 263248
 ; LA32F-NEXT:    move	$a0, $zero
 ; LA32F-NEXT:    move	$a2, $s1
 ; LA32F-NEXT:    move	$a3, $s2
-; LA32F-NEXT:    bl	%plt(__divdf3)
+; LA32F-NEXT:    bl	__divdf3
 ; LA32F-NEXT:    move	$s3, $a0
 ; LA32F-NEXT:    move	$s4, $a1
 ; LA32F-NEXT:    lu12i.w	$a1, 263256
 ; LA32F-NEXT:    move	$a0, $zero
 ; LA32F-NEXT:    move	$a2, $s1
 ; LA32F-NEXT:    move	$a3, $s2
-; LA32F-NEXT:    bl	%plt(__divdf3)
+; LA32F-NEXT:    bl	__divdf3
 ; LA32F-NEXT:    st.w	$s3, $s0, 0
 ; LA32F-NEXT:    st.w	$s4, $s0, 4
 ; LA32F-NEXT:    st.w	$a0, $fp, 0
@@ -290,21 +290,21 @@ define double @sqrt_simplify_before_recip_3_uses_order_f64(double %x, ptr %p1, p
 ; LA32F-FRECIPE-NEXT:    st.w	$s4, $sp, 4                     # 4-byte Folded Spill
 ; LA32F-FRECIPE-NEXT:    move	$fp, $a3
 ; LA32F-FRECIPE-NEXT:    move	$s0, $a2
-; LA32F-FRECIPE-NEXT:    bl	%plt(sqrt)
+; LA32F-FRECIPE-NEXT:    bl	sqrt
 ; LA32F-FRECIPE-NEXT:    move	$s1, $a0
 ; LA32F-FRECIPE-NEXT:    move	$s2, $a1
 ; LA32F-FRECIPE-NEXT:    lu12i.w	$a1, 263248
 ; LA32F-FRECIPE-NEXT:    move	$a0, $zero
 ; LA32F-FRECIPE-NEXT:    move	$a2, $s1
 ; LA32F-FRECIPE-NEXT:    move	$a3, $s2
-; LA32F-FRECIPE-NEXT:    bl	%plt(__divdf3)
+; LA32F-FRECIPE-NEXT:    bl	__divdf3
 ; LA32F-FRECIPE-NEXT:    move	$s3, $a0
 ; LA32F-FRECIPE-NEXT:    move	$s4, $a1
 ; LA32F-FRECIPE-NEXT:    lu12i.w	$a1, 263256
 ; LA32F-FRECIPE-NEXT:    move	$a0, $zero
 ; LA32F-FRECIPE-NEXT:    move	$a2, $s1
 ; LA32F-FRECIPE-NEXT:    move	$a3, $s2
-; LA32F-FRECIPE-NEXT:    bl	%plt(__divdf3)
+; LA32F-FRECIPE-NEXT:    bl	__divdf3
 ; LA32F-FRECIPE-NEXT:    st.w	$s3, $s0, 0
 ; LA32F-FRECIPE-NEXT:    st.w	$s4, $s0, 4
 ; LA32F-FRECIPE-NEXT:    st.w	$a0, $fp, 0
@@ -384,28 +384,28 @@ define double @sqrt_simplify_before_recip_4_uses_f64(double %x, ptr %p1, ptr %p2
 ; LA32F-NEXT:    move	$fp, $a4
 ; LA32F-NEXT:    move	$s0, $a3
 ; LA32F-NEXT:    move	$s1, $a2
-; LA32F-NEXT:    bl	%plt(sqrt)
+; LA32F-NEXT:    bl	sqrt
 ; LA32F-NEXT:    move	$s2, $a0
 ; LA32F-NEXT:    move	$s3, $a1
 ; LA32F-NEXT:    lu12i.w	$a1, 261888
 ; LA32F-NEXT:    move	$a0, $zero
 ; LA32F-NEXT:    move	$a2, $s2
 ; LA32F-NEXT:    move	$a3, $s3
-; LA32F-NEXT:    bl	%plt(__divdf3)
+; LA32F-NEXT:    bl	__divdf3
 ; LA32F-NEXT:    move	$s4, $a0
 ; LA32F-NEXT:    move	$s5, $a1
 ; LA32F-NEXT:    lu12i.w	$a1, 263248
 ; LA32F-NEXT:    move	$a0, $zero
 ; LA32F-NEXT:    move	$a2, $s2
 ; LA32F-NEXT:    move	$a3, $s3
-; LA32F-NEXT:    bl	%plt(__divdf3)
+; LA32F-NEXT:    bl	__divdf3
 ; LA32F-NEXT:    move	$s6, $a0
 ; LA32F-NEXT:    move	$s7, $a1
 ; LA32F-NEXT:    lu12i.w	$a1, 263256
 ; LA32F-NEXT:    move	$a0, $zero
 ; LA32F-NEXT:    move	$a2, $s2
 ; LA32F-NEXT:    move	$a3, $s3
-; LA32F-NEXT:    bl	%plt(__divdf3)
+; LA32F-NEXT:    bl	__divdf3
 ; LA32F-NEXT:    st.w	$s4, $s1, 0
 ; LA32F-NEXT:    st.w	$s5, $s1, 4
 ; LA32F-NEXT:    st.w	$s6, $s0, 0
@@ -443,28 +443,28 @@ define double @sqrt_simplify_before_recip_4_uses_f64(double %x, ptr %p1, ptr %p2
 ; LA32F-FRECIPE-NEXT:    move	$fp, $a4
 ; LA32F-FRECIPE-NEXT:    move	$s0, $a3
 ; LA32F-FRECIPE-NEXT:    move	$s1, $a2
-; LA32F-FRECIPE-NEXT:    bl	%plt(sqrt)
+; LA32F-FRECIPE-NEXT:    bl	sqrt
 ; LA32F-FRECIPE-NEXT:    move	$s2, $a0
 ; LA32F-FRECIPE-NEXT:    move	$s3, $a1
 ; LA32F-FRECIPE-NEXT:    lu12i.w	$a1, 261888
 ; LA32F-FRECIPE-NEXT:    move	$a0, $zero
 ; LA32F-FRECIPE-NEXT:    move	$a2, $s2
 ; LA32F-FRECIPE-NEXT:    move	$a3, $s3
-; LA32F-FRECIPE-NEXT:    bl	%plt(__divdf3)
+; LA32F-FRECIPE-NEXT:    bl	__divdf3
 ; LA32F-FRECIPE-NEXT:    move	$s4, $a0
 ; LA32F-FRECIPE-NEXT:    move	$s5, $a1
 ; LA32F-FRECIPE-NEXT:    lu12i.w	$a1, 263248
 ; LA32F-FRECIPE-NEXT:    move	$a0, $zero
 ; LA32F-FRECIPE-NEXT:    move	$a2, $s2
 ; LA32F-FRECIPE-NEXT:    move	$a3, $s3
-; LA32F-FRECIPE-NEXT:    bl	%plt(__divdf3)
+; LA32F-FRECIPE-NEXT:    bl	__divdf3
 ; LA32F-FRECIPE-NEXT:    move	$s6, $a0
 ; LA32F-FRECIPE-NEXT:    move	$s7, $a1
 ; LA32F-FRECIPE-NEXT:    lu12i.w	$a1, 263256
 ; LA32F-FRECIPE-NEXT:    move	$a0, $zero
 ; LA32F-FRECIPE-NEXT:    move	$a2, $s2
 ; LA32F-FRECIPE-NEXT:    move	$a3, $s3
-; LA32F-FRECIPE-NEXT:    bl	%plt(__divdf3)
+; LA32F-FRECIPE-NEXT:    bl	__divdf3
 ; LA32F-FRECIPE-NEXT:    st.w	$s4, $s1, 0
 ; LA32F-FRECIPE-NEXT:    st.w	$s5, $s1, 4
 ; LA32F-FRECIPE-NEXT:    st.w	$s6, $s0, 0

diff  --git a/llvm/test/CodeGen/LoongArch/fsqrt.ll b/llvm/test/CodeGen/LoongArch/fsqrt.ll
index c5f02ba53039a..e0cb4d39474b1 100644
--- a/llvm/test/CodeGen/LoongArch/fsqrt.ll
+++ b/llvm/test/CodeGen/LoongArch/fsqrt.ll
@@ -36,7 +36,7 @@ define double @fsqrt_f64(double %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    addi.w $sp, $sp, -16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32F-NEXT:    bl %plt(sqrt)
+; LA32F-NEXT:    bl sqrt
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -94,12 +94,12 @@ define double @frsqrt_f64(double %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    addi.w $sp, $sp, -16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32F-NEXT:    bl %plt(sqrt)
+; LA32F-NEXT:    bl sqrt
 ; LA32F-NEXT:    move $a2, $a0
 ; LA32F-NEXT:    move $a3, $a1
 ; LA32F-NEXT:    lu12i.w $a1, 261888
 ; LA32F-NEXT:    move $a0, $zero
-; LA32F-NEXT:    bl %plt(__divdf3)
+; LA32F-NEXT:    bl __divdf3
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/intrinsic-csr-side-effects.ll b/llvm/test/CodeGen/LoongArch/intrinsic-csr-side-effects.ll
index c2f77b0a8a70e..68d2c10d0ce1f 100644
--- a/llvm/test/CodeGen/LoongArch/intrinsic-csr-side-effects.ll
+++ b/llvm/test/CodeGen/LoongArch/intrinsic-csr-side-effects.ll
@@ -19,7 +19,7 @@ define dso_local void @foo(i32 noundef signext %flag) nounwind {
 ; LA32-NEXT:    andi $a0, $a0, 1
 ; LA32-NEXT:    bnez $a0, .LBB0_4
 ; LA32-NEXT:  # %bb.3: # %if.then2
-; LA32-NEXT:    b %plt(bug)
+; LA32-NEXT:    b bug
 ; LA32-NEXT:  .LBB0_4: # %if.end3
 ; LA32-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
index b1af9c17b6018..fc393e133dd4a 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
@@ -243,7 +243,7 @@ define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 2
-; LA32-NEXT:    bl %plt(__atomic_exchange_8)
+; LA32-NEXT:    bl __atomic_exchange_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -374,7 +374,7 @@ define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 2
-; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
+; LA32-NEXT:    bl __atomic_fetch_add_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -506,7 +506,7 @@ define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 2
-; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
+; LA32-NEXT:    bl __atomic_fetch_sub_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -649,7 +649,7 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 2
-; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
+; LA32-NEXT:    bl __atomic_fetch_nand_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -768,7 +768,7 @@ define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 2
-; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
+; LA32-NEXT:    bl __atomic_fetch_and_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -867,7 +867,7 @@ define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 2
-; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
+; LA32-NEXT:    bl __atomic_fetch_or_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -966,7 +966,7 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 2
-; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
+; LA32-NEXT:    bl __atomic_fetch_xor_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -1221,7 +1221,7 @@ define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    bl %plt(__atomic_exchange_8)
+; LA32-NEXT:    bl __atomic_exchange_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -1352,7 +1352,7 @@ define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
+; LA32-NEXT:    bl __atomic_fetch_add_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -1484,7 +1484,7 @@ define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
+; LA32-NEXT:    bl __atomic_fetch_sub_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -1627,7 +1627,7 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
+; LA32-NEXT:    bl __atomic_fetch_nand_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -1746,7 +1746,7 @@ define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
+; LA32-NEXT:    bl __atomic_fetch_and_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -1845,7 +1845,7 @@ define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
+; LA32-NEXT:    bl __atomic_fetch_or_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -1944,7 +1944,7 @@ define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
+; LA32-NEXT:    bl __atomic_fetch_xor_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -2199,7 +2199,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 4
-; LA32-NEXT:    bl %plt(__atomic_exchange_8)
+; LA32-NEXT:    bl __atomic_exchange_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -2330,7 +2330,7 @@ define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 4
-; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
+; LA32-NEXT:    bl __atomic_fetch_add_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -2462,7 +2462,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 4
-; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
+; LA32-NEXT:    bl __atomic_fetch_sub_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -2605,7 +2605,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 4
-; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
+; LA32-NEXT:    bl __atomic_fetch_nand_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -2724,7 +2724,7 @@ define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 4
-; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
+; LA32-NEXT:    bl __atomic_fetch_and_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -2823,7 +2823,7 @@ define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 4
-; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
+; LA32-NEXT:    bl __atomic_fetch_or_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -2922,7 +2922,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 4
-; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
+; LA32-NEXT:    bl __atomic_fetch_xor_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -3177,7 +3177,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_exchange_8)
+; LA32-NEXT:    bl __atomic_exchange_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -3308,7 +3308,7 @@ define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
+; LA32-NEXT:    bl __atomic_fetch_add_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -3440,7 +3440,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
+; LA32-NEXT:    bl __atomic_fetch_sub_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -3583,7 +3583,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
+; LA32-NEXT:    bl __atomic_fetch_nand_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -3702,7 +3702,7 @@ define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
+; LA32-NEXT:    bl __atomic_fetch_and_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -3801,7 +3801,7 @@ define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
+; LA32-NEXT:    bl __atomic_fetch_or_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -3900,7 +3900,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ori $a3, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
+; LA32-NEXT:    bl __atomic_fetch_xor_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -4155,7 +4155,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_exchange_8)
+; LA32-NEXT:    bl __atomic_exchange_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -4286,7 +4286,7 @@ define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
+; LA32-NEXT:    bl __atomic_fetch_add_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -4418,7 +4418,7 @@ define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
+; LA32-NEXT:    bl __atomic_fetch_sub_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -4561,7 +4561,7 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
+; LA32-NEXT:    bl __atomic_fetch_nand_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -4680,7 +4680,7 @@ define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
+; LA32-NEXT:    bl __atomic_fetch_and_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -4779,7 +4779,7 @@ define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
+; LA32-NEXT:    bl __atomic_fetch_or_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -4878,7 +4878,7 @@ define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
+; LA32-NEXT:    bl __atomic_fetch_xor_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/call.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/call.ll
index 653af9f490905..45e55765a62a4 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/call.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/call.ll
@@ -9,7 +9,7 @@ define i32 @test_call_external(i32 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(external_function)
+; LA32-NEXT:    bl external_function
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -46,7 +46,7 @@ define i32 @test_call_defined(i32 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(defined_function)
+; LA32-NEXT:    bl defined_function
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
index 8d08942c314aa..14682d62d7d31 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
@@ -83,7 +83,7 @@ define double @convert_i64_to_double(i64 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__floatdidf)
+; LA32-NEXT:    bl __floatdidf
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -146,7 +146,7 @@ define i64 @convert_double_to_i64(double %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__fixdfdi)
+; LA32-NEXT:    bl __fixdfdi
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -165,7 +165,7 @@ define i64 @convert_double_to_u64(double %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__fixunsdfdi)
+; LA32-NEXT:    bl __fixunsdfdi
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -252,7 +252,7 @@ define double @convert_u64_to_double(i64 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__floatundidf)
+; LA32-NEXT:    bl __floatundidf
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
index e4cfd7a5fec30..ac1e1df5395c6 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
@@ -93,7 +93,7 @@ define i64 @convert_float_to_i64(float %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    addi.w $sp, $sp, -16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32F-NEXT:    bl %plt(__fixsfdi)
+; LA32F-NEXT:    bl __fixsfdi
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -102,7 +102,7 @@ define i64 @convert_float_to_i64(float %a) nounwind {
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    addi.w $sp, $sp, -16
 ; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32D-NEXT:    bl %plt(__fixsfdi)
+; LA32D-NEXT:    bl __fixsfdi
 ; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32D-NEXT:    addi.w $sp, $sp, 16
 ; LA32D-NEXT:    ret
@@ -247,7 +247,7 @@ define i64 @convert_float_to_u64(float %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    addi.w $sp, $sp, -16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32F-NEXT:    bl %plt(__fixunssfdi)
+; LA32F-NEXT:    bl __fixunssfdi
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -256,7 +256,7 @@ define i64 @convert_float_to_u64(float %a) nounwind {
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    addi.w $sp, $sp, -16
 ; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32D-NEXT:    bl %plt(__fixunssfdi)
+; LA32D-NEXT:    bl __fixunssfdi
 ; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32D-NEXT:    addi.w $sp, $sp, 16
 ; LA32D-NEXT:    ret
@@ -389,7 +389,7 @@ define float @convert_i64_to_float(i64 %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    addi.w $sp, $sp, -16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32F-NEXT:    bl %plt(__floatdisf)
+; LA32F-NEXT:    bl __floatdisf
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -398,7 +398,7 @@ define float @convert_i64_to_float(i64 %a) nounwind {
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    addi.w $sp, $sp, -16
 ; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32D-NEXT:    bl %plt(__floatdisf)
+; LA32D-NEXT:    bl __floatdisf
 ; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32D-NEXT:    addi.w $sp, $sp, 16
 ; LA32D-NEXT:    ret
@@ -534,7 +534,7 @@ define float @convert_u64_to_float(i64 %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    addi.w $sp, $sp, -16
 ; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32F-NEXT:    bl %plt(__floatundisf)
+; LA32F-NEXT:    bl __floatundisf
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
 ; LA32F-NEXT:    ret
@@ -543,7 +543,7 @@ define float @convert_u64_to_float(i64 %a) nounwind {
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    addi.w $sp, $sp, -16
 ; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32D-NEXT:    bl %plt(__floatundisf)
+; LA32D-NEXT:    bl __floatundisf
 ; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32D-NEXT:    addi.w $sp, $sp, 16
 ; LA32D-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
index 9ef74e4960ce7..78cabd37c0ad9 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
@@ -58,7 +58,7 @@ define i64 @load_acquire_i64(ptr %ptr) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    ori $a1, $zero, 2
-; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    bl __atomic_load_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -114,7 +114,7 @@ define double @load_acquire_double(ptr %ptr) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    ori $a1, $zero, 2
-; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    bl __atomic_load_8
 ; LA32-NEXT:    st.w $a1, $sp, 4
 ; LA32-NEXT:    st.w $a0, $sp, 0
 ; LA32-NEXT:    fld.d $fa0, $sp, 0
@@ -182,7 +182,7 @@ define i64 @load_unordered_i64(ptr %ptr) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    bl __atomic_load_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -233,7 +233,7 @@ define double @load_unordered_double(ptr %ptr) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    bl __atomic_load_8
 ; LA32-NEXT:    st.w $a1, $sp, 4
 ; LA32-NEXT:    st.w $a0, $sp, 0
 ; LA32-NEXT:    fld.d $fa0, $sp, 0
@@ -300,7 +300,7 @@ define i64 @load_monotonic_i64(ptr %ptr) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    bl __atomic_load_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -351,7 +351,7 @@ define double @load_monotonic_double(ptr %ptr) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    bl __atomic_load_8
 ; LA32-NEXT:    st.w $a1, $sp, 4
 ; LA32-NEXT:    st.w $a0, $sp, 0
 ; LA32-NEXT:    fld.d $fa0, $sp, 0
@@ -424,7 +424,7 @@ define i64 @load_seq_cst_i64(ptr %ptr) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    ori $a1, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    bl __atomic_load_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -480,7 +480,7 @@ define double @load_seq_cst_double(ptr %ptr) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    ori $a1, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    bl __atomic_load_8
 ; LA32-NEXT:    st.w $a1, $sp, 4
 ; LA32-NEXT:    st.w $a0, $sp, 0
 ; LA32-NEXT:    fld.d $fa0, $sp, 0
@@ -553,7 +553,7 @@ define void @store_release_i64(ptr %ptr, i64 %v) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    bl %plt(__atomic_store_8)
+; LA32-NEXT:    bl __atomic_store_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -609,7 +609,7 @@ define void @store_release_double(ptr %ptr, double %v) {
 ; LA32-NEXT:    ld.w $a1, $sp, 0
 ; LA32-NEXT:    ld.w $a2, $sp, 4
 ; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    bl %plt(__atomic_store_8)
+; LA32-NEXT:    bl __atomic_store_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -673,7 +673,7 @@ define void @store_unordered_i64(ptr %ptr, i64 %v) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_store_8)
+; LA32-NEXT:    bl __atomic_store_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -727,7 +727,7 @@ define void @store_unordered_double(ptr %ptr, double %v) {
 ; LA32-NEXT:    ld.w $a1, $sp, 0
 ; LA32-NEXT:    ld.w $a2, $sp, 4
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_store_8)
+; LA32-NEXT:    bl __atomic_store_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -791,7 +791,7 @@ define void @store_monotonic_i64(ptr %ptr, i64 %v) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_store_8)
+; LA32-NEXT:    bl __atomic_store_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -845,7 +845,7 @@ define void @store_monotonic_double(ptr %ptr, double %v) {
 ; LA32-NEXT:    ld.w $a1, $sp, 0
 ; LA32-NEXT:    ld.w $a2, $sp, 4
 ; LA32-NEXT:    move $a3, $zero
-; LA32-NEXT:    bl %plt(__atomic_store_8)
+; LA32-NEXT:    bl __atomic_store_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -919,7 +919,7 @@ define void @store_seq_cst_i64(ptr %ptr, i64 %v) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    ori $a3, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_store_8)
+; LA32-NEXT:    bl __atomic_store_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -977,7 +977,7 @@ define void @store_seq_cst_double(ptr %ptr, double %v) {
 ; LA32-NEXT:    ld.w $a1, $sp, 0
 ; LA32-NEXT:    ld.w $a2, $sp, 4
 ; LA32-NEXT:    ori $a3, $zero, 5
-; LA32-NEXT:    bl %plt(__atomic_store_8)
+; LA32-NEXT:    bl __atomic_store_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
index 99824f6d7718e..d88f2951cb8a9 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
@@ -258,7 +258,7 @@ define i64 @sdiv_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    .cfi_def_cfa_offset 16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
-; LA32-NEXT:    bl %plt(__divdi3)
+; LA32-NEXT:    bl __divdi3
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -274,7 +274,7 @@ define i64 @sdiv_i64(i64 %a, i64 %b) {
 ; LA32-TRAP-NEXT:    .cfi_def_cfa_offset 16
 ; LA32-TRAP-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-TRAP-NEXT:    .cfi_offset 1, -4
-; LA32-TRAP-NEXT:    bl %plt(__divdi3)
+; LA32-TRAP-NEXT:    bl __divdi3
 ; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
 ; LA32-TRAP-NEXT:    ret
@@ -542,7 +542,7 @@ define i64 @udiv_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    .cfi_def_cfa_offset 16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
-; LA32-NEXT:    bl %plt(__udivdi3)
+; LA32-NEXT:    bl __udivdi3
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -558,7 +558,7 @@ define i64 @udiv_i64(i64 %a, i64 %b) {
 ; LA32-TRAP-NEXT:    .cfi_def_cfa_offset 16
 ; LA32-TRAP-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-TRAP-NEXT:    .cfi_offset 1, -4
-; LA32-TRAP-NEXT:    bl %plt(__udivdi3)
+; LA32-TRAP-NEXT:    bl __udivdi3
 ; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
 ; LA32-TRAP-NEXT:    ret
@@ -830,7 +830,7 @@ define i64 @srem_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    .cfi_def_cfa_offset 16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
-; LA32-NEXT:    bl %plt(__moddi3)
+; LA32-NEXT:    bl __moddi3
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -846,7 +846,7 @@ define i64 @srem_i64(i64 %a, i64 %b) {
 ; LA32-TRAP-NEXT:    .cfi_def_cfa_offset 16
 ; LA32-TRAP-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-TRAP-NEXT:    .cfi_offset 1, -4
-; LA32-TRAP-NEXT:    bl %plt(__moddi3)
+; LA32-TRAP-NEXT:    bl __moddi3
 ; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
 ; LA32-TRAP-NEXT:    ret
@@ -1118,7 +1118,7 @@ define i64 @urem_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    .cfi_def_cfa_offset 16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
-; LA32-NEXT:    bl %plt(__umoddi3)
+; LA32-NEXT:    bl __umoddi3
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -1134,7 +1134,7 @@ define i64 @urem_i64(i64 %a, i64 %b) {
 ; LA32-TRAP-NEXT:    .cfi_def_cfa_offset 16
 ; LA32-TRAP-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-TRAP-NEXT:    .cfi_offset 1, -4
-; LA32-TRAP-NEXT:    bl %plt(__umoddi3)
+; LA32-TRAP-NEXT:    bl __umoddi3
 ; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
 ; LA32-TRAP-NEXT:    ret
@@ -1164,7 +1164,7 @@ define signext i32 @pr107414(i32 signext %x) {
 ; LA32-NEXT:    lu12i.w $a0, -266831
 ; LA32-NEXT:    ori $a0, $a0, 3337
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    bl %plt(__divdi3)
+; LA32-NEXT:    bl __divdi3
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -1189,7 +1189,7 @@ define signext i32 @pr107414(i32 signext %x) {
 ; LA32-TRAP-NEXT:    lu12i.w $a0, -266831
 ; LA32-TRAP-NEXT:    ori $a0, $a0, 3337
 ; LA32-TRAP-NEXT:    move $a1, $zero
-; LA32-TRAP-NEXT:    bl %plt(__divdi3)
+; LA32-TRAP-NEXT:    bl __divdi3
 ; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
 ; LA32-TRAP-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
index 0f9275fda34ba..9142e718e8adc 100644
--- a/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
+++ b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
@@ -212,7 +212,7 @@ define void @test_la_tls_ld(i32 signext %n) {
 ; LA32-NEXT:  .LBB3_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    move $a0, $s0
-; LA32-NEXT:    bl %plt(__tls_get_addr)
+; LA32-NEXT:    bl __tls_get_addr
 ; LA32-NEXT:    ld.w $zero, $a0, 0
 ; LA32-NEXT:    addi.w $s1, $s1, 1
 ; LA32-NEXT:    blt $s1, $fp, .LBB3_1
@@ -388,7 +388,7 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
 ; LA32-NEXT:  .LBB5_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    move $a0, $s0
-; LA32-NEXT:    bl %plt(__tls_get_addr)
+; LA32-NEXT:    bl __tls_get_addr
 ; LA32-NEXT:    ld.w $zero, $a0, 0
 ; LA32-NEXT:    addi.w $s1, $s1, 1
 ; LA32-NEXT:    blt $s1, $fp, .LBB5_1

diff  --git a/llvm/test/CodeGen/LoongArch/numeric-reg-names.ll b/llvm/test/CodeGen/LoongArch/numeric-reg-names.ll
index 5a1158358de13..73f4dbbbdd026 100644
--- a/llvm/test/CodeGen/LoongArch/numeric-reg-names.ll
+++ b/llvm/test/CodeGen/LoongArch/numeric-reg-names.ll
@@ -17,7 +17,7 @@ define i32 @main() {
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    pcalau12i $r4, %pc_hi20(.str_1)
 ; LA32-NEXT:    addi.w $r4, $r4, %pc_lo12(.str_1)
-; LA32-NEXT:    bl %plt(printf)
+; LA32-NEXT:    bl printf
 ; LA32-NEXT:    move $r4, $r0
 ; LA32-NEXT:    ld.w $r1, $r3, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $r3, $r3, 16

diff  --git a/llvm/test/CodeGen/LoongArch/soft-fp-to-int.ll b/llvm/test/CodeGen/LoongArch/soft-fp-to-int.ll
index c429d3132f640..e7f75cddc3ec9 100644
--- a/llvm/test/CodeGen/LoongArch/soft-fp-to-int.ll
+++ b/llvm/test/CodeGen/LoongArch/soft-fp-to-int.ll
@@ -16,7 +16,7 @@ define i32 @fptosi_i32_fp128(fp128 %X) nounwind {
 ; LA32-NEXT:    st.w $a2, $sp, 12
 ; LA32-NEXT:    addi.w $a0, $sp, 8
 ; LA32-NEXT:    st.w $a1, $sp, 8
-; LA32-NEXT:    bl %plt(__fixtfsi)
+; LA32-NEXT:    bl __fixtfsi
 ; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 32
 ; LA32-NEXT:    ret
@@ -39,7 +39,7 @@ define i32 @fptosi_i32_double(double %X) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__fixdfsi)
+; LA32-NEXT:    bl __fixdfsi
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -62,7 +62,7 @@ define i32 @fptosi_i32_float(float %X) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__fixsfsi)
+; LA32-NEXT:    bl __fixsfsi
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -94,7 +94,7 @@ define i64 @fptosi_i64_fp128(fp128 %X) nounwind {
 ; LA32-NEXT:    st.w $a2, $sp, 4
 ; LA32-NEXT:    addi.w $a0, $sp, 0
 ; LA32-NEXT:    st.w $a1, $sp, 0
-; LA32-NEXT:    bl %plt(__fixtfdi)
+; LA32-NEXT:    bl __fixtfdi
 ; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 32
 ; LA32-NEXT:    ret
@@ -117,7 +117,7 @@ define i64 @fptosi_i64_double(double %X) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__fixdfdi)
+; LA32-NEXT:    bl __fixdfdi
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -140,7 +140,7 @@ define i64 @fptosi_i64_float(float %X) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl %plt(__fixsfdi)
+; LA32-NEXT:    bl __fixsfdi
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/spill-reload-cfr.ll b/llvm/test/CodeGen/LoongArch/spill-reload-cfr.ll
index 9cb364e05c326..66640caa2e5ec 100644
--- a/llvm/test/CodeGen/LoongArch/spill-reload-cfr.ll
+++ b/llvm/test/CodeGen/LoongArch/spill-reload-cfr.ll
@@ -18,7 +18,7 @@ define i1 @load_store_fcc_reg(float %a, i1 %c) {
 ; LA32-NEXT:    .cfi_offset 56, -16
 ; LA32-NEXT:    move $fp, $a0
 ; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    bl %plt(foo)
+; LA32-NEXT:    bl foo
 ; LA32-NEXT:    movgr2fr.w $fa0, $zero
 ; LA32-NEXT:    fcmp.cult.s $fcc0, $fa0, $fs0
 ; LA32-NEXT:    bcnez $fcc0, .LBB0_2

diff  --git a/llvm/test/CodeGen/LoongArch/stack-realignment-with-variable-sized-objects.ll b/llvm/test/CodeGen/LoongArch/stack-realignment-with-variable-sized-objects.ll
index f6c6c5aa225c6..fe7a8f8f2d6bd 100644
--- a/llvm/test/CodeGen/LoongArch/stack-realignment-with-variable-sized-objects.ll
+++ b/llvm/test/CodeGen/LoongArch/stack-realignment-with-variable-sized-objects.ll
@@ -26,7 +26,7 @@ define void @caller(i32 %n) {
 ; LA32-NEXT:    sub.w $a0, $sp, $a0
 ; LA32-NEXT:    move $sp, $a0
 ; LA32-NEXT:    addi.w $a1, $s8, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    addi.w $sp, $fp, -64
 ; LA32-NEXT:    ld.w $s8, $sp, 52 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 56 # 4-byte Folded Reload

diff  --git a/llvm/test/CodeGen/LoongArch/stack-realignment.ll b/llvm/test/CodeGen/LoongArch/stack-realignment.ll
index 6d4210eb5b647..0645339358b64 100644
--- a/llvm/test/CodeGen/LoongArch/stack-realignment.ll
+++ b/llvm/test/CodeGen/LoongArch/stack-realignment.ll
@@ -19,7 +19,7 @@ define void @caller32() {
 ; LA32-NEXT:    .cfi_def_cfa 22, 0
 ; LA32-NEXT:    bstrins.w $sp, $zero, 4, 0
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    addi.w $sp, $fp, -32
 ; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
@@ -58,7 +58,7 @@ define void @caller_no_realign32() "no-realign-stack" {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -93,7 +93,7 @@ define void @caller64() {
 ; LA32-NEXT:    .cfi_def_cfa 22, 0
 ; LA32-NEXT:    bstrins.w $sp, $zero, 5, 0
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    addi.w $sp, $fp, -64
 ; LA32-NEXT:    ld.w $fp, $sp, 56 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 60 # 4-byte Folded Reload
@@ -132,7 +132,7 @@ define void @caller_no_realign64() "no-realign-stack" {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -167,7 +167,7 @@ define void @caller128() {
 ; LA32-NEXT:    .cfi_def_cfa 22, 0
 ; LA32-NEXT:    bstrins.w $sp, $zero, 6, 0
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    addi.w $sp, $fp, -128
 ; LA32-NEXT:    ld.w $fp, $sp, 120 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 124 # 4-byte Folded Reload
@@ -206,7 +206,7 @@ define void @caller_no_realign128() "no-realign-stack" {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -241,7 +241,7 @@ define void @caller256() {
 ; LA32-NEXT:    .cfi_def_cfa 22, 0
 ; LA32-NEXT:    bstrins.w $sp, $zero, 7, 0
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    addi.w $sp, $fp, -256
 ; LA32-NEXT:    ld.w $fp, $sp, 248 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 252 # 4-byte Folded Reload
@@ -280,7 +280,7 @@ define void @caller_no_realign256() "no-realign-stack" {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -315,7 +315,7 @@ define void @caller512() {
 ; LA32-NEXT:    .cfi_def_cfa 22, 0
 ; LA32-NEXT:    bstrins.w $sp, $zero, 8, 0
 ; LA32-NEXT:    addi.w $a0, $sp, 512
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    addi.w $sp, $fp, -1024
 ; LA32-NEXT:    ld.w $fp, $sp, 1016 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 1020 # 4-byte Folded Reload
@@ -354,7 +354,7 @@ define void @caller_no_realign512() "no-realign-stack" {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -390,7 +390,7 @@ define void @caller1024() {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    bstrins.w $sp, $zero, 9, 0
 ; LA32-NEXT:    addi.w $a0, $sp, 1024
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    addi.w $sp, $fp, -2048
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ld.w $fp, $sp, 2024 # 4-byte Folded Reload
@@ -432,7 +432,7 @@ define void @caller_no_realign1024() "no-realign-stack" {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -470,7 +470,7 @@ define void @caller2048() {
 ; LA32-NEXT:    bstrins.w $sp, $zero, 10, 0
 ; LA32-NEXT:    ori $a0, $zero, 2048
 ; LA32-NEXT:    add.w $a0, $sp, $a0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    lu12i.w $a0, 1
 ; LA32-NEXT:    sub.w $sp, $fp, $a0
 ; LA32-NEXT:    addi.w $sp, $sp, 2032
@@ -518,7 +518,7 @@ define void @caller_no_realign2048() "no-realign-stack" {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -557,7 +557,7 @@ define void @caller4096() {
 ; LA32-NEXT:    bstrins.w $sp, $zero, 11, 0
 ; LA32-NEXT:    lu12i.w $a0, 1
 ; LA32-NEXT:    add.w $a0, $sp, $a0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    lu12i.w $a0, 2
 ; LA32-NEXT:    sub.w $sp, $fp, $a0
 ; LA32-NEXT:    lu12i.w $a0, 1
@@ -608,7 +608,7 @@ define void @caller_no_realign4096() "no-realign-stack" {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    addi.w $a0, $sp, 0
-; LA32-NEXT:    bl %plt(callee)
+; LA32-NEXT:    bl callee
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/statepoint-call-lowering.ll b/llvm/test/CodeGen/LoongArch/statepoint-call-lowering.ll
index 1926dbd423153..a037282efa6ad 100644
--- a/llvm/test/CodeGen/LoongArch/statepoint-call-lowering.ll
+++ b/llvm/test/CodeGen/LoongArch/statepoint-call-lowering.ll
@@ -19,7 +19,7 @@ define i1 @test_i1_return() nounwind gc "statepoint-example" {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    bl %plt(return_i1)
+; CHECK-NEXT:    bl return_i1
 ; CHECK-NEXT:  .Ltmp0:
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
@@ -35,7 +35,7 @@ define i32 @test_i32_return() nounwind gc "statepoint-example" {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    bl %plt(return_i32)
+; CHECK-NEXT:    bl return_i32
 ; CHECK-NEXT:  .Ltmp1:
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
@@ -51,7 +51,7 @@ define ptr @test_i32ptr_return() nounwind gc "statepoint-example" {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    bl %plt(return_i32ptr)
+; CHECK-NEXT:    bl return_i32ptr
 ; CHECK-NEXT:  .Ltmp2:
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
@@ -67,7 +67,7 @@ define float @test_float_return() nounwind gc "statepoint-example" {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    bl %plt(return_float)
+; CHECK-NEXT:    bl return_float
 ; CHECK-NEXT:  .Ltmp3:
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
@@ -83,7 +83,7 @@ define %struct @test_struct_return() nounwind gc "statepoint-example" {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    bl %plt(return_struct)
+; CHECK-NEXT:    bl return_struct
 ; CHECK-NEXT:  .Ltmp4:
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
@@ -100,7 +100,7 @@ define i1 @test_relocate(ptr addrspace(1) %a) nounwind gc "statepoint-example" {
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; CHECK-NEXT:    st.d $a0, $sp, 0
-; CHECK-NEXT:    bl %plt(return_i1)
+; CHECK-NEXT:    bl return_i1
 ; CHECK-NEXT:  .Ltmp5:
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
@@ -119,7 +119,7 @@ define void @test_void_vararg() nounwind gc "statepoint-example" {
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; CHECK-NEXT:    ori $a0, $zero, 42
 ; CHECK-NEXT:    ori $a1, $zero, 43
-; CHECK-NEXT:    bl %plt(varargf)
+; CHECK-NEXT:    bl varargf
 ; CHECK-NEXT:  .Ltmp6:
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
@@ -158,7 +158,7 @@ define i1 @test_cross_bb(ptr addrspace(1) %a, i1 %external_cond) nounwind gc "st
 ; CHECK-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
 ; CHECK-NEXT:    andi $fp, $a1, 1
 ; CHECK-NEXT:    st.d $a0, $sp, 8
-; CHECK-NEXT:    bl %plt(return_i1)
+; CHECK-NEXT:    bl return_i1
 ; CHECK-NEXT:  .Ltmp8:
 ; CHECK-NEXT:    beqz $fp, .LBB8_2
 ; CHECK-NEXT:  # %bb.1: # %left
@@ -207,7 +207,7 @@ define void @test_attributes(ptr byval(%struct2) %s) nounwind gc "statepoint-exa
 ; CHECK-NEXT:    ori $a2, $zero, 17
 ; CHECK-NEXT:    addi.d $a3, $sp, 0
 ; CHECK-NEXT:    move $a1, $zero
-; CHECK-NEXT:    bl %plt(consume_attributes)
+; CHECK-NEXT:    bl consume_attributes
 ; CHECK-NEXT:  .Ltmp9:
 ; CHECK-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 32

diff  --git a/llvm/test/CodeGen/LoongArch/tls-models.ll b/llvm/test/CodeGen/LoongArch/tls-models.ll
index ffd480a4bd840..50d994fb85327 100644
--- a/llvm/test/CodeGen/LoongArch/tls-models.ll
+++ b/llvm/test/CodeGen/LoongArch/tls-models.ll
@@ -30,7 +30,7 @@ define ptr @f1() nounwind {
 ; LA32PIC-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32PIC-NEXT:    pcalau12i $a0, %gd_pc_hi20(unspecified)
 ; LA32PIC-NEXT:    addi.w $a0, $a0, %got_pc_lo12(unspecified)
-; LA32PIC-NEXT:    bl %plt(__tls_get_addr)
+; LA32PIC-NEXT:    bl __tls_get_addr
 ; LA32PIC-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32PIC-NEXT:    addi.w $sp, $sp, 16
 ; LA32PIC-NEXT:    ret
@@ -144,7 +144,7 @@ define ptr @f2() nounwind {
 ; LA32PIC-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32PIC-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
 ; LA32PIC-NEXT:    addi.w $a0, $a0, %got_pc_lo12(ld)
-; LA32PIC-NEXT:    bl %plt(__tls_get_addr)
+; LA32PIC-NEXT:    bl __tls_get_addr
 ; LA32PIC-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32PIC-NEXT:    addi.w $sp, $sp, 16
 ; LA32PIC-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll b/llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll
index 5ab20b83b5f82..925fdf3d60646 100644
--- a/llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll
+++ b/llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll
@@ -15,7 +15,7 @@ define void @t0(ptr %out, ptr %in) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    ori $a2, $zero, 16
-; LA32-NEXT:    bl %plt(memcpy)
+; LA32-NEXT:    bl memcpy
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret

diff  --git a/llvm/test/CodeGen/LoongArch/vector-fp-imm.ll b/llvm/test/CodeGen/LoongArch/vector-fp-imm.ll
index d043eefb96a50..80c17a7c67efb 100644
--- a/llvm/test/CodeGen/LoongArch/vector-fp-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/vector-fp-imm.ll
@@ -379,14 +379,14 @@ define void @test_d2(ptr %P, ptr %S) nounwind {
 ; LA32F-NEXT:    move $a0, $a2
 ; LA32F-NEXT:    move $a1, $a4
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    move $s2, $a0
 ; LA32F-NEXT:    move $s3, $a1
 ; LA32F-NEXT:    lu12i.w $a3, 262144
 ; LA32F-NEXT:    move $a0, $fp
 ; LA32F-NEXT:    move $a1, $s0
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    st.w $a0, $s1, 8
 ; LA32F-NEXT:    st.w $a1, $s1, 12
 ; LA32F-NEXT:    st.w $s2, $s1, 0
@@ -484,28 +484,28 @@ define void @test_d4(ptr %P, ptr %S) nounwind {
 ; LA32F-NEXT:    move $a0, $a2
 ; LA32F-NEXT:    move $a1, $a4
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    move $s6, $a0
 ; LA32F-NEXT:    move $s7, $a1
 ; LA32F-NEXT:    lu12i.w $a3, 262144
 ; LA32F-NEXT:    move $a0, $s3
 ; LA32F-NEXT:    move $a1, $s4
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    move $s3, $a0
 ; LA32F-NEXT:    move $s4, $a1
 ; LA32F-NEXT:    lu12i.w $a3, 262272
 ; LA32F-NEXT:    move $a0, $s1
 ; LA32F-NEXT:    move $a1, $s2
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    move $s1, $a0
 ; LA32F-NEXT:    move $s2, $a1
 ; LA32F-NEXT:    lu12i.w $a3, 262400
 ; LA32F-NEXT:    move $a0, $fp
 ; LA32F-NEXT:    move $a1, $s0
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    st.w $a0, $s5, 24
 ; LA32F-NEXT:    st.w $a1, $s5, 28
 ; LA32F-NEXT:    st.w $s1, $s5, 16
@@ -660,7 +660,7 @@ define void @test_d8(ptr %P, ptr %S) nounwind {
 ; LA32F-NEXT:    move $a0, $a2
 ; LA32F-NEXT:    move $a1, $a4
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    st.w $a0, $sp, 40 # 4-byte Folded Spill
 ; LA32F-NEXT:    st.w $a1, $sp, 36 # 4-byte Folded Spill
 ; LA32F-NEXT:    lu12i.w $a3, 262144
@@ -668,7 +668,7 @@ define void @test_d8(ptr %P, ptr %S) nounwind {
 ; LA32F-NEXT:    move $a1, $s0
 ; LA32F-NEXT:    move $a2, $zero
 ; LA32F-NEXT:    move $s0, $a3
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    st.w $a0, $sp, 24 # 4-byte Folded Spill
 ; LA32F-NEXT:    st.w $a1, $sp, 20 # 4-byte Folded Spill
 ; LA32F-NEXT:    lu12i.w $s7, 262272
@@ -676,42 +676,42 @@ define void @test_d8(ptr %P, ptr %S) nounwind {
 ; LA32F-NEXT:    move $a1, $s2
 ; LA32F-NEXT:    move $a2, $zero
 ; LA32F-NEXT:    move $a3, $s7
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    st.w $a0, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    move $s2, $a1
 ; LA32F-NEXT:    lu12i.w $a3, 262400
 ; LA32F-NEXT:    move $a0, $s5
 ; LA32F-NEXT:    move $a1, $s6
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    move $s5, $a0
 ; LA32F-NEXT:    move $s6, $a1
 ; LA32F-NEXT:    move $a0, $s3
 ; LA32F-NEXT:    move $a1, $s4
 ; LA32F-NEXT:    move $a2, $zero
 ; LA32F-NEXT:    lu12i.w $a3, 261888
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    move $s3, $a0
 ; LA32F-NEXT:    move $s4, $a1
 ; LA32F-NEXT:    move $a0, $s8
 ; LA32F-NEXT:    ld.w $a1, $sp, 16 # 4-byte Folded Reload
 ; LA32F-NEXT:    move $a2, $zero
 ; LA32F-NEXT:    move $a3, $s0
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    move $s8, $a0
 ; LA32F-NEXT:    move $s0, $a1
 ; LA32F-NEXT:    ld.w $a0, $sp, 32 # 4-byte Folded Reload
 ; LA32F-NEXT:    ld.w $a1, $sp, 28 # 4-byte Folded Reload
 ; LA32F-NEXT:    move $a2, $zero
 ; LA32F-NEXT:    move $a3, $s7
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    move $s7, $a0
 ; LA32F-NEXT:    move $s1, $a1
 ; LA32F-NEXT:    ld.w $a0, $sp, 48 # 4-byte Folded Reload
 ; LA32F-NEXT:    ld.w $a1, $sp, 44 # 4-byte Folded Reload
 ; LA32F-NEXT:    move $a2, $zero
 ; LA32F-NEXT:    lu12i.w $a3, 262400
-; LA32F-NEXT:    bl %plt(__adddf3)
+; LA32F-NEXT:    bl __adddf3
 ; LA32F-NEXT:    st.w $a0, $fp, 56
 ; LA32F-NEXT:    st.w $a1, $fp, 60
 ; LA32F-NEXT:    st.w $s7, $fp, 48

diff  --git a/llvm/test/MC/LoongArch/Relocations/relocations.s b/llvm/test/MC/LoongArch/Relocations/relocations.s
index bd8ecd085496b..b23ae447bfd6b 100644
--- a/llvm/test/MC/LoongArch/Relocations/relocations.s
+++ b/llvm/test/MC/LoongArch/Relocations/relocations.s
@@ -23,7 +23,7 @@ bnez $t1, %b21(foo)
 
 bl %plt(foo)
 # RELOC: R_LARCH_B26
-# INSTR: bl %plt(foo)
+# INSTR: bl foo
 
 bl foo
 # RELOC: R_LARCH_B26


        


More information about the llvm-commits mailing list